From 483926a283e118590da3f9ecfa75a8a4d62143ce Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Wed, 1 Dec 2021 07:15:11 +0100 Subject: Merging upstream version 1.32.0. Signed-off-by: Daniel Baumann --- .codeclimate.yml | 11 +- .github/CODEOWNERS | 67 +- .github/data/build-matrix.json | 114 + .github/dockerfiles/Dockerfile.build_test | 9 +- .github/labeler.yml | 6 + .github/scripts/build-static-x86_64.sh | 58 - .github/scripts/build-static.sh | 61 + .github/scripts/check-updater.sh | 6 +- .github/scripts/docker-test.sh | 58 + .github/scripts/pkg-test.sh | 128 + .github/scripts/run-updater-check.sh | 9 + .github/scripts/run_install_with_dist_file.sh | 3 +- .github/workflows/build-and-install.yml | 215 - .github/workflows/build.yml | 538 + .github/workflows/checks.yml | 41 +- .github/workflows/coverity.yml | 7 +- .github/workflows/dashboard-pr.yml | 35 +- .github/workflows/docker.yml | 88 +- .github/workflows/docs.yml | 2 + .github/workflows/labeler.yml | 4 +- .github/workflows/packaging.yml | 95 +- .github/workflows/repoconfig-packages.yml | 80 + .github/workflows/review.yml | 120 +- .github/workflows/tests.yml | 9 +- .github/workflows/updater.yml | 85 - .gitignore | 14 + .gitmodules | 12 + .lgtm.yml | 2 + .travis.yml | 71 +- .travis/create_artifacts.sh | 14 +- .travis/current_build_status | 1 + .travis/trigger_artifact_build.sh | 20 + .yamllint.yml | 14 +- CHANGELOG.md | 452 +- CMakeLists.txt | 200 +- Makefile.am | 312 +- README.md | 18 +- aclk/README.md | 50 +- aclk/aclk.c | 459 +- aclk/aclk.h | 63 +- aclk/aclk_alarm_api.c | 44 + aclk/aclk_alarm_api.h | 14 + aclk/aclk_api.c | 228 + aclk/aclk_api.h | 56 + aclk/aclk_charts_api.c | 68 + aclk/aclk_charts_api.h | 20 + aclk/aclk_collector_list.c | 2 +- aclk/aclk_collector_list.h | 4 +- aclk/aclk_otp.c | 24 +- aclk/aclk_otp.h | 3 +- aclk/aclk_proxy.c | 186 + aclk/aclk_proxy.h | 22 + aclk/aclk_query.c | 174 +- aclk/aclk_query_queue.c | 84 +- aclk/aclk_query_queue.h | 37 +- aclk/aclk_rrdhost_state.h | 44 + aclk/aclk_rx_msgs.c | 193 +- aclk/aclk_rx_msgs.h | 6 +- aclk/aclk_stats.c | 93 +- aclk/aclk_stats.h | 20 +- aclk/aclk_tx_msgs.c | 122 +- aclk/aclk_tx_msgs.h | 15 +- aclk/aclk_util.c | 256 +- aclk/aclk_util.h | 45 +- aclk/https_client.c | 46 +- aclk/legacy/aclk_common.c | 213 +- aclk/legacy/aclk_common.h | 37 +- aclk/legacy/aclk_lws_https_client.c | 6 - aclk/legacy/aclk_lws_https_client.h | 2 +- aclk/legacy/aclk_lws_wss_client.c | 27 +- aclk/legacy/aclk_lws_wss_client.h | 2 +- aclk/legacy/aclk_query.c | 96 +- aclk/legacy/aclk_query.h | 19 +- aclk/legacy/aclk_rrdhost_state.h | 42 - aclk/legacy/aclk_rx_msgs.c | 75 +- aclk/legacy/aclk_rx_msgs.h | 4 +- aclk/legacy/aclk_stats.c | 134 +- aclk/legacy/aclk_stats.h | 26 +- aclk/legacy/agent_cloud_link.c | 432 +- aclk/legacy/agent_cloud_link.h | 42 +- aclk/legacy/mqtt.c | 10 +- aclk/legacy/mqtt.h | 2 +- aclk/schema-wrappers/alarm_config.cc | 147 + aclk/schema-wrappers/alarm_config.h | 69 + aclk/schema-wrappers/alarm_stream.cc | 248 + aclk/schema-wrappers/alarm_stream.h | 134 + aclk/schema-wrappers/chart_config.cc | 105 + aclk/schema-wrappers/chart_config.h | 50 + aclk/schema-wrappers/chart_stream.cc | 342 + aclk/schema-wrappers/chart_stream.h | 121 + aclk/schema-wrappers/connection.cc | 63 + aclk/schema-wrappers/connection.h | 43 + aclk/schema-wrappers/node_connection.cc | 37 + aclk/schema-wrappers/node_connection.h | 29 + aclk/schema-wrappers/node_creation.cc | 39 + aclk/schema-wrappers/node_creation.h | 31 + aclk/schema-wrappers/node_info.cc | 95 + aclk/schema-wrappers/node_info.h | 69 + 
aclk/schema-wrappers/schema_wrapper_utils.cc | 15 + aclk/schema-wrappers/schema_wrapper_utils.h | 20 + aclk/schema-wrappers/schema_wrappers.h | 17 + backends/WALKTHROUGH.md | 12 +- backends/prometheus/README.md | 72 +- .../scenarios/aclk-testing/agent-compose.yml | 7 +- .../aclk-testing/agent-valgrind-compose.yml | 7 +- .../scenarios/aclk-testing/agent_netdata.conf | 56 +- .../scenarios/aclk-testing/paho-compose.yml | 2 +- .../scenarios/aclk-testing/vernemq-compose.yml | 1 - build_external/scenarios/gaps_hi/child-compose.yml | 3 +- .../scenarios/gaps_hi/middle-compose.yml | 2 +- build_external/scenarios/gaps_lo/child-compose.yml | 1 - .../scenarios/only-agent/docker-compose.yml | 4 +- claim/README.md | 299 +- claim/claim.c | 8 +- claim/claim.h | 2 +- claim/netdata-claim.sh.in | 2 +- cli/README.md | 2 +- cli/cli.c | 4 +- cli/cli.h | 2 +- collectors/COLLECTORS.md | 4 +- collectors/all.h | 161 +- collectors/apps.plugin/apps_groups.conf | 24 + collectors/apps.plugin/apps_plugin.c | 112 +- collectors/cgroups.plugin/cgroup-name.sh.in | 33 +- collectors/cgroups.plugin/cgroup-network-helper.sh | 17 +- collectors/cgroups.plugin/sys_fs_cgroup.c | 184 +- collectors/cgroups.plugin/sys_fs_cgroup.h | 34 +- collectors/charts.d.plugin/ap/ap.chart.sh | 14 +- .../charts.d.plugin/apcupsd/apcupsd.chart.sh | 22 +- collectors/charts.d.plugin/charts.d.conf | 1 + collectors/charts.d.plugin/charts.d.plugin.in | 2 +- .../charts.d.plugin/example/example.chart.sh | 6 +- .../charts.d.plugin/libreswan/libreswan.chart.sh | 6 +- collectors/charts.d.plugin/nut/nut.chart.sh | 2 +- .../charts.d.plugin/opensips/opensips.chart.sh | 42 +- collectors/charts.d.plugin/sensors/README.md | 35 +- .../charts.d.plugin/sensors/sensors.chart.sh | 16 +- collectors/checks.plugin/plugin_checks.h | 2 +- collectors/cups.plugin/cups_plugin.c | 2 +- collectors/diskspace.plugin/plugin_diskspace.c | 33 +- collectors/diskspace.plugin/plugin_diskspace.h | 2 +- collectors/ebpf.plugin/Makefile.am | 20 +- collectors/ebpf.plugin/README.md | 722 +- collectors/ebpf.plugin/ebpf.c | 1002 +- collectors/ebpf.plugin/ebpf.d.conf | 44 +- collectors/ebpf.plugin/ebpf.d/cachestat.conf | 21 +- collectors/ebpf.plugin/ebpf.d/dcstat.conf | 18 +- collectors/ebpf.plugin/ebpf.d/disk.conf | 9 + collectors/ebpf.plugin/ebpf.d/fd.conf | 19 + collectors/ebpf.plugin/ebpf.d/filesystem.conf | 20 + collectors/ebpf.plugin/ebpf.d/hardirq.conf | 8 + collectors/ebpf.plugin/ebpf.d/mdflush.conf | 7 + collectors/ebpf.plugin/ebpf.d/mount.conf | 8 + collectors/ebpf.plugin/ebpf.d/network.conf | 14 +- collectors/ebpf.plugin/ebpf.d/oomkill.conf | 7 + collectors/ebpf.plugin/ebpf.d/process.conf | 21 +- collectors/ebpf.plugin/ebpf.d/shm.conf | 24 + collectors/ebpf.plugin/ebpf.d/softirq.conf | 8 + collectors/ebpf.plugin/ebpf.d/swap.conf | 17 + collectors/ebpf.plugin/ebpf.d/sync.conf | 16 +- collectors/ebpf.plugin/ebpf.d/vfs.conf | 17 + collectors/ebpf.plugin/ebpf.h | 67 +- collectors/ebpf.plugin/ebpf_apps.c | 57 +- collectors/ebpf.plugin/ebpf_apps.h | 54 +- collectors/ebpf.plugin/ebpf_cachestat.c | 483 +- collectors/ebpf.plugin/ebpf_cachestat.h | 19 +- collectors/ebpf.plugin/ebpf_cgroup.c | 348 + collectors/ebpf.plugin/ebpf_cgroup.h | 70 + collectors/ebpf.plugin/ebpf_dcstat.c | 495 +- collectors/ebpf.plugin/ebpf_dcstat.h | 15 +- collectors/ebpf.plugin/ebpf_disk.c | 842 + collectors/ebpf.plugin/ebpf_disk.h | 78 + collectors/ebpf.plugin/ebpf_fd.c | 865 + collectors/ebpf.plugin/ebpf_fd.h | 85 + collectors/ebpf.plugin/ebpf_filesystem.c | 661 + collectors/ebpf.plugin/ebpf_filesystem.h | 68 + 
collectors/ebpf.plugin/ebpf_hardirq.c | 494 + collectors/ebpf.plugin/ebpf_hardirq.h | 73 + collectors/ebpf.plugin/ebpf_mdflush.c | 312 + collectors/ebpf.plugin/ebpf_mdflush.h | 42 + collectors/ebpf.plugin/ebpf_mount.c | 260 + collectors/ebpf.plugin/ebpf_mount.h | 36 + collectors/ebpf.plugin/ebpf_oomkill.c | 400 + collectors/ebpf.plugin/ebpf_oomkill.h | 29 + collectors/ebpf.plugin/ebpf_process.c | 1127 +- collectors/ebpf.plugin/ebpf_process.h | 115 +- collectors/ebpf.plugin/ebpf_shm.c | 855 + collectors/ebpf.plugin/ebpf_shm.h | 63 + collectors/ebpf.plugin/ebpf_socket.c | 777 +- collectors/ebpf.plugin/ebpf_socket.h | 38 +- collectors/ebpf.plugin/ebpf_softirq.c | 273 + collectors/ebpf.plugin/ebpf_softirq.h | 34 + collectors/ebpf.plugin/ebpf_swap.c | 698 + collectors/ebpf.plugin/ebpf_swap.h | 53 + collectors/ebpf.plugin/ebpf_sync.c | 75 +- collectors/ebpf.plugin/ebpf_sync.h | 6 +- collectors/ebpf.plugin/ebpf_vfs.c | 1601 ++ collectors/ebpf.plugin/ebpf_vfs.h | 151 + collectors/ebpf.plugin/reset_netdata_trace.sh.in | 9 - collectors/freebsd.plugin/freebsd_getifaddrs.c | 2 +- collectors/freebsd.plugin/freebsd_kstat_zfs.c | 10 +- collectors/freebsd.plugin/freebsd_sysctl.c | 82 +- collectors/freebsd.plugin/plugin_freebsd.c | 4 +- collectors/freeipmi.plugin/README.md | 5 + collectors/freeipmi.plugin/freeipmi_plugin.c | 2 +- collectors/idlejitter.plugin/plugin_idlejitter.h | 2 +- collectors/macos.plugin/macos_fw.c | 30 +- collectors/macos.plugin/macos_mach_smi.c | 8 +- collectors/macos.plugin/macos_sysctl.c | 62 +- collectors/macos.plugin/plugin_macos.c | 4 +- collectors/macos.plugin/plugin_macos.h | 2 +- collectors/nfacct.plugin/plugin_nfacct.c | 2 +- collectors/node.d.plugin/named/named.node.js | 4 +- collectors/perf.plugin/perf_plugin.c | 76 +- collectors/plugins.d/plugins_d.h | 2 +- collectors/plugins.d/pluginsd_parser.c | 50 + collectors/plugins.d/pluginsd_parser.h | 5 +- collectors/proc.plugin/README.md | 2 +- collectors/proc.plugin/plugin_proc.h | 2 +- collectors/proc.plugin/proc_interrupts.c | 2 +- collectors/proc.plugin/proc_mdstat.c | 6 +- collectors/proc.plugin/proc_net_dev.c | 2 +- collectors/proc.plugin/proc_net_rpc_nfsd.c | 196 +- collectors/proc.plugin/proc_net_stat_synproxy.c | 41 +- collectors/proc.plugin/proc_pagetypeinfo.c | 2 +- collectors/proc.plugin/proc_spl_kstat_zfs.c | 2 +- collectors/proc.plugin/sys_class_infiniband.c | 2 +- collectors/proc.plugin/zfs_common.h | 2 +- collectors/python.d.plugin/anomalies/README.md | 4 +- .../python.d.plugin/anomalies/anomalies.chart.py | 4 +- .../python.d.plugin/anomalies/anomalies.conf | 4 +- collectors/python.d.plugin/changefinder/README.md | 12 +- .../elasticsearch/elasticsearch.chart.py | 38 +- .../python.d.plugin/go_expvar/go_expvar.chart.py | 2 +- collectors/python.d.plugin/httpcheck/README.md | 2 +- .../python.d.plugin/mongodb/mongodb.chart.py | 4 +- collectors/python.d.plugin/mysql/README.md | 2 +- collectors/python.d.plugin/mysql/mysql.chart.py | 4 +- collectors/python.d.plugin/postgres/README.md | 19 + .../python.d.plugin/postgres/postgres.chart.py | 245 +- collectors/python.d.plugin/postgres/postgres.conf | 9 +- collectors/python.d.plugin/python.d.plugin.in | 43 +- .../python.d.plugin/smartd_log/smartd_log.chart.py | 2 +- .../python.d.plugin/varnish/varnish.chart.py | 2 +- collectors/python.d.plugin/zscores/README.md | 12 +- .../python.d.plugin/zscores/zscores.chart.py | 4 +- collectors/python.d.plugin/zscores/zscores.conf | 6 +- collectors/slabinfo.plugin/slabinfo.c | 9 +- collectors/statsd.plugin/README.md | 6 +- 
collectors/statsd.plugin/statsd.c | 23 +- collectors/statsd.plugin/statsd.h | 2 +- collectors/tc.plugin/plugin_tc.h | 2 +- collectors/timex.plugin/plugin_timex.h | 2 +- collectors/xenstat.plugin/xenstat_plugin.c | 2 +- configure.ac | 462 +- contrib/debian/control | 3 +- contrib/debian/control.xenial | 62 + contrib/debian/netdata.postinst | 2 +- contrib/debian/rules | 17 +- daemon/README.md | 2 +- daemon/analytics.c | 202 +- daemon/analytics.h | 12 +- daemon/anonymous-statistics.sh.in | 22 +- daemon/buildinfo.c | 134 +- daemon/commands.c | 21 +- daemon/commands.h | 1 + daemon/common.c | 2 + daemon/common.h | 14 +- daemon/daemon.c | 5 +- daemon/global_statistics.c | 2 +- daemon/main.c | 94 +- daemon/service.c | 38 + daemon/service.h | 19 + daemon/system-info.sh | 2 + daemon/unit_test.c | 81 +- daemon/unit_test.h | 1 + database/engine/datafile.c | 4 +- database/engine/journalfile.c | 11 +- database/engine/metadata_log/logfile.c | 2 +- database/engine/metadata_log/metalogpluginsd.h | 6 +- database/engine/pagecache.c | 11 +- database/engine/rrdengine.c | 3 +- database/engine/rrdengine.h | 2 +- database/engine/rrdengineapi.c | 21 +- database/engine/rrdengineapi.h | 2 +- database/engine/rrdenginelib.h | 8 +- database/rrd.h | 69 +- database/rrdcalc.c | 4 + database/rrdcalc.h | 38 + database/rrdcalctemplate.c | 4 +- database/rrdcalctemplate.h | 1 + database/rrddim.c | 41 +- database/rrdhost.c | 127 +- database/rrdset.c | 96 +- database/sqlite/sqlite3.c | 17145 ++++++++++++------- database/sqlite/sqlite3.h | 257 +- database/sqlite/sqlite_aclk.c | 820 + database/sqlite/sqlite_aclk.h | 232 + database/sqlite/sqlite_aclk_alert.c | 885 + database/sqlite/sqlite_aclk_alert.h | 20 + database/sqlite/sqlite_aclk_chart.c | 993 ++ database/sqlite/sqlite_aclk_chart.h | 37 + database/sqlite/sqlite_aclk_node.c | 65 + database/sqlite/sqlite_aclk_node.h | 8 + database/sqlite/sqlite_functions.c | 546 +- database/sqlite/sqlite_functions.h | 37 +- database/sqlite/sqlite_health.c | 944 + database/sqlite/sqlite_health.h | 17 + docs/Demo-Sites.md | 2 +- docs/Running-behind-lighttpd.md | 2 +- docs/agent-cloud.md | 6 +- docs/anonymous-statistics.md | 10 +- docs/configure/nodes.md | 7 +- docs/configure/secure-nodes.md | 2 +- docs/contributing/style-guide.md | 4 +- docs/dashboard/customize.mdx | 4 +- docs/dashboard/dimensions-contexts-families.mdx | 4 +- docs/dashboard/how-dashboard-works.mdx | 2 +- docs/dashboard/import-export-print-snapshot.mdx | 8 +- docs/dashboard/interact-charts.mdx | 9 +- docs/dashboard/select-timeframes.mdx | 85 - .../visualization-date-and-time-controls.mdx | 121 + docs/get-started.mdx | 2 +- docs/getting-started.md | 4 +- docs/guides/deploy/ansible.md | 18 +- docs/guides/monitor-cockroachdb.md | 2 +- docs/guides/monitor/anomaly-detection.md | 4 +- docs/guides/monitor/kubernetes-k8s-netdata.md | 8 +- docs/guides/monitor/lamp-stack.md | 2 +- docs/guides/monitor/statsd.md | 7 +- docs/guides/python-collector.md | 12 +- docs/guides/step-by-step/step-00.md | 1 - docs/guides/step-by-step/step-03.md | 4 +- docs/guides/step-by-step/step-05.md | 7 + .../monitor-debug-applications-ebpf.md | 4 +- docs/guides/using-host-labels.md | 2 +- .../enable-streaming.mdx | 4 +- docs/monitor/enable-notifications.md | 19 +- docs/monitor/view-active-alarms.md | 7 +- docs/netdata-security.md | 8 +- docs/overview/what-is-netdata.md | 6 +- docs/quickstart/infrastructure.md | 4 +- docs/visualize/interact-dashboards-charts.md | 4 + exporting/README.md | 2 + exporting/WALKTHROUGH.md | 12 +- exporting/check_filters.c | 2 + 
exporting/clean_connectors.c | 4 + exporting/exporting.conf | 5 + exporting/exporting_engine.c | 16 + exporting/exporting_engine.h | 4 + exporting/graphite/README.md | 7 +- exporting/graphite/graphite.c | 2 + exporting/init_connectors.c | 73 +- exporting/json/README.md | 7 +- exporting/json/json.c | 2 + exporting/mongodb/mongodb.c | 2 + exporting/opentsdb/README.md | 7 +- exporting/opentsdb/opentsdb.c | 2 + exporting/process_data.c | 2 + exporting/prometheus/README.md | 70 +- exporting/prometheus/prometheus.c | 8 +- exporting/prometheus/remote_write/README.md | 7 + exporting/prometheus/remote_write/remote_write.c | 2 + exporting/read_config.c | 4 + exporting/tests/exporting_doubles.c | 2 + exporting/tests/exporting_fixtures.c | 2 + exporting/tests/test_exporting_engine.c | 24 +- health/Makefile.am | 19 +- health/REFERENCE.md | 60 +- health/health.c | 73 +- health/health.d/adaptec_raid.conf | 8 +- health/health.d/am2320.conf | 15 - health/health.d/anomalies.conf | 8 +- health/health.d/apache.conf | 17 - health/health.d/apcupsd.conf | 12 +- health/health.d/backend.conf | 12 +- health/health.d/bcache.conf | 8 +- health/health.d/beanstalkd.conf | 4 +- health/health.d/bind_rndc.conf | 4 +- health/health.d/boinc.conf | 16 +- health/health.d/btrfs.conf | 16 +- health/health.d/ceph.conf | 4 +- health/health.d/cgroups.conf | 8 +- health/health.d/cockroachdb.conf | 72 +- health/health.d/couchdb.conf | 16 - health/health.d/cpu.conf | 16 +- health/health.d/dbengine.conf | 16 +- health/health.d/disks.conf | 26 +- health/health.d/dns_query.conf | 4 +- health/health.d/dnsmasq_dhcp.conf | 4 +- health/health.d/dockerd.conf | 4 +- health/health.d/elasticsearch.conf | 15 - health/health.d/entropy.conf | 4 +- health/health.d/exporting.conf | 29 +- health/health.d/fping.conf | 16 +- health/health.d/fronius.conf | 4 +- health/health.d/gearman.conf | 20 +- health/health.d/geth.conf | 12 + health/health.d/go.d.plugin.conf | 17 + health/health.d/haproxy.conf | 21 +- health/health.d/hdfs.conf | 37 +- health/health.d/httpcheck.conf | 46 +- health/health.d/ioping.conf | 4 +- health/health.d/ipc.conf | 8 +- health/health.d/ipfs.conf | 4 +- health/health.d/ipmi.conf | 8 +- health/health.d/kubelet.conf | 36 +- health/health.d/lighttpd.conf | 17 - health/health.d/linux_power_supply.conf | 4 +- health/health.d/load.conf | 16 +- health/health.d/mdstat.conf | 16 +- health/health.d/megacli.conf | 20 +- health/health.d/memcached.conf | 29 +- health/health.d/memory.conf | 12 +- health/health.d/mongodb.conf | 16 - health/health.d/mysql.conf | 62 +- health/health.d/named.conf | 17 - health/health.d/net.conf | 60 +- health/health.d/netfilter.conf | 4 +- health/health.d/nginx.conf | 17 - health/health.d/nginx_plus.conf | 17 - health/health.d/phpfpm.conf | 17 - health/health.d/pihole.conf | 49 +- health/health.d/portcheck.conf | 26 +- health/health.d/postgres.conf | 16 - health/health.d/processes.conf | 4 +- health/health.d/pulsar.conf | 16 - health/health.d/python.d.plugin.conf | 17 + health/health.d/ram.conf | 48 +- health/health.d/redis.conf | 24 +- health/health.d/retroshare.conf | 19 +- health/health.d/riakkv.conf | 38 +- health/health.d/scaleio.conf | 24 +- health/health.d/softnet.conf | 12 +- health/health.d/squid.conf | 17 - health/health.d/stiebeleltron.conf | 4 +- health/health.d/swap.conf | 10 +- health/health.d/systemdunits.conf | 40 +- health/health.d/tcp_conn.conf | 4 +- health/health.d/tcp_listen.conf | 16 +- health/health.d/tcp_mem.conf | 4 +- health/health.d/tcp_orphans.conf | 4 +- health/health.d/tcp_resets.conf | 
16 +- health/health.d/timex.conf | 17 + health/health.d/udp_errors.conf | 8 +- health/health.d/unbound.conf | 24 +- health/health.d/varnish.conf | 12 - health/health.d/vcsa.conf | 48 +- health/health.d/vernemq.conf | 120 +- health/health.d/vsphere.conf | 44 +- health/health.d/web_log.conf | 135 +- health/health.d/whoisquery.conf | 21 +- health/health.d/wmi.conf | 50 +- health/health.d/x509check.conf | 25 +- health/health.d/zfs.conf | 12 +- health/health.d/zookeeper.conf | 17 - health/health.h | 6 +- health/health_config.c | 153 +- health/health_json.c | 18 + health/health_log.c | 64 +- health/notifications/alarm-notify.sh.in | 888 +- health/notifications/custom/README.md | 6 +- health/notifications/email/README.md | 18 +- health/notifications/health_alarm_notify.conf | 4 +- health/notifications/syslog/README.md | 2 +- libnetdata/config/appconfig.c | 26 + libnetdata/config/appconfig.h | 1 + libnetdata/ebpf/ebpf.c | 392 +- libnetdata/ebpf/ebpf.h | 76 +- libnetdata/json/jsmn.c | 2 +- libnetdata/libnetdata.h | 7 + libnetdata/log/log.h | 2 +- libnetdata/procfile/procfile.c | 4 +- libnetdata/socket/security.c | 1 + libnetdata/storage_number/storage_number.c | 18 +- libnetdata/storage_number/storage_number.h | 17 +- .../storage_number/tests/test_storage_number.c | 2 +- ml/BitBufferCounter.cc | 29 + ml/BitBufferCounter.h | 54 + ml/BitRateWindow.cc | 75 + ml/BitRateWindow.h | 170 + ml/Config.cc | 128 + ml/Config.h | 45 + ml/Database.cc | 127 + ml/Database.h | 131 + ml/Dimension.cc | 169 + ml/Dimension.h | 124 + ml/Host.cc | 458 + ml/Host.h | 104 + ml/Makefile.am | 8 + ml/Query.h | 49 + ml/Tests.cc | 301 + ml/kmeans/KMeans.cc | 55 + ml/kmeans/KMeans.h | 34 + ml/kmeans/Makefile.am | 4 + ml/kmeans/SamplesBuffer.cc | 144 + ml/kmeans/SamplesBuffer.h | 140 + ml/kmeans/Tests.cc | 143 + ml/ml-dummy.c | 38 + ml/ml-private.h | 26 + ml/ml.cc | 153 + ml/ml.h | 41 + netdata-installer.sh | 330 +- netdata.spec.in | 45 +- packaging/Dockerfile.packager | 43 - packaging/bundle-ebpf.sh | 18 +- packaging/bundle-libbpf.sh | 16 +- packaging/bundle-lws.sh | 2 +- packaging/current_libbpf.checksums | 1 + packaging/current_libbpf.version | 1 + packaging/docker/Dockerfile | 14 +- packaging/docker/README.md | 3 + packaging/docker/run.sh | 17 + packaging/ebpf.checksums | 6 +- packaging/ebpf.version | 2 +- packaging/go.d.checksums | 32 +- packaging/go.d.version | 2 +- packaging/installer/README.md | 12 +- packaging/installer/dependencies/alpine.sh | 0 packaging/installer/dependencies/arch.sh | 0 packaging/installer/dependencies/centos.sh | 0 packaging/installer/dependencies/clearlinux.sh | 0 packaging/installer/dependencies/debian.sh | 0 packaging/installer/dependencies/freebsd.sh | 0 packaging/installer/dependencies/gentoo.sh | 0 packaging/installer/dependencies/macos.sh | 0 packaging/installer/dependencies/ol.sh | 0 packaging/installer/dependencies/opensuse.sh | 0 packaging/installer/dependencies/rhel.sh | 0 packaging/installer/dependencies/sabayon.sh | 0 packaging/installer/dependencies/ubuntu.sh | 0 packaging/installer/functions.sh | 9 +- packaging/installer/install-required-packages.sh | 128 +- packaging/installer/kickstart-ng.sh | 1362 ++ packaging/installer/kickstart-static64.sh | 48 +- packaging/installer/kickstart.sh | 248 +- packaging/installer/methods/kickstart-64.md | 12 +- packaging/installer/methods/kickstart.md | 64 +- packaging/installer/methods/kubernetes.md | 27 +- packaging/installer/methods/macos.md | 17 +- packaging/installer/methods/manual.md | 6 +- packaging/installer/methods/packages.md | 2 - 
packaging/installer/methods/pfsense.md | 28 +- packaging/installer/methods/source.md | 4 +- packaging/installer/methods/synology.md | 10 +- packaging/installer/netdata-updater.sh | 50 +- packaging/libbpf.checksums | 2 +- packaging/libbpf.version | 2 +- packaging/libbpf_0_0_9.checksums | 1 + packaging/libbpf_0_0_9.version | 1 + packaging/makeself/build-static.sh | 69 + packaging/makeself/build-x86_64-static.sh | 45 +- packaging/makeself/build.sh | 9 + packaging/makeself/functions.sh | 14 +- packaging/makeself/install-alpine-packages.sh | 50 +- packaging/makeself/install-or-update.sh | 6 +- .../jobs/10-prepare-destination.install.sh | 10 +- packaging/makeself/jobs/20-openssl.install.sh | 11 +- packaging/makeself/jobs/50-bash-5.0.install.sh | 33 - packaging/makeself/jobs/50-bash-5.1.8.install.sh | 41 + packaging/makeself/jobs/50-curl-7.73.0.install.sh | 36 - packaging/makeself/jobs/50-curl-7.78.0.install.sh | 54 + packaging/makeself/jobs/50-fping-5.0.install.sh | 11 +- packaging/makeself/jobs/50-ioping-1.2.install.sh | 11 +- packaging/makeself/jobs/70-netdata-git.install.sh | 17 +- packaging/makeself/jobs/99-makeself.install.sh | 19 +- packaging/makeself/makeself.sh | 2 +- packaging/makeself/openssl.version | 2 +- packaging/protobuf.checksums | 1 + packaging/protobuf.version | 1 + packaging/repoconfig/Makefile | 31 + packaging/repoconfig/build-deb.sh | 32 + packaging/repoconfig/build-rpm.sh | 26 + packaging/repoconfig/debian/changelog | 6 + packaging/repoconfig/debian/compat | 1 + packaging/repoconfig/debian/control | 19 + packaging/repoconfig/debian/copyright | 10 + packaging/repoconfig/debian/rules | 21 + packaging/repoconfig/debian/source/format | 1 + packaging/repoconfig/netdata-edge.repo.centos | 19 + packaging/repoconfig/netdata-edge.repo.fedora | 19 + packaging/repoconfig/netdata-edge.repo.suse | 19 + packaging/repoconfig/netdata-repo.spec | 89 + packaging/repoconfig/netdata.list.in | 2 + packaging/repoconfig/netdata.repo.centos | 19 + packaging/repoconfig/netdata.repo.fedora | 19 + packaging/repoconfig/netdata.repo.suse | 19 + packaging/scripts/install.sh | 83 - packaging/scripts/test.sh | 52 - packaging/version | 2 +- parser/parser.c | 2 + parser/parser.h | 6 +- registry/registry.c | 2 +- registry/registry.h | 2 +- registry/registry_db.c | 2 +- registry/registry_init.c | 2 +- registry/registry_internals.c | 2 +- registry/registry_log.c | 2 +- registry/registry_machine.c | 2 +- registry/registry_person.c | 2 +- registry/registry_url.c | 2 +- spawn/spawn.c | 2 +- spawn/spawn.h | 2 +- spawn/spawn_client.c | 4 +- streaming/README.md | 10 +- streaming/receiver.c | 18 +- streaming/rrdpush.c | 37 +- streaming/rrdpush.h | 12 +- streaming/sender.c | 7 +- system/.install-type | 1 + system/Makefile.am | 4 + system/netdata.service.in | 1 - tests/ebpf/README.md | 1 + tests/ebpf/sync_tester.c | 120 + tests/profile/benchmark-procfile-parser.c | 4 +- web/api/formatters/rrd2json.c | 3 - web/api/netdata-swagger.json | 4 +- web/api/netdata-swagger.yaml | 8 +- web/api/queries/query.c | 20 +- web/api/queries/rrdr.h | 3 +- web/api/tests/valid_urls.c | 10 +- web/api/tests/web_api.c | 8 +- web/api/web_api_v1.c | 151 +- web/gui/bundle_dashboard.py | 24 +- web/gui/custom/README.md | 11 +- web/gui/dashboard/Makefile.am | 62 +- web/gui/dashboard/asset-manifest.json | 70 +- web/gui/dashboard/css/dashboard.css | 2 +- web/gui/dashboard/css/dashboard.slate.css | 2 +- web/gui/dashboard/dashboard.css | 2 +- web/gui/dashboard/dashboard.html | 8 +- web/gui/dashboard/dashboard.js | 2 +- 
web/gui/dashboard/dashboard.slate.css | 2 +- web/gui/dashboard/demo.html | 2 +- web/gui/dashboard/demo2.html | 2 +- web/gui/dashboard/demosites.html | 2 +- web/gui/dashboard/demosites2.html | 2 +- web/gui/dashboard/index-node-view.html | 2 +- web/gui/dashboard/index.html | 4 +- web/gui/dashboard/infographic.html | 2 +- web/gui/dashboard/lib/jquery-2.2.4.min.js | 5 - web/gui/dashboard/lib/jquery-3.6.0.min.js | 2 + ...he-manifest.14a1e41ead8f8b6e26e356372042ef5a.js | 190 + ...he-manifest.1a96c027aec7f2d07341fa69aa6b82fa.js | 190 - web/gui/dashboard/service-worker.js | 2 +- web/gui/dashboard/static/css/2.20fd0a40.chunk.css | 15 + .../dashboard/static/css/2.20fd0a40.chunk.css.map | 1 + web/gui/dashboard/static/css/2.6b842ba1.chunk.css | 15 - .../dashboard/static/css/2.6b842ba1.chunk.css.map | 1 - .../dashboard/static/css/main.a46a34fa.chunk.css | 2 + .../static/css/main.a46a34fa.chunk.css.map | 1 + .../dashboard/static/css/main.d931154a.chunk.css | 2 - .../static/css/main.d931154a.chunk.css.map | 1 - web/gui/dashboard/static/js/10.a23c74b9.chunk.js | 2 + .../dashboard/static/js/10.a23c74b9.chunk.js.map | 1 + web/gui/dashboard/static/js/10.db7e8e19.chunk.js | 2 - .../dashboard/static/js/10.db7e8e19.chunk.js.map | 1 - web/gui/dashboard/static/js/2.252b3a57.chunk.js | 3 - .../static/js/2.252b3a57.chunk.js.LICENSE | 236 - .../dashboard/static/js/2.252b3a57.chunk.js.map | 1 - web/gui/dashboard/static/js/2.3456bb26.chunk.js | 3 + .../static/js/2.3456bb26.chunk.js.LICENSE | 251 + .../dashboard/static/js/2.3456bb26.chunk.js.map | 1 + web/gui/dashboard/static/js/3.5ef4adcd.chunk.js | 2 + .../dashboard/static/js/3.5ef4adcd.chunk.js.map | 1 + web/gui/dashboard/static/js/3.99238dcb.chunk.js | 2 - .../dashboard/static/js/3.99238dcb.chunk.js.map | 1 - web/gui/dashboard/static/js/4.1621c1ad.chunk.js | 2 + .../dashboard/static/js/4.1621c1ad.chunk.js.map | 1 + web/gui/dashboard/static/js/4.6ef9bdcb.chunk.js | 2 - .../dashboard/static/js/4.6ef9bdcb.chunk.js.map | 1 - web/gui/dashboard/static/js/5.05b274a5.chunk.js | 3 + .../static/js/5.05b274a5.chunk.js.LICENSE | 3 + .../dashboard/static/js/5.05b274a5.chunk.js.map | 1 + web/gui/dashboard/static/js/5.96a698ab.chunk.js | 3 - .../static/js/5.96a698ab.chunk.js.LICENSE | 3 - .../dashboard/static/js/5.96a698ab.chunk.js.map | 1 - web/gui/dashboard/static/js/6.299c0acd.chunk.js | 2 + .../dashboard/static/js/6.299c0acd.chunk.js.map | 1 + web/gui/dashboard/static/js/6.d9713eb9.chunk.js | 2 - .../dashboard/static/js/6.d9713eb9.chunk.js.map | 1 - web/gui/dashboard/static/js/7.12e939e5.chunk.js | 2 - .../dashboard/static/js/7.12e939e5.chunk.js.map | 1 - web/gui/dashboard/static/js/7.850d6c32.chunk.js | 2 + .../dashboard/static/js/7.850d6c32.chunk.js.map | 1 + web/gui/dashboard/static/js/8.91852cf4.chunk.js | 2 - .../dashboard/static/js/8.91852cf4.chunk.js.map | 1 - web/gui/dashboard/static/js/8.fb328a3a.chunk.js | 2 + .../dashboard/static/js/8.fb328a3a.chunk.js.map | 1 + web/gui/dashboard/static/js/9.e3a9ce26.chunk.js | 2 - .../dashboard/static/js/9.e3a9ce26.chunk.js.map | 1 - web/gui/dashboard/static/js/9.f8eb4edd.chunk.js | 2 + .../dashboard/static/js/9.f8eb4edd.chunk.js.map | 1 + web/gui/dashboard/static/js/main.8aa70c75.chunk.js | 3 - .../static/js/main.8aa70c75.chunk.js.LICENSE | 8 - .../dashboard/static/js/main.8aa70c75.chunk.js.map | 1 - web/gui/dashboard/static/js/main.fa83a16e.chunk.js | 3 + .../static/js/main.fa83a16e.chunk.js.LICENSE | 8 + .../dashboard/static/js/main.fa83a16e.chunk.js.map | 1 + .../dashboard/static/js/runtime-main.6c7b39cd.js | 2 - 
.../static/js/runtime-main.6c7b39cd.js.map | 1 - .../dashboard/static/js/runtime-main.ea592e2f.js | 2 + .../static/js/runtime-main.ea592e2f.js.map | 1 + web/gui/dashboard/tv-react.html | 2 +- web/gui/dashboard/tv.html | 2 +- web/gui/dashboard_info.js | 3240 +++- web/gui/main.js | 8 + web/gui/src/dashboard.js/dependencies.js | 2 +- web/gui/static/img/netdata-logomark.svg | 4 +- web/server/web_client.c | 2 +- web/server/web_client_cache.c | 2 +- 711 files changed, 49090 insertions(+), 14360 deletions(-) create mode 100644 .github/data/build-matrix.json delete mode 100755 .github/scripts/build-static-x86_64.sh create mode 100755 .github/scripts/build-static.sh create mode 100755 .github/scripts/docker-test.sh create mode 100755 .github/scripts/pkg-test.sh create mode 100755 .github/scripts/run-updater-check.sh delete mode 100644 .github/workflows/build-and-install.yml create mode 100644 .github/workflows/build.yml create mode 100644 .github/workflows/repoconfig-packages.yml delete mode 100644 .github/workflows/updater.yml create mode 100644 .travis/current_build_status create mode 100755 .travis/trigger_artifact_build.sh create mode 100644 aclk/aclk_alarm_api.c create mode 100644 aclk/aclk_alarm_api.h create mode 100644 aclk/aclk_api.c create mode 100644 aclk/aclk_api.h create mode 100644 aclk/aclk_charts_api.c create mode 100644 aclk/aclk_charts_api.h create mode 100644 aclk/aclk_proxy.c create mode 100644 aclk/aclk_proxy.h create mode 100644 aclk/aclk_rrdhost_state.h delete mode 100644 aclk/legacy/aclk_rrdhost_state.h create mode 100644 aclk/schema-wrappers/alarm_config.cc create mode 100644 aclk/schema-wrappers/alarm_config.h create mode 100644 aclk/schema-wrappers/alarm_stream.cc create mode 100644 aclk/schema-wrappers/alarm_stream.h create mode 100644 aclk/schema-wrappers/chart_config.cc create mode 100644 aclk/schema-wrappers/chart_config.h create mode 100644 aclk/schema-wrappers/chart_stream.cc create mode 100644 aclk/schema-wrappers/chart_stream.h create mode 100644 aclk/schema-wrappers/connection.cc create mode 100644 aclk/schema-wrappers/connection.h create mode 100644 aclk/schema-wrappers/node_connection.cc create mode 100644 aclk/schema-wrappers/node_connection.h create mode 100644 aclk/schema-wrappers/node_creation.cc create mode 100644 aclk/schema-wrappers/node_creation.h create mode 100644 aclk/schema-wrappers/node_info.cc create mode 100644 aclk/schema-wrappers/node_info.h create mode 100644 aclk/schema-wrappers/schema_wrapper_utils.cc create mode 100644 aclk/schema-wrappers/schema_wrapper_utils.h create mode 100644 aclk/schema-wrappers/schema_wrappers.h create mode 100644 collectors/ebpf.plugin/ebpf.d/disk.conf create mode 100644 collectors/ebpf.plugin/ebpf.d/fd.conf create mode 100644 collectors/ebpf.plugin/ebpf.d/filesystem.conf create mode 100644 collectors/ebpf.plugin/ebpf.d/hardirq.conf create mode 100644 collectors/ebpf.plugin/ebpf.d/mdflush.conf create mode 100644 collectors/ebpf.plugin/ebpf.d/mount.conf create mode 100644 collectors/ebpf.plugin/ebpf.d/oomkill.conf create mode 100644 collectors/ebpf.plugin/ebpf.d/shm.conf create mode 100644 collectors/ebpf.plugin/ebpf.d/softirq.conf create mode 100644 collectors/ebpf.plugin/ebpf.d/swap.conf create mode 100644 collectors/ebpf.plugin/ebpf.d/vfs.conf create mode 100644 collectors/ebpf.plugin/ebpf_cgroup.c create mode 100644 collectors/ebpf.plugin/ebpf_cgroup.h create mode 100644 collectors/ebpf.plugin/ebpf_disk.c create mode 100644 collectors/ebpf.plugin/ebpf_disk.h create mode 100644 collectors/ebpf.plugin/ebpf_fd.c 
create mode 100644 collectors/ebpf.plugin/ebpf_fd.h create mode 100644 collectors/ebpf.plugin/ebpf_filesystem.c create mode 100644 collectors/ebpf.plugin/ebpf_filesystem.h create mode 100644 collectors/ebpf.plugin/ebpf_hardirq.c create mode 100644 collectors/ebpf.plugin/ebpf_hardirq.h create mode 100644 collectors/ebpf.plugin/ebpf_mdflush.c create mode 100644 collectors/ebpf.plugin/ebpf_mdflush.h create mode 100644 collectors/ebpf.plugin/ebpf_mount.c create mode 100644 collectors/ebpf.plugin/ebpf_mount.h create mode 100644 collectors/ebpf.plugin/ebpf_oomkill.c create mode 100644 collectors/ebpf.plugin/ebpf_oomkill.h create mode 100644 collectors/ebpf.plugin/ebpf_shm.c create mode 100644 collectors/ebpf.plugin/ebpf_shm.h create mode 100644 collectors/ebpf.plugin/ebpf_softirq.c create mode 100644 collectors/ebpf.plugin/ebpf_softirq.h create mode 100644 collectors/ebpf.plugin/ebpf_swap.c create mode 100644 collectors/ebpf.plugin/ebpf_swap.h create mode 100644 collectors/ebpf.plugin/ebpf_vfs.c create mode 100644 collectors/ebpf.plugin/ebpf_vfs.h delete mode 100644 collectors/ebpf.plugin/reset_netdata_trace.sh.in create mode 100644 contrib/debian/control.xenial create mode 100644 daemon/service.c create mode 100644 daemon/service.h create mode 100644 database/sqlite/sqlite_aclk.c create mode 100644 database/sqlite/sqlite_aclk.h create mode 100644 database/sqlite/sqlite_aclk_alert.c create mode 100644 database/sqlite/sqlite_aclk_alert.h create mode 100644 database/sqlite/sqlite_aclk_chart.c create mode 100644 database/sqlite/sqlite_aclk_chart.h create mode 100644 database/sqlite/sqlite_aclk_node.c create mode 100644 database/sqlite/sqlite_aclk_node.h create mode 100644 database/sqlite/sqlite_health.c create mode 100644 database/sqlite/sqlite_health.h delete mode 100644 docs/dashboard/select-timeframes.mdx create mode 100644 docs/dashboard/visualization-date-and-time-controls.mdx delete mode 100644 health/health.d/am2320.conf delete mode 100644 health/health.d/apache.conf delete mode 100644 health/health.d/couchdb.conf delete mode 100644 health/health.d/elasticsearch.conf create mode 100644 health/health.d/geth.conf create mode 100644 health/health.d/go.d.plugin.conf delete mode 100644 health/health.d/lighttpd.conf delete mode 100644 health/health.d/mongodb.conf delete mode 100644 health/health.d/named.conf delete mode 100644 health/health.d/nginx.conf delete mode 100644 health/health.d/nginx_plus.conf delete mode 100644 health/health.d/phpfpm.conf delete mode 100644 health/health.d/postgres.conf delete mode 100644 health/health.d/pulsar.conf create mode 100644 health/health.d/python.d.plugin.conf delete mode 100644 health/health.d/squid.conf create mode 100644 health/health.d/timex.conf delete mode 100644 health/health.d/varnish.conf delete mode 100644 health/health.d/zookeeper.conf create mode 100644 ml/BitBufferCounter.cc create mode 100644 ml/BitBufferCounter.h create mode 100644 ml/BitRateWindow.cc create mode 100644 ml/BitRateWindow.h create mode 100644 ml/Config.cc create mode 100644 ml/Config.h create mode 100644 ml/Database.cc create mode 100644 ml/Database.h create mode 100644 ml/Dimension.cc create mode 100644 ml/Dimension.h create mode 100644 ml/Host.cc create mode 100644 ml/Host.h create mode 100644 ml/Makefile.am create mode 100644 ml/Query.h create mode 100644 ml/Tests.cc create mode 100644 ml/kmeans/KMeans.cc create mode 100644 ml/kmeans/KMeans.h create mode 100644 ml/kmeans/Makefile.am create mode 100644 ml/kmeans/SamplesBuffer.cc create mode 100644 ml/kmeans/SamplesBuffer.h 
create mode 100644 ml/kmeans/Tests.cc create mode 100644 ml/ml-dummy.c create mode 100644 ml/ml-private.h create mode 100644 ml/ml.cc create mode 100644 ml/ml.h delete mode 100644 packaging/Dockerfile.packager create mode 100644 packaging/current_libbpf.checksums create mode 100644 packaging/current_libbpf.version create mode 100755 packaging/installer/dependencies/alpine.sh create mode 100755 packaging/installer/dependencies/arch.sh create mode 100755 packaging/installer/dependencies/centos.sh create mode 100755 packaging/installer/dependencies/clearlinux.sh create mode 100755 packaging/installer/dependencies/debian.sh create mode 100755 packaging/installer/dependencies/freebsd.sh create mode 100755 packaging/installer/dependencies/gentoo.sh create mode 100755 packaging/installer/dependencies/macos.sh create mode 100755 packaging/installer/dependencies/ol.sh create mode 100755 packaging/installer/dependencies/opensuse.sh create mode 100755 packaging/installer/dependencies/rhel.sh create mode 100755 packaging/installer/dependencies/sabayon.sh create mode 100755 packaging/installer/dependencies/ubuntu.sh create mode 100644 packaging/installer/kickstart-ng.sh create mode 100644 packaging/libbpf_0_0_9.checksums create mode 100644 packaging/libbpf_0_0_9.version create mode 100755 packaging/makeself/build-static.sh delete mode 100755 packaging/makeself/jobs/50-bash-5.0.install.sh create mode 100755 packaging/makeself/jobs/50-bash-5.1.8.install.sh delete mode 100755 packaging/makeself/jobs/50-curl-7.73.0.install.sh create mode 100755 packaging/makeself/jobs/50-curl-7.78.0.install.sh create mode 100644 packaging/protobuf.checksums create mode 100644 packaging/protobuf.version create mode 100644 packaging/repoconfig/Makefile create mode 100755 packaging/repoconfig/build-deb.sh create mode 100755 packaging/repoconfig/build-rpm.sh create mode 100644 packaging/repoconfig/debian/changelog create mode 100644 packaging/repoconfig/debian/compat create mode 100644 packaging/repoconfig/debian/control create mode 100644 packaging/repoconfig/debian/copyright create mode 100755 packaging/repoconfig/debian/rules create mode 100644 packaging/repoconfig/debian/source/format create mode 100644 packaging/repoconfig/netdata-edge.repo.centos create mode 100644 packaging/repoconfig/netdata-edge.repo.fedora create mode 100644 packaging/repoconfig/netdata-edge.repo.suse create mode 100644 packaging/repoconfig/netdata-repo.spec create mode 100644 packaging/repoconfig/netdata.list.in create mode 100644 packaging/repoconfig/netdata.repo.centos create mode 100644 packaging/repoconfig/netdata.repo.fedora create mode 100644 packaging/repoconfig/netdata.repo.suse delete mode 100755 packaging/scripts/install.sh delete mode 100755 packaging/scripts/test.sh create mode 100644 system/.install-type create mode 100644 tests/ebpf/README.md create mode 100644 tests/ebpf/sync_tester.c delete mode 100644 web/gui/dashboard/lib/jquery-2.2.4.min.js create mode 100644 web/gui/dashboard/lib/jquery-3.6.0.min.js create mode 100644 web/gui/dashboard/precache-manifest.14a1e41ead8f8b6e26e356372042ef5a.js delete mode 100644 web/gui/dashboard/precache-manifest.1a96c027aec7f2d07341fa69aa6b82fa.js create mode 100644 web/gui/dashboard/static/css/2.20fd0a40.chunk.css create mode 100644 web/gui/dashboard/static/css/2.20fd0a40.chunk.css.map delete mode 100644 web/gui/dashboard/static/css/2.6b842ba1.chunk.css delete mode 100644 web/gui/dashboard/static/css/2.6b842ba1.chunk.css.map create mode 100644 web/gui/dashboard/static/css/main.a46a34fa.chunk.css 
create mode 100644 web/gui/dashboard/static/css/main.a46a34fa.chunk.css.map delete mode 100644 web/gui/dashboard/static/css/main.d931154a.chunk.css delete mode 100644 web/gui/dashboard/static/css/main.d931154a.chunk.css.map create mode 100644 web/gui/dashboard/static/js/10.a23c74b9.chunk.js create mode 100644 web/gui/dashboard/static/js/10.a23c74b9.chunk.js.map delete mode 100644 web/gui/dashboard/static/js/10.db7e8e19.chunk.js delete mode 100644 web/gui/dashboard/static/js/10.db7e8e19.chunk.js.map delete mode 100644 web/gui/dashboard/static/js/2.252b3a57.chunk.js delete mode 100644 web/gui/dashboard/static/js/2.252b3a57.chunk.js.LICENSE delete mode 100644 web/gui/dashboard/static/js/2.252b3a57.chunk.js.map create mode 100644 web/gui/dashboard/static/js/2.3456bb26.chunk.js create mode 100644 web/gui/dashboard/static/js/2.3456bb26.chunk.js.LICENSE create mode 100644 web/gui/dashboard/static/js/2.3456bb26.chunk.js.map create mode 100644 web/gui/dashboard/static/js/3.5ef4adcd.chunk.js create mode 100644 web/gui/dashboard/static/js/3.5ef4adcd.chunk.js.map delete mode 100644 web/gui/dashboard/static/js/3.99238dcb.chunk.js delete mode 100644 web/gui/dashboard/static/js/3.99238dcb.chunk.js.map create mode 100644 web/gui/dashboard/static/js/4.1621c1ad.chunk.js create mode 100644 web/gui/dashboard/static/js/4.1621c1ad.chunk.js.map delete mode 100644 web/gui/dashboard/static/js/4.6ef9bdcb.chunk.js delete mode 100644 web/gui/dashboard/static/js/4.6ef9bdcb.chunk.js.map create mode 100644 web/gui/dashboard/static/js/5.05b274a5.chunk.js create mode 100644 web/gui/dashboard/static/js/5.05b274a5.chunk.js.LICENSE create mode 100644 web/gui/dashboard/static/js/5.05b274a5.chunk.js.map delete mode 100644 web/gui/dashboard/static/js/5.96a698ab.chunk.js delete mode 100644 web/gui/dashboard/static/js/5.96a698ab.chunk.js.LICENSE delete mode 100644 web/gui/dashboard/static/js/5.96a698ab.chunk.js.map create mode 100644 web/gui/dashboard/static/js/6.299c0acd.chunk.js create mode 100644 web/gui/dashboard/static/js/6.299c0acd.chunk.js.map delete mode 100644 web/gui/dashboard/static/js/6.d9713eb9.chunk.js delete mode 100644 web/gui/dashboard/static/js/6.d9713eb9.chunk.js.map delete mode 100644 web/gui/dashboard/static/js/7.12e939e5.chunk.js delete mode 100644 web/gui/dashboard/static/js/7.12e939e5.chunk.js.map create mode 100644 web/gui/dashboard/static/js/7.850d6c32.chunk.js create mode 100644 web/gui/dashboard/static/js/7.850d6c32.chunk.js.map delete mode 100644 web/gui/dashboard/static/js/8.91852cf4.chunk.js delete mode 100644 web/gui/dashboard/static/js/8.91852cf4.chunk.js.map create mode 100644 web/gui/dashboard/static/js/8.fb328a3a.chunk.js create mode 100644 web/gui/dashboard/static/js/8.fb328a3a.chunk.js.map delete mode 100644 web/gui/dashboard/static/js/9.e3a9ce26.chunk.js delete mode 100644 web/gui/dashboard/static/js/9.e3a9ce26.chunk.js.map create mode 100644 web/gui/dashboard/static/js/9.f8eb4edd.chunk.js create mode 100644 web/gui/dashboard/static/js/9.f8eb4edd.chunk.js.map delete mode 100644 web/gui/dashboard/static/js/main.8aa70c75.chunk.js delete mode 100644 web/gui/dashboard/static/js/main.8aa70c75.chunk.js.LICENSE delete mode 100644 web/gui/dashboard/static/js/main.8aa70c75.chunk.js.map create mode 100644 web/gui/dashboard/static/js/main.fa83a16e.chunk.js create mode 100644 web/gui/dashboard/static/js/main.fa83a16e.chunk.js.LICENSE create mode 100644 web/gui/dashboard/static/js/main.fa83a16e.chunk.js.map delete mode 100644 web/gui/dashboard/static/js/runtime-main.6c7b39cd.js delete mode 100644 
web/gui/dashboard/static/js/runtime-main.6c7b39cd.js.map create mode 100644 web/gui/dashboard/static/js/runtime-main.ea592e2f.js create mode 100644 web/gui/dashboard/static/js/runtime-main.ea592e2f.js.map diff --git a/.codeclimate.yml b/.codeclimate.yml index 8a11c84a6..59fb3e795 100644 --- a/.codeclimate.yml +++ b/.codeclimate.yml @@ -43,11 +43,11 @@ plugins: enabled: false config: languages: - - javascript: - mass_threshold: 100 - - python: - python_version: 3 - mass_threshold: 100 + - javascript: + mass_threshold: 100 + - python: + python_version: 3 + mass_threshold: 100 checks: Similar code: enabled: false @@ -96,4 +96,3 @@ exclude_patterns: - "collectors/node.d.plugin/node_modules/extend.js" - "collectors/node.d.plugin/node_modules/pixl-xml.js" - "collectors/node.d.plugin/node_modules/net-snmp.js" - diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 185ececd2..50aa6797f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -2,20 +2,20 @@ # This way we prevent modifications which will be overwriten by automation. # Global (default) code owner -* @ktsaou @Ferroin +* @Ferroin # Ownership by directory structure -.travis/ @Ferroin @kaskavel @vkalintiris -.github/ @Ferroin @kaskavel @vkalintiris +.travis/ @Ferroin @iigorkarpov @maneamarius @kaskavel @vkalintiris +.github/ @Ferroin @iigorkarpov @maneamarius @kaskavel @vkalintiris aclk/ @stelfrag @underhood backends/ @thiagoftsm @vlvkobal backends/graphite/ @thiagoftsm @vlvkobal backends/json/ @thiagoftsm @vlvkobal backends/opentsdb/ @thiagoftsm @vlvkobal backends/prometheus/ @vlvkobal @thiagoftsm -build/ @Ferroin -contrib/debian @Ferroin @vkalintiris -collectors/ @vlvkobal @mfundul +build/ @Ferroin @iigorkarpov @maneamarius +contrib/debian @Ferroin @iigorkarpov @maneamarius @vkalintiris +collectors/ @vlvkobal collectors/charts.d.plugin/ @ilyam8 @Ferroin collectors/freebsd.plugin/ @vlvkobal @thiagoftsm collectors/ebpf.plugin/ @thiagoftsm @vlvkobal @@ -27,40 +27,41 @@ collectors/node.d.plugin/stiebeleltron/ @ccremer collectors/python.d.plugin/ @ilyam8 collectors/cups.plugin/ @simonnagl @vlvkobal @thiagoftsm exporting/ @vlvkobal @thiagoftsm -daemon/ @thiagoftsm @mfundul -database/ @mfundul @thiagoftsm -docs/ @joelhans -health/ @thiagoftsm @vlvkobal +daemon/ @thiagoftsm +database/ @thiagoftsm @vkalintiris +docs/ @DShreve2 @kickoke +health/ @thiagoftsm @vlvkobal @vkalintiris health/health.d/ @thiagoftsm @vlvkobal health/notifications/ @Ferroin @thiagoftsm -libnetdata/ @thiagofsm @mfundul -packaging/ @Ferroin @vkalintiris +ml/ @siamaktavakoli @vkalintiris +libnetdata/ @thiagofsm +packaging/ @Ferroin @iigorkarpov @maneamarius @vkalintiris registry/ @jacekkolasa streaming/ @thiagoftsm @vlvkobal -system/ @Ferroin @vkalintiris -tests/ @Ferroin @kaskavel -web/ @thiagoftsm @mfundul @vlvkobal +system/ @Ferroin @iigorkarpov @maneamarius @vkalintiris +tests/ @Ferroin @iigorkarpov @maneamarius @kaskavel @vkalintiris +web/ @thiagoftsm @vlvkobal @vkalintiris web/gui/ @jacekkolasa # Ownership by filetype (overwrites ownership by directory) -*.am @Ferroin -*.md @joelhans -Dockerfile* @Ferroin @knatsakis +*.am @Ferroin @iigorkarpov @maneamarius @vkalintiris +*.md @DShreve2 @kickoke +Dockerfile* @Ferroin @iigorkarpov @maneamarius @vkalintiris # Ownership of specific files -.gitignore @Ferroin @knatsakis -.travis.yml @Ferroin @knatsakis @kaskavel @vkalintiris -.lgtm.yml @Ferroin @knatsakis -.eslintrc @Ferroin @knatsakis -.eslintignore @Ferroin @knatsakis -.csslintrc @Ferroin @knatsakis -.codeclimate.yml @Ferroin @knatsakis -.codacy.yml @Ferroin 
@knatsakis -.yamllint.yml @Ferroin @knatsakis -netdata.spec.in @Ferroin @knatsakis @vkalintiris -netdata-installer.sh @Ferroin @knatsakis @vkalintiris -package.json @jacekkolasa @Ferroin @knatsakis -packaging/version @netdatabot @Ferroin @knatsakis +.gitignore @Ferroin @iigorkarpov @maneamarius @vkalintiris +.travis.yml @Ferroin @iigorkarpov @maneamarius @kaskavel @vkalintiris +.lgtm.yml @Ferroin @iigorkarpov @maneamarius @vkalintiris +.eslintrc @Ferroin @iigorkarpov @maneamarius @vkalintiris +.eslintignore @Ferroin @iigorkarpov @maneamarius @vkalintiris +.csslintrc @Ferroin @iigorkarpov @maneamarius @vkalintiris +.codeclimate.yml @Ferroin @iigorkarpov @maneamarius @vkalintiris +.codacy.yml @Ferroin @iigorkarpov @maneamarius @vkalintiris +.yamllint.yml @Ferroin @iigorkarpov @maneamarius @vkalintiris +netdata.spec.in @Ferroin @iigorkarpov @maneamarius @vkalintiris +netdata-installer.sh @Ferroin @iigorkarpov @maneamarius @vkalintiris +package.json @jacekkolasa @Ferroin @iigorkarpov @maneamarius @vkalintiris +packaging/version @netdatabot @Ferroin @iigorkarpov @maneamarius @vkalintiris -LICENSE.md @joelhans @Ferroin -CHANGELOG.md @netdatabot @Ferroin +LICENSE.md @DShreve2 @Ferroin @vkalintiris +CHANGELOG.md @netdatabot @Ferroin @iigorkarpov @maneamarius @vkalintiris diff --git a/.github/data/build-matrix.json b/.github/data/build-matrix.json new file mode 100644 index 000000000..b066311f3 --- /dev/null +++ b/.github/data/build-matrix.json @@ -0,0 +1,114 @@ +{ + "include": [ + { + "distro": "alpine:edge", + "artifact_key": "alpine-edge", + "pre": "apk add -U bash", + "rmjsonc": "apk del json-c-dev" + }, + { + "distro": "alpine:3.14", + "artifact_key": "alpine-3.14", + "pre": "apk add -U bash", + "rmjsonc": "apk del json-c-dev" + }, + { + "distro": "alpine:3.13", + "artifact_key": "alpine-3.13", + "pre": "apk add -U bash", + "rmjsonc": "apk del json-c-dev" + }, + { + "distro": "alpine:3.12", + "artifact_key": "alpine-3.12", + "pre": "apk add -U bash", + "rmjsonc": "apk del json-c-dev" + }, + { + "distro": "archlinux:latest", + "artifact_key": "archlinux", + "pre": "pacman --noconfirm -Syu && pacman --noconfirm -Sy grep libffi" + }, + { + "distro": "centos:7", + "artifact_key": "centos7" + }, + { + "distro": "centos:8", + "artifact_key": "centos8", + "rmjsonc": "dnf remove -y json-c-devel" + }, + { + "distro": "debian:11", + "artifact_key": "debian11", + "pre": "apt-get update", + "rmjsonc": "apt-get remove -y libjson-c-dev" + }, + { + "distro": "debian:10", + "artifact_key": "debian10", + "pre": "apt-get update", + "rmjsonc": "apt-get remove -y libjson-c-dev" + }, + { + "distro": "debian:9", + "artifact_key": "debian9", + "pre": "apt-get update", + "rmjsonc": "apt-get remove -y libjson-c-dev" + }, + { + "distro": "fedora:35", + "artifact_key": "fedora35", + "rmjsonc": "dnf remove -y json-c-devel" + }, + { + "distro": "fedora:34", + "artifact_key": "fedora34", + "rmjsonc": "dnf remove -y json-c-devel" + }, + { + "distro": "fedora:33", + "artifact_key": "fedora33", + "rmjsonc": "dnf remove -y json-c-devel" + }, + { + "distro": "opensuse/leap:15.3", + "artifact_key": "opensuse15.3", + "rmjsonc": "zypper rm -y libjson-c-devel" + }, + { + "distro": "opensuse/leap:15.2", + "artifact_key": "opensuse15.2", + "rmjsonc": "zypper rm -y libjson-c-devel" + }, + { + "distro": "oraclelinux:8", + "artifact_key": "oraclelinux8", + "rmjsonc": "dnf remove -y json-c-devel" + }, + { + "distro": "ubuntu:21.10", + "artifact_key": "ubuntu21.10", + "pre": "rm -f /etc/apt/apt.conf.d/docker && apt-get update", + 
"rmjsonc": "apt-get remove -y libjson-c-dev" + }, + { + "distro": "ubuntu:21.04", + "artifact_key": "ubuntu21.04", + "pre": "apt-get update", + "rmjsonc": "apt-get remove -y libjson-c-dev" + }, + { + "distro": "ubuntu:20.04", + "artifact_key": "ubuntu20.04", + "pre": "apt-get update", + "rmjsonc": "apt-get remove -y libjson-c-dev" + }, + { + "distro": "ubuntu:18.04", + "artifact_key": "ubuntu18.04", + "pre": "apt-get update", + "rmjsonc": "apt-get remove -y libjson-c-dev" + } + ] +} diff --git a/.github/dockerfiles/Dockerfile.build_test b/.github/dockerfiles/Dockerfile.build_test index 1dc3e303d..aca89c09b 100644 --- a/.github/dockerfiles/Dockerfile.build_test +++ b/.github/dockerfiles/Dockerfile.build_test @@ -4,8 +4,15 @@ FROM ${BASE} ARG PRE ENV PRE=${PRE} +ARG RMJSONC +ENV RMJSONC=${RMJSONC} +ENV DO_NOT_TRACK=1 +ENV GITHUB_ACTIONS=true + +RUN echo "${PRE}" > /prep-cmd.sh && \ + echo "${RMJSONC}" > /rmjsonc.sh && chmod +x /rmjsonc.sh && \ + /bin/sh /prep-cmd.sh COPY . /netdata -RUN /bin/sh /netdata/prep-cmd.sh RUN /netdata/packaging/installer/install-required-packages.sh --dont-wait --non-interactive netdata-all diff --git a/.github/labeler.yml b/.github/labeler.yml index 544b420c6..a1082a2a8 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -13,6 +13,7 @@ ACLK: - aclk/* - aclk/**/* + - mqtt_websockets area/backends: - backends/* @@ -52,6 +53,7 @@ area/database: area/docs: - "**/*.md" + - "**/*.mdx" - diagrams/* - diagrams/**/* @@ -69,6 +71,10 @@ area/health: - health/* - health/**/* +area/ml: + - ml/* + - ml/**/* + area/packaging: - contrib/* - contrib/**/* diff --git a/.github/scripts/build-static-x86_64.sh b/.github/scripts/build-static-x86_64.sh deleted file mode 100755 index 2676b6321..000000000 --- a/.github/scripts/build-static-x86_64.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/sh -# -# Builds the netdata-vX.Y.Z-xxxx.gz.run (static x86_64) artifact. - -set -e - -# shellcheck source=.github/scripts/functions.sh -. "$(dirname "$0")/functions.sh" - -NAME="${NAME:-netdata}" -VERSION="${VERSION:-"$(git describe)"}" -BASENAME="$NAME-$VERSION" - -prepare_build() { - progress "Preparing build" - ( - test -d artifacts || mkdir -p artifacts - ) >&2 -} - -build_static_x86_64() { - progress "Building static x86_64" - ( - USER="" ./packaging/makeself/build-x86_64-static.sh - ) >&2 -} - -prepare_assets() { - progress "Preparing assets" - ( - cp packaging/version artifacts/latest-version.txt - - cd artifacts || exit 1 - ln -s "${BASENAME}.gz.run" netdata-latest.gz.run - sha256sum -b ./* > "sha256sums.txt" - ) >&2 -} - -steps="prepare_build build_static_x86_64" -steps="$steps prepare_assets" - -_main() { - for step in $steps; do - if ! run "$step"; then - if [ -t 1 ]; then - debug - else - fail "Build failed" - fi - fi - done - - echo "🎉 All Done!" -} - -if [ -n "$0" ] && [ x"$0" != x"-bash" ]; then - _main "$@" -fi diff --git a/.github/scripts/build-static.sh b/.github/scripts/build-static.sh new file mode 100755 index 000000000..e81051438 --- /dev/null +++ b/.github/scripts/build-static.sh @@ -0,0 +1,61 @@ +#!/bin/sh +# +# Builds the netdata-vX.Y.Z-xxxx.gz.run (static x86_64) artifact. + +set -e + +# shellcheck source=.github/scripts/functions.sh +. 
"$(dirname "$0")/functions.sh" + +BUILDARCH="${1}" +NAME="${NAME:-netdata}" +VERSION="${VERSION:-"$(git describe)"}" +BASENAME="$NAME-$BUILDARCH-$VERSION" + +prepare_build() { + progress "Preparing build" + ( + test -d artifacts || mkdir -p artifacts + ) >&2 +} + +build_static() { + progress "Building static ${BUILDARCH}" + ( + USER="" ./packaging/makeself/build-static.sh "${BUILDARCH}" + ) >&2 +} + +prepare_assets() { + progress "Preparing assets" + ( + cp packaging/version artifacts/latest-version.txt + + cd artifacts || exit 1 + ln -s "${BASENAME}.gz.run" "netdata-${BUILDARCH}-latest.gz.run" + if [ "${BUILDARCH}" = "x86_64" ]; then + ln -s "${BASENAME}.gz.run" netdata-latest.gz.run + fi + ) >&2 +} + +steps="prepare_build build_static" +steps="$steps prepare_assets" + +_main() { + for step in $steps; do + if ! run "$step"; then + if [ -t 1 ]; then + debug + else + fail "Build failed" + fi + fi + done + + echo "🎉 All Done!" +} + +if [ -n "$0" ] && [ x"$0" != x"-bash" ]; then + _main "$@" +fi diff --git a/.github/scripts/check-updater.sh b/.github/scripts/check-updater.sh index 3ef4857f9..1051f1eee 100755 --- a/.github/scripts/check-updater.sh +++ b/.github/scripts/check-updater.sh @@ -4,20 +4,20 @@ set -e # shellcheck source=.github/scripts/functions.sh . "$(dirname "$0")/functions.sh" -check_successfull_update() { +check_successful_update() { progress "Check netdata version after update" ( netdata_version=$(netdata -v | awk '{print $2}') updater_version=$(cat packaging/version) if [ "$netdata_version" = "$updater_version" ]; then - echo "Update successfull!" + echo "Update successful!" else exit 1 fi ) >&2 } -steps="check_successfull_update" +steps="check_successful_update" _main() { for step in $steps; do diff --git a/.github/scripts/docker-test.sh b/.github/scripts/docker-test.sh new file mode 100755 index 000000000..795711b1a --- /dev/null +++ b/.github/scripts/docker-test.sh @@ -0,0 +1,58 @@ +#!/bin/sh + +export DEBIAN_FRONTEND=noninteractive + +wait_for() { + host="${1}" + port="${2}" + name="${3}" + timeout="30" + + if command -v nc > /dev/null ; then + netcat="nc" + elif command -v netcat > /dev/null ; then + netcat="netcat" + else + printf "Unable to find a usable netcat command.\n" + return 1 + fi + + printf "Waiting for %s on %s:%s ... " "${name}" "${host}" "${port}" + + sleep 30 + + i=0 + while ! ${netcat} -z "${host}" "${port}"; do + sleep 1 + if [ "$i" -gt "$timeout" ]; then + printf "Timed out!\n" + return 1 + fi + i="$((i + 1))" + done + printf "OK\n" +} + +apt-get update && apt-get upgrade -y && apt get install -y netcat + +docker run -d --name=netdata \ + -p 19999:19999 \ + -v netdataconfig:/etc/netdata \ + -v netdatalib:/var/lib/netdata \ + -v netdatacache:/var/cache/netdata \ + -v /etc/passwd:/host/etc/passwd:ro \ + -v /etc/group:/host/etc/group:ro \ + -v /proc:/host/proc:ro \ + -v /sys:/host/sys:ro \ + -v /etc/os-release:/host/etc/os-release:ro \ + --cap-add SYS_PTRACE \ + --security-opt apparmor=unconfined \ + netdata/netdata:test + +wait_for localhost 19999 netdata || exit 1 + +curl -sS http://127.0.0.1:19999/api/v1/info > ./response || exit 1 + +cat ./response + +jq '.version' ./response || exit 1 diff --git a/.github/scripts/pkg-test.sh b/.github/scripts/pkg-test.sh new file mode 100755 index 000000000..196fefa99 --- /dev/null +++ b/.github/scripts/pkg-test.sh @@ -0,0 +1,128 @@ +#!/bin/sh + +install_debian_like() { + # This is needed to ensure package installs don't prompt for any user input. 
+ export DEBIAN_FRONTEND=noninteractive + + apt-get update + + # Install Netdata + apt-get install -y /netdata/artifacts/netdata_"${VERSION}"_*.deb || exit 1 + + # Install testing tools + apt-get install -y --no-install-recommends curl netcat jq || exit 1 +} + +install_fedora_like() { + # Using a glob pattern here because I can't reliably determine what the + # resulting package name will be (TODO: There must be a better way!) + + PKGMGR="$( (command -v dnf > /dev/null && echo "dnf") || echo "yum")" + + pkg_version="$(echo "${VERSION}" | tr - .)" + + # Install Netdata + "$PKGMGR" install -y /netdata/artifacts/netdata-"${pkg_version}"-*.rpm + + # Install testing tools + "$PKGMGR" install -y curl nc jq || exit 1 +} + +install_centos() { + # Using a glob pattern here because I can't reliably determine what the + # resulting package name will be (TODO: There must be a better way!) + + PKGMGR="$( (command -v dnf > /dev/null && echo "dnf") || echo "yum")" + + pkg_version="$(echo "${VERSION}" | tr - .)" + + # Install EPEL (needed for `jq`) + "$PKGMGR" install -y epel-release || exit 1 + + # Install Netdata + "$PKGMGR" install -y /netdata/artifacts/netdata-"${pkg_version}"-*.rpm + + # Install testing tools + "$PKGMGR" install -y curl nc jq || exit 1 +} + +install_suse_like() { + # Using a glob pattern here because I can't reliably determine what the + # resulting package name will be (TODO: There must be a better way!) + + pkg_version="$(echo "${VERSION}" | tr - .)" + + # Install Netdata + zypper install -y --allow-unsigned-rpm /netdata/artifacts/netdata-"${pkg_version}"-*.rpm + + # Install testing tools + zypper install -y --no-recommends curl netcat-openbsd jq || exit 1 +} + +dump_log() { + cat ./netdata.log +} + +wait_for() { + host="${1}" + port="${2}" + name="${3}" + timeout="30" + + if command -v nc > /dev/null ; then + netcat="nc" + elif command -v netcat > /dev/null ; then + netcat="netcat" + else + printf "Unable to find a usable netcat command.\n" + return 1 + fi + + printf "Waiting for %s on %s:%s ... " "${name}" "${host}" "${port}" + + sleep 30 + + i=0 + while ! ${netcat} -z "${host}" "${port}"; do + sleep 1 + if [ "$i" -gt "$timeout" ]; then + printf "Timed out!\n" + return 1 + fi + i="$((i + 1))" + done + printf "OK\n" +} + +case "${DISTRO}" in + debian | ubuntu) + install_debian_like + ;; + fedora | oraclelinux) + install_fedora_like + ;; + centos) + install_centos + ;; + opensuse) + install_suse_like + ;; + *) + printf "ERROR: unsupported distro: %s_%s\n" "${DISTRO}" "${DISTRO_VERSION}" + exit 1 + ;; +esac + +trap dump_log EXIT + +/usr/sbin/netdata -D > ./netdata.log 2>&1 & + +wait_for localhost 19999 netdata || exit 1 + +curl -sS http://127.0.0.1:19999/api/v1/info > ./response || exit 1 + +cat ./response + +jq '.version' ./response || exit 1 + +trap - EXIT diff --git a/.github/scripts/run-updater-check.sh b/.github/scripts/run-updater-check.sh new file mode 100755 index 000000000..e6969a2d1 --- /dev/null +++ b/.github/scripts/run-updater-check.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +echo ">>> Installing Netdata..." +/netdata/packaging/installer/kickstart.sh --dont-wait --disable-telemetry || exit 1 +echo ">>> Updating Netdata..." +export NETDATA_NIGHTLIES_BASEURL="http://localhost:8080/artifacts/" # Pull the tarball from the local web server. +/netdata/packaging/installer/netdata-updater.sh --not-running-from-cron --no-updater-self-update || exit 1 +echo ">>> Checking if update was successful..."
+/netdata/.github/scripts/check-updater.sh || exit 1
diff --git a/.github/scripts/run_install_with_dist_file.sh b/.github/scripts/run_install_with_dist_file.sh
index 9453dff35..18cf80927 100755
--- a/.github/scripts/run_install_with_dist_file.sh
+++ b/.github/scripts/run_install_with_dist_file.sh
@@ -18,7 +18,7 @@ shift
 printf >&2 "Opening dist archive %s ... " "${distfile}"
 tar -xovf "${distfile}"
-distdir="$(echo "${distfile}" | cut -d. -f1,2,3)"
+distdir="$(echo "${distfile}" | rev | cut -d. -f3- | rev)"
 cp -a packaging/installer/install-required-packages.sh "${distdir}/install-required-packages.sh"
 if [ ! -d "${distdir}" ]; then
   printf >&2 "ERROR: %s is not a directory" "${distdir}"
@@ -29,6 +29,7 @@ printf >&2 "Entering %s and starting docker run ..." "${distdir}"
 pushd "${distdir}" || exit 1
 
 docker run \
+  -e DO_NOT_TRACK=1 \
   -v "${PWD}:/netdata" \
   -w /netdata \
   "ubuntu:latest" \
diff --git a/.github/workflows/build-and-install.yml b/.github/workflows/build-and-install.yml
deleted file mode 100644
index 251a65162..000000000
--- a/.github/workflows/build-and-install.yml
+++ /dev/null
@@ -1,215 +0,0 @@
----
-name: Builder
-on:
-  push:
-    branches:
-      - master
-  pull_request:
-jobs:
-  static-build:
-    name: Build (x86_64)
-    runs-on: ubuntu-latest
-    steps:
-      - name: Git clone repository
-        uses: actions/checkout@v2
-        with:
-          submodules: recursive
-      - run: |
-          git fetch --prune --unshallow --tags
-      - name: Build
-        run: |
-          .github/scripts/build-static-x86_64.sh
-  source-build:
-    name: Build & Install
-    strategy:
-      fail-fast: false
-      max-parallel: 8
-      matrix:
-        distro:
-          - 'alpine:edge'
-          - 'alpine:3.13'
-          - 'alpine:3.12'
-          - 'alpine:3.11'
-          - 'alpine:3.10'
-          - 'archlinux:latest'
-          - 'centos:8'
-          - 'centos:7'
-          - 'clearlinux:latest'
-          - 'debian:10'
-          - 'debian:9'
-          - 'fedora:34'
-          - 'fedora:33'
-          - 'fedora:32'
-          - 'opensuse/leap:15.2'
-          - 'opensuse/tumbleweed:latest'
-          - 'ubuntu:21.04'
-          - 'ubuntu:20.10'
-          - 'ubuntu:20.04'
-          - 'ubuntu:18.04'
-          - 'ubuntu:16.04'
-        include:
-          - distro: 'alpine:edge'
-            pre: 'apk add -U bash'
-            rmjsonc: 'apk del json-c-dev'
-          - distro: 'alpine:3.13'
-            pre: 'apk add -U bash'
-            rmjsonc: 'apk del json-c-dev'
-          - distro: 'alpine:3.12'
-            pre: 'apk add -U bash'
-            rmjsonc: 'apk del json-c-dev'
-          - distro: 'alpine:3.11'
-            pre: 'apk add -U bash'
-            rmjsonc: 'apk del json-c-dev'
-          - distro: 'alpine:3.10'
-            pre: 'apk add -U bash'
-            rmjsonc: 'apk del json-c-dev'
-
-          - distro: 'archlinux:latest'
-            pre: 'pacman --noconfirm -Syu && pacman --noconfirm -Sy grep libffi'
-
-          - distro: 'centos:8'
-            rmjsonc: 'dnf remove -y json-c-devel'
-
-          - distro: 'debian:10'
-            pre: 'apt-get update'
-            rmjsonc: 'apt-get remove -y libjson-c-dev'
-          - distro: 'debian:9'
-            pre: 'apt-get update'
-            rmjsonc: 'apt-get remove -y libjson-c-dev'
-
-          - distro: 'fedora:34'
-            rmjsonc: 'dnf remove -y json-c-devel'
-          - distro: 'fedora:33'
-            rmjsonc: 'dnf remove -y json-c-devel'
-          - distro: 'fedora:32'
-            rmjsonc: 'dnf remove -y json-c-devel'
-
-          - distro: 'opensuse/leap:15.2'
-            rmjsonc: 'zypper rm -y libjson-c-devel'
-          - distro: 'opensuse/tumbleweed:latest'
-            rmjsonc: 'zypper rm -y libjson-c-devel'
-
-          - distro: 'ubuntu:21.04'
-            pre: 'apt-get update'
-            rmjsonc: 'apt-get remove -y libjson-c-dev'
-          - distro: 'ubuntu:20.10'
-            pre: 'apt-get update'
-            rmjsonc: 'apt-get remove -y libjson-c-dev'
-          - distro: 'ubuntu:20.04'
-            pre: 'apt-get update'
-            rmjsonc: 'apt-get remove -y libjson-c-dev'
-          - distro: 'ubuntu:18.04'
-            pre: 'apt-get update'
-            rmjsonc: 'apt-get remove -y libjson-c-dev'
-          - distro: 'ubuntu:16.04'
-            pre: 'apt-get update'
-            rmjsonc: 'apt-get remove -y libjson-c-dev'
-    runs-on: ubuntu-latest
-    steps:
-      - name: Git clone repository
-        uses: actions/checkout@v2
-        with:
-          submodules: recursive
-      - name: install-required-packages.sh on ${{ matrix.distro }}
-        env:
-          PRE: ${{ matrix.pre }}
-          RMJSONC: ${{ matrix.rmjsonc }}
-        run: |
-          echo $PRE > ./prep-cmd.sh
-          echo $RMJSONC > ./rmjsonc.sh && chmod +x ./rmjsonc.sh
-          docker build . -f .github/dockerfiles/Dockerfile.build_test -t test --build-arg BASE=${{ matrix.distro }}
-      - name: Regular build on ${{ matrix.distro }}
-        run: |
-          docker run -w /netdata test /bin/sh -c 'autoreconf -ivf && ./configure && make -j2'
-      - name: netdata-installer on ${{ matrix.distro }}, disable cloud
-        run: |
-          docker run -w /netdata test /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --disable-cloud'
-      - name: netdata-installer on ${{ matrix.distro }}, require cloud
-        run: |
-          docker run -w /netdata test /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --require-cloud'
-      - name: netdata-installer on ${{ matrix.distro }}, require cloud, no JSON-C
-        if: matrix.rmjsonc != ''
-        run: |
-          docker run -w /netdata test \
-            /bin/sh -c '/netdata/rmjsonc.sh && ./netdata-installer.sh --dont-wait --dont-start-it --require-cloud'
-  aws-kinesis-build:
-    name: With AWS Kinesis SDK
-    strategy:
-      fail-fast: false
-      matrix:
-        distro:
-          - 'centos:8'
-          - 'debian:buster'
-          - 'fedora:32'
-          - 'ubuntu:20.04'
-        include:
-          - distro: 'centos:8'
-            pre: >-
-              yum -y update &&
-              yum -y groupinstall 'Development Tools' &&
-              yum -y install libcurl-devel openssl-devel libuuid-devel
-            build_kinesis: >-
-              git clone --branch 1.8.186 --depth 1 https://github.com/aws/aws-sdk-cpp.git &&
-              cmake -DCMAKE_INSTALL_PREFIX=/usr
-              -DBUILD_ONLY=kinesis
-              ./aws-sdk-cpp &&
-              make &&
-              make install
-          - distro: 'debian:buster'
-            pre: >-
-              apt-get update &&
-              DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential &&
-              DEBIAN_FRONTEND=noninteractive apt-get install -y libcurl4-openssl-dev libssl-dev uuid-dev zlib1g-dev libpulse-dev
-            build_kinesis: >-
-              git clone --branch 1.8.186 --depth 1 https://github.com/aws/aws-sdk-cpp.git &&
-              cmake -DCMAKE_INSTALL_PREFIX=/usr
-              -DBUILD_ONLY=kinesis
-              ./aws-sdk-cpp &&
-              make &&
-              make install
-          - distro: 'fedora:32'
-            pre: >-
-              dnf -y update &&
-              dnf -y groupinstall 'Development Tools' &&
-              dnf -y install libcurl-devel openssl-devel libuuid-devel
-            build_kinesis: >-
-              git clone --branch 1.8.186 --depth 1 https://github.com/aws/aws-sdk-cpp.git &&
-              cmake -DCMAKE_INSTALL_PREFIX=/usr
-              -DBUILD_ONLY=kinesis
-              ./aws-sdk-cpp &&
-              make &&
-              make install
-          - distro: 'ubuntu:20.04'
-            pre: >-
-              apt-get update &&
-              DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential &&
-              DEBIAN_FRONTEND=noninteractive apt-get install -y libcurl4-openssl-dev libssl-dev uuid-dev zlib1g-dev libpulse-dev
-            build_kinesis: >-
-              git clone --branch 1.8.186 --depth 1 https://github.com/aws/aws-sdk-cpp.git &&
-              cmake -DCMAKE_INSTALL_PREFIX=/usr
-              -DBUILD_ONLY=kinesis
-              ./aws-sdk-cpp &&
-              make &&
-              make install
-    runs-on: ubuntu-latest
-    steps:
-      - name: Git clone repository
-        uses: actions/checkout@v2
-        with:
-          submodules: recursive
-      - name: install-required-packages.sh on ${{ matrix.distro }}
-        env:
-          PRE: ${{ matrix.pre }}
-          BUILD_KINESIS: ${{ matrix.build_kinesis }}
-        run: |
-          echo $PRE > ./prep-cmd.sh
-          echo $BUILD_KINESIS > ./build-kinesis.sh && chmod +x ./build-kinesis.sh
-          docker build . -f .github/dockerfiles/Dockerfile.build_test -t test --build-arg BASE=${{ matrix.distro }}
-      - name: Build on ${{ matrix.distro }}
-        env:
-          RUNCMD: >-
-            ./build-kinesis.sh &&
-            ./netdata-installer.sh --dont-wait --dont-start-it --enable-backend-kinesis
-        run: |
-          docker run -w /netdata test \
-            /bin/sh -c "$RUNCMD"
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 000000000..2732a1851
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,538 @@
+---
+# CI code for building release artifacts.
+name: Build
+on:
+  push: # Master branch checks only validate the build and generate artifacts for testing.
+    branches:
+      - master
+  pull_request: null # PR checks only validate the build and generate artifacts for testing.
+  workflow_dispatch: # Dispatch runs build and validate, then pushes to the appropriate storage location.
+    inputs:
+      type:
+        description: Build Type
+        default: nightly
+        required: true
+      version:
+        description: Version Tag
+        default: nightly
+        required: true
+concurrency: # This keeps multiple instances of the job from running concurrently for the same ref and event type.
+  group: release-${{ github.ref }}-${{ github.event_name }}
+  cancel-in-progress: true
+jobs:
+  build-dist: # Build the distribution tarball and store it as an artifact.
+    name: Build Distribution Tarball
+    runs-on: ubuntu-latest
+    outputs:
+      distfile: ${{ steps.build.outputs.distfile }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+          submodules: recursive
+      - name: Mark Stable
+        if: github.event_name == 'workflow_dispatch' && github.event.inputs.type != 'nightly'
+        run: |
+          sed -i 's/^RELEASE_CHANNEL="nightly" *#/RELEASE_CHANNEL="stable" #/' netdata-installer.sh
+      - name: Build
+        id: build
+        run: |
+          mkdir -p artifacts
+          ./packaging/installer/install-required-packages.sh --dont-wait --non-interactive netdata
+          autoreconf -ivf
+          ./configure --prefix=/usr \
+                      --sysconfdir=/etc \
+                      --localstatedir=/var \
+                      --libexecdir=/usr/libexec \
+                      --with-zlib \
+                      --with-math \
+                      --with-user=netdata
+          make dist
+          echo "::set-output name=distfile::$(find . -name 'netdata-*.tar.gz')"
+          cp netdata-*.tar.gz artifacts/
+      - name: Store
+        uses: actions/upload-artifact@v2
+        with:
+          name: dist-tarball
+          path: artifacts/*.tar.gz
+          retention-days: 30
+      - name: Failure Notification
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_COLOR: 'danger'
+          SLACK_FOOTER: ''
+          SLACK_ICON_EMOJI: ':github-actions:'
+          SLACK_TITLE: 'Distribution tarball build failed:'
+          SLACK_USERNAME: 'GitHub Actions'
+          SLACK_MESSAGE: "Distribution tarball build failed."
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
+        if: >-
+          ${{
+            failure()
+            && startsWith(github.ref, 'refs/heads/master')
+            && github.event_name != 'pull_request'
+          }}
+
+  build-static: # Build the static binary archives, and store them as artifacts.
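+    # One archive is produced per architecture in the matrix below, all via
+    # the same .github/scripts/build-static.sh helper shown earlier in this patch.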
+    name: Build Static
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        arch:
+          - 'x86_64'
+          - 'armv7l'
+          - 'aarch64'
+          - 'ppc64le'
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+          submodules: recursive
+      - name: Mark Stable
+        if: github.event_name == 'workflow_dispatch' && github.event.inputs.type != 'nightly'
+        run: |
+          sed -i 's/^RELEASE_CHANNEL="nightly" *#/RELEASE_CHANNEL="stable" #/' netdata-installer.sh packaging/makeself/install-or-update.sh
+      - name: Build
+        run: .github/scripts/build-static.sh ${{ matrix.arch }}
+      - name: Store
+        uses: actions/upload-artifact@v2
+        with:
+          name: static-archive
+          path: artifacts/*.gz.run
+          retention-days: 30
+      - name: Failure Notification
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_COLOR: 'danger'
+          SLACK_FOOTER: ''
+          SLACK_ICON_EMOJI: ':github-actions:'
+          SLACK_TITLE: 'Static build for ${{ matrix.arch }} failed:'
+          SLACK_USERNAME: 'GitHub Actions'
+          SLACK_MESSAGE: "Static build for ${{ matrix.arch }} failed."
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
+        if: >-
+          ${{
+            failure()
+            && startsWith(github.ref, 'refs/heads/master')
+            && github.event_name != 'pull_request'
+          }}
+
+  matrix: # Generate the shared build matrix for our build tests.
+    name: Prepare Build Matrix
+    runs-on: ubuntu-latest
+    outputs:
+      matrix: ${{ steps.set-matrix.outputs.matrix }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Prepare tools
+        run: |
+          sudo apt-get update && sudo apt-get install -y jq
+      - name: Read build matrix
+        id: set-matrix
+        run: |
+          TASKS="$(jq -c . .github/data/build-matrix.json)"
+          echo "Generated Matrix: $TASKS"
+          echo "::set-output name=matrix::$TASKS"
+
+  prepare-test-images: # Prepare the test environments for our build checks. This also checks dependency handling code for each tested environment.
+    name: Prepare Test Environments
+    runs-on: ubuntu-latest
+    needs:
+      - matrix
+    strategy:
+      # Unlike the actual build tests, this completes _very_ fast (average of about 3 minutes for each job), so we
+      # just run everything in parallel instead of limiting job concurrency.
+      fail-fast: false
+      matrix: ${{ fromJson(needs.matrix.outputs.matrix) }}
+    steps:
+      - name: Git clone repository
+        uses: actions/checkout@v2
+      - name: Setup Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Build test environment
+        uses: docker/build-push-action@v2
+        with:
+          push: false
+          load: false
+          file: .github/dockerfiles/Dockerfile.build_test
+          build-args: |
+            BASE=${{ matrix.distro }}
+            PRE=${{ matrix.pre }}
+            RMJSONC=${{ matrix.rmjsonc }}
+          outputs: type=oci,dest=/tmp/image.tar
+          tags: test:${{ matrix.artifact_key }}
+      - name: Upload image artifact
+        uses: actions/upload-artifact@v2
+        with:
+          name: ${{ matrix.artifact_key }}-test-env
+          path: /tmp/image.tar
+          retention-days: 30
+      - name: Failure Notification
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_COLOR: 'danger'
+          SLACK_FOOTER: ''
+          SLACK_ICON_EMOJI: ':github-actions:'
+          SLACK_TITLE: 'Test environment preparation for ${{ matrix.distro }} failed:'
+          SLACK_USERNAME: 'GitHub Actions'
+          SLACK_MESSAGE: "Test environment preparation for ${{ matrix.distro }} failed."
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
+        if: >-
+          ${{
+            failure()
+            && startsWith(github.ref, 'refs/heads/master')
+            && github.event_name != 'pull_request'
+          }}
+
+  source-build: # Test various source build arrangements.
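+    # Each matrix entry downloads the OCI image produced by prepare-test-images,
+    # loads it, and exercises the build and several installer configurations inside it.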
+    name: Test Source Build
+    runs-on: ubuntu-latest
+    needs:
+      - matrix
+      - prepare-test-images
+    strategy:
+      fail-fast: false
+      max-parallel: 8
+      matrix: ${{ fromJson(needs.matrix.outputs.matrix) }}
+    steps:
+      - name: Git clone repository
+        uses: actions/checkout@v2
+        with:
+          submodules: recursive
+      - name: Fetch test environment
+        uses: actions/download-artifact@v2
+        with:
+          name: ${{ matrix.artifact_key }}-test-env
+      - name: Load test environment
+        id: load
+        run: |
+          docker load --input image.tar | tee image-info.txt
+          echo "::set-output name=image::$(cut -d ':' -f 3 image-info.txt)"
+      - name: Regular build on ${{ matrix.distro }}
+        run: |
+          docker run --security-opt seccomp=unconfined -w /netdata sha256:${{ steps.load.outputs.image }} \
+            /bin/sh -c 'autoreconf -ivf && ./configure && make -j2'
+      - name: netdata-installer on ${{ matrix.distro }}, disable cloud
+        run: |
+          docker run --security-opt seccomp=unconfined -w /netdata sha256:${{ steps.load.outputs.image }} \
+            /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --disable-cloud'
+      - name: netdata-installer on ${{ matrix.distro }}, require cloud
+        run: |
+          docker run --security-opt seccomp=unconfined -w /netdata sha256:${{ steps.load.outputs.image }} \
+            /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --require-cloud'
+      - name: netdata-installer on ${{ matrix.distro }}, require cloud, require ACLK-NG
+        run: |
+          docker run --security-opt seccomp=unconfined -w /netdata -e NETDATA_CONFIGURE_OPTIONS='--with-aclk-ng' \
+            sha256:${{ steps.load.outputs.image }} /bin/sh -c './netdata-installer.sh --dont-wait --dont-start-it --require-cloud'
+      - name: netdata-installer on ${{ matrix.distro }}, require cloud, no JSON-C
+        if: matrix.rmjsonc != ''
+        run: |
+          docker run --security-opt seccomp=unconfined -w /netdata sha256:${{ steps.load.outputs.image }} \
+            /bin/sh -c '/rmjsonc.sh && ./netdata-installer.sh --dont-wait --dont-start-it --require-cloud'
+      - name: Failure Notification
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_COLOR: 'danger'
+          SLACK_FOOTER: ''
+          SLACK_ICON_EMOJI: ':github-actions:'
+          SLACK_TITLE: 'Build tests for ${{ matrix.distro }} failed:'
+          SLACK_USERNAME: 'GitHub Actions'
+          SLACK_MESSAGE: "Build tests for ${{ matrix.distro }} failed."
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
+        if: >-
+          ${{
+            failure()
+            && startsWith(github.ref, 'refs/heads/master')
+            && github.event_name != 'pull_request'
+          }}
+
+  updater-check: # Test the generated dist archive using the updater code.
+    name: Test Generated Distfile and Updater Code
+    runs-on: ubuntu-latest
+    needs:
+      - build-dist
+      - matrix
+      - prepare-test-images
+    strategy:
+      fail-fast: false
+      max-parallel: 8
+      matrix: ${{ fromJson(needs.matrix.outputs.matrix) }}
+    services:
+      apache: # This gets used to serve the dist tarball for the updater script.
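+        # The runner workspace is mounted as the web root below, so the artifacts/
+        # directory prepared in a later step is served at http://localhost:8080/artifacts/.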
+        image: httpd:2.4
+        ports:
+          - 8080:80
+        volumes:
+          - ${{ github.workspace }}:/usr/local/apache2/htdocs/
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Fetch dist tarball artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: dist-tarball
+          path: dist-tarball
+      - name: Prepare artifact directory
+        run: |
+          mkdir -p artifacts || exit 1
+          echo "9999.0.0-0" > artifacts/latest-version.txt || exit 1
+          cp dist-tarball/* artifacts || exit 1
+          cd artifacts || exit 1
+          ln -s ${{ needs.build-dist.outputs.distfile }} netdata-latest.tar.gz || exit 1
+          sha256sum -b ./* > "sha256sums.txt" || exit 1
+          cat sha256sums.txt
+      - name: Fetch test environment
+        uses: actions/download-artifact@v2
+        with:
+          name: ${{ matrix.artifact_key }}-test-env
+      - name: Load test environment
+        id: load
+        run: |
+          docker load --input image.tar | tee image-info.txt
+          echo "::set-output name=image::$(cut -d ':' -f 3 image-info.txt)"
+      - name: Install netdata and run the updater on ${{ matrix.distro }}
+        run: |
+          docker run --security-opt seccomp=unconfined -e DO_NOT_TRACK=1 --network host -w /netdata sha256:${{ steps.load.outputs.image }} \
+            /netdata/.github/scripts/run-updater-check.sh
+      - name: Failure Notification
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_COLOR: 'danger'
+          SLACK_FOOTER: ''
+          SLACK_ICON_EMOJI: ':github-actions:'
+          SLACK_TITLE: 'Updater checks for ${{ matrix.distro }} failed:'
+          SLACK_USERNAME: 'GitHub Actions'
+          SLACK_MESSAGE: "Updater checks for ${{ matrix.distro }} failed."
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
+        if: >-
+          ${{
+            failure()
+            && startsWith(github.ref, 'refs/heads/master')
+            && github.event_name != 'pull_request'
+          }}
+
+  prepare-upload: # Consolidate the artifacts for uploading or releasing.
+    name: Prepare Artifacts
+    runs-on: ubuntu-latest
+    needs:
+      - build-dist
+      - build-static
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Prepare Environment
+        run: mkdir -p artifacts
+      - name: Retrieve Dist Tarball
+        uses: actions/download-artifact@v2
+        with:
+          name: dist-tarball
+          path: dist-tarball
+      - name: Retrieve Static Build Artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: static-archive
+          path: static-archive
+      - name: Prepare Artifacts
+        working-directory: ./artifacts/
+        run: |
+          mv ../dist-tarball/* . || exit 1
+          mv ../static-archive/* . || exit 1
+          ln -s ${{ needs.build-dist.outputs.distfile }} netdata-latest.tar.gz || exit 1
+          cp ../packaging/version ./latest-version.txt || exit 1
+          sha256sum -b ./* > sha256sums.txt || exit 1
+          cat sha256sums.txt
+      - name: Store Artifacts
+        uses: actions/upload-artifact@v2
+        with:
+          name: final-artifacts
+          path: artifacts/*
+          retention-days: 30
+      - name: Failure Notification
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_COLOR: 'danger'
+          SLACK_FOOTER: ''
+          SLACK_ICON_EMOJI: ':github-actions:'
+          SLACK_TITLE: 'Failed to prepare release artifacts for upload:'
+          SLACK_USERNAME: 'GitHub Actions'
+          SLACK_MESSAGE: "Failed to prepare release artifacts for upload."
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
+        if: >-
+          ${{
+            failure()
+            && startsWith(github.ref, 'refs/heads/master')
+            && github.event_name != 'pull_request'
+          }}
+
+  artifact-verification-dist: # Verify the regular installer works with the consolidated artifacts.
+    name: Test Consolidated Artifacts (Source)
+    runs-on: ubuntu-latest
+    needs:
+      - prepare-upload
+    services:
+      apache: # This gets used to serve the dist tarball for the installer.
+        image: httpd:2.4
+        ports:
+          - 8080:80
+        volumes:
+          - ${{ github.workspace }}:/usr/local/apache2/htdocs/
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Fetch artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: final-artifacts
+          path: artifacts
+      - name: Verify that artifacts work with installer
+        env:
+          NETDATA_TARBALL_BASEURL: http://localhost:8080/artifacts
+        run: packaging/installer/kickstart.sh --dont-start-it --disable-telemetry --dont-wait
+      - name: Failure Notification
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_COLOR: 'danger'
+          SLACK_FOOTER: ''
+          SLACK_ICON_EMOJI: ':github-actions:'
+          SLACK_TITLE: 'Artifact verification for source tarball failed.'
+          SLACK_USERNAME: 'GitHub Actions'
+          SLACK_MESSAGE: "Artifact verification for source tarball failed."
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
+        if: >-
+          ${{
+            failure()
+            && startsWith(github.ref, 'refs/heads/master')
+            && github.event_name != 'pull_request'
+          }}
+
+  artifact-verification-static: # Verify the static installer works with the consolidated artifacts.
+    name: Test Consolidated Artifacts (Static)
+    runs-on: ubuntu-latest
+    needs:
+      - prepare-upload
+    services:
+      apache: # This gets used to serve the static archives.
+        image: httpd:2.4
+        ports:
+          - 8080:80
+        volumes:
+          - ${{ github.workspace }}:/usr/local/apache2/htdocs/
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Fetch artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: final-artifacts
+          path: artifacts
+      - name: Verify that artifacts work with installer
+        env:
+          NETDATA_TARBALL_BASEURL: http://localhost:8080/artifacts
+        run: packaging/installer/kickstart-static64.sh --dont-start-it --disable-telemetry --dont-wait
+      - name: Failure Notification
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_COLOR: 'danger'
+          SLACK_FOOTER: ''
+          SLACK_ICON_EMOJI: ':github-actions:'
+          SLACK_TITLE: 'Artifact verification for static build failed.'
+          SLACK_USERNAME: 'GitHub Actions'
+          SLACK_MESSAGE: "Artifact verification for static build failed."
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
+        if: >-
+          ${{
+            failure()
+            && startsWith(github.ref, 'refs/heads/master')
+            && github.event_name != 'pull_request'
+          }}
+
+  upload-nightly: # Upload the nightly build artifacts to GCS.
+    name: Upload Nightly Artifacts
+    runs-on: ubuntu-latest
+    if: github.event_name == 'workflow_dispatch' && github.event.inputs.type == 'nightly'
+    needs:
+      - updater-check
+      - source-build
+      - artifact-verification-dist
+      - artifact-verification-static
+    steps:
+      - name: Retrieve Artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: final-artifacts
+          path: final-artifacts
+      - name: Setup Gcloud
+        uses: google-github-actions/setup-gcloud@v0.2.1
+        with:
+          project_id: ${{ secrets.GCP_NIGHTLY_STORAGE_PROJECT }}
+          service_account_key: ${{ secrets.GCP_STORAGE_SERVICE_ACCOUNT_KEY }}
+          export_default_credentials: true
+      - name: Upload Artifacts
+        uses: google-github-actions/upload-cloud-storage@v0.4.0
+        with:
+          destination: ${{ secrets.GCP_NIGHTLY_STORAGE_BUCKET }}
+          gzip: false
+          path: ./final-artifacts
+          parent: false
+      - name: Failure Notification
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_COLOR: 'danger'
+          SLACK_FOOTER: ''
+          SLACK_ICON_EMOJI: ':github-actions:'
+          SLACK_TITLE: 'Failed to upload nightly release artifacts:'
+          SLACK_USERNAME: 'GitHub Actions'
+          SLACK_MESSAGE: "Failed to upload nightly release artifacts."
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
+        if: >-
+          ${{
+            failure()
+            && startsWith(github.ref, 'refs/heads/master')
+            && github.event_name != 'pull_request'
+          }}
+
+  upload-release: # Create the draft release and upload the build artifacts.
+    name: Create Release Draft
+    runs-on: ubuntu-latest
+    if: github.event_name == 'workflow_dispatch' && github.event.inputs.type == 'release'
+    needs:
+      - updater-check
+      - source-build
+      - artifact-verification-dist
+      - artifact-verification-static
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Retrieve Artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: final-artifacts
+          path: final-artifacts
+      - name: Create Release
+        uses: ncipollo/release-action@v1
+        with:
+          allowUpdates: false
+          artifactErrorsFailBuild: true
+          artifacts: 'final-artifacts/sha256sums.txt,final-artifacts/netdata-*.tar.gz,final-artifacts/netdata-*.gz.run'
+          draft: true
+          tag: ${{ github.event.inputs.version }}
+          token: ${{ secrets.NETDATABOT_TOKEN }}
+      - name: Failure Notification
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_COLOR: 'danger'
+          SLACK_FOOTER: ''
+          SLACK_ICON_EMOJI: ':github-actions:'
+          SLACK_TITLE: 'Failed to draft release:'
+          SLACK_USERNAME: 'GitHub Actions'
+          SLACK_MESSAGE: "Failed to draft release."
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
+        if: >-
+          ${{
+            failure()
+            && startsWith(github.ref, 'refs/heads/master')
+            && github.event_name == 'workflow_dispatch'
+          }}
diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml
index e4025dd20..bc746f9ac 100644
--- a/.github/workflows/checks.yml
+++ b/.github/workflows/checks.yml
@@ -4,7 +4,12 @@ on:
   push:
     branches:
       - master
-  pull_request:
+  pull_request: null
+env:
+  DO_NOT_TRACK: 1
+concurrency:
+  group: checks-${{ github.ref }}
+  cancel-in-progress: true
 jobs:
   checksum-checks:
     name: Checksums
@@ -40,44 +45,14 @@ jobs:
   clang-checks:
     name: Clang
     runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Build
-        run: |
-          docker build -f .github/dockerfiles/Dockerfile.clang .
-  dist-checks:
-    name: Dist
-    runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v2
         with:
           submodules: recursive
-      - name: Prepare environment
-        run: |
-          ./packaging/installer/install-required-packages.sh --dont-wait --non-interactive netdata
-          sudo apt-get install -y libjson-c-dev libipmimonitoring-dev libcups2-dev libsnappy-dev \
-                                  libprotobuf-dev libprotoc-dev libssl-dev protobuf-compiler \
-                                  libnetfilter-acct-dev
-      - name: Configure
-        run: |
-          autoreconf -ivf
-          ./configure \
-            --with-zlib \
-            --with-math \
-            --with-user=netdata \
-            CFLAGS=-O2
-      - name: Make dist
-        run: |
-          make dist
-      - name: Verify & Set distfile
-        run: |
-          ls -lah netdata-*.tar.gz
-          echo "DISTFILE=$(ls netdata-*.tar.gz)" >> $GITHUB_ENV
-      - name: Run run_install_with_dist_file.sh
+      - name: Build
         run: |
-          ./.github/scripts/run_install_with_dist_file.sh "${DISTFILE}"
+          docker build -f .github/dockerfiles/Dockerfile.clang .
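+          # Compile-only check; the resulting image is not tagged, kept, or pushed anywhere.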
   gitignore-check:
     name: .gitignore
     runs-on: ubuntu-latest
diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml
index 766275ed9..c9e25ebf9 100644
--- a/.github/workflows/coverity.yml
+++ b/.github/workflows/coverity.yml
@@ -8,6 +8,11 @@ on:
     paths:
       - .github/workflows/coverity.yml
      - coverity-scan.sh
+env:
+  DO_NOT_TRACK: 1
+concurrency:
+  group: coverity-${{ github.ref }}
+  cancel-in-progress: true
 jobs:
   coverity:
     if: github.repository == 'netdata/netdata'
@@ -38,7 +43,7 @@ jobs:
         uses: rtCamp/action-slack-notify@v2
         env:
           SLACK_COLOR: 'danger'
-          SLACK_FOOTER:
+          SLACK_FOOTER: ''
           SLACK_ICON_EMOJI: ':github-actions:'
           SLACK_TITLE: 'Coverity run failed:'
           SLACK_USERNAME: 'GitHub Actions'
diff --git a/.github/workflows/dashboard-pr.yml b/.github/workflows/dashboard-pr.yml
index 8f5a40f2b..711cf1e1f 100644
--- a/.github/workflows/dashboard-pr.yml
+++ b/.github/workflows/dashboard-pr.yml
@@ -8,9 +8,12 @@ on:
       dashboard_version:
         # This must be specified, and must _exactly_ match the version
         # tag for the release to be used for the update.
-        name: Dashboard Version
+        description: Dashboard Version
         required: true
 
+env:
+  DO_NOT_TRACK: 1
+
 jobs:
   dashboard-pr:
     name: Generate Dashboard Version Bump PR
@@ -18,30 +21,16 @@ jobs:
     steps:
       - name: Checkout
        uses: actions/checkout@v2
-      - name: Create Branch
-        # This is needed because we want to do a PR, and the commit
-        # action used below requires the branch it is commiting to to
-        # already exist.
-        run: |
-          git checkout -b dashboard-${{ github.event.inputs.dashboard_version }}
-          git push -u origin dashboard-${{ github.event.inputs.dashboard_version }}
       - name: Update Files
         run: |
           web/gui/bundle_dashboard.py ${{ github.event.inputs.dashboard_version }}
-      - name: Commit Changes
-        uses: swinton/commit@v2.x
-        env:
-          GH_TOKEN: ${{ secrets.NETDATABOT_GITHUB_TOKEN }}
+      - name: Create Pull Request
+        uses: peter-evans/create-pull-request@v3
         with:
-          files: |
-            packaging/dashboard.version
-            packaging/dashboard.checksums
+          title: 'Update dashboard to version ${{ github.event.inputs.dashboard_version }}.'
+          body: 'See https://github.com/netdata/dashboard/releases/tag/${{ github.event.inputs.dashboard_version }} for changes.'
+          branch: dashboard-${{ github.event.inputs.dashboard_version }}
+          branch-suffix: timestamp
+          delete-branch: true
           commit-message: 'Update dashboard to version ${{ github.event.inputs.dashboard_version }}.'
-          ref: refs/heads/dashboard-${{ github.event.inputs.dashboard_version }}
-      - name: Create PR
-        uses: repo-sync/pull-request@v2
-        with:
-          source_branch: dashboard-${{ github.event.inputs.dashboard_version }}
-          pr_title: 'Update dashboard to version ${{ github.event.inputs.dashboard_version }}.'
-          pr_body: 'See https://github.com/netdata/dashboard/releases/tag/${{ github.event.inputs.dashboard_version }} for changes.'
-          github_token: ${{ secrets.NETDATABOT_GITHUB_TOKEN }}
+          token: ${{ secrets.NETDATABOT_GITHUB_TOKEN }}
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 4f18ce5b6..f8a1d6855 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -4,43 +4,94 @@ on:
   push:
     branches:
      - master
-  pull_request:
+  pull_request: null
   workflow_dispatch:
     inputs:
       version:
         name: Version Tag
         default: nightly
         required: true
+env:
+  DO_NOT_TRACK: 1
+concurrency:
+  group: docker-${{ github.ref }}-${{ github.event_name }}
+  cancel-in-progress: true
 jobs:
-  docker-build:
-    name: Docker Build
+  docker-test:
+    name: Docker Runtime Test
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v2
         with:
           submodules: recursive
-      - name: Determine if we should push changes and which tags to use
-        if: github.event_name == 'workflow_dispatch' && github.event.inputs.version != 'nightly'
+      - name: Setup Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Test Build
+        uses: docker/build-push-action@v2
+        with:
+          load: true
+          push: false
+          tags: netdata/netdata:test
+      - name: Test Image
+        run: .github/scripts/docker-test.sh
+
+  docker-ci:
+    if: github.event_name != 'workflow_dispatch'
+    name: Docker Alt Arch Builds
+    needs: docker-test
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        platforms:
+          - linux/i386
+          - linux/arm/v7
+          - linux/arm64
+          - linux/ppc64le
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          submodules: recursive
+      - name: Setup QEMU
+        if: matrix.platforms != 'linux/i386'
+        uses: docker/setup-qemu-action@v1
+      - name: Setup Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Build
+        uses: docker/build-push-action@v2
+        with:
+          platforms: ${{ matrix.platforms }}
+          load: false
+          push: false
+          tags: netdata/netdata:test
+
+  docker-publish:
+    if: github.event_name == 'workflow_dispatch'
+    name: Docker Build and Publish
+    needs: docker-test
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          submodules: recursive
+      - name: Determine which tags to use
+        if: github.event.inputs.version != 'nightly'
         run: |
-          echo "publish=true" >> $GITHUB_ENV
           echo "tags=netdata/netdata:latest,netdata/netdata:stable,$(.github/scripts/gen-docker-tags.py ${{ github.event.inputs.version }})" >> $GITHUB_ENV
-      - name: Determine if we should push changes and which tags to use
-        if: github.event_name == 'workflow_dispatch' && github.event.inputs.version == 'nightly'
+      - name: Determine which tags to use
+        if: github.event.inputs.version == 'nightly'
         run: |
-          echo "publish=true" >> $GITHUB_ENV
           echo "tags=netdata/netdata:latest,netdata/netdata:edge" >> $GITHUB_ENV
-      - name: Determine if we should push changes and which tags to use
-        if: github.event_name != 'workflow_dispatch'
-        run: |
-          echo "publish=false" >> $GITHUB_ENV
-          echo "tags=netdata/netdata:test" >> $GITHUB_ENV
+      - name: Mark image as official
+        if: github.repository == 'netdata/netdata'
+        run: echo "OFFICIAL_IMAGE=true" >> $GITHUB_ENV
       - name: Setup QEMU
         uses: docker/setup-qemu-action@v1
       - name: Setup Buildx
         uses: docker/setup-buildx-action@v1
       - name: Docker Hub Login
-        if: github.event_name == 'workflow_dispatch'
         uses: docker/login-action@v1
         with:
           username: ${{ secrets.DOCKER_HUB_USERNAME }}
@@ -48,14 +99,15 @@ jobs:
       - name: Docker Build
         uses: docker/build-push-action@v2
         with:
-          platforms: linux/amd64,linux/i386,linux/arm/v7,linux/arm64
-          push: ${{ env.publish }}
+          platforms: linux/amd64,linux/i386,linux/arm/v7,linux/arm64,linux/ppc64le
+          push: true
           tags: ${{ env.tags }}
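+          # OFFICIAL_IMAGE is only set (above) for builds out of netdata/netdata, so builds from forks are not marked official.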
+          build-args: OFFICIAL_IMAGE=${{ env.OFFICIAL_IMAGE }}
       - name: Failure Notification
         uses: rtCamp/action-slack-notify@v2
         env:
           SLACK_COLOR: 'danger'
-          SLACK_FOOTER:
+          SLACK_FOOTER: ''
           SLACK_ICON_EMOJI: ':github-actions:'
           SLACK_TITLE: 'Docker Build failed:'
           SLACK_USERNAME: 'GitHub Actions'
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 9f7234f92..73bcc8773 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -9,6 +9,8 @@ on:
   pull_request:
     paths:
       - '**.md'
+env:
+  DO_NOT_TRACK: 1
 jobs:
   markdown-link-check:
     name: Broken Links
diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml
index 24842e73d..a4937a9db 100644
--- a/.github/workflows/labeler.yml
+++ b/.github/workflows/labeler.yml
@@ -3,7 +3,9 @@ name: Pull Request Labeler
 on:
   schedule:
-    - cron: '*/5 * * * *'
+    - cron: '*/10 * * * *'
+env:
+  DO_NOT_TRACK: 1
 jobs:
   labeler:
     runs-on: ubuntu-latest
diff --git a/.github/workflows/packaging.yml b/.github/workflows/packaging.yml
index 934aa264b..80c14e24b 100644
--- a/.github/workflows/packaging.yml
+++ b/.github/workflows/packaging.yml
@@ -15,6 +15,11 @@ on:
       version:
         name: Package version
         required: false
+env:
+  DO_NOT_TRACK: 1
+concurrency:
+  group: packages-${{ github.ref }}-${{ github.event_name }}
+  cancel-in-progress: true
 jobs:
   build:
     name: Build
@@ -26,21 +31,47 @@ jobs:
        include:
          - {distro: debian, version: "9", pkgclouddistro: debian/stretch, format: deb, base_image: debian, platform: linux/amd64, arch: amd64}
          - {distro: debian, version: "9", pkgclouddistro: debian/stretch, format: deb, base_image: debian, platform: linux/i386, arch: i386}
+         - {distro: debian, version: "9", pkgclouddistro: debian/stretch, format: deb, base_image: debian, platform: linux/arm/v7, arch: armhf}
+         - {distro: debian, version: "9", pkgclouddistro: debian/stretch, format: deb, base_image: debian, platform: linux/arm64/v8, arch: arm64}
          - {distro: debian, version: "10", pkgclouddistro: debian/buster, format: deb, base_image: debian, platform: linux/amd64, arch: amd64}
          - {distro: debian, version: "10", pkgclouddistro: debian/buster, format: deb, base_image: debian, platform: linux/i386, arch: i386}
-         - {distro: ubuntu, version: "16.04", pkgclouddistro: ubuntu/xenial, format: deb, base_image: ubuntu, platform: linux/amd64, arch: amd64}
-         - {distro: ubuntu, version: "16.04", pkgclouddistro: ubuntu/xenial, format: deb, base_image: ubuntu, platform: linux/i386, arch: i386}
+         - {distro: debian, version: "10", pkgclouddistro: debian/buster, format: deb, base_image: debian, platform: linux/arm/v7, arch: armhf}
+         - {distro: debian, version: "10", pkgclouddistro: debian/buster, format: deb, base_image: debian, platform: linux/arm64/v8, arch: arm64}
+         - {distro: debian, version: "11", pkgclouddistro: debian/bullseye, format: deb, base_image: debian, platform: linux/amd64, arch: amd64, alias: bullseye}
+         - {distro: debian, version: "11", pkgclouddistro: debian/bullseye, format: deb, base_image: debian, platform: linux/i386, arch: i386, alias: bullseye}
+         - {distro: debian, version: "11", pkgclouddistro: debian/bullseye, format: deb, base_image: debian, platform: linux/arm/v7, arch: armhf, alias: bullseye}
+         - {distro: debian, version: "11", pkgclouddistro: debian/bullseye, format: deb, base_image: debian, platform: linux/arm64/v8, arch: arm64, alias: bullseye}
          - {distro: ubuntu, version: "18.04", pkgclouddistro: ubuntu/bionic, format: deb, base_image: ubuntu, platform: linux/amd64, arch: amd64}
          - {distro: ubuntu, version: "18.04", pkgclouddistro: ubuntu/bionic, format: deb, base_image: ubuntu, platform: linux/i386, arch: i386}
+         - {distro: ubuntu, version: "18.04", pkgclouddistro: ubuntu/bionic, format: deb, base_image: ubuntu, platform: linux/arm/v7, arch: armhf}
+         - {distro: ubuntu, version: "18.04", pkgclouddistro: ubuntu/bionic, format: deb, base_image: ubuntu, platform: linux/arm64/v8, arch: arm64}
          - {distro: ubuntu, version: "20.04", pkgclouddistro: ubuntu/focal, format: deb, base_image: ubuntu, platform: linux/amd64, arch: amd64}
-         - {distro: ubuntu, version: "20.10", pkgclouddistro: ubuntu/groovy, format: deb, base_image: ubuntu, platform: linux/amd64, arch: amd64}
+         - {distro: ubuntu, version: "20.04", pkgclouddistro: ubuntu/focal, format: deb, base_image: ubuntu, platform: linux/arm/v7, arch: armhf}
+         - {distro: ubuntu, version: "20.04", pkgclouddistro: ubuntu/focal, format: deb, base_image: ubuntu, platform: linux/arm64/v8, arch: arm64}
          - {distro: ubuntu, version: "21.04", pkgclouddistro: ubuntu/hirsute, format: deb, base_image: ubuntu, platform: linux/amd64, arch: amd64}
+         - {distro: ubuntu, version: "21.04", pkgclouddistro: ubuntu/hirsute, format: deb, base_image: ubuntu, platform: linux/arm/v7, arch: armhf}
+         - {distro: ubuntu, version: "21.04", pkgclouddistro: ubuntu/hirsute, format: deb, base_image: ubuntu, platform: linux/arm64/v8, arch: arm64}
+         - {distro: ubuntu, version: "21.10", pkgclouddistro: ubuntu/impish, format: deb, base_image: ubuntu, platform: linux/amd64, arch: amd64}
+         - {distro: ubuntu, version: "21.10", pkgclouddistro: ubuntu/impish, format: deb, base_image: ubuntu, platform: linux/arm/v7, arch: armhf}
+         - {distro: ubuntu, version: "21.10", pkgclouddistro: ubuntu/impish, format: deb, base_image: ubuntu, platform: linux/arm64/v8, arch: arm64}
          - {distro: centos, version: "7", pkgclouddistro: el/7, format: rpm, base_image: centos, platform: linux/amd64, arch: amd64}
          - {distro: centos, version: "8", pkgclouddistro: el/8, format: rpm, base_image: centos, platform: linux/amd64, arch: amd64}
-         - {distro: fedora, version: "32", pkgclouddistro: fedora/32, format: rpm, base_image: fedora, platform: linux/amd64, arch: amd64}
+         - {distro: centos, version: "8", pkgclouddistro: el/8, format: rpm, base_image: centos, platform: linux/arm64/v8, arch: arm64}
          - {distro: fedora, version: "33", pkgclouddistro: fedora/33, format: rpm, base_image: fedora, platform: linux/amd64, arch: amd64}
+         - {distro: fedora, version: "33", pkgclouddistro: fedora/33, format: rpm, base_image: fedora, platform: linux/arm/v7, arch: armhf}
+         - {distro: fedora, version: "33", pkgclouddistro: fedora/33, format: rpm, base_image: fedora, platform: linux/arm64/v8, arch: arm64}
          - {distro: fedora, version: "34", pkgclouddistro: fedora/34, format: rpm, base_image: fedora, platform: linux/amd64, arch: amd64}
+         - {distro: fedora, version: "34", pkgclouddistro: fedora/34, format: rpm, base_image: fedora, platform: linux/arm/v7, arch: armhf}
+         - {distro: fedora, version: "34", pkgclouddistro: fedora/34, format: rpm, base_image: fedora, platform: linux/arm64/v8, arch: arm64}
+         - {distro: fedora, version: "35", pkgclouddistro: fedora/35, format: rpm, base_image: fedora, platform: linux/amd64, arch: amd64}
+         - {distro: fedora, version: "35", pkgclouddistro: fedora/35, format: rpm, base_image: fedora, platform: linux/arm/v7, arch: armhf}
+         - {distro: fedora, version: "35", pkgclouddistro: fedora/35, format: rpm, base_image: fedora, platform: linux/arm64/v8, arch: arm64}
          - {distro: opensuse, version: "15.2", pkgclouddistro: opensuse/15.2, format: rpm, base_image: opensuse/leap, platform: linux/amd64, arch: amd64}
+         - {distro: opensuse, version: "15.2", pkgclouddistro: opensuse/15.2, format: rpm, base_image: opensuse/leap, platform: linux/arm64/v8, arch: arm64}
+         - {distro: opensuse, version: "15.3", pkgclouddistro: opensuse/15.3, format: rpm, base_image: opensuse/leap, platform: linux/amd64, arch: amd64}
+         - {distro: opensuse, version: "15.3", pkgclouddistro: opensuse/15.3, format: rpm, base_image: opensuse/leap, platform: linux/arm64/v8, arch: arm64}
+         - {distro: oraclelinux, version: "8", pkgclouddistro: ol/8, format: rpm, base_image: oraclelinux, platform: linux/amd64, arch: amd64}
+         - {distro: oraclelinux, version: "8", pkgclouddistro: ol/8, format: rpm, base_image: oraclelinux, platform: linux/arm64/v8, arch: arm64}
       # We intentionally disable the fail-fast behavior so that a
       # build failure for one version doesn't prevent us from publishing
       # successfully built and tested packages for another version.
@@ -52,14 +83,14 @@ jobs:
         uses: actions/checkout@v2
         with:
           fetch-depth: 0 # We need full history for versioning
-          submodules: true
+          submodules: recursive
       - name: Checkout Tag # Otherwise check out the tag that triggered this.
         if: github.event_name == 'workflow_dispatch'
         uses: actions/checkout@v2
         with:
           ref: ${{ github.event.ref }}
           fetch-depth: 0 # We need full history for versioning
-          submodules: true
+          submodules: recursive
       - name: Check Base Branch
         run: |
           if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
@@ -86,35 +117,44 @@ jobs:
             echo "pkg_version=$(cut -d'-' -f 1 packaging/version | sed -e 's/^v//')" >> $GITHUB_ENV
           fi
       - name: Setup QEMU
-        if: matrix.platform != 'linux/amd64'
+        if: matrix.platform != 'linux/amd64' && matrix.platform != 'linux/i386'
         uses: docker/setup-qemu-action@v1
-      - name: Setup Buildx
-        uses: docker/setup-buildx-action@v1
       - name: Prepare Docker Environment
         shell: bash
         run: |
           echo '{"cgroup-parent": "/actions_job", "experimental": true}' | sudo tee /etc/docker/daemon.json 2>/dev/null
           sudo service docker restart
-      - name: Build Packages
-        uses: docker/build-push-action@v2
+      - name: Set Base Image Version
+        shell: bash
+        run: |
+          if [ -z "${{ matrix.alias }}" ] ; then
+            echo "version=${{ matrix.version }}" >> $GITHUB_ENV
+          else
+            echo "version=${{ matrix.alias }}" >> $GITHUB_ENV
+          fi
+      - name: Fetch base image
+        uses: nick-invision/retry@v2
         with:
-          platforms: ${{ matrix.platform }}
-          file: packaging/Dockerfile.packager
-          tags: local/package-builder:${{ matrix.distro}}${{ matrix.version }}
-          push: false
-          load: true
-          build-args: |
-            ARCH=${{ matrix.arch }}
-            DISTRO=${{ matrix.distro }}
-            TEST_BASE=${{ matrix.base_image }}
-            DISTRO_VERSION=${{ matrix.version }}
-            PKG_VERSION=${{ env.pkg_version }}
-      - name: Extract Packages
+          max_attempts: 3
+          retry_wait_seconds: 30
+          timeout_seconds: 900
+          command: |
+            docker pull --platform ${{ matrix.platform }} ${{ matrix.base_image }}:${{ env.version }}
+            docker pull --platform ${{ matrix.platform }} netdata/package-builders:${{ matrix.distro }}${{ matrix.version }}
+      - name: Build Packages
+        shell: bash
+        run: |
+          docker run --security-opt seccomp=unconfined -e DO_NOT_TRACK=1 -e VERSION=${{ env.pkg_version }} --platform=${{ matrix.platform }} -v $PWD:/netdata netdata/package-builders:${{ matrix.distro }}${{ matrix.version }}
+      - name: Test Packages
         shell: bash
         run: |
-          mkdir -p artifacts
-          docker run --platform ${{ matrix.platform }} -v $PWD/artifacts:/artifacts local/package-builder:${{ matrix.distro }}${{ matrix.version }}
-      - name: Upload
+          docker run --security-opt seccomp=unconfined -e DO_NOT_TRACK=1 -e DISTRO=${{ matrix.distro }} -e VERSION=${{ env.pkg_version }} -e DISTRO_VERSION=${{ env.version }} --platform=${{ matrix.platform }} -v $PWD:/netdata ${{ matrix.base_image }}:${{ env.version }} /netdata/.github/scripts/pkg-test.sh
+      - name: Save Packages
+        uses: actions/upload-artifact@v2
+        with:
+          name: ${{ matrix.distro }}-${{ matrix.version }}-${{ matrix.arch }}-packages
+          path: ${{ github.workspace }}/artifacts/*
+      - name: Upload to PackageCloud
         if: github.event_name == 'workflow_dispatch'
         shell: bash
         env:
@@ -122,7 +162,7 @@ jobs:
         run: |
           echo "Packages to upload:\n$(ls artifacts/*.${{ matrix.format }})"
           for pkgfile in artifacts/*.${{ matrix.format }} ; do
-            .github/scripts/package_cloud_wrapper.sh yank ${{ env.repo }}/${{ matrix.pkgclouddistro }} ${pkgfile} || true
+            .github/scripts/package_cloud_wrapper.sh yank ${{ env.repo }}/${{ matrix.pkgclouddistro }} $(basename ${pkgfile}) || true
             .github/scripts/package_cloud_wrapper.sh push ${{ env.repo }}/${{ matrix.pkgclouddistro }} ${pkgfile}
           done
       - name: Clean
@@ -137,7 +177,6 @@ jobs:
         uses: rtCamp/action-slack-notify@v2
         env:
           SLACK_COLOR: 'danger'
-          SLACK_FOOTER:
           SLACK_ICON_EMOJI: ':github-actions:'
           SLACK_TITLE: 'Package Build failed:'
           SLACK_USERNAME: 'GitHub Actions'
diff --git a/.github/workflows/repoconfig-packages.yml b/.github/workflows/repoconfig-packages.yml
new file mode 100644
index 000000000..c132ec8b5
--- /dev/null
+++ b/.github/workflows/repoconfig-packages.yml
@@ -0,0 +1,80 @@
+---
+# Handles building of repository configuration packages.
+name: Repository Packages
+on:
+  workflow_dispatch: null
+env:
+  DO_NOT_TRACK: 1
+jobs:
+  build:
+    name: Build
+    runs-on: ubuntu-latest
+    env:
+      DO_NOT_TRACK: 1
+      DOCKER_CLI_EXPERIMENTAL: enabled
+    strategy:
+      # This needs to be kept in sync with the matrix in packaging.yml, but should only include the AMD64 lines.
+      matrix:
+        include:
+          - {distro: debian, version: "9", pkgclouddistro: debian/stretch, format: deb, base_image: debian, platform: linux/amd64, arch: amd64}
+          - {distro: debian, version: "10", pkgclouddistro: debian/buster, format: deb, base_image: debian, platform: linux/amd64, arch: amd64}
+          - {distro: debian, version: "11", pkgclouddistro: debian/bullseye, format: deb, base_image: debian, platform: linux/amd64, arch: amd64}
+          - {distro: ubuntu, version: "18.04", pkgclouddistro: ubuntu/bionic, format: deb, base_image: ubuntu, platform: linux/amd64, arch: amd64}
+          - {distro: ubuntu, version: "20.04", pkgclouddistro: ubuntu/focal, format: deb, base_image: ubuntu, platform: linux/amd64, arch: amd64}
+          - {distro: ubuntu, version: "21.04", pkgclouddistro: ubuntu/hirsute, format: deb, base_image: ubuntu, platform: linux/amd64, arch: amd64}
+          - {distro: ubuntu, version: "21.10", pkgclouddistro: ubuntu/impish, format: deb, base_image: ubuntu, platform: linux/amd64, arch: amd64}
+          - {distro: centos, version: "7", pkgclouddistro: el/7, format: rpm, base_image: centos, platform: linux/amd64, arch: amd64}
+          - {distro: centos, version: "8", pkgclouddistro: el/8, format: rpm, base_image: centos, platform: linux/amd64, arch: amd64}
+          - {distro: fedora, version: "33", pkgclouddistro: fedora/33, format: rpm, base_image: fedora, platform: linux/amd64, arch: amd64}
+          - {distro: fedora, version: "34", pkgclouddistro: fedora/34, format: rpm, base_image: fedora, platform: linux/amd64, arch: amd64}
+          - {distro: fedora, version: "35", pkgclouddistro: fedora/35, format: rpm, base_image: fedora, platform: linux/amd64, arch: amd64}
+          - {distro: opensuse, version: "15.2", pkgclouddistro: opensuse/15.2, format: rpm, base_image: opensuse/leap, platform: linux/amd64, arch: amd64}
+          - {distro: opensuse, version: "15.3", pkgclouddistro: opensuse/15.3, format: rpm, base_image: opensuse/leap, platform: linux/amd64, arch: amd64}
+          - {distro: oraclelinux, version: "8", pkgclouddistro: ol/8, format: rpm, base_image: oraclelinux, platform: linux/amd64, arch: amd64}
+      # We intentionally disable the fail-fast behavior so that a
+      # build failure for one version doesn't prevent us from publishing
+      # successfully built and tested packages for another version.
+      fail-fast: false
+      max-parallel: 8
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        # Unlike the other packaging workflows, we do not need a deep clone or submodules for this.
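+      # The base image pull below is wrapped in a retry (3 attempts) because
+      # transient registry errors would otherwise fail the whole matrix entry.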
+      - name: Fetch base image
+        uses: nick-invision/retry@v2
+        with:
+          max_attempts: 3
+          retry_wait_seconds: 30
+          timeout_seconds: 900
+          command: docker pull --platform ${{ matrix.platform }} ${{ matrix.base_image }}:${{ matrix.version }}
+      - name: Build Packages
+        shell: bash
+        run: |
+          docker run --security-opt seccomp=unconfined -e DO_NOT_TRACK=1 --platform ${{ matrix.platform }} \
+            -v $PWD:/netdata ${{ matrix.base_image }}:${{ matrix.version }} \
+            /netdata/packaging/repoconfig/build-${{ matrix.format }}.sh
+      - name: Upload Packages
+        shell: bash
+        env:
+          PKG_CLOUD_TOKEN: ${{ secrets.PACKAGE_CLOUD_API_KEY }}
+        run: |
+          echo "Packages to upload:\n$(ls artifacts/*.${{ matrix.format }})"
+          for pkgfile in artifacts/*.${{ matrix.format }} ; do
+            .github/scripts/package_cloud_wrapper.sh yank ${{ secrets.PACKAGE_CLOUD_REPO }}/${{ matrix.pkgclouddistro }} $(basename ${pkgfile}) || true
+            .github/scripts/package_cloud_wrapper.sh push ${{ secrets.PACKAGE_CLOUD_REPO }}/${{ matrix.pkgclouddistro }} ${pkgfile}
+            .github/scripts/package_cloud_wrapper.sh yank ${{ secrets.PACKAGE_CLOUD_REPO }}-edge/${{ matrix.pkgclouddistro }} $(basename ${pkgfile}) || true
+            .github/scripts/package_cloud_wrapper.sh push ${{ secrets.PACKAGE_CLOUD_REPO }}-edge/${{ matrix.pkgclouddistro }} ${pkgfile}
+            .github/scripts/package_cloud_wrapper.sh yank ${{ secrets.PACKAGE_CLOUD_REPO }}-repoconfig/${{ matrix.pkgclouddistro }} $(basename ${pkgfile}) || true
+            .github/scripts/package_cloud_wrapper.sh push ${{ secrets.PACKAGE_CLOUD_REPO }}-repoconfig/${{ matrix.pkgclouddistro }} ${pkgfile}
+          done
+      - name: Failure Notification
+        if: ${{ failure() }}
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_COLOR: 'danger'
+          SLACK_FOOTER: ''
+          SLACK_ICON_EMOJI: ':github-actions:'
+          SLACK_TITLE: 'Repository Package Build failed:'
+          SLACK_USERNAME: 'GitHub Actions'
+          SLACK_MESSAGE: "${{ matrix.pkgclouddistro }} ${{ matrix.version }} repository package build failed."
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
diff --git a/.github/workflows/review.yml b/.github/workflows/review.yml
index a267fea3f..e9972303b 100644
--- a/.github/workflows/review.yml
+++ b/.github/workflows/review.yml
@@ -2,15 +2,95 @@
 # Runs various ReviewDog based checks against PR with suggested changes to improve quality
 name: Review
 on:
-  pull_request:
+  pull_request: null
 env:
-  run_eslint: 0
-  run_hadolint: 0
-  run_shellcheck: 0
-  run_yamllint: 0
+  DO_NOT_TRACK: 1
+concurrency:
+  group: review-${{ github.ref }}
+  cancel-in-progress: true
 jobs:
+  prep-review:
+    name: Prepare Review Jobs
+    runs-on: ubuntu-latest
+    outputs:
+      actionlint: ${{ steps.actionlint.outputs.run }}
+      eslint: ${{ steps.eslint.outputs.run }}
+      hadolint: ${{ steps.hadolint.outputs.run }}
+      shellcheck: ${{ steps.shellcheck.outputs.run }}
+      yamllint: ${{ steps.yamllint.outputs.run }}
+    steps:
+      - name: Clone repository
+        uses: actions/checkout@v2
+        with:
+          submodules: recursive
+          fetch-depth: 0
+      - name: Check files for actionlint
+        id: actionlint
+        run: |
+          if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '\.github/workflows/.*' ; then
+            echo '::set-output name=run::true'
+            echo 'GitHub Actions workflows have changed, need to run actionlint.'
+          else
+            echo '::set-output name=run::false'
+          fi
+      - name: Check files for eslint
+        id: eslint
+        run: |
+          if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -v "web/gui/dashboard" | grep -Eq '.*\.js|node\.d\.plugin\.in' ; then
+            echo '::set-output name=run::true'
+            echo 'JS files have changed, need to run ESLint.'
+ else + echo '::set-output name=run::false' + fi + - name: Check files for hadolint + id: hadolint + run: | + if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '.*Dockerfile.*' ; then + echo '::set-output name=run::true' + echo 'Dockerfiles have changed, need to run Hadolint.' + else + echo '::set-output name=run::false' + fi + - name: Check files for shellcheck + id: shellcheck + run: | + if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '.*\.sh.*' ; then + echo '::set-output name=run::true' + echo 'Shell scripts have changed, need to run shellcheck.' + else + echo '::set-output name=run::false' + fi + - name: Check files for yamllint + id: yamllint + run: | + if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '.*\.ya?ml|python\.d/.*\.conf' ; then + echo '::set-output name=run::true' + echo 'YAML files have changed, need to run yamllint.' + else + echo '::set-output name=run::false' + fi + + actionlint: + name: actionlint + needs: prep-review + if: needs.prep-review.outputs.actionlint == 'true' + runs-on: ubuntu-latest + steps: + - name: Git clone repository + uses: actions/checkout@v2 + with: + submodules: recursive + fetch-depth: 0 + - name: Run actionlint + uses: reviewdog/action-actionlint@v1 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + reporter: github-pr-check + eslint: name: eslint + needs: prep-review + if: needs.prep-review.outputs.eslint == 'true' runs-on: ubuntu-latest steps: - name: Git clone repository @@ -18,13 +98,7 @@ jobs: with: submodules: recursive fetch-depth: 0 - - name: Check files - run: | - if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '*\.js|node\.d\.plugin\.in' ; then - echo 'run_eslint=1' >> $GITHUB_ENV - fi - name: Run eslint - if: env.run_eslint == 1 uses: reviewdog/action-eslint@v1 with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -33,19 +107,15 @@ jobs: hadolint: name: hadolint + needs: prep-review + if: needs.prep-review.outputs.hadolint == 'true' runs-on: ubuntu-latest steps: - name: Git clone repository uses: actions/checkout@v2 with: fetch-depth: 0 - - name: Check files - run: | - if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '*Dockerfile*' ; then - echo 'run_hadolint=1' >> $GITHUB_ENV - fi - name: Run hadolint - if: env.run_hadolint == 1 uses: reviewdog/action-hadolint@v1 with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -53,6 +123,8 @@ jobs: shellcheck: name: shellcheck + needs: prep-review + if: needs.prep-review.outputs.shellcheck == 'true' runs-on: ubuntu-latest steps: - name: Git clone repository @@ -60,13 +132,7 @@ jobs: with: submodules: recursive fetch-depth: 0 - - name: Check files - run: | - if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '*\.sh.*' ; then - echo 'run_shellcheck=1' >> $GITHUB_ENV - fi - name: Run shellcheck - if: env.run_shellcheck == 1 uses: reviewdog/action-shellcheck@v1 with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -77,6 +143,8 @@ jobs: yamllint: name: yamllint + needs: prep-review + if: needs.prep-review.outputs.yamllint == 'true' runs-on: ubuntu-latest steps: - name: Git clone repository @@ -84,13 +152,7 @@ jobs: with: submodules: recursive fetch-depth: 0 - - name: Check files - run: | - if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq '*\.ya?ml|python\.d/.*\.conf' ; then - echo 'run_yamllint=1' >> $GITHUB_ENV - fi - name: Run yamllint - if: env.run_yamllint == 1 uses: reviewdog/action-yamllint@v1 with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git 
a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 7e47f12da..20c6f7b8c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -14,6 +14,11 @@ on: - 'CMakeLists.txt' - '**.c' - '**.h' +env: + DO_NOT_TRACK: 1 +concurrency: + group: tests-${{ github.ref }} + cancel-in-progress: true jobs: unit-tests-legacy: name: Unit Tests (legacy) @@ -52,7 +57,7 @@ jobs: - name: Configure run: | autoreconf -ivf - ./configure --without-aclk-ng + ./configure --disable-ml # XXX: Work-around for bug with libbson-1.0 in Ubuntu 18.04 # See: https://bugs.launchpad.net/ubuntu/+source/libmongoc/+bug/1790771 # https://jira.mongodb.org/browse/CDRIVER-2818 @@ -85,7 +90,7 @@ jobs: find . -type f -name '*.log' -exec cp {} ../logs/ \; popd || exit 1 - name: Upload Artifacts - uses: actions/upload-artifact@v2.2.3 + uses: actions/upload-artifact@v2.2.4 if: always() with: name: logs diff --git a/.github/workflows/updater.yml b/.github/workflows/updater.yml deleted file mode 100644 index 5f8d77c4a..000000000 --- a/.github/workflows/updater.yml +++ /dev/null @@ -1,85 +0,0 @@ ---- -name: Updater -on: - push: - branches: - - master - pull_request: - branches: - - master - -jobs: - source-build: - name: Install, Build & Update - strategy: - fail-fast: false - max-parallel: 8 - matrix: - distro: - - 'alpine:3.12' - - 'alpine:3.13' - - 'archlinux:latest' - - 'centos:7' - - 'centos:8' - - 'clearlinux:latest' - - 'debian:9' - - 'debian:10' - - 'fedora:33' - - 'fedora:34' - - 'ubuntu:16.04' - - 'ubuntu:18.04' - - 'ubuntu:20.04' - - 'ubuntu:20.10' - - 'ubuntu:21.04' - include: - - distro: 'alpine:3.12' - pre: 'apk add -U bash' - - distro: 'alpine:3.13' - pre: 'apk add -U bash' - - distro: 'debian:9' - pre: 'apt-get update' - - distro: 'debian:10' - pre: 'apt-get update' - - distro: 'ubuntu:16.04' - pre: 'apt-get update' - - distro: 'ubuntu:18.04' - pre: 'apt-get update' - - distro: 'ubuntu:20.04' - pre: 'apt-get update' - - distro: 'ubuntu:20.10' - pre: 'apt-get update' - - distro: 'ubuntu:21.04' - pre: 'apt-get update' - runs-on: ubuntu-latest - steps: - - name: Git clone repository - uses: actions/checkout@v2 - with: - submodules: recursive - - name: Install required packages & build tarball - run: | - ./packaging/installer/install-required-packages.sh --dont-wait --non-interactive netdata-all - .github/scripts/build-dist.sh - - name: Run a dockerised web server to serve files used by the custom update script - run: | - docker run -dit --name my-apache-app -p 8080:80 -v "$PWD":/usr/local/apache2/htdocs/ httpd:2.4 - - name: Replace URLs in updater script to point at the local web server - run: | - ORIG_TARBALL="export NETDATA_TARBALL_URL=.*" - ORIG_CHECKSUM="export NETDATA_TARBALL_CHECKSUM_URL=.*" - CURRENT_VERSION="current_version=.*" - NEW_TARBALL="export NETDATA_TARBALL_URL=http://localhost:8080/artifacts/netdata-latest.tar.gz" - NEW_CHECKSUM="export NETDATA_TARBALL_CHECKSUM_URL=http://localhost:8080/artifacts/sha256sums.txt" - sed -i "s|${ORIG_TARBALL}|${NEW_TARBALL}|g" packaging/installer/netdata-updater.sh - sed -i "s|${ORIG_CHECKSUM}|${NEW_CHECKSUM}|g" packaging/installer/netdata-updater.sh - sed -i "s|"current_version=.*"|"current_version=1"|g" packaging/installer/netdata-updater.sh - - name: Install netdata and run the updater on ${{ matrix.distro }} - env: - PRE: ${{ matrix.pre }} - run: | - echo $PRE > ./prep-cmd.sh - docker build . 
diff --git a/.github/workflows/updater.yml b/.github/workflows/updater.yml
deleted file mode 100644
index 5f8d77c4a..000000000
--- a/.github/workflows/updater.yml
+++ /dev/null
@@ -1,85 +0,0 @@
----
-name: Updater
-on:
-  push:
-    branches:
-      - master
-  pull_request:
-    branches:
-      - master
-
-jobs:
-  source-build:
-    name: Install, Build & Update
-    strategy:
-      fail-fast: false
-      max-parallel: 8
-      matrix:
-        distro:
-          - 'alpine:3.12'
-          - 'alpine:3.13'
-          - 'archlinux:latest'
-          - 'centos:7'
-          - 'centos:8'
-          - 'clearlinux:latest'
-          - 'debian:9'
-          - 'debian:10'
-          - 'fedora:33'
-          - 'fedora:34'
-          - 'ubuntu:16.04'
-          - 'ubuntu:18.04'
-          - 'ubuntu:20.04'
-          - 'ubuntu:20.10'
-          - 'ubuntu:21.04'
-        include:
-          - distro: 'alpine:3.12'
-            pre: 'apk add -U bash'
-          - distro: 'alpine:3.13'
-            pre: 'apk add -U bash'
-          - distro: 'debian:9'
-            pre: 'apt-get update'
-          - distro: 'debian:10'
-            pre: 'apt-get update'
-          - distro: 'ubuntu:16.04'
-            pre: 'apt-get update'
-          - distro: 'ubuntu:18.04'
-            pre: 'apt-get update'
-          - distro: 'ubuntu:20.04'
-            pre: 'apt-get update'
-          - distro: 'ubuntu:20.10'
-            pre: 'apt-get update'
-          - distro: 'ubuntu:21.04'
-            pre: 'apt-get update'
-    runs-on: ubuntu-latest
-    steps:
-      - name: Git clone repository
-        uses: actions/checkout@v2
-        with:
-          submodules: recursive
-      - name: Install required packages & build tarball
-        run: |
-          ./packaging/installer/install-required-packages.sh --dont-wait --non-interactive netdata-all
-          .github/scripts/build-dist.sh
-      - name: Run a dockerised web server to serve files used by the custom update script
-        run: |
-          docker run -dit --name my-apache-app -p 8080:80 -v "$PWD":/usr/local/apache2/htdocs/ httpd:2.4
-      - name: Replace URLs in updater script to point at the local web server
-        run: |
-          ORIG_TARBALL="export NETDATA_TARBALL_URL=.*"
-          ORIG_CHECKSUM="export NETDATA_TARBALL_CHECKSUM_URL=.*"
-          CURRENT_VERSION="current_version=.*"
-          NEW_TARBALL="export NETDATA_TARBALL_URL=http://localhost:8080/artifacts/netdata-latest.tar.gz"
-          NEW_CHECKSUM="export NETDATA_TARBALL_CHECKSUM_URL=http://localhost:8080/artifacts/sha256sums.txt"
-          sed -i "s|${ORIG_TARBALL}|${NEW_TARBALL}|g" packaging/installer/netdata-updater.sh
-          sed -i "s|${ORIG_CHECKSUM}|${NEW_CHECKSUM}|g" packaging/installer/netdata-updater.sh
-          sed -i "s|"current_version=.*"|"current_version=1"|g" packaging/installer/netdata-updater.sh
-      - name: Install netdata and run the updater on ${{ matrix.distro }}
-        env:
-          PRE: ${{ matrix.pre }}
-        run: |
-          echo $PRE > ./prep-cmd.sh
-          docker build . -f .github/dockerfiles/Dockerfile.build_test -t test --build-arg BASE=${{ matrix.distro }}
-          docker run --network host -w /netdata test \
-            /bin/sh -c '/netdata/packaging/installer/kickstart.sh --dont-wait \
-            && /netdata/packaging/installer/netdata-updater.sh --not-running-from-cron --no-updater-self-update \
-            && bash /netdata/.github/scripts/check-updater.sh'
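Note: the workflow deleted above exercised `netdata-updater.sh` end to end against a locally served tarball. A condensed sketch of that flow, assuming `.github/scripts/build-dist.sh` has already populated `artifacts/` under the current directory (the workflow ran the install/update steps inside a disposable container; running them directly installs netdata on the host):

```sh
#!/bin/sh
# Condensed sketch of the updater self-test from the deleted workflow above.
set -e

# Serve the freshly built tarball and checksums over HTTP.
docker run -dit --name my-apache-app -p 8080:80 \
    -v "$PWD":/usr/local/apache2/htdocs/ httpd:2.4

# Point the updater at the local server and force a version mismatch.
sed -i \
    -e 's|export NETDATA_TARBALL_URL=.*|export NETDATA_TARBALL_URL=http://localhost:8080/artifacts/netdata-latest.tar.gz|' \
    -e 's|export NETDATA_TARBALL_CHECKSUM_URL=.*|export NETDATA_TARBALL_CHECKSUM_URL=http://localhost:8080/artifacts/sha256sums.txt|' \
    -e 's|current_version=.*|current_version=1|' \
    packaging/installer/netdata-updater.sh

# Install, update, then verify the update actually happened.
packaging/installer/kickstart.sh --dont-wait
packaging/installer/netdata-updater.sh --not-running-from-cron --no-updater-self-update
bash .github/scripts/check-updater.sh
```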
"TRAVIS_MESSAGE" " Draft release submission failed" + - git checkout "${TRAVIS_BRANCH}" && export BUILD_VERSION="$(cat packaging/version | sed 's/^v//')" + - .travis/trigger_artifact_build.sh "${GITHUB_TOKEN}" "${BUILD_VERSION}" "release" + after_failure: post_message "TRAVIS_MESSAGE" " Failed to trigger release artifact build during nightly release" "${NOTIF_CHANNEL}" - name: Trigger Docker image build and publish script: @@ -149,55 +140,11 @@ jobs: # This is the nightly execution step # - stage: Nightly release - name: Create nightly release artifacts, publish to GCS + name: Trigger nightly artifact build and upload script: - - echo "GIT Branch:" && git branch - - echo "Last commit:" && git log -1 - - echo "GIT Describe:" && git describe - - echo "packaging/version:" && cat packaging/version - - .travis/create_artifacts.sh - after_failure: post_message "TRAVIS_MESSAGE" " Nightly artifacts generation failed" - git: - depth: false - before_deploy: - echo "Preparing creds under ${TRAVIS_REPO_SLUG}"; - if [ "${TRAVIS_REPO_SLUG}" == "netdata/netdata" ]; then - openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in .travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d; - else - echo "Beta deployment stage in progress"; - openssl aes-256-cbc -K $encrypted_8daf19481253_key -iv $encrypted_8daf19481253_iv -in .travis/gcs-credentials.json.enc -out .travis/gcs-credentials.json -d; - fi; - deploy: - # Beta storage, used for testing purposes - - provider: gcs - edge: - branch: gcs-ng - project_id: netdata-storage - credentials: .travis/gcs-credentials.json - bucket: "netdata-dev-nightlies" - skip_cleanup: true - local_dir: "artifacts" - on: - # Only deploy on netdata/netdata, master branch, when artifacts directory is created - repo: ${TRAVIS_REPO_SLUG} - branch: master - condition: -d "artifacts" && ${TRAVIS_REPO_SLUG} != "netdata/netdata" - - # Production storage - - provider: gcs - edge: - branch: gcs-ng - project_id: netdata-storage - credentials: .travis/gcs-credentials.json - bucket: "netdata-nightlies" - skip_cleanup: true - local_dir: "artifacts" - on: - # Only deploy on netdata/netdata, master branch, when artifacts directory is created - repo: netdata/netdata - branch: master - condition: -d "artifacts" && ${TRAVIS_REPO_SLUG} = "netdata/netdata" - after_deploy: rm -f .travis/gcs-credentials.json + - git checkout "${TRAVIS_BRANCH}" && export BUILD_VERSION="$(cat packaging/version | sed 's/^v//')" + - .travis/trigger_artifact_build.sh "${GITHUB_TOKEN}" "${BUILD_VERSION}" "nightly" + after_failure: post_message "TRAVIS_MESSAGE" " Failed to trigger release artifact build during nightly release" "${NOTIF_CHANNEL}" - name: Trigger Docker image build and publish script: .travis/trigger_docker_build.sh "${GITHUB_TOKEN}" "nightly" diff --git a/.travis/create_artifacts.sh b/.travis/create_artifacts.sh index a5349cca3..27428913e 100755 --- a/.travis/create_artifacts.sh +++ b/.travis/create_artifacts.sh @@ -52,8 +52,12 @@ make dist mv "${BASENAME}.tar.gz" artifacts/ echo "--- Create self-extractor ---" -command -v git > /dev/null && [ -d .git ] && git clean -d -f -./packaging/makeself/build-x86_64-static.sh +sxarches="x86_64 armv7l aarch64" +for arch in ${sxarches}; do + git clean -d -f + rm -rf packating/makeself/tmp + ./packaging/makeself/build-static.sh ${arch} +done # Needed for GCS echo "--- Copy artifacts to separate directory ---" @@ -61,7 +65,13 @@ echo "--- Copy artifacts to separate directory ---" cp packaging/version artifacts/latest-version.txt 
diff --git a/.travis/create_artifacts.sh b/.travis/create_artifacts.sh
index a5349cca3..27428913e 100755
--- a/.travis/create_artifacts.sh
+++ b/.travis/create_artifacts.sh
@@ -52,8 +52,12 @@ make dist
 mv "${BASENAME}.tar.gz" artifacts/

 echo "--- Create self-extractor ---"
-command -v git > /dev/null && [ -d .git ] && git clean -d -f
-./packaging/makeself/build-x86_64-static.sh
+sxarches="x86_64 armv7l aarch64"
+for arch in ${sxarches}; do
+  git clean -d -f
+  rm -rf packaging/makeself/tmp
+  ./packaging/makeself/build-static.sh ${arch}
+done

 # Needed for GCS
 echo "--- Copy artifacts to separate directory ---"
@@ -61,7 +65,13 @@ echo "--- Copy artifacts to separate directory ---"
 cp packaging/version artifacts/latest-version.txt
 cd artifacts
 ln -s "${BASENAME}.tar.gz" netdata-latest.tar.gz
+
+for arch in ${sxarches}; do
+  ln -s "netdata-${arch}-$(git describe).gz.run" netdata-${arch}-latest.gz.run
+done
+
 ln -s "${BASENAME}.gz.run" netdata-latest.gz.run
+
 sha256sum -b ./* > "sha256sums.txt"
 echo "checksums:"
 cat sha256sums.txt
diff --git a/.travis/current_build_status b/.travis/current_build_status
new file mode 100644
index 000000000..11a6d0a54
--- /dev/null
+++ b/.travis/current_build_status
@@ -0,0 +1 @@
+changes-#18220
diff --git a/.travis/trigger_artifact_build.sh b/.travis/trigger_artifact_build.sh
new file mode 100755
index 000000000..3ec5d02d0
--- /dev/null
+++ b/.travis/trigger_artifact_build.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+token="${1}"
+version="${2}"
+type="${3}"
+
+resp="$(curl -X POST \
+             -H 'Accept: application/vnd.github.v3+json' \
+             -H "Authorization: Bearer ${token}" \
+             "https://api.github.com/repos/netdata/netdata/actions/workflows/build.yml/dispatches" \
+             -d "{\"ref\": \"master\", \"inputs\": {\"version\": \"${version}\", \"type\": \"${type}\"}}")"
+
+if [ -z "${resp}" ]; then
+  echo "Successfully triggered release artifact build."
+  exit 0
+else
+  echo "Failed to trigger release artifact build. Output:"
+  echo "${resp}"
+  exit 1
+fi
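Note on `.travis/trigger_artifact_build.sh` above: GitHub's `workflow_dispatch` endpoint answers `204 No Content` on success, which is why the script treats an empty `curl` response as success and any response body as an error report. A stricter variant (a sketch, not the script the repo ships) could gate on the HTTP status code instead:

```sh
#!/bin/sh
# Alternative sketch: gate on the HTTP status instead of body emptiness.
# The dispatches endpoint replies "204 No Content" when the trigger succeeds.
status="$(curl -s -o /tmp/dispatch-body -w '%{http_code}' -X POST \
    -H 'Accept: application/vnd.github.v3+json' \
    -H "Authorization: Bearer ${GITHUB_TOKEN}" \
    "https://api.github.com/repos/netdata/netdata/actions/workflows/build.yml/dispatches" \
    -d '{"ref": "master", "inputs": {"version": "1.32.0", "type": "nightly"}}')"

if [ "${status}" = "204" ]; then
    echo "Dispatch accepted."
else
    echo "Dispatch failed (HTTP ${status}):"
    cat /tmp/dispatch-body
    exit 1
fi
```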
diff --git a/.yamllint.yml b/.yamllint.yml
index 09d99aa8c..b05de2062 100644
--- a/.yamllint.yml
+++ b/.yamllint.yml
@@ -6,10 +6,11 @@ yaml-files:
   - 'collectors/python.d.plugin/*.conf'
   - 'collectors/python.d.plugin/*/*.conf'

+ignore: |
+  mqtt_websockets/
+  packaging/makeself/tmp/
+
 rules:
-  line-length:
-    max: 120
-    level: warning
   braces: enable
   brackets: enable
   colons: enable
@@ -19,9 +20,14 @@ rules:
   document-end: disable
   document-start: disable
   empty-lines: enable
-  empty-values: disable
+  empty-values: enable
   hyphens: enable
   indentation: enable
+  line-length:
+    max: 150
+    level: warning
+    allow-non-breakable-words: true
+    allow-non-breakable-inline-mappings: true
   key-duplicates: enable
   key-ordering: disable
   new-line-at-end-of-file: enable
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f5e05e0bc..e3af9f3f8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,320 +1,200 @@
 # Changelog

-## [v1.31.0](https://github.com/netdata/netdata/tree/v1.31.0) (2021-05-19)
+## [v1.32.0](https://github.com/netdata/netdata/tree/v1.32.0) (2021-11-30)

-[Full Changelog](https://github.com/netdata/netdata/compare/v1.30.1...v1.31.0)
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.31.0...v1.32.0)

 **Merged pull requests:**

-- Fix broken link in dimensions/contexts/families doc [\#11148](https://github.com/netdata/netdata/pull/11148) ([joelhans](https://github.com/joelhans))
-- Add info on other memory modes to performance.md [\#11144](https://github.com/netdata/netdata/pull/11144) ([cakrit](https://github.com/cakrit))
-- Use size\_t instead of int for vfs\_bufspace\_count in FreeBSD plugin [\#11142](https://github.com/netdata/netdata/pull/11142) ([diizzyy](https://github.com/diizzyy))
-- Bundle the react dashboard code into the agent repo directly. [\#11139](https://github.com/netdata/netdata/pull/11139) ([Ferroin](https://github.com/Ferroin))
-- Reduce the number of ACLK chart updates during chart obsoletion [\#11133](https://github.com/netdata/netdata/pull/11133) ([stelfrag](https://github.com/stelfrag))
-- Update k6.md [\#11127](https://github.com/netdata/netdata/pull/11127) ([OdysLam](https://github.com/OdysLam))
-- Fix broken link in doc [\#11122](https://github.com/netdata/netdata/pull/11122) ([forest0](https://github.com/forest0))
-- analytics: reduce alarms notifications dump logging [\#11116](https://github.com/netdata/netdata/pull/11116) ([ilyam8](https://github.com/ilyam8))
-- Check configuration for CUSTOM and MSTEAM [\#11113](https://github.com/netdata/netdata/pull/11113) ([MrZammler](https://github.com/MrZammler))
-- Fix broken links in various docs [\#11109](https://github.com/netdata/netdata/pull/11109) ([joelhans](https://github.com/joelhans))
-- minor - fixes typo in ACLK-NG log [\#11107](https://github.com/netdata/netdata/pull/11107) ([underhood](https://github.com/underhood))
-- Update mqtt\_websockets [\#11105](https://github.com/netdata/netdata/pull/11105) ([underhood](https://github.com/underhood))
-- packaging: update go.d.plugin version to v0.28.2 [\#11104](https://github.com/netdata/netdata/pull/11104) ([ilyam8](https://github.com/ilyam8))
-- aclk/legacy: change aclk statistics charts units from kB/s to KiB/s [\#11103](https://github.com/netdata/netdata/pull/11103) ([ilyam8](https://github.com/ilyam8))
-- Check the version of the default cgroup mountpoint [\#11102](https://github.com/netdata/netdata/pull/11102) ([vlvkobal](https://github.com/vlvkobal))
-- Don't repeat the cgroup discovery cleanup info message [\#11101](https://github.com/netdata/netdata/pull/11101) ([vlvkobal](https://github.com/vlvkobal))
-- Add host\_cloud\_enabled attribute to analytics [\#11100](https://github.com/netdata/netdata/pull/11100) ([MrZammler](https://github.com/MrZammler))
-- Improve dashboard documentation \(part 3\) [\#11099](https://github.com/netdata/netdata/pull/11099) ([joelhans](https://github.com/joelhans))
-- cgroups: fix network interfaces detection when using `virsh` [\#11096](https://github.com/netdata/netdata/pull/11096) ([ilyam8](https://github.com/ilyam8))
-- Reduce send statistics logging [\#11091](https://github.com/netdata/netdata/pull/11091) ([MrZammler](https://github.com/MrZammler))
-- fix SSL random failures when using multithreaded web server with OpenSSL \< 1.1.0 [\#11089](https://github.com/netdata/netdata/pull/11089) ([thiagoftsm](https://github.com/thiagoftsm))
-- health: clarify which health configuration entities are required / optional [\#11086](https://github.com/netdata/netdata/pull/11086) ([ilyam8](https://github.com/ilyam8))
-- build mqtt\_websockets with netdata autotools [\#11083](https://github.com/netdata/netdata/pull/11083) ([underhood](https://github.com/underhood))
-- Fixed a single typo in documentation [\#11082](https://github.com/netdata/netdata/pull/11082) ([yavin87](https://github.com/yavin87))
-- netdata-installer.sh: Enable IPv6 support in libwebsockets [\#11080](https://github.com/netdata/netdata/pull/11080) ([pjakuszew](https://github.com/pjakuszew))
-- Add an event when an incomplete agent shutdown is detected [\#11078](https://github.com/netdata/netdata/pull/11078) ([stelfrag](https://github.com/stelfrag))
-- Remove dash-example, place in community repo [\#11077](https://github.com/netdata/netdata/pull/11077) ([tnyeanderson](https://github.com/tnyeanderson))
-- Change eBPF chart type [\#11074](https://github.com/netdata/netdata/pull/11074) ([thiagoftsm](https://github.com/thiagoftsm))
-- Add a module for ZFS pool state [\#11071](https://github.com/netdata/netdata/pull/11071) ([vlvkobal](https://github.com/vlvkobal))
-- Improve dashboard documentation \(part 2\) [\#11065](https://github.com/netdata/netdata/pull/11065) ([joelhans](https://github.com/joelhans))
-- Fix coverity issue \(CID 370510\) [\#11060](https://github.com/netdata/netdata/pull/11060) ([stelfrag](https://github.com/stelfrag))
-- Add functionality to store node\_id for a host [\#11059](https://github.com/netdata/netdata/pull/11059) ([stelfrag](https://github.com/stelfrag))
-- Add `charts` to templates [\#11054](https://github.com/netdata/netdata/pull/11054) ([thiagoftsm](https://github.com/thiagoftsm))
-- Add documentation for claiming during kickstart installation [\#11052](https://github.com/netdata/netdata/pull/11052) ([joelhans](https://github.com/joelhans))
-- Remove dots in cgroup ids [\#11050](https://github.com/netdata/netdata/pull/11050) ([vlvkobal](https://github.com/vlvkobal))
-- health/vernemq: use `average` instead of `sum` [\#11037](https://github.com/netdata/netdata/pull/11037) ([ilyam8](https://github.com/ilyam8))
-- Fix storing an NULL claim id on a parent node [\#11036](https://github.com/netdata/netdata/pull/11036) ([stelfrag](https://github.com/stelfrag))
-- Improve installation method for Alpine [\#11035](https://github.com/netdata/netdata/pull/11035) ([tiramiseb](https://github.com/tiramiseb))
-- Load names [\#11034](https://github.com/netdata/netdata/pull/11034) ([thiagoftsm](https://github.com/thiagoftsm))
-- Add Third-party collector: nextcloud plugin [\#11032](https://github.com/netdata/netdata/pull/11032) ([tknobi](https://github.com/tknobi))
-- Create ebpf.d directory in PLUGINDIR for debian and rpm package\(netdata\#11017\) [\#11031](https://github.com/netdata/netdata/pull/11031) ([wangpei-nice](https://github.com/wangpei-nice))
-- proc/mdstat: add raid level to the family [\#11024](https://github.com/netdata/netdata/pull/11024) ([ilyam8](https://github.com/ilyam8))
-- bump to netdata-pandas==0.0.38 [\#11022](https://github.com/netdata/netdata/pull/11022) ([andrewm4894](https://github.com/andrewm4894))
-- Provide more agent analytics to posthog [\#11020](https://github.com/netdata/netdata/pull/11020) ([MrZammler](https://github.com/MrZammler))
-- Rename struct fields from class to classification. [\#11019](https://github.com/netdata/netdata/pull/11019) ([vkalintiris](https://github.com/vkalintiris))
-- Improve dashboard documentation \(part 1\) [\#11015](https://github.com/netdata/netdata/pull/11015) ([joelhans](https://github.com/joelhans))
-- Remove links to old install doc [\#11014](https://github.com/netdata/netdata/pull/11014) ([joelhans](https://github.com/joelhans))
-- Revert "Provide more agent analytics to posthog" [\#11011](https://github.com/netdata/netdata/pull/11011) ([MrZammler](https://github.com/MrZammler))
-- anonymous-statistics: add a timeout when using `curl` [\#11010](https://github.com/netdata/netdata/pull/11010) ([ilyam8](https://github.com/ilyam8))
-- python.d: add plugin and module names to the runtime charts [\#11007](https://github.com/netdata/netdata/pull/11007) ([ilyam8](https://github.com/ilyam8))
-- \[area/collectors\] Added support for libvirtd LXC containers to the `cgroup-name.sh` cgroup name normalization script [\#11006](https://github.com/netdata/netdata/pull/11006) ([endreszabo](https://github.com/endreszabo))
-- Allow the remote write configuration have multiple destinations [\#11005](https://github.com/netdata/netdata/pull/11005) ([vlvkobal](https://github.com/vlvkobal))
-- improvements to anomalies collector following dogfooding [\#11003](https://github.com/netdata/netdata/pull/11003) ([andrewm4894](https://github.com/andrewm4894))
-- Backend chart filtering backward compatibility fix [\#11002](https://github.com/netdata/netdata/pull/11002) ([vlvkobal](https://github.com/vlvkobal))
-- Add a chart with netdata uptime [\#10997](https://github.com/netdata/netdata/pull/10997) ([vlvkobal](https://github.com/vlvkobal))
-- Improve get started/installation docs [\#10995](https://github.com/netdata/netdata/pull/10995) ([joelhans](https://github.com/joelhans))
-- Persist claim ids in local database for parent and children [\#10993](https://github.com/netdata/netdata/pull/10993) ([stelfrag](https://github.com/stelfrag))
-- ci: fix aws-kinesis builds [\#10992](https://github.com/netdata/netdata/pull/10992) ([ilyam8](https://github.com/ilyam8))
-- Move global stats to a separate thread [\#10991](https://github.com/netdata/netdata/pull/10991) ([vlvkobal](https://github.com/vlvkobal))
-- adds missing SPDX license info into ACLK-NG [\#10990](https://github.com/netdata/netdata/pull/10990) ([underhood](https://github.com/underhood))
-- K6 quality of life updates [\#10985](https://github.com/netdata/netdata/pull/10985) ([OdysLam](https://github.com/OdysLam))
-- Add sections for class, component and type. [\#10984](https://github.com/netdata/netdata/pull/10984) ([MrZammler](https://github.com/MrZammler))
-- Update eBPF documentation [\#10982](https://github.com/netdata/netdata/pull/10982) ([thiagoftsm](https://github.com/thiagoftsm))
-- remove vneg from ACLK-NG [\#10980](https://github.com/netdata/netdata/pull/10980) ([underhood](https://github.com/underhood))
-- Remove outdated privacy policy and terms of use [\#10979](https://github.com/netdata/netdata/pull/10979) ([joelhans](https://github.com/joelhans))
-- collectors/charts.d/opensips: fix detection of `opensipsctl` executable [\#10978](https://github.com/netdata/netdata/pull/10978) ([ilyam8](https://github.com/ilyam8))
-- Update fping version [\#10977](https://github.com/netdata/netdata/pull/10977) ([Habetdin](https://github.com/Habetdin))
-- fix uil in statsd guide [\#10975](https://github.com/netdata/netdata/pull/10975) ([OdysLam](https://github.com/OdysLam))
-- health: fix alarm line options syntax in the docs [\#10974](https://github.com/netdata/netdata/pull/10974) ([ilyam8](https://github.com/ilyam8))
-- Upgrade OKay repository RPM for RHEL8 [\#10973](https://github.com/netdata/netdata/pull/10973) ([BastienBalaud](https://github.com/BastienBalaud))
-- Remove condition that was creating gaps [\#10972](https://github.com/netdata/netdata/pull/10972) ([thiagoftsm](https://github.com/thiagoftsm))
-- Add a metric for percpu memory [\#10964](https://github.com/netdata/netdata/pull/10964) ([vlvkobal](https://github.com/vlvkobal))
-- Bring flexible adjust for eBPF hash tables [\#10962](https://github.com/netdata/netdata/pull/10962) ([thiagoftsm](https://github.com/thiagoftsm))
-- Provide new attributes in health conf files [\#10961](https://github.com/netdata/netdata/pull/10961) ([MrZammler](https://github.com/MrZammler))
-- Fix epbf crash when process exit [\#10957](https://github.com/netdata/netdata/pull/10957) ([thiagoftsm](https://github.com/thiagoftsm))
-- Contributing revamp, take 2 [\#10956](https://github.com/netdata/netdata/pull/10956) ([OdysLam](https://github.com/OdysLam))
-- health: add Inconsistent state to the mysql\_galera\_cluster\_state alarm [\#10945](https://github.com/netdata/netdata/pull/10945) ([ilyam8](https://github.com/ilyam8))
-- Update cloud-providers.md [\#10942](https://github.com/netdata/netdata/pull/10942) ([Avre](https://github.com/Avre))
-- ACLK new cloud architecture new TBEB [\#10941](https://github.com/netdata/netdata/pull/10941) ([underhood](https://github.com/underhood))
-- Add new charts for extended disk metrics [\#10939](https://github.com/netdata/netdata/pull/10939) ([vlvkobal](https://github.com/vlvkobal))
-- Adds --recursive to docu git clones [\#10932](https://github.com/netdata/netdata/pull/10932) ([underhood](https://github.com/underhood))
-- Add lists of monitored metrics to the cgroups plugin documentation [\#10924](https://github.com/netdata/netdata/pull/10924) ([vlvkobal](https://github.com/vlvkobal))
-- Spelling web gui [\#10922](https://github.com/netdata/netdata/pull/10922) ([jsoref](https://github.com/jsoref))
-- Spelling web api server [\#10921](https://github.com/netdata/netdata/pull/10921) ([jsoref](https://github.com/jsoref))
-- Spelling tests [\#10920](https://github.com/netdata/netdata/pull/10920) ([jsoref](https://github.com/jsoref))
-- Spelling streaming [\#10919](https://github.com/netdata/netdata/pull/10919) ([jsoref](https://github.com/jsoref))
-- spelling: bidirectional [\#10918](https://github.com/netdata/netdata/pull/10918) ([jsoref](https://github.com/jsoref))
-- Spelling libnetdata [\#10917](https://github.com/netdata/netdata/pull/10917) ([jsoref](https://github.com/jsoref))
-- Spelling health [\#10916](https://github.com/netdata/netdata/pull/10916) ([jsoref](https://github.com/jsoref))
-- Spelling exporting [\#10915](https://github.com/netdata/netdata/pull/10915) ([jsoref](https://github.com/jsoref))
-- Spelling database [\#10914](https://github.com/netdata/netdata/pull/10914) ([jsoref](https://github.com/jsoref))
-- Spelling daemon [\#10913](https://github.com/netdata/netdata/pull/10913) ([jsoref](https://github.com/jsoref))
-- Spelling collectors [\#10912](https://github.com/netdata/netdata/pull/10912) ([jsoref](https://github.com/jsoref))
-- spelling: backend [\#10911](https://github.com/netdata/netdata/pull/10911) ([jsoref](https://github.com/jsoref))
-- Spelling aclk [\#10910](https://github.com/netdata/netdata/pull/10910) ([jsoref](https://github.com/jsoref))
-- Spelling build [\#10909](https://github.com/netdata/netdata/pull/10909) ([jsoref](https://github.com/jsoref))
-- health: add synchronization.conf to the Makefile [\#10907](https://github.com/netdata/netdata/pull/10907) ([ilyam8](https://github.com/ilyam8))
-- health: add systemdunits alarms [\#10906](https://github.com/netdata/netdata/pull/10906) ([ilyam8](https://github.com/ilyam8))
-- web/gui: add systemdunits info to the dashboard\_info.js [\#10904](https://github.com/netdata/netdata/pull/10904) ([ilyam8](https://github.com/ilyam8))
-- Add a plugin for the system clock synchronization state [\#10895](https://github.com/netdata/netdata/pull/10895) ([vlvkobal](https://github.com/vlvkobal))
-- Provide more agent analytics to posthog [\#10887](https://github.com/netdata/netdata/pull/10887) ([MrZammler](https://github.com/MrZammler))
-- Add a chart for out of memory kills [\#10880](https://github.com/netdata/netdata/pull/10880) ([vlvkobal](https://github.com/vlvkobal))
-- Remove RewriteEngine for dedicated vHost [\#10873](https://github.com/netdata/netdata/pull/10873) ([Steve8291](https://github.com/Steve8291))
-- python.d\(smartd\_log\): collect attribute 249 -- NAND Writes 1GiB [\#10872](https://github.com/netdata/netdata/pull/10872) ([RaitoBezarius](https://github.com/RaitoBezarius))
-- Improvements to dash-example.html [\#10870](https://github.com/netdata/netdata/pull/10870) ([tnyeanderson](https://github.com/tnyeanderson))
-- Replace references to Google Analytics with Posthog where relevant [\#10868](https://github.com/netdata/netdata/pull/10868) ([andrewm4894](https://github.com/andrewm4894))
-- ACLK Passwd endpoint update [\#10859](https://github.com/netdata/netdata/pull/10859) ([underhood](https://github.com/underhood))
-- Dashboard version 2.17.0 [\#10856](https://github.com/netdata/netdata/pull/10856) ([allelos](https://github.com/allelos))
-- Ebpf directory cache [\#10855](https://github.com/netdata/netdata/pull/10855) ([thiagoftsm](https://github.com/thiagoftsm))
-- prevents mqtt connection attempt on OTP failure [\#10839](https://github.com/netdata/netdata/pull/10839) ([underhood](https://github.com/underhood))
-- implements ACLK env endpoint [\#10833](https://github.com/netdata/netdata/pull/10833) ([underhood](https://github.com/underhood))
-- implements new https client for ACLK [\#10805](https://github.com/netdata/netdata/pull/10805) ([underhood](https://github.com/underhood))
-- Support mulitple jobs in make\(1\) when building LWS. [\#10799](https://github.com/netdata/netdata/pull/10799) ([vkalintiris](https://github.com/vkalintiris))
-- Overhaul streaming documentation [\#10709](https://github.com/netdata/netdata/pull/10709) ([joelhans](https://github.com/joelhans))
-- Zscores python collector [\#10673](https://github.com/netdata/netdata/pull/10673) ([andrewm4894](https://github.com/andrewm4894))
-- add python changefinder collector [\#10672](https://github.com/netdata/netdata/pull/10672) ([andrewm4894](https://github.com/andrewm4894))
+- fix\(health\): `pihole\_blocklist\_gravity\_file` and `pihole\_status` info lines [\#11844](https://github.com/netdata/netdata/pull/11844) ([ilyam8](https://github.com/ilyam8))
+- Optional proto support fix [\#11840](https://github.com/netdata/netdata/pull/11840) ([underhood](https://github.com/underhood))
+- feat\(apps.plugin\): add consul to apps\_groups.conf [\#11839](https://github.com/netdata/netdata/pull/11839) ([ilyam8](https://github.com/ilyam8))
+- Add a note about pkg-config file location for freeipmi [\#11831](https://github.com/netdata/netdata/pull/11831) ([vlvkobal](https://github.com/vlvkobal))
+- Remove pihole\_blocklist\_gravity\_file\_existence\_state alert... wait
+- Verify checksums of makeself deps. [\#11791](https://github.com/netdata/netdata/pull/11791) ([vkalintiris](https://github.com/vkalintiris))
+- packaging: update go.d.plugin version to v0.31.0 [\#11789](https://github.com/netdata/netdata/pull/11789) ([ilyam8](https://github.com/ilyam8))
+- Add some logging for cloud new architecture to access.log [\#11788](https://github.com/netdata/netdata/pull/11788) ([MrZammler](https://github.com/MrZammler))
+- Simple fix for the data API query [\#11787](https://github.com/netdata/netdata/pull/11787) ([vlvkobal](https://github.com/vlvkobal))
+- Use correct hop count if host is already in memory [\#11785](https://github.com/netdata/netdata/pull/11785) ([stelfrag](https://github.com/stelfrag))
+- Fix proc/interrupts parser [\#11783](https://github.com/netdata/netdata/pull/11783) ([maximethebault](https://github.com/maximethebault))
+- Fix typos [\#11782](https://github.com/netdata/netdata/pull/11782) ([rex4539](https://github.com/rex4539))
+- add nightly release version to readme [\#11780](https://github.com/netdata/netdata/pull/11780) ([andrewm4894](https://github.com/andrewm4894))
+- Delete from aclk alerts table if ack'ed from cloud one day ago [\#11779](https://github.com/netdata/netdata/pull/11779) ([MrZammler](https://github.com/MrZammler))
+- Add Oracle Linux 8 to CI and package builds. [\#11776](https://github.com/netdata/netdata/pull/11776) ([Ferroin](https://github.com/Ferroin))
+- Temporary fix for cgroup renaming [\#11775](https://github.com/netdata/netdata/pull/11775) ([vlvkobal](https://github.com/vlvkobal))
+- Remove feature flag for ACLK new cloud architecture [\#11774](https://github.com/netdata/netdata/pull/11774) ([stelfrag](https://github.com/stelfrag))
+- Fix link to new charts. [\#11773](https://github.com/netdata/netdata/pull/11773) ([DShreve2](https://github.com/DShreve2))
+- Update netdata-security.md [\#11772](https://github.com/netdata/netdata/pull/11772) ([jlbriston](https://github.com/jlbriston))
+- Skip sending hidden dimensions via ACLK [\#11770](https://github.com/netdata/netdata/pull/11770) ([stelfrag](https://github.com/stelfrag))
+- Insert alert into aclk\_alert directly instead of queuing it [\#11769](https://github.com/netdata/netdata/pull/11769) ([MrZammler](https://github.com/MrZammler))
+- Fix host hop count reported to the cloud [\#11768](https://github.com/netdata/netdata/pull/11768) ([stelfrag](https://github.com/stelfrag))
+- Show stats for protected mount points in diskspace plugin [\#11767](https://github.com/netdata/netdata/pull/11767) ([vlvkobal](https://github.com/vlvkobal))
+- Adding parenthesis [\#11766](https://github.com/netdata/netdata/pull/11766) ([ShimonOhayon](https://github.com/ShimonOhayon))
+- fix log if D\_ACLK is used [\#11763](https://github.com/netdata/netdata/pull/11763) ([underhood](https://github.com/underhood))
+- Don't interrupt popcorn timer for children [\#11758](https://github.com/netdata/netdata/pull/11758) ([underhood](https://github.com/underhood))
+- fix \(cgroups.plugin\): containers name resolution for crio/containerd cri [\#11756](https://github.com/netdata/netdata/pull/11756) ([ilyam8](https://github.com/ilyam8))
+- Add SSL\_MODE\_ENABLE\_PARTIAL\_WRITE to netdata\_srv\_ctx [\#11754](https://github.com/netdata/netdata/pull/11754) ([MrZammler](https://github.com/MrZammler))
+- Update eBPF documenation \(Filesystem and HardIRQ\) [\#11752](https://github.com/netdata/netdata/pull/11752) ([UmanShahzad](https://github.com/UmanShahzad))
+- Adds exit points between env and OTP [\#11751](https://github.com/netdata/netdata/pull/11751) ([underhood](https://github.com/underhood))
+- Teach GH about ML label and its code owners. [\#11750](https://github.com/netdata/netdata/pull/11750) ([vkalintiris](https://github.com/vkalintiris))
+- Update enable-streaming.mdx [\#11747](https://github.com/netdata/netdata/pull/11747) ([caleno](https://github.com/caleno))
+- Minor improvement to CPU number function regarding macOS. [\#11746](https://github.com/netdata/netdata/pull/11746) ([iigorkarpov](https://github.com/iigorkarpov))
+- minor - popocorn no more [\#11745](https://github.com/netdata/netdata/pull/11745) ([underhood](https://github.com/underhood))
+- Update dashboard to version v2.20.11. [\#11743](https://github.com/netdata/netdata/pull/11743) ([netdatabot](https://github.com/netdatabot))
+- Update eBPF documentation [\#11741](https://github.com/netdata/netdata/pull/11741) ([thiagoftsm](https://github.com/thiagoftsm))
+- Change comma possition in v1/info if ml-info is missing [\#11739](https://github.com/netdata/netdata/pull/11739) ([MrZammler](https://github.com/MrZammler))
+- Disable C++ warnings from dlib library. [\#11738](https://github.com/netdata/netdata/pull/11738) ([vkalintiris](https://github.com/vkalintiris))
+- Fix typo in aclk\_query.c [\#11737](https://github.com/netdata/netdata/pull/11737) ([eltociear](https://github.com/eltociear))
+- Fix online chart in NG not updated properly [\#11734](https://github.com/netdata/netdata/pull/11734) ([underhood](https://github.com/underhood))
+- Add command for new health entity file. [\#11733](https://github.com/netdata/netdata/pull/11733) ([DShreve2](https://github.com/DShreve2))
+- Removing dated contact suggestion. [\#11732](https://github.com/netdata/netdata/pull/11732) ([DShreve2](https://github.com/DShreve2))
+- Fix Link to New Charts [\#11729](https://github.com/netdata/netdata/pull/11729) ([DShreve2](https://github.com/DShreve2))
+- Fix Header Link.md [\#11728](https://github.com/netdata/netdata/pull/11728) ([DShreve2](https://github.com/DShreve2))
+- Implements cloud initiated disconnect command [\#11723](https://github.com/netdata/netdata/pull/11723) ([underhood](https://github.com/underhood))
+- Adding \(eBPF\) to submenu [\#11721](https://github.com/netdata/netdata/pull/11721) ([thiagoftsm](https://github.com/thiagoftsm))
+- Fix coverity CID \#373610 [\#11719](https://github.com/netdata/netdata/pull/11719) ([MrZammler](https://github.com/MrZammler))
+- add sensors to charts.d.conf and add a note how to enable it [\#11715](https://github.com/netdata/netdata/pull/11715) ([ilyam8](https://github.com/ilyam8))
+- Add Cloud sign-up link to README.md [\#11714](https://github.com/netdata/netdata/pull/11714) ([DShreve2](https://github.com/DShreve2))
+- Updating Docker Node Instructions for Clarity [\#11713](https://github.com/netdata/netdata/pull/11713) ([DShreve2](https://github.com/DShreve2))
+- Update jQuery Dependency [\#11710](https://github.com/netdata/netdata/pull/11710) ([rupokify](https://github.com/rupokify))
+- Bring eBPF to static binaries [\#11709](https://github.com/netdata/netdata/pull/11709) ([thiagoftsm](https://github.com/thiagoftsm))
+- Fix kickstart.md Installation Guide Links [\#11708](https://github.com/netdata/netdata/pull/11708) ([DShreve2](https://github.com/DShreve2))
+- Queue removed alerts to cloud for new architecture [\#11704](https://github.com/netdata/netdata/pull/11704) ([MrZammler](https://github.com/MrZammler))
+- Ebpf doc [\#11703](https://github.com/netdata/netdata/pull/11703) ([thiagoftsm](https://github.com/thiagoftsm))
+- Charts 2.0 - fix broken link [\#11701](https://github.com/netdata/netdata/pull/11701) ([hugovalente-pm](https://github.com/hugovalente-pm))
+- postgres collector: Fix crash the wal query if wal-file was removed concurrently [\#11697](https://github.com/netdata/netdata/pull/11697) ([unhandled-exception](https://github.com/unhandled-exception))
+- Fix handling of disabling telemetry in static installs. [\#11689](https://github.com/netdata/netdata/pull/11689) ([Ferroin](https://github.com/Ferroin))
+- fix "lsns: unknown column" logging in cgroup-network-helper script [\#11687](https://github.com/netdata/netdata/pull/11687) ([ilyam8](https://github.com/ilyam8))
+- Fix coverity issues 373612 & 373611 [\#11684](https://github.com/netdata/netdata/pull/11684) ([MrZammler](https://github.com/MrZammler))
+- eBPF mdflush [\#11681](https://github.com/netdata/netdata/pull/11681) ([UmanShahzad](https://github.com/UmanShahzad))
+- New eBPF and libbpf releases [\#11680](https://github.com/netdata/netdata/pull/11680) ([thiagoftsm](https://github.com/thiagoftsm))
+- Mark g++ for freebsd as NOTREQUIRED [\#11678](https://github.com/netdata/netdata/pull/11678) ([MrZammler](https://github.com/MrZammler))
+- Fix warnings from -Wformat-truncation=2 [\#11676](https://github.com/netdata/netdata/pull/11676) ([MrZammler](https://github.com/MrZammler))
+- Stream chart labels [\#11675](https://github.com/netdata/netdata/pull/11675) ([MrZammler](https://github.com/MrZammler))
+- Update pfsense.md [\#11674](https://github.com/netdata/netdata/pull/11674) ([78Star](https://github.com/78Star))
+- fix swap\_used alarm calc [\#11672](https://github.com/netdata/netdata/pull/11672) ([ilyam8](https://github.com/ilyam8))
+- Fix line arguments \(eBPF\) [\#11670](https://github.com/netdata/netdata/pull/11670) ([thiagoftsm](https://github.com/thiagoftsm))
+- Add snapshot message for cloud new architecture [\#11664](https://github.com/netdata/netdata/pull/11664) ([MrZammler](https://github.com/MrZammler))
+- Fix interval usage and reduce I/O [\#11662](https://github.com/netdata/netdata/pull/11662) ([thiagoftsm](https://github.com/thiagoftsm))
+- Update dashboard to version v2.20.9. [\#11661](https://github.com/netdata/netdata/pull/11661) ([netdatabot](https://github.com/netdatabot))
+- Optimize static build and update various dependencies. [\#11660](https://github.com/netdata/netdata/pull/11660) ([Ferroin](https://github.com/Ferroin))
+- Sanely handle installing on systems with limited RAM. [\#11658](https://github.com/netdata/netdata/pull/11658) ([Ferroin](https://github.com/Ferroin))
+- Mark unmaintained tests as expected failures. [\#11657](https://github.com/netdata/netdata/pull/11657) ([vkalintiris](https://github.com/vkalintiris))
+- Fix build issue related to legacy aclk and new arch code [\#11655](https://github.com/netdata/netdata/pull/11655) ([MrZammler](https://github.com/MrZammler))
+- minor - fixes typo in URL when calling env [\#11651](https://github.com/netdata/netdata/pull/11651) ([underhood](https://github.com/underhood))
+- Fix false poll timeout [\#11650](https://github.com/netdata/netdata/pull/11650) ([underhood](https://github.com/underhood))
+- Use submodules in Clang build checks. [\#11649](https://github.com/netdata/netdata/pull/11649) ([Ferroin](https://github.com/Ferroin))
+- Fix chart config overflow [\#11645](https://github.com/netdata/netdata/pull/11645) ([stelfrag](https://github.com/stelfrag))
+- Explicitly opt out of LTO in RPM builds. [\#11644](https://github.com/netdata/netdata/pull/11644) ([Ferroin](https://github.com/Ferroin))
+- eBPF process \(collector improvements\) [\#11643](https://github.com/netdata/netdata/pull/11643) ([thiagoftsm](https://github.com/thiagoftsm))
+- eBPF cgroup integration [\#11642](https://github.com/netdata/netdata/pull/11642) ([thiagoftsm](https://github.com/thiagoftsm))
+- Add Fedora 35 to CI. [\#11641](https://github.com/netdata/netdata/pull/11641) ([Ferroin](https://github.com/Ferroin))
+- various fixes and updates for dashboard info [\#11639](https://github.com/netdata/netdata/pull/11639) ([ilyam8](https://github.com/ilyam8))
+- Fix an overflow when unsigned integer subtracted [\#11638](https://github.com/netdata/netdata/pull/11638) ([vlvkobal](https://github.com/vlvkobal))
+- add note for the new release of charts on the cloud [\#11637](https://github.com/netdata/netdata/pull/11637) ([hugovalente-pm](https://github.com/hugovalente-pm))
+- add timex.plugin charts info [\#11635](https://github.com/netdata/netdata/pull/11635) ([ilyam8](https://github.com/ilyam8))
+- Add protobuf to `-W buildinfo` output. [\#11634](https://github.com/netdata/netdata/pull/11634) ([Ferroin](https://github.com/Ferroin))
+- Revert "Update alarms info" [\#11633](https://github.com/netdata/netdata/pull/11633) ([ilyam8](https://github.com/ilyam8))
+- Fix nfsd RPC metrics and remove unused nfsd charts and metrics [\#11632](https://github.com/netdata/netdata/pull/11632) ([vlvkobal](https://github.com/vlvkobal))
+- add proc zfs charts info [\#11630](https://github.com/netdata/netdata/pull/11630) ([ilyam8](https://github.com/ilyam8))
+- Update dashboard to version v2.20.7. [\#11629](https://github.com/netdata/netdata/pull/11629) ([netdatabot](https://github.com/netdatabot))
+- add sys\_class\_infiniband charts info [\#11628](https://github.com/netdata/netdata/pull/11628) ([ilyam8](https://github.com/ilyam8))
+- add proc\_pagetypeinfo charts info [\#11627](https://github.com/netdata/netdata/pull/11627) ([ilyam8](https://github.com/ilyam8))
+- add proc\_net\_wireless charts info [\#11626](https://github.com/netdata/netdata/pull/11626) ([ilyam8](https://github.com/ilyam8))
+- add proc\_net\_rpc\_nfs and nfsd charts info [\#11625](https://github.com/netdata/netdata/pull/11625) ([ilyam8](https://github.com/ilyam8))
+- fix proc nfsd "proc4ops" chart family [\#11623](https://github.com/netdata/netdata/pull/11623) ([ilyam8](https://github.com/ilyam8))
+- Initialize struct with zeroes [\#11621](https://github.com/netdata/netdata/pull/11621) ([MrZammler](https://github.com/MrZammler))
+- add sys\_class\_power\_supply charts info [\#11619](https://github.com/netdata/netdata/pull/11619) ([ilyam8](https://github.com/ilyam8))
+- add cgroups.plugin systemd units charts info [\#11618](https://github.com/netdata/netdata/pull/11618) ([ilyam8](https://github.com/ilyam8))
+- Fix swap size calculation for cgroups [\#11617](https://github.com/netdata/netdata/pull/11617) ([vlvkobal](https://github.com/vlvkobal))
+- Fix RSS memory counter for systemd services [\#11616](https://github.com/netdata/netdata/pull/11616) ([vlvkobal](https://github.com/vlvkobal))
+- Add @iigorkarpov to CODEOWNERS. [\#11614](https://github.com/netdata/netdata/pull/11614) ([Ferroin](https://github.com/Ferroin))
+- Adds new alarm status protocol messages [\#11612](https://github.com/netdata/netdata/pull/11612) ([underhood](https://github.com/underhood))
+- eBPF and cgroup \(process, file descriptor, VFS, directory cache and OOMkill\) [\#11611](https://github.com/netdata/netdata/pull/11611) ([thiagoftsm](https://github.com/thiagoftsm))
+- apps: disable reporting min/avg/max group uptime by default [\#11609](https://github.com/netdata/netdata/pull/11609) ([ilyam8](https://github.com/ilyam8))
+- fix https client [\#11608](https://github.com/netdata/netdata/pull/11608) ([underhood](https://github.com/underhood))
+- add cgroups.plugin charts descriptions [\#11607](https://github.com/netdata/netdata/pull/11607) ([ilyam8](https://github.com/ilyam8))
+- Add flag to mark containers as created from official images in analytics. [\#11606](https://github.com/netdata/netdata/pull/11606) ([Ferroin](https://github.com/Ferroin))
+- Update optional parameters for upcoming installer. [\#11604](https://github.com/netdata/netdata/pull/11604) ([DShreve2](https://github.com/DShreve2))
+- add apps.plugin charts descriptions [\#11601](https://github.com/netdata/netdata/pull/11601) ([ilyam8](https://github.com/ilyam8))
+- add proc\_vmstat charts info [\#11597](https://github.com/netdata/netdata/pull/11597) ([ilyam8](https://github.com/ilyam8))
+- fix varnish VBE parsing [\#11596](https://github.com/netdata/netdata/pull/11596) ([ilyam8](https://github.com/ilyam8))
+- add sys\_kernel\_mm\_ksm charts info [\#11595](https://github.com/netdata/netdata/pull/11595) ([ilyam8](https://github.com/ilyam8))
+- Update dashboard to version v2.20.2. [\#11593](https://github.com/netdata/netdata/pull/11593) ([netdatabot](https://github.com/netdatabot))
+- Add POWER8+ support to our official Docker images. [\#11592](https://github.com/netdata/netdata/pull/11592) ([Ferroin](https://github.com/Ferroin))
+- add sys\_devices\_system\_edac\_mc charts info [\#11589](https://github.com/netdata/netdata/pull/11589) ([ilyam8](https://github.com/ilyam8))
+- Adds local webserver API/v1 call "aclk" [\#11588](https://github.com/netdata/netdata/pull/11588) ([underhood](https://github.com/underhood))
+- Makes New Cloud architecture optional for ACLK-NG [\#11587](https://github.com/netdata/netdata/pull/11587) ([underhood](https://github.com/underhood))
+- add proc\_stat charts info [\#11586](https://github.com/netdata/netdata/pull/11586) ([ilyam8](https://github.com/ilyam8))
+- Add Ubuntu 21.10 to CI. [\#11585](https://github.com/netdata/netdata/pull/11585) ([Ferroin](https://github.com/Ferroin))
+- Remove unused synproxy chart [\#11582](https://github.com/netdata/netdata/pull/11582) ([vlvkobal](https://github.com/vlvkobal))
+- add proc\_net\_stat\_synproxy charts info [\#11581](https://github.com/netdata/netdata/pull/11581) ([ilyam8](https://github.com/ilyam8))
+- Sorting the Postgres cluster databases in the postgres collector [\#11580](https://github.com/netdata/netdata/pull/11580) ([unhandled-exception](https://github.com/unhandled-exception))
+- Enable additional functionality for the new cloud architecture [\#11579](https://github.com/netdata/netdata/pull/11579) ([stelfrag](https://github.com/stelfrag))
+- Fix CID 339027 and reverse arguments [\#11578](https://github.com/netdata/netdata/pull/11578) ([thiagoftsm](https://github.com/thiagoftsm))
+- add proc\_softirqs charts info [\#11577](https://github.com/netdata/netdata/pull/11577) ([ilyam8](https://github.com/ilyam8))
+- add proc\_net\_stat\_conntrack charts info [\#11576](https://github.com/netdata/netdata/pull/11576) ([ilyam8](https://github.com/ilyam8))
+- Free analytics data when analytics thread stops [\#11575](https://github.com/netdata/netdata/pull/11575) ([MrZammler](https://github.com/MrZammler))
+- add missing privilege to fix MySQL slave reporting [\#11574](https://github.com/netdata/netdata/pull/11574) ([steffenweber](https://github.com/steffenweber))
+- Integrate eBPF and cgroup \(consumer side\) [\#11573](https://github.com/netdata/netdata/pull/11573) ([thiagoftsm](https://github.com/thiagoftsm))
+- add proc\_uptime charts info [\#11569](https://github.com/netdata/netdata/pull/11569) ([ilyam8](https://github.com/ilyam8))
+- add proc\_net\_sockstat and sockstat6 charts info [\#11567](https://github.com/netdata/netdata/pull/11567) ([ilyam8](https://github.com/ilyam8))
+- Disable eBPF compilation in different platforms [\#11566](https://github.com/netdata/netdata/pull/11566) ([thiagoftsm](https://github.com/thiagoftsm))
+- add proc\_net\_snmp6 charts info [\#11565](https://github.com/netdata/netdata/pull/11565) ([ilyam8](https://github.com/ilyam8))
+- add proc\_net\_sctp\_snmp charts info [\#11564](https://github.com/netdata/netdata/pull/11564) ([ilyam8](https://github.com/ilyam8))
+- eBPF Shared Memory system call tracking [\#11560](https://github.com/netdata/netdata/pull/11560) ([UmanShahzad](https://github.com/UmanShahzad))
+- Add shared memory to cgroup [\#11559](https://github.com/netdata/netdata/pull/11559) ([thiagoftsm](https://github.com/thiagoftsm))
+- End of support for Ubuntu 16.04 [\#11556](https://github.com/netdata/netdata/pull/11556) ([Ferroin](https://github.com/Ferroin))
+- Add alert message support for ACLK new architecture [\#11552](https://github.com/netdata/netdata/pull/11552) ([MrZammler](https://github.com/MrZammler))
+- Anomaly Detection MVP [\#11548](https://github.com/netdata/netdata/pull/11548) ([vkalintiris](https://github.com/vkalintiris))
+- Fix handling of claiming in kickstart script when running as non-root. [\#11507](https://github.com/netdata/netdata/pull/11507) ([Ferroin](https://github.com/Ferroin))
+- Added static builds for ARMv7l and ARMv8a [\#11490](https://github.com/netdata/netdata/pull/11490) ([Ferroin](https://github.com/Ferroin))
+- Update alarms info [\#11481](https://github.com/netdata/netdata/pull/11481) ([ilyam8](https://github.com/ilyam8))
+- Announce proto capability and enable if cloud supports [\#11476](https://github.com/netdata/netdata/pull/11476) ([underhood](https://github.com/underhood))

-## [v1.30.1](https://github.com/netdata/netdata/tree/v1.30.1) (2021-04-12)
+## [v1.31.0](https://github.com/netdata/netdata/tree/v1.31.0) (2021-05-19)

-[Full Changelog](https://github.com/netdata/netdata/compare/v1.30.0...v1.30.1)
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.30.1...v1.31.0)

-**Merged pull requests:**
+## [v1.30.1](https://github.com/netdata/netdata/tree/v1.30.1) (2021-04-12)

-- Don’t use glob expansion in argument to `cd` in updater. [\#10936](https://github.com/netdata/netdata/pull/10936) ([Ferroin](https://github.com/Ferroin))
-- Fix memory corruption issue when executing context queries in RAM/SAVE memory mode [\#10933](https://github.com/netdata/netdata/pull/10933) ([stelfrag](https://github.com/stelfrag))
-- Update CODEOWNERS [\#10928](https://github.com/netdata/netdata/pull/10928) ([knatsakis](https://github.com/knatsakis))
-- Update news and GIF in README, fix typo [\#10900](https://github.com/netdata/netdata/pull/10900) ([joelhans](https://github.com/joelhans))
-- Update README.md [\#10898](https://github.com/netdata/netdata/pull/10898) ([slimanio](https://github.com/slimanio))
-- Fixed bundling of ACLK-NG components in dist tarballs. [\#10894](https://github.com/netdata/netdata/pull/10894) ([Ferroin](https://github.com/Ferroin))
-- Add a CRASH event when the agent fails to properly shutdown [\#10893](https://github.com/netdata/netdata/pull/10893) ([stelfrag](https://github.com/stelfrag))
-- Bumped version of OpenSSL bundled in static builds to 1.1.1k. [\#10884](https://github.com/netdata/netdata/pull/10884) ([Ferroin](https://github.com/Ferroin))
-- Fix incorrect health log entries [\#10822](https://github.com/netdata/netdata/pull/10822) ([stelfrag](https://github.com/stelfrag))
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.30.0...v1.30.1)

 ## [v1.30.0](https://github.com/netdata/netdata/tree/v1.30.0) (2021-03-31)

 [Full Changelog](https://github.com/netdata/netdata/compare/v1.29.3...v1.30.0)

-**Merged pull requests:**
-
-- Properly handle different netcat command names in binary package test code. [\#10883](https://github.com/netdata/netdata/pull/10883) ([Ferroin](https://github.com/Ferroin))
-- Add carrier and mtu charts for network interfaces [\#10866](https://github.com/netdata/netdata/pull/10866) ([vlvkobal](https://github.com/vlvkobal))
-- Fix typo in main.h [\#10858](https://github.com/netdata/netdata/pull/10858) ([eltociear](https://github.com/eltociear))
-- health: improve alarms infos [\#10853](https://github.com/netdata/netdata/pull/10853) ([ilyam8](https://github.com/ilyam8))
-- minor - add info about --aclk-ng into netdata-installer [\#10852](https://github.com/netdata/netdata/pull/10852) ([underhood](https://github.com/underhood))
-- mqtt-c coverity fix [\#10851](https://github.com/netdata/netdata/pull/10851) ([underhood](https://github.com/underhood))
-- web/gui: make network state map sytanx consistent in the dashboard info [\#10849](https://github.com/netdata/netdata/pull/10849) ([ilyam8](https://github.com/ilyam8))
-- fix\_repeat: Update repeat\_every and avoid unecessary test [\#10846](https://github.com/netdata/netdata/pull/10846) ([thiagoftsm](https://github.com/thiagoftsm))
-- Fix agent crash when executing data query with context and non-existing chart\_label\_key [\#10844](https://github.com/netdata/netdata/pull/10844) ([stelfrag](https://github.com/stelfrag))
-- Check device names in diskstats plugin [\#10843](https://github.com/netdata/netdata/pull/10843) ([vlvkobal](https://github.com/vlvkobal))
-- Fix memory leak when archived data is requested [\#10837](https://github.com/netdata/netdata/pull/10837) ([stelfrag](https://github.com/stelfrag))
-- add Installation method to the bug template [\#10836](https://github.com/netdata/netdata/pull/10836) ([ilyam8](https://github.com/ilyam8))
-- Add lock check to avoid shutdown when compiled with internal and locking checks [\#10835](https://github.com/netdata/netdata/pull/10835) ([stelfrag](https://github.com/stelfrag))
-- health: apply megacli alarms for all adapters/physical disks [\#10834](https://github.com/netdata/netdata/pull/10834) ([ilyam8](https://github.com/ilyam8))
-- Fix broken link in StatsD guide [\#10831](https://github.com/netdata/netdata/pull/10831) ([joelhans](https://github.com/joelhans))
-- health: add collector prefix to the external collectors alarms/templates [\#10830](https://github.com/netdata/netdata/pull/10830) ([ilyam8](https://github.com/ilyam8))
-- health: remove exporting\_metrics\_lost template [\#10829](https://github.com/netdata/netdata/pull/10829) ([ilyam8](https://github.com/ilyam8))
-- Fix name of PackageCLoud API token secret in workflows. [\#10828](https://github.com/netdata/netdata/pull/10828) ([Ferroin](https://github.com/Ferroin))
-- installer: update go.d.plugin version to v0.28.1 [\#10826](https://github.com/netdata/netdata/pull/10826) ([ilyam8](https://github.com/ilyam8))
-- alarm\(irc\): add support to change IRC\_PORT [\#10824](https://github.com/netdata/netdata/pull/10824) ([RaitoBezarius](https://github.com/RaitoBezarius))
-- Update syntax for Caddy v2 [\#10823](https://github.com/netdata/netdata/pull/10823) ([salazarp](https://github.com/salazarp))
-- health: apply adapter\_raid alarms for every logical/physical device [\#10820](https://github.com/netdata/netdata/pull/10820) ([ilyam8](https://github.com/ilyam8))
-- Fix handling of nightly and release packages in GHA workflows. [\#10819](https://github.com/netdata/netdata/pull/10819) ([Ferroin](https://github.com/Ferroin))
-- health: log an error if any when send email notification [\#10818](https://github.com/netdata/netdata/pull/10818) ([ilyam8](https://github.com/ilyam8))
-- Ebpf extend sync [\#10814](https://github.com/netdata/netdata/pull/10814) ([thiagoftsm](https://github.com/thiagoftsm))
-- Fix coverity issue \(CID 367566\) [\#10813](https://github.com/netdata/netdata/pull/10813) ([stelfrag](https://github.com/stelfrag))
-- fix claiming via env vars in docker container [\#10811](https://github.com/netdata/netdata/pull/10811) ([ilyam8](https://github.com/ilyam8))
-- Fix eBPF compilation [\#10810](https://github.com/netdata/netdata/pull/10810) ([thiagoftsm](https://github.com/thiagoftsm))
-- update bug report template [\#10807](https://github.com/netdata/netdata/pull/10807) ([underhood](https://github.com/underhood))
-- health: exclude cgroups net ifaces from packets dropped alarms [\#10806](https://github.com/netdata/netdata/pull/10806) ([ilyam8](https://github.com/ilyam8))
-- Don't show alarms for charts without data [\#10804](https://github.com/netdata/netdata/pull/10804) ([vlvkobal](https://github.com/vlvkobal))
-- claiming: increase curl connect-timeout and decrease number of claim attempts [\#10800](https://github.com/netdata/netdata/pull/10800) ([ilyam8](https://github.com/ilyam8))
-- Added Ubuntu 21.04 and Fedora 34 to our CI checks and binary package builds. [\#10791](https://github.com/netdata/netdata/pull/10791) ([Ferroin](https://github.com/Ferroin))
-- health: remove ram\_in\_swap alarm [\#10789](https://github.com/netdata/netdata/pull/10789) ([ilyam8](https://github.com/ilyam8))
-- Add a new parameter 'chart' to the /api/v1/alarm\_log. [\#10788](https://github.com/netdata/netdata/pull/10788) ([MrZammler](https://github.com/MrZammler))
-- Add check for children connecting to a parent agent with unsupported memory mode [\#10787](https://github.com/netdata/netdata/pull/10787) ([stelfrag](https://github.com/stelfrag))
-- health: use separate packets\_dropped\_ratio alarms for wifi network interfaces [\#10785](https://github.com/netdata/netdata/pull/10785) ([ilyam8](https://github.com/ilyam8))
-- ACLK separate https client [\#10784](https://github.com/netdata/netdata/pull/10784) ([underhood](https://github.com/underhood))
-- health: add `wmi\_` prefix to the wmi collector network alarms [\#10782](https://github.com/netdata/netdata/pull/10782) ([ilyam8](https://github.com/ilyam8))
-- web/gui: add max value to the nvidia\_smi.fan\_speed gauge [\#10780](https://github.com/netdata/netdata/pull/10780) ([ilyam8](https://github.com/ilyam8))
-- health/: fix various alarms critical and warning thresholds hysteresis [\#10779](https://github.com/netdata/netdata/pull/10779) ([ilyam8](https://github.com/ilyam8))
-- Adds \_aclk\_impl label [\#10778](https://github.com/netdata/netdata/pull/10778) ([underhood](https://github.com/underhood))
-- adding a default job with some params and example of additional job. [\#10777](https://github.com/netdata/netdata/pull/10777) ([andrewm4894](https://github.com/andrewm4894))
-- Fix typo in dashboard\_info.js [\#10775](https://github.com/netdata/netdata/pull/10775) ([eltociear](https://github.com/eltociear))
[\#10774](https://github.com/netdata/netdata/pull/10774) ([Ferroin](https://github.com/Ferroin)) -- add a dump\_methods parameter to alarm-notify.sh.in [\#10772](https://github.com/netdata/netdata/pull/10772) ([MrZammler](https://github.com/MrZammler)) -- Add data query support for archived charts [\#10771](https://github.com/netdata/netdata/pull/10771) ([stelfrag](https://github.com/stelfrag)) -- health: make vernemq alarms less sensitive [\#10770](https://github.com/netdata/netdata/pull/10770) ([ilyam8](https://github.com/ilyam8)) -- Fixed handling of perf.plugin capabilities. [\#10766](https://github.com/netdata/netdata/pull/10766) ([Ferroin](https://github.com/Ferroin)) -- dashboard@v2.13.28 [\#10761](https://github.com/netdata/netdata/pull/10761) ([jacekkolasa](https://github.com/jacekkolasa)) -- collectors/cgroups: fix cpuset.cpus count [\#10757](https://github.com/netdata/netdata/pull/10757) ([ilyam8](https://github.com/ilyam8)) -- eBPF plugin \(fixes 10727\) [\#10756](https://github.com/netdata/netdata/pull/10756) ([thiagoftsm](https://github.com/thiagoftsm)) -- web/gui: add supervisord to the dashboard\_info.js [\#10754](https://github.com/netdata/netdata/pull/10754) ([ilyam8](https://github.com/ilyam8)) -- Add state map to duplex and operstate charts [\#10752](https://github.com/netdata/netdata/pull/10752) ([vlvkobal](https://github.com/vlvkobal)) -- comment out memory mode mention in example [\#10751](https://github.com/netdata/netdata/pull/10751) ([OdysLam](https://github.com/OdysLam)) -- collectors/apps.plugin: Add wireguard to vpn [\#10743](https://github.com/netdata/netdata/pull/10743) ([liepumartins](https://github.com/liepumartins)) -- Enable metadata persistence in all memory modes [\#10742](https://github.com/netdata/netdata/pull/10742) ([stelfrag](https://github.com/stelfrag)) -- Move network interface speed, duplex, and operstate variables to charts [\#10740](https://github.com/netdata/netdata/pull/10740) ([vlvkobal](https://github.com/vlvkobal)) -- Use of out-of-line struct definitions. 
[\#10739](https://github.com/netdata/netdata/pull/10739) ([vkalintiris](https://github.com/vkalintiris)) -- Use a parameter name that is not a reserved keyword in C++ [\#10738](https://github.com/netdata/netdata/pull/10738) ([vkalintiris](https://github.com/vkalintiris)) -- Skip C++ incompatible header in main libnetdata header [\#10737](https://github.com/netdata/netdata/pull/10737) ([vkalintiris](https://github.com/vkalintiris)) -- Rename struct avl to avl\_element and the typedef to avl\_t [\#10735](https://github.com/netdata/netdata/pull/10735) ([vkalintiris](https://github.com/vkalintiris)) -- Fix claim behind squid proxy [\#10734](https://github.com/netdata/netdata/pull/10734) ([underhood](https://github.com/underhood)) -- add k6.conf [\#10733](https://github.com/netdata/netdata/pull/10733) ([OdysLam](https://github.com/OdysLam)) -- Always configure multihost database context [\#10732](https://github.com/netdata/netdata/pull/10732) ([stelfrag](https://github.com/stelfrag)) -- Removes unused fnc warning in ACLK Legacy [\#10731](https://github.com/netdata/netdata/pull/10731) ([underhood](https://github.com/underhood)) -- Update chart's metadata in database when it already exists during creation [\#10728](https://github.com/netdata/netdata/pull/10728) ([stelfrag](https://github.com/stelfrag)) -- New thread for ebpf.plugin [\#10726](https://github.com/netdata/netdata/pull/10726) ([thiagoftsm](https://github.com/thiagoftsm)) -- Support VS Code container devenv [\#10723](https://github.com/netdata/netdata/pull/10723) ([OdysLam](https://github.com/OdysLam)) -- Fixed detection of already claimed node in Docker images. [\#10720](https://github.com/netdata/netdata/pull/10720) ([Ferroin](https://github.com/Ferroin)) -- Add statsd guide [\#10719](https://github.com/netdata/netdata/pull/10719) ([OdysLam](https://github.com/OdysLam)) -- Add the ability to store chart labels in the database [\#10718](https://github.com/netdata/netdata/pull/10718) ([stelfrag](https://github.com/stelfrag)) -- Fix a parameter binding issue when storing chart names in the database [\#10717](https://github.com/netdata/netdata/pull/10717) ([stelfrag](https://github.com/stelfrag)) -- Fix typo in backend\_prometheus.c [\#10716](https://github.com/netdata/netdata/pull/10716) ([eltociear](https://github.com/eltociear)) -- Add guide: Unsupervised anomaly detection for Raspberry Pi monitoring [\#10713](https://github.com/netdata/netdata/pull/10713) ([joelhans](https://github.com/joelhans)) -- Add Working Set charts to the cgroups plugin [\#10712](https://github.com/netdata/netdata/pull/10712) ([vlvkobal](https://github.com/vlvkobal)) -- python.d/smartd\_log: collect attribute 233 \(Media Wearout Indicator \(SSD\)\). [\#10711](https://github.com/netdata/netdata/pull/10711) ([aazedo](https://github.com/aazedo)) -- Add guide: Develop a custom data collector for Netdata in Python [\#10710](https://github.com/netdata/netdata/pull/10710) ([joelhans](https://github.com/joelhans)) -- New version eBPF programs. [\#10707](https://github.com/netdata/netdata/pull/10707) ([thiagoftsm](https://github.com/thiagoftsm)) -- Add JSON output option for buildinfo. 
[\#10706](https://github.com/netdata/netdata/pull/10706) ([Ferroin](https://github.com/Ferroin)) -- Fix disk utilization and backlog charts [\#10705](https://github.com/netdata/netdata/pull/10705) ([vlvkobal](https://github.com/vlvkobal)) -- update\_kernel\_version: Fix overflow on Centos and probably Ubuntu [\#10704](https://github.com/netdata/netdata/pull/10704) ([thiagoftsm](https://github.com/thiagoftsm)) -- Docs: Convert references to `service` to `systemctl` [\#10703](https://github.com/netdata/netdata/pull/10703) ([joelhans](https://github.com/joelhans)) -- Add noauthcodecheck workaround flag to the freeipmi plugin [\#10701](https://github.com/netdata/netdata/pull/10701) ([vlvkobal](https://github.com/vlvkobal)) -- Add guide: LAMP stack monitoring [\#10698](https://github.com/netdata/netdata/pull/10698) ([joelhans](https://github.com/joelhans)) -- Log ACLK cloud commands to access.log [\#10697](https://github.com/netdata/netdata/pull/10697) ([stelfrag](https://github.com/stelfrag)) -- Add Linux page cache metrics to eBPF [\#10693](https://github.com/netdata/netdata/pull/10693) ([thiagoftsm](https://github.com/thiagoftsm)) -- Update guide: Kubernetes monitoring with Netdata: Overview and visualizations [\#10691](https://github.com/netdata/netdata/pull/10691) ([joelhans](https://github.com/joelhans)) -- health: make alarms less sensitive [\#10688](https://github.com/netdata/netdata/pull/10688) ([ilyam8](https://github.com/ilyam8)) -- Ebpf support new collectors [\#10680](https://github.com/netdata/netdata/pull/10680) ([thiagoftsm](https://github.com/thiagoftsm)) -- Fix broken links in active alarms doc [\#10678](https://github.com/netdata/netdata/pull/10678) ([joelhans](https://github.com/joelhans)) -- Add new cookie to fix 8094 [\#10676](https://github.com/netdata/netdata/pull/10676) ([thiagoftsm](https://github.com/thiagoftsm)) -- Alarms collector add alarm values [\#10675](https://github.com/netdata/netdata/pull/10675) ([andrewm4894](https://github.com/andrewm4894)) -- Don't add duplicate \_total suffixes for the prometheus go.d module [\#10674](https://github.com/netdata/netdata/pull/10674) ([vlvkobal](https://github.com/vlvkobal)) -- fix a typo in the email notifications readme [\#10668](https://github.com/netdata/netdata/pull/10668) ([ossimantylahti](https://github.com/ossimantylahti)) -- Update screenshots and text for new Cloud nav [\#10664](https://github.com/netdata/netdata/pull/10664) ([joelhans](https://github.com/joelhans)) -- Improve the Kubernetes deployment documentation [\#10662](https://github.com/netdata/netdata/pull/10662) ([joelhans](https://github.com/joelhans)) -- installer: update go.d.plugin version to v0.28.0 [\#10660](https://github.com/netdata/netdata/pull/10660) ([ilyam8](https://github.com/ilyam8)) -- Changed Docker image tagging to use semver tags for releases. [\#10648](https://github.com/netdata/netdata/pull/10648) ([Ferroin](https://github.com/Ferroin)) -- Revamp statsd docs [\#10637](https://github.com/netdata/netdata/pull/10637) ([OdysLam](https://github.com/OdysLam)) -- replace GA with PostHog for backend telemetry events. [\#10636](https://github.com/netdata/netdata/pull/10636) ([andrewm4894](https://github.com/andrewm4894)) -- cpu stats per query thread [\#10634](https://github.com/netdata/netdata/pull/10634) ([MrZammler](https://github.com/MrZammler)) -- Assorted updater fixes. 
[\#10613](https://github.com/netdata/netdata/pull/10613) ([Ferroin](https://github.com/Ferroin)) -- add stats per cloud query type [\#10602](https://github.com/netdata/netdata/pull/10602) ([underhood](https://github.com/underhood)) -- Add a new workflow to test that updater works as expected [\#10599](https://github.com/netdata/netdata/pull/10599) ([kaskavel](https://github.com/kaskavel)) -- Add support for changing the number of pages per extent [\#10593](https://github.com/netdata/netdata/pull/10593) ([mfundul](https://github.com/mfundul)) -- web/gui: Fix broken external links [\#10586](https://github.com/netdata/netdata/pull/10586) ([Habetdin](https://github.com/Habetdin)) -- Fix wrong count for entries [\#10564](https://github.com/netdata/netdata/pull/10564) ([thiagoftsm](https://github.com/thiagoftsm)) -- Try to keep all pages from extents read from disk in the cache. [\#10558](https://github.com/netdata/netdata/pull/10558) ([mfundul](https://github.com/mfundul)) - ## [v1.29.3](https://github.com/netdata/netdata/tree/v1.29.3) (2021-02-23) [Full Changelog](https://github.com/netdata/netdata/compare/v1.29.2...v1.29.3) -**Merged pull requests:** - -- Invalidate RRDSETVAR pointers on obsoletion. [\#10667](https://github.com/netdata/netdata/pull/10667) ([mfundul](https://github.com/mfundul)) -- Fixed condition controlling use of static LWS in RPM builds. [\#10661](https://github.com/netdata/netdata/pull/10661) ([Ferroin](https://github.com/Ferroin)) -- fix wrong link on docs Netdata Agent Daemon [\#10659](https://github.com/netdata/netdata/pull/10659) ([OdysLam](https://github.com/OdysLam)) -- Fix broken links in docs and add collectors to list [\#10651](https://github.com/netdata/netdata/pull/10651) ([joelhans](https://github.com/joelhans)) -- Statsd dashboard [\#10640](https://github.com/netdata/netdata/pull/10640) ([OdysLam](https://github.com/OdysLam)) - ## [v1.29.2](https://github.com/netdata/netdata/tree/v1.29.2) (2021-02-18) [Full Changelog](https://github.com/netdata/netdata/compare/v1.29.1...v1.29.2) -**Merged pull requests:** - -- Fix the context filtering on the data query endpoint [\#10652](https://github.com/netdata/netdata/pull/10652) ([stelfrag](https://github.com/stelfrag)) -- fix container/host detection in system-info script [\#10647](https://github.com/netdata/netdata/pull/10647) ([ilyam8](https://github.com/ilyam8)) -- Enable apps.plugin aggregation debug messages [\#10645](https://github.com/netdata/netdata/pull/10645) ([vlvkobal](https://github.com/vlvkobal)) -- add small delay to the ipv4\_tcp\_resets alarms [\#10644](https://github.com/netdata/netdata/pull/10644) ([ilyam8](https://github.com/ilyam8)) -- collectors/proc: fix collecting operstate for virtual network interfaces [\#10633](https://github.com/netdata/netdata/pull/10633) ([ilyam8](https://github.com/ilyam8)) -- fix sendmail unrecognized option F error [\#10631](https://github.com/netdata/netdata/pull/10631) ([ilyam8](https://github.com/ilyam8)) -- Fix typo in web/gui/readme.md [\#10623](https://github.com/netdata/netdata/pull/10623) ([OdysLam](https://github.com/OdysLam)) -- add freeswitch to apps\_groups [\#10621](https://github.com/netdata/netdata/pull/10621) ([fayak](https://github.com/fayak)) -- Add ACLK proxy setting as host label [\#10619](https://github.com/netdata/netdata/pull/10619) ([underhood](https://github.com/underhood)) -- dashboard@v2.13.6 [\#10618](https://github.com/netdata/netdata/pull/10618) ([jacekkolasa](https://github.com/jacekkolasa)) -- Disable stock alarms 
[\#10617](https://github.com/netdata/netdata/pull/10617) ([thiagoftsm](https://github.com/thiagoftsm)) -- Fixes \#10597 raw binary data should never be printed [\#10603](https://github.com/netdata/netdata/pull/10603) ([rda0](https://github.com/rda0)) -- collectors/proc: change ksm mem chart type to stacked [\#10598](https://github.com/netdata/netdata/pull/10598) ([ilyam8](https://github.com/ilyam8)) -- ACLK reduce excessive logging [\#10596](https://github.com/netdata/netdata/pull/10596) ([underhood](https://github.com/underhood)) -- add k8s\_cluster\_id host label [\#10588](https://github.com/netdata/netdata/pull/10588) ([ilyam8](https://github.com/ilyam8)) -- add resetting CapabilityBoundingSet workaround to the python.d collectors \(that use `sudo`\) readmes [\#10587](https://github.com/netdata/netdata/pull/10587) ([ilyam8](https://github.com/ilyam8)) -- collectors/elasticsearch: document `scheme` option [\#10572](https://github.com/netdata/netdata/pull/10572) ([vjt](https://github.com/vjt)) -- Update claiming docs for Docker containers. [\#10570](https://github.com/netdata/netdata/pull/10570) ([Ferroin](https://github.com/Ferroin)) -- health: make Opsgenie API URL configurable [\#10561](https://github.com/netdata/netdata/pull/10561) ([tinyhammers](https://github.com/tinyhammers)) - ## [v1.29.1](https://github.com/netdata/netdata/tree/v1.29.1) (2021-02-09) [Full Changelog](https://github.com/netdata/netdata/compare/v1.29.0...v1.29.1) -**Merged pull requests:** - -- Fix crash during shutdown of cgroups internal plugin. [\#10614](https://github.com/netdata/netdata/pull/10614) ([mfundul](https://github.com/mfundul)) -- Update latest release on main README [\#10590](https://github.com/netdata/netdata/pull/10590) ([joelhans](https://github.com/joelhans)) - ## [v1.29.0](https://github.com/netdata/netdata/tree/v1.29.0) (2021-02-03) [Full Changelog](https://github.com/netdata/netdata/compare/v1.27.0_0104103941...v1.29.0) -**Merged pull requests:** - -- Fixed Netdata Cloud support in RPM packages. 
[\#10578](https://github.com/netdata/netdata/pull/10578) ([Ferroin](https://github.com/Ferroin)) -- Fix container detection from systemd-detect-virt [\#10569](https://github.com/netdata/netdata/pull/10569) ([cakrit](https://github.com/cakrit)) -- dashboard v2.13.0 [\#10565](https://github.com/netdata/netdata/pull/10565) ([jacekkolasa](https://github.com/jacekkolasa)) -- bytes after last '}' trip JSON parser [\#10563](https://github.com/netdata/netdata/pull/10563) ([underhood](https://github.com/underhood)) -- Fix prometheus remote write header [\#10560](https://github.com/netdata/netdata/pull/10560) ([vlvkobal](https://github.com/vlvkobal)) -- fix minor vulnerability alert, updating socket-io dependency [\#10557](https://github.com/netdata/netdata/pull/10557) ([jacekkolasa](https://github.com/jacekkolasa)) - ## [v1.27.0_0104103941](https://github.com/netdata/netdata/tree/v1.27.0_0104103941) (2021-01-04) [Full Changelog](https://github.com/netdata/netdata/compare/v1.28.0...v1.27.0_0104103941) diff --git a/CMakeLists.txt b/CMakeLists.txt index ddce9882b..d21ea89c4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -490,14 +490,38 @@ set(EBPF_PROCESS_PLUGIN_FILES collectors/ebpf.plugin/ebpf_cachestat.h collectors/ebpf.plugin/ebpf_dcstat.c collectors/ebpf.plugin/ebpf_dcstat.h + collectors/ebpf.plugin/ebpf_disk.c + collectors/ebpf.plugin/ebpf_disk.h + collectors/ebpf.plugin/ebpf_fd.c + collectors/ebpf.plugin/ebpf_fd.h + collectors/ebpf.plugin/ebpf_hardirq.c + collectors/ebpf.plugin/ebpf_hardirq.h + collectors/ebpf.plugin/ebpf_mdflush.c + collectors/ebpf.plugin/ebpf_mdflush.h + collectors/ebpf.plugin/ebpf_mount.c + collectors/ebpf.plugin/ebpf_mount.h + collectors/ebpf.plugin/ebpf_filesystem.c + collectors/ebpf.plugin/ebpf_filesystem.h + collectors/ebpf.plugin/ebpf_oomkill.c + collectors/ebpf.plugin/ebpf_oomkill.h collectors/ebpf.plugin/ebpf_process.c collectors/ebpf.plugin/ebpf_process.h + collectors/ebpf.plugin/ebpf_shm.c + collectors/ebpf.plugin/ebpf_shm.h collectors/ebpf.plugin/ebpf_socket.c collectors/ebpf.plugin/ebpf_socket.h + collectors/ebpf.plugin/ebpf_softirq.c + collectors/ebpf.plugin/ebpf_softirq.h collectors/ebpf.plugin/ebpf_sync.c collectors/ebpf.plugin/ebpf_sync.h + collectors/ebpf.plugin/ebpf_swap.c + collectors/ebpf.plugin/ebpf_swap.h + collectors/ebpf.plugin/ebpf_vfs.c + collectors/ebpf.plugin/ebpf_vfs.h collectors/ebpf.plugin/ebpf_apps.c collectors/ebpf.plugin/ebpf_apps.h + collectors/ebpf.plugin/ebpf_cgroup.c + collectors/ebpf.plugin/ebpf_cgroup.h ) set(PROC_PLUGIN_FILES @@ -611,6 +635,16 @@ set(RRD_PLUGIN_FILES database/rrdvar.h database/sqlite/sqlite_functions.c database/sqlite/sqlite_functions.h + database/sqlite/sqlite_aclk.c + database/sqlite/sqlite_aclk.h + database/sqlite/sqlite_health.c + database/sqlite/sqlite_health.h + database/sqlite/sqlite_aclk_node.c + database/sqlite/sqlite_aclk_node.h + database/sqlite/sqlite_aclk_chart.c + database/sqlite/sqlite_aclk_chart.h + database/sqlite/sqlite_aclk_alert.c + database/sqlite/sqlite_aclk_alert.h database/sqlite/sqlite3.c database/sqlite/sqlite3.h database/engine/rrdengine.c @@ -724,12 +758,22 @@ set(BACKENDS_PLUGIN_FILES set(CLAIM_PLUGIN_FILES claim/claim.c claim/claim.h - aclk/legacy/aclk_rrdhost_state.h - aclk/legacy/aclk_common.c - aclk/legacy/aclk_common.h ) -set(ACLK_PLUGIN_FILES +set(ACLK_ALWAYS_BUILD + aclk/aclk_rrdhost_state.h + aclk/aclk_api.c + aclk/aclk_api.h + aclk/aclk_proxy.c + aclk/aclk_proxy.h + ) + +set(ACLK_COMMON_FILES + aclk/aclk_collector_list.c + aclk/aclk_collector_list.h + ) + 
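+# The single ACLK_PLUGIN_FILES list is split by role below: ACLK_ALWAYS_BUILD is
+# compiled unconditionally, ACLK_COMMON_FILES is shared by both ACLK
+# implementations, and ACLK_LEGACY_FILES / ACLK_NG_FILES carry the sources
+# specific to each implementation.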
+set(ACLK_LEGACY_FILES aclk/legacy/agent_cloud_link.c aclk/legacy/agent_cloud_link.h aclk/legacy/aclk_query.c @@ -744,6 +788,63 @@ set(ACLK_PLUGIN_FILES aclk/legacy/aclk_stats.h aclk/legacy/aclk_rx_msgs.c aclk/legacy/aclk_rx_msgs.h + aclk/legacy/aclk_common.c + aclk/legacy/aclk_common.h + ) + +set(ACLK_NG_FILES + aclk/aclk.c + aclk/aclk.h + aclk/aclk_util.c + aclk/aclk_util.h + aclk/aclk_stats.c + aclk/aclk_stats.h + aclk/aclk_query.c + aclk/aclk_query.h + aclk/aclk_query_queue.c + aclk/aclk_query_queue.h + aclk/aclk_otp.c + aclk/aclk_otp.h + aclk/aclk_tx_msgs.c + aclk/aclk_tx_msgs.h + aclk/aclk_rx_msgs.c + aclk/aclk_rx_msgs.h + aclk/https_client.c + aclk/https_client.h + aclk/aclk_charts_api.c + aclk/aclk_charts_api.h + aclk/aclk_alarm_api.c + aclk/aclk_alarm_api.h + mqtt_websockets/src/mqtt_wss_client.c + mqtt_websockets/src/include/mqtt_wss_client.h + mqtt_websockets/src/mqtt_wss_log.c + mqtt_websockets/src/include/mqtt_wss_log.h + mqtt_websockets/src/ws_client.c + mqtt_websockets/src/include/ws_client.h + mqtt_websockets/c-rbuf/src/ringbuffer.c + mqtt_websockets/c-rbuf/include/ringbuffer.h + mqtt_websockets/c-rbuf/src/ringbuffer_internal.h + mqtt_websockets/MQTT-C/src/mqtt.c + mqtt_websockets/MQTT-C/include/mqtt.h + aclk/schema-wrappers/connection.cc + aclk/schema-wrappers/connection.h + aclk/schema-wrappers/node_connection.cc + aclk/schema-wrappers/node_connection.h + aclk/schema-wrappers/node_creation.cc + aclk/schema-wrappers/node_creation.h + aclk/schema-wrappers/chart_stream.cc + aclk/schema-wrappers/chart_stream.h + aclk/schema-wrappers/chart_config.cc + aclk/schema-wrappers/chart_config.h + aclk/schema-wrappers/alarm_stream.cc + aclk/schema-wrappers/alarm_stream.h + aclk/schema-wrappers/alarm_config.cc + aclk/schema-wrappers/alarm_config.h + aclk/schema-wrappers/node_info.cc + aclk/schema-wrappers/node_info.h + aclk/schema-wrappers/schema_wrappers.h + aclk/schema-wrappers/schema_wrapper_utils.cc + aclk/schema-wrappers/schema_wrapper_utils.h ) set(SPAWN_PLUGIN_FILES @@ -753,7 +854,7 @@ set(SPAWN_PLUGIN_FILES spawn/spawn.h ) -set(ACLK_STATIC_LIBS +set(ACLK_LEGACY_STATIC_LIBS ${CMAKE_SOURCE_DIR}/externaldeps/mosquitto/libmosquitto.a ${CMAKE_SOURCE_DIR}/externaldeps/libwebsockets/libwebsockets.a ) @@ -836,12 +937,19 @@ set(DAEMON_FILES daemon/main.h daemon/signals.c daemon/signals.h + daemon/service.c + daemon/service.h daemon/commands.c daemon/commands.h daemon/unit_test.c daemon/unit_test.h ) +set(ML_FILES + ml/ml.h + ml/ml-dummy.c +) + set(NETDATA_FILES collectors/all.h ${DAEMON_FILES} @@ -851,6 +959,7 @@ set(NETDATA_FILES ${CHECKS_PLUGIN_FILES} ${HEALTH_PLUGIN_FILES} ${IDLEJITTER_PLUGIN_FILES} + ${ML_FILES} ${PLUGINSD_PLUGIN_FILES} ${RRD_PLUGIN_FILES} ${REGISTRY_PLUGIN_FILES} @@ -961,21 +1070,21 @@ ENDIF() set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} m ${CMAKE_THREAD_LIBS_INIT}) -set(ACLK_CAN_BUILD 1) +set(ACLK_LEGACY_CAN_BUILD 1) if(NOT EXISTS "${CMAKE_SOURCE_DIR}/externaldeps/mosquitto/libmosquitto.a") message(WARNING "Static build of mosquitto not found. Disabling ACLK") - set(ACLK_CAN_BUILD 0) + set(ACLK_LEGACY_CAN_BUILD 0) ENDIF() if(NOT EXISTS "${CMAKE_SOURCE_DIR}/externaldeps/libwebsockets/libwebsockets.a") message(WARNING "Static build of libwebsockets not found. 
Disabling ACLK") - set(ACLK_CAN_BUILD 0) + set(ACLK_LEGACY_CAN_BUILD 0) ENDIF() -IF(ACLK_CAN_BUILD) - message(STATUS "agent-cloud-link: enabled") - list(APPEND NETDATA_FILES ${ACLK_PLUGIN_FILES}) - list(APPEND NETDATA_COMMON_LIBRARIES ${ACLK_STATIC_LIBS}) +IF(ACLK_LEGACY_CAN_BUILD) + message(STATUS "agent-cloud-link Legacy: enabled") + list(APPEND NETDATA_FILES ${ACLK_LEGACY_FILES}) + list(APPEND NETDATA_COMMON_LIBRARIES ${ACLK_LEGACY_STATIC_LIBS}) include_directories(BEFORE ${CMAKE_SOURCE_DIR}/externaldeps/libwebsockets/include) IF(LINUX AND CAP_FOUND) list(APPEND NETDATA_COMMON_LIBRARIES ${CAP_LIBRARIES}) @@ -983,9 +1092,74 @@ IF(ACLK_CAN_BUILD) list(APPEND NETDATA_COMMON_CFLAGS ${CAP_CFLAGS_OTHER}) ENDIF() ELSE() - message(STATUS "agent-cloud-link: disabled") + message(STATUS "agent-cloud-link Legacy: disabled") ENDIF() +find_package(Protobuf REQUIRED) + +function(PROTOBUF_ACLK_GENERATE_CPP SRCS HDRS) + if(NOT ARGN) + message(SEND_ERROR "Error: PROTOBUF_ACLK_GENERATE_CPP() called without any proto files") + return() + endif() + + set(${SRCS}) + set(${HDRS}) + foreach(FIL ${ARGN}) + get_filename_component(ABS_FIL ${FIL} ABSOLUTE) + get_filename_component(DIR ${ABS_FIL} DIRECTORY) + get_filename_component(FIL_WE ${FIL} NAME_WE) + set(GENERATED_PB_CC "${DIR}/${FIL_WE}.pb.cc") + set(GENERATED_PB_H "${DIR}/${FIL_WE}.pb.h") +# cmake > 3.20 required :( +# cmake_path(SET GENERATED_PB_CC "${DIR}") +# cmake_path(SET GENERATED_PB_H "${DIR}") +# cmake_path(APPEND GENERATED_PB_CC "${FIL_WE}.pb.cc") +# cmake_path(APPEND GENERATED_PB_H "${FIL_WE}.pb.h") + + list(APPEND ${SRCS} ${GENERATED_PB_CC}) + list(APPEND ${HDRS} ${GENERATED_PB_H}) + add_custom_command( + OUTPUT ${GENERATED_PB_CC} + ${GENERATED_PB_H} + COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} + ARGS -I=${CMAKE_SOURCE_DIR}/aclk/aclk-schemas --cpp_out=${CMAKE_SOURCE_DIR}/aclk/aclk-schemas ${ABS_FIL} + DEPENDS ${ABS_FIL} ${PROTOBUF_PROTOC_EXECUTABLE} + COMMENT "Running C++ protocol buffer compiler on ${FIL}" + VERBATIM ) + endforeach() + set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE) + set(${SRCS} ${${SRCS}} PARENT_SCOPE) + set(${HDRS} ${${HDRS}} PARENT_SCOPE) +endfunction() + +set(ACLK_NG_PROTO_DEFS + aclk/aclk-schemas/proto/aclk/v1/lib.proto + aclk/aclk-schemas/proto/agent/v1/disconnect.proto + aclk/aclk-schemas/proto/agent/v1/connection.proto + aclk/aclk-schemas/proto/alarm/v1/config.proto + aclk/aclk-schemas/proto/alarm/v1/stream.proto + aclk/aclk-schemas/proto/chart/v1/config.proto + aclk/aclk-schemas/proto/chart/v1/dimension.proto + aclk/aclk-schemas/proto/chart/v1/instance.proto + aclk/aclk-schemas/proto/chart/v1/stream.proto + aclk/aclk-schemas/proto/nodeinstance/connection/v1/connection.proto + aclk/aclk-schemas/proto/nodeinstance/create/v1/creation.proto + aclk/aclk-schemas/proto/nodeinstance/info/v1/info.proto + ) +PROTOBUF_ACLK_GENERATE_CPP(ACLK_NG_PROTO_BUILT_SRCS ACLK_NG_PROTO_BUILT_HDRS ${ACLK_NG_PROTO_DEFS}) + +list(APPEND NETDATA_COMMON_LIBRARIES ${PROTOBUF_LIBRARIES}) +list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${PROTOBUF_INCLUDE_DIRS}) +list(APPEND NETDATA_COMMON_CFLAGS ${PROTOBUF_CFLAGS_OTHER}) +list(APPEND NETDATA_FILES ${ACLK_ALWAYS_BUILD}) +list(APPEND NETDATA_FILES ${ACLK_NG_FILES} ${ACLK_NG_PROTO_BUILT_SRCS} ${ACLK_NG_PROTO_BUILT_HDRS}) +list(APPEND NETDATA_FILES ${ACLK_COMMON_FILES}) +include_directories(BEFORE ${CMAKE_SOURCE_DIR}/aclk/aclk-schemas) +include_directories(BEFORE ${CMAKE_SOURCE_DIR}/mqtt_websockets/MQTT-C/include) +include_directories(BEFORE 
${CMAKE_SOURCE_DIR}/mqtt_websockets/src/include) +include_directories(BEFORE ${CMAKE_SOURCE_DIR}/mqtt_websockets/c-rbuf/include) + # ----------------------------------------------------------------------------- # netdata diff --git a/Makefile.am b/Makefile.am index 43780959b..61213ca8d 100644 --- a/Makefile.am +++ b/Makefile.am @@ -3,6 +3,9 @@ AUTOMAKE_OPTIONS = foreign subdir-objects 1.11 ACLOCAL_AMFLAGS = -I build/m4 +nodist_netdata_SOURCES=$(NULL) +BUILT_SOURCES=$(NULL) + MAINTAINERCLEANFILES = \ config.log config.status \ $(srcdir)/Makefile.in \ @@ -36,6 +39,7 @@ EXTRA_DIST = \ build/m4/ax_c_mallopt.m4 \ build/m4/tcmalloc.m4 \ build/m4/ax_c__generic.m4 \ + ml/kmeans/dlib \ README.md \ LICENSE \ REDISTRIBUTED.md \ @@ -80,6 +84,8 @@ dist_noinst_DATA = \ packaging/libwebsockets.version \ packaging/mosquitto.checksums \ packaging/mosquitto.version \ + packaging/protobuf.checksums \ + packaging/protobuf.version \ packaging/version \ $(NULL) @@ -112,9 +118,10 @@ SUBDIRS += \ claim \ parser \ spawn \ + ml \ $(NULL) -if !ACLK_NG +if ENABLE_ACLK SUBDIRS += \ aclk/legacy \ $(NULL) @@ -228,6 +235,50 @@ HEALTH_PLUGIN_FILES = \ health/health_log.c \ $(NULL) +ML_FILES = \ + ml/ml.h \ + ml/ml-dummy.c \ + $(NULL) + +if ENABLE_ML + +ML_FILES += \ + ml/BitBufferCounter.h \ + ml/BitBufferCounter.cc \ + ml/BitRateWindow.h \ + ml/BitRateWindow.cc \ + ml/Config.h \ + ml/Config.cc \ + ml/Database.h \ + ml/Database.cc \ + ml/Dimension.cc \ + ml/Dimension.h \ + ml/Host.h \ + ml/Host.cc \ + ml/Query.h \ + ml/kmeans/KMeans.h \ + ml/kmeans/KMeans.cc \ + ml/kmeans/SamplesBuffer.h \ + ml/kmeans/SamplesBuffer.cc \ + ml/kmeans/dlib/dlib/all/source.cpp \ + ml/json/single_include/nlohmann/json.hpp \ + ml/ml.cc \ + ml/ml-private.h \ + $(NULL) + +# Disable warnings from dlib library +ml/kmeans/dlib/dlib/all/source.$(OBJEXT) : CXXFLAGS += -Wno-sign-compare -Wno-type-limits + +endif + + +if ENABLE_ML_TESTS +ML_TESTS_FILES = \ + ml/kmeans/Tests.cc \ + ml/Tests.cc \ + $(NULL) +endif + IDLEJITTER_PLUGIN_FILES = \ collectors/idlejitter.plugin/plugin_idlejitter.c \ collectors/idlejitter.plugin/plugin_idlejitter.h \ @@ -289,15 +340,39 @@ EBPF_PLUGIN_FILES = \ collectors/ebpf.plugin/ebpf_cachestat.h \ collectors/ebpf.plugin/ebpf_dcstat.c \ collectors/ebpf.plugin/ebpf_dcstat.h \ + collectors/ebpf.plugin/ebpf_disk.c \ + collectors/ebpf.plugin/ebpf_disk.h \ + collectors/ebpf.plugin/ebpf_fd.c \ + collectors/ebpf.plugin/ebpf_fd.h \ + collectors/ebpf.plugin/ebpf_filesystem.c \ + collectors/ebpf.plugin/ebpf_filesystem.h \ + collectors/ebpf.plugin/ebpf_hardirq.c \ + collectors/ebpf.plugin/ebpf_hardirq.h \ + collectors/ebpf.plugin/ebpf_mdflush.c \ + collectors/ebpf.plugin/ebpf_mdflush.h \ + collectors/ebpf.plugin/ebpf_mount.c \ + collectors/ebpf.plugin/ebpf_mount.h \ + collectors/ebpf.plugin/ebpf_oomkill.c \ + collectors/ebpf.plugin/ebpf_oomkill.h \ collectors/ebpf.plugin/ebpf_process.c \ collectors/ebpf.plugin/ebpf_process.h \ + collectors/ebpf.plugin/ebpf_shm.c \ + collectors/ebpf.plugin/ebpf_shm.h \ collectors/ebpf.plugin/ebpf_socket.c \ collectors/ebpf.plugin/ebpf_socket.h \ + collectors/ebpf.plugin/ebpf_softirq.c \ + collectors/ebpf.plugin/ebpf_softirq.h \ collectors/ebpf.plugin/ebpf_sync.c \ collectors/ebpf.plugin/ebpf_sync.h \ + collectors/ebpf.plugin/ebpf_swap.c \ + collectors/ebpf.plugin/ebpf_swap.h \ + collectors/ebpf.plugin/ebpf_vfs.c \ + collectors/ebpf.plugin/ebpf_vfs.h \ collectors/ebpf.plugin/ebpf.h \ collectors/ebpf.plugin/ebpf_apps.c \ collectors/ebpf.plugin/ebpf_apps.h \ + collectors/ebpf.plugin/ebpf_cgroup.c \ + 
collectors/ebpf.plugin/ebpf_cgroup.h \ $(LIBNETDATA_FILES) \ $(NULL) @@ -386,6 +461,16 @@ RRD_PLUGIN_FILES = \ database/rrdvar.h \ database/sqlite/sqlite_functions.c \ database/sqlite/sqlite_functions.h \ + database/sqlite/sqlite_aclk.c \ + database/sqlite/sqlite_aclk.h \ + database/sqlite/sqlite_health.c \ + database/sqlite/sqlite_health.h \ + database/sqlite/sqlite_aclk_node.c \ + database/sqlite/sqlite_aclk_node.h \ + database/sqlite/sqlite_aclk_chart.c \ + database/sqlite/sqlite_aclk_chart.h \ + database/sqlite/sqlite_aclk_alert.c \ + database/sqlite/sqlite_aclk_alert.h \ database/sqlite/sqlite3.c \ database/sqlite/sqlite3.h \ $(NULL) @@ -534,7 +619,7 @@ PARSER_FILES = \ $(NULL) if ACLK_NG -ACLK_FILES = \ +ACLK_NG_FILES = \ aclk/aclk.c \ aclk/aclk.h \ aclk/aclk_util.c \ @@ -545,8 +630,6 @@ ACLK_FILES = \ aclk/aclk_query.h \ aclk/aclk_query_queue.c \ aclk/aclk_query_queue.h \ - aclk/aclk_collector_list.c \ - aclk/aclk_collector_list.h \ aclk/aclk_otp.c \ aclk/aclk_otp.h \ aclk/aclk_tx_msgs.c \ @@ -565,19 +648,152 @@ ACLK_FILES = \ mqtt_websockets/c-rbuf/include/ringbuffer.h \ mqtt_websockets/c-rbuf/src/ringbuffer_internal.h \ mqtt_websockets/MQTT-C/src/mqtt.c \ - mqtt_websockets/MQTT-C/include/mqtt.h \ + mqtt_websockets/MQTT-C/include/mqtt.h $(NULL) -else #ACLK_NG -ACLK_FILES = \ - aclk/legacy/aclk_rrdhost_state.h \ - aclk/legacy/aclk_common.c \ - aclk/legacy/aclk_common.h \ - aclk/legacy/aclk_stats.c \ - aclk/legacy/aclk_stats.h \ + +if ENABLE_NEW_CLOUD_PROTOCOL +ACLK_NG_FILES += \ + aclk/aclk_charts_api.c \ + aclk/aclk_charts_api.h \ + aclk/aclk_alarm_api.c \ + aclk/aclk_alarm_api.h \ + aclk/schema-wrappers/connection.cc \ + aclk/schema-wrappers/connection.h \ + aclk/schema-wrappers/node_connection.cc \ + aclk/schema-wrappers/node_connection.h \ + aclk/schema-wrappers/node_creation.cc \ + aclk/schema-wrappers/node_creation.h \ + aclk/schema-wrappers/chart_stream.cc \ + aclk/schema-wrappers/chart_stream.h \ + aclk/schema-wrappers/chart_config.cc \ + aclk/schema-wrappers/chart_config.h \ + aclk/schema-wrappers/alarm_stream.cc \ + aclk/schema-wrappers/alarm_stream.h \ + aclk/schema-wrappers/alarm_config.cc \ + aclk/schema-wrappers/alarm_config.h \ + aclk/schema-wrappers/node_info.cc \ + aclk/schema-wrappers/node_info.h \ + aclk/schema-wrappers/schema_wrappers.h \ + aclk/schema-wrappers/schema_wrapper_utils.cc \ + aclk/schema-wrappers/schema_wrapper_utils.h \ + $(NULL) + +ACLK_NG_PROTO_DEFINITIONS = \ + aclk/aclk-schemas/proto/aclk/v1/lib.proto \ + aclk/aclk-schemas/proto/agent/v1/disconnect.proto \ + aclk/aclk-schemas/proto/agent/v1/connection.proto \ + aclk/aclk-schemas/proto/alarm/v1/config.proto \ + aclk/aclk-schemas/proto/alarm/v1/stream.proto \ + aclk/aclk-schemas/proto/chart/v1/config.proto \ + aclk/aclk-schemas/proto/chart/v1/dimension.proto \ + aclk/aclk-schemas/proto/chart/v1/instance.proto \ + aclk/aclk-schemas/proto/chart/v1/stream.proto \ + aclk/aclk-schemas/proto/nodeinstance/connection/v1/connection.proto \ + aclk/aclk-schemas/proto/nodeinstance/create/v1/creation.proto \ + aclk/aclk-schemas/proto/nodeinstance/info/v1/info.proto \ + $(NULL) + +dist_noinst_DATA += $(ACLK_NG_PROTO_DEFINITIONS) + +ACLK_NG_PROTO_BUILT_FILES = aclk/aclk-schemas/proto/agent/v1/connection.pb.cc \ + aclk/aclk-schemas/proto/agent/v1/connection.pb.h \ + aclk/aclk-schemas/proto/nodeinstance/connection/v1/connection.pb.cc \ + aclk/aclk-schemas/proto/nodeinstance/connection/v1/connection.pb.h \ + aclk/aclk-schemas/proto/nodeinstance/create/v1/creation.pb.cc \ + 
aclk/aclk-schemas/proto/nodeinstance/create/v1/creation.pb.h \ + aclk/aclk-schemas/proto/chart/v1/stream.pb.cc \ + aclk/aclk-schemas/proto/chart/v1/stream.pb.h \ + aclk/aclk-schemas/proto/chart/v1/instance.pb.cc \ + aclk/aclk-schemas/proto/chart/v1/instance.pb.h \ + aclk/aclk-schemas/proto/chart/v1/dimension.pb.cc \ + aclk/aclk-schemas/proto/chart/v1/dimension.pb.h \ + aclk/aclk-schemas/proto/chart/v1/config.pb.cc \ + aclk/aclk-schemas/proto/chart/v1/config.pb.h \ + aclk/aclk-schemas/proto/aclk/v1/lib.pb.cc \ + aclk/aclk-schemas/proto/aclk/v1/lib.pb.h \ + aclk/aclk-schemas/proto/agent/v1/disconnect.pb.cc \ + aclk/aclk-schemas/proto/agent/v1/disconnect.pb.h \ + aclk/aclk-schemas/proto/alarm/v1/config.pb.cc \ + aclk/aclk-schemas/proto/alarm/v1/config.pb.h \ + aclk/aclk-schemas/proto/alarm/v1/stream.pb.cc \ + aclk/aclk-schemas/proto/alarm/v1/stream.pb.h \ + aclk/aclk-schemas/proto/nodeinstance/info/v1/info.pb.cc \ + aclk/aclk-schemas/proto/nodeinstance/info/v1/info.pb.h \ $(NULL) +BUILT_SOURCES += $(ACLK_NG_PROTO_BUILT_FILES) +nodist_netdata_SOURCES += $(ACLK_NG_PROTO_BUILT_FILES) +CLEANFILES += $(ACLK_NG_PROTO_BUILT_FILES) + +aclk/aclk-schemas/proto/agent/v1/connection.pb.cc \ +aclk/aclk-schemas/proto/agent/v1/connection.pb.h: aclk/aclk-schemas/proto/agent/v1/connection.proto + $(PROTOC) -I=aclk/aclk-schemas --cpp_out=$(builddir)/aclk/aclk-schemas $^ + +aclk/aclk-schemas/proto/nodeinstance/connection/v1/connection.pb.cc \ +aclk/aclk-schemas/proto/nodeinstance/connection/v1/connection.pb.h: aclk/aclk-schemas/proto/nodeinstance/connection/v1/connection.proto + $(PROTOC) -I=aclk/aclk-schemas --cpp_out=$(builddir)/aclk/aclk-schemas $^ + +aclk/aclk-schemas/proto/nodeinstance/create/v1/creation.pb.cc \ +aclk/aclk-schemas/proto/nodeinstance/create/v1/creation.pb.h: aclk/aclk-schemas/proto/nodeinstance/create/v1/creation.proto + $(PROTOC) -I=aclk/aclk-schemas --cpp_out=$(builddir)/aclk/aclk-schemas $^ + +aclk/aclk-schemas/proto/chart/v1/stream.pb.cc \ +aclk/aclk-schemas/proto/chart/v1/stream.pb.h: aclk/aclk-schemas/proto/chart/v1/stream.proto + $(PROTOC) -I=aclk/aclk-schemas --cpp_out=$(builddir)/aclk/aclk-schemas $^ + +aclk/aclk-schemas/proto/chart/v1/instance.pb.cc \ +aclk/aclk-schemas/proto/chart/v1/instance.pb.h: aclk/aclk-schemas/proto/chart/v1/instance.proto + $(PROTOC) -I=aclk/aclk-schemas --cpp_out=$(builddir)/aclk/aclk-schemas $^ + +aclk/aclk-schemas/proto/chart/v1/dimension.pb.cc \ +aclk/aclk-schemas/proto/chart/v1/dimension.pb.h: aclk/aclk-schemas/proto/chart/v1/dimension.proto + $(PROTOC) -I=aclk/aclk-schemas --cpp_out=$(builddir)/aclk/aclk-schemas $^ + +aclk/aclk-schemas/proto/chart/v1/config.pb.cc \ +aclk/aclk-schemas/proto/chart/v1/config.pb.h: aclk/aclk-schemas/proto/chart/v1/config.proto + $(PROTOC) -I=aclk/aclk-schemas --cpp_out=$(builddir)/aclk/aclk-schemas $^ + +aclk/aclk-schemas/proto/aclk/v1/lib.pb.cc \ +aclk/aclk-schemas/proto/aclk/v1/lib.pb.h: aclk/aclk-schemas/proto/aclk/v1/lib.proto + $(PROTOC) -I=aclk/aclk-schemas --cpp_out=$(builddir)/aclk/aclk-schemas $^ + +aclk/aclk-schemas/proto/agent/v1/disconnect.pb.cc \ +aclk/aclk-schemas/proto/agent/v1/disconnect.pb.h: aclk/aclk-schemas/proto/agent/v1/disconnect.proto + $(PROTOC) -I=aclk/aclk-schemas --cpp_out=$(builddir)/aclk/aclk-schemas $^ + +aclk/aclk-schemas/proto/alarm/v1/config.pb.cc \ +aclk/aclk-schemas/proto/alarm/v1/config.pb.h: aclk/aclk-schemas/proto/alarm/v1/config.proto + $(PROTOC) -I=aclk/aclk-schemas --cpp_out=$(builddir)/aclk/aclk-schemas $^ + +aclk/aclk-schemas/proto/alarm/v1/stream.pb.cc \ 
+aclk/aclk-schemas/proto/alarm/v1/stream.pb.h: aclk/aclk-schemas/proto/alarm/v1/stream.proto + $(PROTOC) -I=aclk/aclk-schemas --cpp_out=$(builddir)/aclk/aclk-schemas $^ + +aclk/aclk-schemas/proto/nodeinstance/info/v1/info.pb.cc \ +aclk/aclk-schemas/proto/nodeinstance/info/v1/info.pb.h: aclk/aclk-schemas/proto/nodeinstance/info/v1/info.proto + $(PROTOC) -I=aclk/aclk-schemas --cpp_out=$(builddir)/aclk/aclk-schemas $^ + +endif #ENABLE_NEW_CLOUD_PROTOCOL + +endif #ACLK_NG + if ENABLE_ACLK -ACLK_FILES += \ +ACLK_COMMON_FILES = \ + aclk/aclk_collector_list.c \ + aclk/aclk_collector_list.h \ + $(NULL) +endif + +ACLK_ALWAYS_BUILD_FILES = \ + aclk/aclk_rrdhost_state.h \ + aclk/aclk_api.c \ + aclk/aclk_api.h \ + aclk/aclk_proxy.c \ + aclk/aclk_proxy.h \ + $(NULL) + +if ACLK_LEGACY +ACLK_LEGACY_FILES = \ aclk/legacy/agent_cloud_link.c \ aclk/legacy/agent_cloud_link.h \ aclk/legacy/aclk_query.c \ @@ -590,9 +806,12 @@ ACLK_FILES += \ aclk/legacy/aclk_lws_wss_client.h \ aclk/legacy/aclk_lws_https_client.c \ aclk/legacy/aclk_lws_https_client.h \ + aclk/legacy/aclk_common.c \ + aclk/legacy/aclk_common.h \ + aclk/legacy/aclk_stats.c \ + aclk/legacy/aclk_stats.h \ $(NULL) -endif #ENABLE_ACLK -endif #ACLK_NG +endif #ACLK_LEGACY SPAWN_PLUGIN_FILES = \ spawn/spawn.c \ @@ -680,6 +899,8 @@ DAEMON_FILES = \ daemon/main.h \ daemon/signals.c \ daemon/signals.h \ + daemon/service.c \ + daemon/service.h \ daemon/commands.c \ daemon/commands.h \ daemon/unit_test.c \ @@ -695,6 +916,8 @@ NETDATA_FILES = \ $(EXPORTING_ENGINE_FILES) \ $(CHECKS_PLUGIN_FILES) \ $(HEALTH_PLUGIN_FILES) \ + $(ML_FILES) \ + $(ML_TESTS_FILES) \ $(IDLEJITTER_PLUGIN_FILES) \ $(PLUGINSD_PLUGIN_FILES) \ $(REGISTRY_PLUGIN_FILES) \ @@ -704,7 +927,10 @@ NETDATA_FILES = \ $(WEB_PLUGIN_FILES) \ $(CLAIM_FILES) \ $(PARSER_FILES) \ - $(ACLK_FILES) \ + $(ACLK_ALWAYS_BUILD_FILES) \ + $(ACLK_COMMON_FILES) \ + $(ACLK_LEGACY_FILES) \ + $(ACLK_NG_FILES) \ $(SPAWN_PLUGIN_FILES) \ $(NULL) @@ -746,7 +972,7 @@ NETDATA_COMMON_LIBS = \ $(NULL) if LINK_STATIC_JSONC - NETDATA_COMMON_LIBS += externaldeps/jsonc/libjson-c.a + NETDATA_COMMON_LIBS += $(abs_top_srcdir)/externaldeps/jsonc/libjson-c.a endif NETDATACLI_FILES = \ @@ -759,21 +985,32 @@ NETDATACLI_FILES = \ sbin_PROGRAMS += netdata netdata_SOURCES = $(NETDATA_FILES) +if LINUX + NETDATA_COMMON_LIBS += -lrt +endif netdata_LDADD = \ $(NETDATA_COMMON_LIBS) \ $(NULL) -if !ACLK_NG -if ENABLE_ACLK -netdata_LDADD += \ - externaldeps/mosquitto/libmosquitto.a \ - $(OPTIONAL_LIBCAP_LIBS) \ - $(OPTIONAL_LWS_LIBS) \ - $(NETDATA_COMMON_LIBS) \ - $(NULL) -endif #ENABLE_ACLK -endif #ACLK_NG +if ACLK_NG + netdata_LDADD += $(OPTIONAL_PROTOBUF_LIBS) \ + $(OPTIONAL_ATOMIC_LIBS) \ + $(NULL) +endif + +if ENABLE_ML_TESTS + netdata_LDADD += $(OPTIONAL_ML_TESTS_LIBS) \ + $(NULL) +endif + +if ACLK_LEGACY + netdata_LDADD += \ + $(abs_top_srcdir)/externaldeps/mosquitto/libmosquitto.a \ + $(OPTIONAL_LWS_LIBS) \ + $(OPTIONAL_LIBCAP_LIBS) \ + $(NULL) +endif #ACLK_LEGACY if ENABLE_CXX_LINKER netdata_LINK = $(CXXLD) $(CXXFLAGS) $(LDFLAGS) -o $@ @@ -881,12 +1118,15 @@ endif if ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES) $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES) - netdata_LDADD += $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS) - BUILT_SOURCES = \ + netdata_LDADD += $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS) \ + $(OPTIONAL_PROTOBUF_LIBS) \ + $(NULL) + BACKEND_PROMETHEUS_BUILT_SOURCES = \ exporting/prometheus/remote_write/remote_write.pb.cc \ exporting/prometheus/remote_write/remote_write.pb.h \ $(NULL) - 
nodist_netdata_SOURCES = $(BUILT_SOURCES) + BUILT_SOURCES += $(BACKEND_PROMETHEUS_BUILT_SOURCES) + nodist_netdata_SOURCES += $(BACKEND_PROMETHEUS_BUILT_SOURCES) exporting/prometheus/remote_write/remote_write.pb.cc \ exporting/prometheus/remote_write/remote_write.pb.h: exporting/prometheus/remote_write/remote_write.proto @@ -911,6 +1151,11 @@ if ENABLE_UNITTESTS TESTS = $(check_PROGRAMS) + XFAIL_TESTS = \ + web/api/tests/web_api_testdriver \ + web/api/tests/valid_urls_testdriver \ + $(NULL) + web_api_tests_valid_urls_testdriver_LDFLAGS = \ -Wl,--wrap=rrdhost_find_by_hostname \ -Wl,--wrap=finished_web_request_statistics \ @@ -1015,14 +1260,17 @@ if ENABLE_UNITTESTS exporting_tests_exporting_engine_testdriver_LDADD = $(NETDATA_COMMON_LIBS) $(TEST_LIBS) if ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE exporting_tests_exporting_engine_testdriver_SOURCES += $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES) - exporting_tests_exporting_engine_testdriver_LDADD += $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS) + exporting_tests_exporting_engine_testdriver_LDADD += \ + $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS) \ + $(OPTIONAL_PROTOBUF_LIBS) \ + $(NULL) exporting_tests_exporting_engine_testdriver_LDFLAGS += \ -Wl,--wrap=init_write_request \ -Wl,--wrap=add_host_info \ -Wl,--wrap=add_label \ -Wl,--wrap=add_metric \ $(NULL) - nodist_exporting_tests_exporting_engine_testdriver_SOURCES = $(BUILT_SOURCES) + nodist_exporting_tests_exporting_engine_testdriver_SOURCES = $(BACKEND_PROMETHEUS_BUILT_SOURCES) endif if ENABLE_BACKEND_KINESIS exporting_tests_exporting_engine_testdriver_SOURCES += $(KINESIS_EXPORTING_FILES) diff --git a/README.md b/README.md index 794ab7743..20c449193 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,7 @@

Latest release + Nightly release Build status CII Best Practices License: GPL v3+ @@ -34,11 +35,11 @@ People get addicted to Netdata. Once you use it on your systems, there's no goin ![Users who are addicted to Netdata](https://user-images.githubusercontent.com/1153921/96495792-2e881380-11fd-11eb-85a3-53d3a84dcb29.png) -> **[Latest release](https://github.com/netdata/netdata/releases/latest): v1.30.0, March 31, 2021** +> **[Latest release](https://github.com/netdata/netdata/releases/latest): v1.31.0, May 19, 2021** > -> The v1.30.0 release of Netdata brings major improvements to our packaging and completely replaces Google Analytics/GTM -> for product telemetry. We're also releasing the first changes in an upcoming overhaul to both our dashboard UI/UX and -> the suite of preconfigured alarms that comes with every installation. +> The v1.31.0 release of Netdata comes with re-packaged and redesigned elements of the dashboard to help you focus on +> your metrics, even more Linux kernel insights via eBPF, on-node machine learning to help you find anomalies, and much +> more. ## Menu @@ -47,6 +48,7 @@ Netdata](https://user-images.githubusercontent.com/1153921/96495792-2e881380-11f - [Docker](#docker) - [Other operating systems](#other-operating-systems) - [Post-installation](#post-installation) + - [Netdata Cloud](#netdata-cloud) - [How it works](#how-it-works) - [Infographic](#infographic) - [Documentation](#documentation) @@ -167,6 +169,14 @@ Read through Netdata's [documentation](https://learn.netdata.cloud/docs), which solutions, to enable features like health monitoring, alarm notifications, long-term metrics storage, exporting to external databases, and more. +### Netdata Cloud + +Netdata Cloud works with Netdata's free, open-source monitoring agent to help you monitor and troubleshoot every +layer of your systems to find weaknesses before they turn into outages. [Using both tools](https://learn.netdata.cloud/docs/agent/claim) +can help you turn data into insights immediately. + +[Get Netdata Cloud now!](https://app.netdata.cloud/) + ## How it works Netdata is a highly efficient, highly modular, metrics management engine. Its lockless design makes it ideal for diff --git a/aclk/README.md b/aclk/README.md index ffd036b97..13a9be27f 100644 --- a/aclk/README.md +++ b/aclk/README.md @@ -9,18 +9,19 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/aclk/README.md The Agent-Cloud link (ACLK) is the mechanism responsible for securely connecting a Netdata Agent to your web browser through Netdata Cloud. The ACLK establishes an outgoing secure WebSocket (WSS) connection to Netdata Cloud on port -`443`. The ACLK is encrypted, safe, and _is only established if you claim your node_. +`443`. The ACLK is encrypted, safe, and _is only established if you connect your node_. The Cloud App lives at app.netdata.cloud which currently resolves to 35.196.244.138. However, this IP or range of IPs can change without notice. Watch this page for updates. -For a guide to claiming a node using the ACLK, plus additional troubleshooting and reference information, read our [get -started with Cloud](https://learn.netdata.cloud/docs/cloud/get-started) guide or the full [claiming +For a guide to connecting a node using the ACLK, plus additional troubleshooting and reference information, read our [get +started with Cloud](https://learn.netdata.cloud/docs/cloud/get-started) guide or the full [connect to Cloud documentation](/claim/README.md). ## Data privacy -Privacy is very important to us. 
We firmly believe that your data belongs to you. This is why **we don't store any metric data in Netdata Cloud**. +[Data privacy](https://netdata.cloud/data-privacy/) is very important to us. We firmly believe that your data belongs to +you. This is why **we don't store any metric data in Netdata Cloud**. All the data that the user sees in the web browser when using Netdata Cloud are actually streamed directly from the Netdata Agent to the Netdata Cloud dashboard. They pass through our systems, but they are not stored. @@ -30,7 +31,7 @@ We do however store a limited number of *metadata* to be able to offer the stunn The information we store in Netdata Cloud is the following (using the publicly available demo server `frankfurt.my-netdata.io` as an example): - The email address you used to sign up or sign in -- For each node claimed to your Spaces in Netdata Cloud: +- For each node connected to your Spaces in Netdata Cloud: - Hostname (as it appears in Netdata Cloud) - Information shown in `/api/v1/info`. For example: [https://frankfurt.my-netdata.io/api/v1/info](https://frankfurt.my-netdata.io/api/v1/info). - The chart metadata shown in `/api/v1/charts`. For example: [https://frankfurt.my-netdata.io/api/v1/charts](https://frankfurt.my-netdata.io/api/v1/charts). @@ -45,7 +46,7 @@ How we use them: ## Enable and configure the ACLK The ACLK is enabled by default, with its settings automatically configured and stored in the Agent's memory. No file is -created at `/var/lib/netdata/cloud.d/cloud.conf` until you either claim a node or create it yourself. The default +created at `/var/lib/netdata/cloud.d/cloud.conf` until you either connect a node or create it yourself. The default configuration uses two settings: ```conf @@ -55,17 +56,41 @@ configuration uses two settings: ``` If your Agent needs to use a proxy to access the internet, you must [set up a proxy for -claiming](/claim/README.md#claim-through-a-proxy). +connecting to cloud](/claim/README.md#connect-through-a-proxy). You can configure the following keys in the `netdata.conf` section `[cloud]`: ``` [cloud] - statistics = yes - query thread count = 2 + statistics = yes + query thread count = 2 + aclk implementation = legacy ``` - `statistics` enables/disables ACLK related statistics and their charts. You can disable this to save some space in the database and slightly reduce the memory usage of the Netdata Agent. - `query thread count` specifies the number of threads to process cloud queries. Increasing this setting is useful for nodes with many children (streaming), which can expect to handle more queries (and/or more complicated queries). +- `aclk implementation` - see the [ACLK implementation](#aclk-implementation) section + +## ACLK implementation + +Currently we are in the process of switching the ACLK to a brand new technical stack called ACLK-NG. To choose your implementation, change the `aclk implementation` setting in your `netdata.conf` (accepted values: `ng` or `legacy`). + +Before changing this value, check that the desired implementation is available (determined at build time) by running `netdata -W buildinfo`. The following lines indicate which ACLK implementations are available: + +``` +Features: + ACLK Next Generation: YES + ACLK Legacy: YES +``` + +To verify which ACLK implementation Netdata uses, visit the `/api/v1/info` endpoint on your local dashboard and check the `aclk-implementation` key; see the example below. + +New Netdata Cloud features will be implemented on top of ACLK-NG from this point on. ACLK Legacy is therefore kept as a fallback in case some users have issues with ACLK-NG or need to use features which are not yet available in ACLK-NG *(like IPv6 support and SOCKS proxy)*.
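+
+As a minimal sketch of switching implementations (key names as documented above; `ng` is only an example value), the relevant `netdata.conf` snippet would be:
+
+```conf
+[cloud]
+    aclk implementation = ng
+```
+
+After restarting the Agent, requesting `/api/v1/info` (for instance `curl -s http://localhost:19999/api/v1/info`, assuming the default local dashboard port) should report the chosen implementation under the `aclk-implementation` key.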
+ +### Improvements of ACLK-NG over Legacy +- No dependency on a custom patched `libmosquitto` (no bundling of libraries), which should remove the obstacles many GNU/Linux distribution package maintainers had when trying to provide Netdata with Cloud support +- No dependency on libwebsockets +- Lower latency and higher throughput +- More up to date: new features for Netdata Cloud are currently developed on top of ACLK-NG first ## Disable the ACLK @@ -79,7 +104,7 @@ You can pass the `--disable-cloud` parameter to the Agent installation when usin Git](/packaging/installer/methods/manual.md). When you pass this parameter, the installer does not download or compile any extra libraries. Once running, the Agent -kills the thread responsible for the ACLK and claiming behavior, and behaves as though the ACLK, and thus Netdata Cloud, +kills the thread responsible for the ACLK and connecting behavior, and behaves as though the ACLK, and thus Netdata Cloud, does not exist. ### Disable at runtime @@ -135,7 +160,7 @@ If you first disable the ACLK and any Cloud functionality and then decide you wo If you passed `--disable-cloud` to `netdata-installer.sh` during installation, you must [reinstall](/packaging/installer/REINSTALL.md) your Agent. Use the same method as before, but pass `--require-cloud` to -the installer. When installation finishes you can [connect your node](/claim/README.md#how-to-connect-a-node). If you changed the runtime setting in your `/var/lib/netdata/cloud.d/cloud.conf` file, edit the file again and change `enabled` to `yes`: @@ -145,7 +170,6 @@ If you changed the runtime setting in your `var/lib/netdata/cloud.d/cloud.conf` enabled = yes ``` -Restart your Agent and [connect your node](/claim/README.md#how-to-connect-a-node). [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Faclk%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>) - diff --git a/aclk/aclk.c b/aclk/aclk.c index 35549cfea..a24d258c5 100644 --- a/aclk/aclk.c +++ b/aclk/aclk.c @@ -13,6 +13,8 @@ #include "aclk_collector_list.h" #include "https_client.h" +#include "aclk_proxy.h" + #ifdef ACLK_LOG_CONVERSATION_DIR #include #include @@ -21,20 +23,12 @@ #define ACLK_STABLE_TIMEOUT 3 // Minimum delay to mark AGENT as stable -//TODO remove most (as in 99.999999999%) of this crap -int aclk_connected = 0; -int aclk_disable_runtime = 0; -int aclk_disable_single_updates = 0; -int aclk_kill_link = 0; - int aclk_pubacks_per_conn = 0; // How many PubAcks we got since MQTT conn est.
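+// disconnect_req is polled by handle_connection(): when set, the connection loop
+// performs a graceful disconnect, resets the shutdown bookkeeping, and returns so
+// that the caller can re-establish the connection.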
+int disconnect_req = 0; -time_t aclk_block_until = 0; +int aclk_alert_reloaded = 1; //1 on startup, and again on health_reload -usec_t aclk_session_us = 0; // Used by the mqtt layer -time_t aclk_session_sec = 0; // Used by the mqtt layer - -aclk_env_t *aclk_env = NULL; +time_t aclk_block_until = 0; mqtt_wss_client mqttwss_client; @@ -43,22 +37,12 @@ netdata_mutex_t aclk_shared_state_mutex = NETDATA_MUTEX_INITIALIZER; #define ACLK_SHARED_STATE_UNLOCK netdata_mutex_unlock(&aclk_shared_state_mutex) struct aclk_shared_state aclk_shared_state = { - .agent_state = AGENT_INITIALIZING, + .agent_state = ACLK_HOST_INITIALIZING, .last_popcorn_interrupt = 0, .mqtt_shutdown_msg_id = -1, .mqtt_shutdown_msg_rcvd = 0 }; -void aclk_single_update_disable() -{ - aclk_disable_single_updates = 1; -} - -void aclk_single_update_enable() -{ - aclk_disable_single_updates = 0; -} - //ENDTODO static RSA *aclk_private_key = NULL; @@ -197,8 +181,9 @@ void aclk_mqtt_wss_log_cb(mqtt_wss_log_type_t log_type, const char* str) //TODO prevent big buffer on stack #define RX_MSGLEN_MAX 4096 -static void msg_callback(const char *topic, const void *msg, size_t msglen, int qos) +static void msg_callback_old_protocol(const char *topic, const void *msg, size_t msglen, int qos) { + UNUSED(qos); char cmsg[RX_MSGLEN_MAX]; size_t len = (msglen < RX_MSGLEN_MAX - 1) ? msglen : (RX_MSGLEN_MAX - 1); const char *cmd_topic = aclk_get_topic(ACLK_TOPICID_COMMAND); @@ -233,13 +218,61 @@ static void msg_callback(const char *topic, const void *msg, size_t msglen, int error("Received message on unexpected topic %s", topic); if (aclk_shared_state.mqtt_shutdown_msg_id > 0) { - error("Link is shutting down. Ignoring message."); + error("Link is shutting down. Ignoring incoming message."); return; } aclk_handle_cloud_message(cmsg); } +#ifdef ENABLE_NEW_CLOUD_PROTOCOL +static void msg_callback_new_protocol(const char *topic, const void *msg, size_t msglen, int qos) +{ + UNUSED(qos); + if (msglen > RX_MSGLEN_MAX) + error("Incoming ACLK message was bigger than MAX of %d and got truncated.", RX_MSGLEN_MAX); + + debug(D_ACLK, "Got Message From Broker Topic \"%s\" QOS %d", topic, qos); + + if (aclk_shared_state.mqtt_shutdown_msg_id > 0) { + error("Link is shutting down. Ignoring incoming message."); + return; + } + + const char *msgtype = strrchr(topic, '/'); + if (unlikely(!msgtype)) { + error_report("Cannot get message type from topic. Ignoring message from topic \"%s\"", topic); + return; + } + msgtype++; + if (unlikely(!*msgtype)) { + error_report("Message type empty. 
Ignoring message from topic \"%s\"", topic); + return; + } + +#ifdef ACLK_LOG_CONVERSATION_DIR +#define FN_MAX_LEN 512 + char filename[FN_MAX_LEN]; + int logfd; + snprintf(filename, FN_MAX_LEN, ACLK_LOG_CONVERSATION_DIR "/%010d-rx-%s.bin", ACLK_GET_CONV_LOG_NEXT(), msgtype); + logfd = open(filename, O_CREAT | O_TRUNC | O_WRONLY, S_IRUSR | S_IWUSR ); + if(logfd < 0) + error("Error opening ACLK Conversation logfile \"%s\" for RX message.", filename); + write(logfd, msg, msglen); + close(logfd); +#endif + + aclk_handle_new_cloud_msg(msgtype, msg, msglen); +} + +static inline void msg_callback(const char *topic, const void *msg, size_t msglen, int qos) { + if (aclk_use_new_cloud_arch) + msg_callback_new_protocol(topic, msg, msglen, qos); + else + msg_callback_old_protocol(topic, msg, msglen, qos); +} +#endif /* ENABLE_NEW_CLOUD_PROTOCOL */ + static void puback_callback(uint16_t packet_id) { if (++aclk_pubacks_per_conn == ACLK_PUBACKS_CONN_STABLE) @@ -250,7 +283,7 @@ static void puback_callback(uint16_t packet_id) #endif if (aclk_shared_state.mqtt_shutdown_msg_id == (int)packet_id) { - error("Got PUBACK for shutdown message. Can exit gracefully."); + info("Shutdown message has been acknowledged by the cloud. Exiting gracefully"); aclk_shared_state.mqtt_shutdown_msg_rcvd = 1; } } @@ -268,6 +301,8 @@ static int read_query_thread_count() return threads; } +void aclk_graceful_disconnect(mqtt_wss_client client); + /* Keeps connection alive and handles all network comms. * Returns on error or when netdata is shutting down. * @param client instance of mqtt_wss_client @@ -281,7 +316,16 @@ static int handle_connection(mqtt_wss_client client) // timeout 1000 to check at least once a second // for netdata_exit if (mqtt_wss_service(client, 1000) < 0){ - error("Connection Error or Dropped"); + error_report("Connection Error or Dropped"); + return 1; + } + + if (disconnect_req) { + disconnect_req = 0; + aclk_graceful_disconnect(client); + aclk_queue_unlock(); + aclk_shared_state.mqtt_shutdown_msg_id = -1; + aclk_shared_state.mqtt_shutdown_msg_rcvd = 0; return 1; } @@ -298,10 +342,21 @@ static int handle_connection(mqtt_wss_client client) return 0; } +inline static int aclk_popcorn_check() +{ + ACLK_SHARED_STATE_LOCK; + if (unlikely(aclk_shared_state.agent_state == ACLK_HOST_INITIALIZING)) { + ACLK_SHARED_STATE_UNLOCK; + return 1; + } + ACLK_SHARED_STATE_UNLOCK; + return 0; +} + inline static int aclk_popcorn_check_bump() { ACLK_SHARED_STATE_LOCK; - if (unlikely(aclk_shared_state.agent_state == AGENT_INITIALIZING)) { + if (unlikely(aclk_shared_state.agent_state == ACLK_HOST_INITIALIZING)) { aclk_shared_state.last_popcorn_interrupt = now_realtime_sec(); ACLK_SHARED_STATE_UNLOCK; return 1; @@ -323,11 +378,6 @@ static inline void queue_connect_payloads(void) static inline void mqtt_connected_actions(mqtt_wss_client client) { - // TODO global vars? 
- usec_t now = now_realtime_usec(); - aclk_session_sec = now / USEC_PER_SEC; - aclk_session_us = now % USEC_PER_SEC; - const char *topic = aclk_get_topic(ACLK_TOPICID_COMMAND); if (!topic) @@ -335,16 +385,34 @@ static inline void mqtt_connected_actions(mqtt_wss_client client) else mqtt_wss_subscribe(client, topic, 1); +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + if (aclk_use_new_cloud_arch) { + topic = aclk_get_topic(ACLK_TOPICID_CMD_NG_V1); + if (!topic) + error("Unable to fetch topic for protobuf COMMAND (to subscribe)"); + else + mqtt_wss_subscribe(client, topic, 1); + } +#endif + aclk_stats_upd_online(1); aclk_connected = 1; aclk_pubacks_per_conn = 0; - ACLK_SHARED_STATE_LOCK; - if (aclk_shared_state.agent_state != AGENT_INITIALIZING) { - error("Sending `connect` payload immediately as popcorning was finished already."); - queue_connect_payloads(); +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + if (!aclk_use_new_cloud_arch) { +#endif + ACLK_SHARED_STATE_LOCK; + if (aclk_shared_state.agent_state != ACLK_HOST_INITIALIZING) { + error("Sending `connect` payload immediately as popcorning was finished already."); + queue_connect_payloads(); + } + ACLK_SHARED_STATE_UNLOCK; +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + } else { + aclk_send_agent_connection_update(client, 1); } - ACLK_SHARED_STATE_UNLOCK; +#endif } /* Waits until agent is ready or needs to exit @@ -354,29 +422,29 @@ static inline void mqtt_connected_actions(mqtt_wss_client client) * @return 0 - Popcorning Finished - Agent STABLE, * !0 - netdata_exit */ -static int wait_popcorning_finishes(mqtt_wss_client client, struct aclk_query_threads *query_threads) +static int wait_popcorning_finishes() { time_t elapsed; int need_wait; + if (aclk_use_new_cloud_arch) + return 0; + while (!netdata_exit) { ACLK_SHARED_STATE_LOCK; - if (likely(aclk_shared_state.agent_state != AGENT_INITIALIZING)) { + if (likely(aclk_shared_state.agent_state != ACLK_HOST_INITIALIZING)) { ACLK_SHARED_STATE_UNLOCK; return 0; } elapsed = now_realtime_sec() - aclk_shared_state.last_popcorn_interrupt; if (elapsed >= ACLK_STABLE_TIMEOUT) { - aclk_shared_state.agent_state = AGENT_STABLE; + aclk_shared_state.agent_state = ACLK_HOST_STABLE; ACLK_SHARED_STATE_UNLOCK; - error("ACLK localhost popocorn finished"); - if (unlikely(!query_threads->thread_list)) - aclk_query_threads_start(query_threads, client); - queue_connect_payloads(); + error("ACLK localhost popcorn timer finished"); return 0; } ACLK_SHARED_STATE_UNLOCK; need_wait = ACLK_STABLE_TIMEOUT - elapsed; - error("ACLK localhost popocorn wait %d seconds longer", need_wait); + error("ACLK localhost popcorn timer - wait %d seconds longer", need_wait); sleep(need_wait); } return 1; @@ -384,10 +452,16 @@ static int wait_popcorning_finishes(mqtt_wss_client client, struct aclk_query_th void aclk_graceful_disconnect(mqtt_wss_client client) { - error("Preparing to Gracefully Shutdown the ACLK"); + info("Preparing to gracefully shutdown ACLK connection"); aclk_queue_lock(); aclk_queue_flush(); - aclk_shared_state.mqtt_shutdown_msg_id = aclk_send_app_layer_disconnect(client, "graceful"); +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + if (aclk_use_new_cloud_arch) + aclk_shared_state.mqtt_shutdown_msg_id = aclk_send_agent_connection_update(client, 0); + else +#endif + aclk_shared_state.mqtt_shutdown_msg_id = aclk_send_app_layer_disconnect(client, "graceful"); + time_t t = now_monotonic_sec(); while (!mqtt_wss_service(client, 100)) { if (now_monotonic_sec() - t >= 2) { @@ -395,14 +469,16 @@ void aclk_graceful_disconnect(mqtt_wss_client client) break; } if 
(aclk_shared_state.mqtt_shutdown_msg_rcvd) {
-            error("MQTT App Layer `disconnect` message sent successfully");
+            info("MQTT App Layer `disconnect` message sent successfully");
             break;
         }
     }
+    info("ACLK link is down");
+    log_access("ACLK DISCONNECTED");
     aclk_stats_upd_online(0);
     aclk_connected = 0;
 
-    error("Attempting to Gracefully Shutdown MQTT/WSS connection");
+    info("Attempting to gracefully shutdown the MQTT/WSS connection");
     mqtt_wss_disconnect(client, 1000);
 }
 
@@ -433,7 +509,7 @@ static unsigned long aclk_reconnect_delay() {
     return aclk_tbeb_delay(0, aclk_env->backoff.base, aclk_env->backoff.min_s, aclk_env->backoff.max_s);
 }
 
-/* Block till aclk_reconnect_delay is satisifed or netdata_exit is signalled
+/* Block till aclk_reconnect_delay is satisfied or netdata_exit is signalled
  * @return 0 - Go ahead and connect (delay expired)
  *         1 - netdata_exit
  */
@@ -455,7 +531,7 @@ static int aclk_block_till_recon_allowed() {
         sleep_usec(recon_delay * USEC_PER_MS);
         recon_delay = 0;
     }
-    return 0;
+    return netdata_exit;
 }
 
 #ifndef ACLK_DISABLE_CHALLENGE
@@ -477,7 +553,7 @@ static int aclk_get_transport_idx(aclk_env_t *env) {
 
 /* Attempts to make a connection to MQTT broker over WSS
  * @param client instance of mqtt_wss_client
- * @return 0 - Successfull Connection,
+ * @return 0 - Successful Connection,
  *        <0 - Irrecoverable Error -> Kill ACLK,
  *        >0 - netdata_exit
 */
@@ -498,7 +574,7 @@ static int aclk_attempt_to_connect(mqtt_wss_client client)
     url_t mqtt_url;
 #endif
 
-    json_object *lwt;
+    json_object *lwt = NULL;
 
     while (!netdata_exit) {
         char *cloud_base_url = appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", NULL);
@@ -529,9 +605,17 @@ static int aclk_attempt_to_connect(mqtt_wss_client client)
             .will_topic = "lwt",
             .will_msg = NULL,
             .will_flags = MQTT_WSS_PUB_QOS2,
-            .keep_alive = 60
+            .keep_alive = 60,
+            .drop_on_publish_fail = 1
         };
 
+#if defined(ENABLE_NEW_CLOUD_PROTOCOL) && defined(ACLK_NEWARCH_DEVMODE)
+        aclk_use_new_cloud_arch = 1;
+        info("Switching ACLK to the new protobuf protocol because ACLK_NEWARCH_DEVMODE is defined.");
+#else
+        aclk_use_new_cloud_arch = 0;
+#endif
+
 #ifndef ACLK_DISABLE_CHALLENGE
         if (aclk_env) {
             aclk_env_t_destroy(aclk_env);
@@ -547,6 +631,24 @@ static int aclk_attempt_to_connect(mqtt_wss_client client)
             continue;
         }
 
+        if (netdata_exit)
+            return 1;
+
+#ifndef ACLK_NEWARCH_DEVMODE
+        if (aclk_env->encoding == ACLK_ENC_PROTO) {
+#ifndef ENABLE_NEW_CLOUD_PROTOCOL
+            error("Cloud requested New Cloud Protocol to be used but this agent cannot support it!");
+            continue;
+#endif
+            if (!aclk_env_has_capa("proto")) {
+                error("Can't use encoding=proto without at least the \"proto\" capability.");
+                continue;
+            }
+            info("Switching ACLK to the new protobuf protocol as requested by the /env response.");
+            aclk_use_new_cloud_arch = 1;
+        }
+#endif
+
         memset(&auth_url, 0, sizeof(url_t));
         if (url_parse(aclk_env->auth_endpoint, &auth_url)) {
             error("Parsing URL returned by env endpoint for authentication failed. \"%s\"", aclk_env->auth_endpoint);
@@ -563,7 +665,11 @@ static int aclk_attempt_to_connect(mqtt_wss_client client)
 
         // aclk_get_topic moved here as during OTP we
         // generate the topic cache
-        mqtt_conn_params.will_topic = aclk_get_topic(ACLK_TOPICID_METADATA);
+        if (aclk_use_new_cloud_arch)
+            mqtt_conn_params.will_topic = aclk_get_topic(ACLK_TOPICID_AGENT_CONN);
+        else
+            mqtt_conn_params.will_topic = aclk_get_topic(ACLK_TOPICID_METADATA);
+
         if (!mqtt_conn_params.will_topic) {
             error("Couldn't get LWT topic. 
Will not send LWT."); continue; @@ -584,9 +690,21 @@ static int aclk_attempt_to_connect(mqtt_wss_client client) } #endif - lwt = aclk_generate_disconnect(NULL); - mqtt_conn_params.will_msg = json_object_to_json_string_ext(lwt, JSON_C_TO_STRING_PLAIN); - mqtt_conn_params.will_msg_len = strlen(mqtt_conn_params.will_msg); + aclk_session_newarch = now_realtime_usec(); + aclk_session_sec = aclk_session_newarch / USEC_PER_SEC; + aclk_session_us = aclk_session_newarch % USEC_PER_SEC; + +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + if (aclk_use_new_cloud_arch) { + mqtt_conn_params.will_msg = aclk_generate_lwt(&mqtt_conn_params.will_msg_len); + } else { +#endif + lwt = aclk_generate_disconnect(NULL); + mqtt_conn_params.will_msg = json_object_to_json_string_ext(lwt, JSON_C_TO_STRING_PLAIN); + mqtt_conn_params.will_msg_len = strlen(mqtt_conn_params.will_msg); +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + } +#endif #ifdef ACLK_DISABLE_CHALLENGE ret = mqtt_wss_connect(client, base_url.host, base_url.port, &mqtt_conn_params, ACLK_SSL_FLAGS, &proxy_conf); @@ -600,15 +718,19 @@ static int aclk_attempt_to_connect(mqtt_wss_client client) freez((char*)mqtt_conn_params.username); #endif - json_object_put(lwt); + if (aclk_use_new_cloud_arch) + freez((char *)mqtt_conn_params.will_msg); + else + json_object_put(lwt); if (!ret) { - info("MQTTWSS connection succeeded"); + info("ACLK connection successfully established"); + log_access("ACLK CONNECTED"); mqtt_connected_actions(client); return 0; } - error("Connect failed\n"); + error_report("Connect failed"); } return 1; @@ -659,11 +781,20 @@ void *aclk_main(void *ptr) if (wait_till_agent_claim_ready()) goto exit; +#ifdef ENABLE_NEW_CLOUD_PROTOCOL if (!(mqttwss_client = mqtt_wss_new("mqtt_wss", aclk_mqtt_wss_log_cb, msg_callback, puback_callback))) { +#else + if (!(mqttwss_client = mqtt_wss_new("mqtt_wss", aclk_mqtt_wss_log_cb, msg_callback_old_protocol, puback_callback))) { +#endif error("Couldn't initialize MQTT_WSS network library"); goto exit; } + // Enable MQTT buffer growth if necessary + // e.g. 
old cloud architecture clients with huge nodes + // that send JSON payloads of 10 MB as single messages + mqtt_wss_set_max_buf_size(mqttwss_client, 25*1024*1024); + aclk_stats_enabled = config_get_boolean(CONFIG_SECTION_CLOUD, "statistics", CONFIG_BOOLEAN_YES); if (aclk_stats_enabled) { stats_thread = callocz(1, sizeof(struct aclk_stats_thread)); @@ -683,12 +814,19 @@ void *aclk_main(void *ptr) // warning this assumes the popcorning is relative short (3s) // if that changes call mqtt_wss_service from within // to keep OpenSSL, WSS and MQTT connection alive - if (wait_popcorning_finishes(mqttwss_client, &query_threads)) + if (wait_popcorning_finishes()) goto exit_full; + + if (unlikely(!query_threads.thread_list)) + aclk_query_threads_start(&query_threads, mqttwss_client); + + if (!aclk_use_new_cloud_arch) + queue_connect_payloads(); - if (!handle_connection(mqttwss_client)) { + if (handle_connection(mqttwss_client)) { aclk_stats_upd_online(0); aclk_connected = 0; + log_access("ACLK DISCONNECTED"); } } while (!netdata_exit); @@ -721,10 +859,10 @@ exit: // fix this in both old and new ACLK extern void health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, RRDHOST *host); -void aclk_alarm_reload(void) +void ng_aclk_alarm_reload(void) { ACLK_SHARED_STATE_LOCK; - if (unlikely(aclk_shared_state.agent_state == AGENT_INITIALIZING)) { + if (unlikely(aclk_shared_state.agent_state == ACLK_HOST_INITIALIZING)) { ACLK_SHARED_STATE_UNLOCK; return; } @@ -733,7 +871,7 @@ void aclk_alarm_reload(void) aclk_queue_query(aclk_query_new(METADATA_ALARMS)); } -int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae) +int ng_aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae) { BUFFER *local_buffer; json_object *msg; @@ -742,7 +880,7 @@ int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae) return 0; ACLK_SHARED_STATE_LOCK; - if (unlikely(aclk_shared_state.agent_state == AGENT_INITIALIZING)) { + if (unlikely(aclk_shared_state.agent_state == ACLK_HOST_INITIALIZING)) { ACLK_SHARED_STATE_UNLOCK; return 0; } @@ -764,11 +902,11 @@ int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae) return 0; } -int aclk_update_chart(RRDHOST *host, char *chart_name, int create) +int ng_aclk_update_chart(RRDHOST *host, char *chart_name, int create) { struct aclk_query *query; - if (aclk_popcorn_check_bump()) + if (host == localhost ? aclk_popcorn_check_bump() : aclk_popcorn_check()) return 0; query = aclk_query_new(create ? 
CHART_NEW : CHART_DEL); @@ -788,11 +926,11 @@ int aclk_update_chart(RRDHOST *host, char *chart_name, int create) * Add a new collector to the list * If it exists, update the chart count */ -void aclk_add_collector(RRDHOST *host, const char *plugin_name, const char *module_name) +void ng_aclk_add_collector(RRDHOST *host, const char *plugin_name, const char *module_name) { struct aclk_query *query; struct _collector *tmp_collector; - if (unlikely(!netdata_ready)) { + if (unlikely(!netdata_ready || aclk_use_new_cloud_arch)) { return; } @@ -831,11 +969,11 @@ void aclk_add_collector(RRDHOST *host, const char *plugin_name, const char *modu * This function will release the memory used and schedule * a cloud update */ -void aclk_del_collector(RRDHOST *host, const char *plugin_name, const char *module_name) +void ng_aclk_del_collector(RRDHOST *host, const char *plugin_name, const char *module_name) { struct aclk_query *query; struct _collector *tmp_collector; - if (unlikely(!netdata_ready)) { + if (unlikely(!netdata_ready || aclk_use_new_cloud_arch)) { return; } @@ -872,26 +1010,165 @@ void aclk_del_collector(RRDHOST *host, const char *plugin_name, const char *modu aclk_queue_query(query); } -struct label *add_aclk_host_labels(struct label *label) { -#ifdef ENABLE_ACLK - ACLK_PROXY_TYPE aclk_proxy; - char *proxy_str; - aclk_get_proxy(&aclk_proxy); +void ng_aclk_host_state_update(RRDHOST *host, int cmd) +{ + uuid_t node_id; + int ret; - switch(aclk_proxy) { - case PROXY_TYPE_SOCKS5: - proxy_str = "SOCKS5"; - break; - case PROXY_TYPE_HTTP: - proxy_str = "HTTP"; - break; - default: - proxy_str = "none"; - break; + if (!aclk_connected || !aclk_use_new_cloud_arch) + return; + + ret = get_node_id(&host->host_uuid, &node_id); + if (ret > 0) { + // this means we were not able to check if node_id already present + error("Unable to check for node_id. 
Ignoring the host state update."); + return; + } + if (ret < 0) { + // node_id not found + aclk_query_t create_query; + create_query = aclk_query_new(REGISTER_NODE); + rrdhost_aclk_state_lock(localhost); + create_query->data.node_creation.claim_id = strdupz(localhost->aclk_state.claimed_id); + rrdhost_aclk_state_unlock(localhost); + create_query->data.node_creation.hops = (uint32_t) host->system_info->hops; + create_query->data.node_creation.hostname = strdupz(host->hostname); + create_query->data.node_creation.machine_guid = strdupz(host->machine_guid); + info("Registering host=%s, hops=%u",host->machine_guid, host->system_info->hops); + aclk_queue_query(create_query); + return; } - label = add_label_to_list(label, "_aclk_impl", "Next Generation", LABEL_SOURCE_AUTO); - return add_label_to_list(label, "_aclk_proxy", proxy_str, LABEL_SOURCE_AUTO); + + aclk_query_t query = aclk_query_new(NODE_STATE_UPDATE); + query->data.node_update.hops = (uint32_t) host->system_info->hops; + rrdhost_aclk_state_lock(localhost); + query->data.node_update.claim_id = strdupz(localhost->aclk_state.claimed_id); + rrdhost_aclk_state_unlock(localhost); + query->data.node_update.live = cmd; + query->data.node_update.node_id = mallocz(UUID_STR_LEN); + uuid_unparse_lower(node_id, (char*)query->data.node_update.node_id); + query->data.node_update.queryable = 1; + query->data.node_update.session_id = aclk_session_newarch; + info("Queuing status update for node=%s, live=%d, hops=%u",(char*)query->data.node_update.node_id, cmd, + host->system_info->hops); + aclk_queue_query(query); +} + +void aclk_send_node_instances() +{ + struct node_instance_list *list_head = get_node_list(); + struct node_instance_list *list = list_head; + if (unlikely(!list)) { + error_report("Failure to get_node_list from DB!"); + return; + } + while (!uuid_is_null(list->host_id)) { + if (!uuid_is_null(list->node_id)) { + aclk_query_t query = aclk_query_new(NODE_STATE_UPDATE); + rrdhost_aclk_state_lock(localhost); + query->data.node_update.claim_id = strdupz(localhost->aclk_state.claimed_id); + rrdhost_aclk_state_unlock(localhost); + query->data.node_update.live = list->live; + query->data.node_update.hops = list->hops; + query->data.node_update.node_id = mallocz(UUID_STR_LEN); + uuid_unparse_lower(list->node_id, (char*)query->data.node_update.node_id); + query->data.node_update.queryable = 1; + query->data.node_update.session_id = aclk_session_newarch; + info("Queuing status update for node=%s, live=%d, hops=%d",(char*)query->data.node_update.node_id, + list->live, + list->hops); + aclk_queue_query(query); + } else { + aclk_query_t create_query; + create_query = aclk_query_new(REGISTER_NODE); + rrdhost_aclk_state_lock(localhost); + create_query->data.node_creation.claim_id = strdupz(localhost->aclk_state.claimed_id); + rrdhost_aclk_state_unlock(localhost); + create_query->data.node_creation.hops = list->hops; + create_query->data.node_creation.hostname = list->hostname; + create_query->data.node_creation.machine_guid = mallocz(UUID_STR_LEN); + uuid_unparse_lower(list->host_id, (char*)create_query->data.node_creation.machine_guid); + info("Queuing registration for host=%s, hops=%d",(char*)create_query->data.node_creation.machine_guid, + list->hops); + aclk_queue_query(create_query); + } + + list++; + } + freez(list_head); +} + +void aclk_send_bin_msg(char *msg, size_t msg_len, enum aclk_topics subtopic, const char *msgname) +{ + aclk_send_bin_message_subtopic_pid(mqttwss_client, msg, msg_len, subtopic, msgname); +} + +char *ng_aclk_state(void) +{ + 
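+    /*
+     * Builds the human-readable ACLK state report (this is what the
+     * `netdatacli aclk-state` command prints -- an assumption based on the
+     * CLI wiring elsewhere in this patch). Illustrative output for a
+     * claimed, connected agent; the claim id below is a made-up example:
+     *
+     *   ACLK Available: Yes
+     *   ACLK Implementation: Next Generation
+     *   New Cloud Protocol Support: Yes
+     *   Claimed: Yes
+     *   Claimed Id: 11111111-2222-3333-4444-555555555555
+     *   Online: Yes
+     *   Used Cloud Protocol: New
+     */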
BUFFER *wb = buffer_create(1024); + char *ret; + + buffer_strcat(wb, + "ACLK Available: Yes\n" + "ACLK Implementation: Next Generation\n" +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + "New Cloud Protocol Support: Yes\n" #else - return label; + "New Cloud Protocol Support: No\n" #endif + "Claimed: " + ); + + char *agent_id = is_agent_claimed(); + if (agent_id == NULL) + buffer_strcat(wb, "No\n"); + else { + buffer_sprintf(wb, "Yes\nClaimed Id: %s\n", agent_id); + freez(agent_id); + } + + buffer_sprintf(wb, "Online: %s\nUsed Cloud Protocol: %s", aclk_connected ? "Yes" : "No", aclk_use_new_cloud_arch ? "New" : "Legacy"); + + ret = strdupz(buffer_tostring(wb)); + buffer_free(wb); + return ret; +} + +char *ng_aclk_state_json(void) +{ + json_object *tmp, *msg = json_object_new_object(); + + tmp = json_object_new_boolean(1); + json_object_object_add(msg, "aclk-available", tmp); + + tmp = json_object_new_string("Next Generation"); + json_object_object_add(msg, "aclk-implementation", tmp); + +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + tmp = json_object_new_boolean(1); +#else + tmp = json_object_new_boolean(0); +#endif + json_object_object_add(msg, "new-cloud-protocol-supported", tmp); + + char *agent_id = is_agent_claimed(); + tmp = json_object_new_boolean(agent_id != NULL); + json_object_object_add(msg, "agent-claimed", tmp); + + if (agent_id) { + tmp = json_object_new_string(agent_id); + freez(agent_id); + } else + tmp = NULL; + json_object_object_add(msg, "claimed-id", tmp); + + tmp = json_object_new_boolean(aclk_connected); + json_object_object_add(msg, "online", tmp); + + tmp = json_object_new_string(aclk_use_new_cloud_arch ? "New" : "Legacy"); + json_object_object_add(msg, "used-cloud-protocol", tmp); + + char *str = strdupz(json_object_to_json_string_ext(msg, JSON_C_TO_STRING_PLAIN)); + json_object_put(msg); + return str; } diff --git a/aclk/aclk.h b/aclk/aclk.h index b02b93d75..444de86be 100644 --- a/aclk/aclk.h +++ b/aclk/aclk.h @@ -2,83 +2,54 @@ #ifndef ACLK_H #define ACLK_H -typedef struct aclk_rrdhost_state { - char *claimed_id; // Claimed ID if host has one otherwise NULL -} aclk_rrdhost_state; - -#include "../daemon/common.h" +#include "daemon/common.h" #include "aclk_util.h" - -// version for aclk legacy (old cloud arch) -#define ACLK_VERSION 2 - -// Define ACLK Feature Version Boundaries Here -#define ACLK_V_COMPRESSION 2 +#include "aclk_rrdhost_state.h" // How many MQTT PUBACKs we need to get to consider connection // stable for the purposes of TBEB (truncated binary exponential backoff) #define ACLK_PUBACKS_CONN_STABLE 3 -// TODO get rid of this shit -extern int aclk_disable_runtime; -extern int aclk_disable_single_updates; -extern int aclk_kill_link; -extern int aclk_connected; - extern time_t aclk_block_until; -extern usec_t aclk_session_us; -extern time_t aclk_session_sec; - -extern aclk_env_t *aclk_env; +extern int disconnect_req; void *aclk_main(void *ptr); -void aclk_single_update_disable(); -void aclk_single_update_enable(); - -#define NETDATA_ACLK_HOOK \ - { .name = "ACLK_Main", \ - .config_section = NULL, \ - .config_name = NULL, \ - .enabled = 1, \ - .thread = NULL, \ - .init_routine = NULL, \ - .start_routine = aclk_main }, extern netdata_mutex_t aclk_shared_state_mutex; #define ACLK_SHARED_STATE_LOCK netdata_mutex_lock(&aclk_shared_state_mutex) #define ACLK_SHARED_STATE_UNLOCK netdata_mutex_unlock(&aclk_shared_state_mutex) -typedef enum aclk_agent_state { - AGENT_INITIALIZING, - AGENT_STABLE -} ACLK_AGENT_STATE; extern struct aclk_shared_state { ACLK_AGENT_STATE agent_state; time_t 
last_popcorn_interrupt; // To wait for `disconnect` message PUBACK - // when shuting down + // when shutting down // at the same time if > 0 we know link is // shutting down int mqtt_shutdown_msg_id; int mqtt_shutdown_msg_rcvd; } aclk_shared_state; -void aclk_alarm_reload(void); -int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae); +void ng_aclk_alarm_reload(void); +int ng_aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae); -// TODO this is for bacward compatibility with ACLK legacy -#define ACLK_CMD_CHART 1 -#define ACLK_CMD_CHARTDEL 0 /* Informs ACLK about created/deleted chart * @param create 0 - if chart was deleted, other if chart created */ -int aclk_update_chart(RRDHOST *host, char *chart_name, int create); +int ng_aclk_update_chart(RRDHOST *host, char *chart_name, int create); + +void ng_aclk_add_collector(RRDHOST *host, const char *plugin_name, const char *module_name); +void ng_aclk_del_collector(RRDHOST *host, const char *plugin_name, const char *module_name); + +void ng_aclk_host_state_update(RRDHOST *host, int cmd); + +void aclk_send_node_instances(void); -void aclk_add_collector(RRDHOST *host, const char *plugin_name, const char *module_name); -void aclk_del_collector(RRDHOST *host, const char *plugin_name, const char *module_name); +void aclk_send_bin_msg(char *msg, size_t msg_len, enum aclk_topics subtopic, const char *msgname); -struct label *add_aclk_host_labels(struct label *label); +char *ng_aclk_state(void); +char *ng_aclk_state_json(void); #endif /* ACLK_H */ diff --git a/aclk/aclk_alarm_api.c b/aclk/aclk_alarm_api.c new file mode 100644 index 000000000..7df51a7b5 --- /dev/null +++ b/aclk/aclk_alarm_api.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "aclk_alarm_api.h" + +#include "aclk_query_queue.h" + +#include "aclk_util.h" + +#include "aclk.h" + +void aclk_send_alarm_log_health(struct alarm_log_health *log_health) +{ + aclk_query_t query = aclk_query_new(ALARM_LOG_HEALTH); + query->data.bin_payload.payload = generate_alarm_log_health(&query->data.bin_payload.size, log_health); + query->data.bin_payload.topic = ACLK_TOPICID_ALARM_HEALTH; + query->data.bin_payload.msg_name = "AlarmLogHealth"; + QUEUE_IF_PAYLOAD_PRESENT(query); +} + +void aclk_send_alarm_log_entry(struct alarm_log_entry *log_entry) +{ + size_t payload_size; + char *payload = generate_alarm_log_entry(&payload_size, log_entry); + + aclk_send_bin_msg(payload, payload_size, ACLK_TOPICID_ALARM_LOG, "AlarmLogEntry"); +} + +void aclk_send_provide_alarm_cfg(struct provide_alarm_configuration *cfg) +{ + aclk_query_t query = aclk_query_new(ALARM_PROVIDE_CFG); + query->data.bin_payload.payload = generate_provide_alarm_configuration(&query->data.bin_payload.size, cfg); + query->data.bin_payload.topic = ACLK_TOPICID_ALARM_CONFIG; + query->data.bin_payload.msg_name = "ProvideAlarmConfiguration"; + QUEUE_IF_PAYLOAD_PRESENT(query); +} + +void aclk_send_alarm_snapshot(alarm_snapshot_proto_ptr_t snapshot) +{ + aclk_query_t query = aclk_query_new(ALARM_SNAPSHOT); + query->data.bin_payload.payload = generate_alarm_snapshot_bin(&query->data.bin_payload.size, snapshot); + query->data.bin_payload.topic = ACLK_TOPICID_ALARM_SNAPSHOT; + query->data.bin_payload.msg_name = "AlarmSnapshot"; + QUEUE_IF_PAYLOAD_PRESENT(query); +} diff --git a/aclk/aclk_alarm_api.h b/aclk/aclk_alarm_api.h new file mode 100644 index 000000000..e3fa92b5b --- /dev/null +++ b/aclk/aclk_alarm_api.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef ACLK_ALARM_API_H +#define ACLK_ALARM_API_H + 
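+/*
+ * Thin wrappers around the protobuf schema generators: each helper below
+ * serializes one alarm-related message and hands the binary payload to the
+ * ACLK queue (or sends it directly, for log entries) on the matching topic.
+ *
+ * Illustrative call sequence -- a sketch only; how the entry struct gets
+ * populated from an ALARM_ENTRY lives in the health/ACLK glue, not here:
+ *
+ *   struct alarm_log_entry entry;
+ *   memset(&entry, 0, sizeof(entry));
+ *   // ... fill entry from an ALARM_ENTRY ...
+ *   aclk_send_alarm_log_entry(&entry);  // serialize + publish on the alarm log topic
+ */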
+#include "../daemon/common.h" +#include "schema-wrappers/schema_wrappers.h" + +void aclk_send_alarm_log_health(struct alarm_log_health *log_health); +void aclk_send_alarm_log_entry(struct alarm_log_entry *log_entry); +void aclk_send_provide_alarm_cfg(struct provide_alarm_configuration *cfg); +void aclk_send_alarm_snapshot(alarm_snapshot_proto_ptr_t snapshot); + +#endif /* ACLK_ALARM_API_H */ diff --git a/aclk/aclk_api.c b/aclk/aclk_api.c new file mode 100644 index 000000000..251f5b708 --- /dev/null +++ b/aclk/aclk_api.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +#include "libnetdata/libnetdata.h" +#include "database/rrd.h" + +#ifdef ACLK_NG +#include "aclk.h" +#endif +#ifdef ACLK_LEGACY +#include "legacy/agent_cloud_link.h" +#endif + +int aclk_connected = 0; +int aclk_kill_link = 0; + +usec_t aclk_session_us = 0; +time_t aclk_session_sec = 0; + +int aclk_disable_runtime = 0; +int aclk_disable_single_updates = 0; + +int aclk_stats_enabled; + +#ifdef ACLK_NG +int aclk_ng = 1; +#else +int aclk_ng = 0; +#endif + +#define ACLK_IMPL_KEY_NAME "aclk implementation" + +#ifdef ENABLE_ACLK +void *aclk_starter(void *ptr) { + char *aclk_impl_req = config_get(CONFIG_SECTION_CLOUD, ACLK_IMPL_KEY_NAME, "ng"); + + if (!strcasecmp(aclk_impl_req, "ng")) { + aclk_ng = 1; + } else if (!strcasecmp(aclk_impl_req, "legacy")) { + aclk_ng = 0; + } else { + error("Unknown value \"%s\" of key \"" ACLK_IMPL_KEY_NAME "\" in section \"" CONFIG_SECTION_CLOUD "\". Trying default ACLK %s.", aclk_impl_req, aclk_ng ? "NG" : "Legacy"); + } + +#ifndef ACLK_NG + if (aclk_ng) { + error("Configuration requests ACLK-NG but it is not available in this agent. Switching to Legacy."); + aclk_ng = 0; + } +#endif + +#ifndef ACLK_LEGACY + if (!aclk_ng) { + error("Configuration requests ACLK Legacy but it is not available in this agent. 
Switching to NG."); + aclk_ng = 1; + } +#endif + +#ifdef ACLK_NG + if (aclk_ng) { + info("Starting ACLK-NG"); + return aclk_main(ptr); + } +#endif +#ifdef ACLK_LEGACY + if (!aclk_ng) { + info("Starting ACLK Legacy"); + return legacy_aclk_main(ptr); + } +#endif + error_report("No ACLK could be started"); + return NULL; +} + +void aclk_single_update_disable() +{ + aclk_disable_single_updates = 1; +} + +void aclk_single_update_enable() +{ + aclk_disable_single_updates = 0; +} + +void aclk_alarm_reload(void) +{ +#ifdef ACLK_NG + if (aclk_ng) + ng_aclk_alarm_reload(); +#endif +#ifdef ACLK_LEGACY + if (!aclk_ng) + legacy_aclk_alarm_reload(); +#endif +} + +int aclk_update_chart(RRDHOST *host, char *chart_name, int create) +{ +#ifdef ACLK_NG + if (aclk_ng) + return ng_aclk_update_chart(host, chart_name, create); +#endif +#ifdef ACLK_LEGACY + if (!aclk_ng) + return legacy_aclk_update_chart(host, chart_name, create); +#endif + error_report("No usable aclk_update_chart implementation"); + return 1; +} + +int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae) +{ +#ifdef ACLK_NG + if (aclk_ng) + return ng_aclk_update_alarm(host, ae); +#endif +#ifdef ACLK_LEGACY + if (!aclk_ng) + return legacy_aclk_update_alarm(host, ae); +#endif + error_report("No usable aclk_update_alarm implementation"); + return 1; +} + +void aclk_add_collector(RRDHOST *host, const char *plugin_name, const char *module_name) +{ +#ifdef ACLK_NG + if (aclk_ng) + return ng_aclk_add_collector(host, plugin_name, module_name); +#endif +#ifdef ACLK_LEGACY + if (!aclk_ng) + return legacy_aclk_add_collector(host, plugin_name, module_name); +#endif + error_report("No usable aclk_add_collector implementation"); +} + +void aclk_del_collector(RRDHOST *host, const char *plugin_name, const char *module_name) +{ +#ifdef ACLK_NG + if (aclk_ng) + return ng_aclk_del_collector(host, plugin_name, module_name); +#endif +#ifdef ACLK_LEGACY + if (!aclk_ng) + return legacy_aclk_del_collector(host, plugin_name, module_name); +#endif + error_report("No usable aclk_del_collector implementation"); +} + +void aclk_host_state_update(RRDHOST *host, int connect) +{ +#ifdef ACLK_NG + if (aclk_ng) + return ng_aclk_host_state_update(host, connect); +#endif +#ifdef ACLK_LEGACY + if (!aclk_ng) + return legacy_aclk_host_state_update(host, connect); +#endif + error_report("Couldn't use any version of aclk_host_state_update"); +} + +#endif /* ENABLE_ACLK */ + +struct label *add_aclk_host_labels(struct label *label) { +#ifdef ACLK_NG + label = add_label_to_list(label, "_aclk_ng_available", "true", LABEL_SOURCE_AUTO); +#else + label = add_label_to_list(label, "_aclk_ng_available", "false", LABEL_SOURCE_AUTO); +#endif +#ifdef ACLK_LEGACY + label = add_label_to_list(label, "_aclk_legacy_available", "true", LABEL_SOURCE_AUTO); +#else + label = add_label_to_list(label, "_aclk_legacy_available", "false", LABEL_SOURCE_AUTO); +#endif +#ifdef ENABLE_ACLK + ACLK_PROXY_TYPE aclk_proxy; + char *proxy_str; + aclk_get_proxy(&aclk_proxy); + + switch(aclk_proxy) { + case PROXY_TYPE_SOCKS5: + proxy_str = "SOCKS5"; + break; + case PROXY_TYPE_HTTP: + proxy_str = "HTTP"; + break; + default: + proxy_str = "none"; + break; + } + + label = add_label_to_list(label, "_aclk_impl", aclk_ng ? 
"Next Generation" : "Legacy", LABEL_SOURCE_AUTO); + label = add_label_to_list(label, "_aclk_proxy", proxy_str, LABEL_SOURCE_AUTO); +#endif + return label; +} + +char *aclk_state(void) { +#ifndef ENABLE_ACLK + return strdupz("ACLK Available: No"); +#else +#ifdef ACLK_NG + if (aclk_ng) + return ng_aclk_state(); +#endif +#ifdef ACLK_LEGACY + if (!aclk_ng) + return legacy_aclk_state(); +#endif +#endif /* ENABLE_ACLK */ + return NULL; +} + +char *aclk_state_json(void) { +#ifndef ENABLE_ACLK + return strdupz("{\"aclk-available\": false}"); +#else +#ifdef ACLK_NG + if (aclk_ng) + return ng_aclk_state_json(); +#endif +#ifdef ACLK_LEGACY + if (!aclk_ng) + return legacy_aclk_state_json(); +#endif +#endif /* ENABLE_ACLK */ + return NULL; +} diff --git a/aclk/aclk_api.h b/aclk/aclk_api.h new file mode 100644 index 000000000..9958b0e11 --- /dev/null +++ b/aclk/aclk_api.h @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +#ifndef ACLK_API_H +#define ACLK_API_H + +#include "libnetdata/libnetdata.h" + +#include "aclk_proxy.h" + +// TODO get rid global vars as soon as +// ACLK Legacy is removed +extern int aclk_connected; +extern int aclk_kill_link; + +extern usec_t aclk_session_us; +extern time_t aclk_session_sec; + +extern int aclk_disable_runtime; +extern int aclk_disable_single_updates; + +extern int aclk_stats_enabled; +extern int aclk_alert_reloaded; + +extern int aclk_ng; + +#ifdef ENABLE_ACLK +void *aclk_starter(void *ptr); + +void aclk_single_update_disable(); +void aclk_single_update_enable(); + +void aclk_alarm_reload(void); + +int aclk_update_chart(RRDHOST *host, char *chart_name, int create); +int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae); + +void aclk_add_collector(RRDHOST *host, const char *plugin_name, const char *module_name); +void aclk_del_collector(RRDHOST *host, const char *plugin_name, const char *module_name); + +void aclk_host_state_update(RRDHOST *host, int connect); + +#define NETDATA_ACLK_HOOK \ + { .name = "ACLK_Main", \ + .config_section = NULL, \ + .config_name = NULL, \ + .enabled = 1, \ + .thread = NULL, \ + .init_routine = NULL, \ + .start_routine = aclk_starter }, + +#endif + +struct label *add_aclk_host_labels(struct label *label); +char *aclk_state(void); +char *aclk_state_json(void); + +#endif /* ACLK_API_H */ diff --git a/aclk/aclk_charts_api.c b/aclk/aclk_charts_api.c new file mode 100644 index 000000000..4e1c466e8 --- /dev/null +++ b/aclk/aclk_charts_api.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +#include "aclk_charts_api.h" + +#include "aclk_query_queue.h" + +#define CHART_DIM_UPDATE_NAME "ChartsAndDimensionsUpdated" + +void aclk_chart_inst_update(char **payloads, size_t *payload_sizes, struct aclk_message_position *new_positions) +{ + aclk_query_t query = aclk_query_new(CHART_DIMS_UPDATE); + query->data.bin_payload.payload = generate_charts_updated(&query->data.bin_payload.size, payloads, payload_sizes, new_positions); + query->data.bin_payload.msg_name = CHART_DIM_UPDATE_NAME; + QUEUE_IF_PAYLOAD_PRESENT(query); +} + +void aclk_chart_dim_update(char **payloads, size_t *payload_sizes, struct aclk_message_position *new_positions) +{ + aclk_query_t query = aclk_query_new(CHART_DIMS_UPDATE); + query->data.bin_payload.topic = ACLK_TOPICID_CHART_DIMS; + query->data.bin_payload.payload = generate_chart_dimensions_updated(&query->data.bin_payload.size, payloads, payload_sizes, new_positions); + query->data.bin_payload.msg_name = CHART_DIM_UPDATE_NAME; + QUEUE_IF_PAYLOAD_PRESENT(query); +} + +void 
aclk_chart_inst_and_dim_update(char **payloads, size_t *payload_sizes, int *is_dim, struct aclk_message_position *new_positions, uint64_t batch_id) +{ + aclk_query_t query = aclk_query_new(CHART_DIMS_UPDATE); + query->data.bin_payload.topic = ACLK_TOPICID_CHART_DIMS; + query->data.bin_payload.payload = generate_charts_and_dimensions_updated(&query->data.bin_payload.size, payloads, payload_sizes, is_dim, new_positions, batch_id); + query->data.bin_payload.msg_name = CHART_DIM_UPDATE_NAME; + QUEUE_IF_PAYLOAD_PRESENT(query); +} + +void aclk_chart_config_updated(struct chart_config_updated *config_list, int list_size) +{ + aclk_query_t query = aclk_query_new(CHART_CONFIG_UPDATED); + query->data.bin_payload.topic = ACLK_TOPICID_CHART_CONFIGS_UPDATED; + query->data.bin_payload.payload = generate_chart_configs_updated(&query->data.bin_payload.size, config_list, list_size); + query->data.bin_payload.msg_name = "ChartConfigsUpdated"; + QUEUE_IF_PAYLOAD_PRESENT(query); +} + +void aclk_chart_reset(chart_reset_t reset) +{ + aclk_query_t query = aclk_query_new(CHART_RESET); + query->data.bin_payload.topic = ACLK_TOPICID_CHART_RESET; + query->data.bin_payload.payload = generate_reset_chart_messages(&query->data.bin_payload.size, reset); + query->data.bin_payload.msg_name = "ResetChartMessages"; + QUEUE_IF_PAYLOAD_PRESENT(query); +} + +void aclk_retention_updated(struct retention_updated *data) +{ + aclk_query_t query = aclk_query_new(RETENTION_UPDATED); + query->data.bin_payload.topic = ACLK_TOPICID_RETENTION_UPDATED; + query->data.bin_payload.payload = generate_retention_updated(&query->data.bin_payload.size, data); + query->data.bin_payload.msg_name = "RetentionUpdated"; + QUEUE_IF_PAYLOAD_PRESENT(query); +} + +void aclk_update_node_info(struct update_node_info *info) +{ + aclk_query_t query = aclk_query_new(UPDATE_NODE_INFO); + query->data.bin_payload.topic = ACLK_TOPICID_NODE_INFO; + query->data.bin_payload.payload = generate_update_node_info_message(&query->data.bin_payload.size, info); + query->data.bin_payload.msg_name = "UpdateNodeInfo"; + QUEUE_IF_PAYLOAD_PRESENT(query); +} diff --git a/aclk/aclk_charts_api.h b/aclk/aclk_charts_api.h new file mode 100644 index 000000000..305fe4f74 --- /dev/null +++ b/aclk/aclk_charts_api.h @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +#ifndef ACLK_CHARTS_H +#define ACLK_CHARTS_H + +#include "../daemon/common.h" +#include "schema-wrappers/schema_wrappers.h" + +void aclk_chart_inst_update(char **payloads, size_t *payload_sizes, struct aclk_message_position *new_positions); +void aclk_chart_dim_update(char **payloads, size_t *payload_sizes, struct aclk_message_position *new_positions); +void aclk_chart_inst_and_dim_update(char **payloads, size_t *payload_sizes, int *is_dim, struct aclk_message_position *new_positions, uint64_t batch_id); + +void aclk_chart_config_updated(struct chart_config_updated *config_list, int list_size); + +void aclk_chart_reset(chart_reset_t reset); + +void aclk_retention_updated(struct retention_updated *data); + +void aclk_update_node_info(struct update_node_info *info); + +#endif /* ACLK_CHARTS_H */ diff --git a/aclk/aclk_collector_list.c b/aclk/aclk_collector_list.c index a251a23a8..2920c9a5c 100644 --- a/aclk/aclk_collector_list.c +++ b/aclk/aclk_collector_list.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -// This is copied from Legacy ACLK, Original Autor: amoss +// This is copied from Legacy ACLK, Original Author: amoss // TODO unmess this diff --git a/aclk/aclk_collector_list.h 
b/aclk/aclk_collector_list.h index 98d30ba94..09c06b14a 100644 --- a/aclk/aclk_collector_list.h +++ b/aclk/aclk_collector_list.h @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -// This is copied from Legacy ACLK, Original Autor: amoss +// This is copied from Legacy ACLK, Original Author: amoss // TODO unmess this @@ -31,6 +31,8 @@ struct _collector { struct _collector *next; }; +extern struct _collector *collector_list; + struct _collector *_add_collector(const char *hostname, const char *plugin_name, const char *module_name); struct _collector *_del_collector(const char *hostname, const char *plugin_name, const char *module_name); void _reset_collector_list(); diff --git a/aclk/aclk_otp.c b/aclk/aclk_otp.c index 411a5f891..658e04f9b 100644 --- a/aclk/aclk_otp.c +++ b/aclk/aclk_otp.c @@ -2,16 +2,12 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "aclk_otp.h" +#include "aclk_util.h" +#include "aclk.h" -#include "../daemon/common.h" +#include "daemon/common.h" -#include "../mqtt_websockets/c-rbuf/include/ringbuffer.h" - -// CentOS 7 has older version that doesn't define this -// same goes for MacOS -#ifndef UUID_STR_LEN -#define UUID_STR_LEN 37 -#endif +#include "mqtt_websockets/c-rbuf/include/ringbuffer.h" struct dictionary_singleton { char *key; @@ -213,7 +209,7 @@ static int parse_passwd_response(const char *json_str, struct auth_data *auth) { json = json_tokener_parse(json_str); if (!json) { - error("JSON-C failed to parse the payload of http respons of /env endpoint"); + error("JSON-C failed to parse the payload of http response of /env endpoint"); return 1; } @@ -363,7 +359,7 @@ static int aclk_parse_otp_error(const char *json_str) { json = json_tokener_parse(json_str); if (!json) { - error("JSON-C failed to parse the payload of http respons of /env endpoint"); + error("JSON-C failed to parse the payload of http response of /env endpoint"); return 1; } @@ -734,7 +730,7 @@ static int parse_json_env(const char *json_str, aclk_env_t *env) { json = json_tokener_parse(json_str); if (!json) { - error("JSON-C failed to parse the payload of http respons of /env endpoint"); + error("JSON-C failed to parse the payload of http response of /env endpoint"); return 1; } @@ -846,7 +842,11 @@ int aclk_get_env(aclk_env_t *env, const char* aclk_hostname, int aclk_port) { return 1; } - buffer_sprintf(buf, "/api/v1/env?v=%s&cap=json$claim_id=%s", &(VERSION[1]) /* skip 'v' at beginning */, agent_id); +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + buffer_sprintf(buf, "/api/v1/env?v=%s&cap=json,proto&claim_id=%s", &(VERSION[1]) /* skip 'v' at beginning */, agent_id); +#else + buffer_sprintf(buf, "/api/v1/env?v=%s&cap=json&claim_id=%s", &(VERSION[1]) /* skip 'v' at beginning */, agent_id); +#endif freez(agent_id); req.host = (char*)aclk_hostname; diff --git a/aclk/aclk_otp.h b/aclk/aclk_otp.h index d2044f6fd..1ca9245c2 100644 --- a/aclk/aclk_otp.h +++ b/aclk/aclk_otp.h @@ -3,9 +3,10 @@ #ifndef ACLK_OTP_H #define ACLK_OTP_H -#include "../daemon/common.h" +#include "daemon/common.h" #include "https_client.h" +#include "aclk_util.h" int aclk_get_mqtt_otp(RSA *p_key, char **mqtt_id, char **mqtt_usr, char **mqtt_pass, url_t *target); int aclk_get_env(aclk_env_t *env, const char *aclk_hostname, int aclk_port); diff --git a/aclk/aclk_proxy.c b/aclk/aclk_proxy.c new file mode 100644 index 000000000..1701eb8e8 --- /dev/null +++ b/aclk/aclk_proxy.c @@ -0,0 +1,186 @@ +#include "aclk_proxy.h" + +#include "daemon/common.h" + +#define ACLK_PROXY_ENV "env" +#define ACLK_PROXY_CONFIG_VAR "proxy" + +struct { + 
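+    /* Lookup table mapping recognized proxy URL scheme prefixes to their
+     * proxy type; aclk_find_proxy() below walks it in order and returns the
+     * type of the first prefix that matches the configured proxy string. */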
+    ACLK_PROXY_TYPE type;
+    const char *url_str;
+} supported_proxy_types[] = {
+    { .type = PROXY_TYPE_SOCKS5, .url_str = "socks5" ACLK_PROXY_PROTO_ADDR_SEPARATOR },
+    { .type = PROXY_TYPE_SOCKS5, .url_str = "socks5h" ACLK_PROXY_PROTO_ADDR_SEPARATOR },
+    { .type = PROXY_TYPE_HTTP, .url_str = "http" ACLK_PROXY_PROTO_ADDR_SEPARATOR },
+    { .type = PROXY_TYPE_UNKNOWN, .url_str = NULL },
+};
+
+const char *aclk_proxy_type_to_s(ACLK_PROXY_TYPE *type)
+{
+    switch (*type) {
+        case PROXY_DISABLED:
+            return "disabled";
+        case PROXY_TYPE_HTTP:
+            return "HTTP";
+        case PROXY_TYPE_SOCKS5:
+            return "SOCKS";
+        default:
+            return "Unknown";
+    }
+}
+
+static inline ACLK_PROXY_TYPE aclk_find_proxy(const char *string)
+{
+    int i = 0;
+    while (supported_proxy_types[i].url_str) {
+        if (!strncmp(supported_proxy_types[i].url_str, string, strlen(supported_proxy_types[i].url_str)))
+            return supported_proxy_types[i].type;
+        i++;
+    }
+    return PROXY_TYPE_UNKNOWN;
+}
+
+ACLK_PROXY_TYPE aclk_verify_proxy(const char *string)
+{
+    if (!string)
+        return PROXY_TYPE_UNKNOWN;
+
+    while (*string == 0x20)
+        string++;
+
+    if (!*string)
+        return PROXY_TYPE_UNKNOWN;
+
+    return aclk_find_proxy(string);
+}
+
+// helper function to censor user&password
+// for logging purposes
+void safe_log_proxy_censor(char *proxy)
+{
+    size_t length = strlen(proxy);
+    char *auth = proxy + length - 1;
+    char *cur;
+
+    while ((auth >= proxy) && (*auth != '@'))
+        auth--;
+
+    //if not found or @ is first char do nothing
+    if (auth <= proxy)
+        return;
+
+    cur = strstr(proxy, ACLK_PROXY_PROTO_ADDR_SEPARATOR);
+    if (!cur)
+        cur = proxy;
+    else
+        cur += strlen(ACLK_PROXY_PROTO_ADDR_SEPARATOR);
+
+    while (cur < auth) {
+        *cur = 'X';
+        cur++;
+    }
+}
+
+static inline void safe_log_proxy_error(char *str, const char *proxy)
+{
+    char *log = strdupz(proxy);
+    safe_log_proxy_censor(log);
+    error("%s Provided Value:\"%s\"", str, log);
+    freez(log);
+}
+
+static inline int check_socks_environment(const char **proxy)
+{
+    char *tmp = getenv("socks_proxy");
+
+    if (!tmp)
+        return 1;
+
+    if (aclk_verify_proxy(tmp) == PROXY_TYPE_SOCKS5) {
+        *proxy = tmp;
+        return 0;
+    }
+
+    safe_log_proxy_error(
+        "Environment var \"socks_proxy\" defined but of unknown format. Supported syntax: \"socks5[h]://[user:pass@]host:port\".",
+        tmp);
+    return 1;
+}
+
+static inline int check_http_environment(const char **proxy)
+{
+    char *tmp = getenv("http_proxy");
+
+    if (!tmp)
+        return 1;
+
+    if (aclk_verify_proxy(tmp) == PROXY_TYPE_HTTP) {
+        *proxy = tmp;
+        return 0;
+    }
+
+    safe_log_proxy_error(
+        "Environment var \"http_proxy\" defined but of unknown format. Supported syntax: \"http[s]://[user:pass@]host:port\".",
+        tmp);
+    return 1;
+}
+
+const char *aclk_lws_wss_get_proxy_setting(ACLK_PROXY_TYPE *type)
+{
+    const char *proxy = config_get(CONFIG_SECTION_CLOUD, ACLK_PROXY_CONFIG_VAR, ACLK_PROXY_ENV);
+    *type = PROXY_DISABLED;
+
+    if (strcmp(proxy, "none") == 0)
+        return proxy;
+
+    if (strcmp(proxy, ACLK_PROXY_ENV) == 0) {
+        if (check_socks_environment(&proxy) == 0) {
+#ifdef LWS_WITH_SOCKS5
+            *type = PROXY_TYPE_SOCKS5;
+            return proxy;
+#else
+            safe_log_proxy_error("socks_proxy environment variable set to use SOCKS5 proxy "
+                                 "but Libwebsockets used doesn't have SOCKS5 support built in. "
" + "Ignoring and checking for other options.", + proxy); +#endif + } + if (check_http_enviroment(&proxy) == 0) + *type = PROXY_TYPE_HTTP; + return proxy; + } + + *type = aclk_verify_proxy(proxy); +#ifndef LWS_WITH_SOCKS5 + if (*type == PROXY_TYPE_SOCKS5) { + safe_log_proxy_error( + "Config var \"" ACLK_PROXY_CONFIG_VAR + "\" set to use SOCKS5 proxy but Libwebsockets used is built without support for SOCKS proxy. ACLK will be disabled.", + proxy); + } +#endif + if (*type == PROXY_TYPE_UNKNOWN) { + *type = PROXY_DISABLED; + safe_log_proxy_error( + "Config var \"" ACLK_PROXY_CONFIG_VAR + "\" defined but of unknown format. Supported syntax: \"socks5[h]://[user:pass@]host:ip\".", + proxy); + } + + return proxy; +} + +// helper function to read settings only once (static) +// as claiming, challenge/response and ACLK +// read the same thing, no need to parse again +const char *aclk_get_proxy(ACLK_PROXY_TYPE *type) +{ + static const char *proxy = NULL; + static ACLK_PROXY_TYPE proxy_type = PROXY_NOT_SET; + + if (proxy_type == PROXY_NOT_SET) + proxy = aclk_lws_wss_get_proxy_setting(&proxy_type); + + *type = proxy_type; + return proxy; +} diff --git a/aclk/aclk_proxy.h b/aclk/aclk_proxy.h new file mode 100644 index 000000000..b4ceb7df8 --- /dev/null +++ b/aclk/aclk_proxy.h @@ -0,0 +1,22 @@ +#ifndef ACLK_PROXY_H +#define ACLK_PROXY_H + +#include + +#define ACLK_PROXY_PROTO_ADDR_SEPARATOR "://" + +typedef enum aclk_proxy_type { + PROXY_TYPE_UNKNOWN = 0, + PROXY_TYPE_SOCKS5, + PROXY_TYPE_HTTP, + PROXY_DISABLED, + PROXY_NOT_SET, +} ACLK_PROXY_TYPE; + +const char *aclk_proxy_type_to_s(ACLK_PROXY_TYPE *type); +ACLK_PROXY_TYPE aclk_verify_proxy(const char *string); +const char *aclk_lws_wss_get_proxy_setting(ACLK_PROXY_TYPE *type); +void safe_log_proxy_censor(char *proxy); +const char *aclk_get_proxy(ACLK_PROXY_TYPE *type); + +#endif /* ACLK_PROXY_H */ diff --git a/aclk/aclk_query.c b/aclk/aclk_query.c index 3e2f88e46..001c1ba02 100644 --- a/aclk/aclk_query.c +++ b/aclk/aclk_query.c @@ -17,20 +17,20 @@ pthread_mutex_t query_lock_wait = PTHREAD_MUTEX_INITIALIZER; typedef struct aclk_query_handler { aclk_query_type_t type; char *name; // for logging purposes - int(*fnc)(mqtt_wss_client client, aclk_query_t query); + int(*fnc)(struct aclk_query_thread *query_thr, aclk_query_t query); } aclk_query_handler; -static int info_metadata(mqtt_wss_client client, aclk_query_t query) +static int info_metadata(struct aclk_query_thread *query_thr, aclk_query_t query) { - aclk_send_info_metadata(client, + aclk_send_info_metadata(query_thr->client, !query->data.metadata_info.initial_on_connect, query->data.metadata_info.host); return 0; } -static int alarms_metadata(mqtt_wss_client client, aclk_query_t query) +static int alarms_metadata(struct aclk_query_thread *query_thr, aclk_query_t query) { - aclk_send_alarm_metadata(client, + aclk_send_alarm_metadata(query_thr->client, !query->data.metadata_info.initial_on_connect); return 0; } @@ -55,11 +55,34 @@ static usec_t aclk_web_api_v1_request(RRDHOST *host, struct web_client *w, char return t; } -static int http_api_v2(mqtt_wss_client client, aclk_query_t query) +static RRDHOST *node_id_2_rrdhost(const char *node_id) +{ + int res; + uuid_t node_id_bin, host_id_bin; + char host_id[UUID_STR_LEN]; + if (uuid_parse(node_id, node_id_bin)) { + error("Couldn't parse UUID %s", node_id); + return NULL; + } + if ((res = get_host_id(&node_id_bin, &host_id_bin))) { + error("node not found rc=%d", res); + return NULL; + } + uuid_unparse_lower(host_id_bin, host_id); + return 
+}
+
+#define NODE_ID_QUERY "/node/"
+// TODO this function should be quarantined and written nicely
+// lots of skeletons from initial ACLK Legacy impl.
+// quick and dirty from the start
+static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query)
 {
     int retval = 0;
     usec_t t;
     BUFFER *local_buffer = NULL;
+    BUFFER *log_buffer = buffer_create(NETDATA_WEB_REQUEST_URL_SIZE);
+    RRDHOST *query_host = localhost;
 
 #ifdef NETDATA_WITH_ZLIB
     int z_ret;
@@ -76,6 +99,26 @@ static int http_api_v2(mqtt_wss_client client, aclk_query_t query)
     w->cookie2[0] = 0;      // Simulate web_client_create_on_fd()
     w->acl = 0x1f;
 
+    if (!strncmp(query->data.http_api_v2.query, NODE_ID_QUERY, strlen(NODE_ID_QUERY))) {
+        char *node_uuid = query->data.http_api_v2.query + strlen(NODE_ID_QUERY);
+        char nodeid[UUID_STR_LEN];
+        if (strlen(node_uuid) < (UUID_STR_LEN - 1)) {
+            error("URL requests node_id but there are not enough characters following");
+            retval = 1;
+            goto cleanup;
+        }
+        strncpyz(nodeid, node_uuid, UUID_STR_LEN - 1);
+
+        query_host = node_id_2_rrdhost(nodeid);
+        if (!query_host) {
+            error("Host with node_id \"%s\" not found! Query Ignored!", node_uuid);
+            retval = 1;
+            goto cleanup;
+        }
+    }
+
+    buffer_strcat(log_buffer, query->data.http_api_v2.query);
+
     char *mysep = strchr(query->data.http_api_v2.query, '?');
     if (mysep) {
         url_decode_r(w->decoded_query_string, mysep, NETDATA_WEB_REQUEST_URL_SIZE + 1);
@@ -85,8 +128,19 @@
 
     mysep = strrchr(query->data.http_api_v2.query, '/');
 
+    if (aclk_stats_enabled) {
+        ACLK_STATS_LOCK;
+        int stat_idx = aclk_cloud_req_http_type_to_idx(mysep ? mysep + 1 : "other");
+        aclk_metrics_per_sample.cloud_req_http_by_type[stat_idx]++;
+        ACLK_STATS_UNLOCK;
+    }
+
     // execute the query
-    t = aclk_web_api_v1_request(localhost, w, mysep ? mysep + 1 : "noop");
+    w->tv_in = query->created_tv;
+    now_realtime_timeval(&w->tv_ready);
+    t = aclk_web_api_v1_request(query_host, w, mysep ? mysep + 1 : "noop");
+    size_t size = (w->mode == WEB_CLIENT_MODE_FILECOPY) ? w->response.rlen : w->response.data->len;
+    size_t sent = size;
 
 #ifdef NETDATA_WITH_ZLIB
     // check if gzip encoding can and should be used
@@ -128,14 +182,13 @@
             z_buffer->len += bytes_to_cpy;
         } while(z_ret != Z_STREAM_END);
         // so that web_client_build_http_header
-        // puts correct content lenght into header
+        // puts correct content length into header
         buffer_free(w->response.data);
         w->response.data = z_buffer;
         z_buffer = NULL;
     }
 #endif
 
-    now_realtime_timeval(&w->tv_ready);
     w->response.data->date = w->tv_ready.tv_sec;
     web_client_build_http_header(w);
     local_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
@@ -149,6 +202,7 @@
             buffer_need_bytes(local_buffer, w->response.data->len);
             memcpy(&local_buffer->buffer[local_buffer->len], w->response.data->buffer, w->response.data->len);
             local_buffer->len += w->response.data->len;
+            sent = sent - size + w->response.data->len;
         } else {
 #endif
             buffer_strcat(local_buffer, w->response.data->buffer);
@@ -157,7 +211,26 @@
 #endif
     }
 
-    aclk_http_msg_v2(client, query->callback_topic, query->msg_id, t, query->created, w->response.code, local_buffer->buffer, local_buffer->len);
+    // send msg.
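+    // The response is published on the per-request callback topic, echoing
+    // the msg_id the cloud sent with the request so the two can be
+    // correlated; t is the time the local web API spent on the query (usec).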
+ aclk_http_msg_v2(query_thr->client, query->callback_topic, query->msg_id, t, query->created, w->response.code, local_buffer->buffer, local_buffer->len); + + // log. + struct timeval tv; + now_realtime_timeval(&tv); + log_access("%llu: %d '[ACLK]:%d' '%s' (sent/all = %zu/%zu bytes %0.0f%%, prep/sent/total = %0.2f/%0.2f/%0.2f ms) %d '%s'", + w->id + , gettid() + , query_thr->idx + , "DATA" + , sent + , size + , size > sent ? -(((size - sent) / (double)size) * 100.0) : ((size > 0) ? (((sent - size ) / (double)size) * 100.0) : 0.0) + , dt_usec(&w->tv_ready, &w->tv_in) / 1000.0 + , dt_usec(&tv, &w->tv_ready) / 1000.0 + , dt_usec(&tv, &w->tv_in) / 1000.0 + , w->response.code + , strip_control_characters((char *)buffer_tostring(log_buffer)) + ); cleanup: #ifdef NETDATA_WITH_ZLIB @@ -170,45 +243,83 @@ cleanup: buffer_free(w->response.header_output); freez(w); buffer_free(local_buffer); + buffer_free(log_buffer); return retval; } -static int chart_query(mqtt_wss_client client, aclk_query_t query) +static int chart_query(struct aclk_query_thread *query_thr, aclk_query_t query) { - aclk_chart_msg(client, query->data.chart_add_del.host, query->data.chart_add_del.chart_name); + aclk_chart_msg(query_thr->client, query->data.chart_add_del.host, query->data.chart_add_del.chart_name); return 0; } -static int alarm_state_update_query(mqtt_wss_client client, aclk_query_t query) +static int alarm_state_update_query(struct aclk_query_thread *query_thr, aclk_query_t query) { - aclk_alarm_state_msg(client, query->data.alarm_update); + aclk_alarm_state_msg(query_thr->client, query->data.alarm_update); // aclk_alarm_state_msg frees the json object including the header it generates query->data.alarm_update = NULL; return 0; } +#ifdef ENABLE_NEW_CLOUD_PROTOCOL +static int register_node(struct aclk_query_thread *query_thr, aclk_query_t query) { + // TODO create a pending registrations list + // with some timeouts to detect registration requests that + // go unanswered from the cloud + aclk_generate_node_registration(query_thr->client, &query->data.node_creation); + return 0; +} + +static int node_state_update(struct aclk_query_thread *query_thr, aclk_query_t query) { + // TODO create a pending registrations list + // with some timeouts to detect registration requests that + // go unanswered from the cloud + aclk_generate_node_state_update(query_thr->client, &query->data.node_update); + return 0; +} + +static int send_bin_msg(struct aclk_query_thread *query_thr, aclk_query_t query) +{ + // this will be simplified when legacy support is removed + aclk_send_bin_message_subtopic_pid(query_thr->client, query->data.bin_payload.payload, query->data.bin_payload.size, query->data.bin_payload.topic, query->data.bin_payload.msg_name); + return 0; +} +#endif + aclk_query_handler aclk_query_handlers[] = { - { .type = HTTP_API_V2, .name = "http api request v2", .fnc = http_api_v2 }, - { .type = ALARM_STATE_UPDATE, .name = "alarm state update", .fnc = alarm_state_update_query }, - { .type = METADATA_INFO, .name = "info metadata", .fnc = info_metadata }, - { .type = METADATA_ALARMS, .name = "alarms metadata", .fnc = alarms_metadata }, - { .type = CHART_NEW, .name = "chart new", .fnc = chart_query }, - { .type = CHART_DEL, .name = "chart delete", .fnc = info_metadata }, - { .type = UNKNOWN, .name = NULL, .fnc = NULL } + { .type = HTTP_API_V2, .name = "http api request v2", .fnc = http_api_v2 }, + { .type = ALARM_STATE_UPDATE, .name = "alarm state update", .fnc = alarm_state_update_query }, + { .type = METADATA_INFO, .name = "info 
metadata", .fnc = info_metadata }, + { .type = METADATA_ALARMS, .name = "alarms metadata", .fnc = alarms_metadata }, + { .type = CHART_NEW, .name = "chart new", .fnc = chart_query }, + { .type = CHART_DEL, .name = "chart delete", .fnc = info_metadata }, +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + { .type = REGISTER_NODE, .name = "register node", .fnc = register_node }, + { .type = NODE_STATE_UPDATE, .name = "node state update", .fnc = node_state_update }, + { .type = CHART_DIMS_UPDATE, .name = "chart and dim update bin", .fnc = send_bin_msg }, + { .type = CHART_CONFIG_UPDATED, .name = "chart config updated", .fnc = send_bin_msg }, + { .type = CHART_RESET, .name = "reset chart messages", .fnc = send_bin_msg }, + { .type = RETENTION_UPDATED, .name = "update retention info", .fnc = send_bin_msg }, + { .type = UPDATE_NODE_INFO, .name = "update node info", .fnc = send_bin_msg }, + { .type = ALARM_LOG_HEALTH, .name = "alarm log health", .fnc = send_bin_msg }, + { .type = ALARM_PROVIDE_CFG, .name = "provide alarm config", .fnc = send_bin_msg }, + { .type = ALARM_SNAPSHOT, .name = "alarm snapshot", .fnc = send_bin_msg }, +#endif + { .type = UNKNOWN, .name = NULL, .fnc = NULL } }; -static void aclk_query_process_msg(struct aclk_query_thread *info, aclk_query_t query) +static void aclk_query_process_msg(struct aclk_query_thread *query_thr, aclk_query_t query) { for (int i = 0; aclk_query_handlers[i].type != UNKNOWN; i++) { if (aclk_query_handlers[i].type == query->type) { debug(D_ACLK, "Processing Queued Message of type: \"%s\"", aclk_query_handlers[i].name); - aclk_query_handlers[i].fnc(info->client, query); + aclk_query_handlers[i].fnc(query_thr, query); aclk_query_free(query); if (aclk_stats_enabled) { ACLK_STATS_LOCK; aclk_metrics_per_sample.queries_dispatched++; - aclk_queries_per_thread[info->idx]++; + aclk_queries_per_thread[query_thr->idx]++; ACLK_STATS_UNLOCK; } return; @@ -219,11 +330,11 @@ static void aclk_query_process_msg(struct aclk_query_thread *info, aclk_query_t /* Processes messages from queue. 
Compete for work with other threads */ -int aclk_query_process_msgs(struct aclk_query_thread *info) +int aclk_query_process_msgs(struct aclk_query_thread *query_thr) { aclk_query_t query; while ((query = aclk_queue_pop())) - aclk_query_process_msg(info, query); + aclk_query_process_msg(query_thr, query); return 0; } @@ -233,21 +344,20 @@ int aclk_query_process_msgs(struct aclk_query_thread *info) */ void *aclk_query_main_thread(void *ptr) { - struct aclk_query_thread *info = ptr; + struct aclk_query_thread *query_thr = ptr; + while (!netdata_exit) { - aclk_query_process_msgs(info); + aclk_query_process_msgs(query_thr); QUERY_THREAD_LOCK; - if (unlikely(pthread_cond_wait(&query_cond_wait, &query_lock_wait))) sleep_usec(USEC_PER_SEC * 1); - QUERY_THREAD_UNLOCK; } return NULL; } -#define TASK_LEN_MAX 16 +#define TASK_LEN_MAX 22 void aclk_query_threads_start(struct aclk_query_threads *query_threads, mqtt_wss_client client) { info("Starting %d query threads.", query_threads->count); @@ -257,7 +367,7 @@ void aclk_query_threads_start(struct aclk_query_threads *query_threads, mqtt_wss for (int i = 0; i < query_threads->count; i++) { query_threads->thread_list[i].idx = i; //thread needs to know its index for statistics - if(unlikely(snprintf(thread_name, TASK_LEN_MAX, "%s_%d", ACLK_QUERY_THREAD_NAME, i) < 0)) + if(unlikely(snprintfz(thread_name, TASK_LEN_MAX, "%s_%d", ACLK_QUERY_THREAD_NAME, i) < 0)) error("snprintf encoding error"); netdata_thread_create( &query_threads->thread_list[i].thread, thread_name, NETDATA_THREAD_OPTION_JOINABLE, aclk_query_main_thread, diff --git a/aclk/aclk_query_queue.c b/aclk/aclk_query_queue.c index c9461b233..18b4783ee 100644 --- a/aclk/aclk_query_queue.c +++ b/aclk/aclk_query_queue.c @@ -20,7 +20,9 @@ static struct aclk_query_queue { static inline int _aclk_queue_query(aclk_query_t query) { + now_realtime_timeval(&query->created_tv); query->created = now_realtime_usec(); + ACLK_QUEUE_LOCK; if (aclk_query_queue.block_push) { ACLK_QUEUE_UNLOCK; @@ -43,14 +45,49 @@ static inline int _aclk_queue_query(aclk_query_t query) } +// Gets a pointer to the metric associated with a particular query type. +// NULL if the query type has no associated metric. +static inline volatile uint32_t *aclk_stats_qmetric_for_qtype(aclk_query_type_t qtype) { + switch (qtype) { + case HTTP_API_V2: + return &aclk_metrics_per_sample.cloud_req_type_http; + case ALARM_STATE_UPDATE: + return &aclk_metrics_per_sample.cloud_req_type_alarm_upd; + case METADATA_INFO: + return &aclk_metrics_per_sample.cloud_req_type_metadata_info; + case METADATA_ALARMS: + return &aclk_metrics_per_sample.cloud_req_type_metadata_alarms; + case CHART_NEW: + return &aclk_metrics_per_sample.cloud_req_type_chart_new; + case CHART_DEL: + return &aclk_metrics_per_sample.cloud_req_type_chart_del; + case REGISTER_NODE: + return &aclk_metrics_per_sample.cloud_req_type_register_node; + case NODE_STATE_UPDATE: + return &aclk_metrics_per_sample.cloud_req_type_node_upd; + default: + return NULL; + } +} + int aclk_queue_query(aclk_query_t query) { int ret = _aclk_queue_query(query); if (!ret) { + // local cache of query type before we wake up query thread, which may + // free the query in a race. + aclk_query_type_t qtype = query->type; QUERY_THREAD_WAKEUP; + if (aclk_stats_enabled) { + // get target query type metric before lock so we keep lock for + // minimal time. 
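+            // (The pointer addresses a field of the global
+            // aclk_metrics_per_sample struct, so it remains valid; only the
+            // increment itself needs to happen under ACLK_STATS_LOCK.)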
+ volatile uint32_t *metric = aclk_stats_qmetric_for_qtype(qtype); + ACLK_STATS_LOCK; aclk_metrics_per_sample.queries_queued++; + if (metric) + *metric += 1; ACLK_STATS_UNLOCK; } } @@ -102,17 +139,47 @@ aclk_query_t aclk_query_new(aclk_query_type_t type) void aclk_query_free(aclk_query_t query) { - if (query->type == HTTP_API_V2) { + switch (query->type) { + case HTTP_API_V2: freez(query->data.http_api_v2.payload); if (query->data.http_api_v2.query != query->dedup_id) freez(query->data.http_api_v2.query); - } + break; - if (query->type == CHART_NEW) + case CHART_NEW: freez(query->data.chart_add_del.chart_name); + break; - if (query->type == ALARM_STATE_UPDATE && query->data.alarm_update) - json_object_put(query->data.alarm_update); + case ALARM_STATE_UPDATE: + if (query->data.alarm_update) + json_object_put(query->data.alarm_update); + break; + + case NODE_STATE_UPDATE: + freez((void*)query->data.node_update.claim_id); + freez((void*)query->data.node_update.node_id); + break; + + case REGISTER_NODE: + freez((void*)query->data.node_creation.claim_id); + freez((void*)query->data.node_creation.hostname); + freez((void*)query->data.node_creation.machine_guid); + break; + + case CHART_DIMS_UPDATE: + case CHART_CONFIG_UPDATED: + case CHART_RESET: + case RETENTION_UPDATED: + case UPDATE_NODE_INFO: + case ALARM_LOG_HEALTH: + case ALARM_PROVIDE_CFG: + case ALARM_SNAPSHOT: + freez(query->data.bin_payload.payload); + break; + + default: + break; + } freez(query->dedup_id); freez(query->callback_topic); @@ -126,3 +193,10 @@ void aclk_queue_lock(void) aclk_query_queue.block_push = 1; ACLK_QUEUE_UNLOCK; } + +void aclk_queue_unlock(void) +{ + ACLK_QUEUE_LOCK; + aclk_query_queue.block_push = 0; + ACLK_QUEUE_UNLOCK; +} diff --git a/aclk/aclk_query_queue.h b/aclk/aclk_query_queue.h index c46513567..db6354433 100644 --- a/aclk/aclk_query_queue.h +++ b/aclk/aclk_query_queue.h @@ -4,7 +4,10 @@ #define NETDATA_ACLK_QUERY_QUEUE_H #include "libnetdata/libnetdata.h" -#include "../daemon/common.h" +#include "daemon/common.h" +#include "schema-wrappers/schema_wrappers.h" + +#include "aclk_util.h" typedef enum { UNKNOWN, @@ -13,7 +16,17 @@ typedef enum { HTTP_API_V2, CHART_NEW, CHART_DEL, - ALARM_STATE_UPDATE + ALARM_STATE_UPDATE, + REGISTER_NODE, + NODE_STATE_UPDATE, + CHART_DIMS_UPDATE, + CHART_CONFIG_UPDATED, + CHART_RESET, + RETENTION_UPDATED, + UPDATE_NODE_INFO, + ALARM_LOG_HEALTH, + ALARM_PROVIDE_CFG, + ALARM_SNAPSHOT } aclk_query_type_t; struct aclk_query_metadata { @@ -31,6 +44,13 @@ struct aclk_query_http_api_v2 { char *query; }; +struct aclk_bin_payload { + char *payload; + size_t size; + enum aclk_topics topic; + const char *msg_name; +}; + typedef struct aclk_query *aclk_query_t; struct aclk_query { aclk_query_type_t type; @@ -44,6 +64,7 @@ struct aclk_query { char *callback_topic; char *msg_id; + struct timeval created_tv; usec_t created; aclk_query_t next; @@ -55,6 +76,9 @@ struct aclk_query { struct aclk_query_metadata metadata_alarms; struct aclk_query_http_api_v2 http_api_v2; struct aclk_query_chart_add_del chart_add_del; + node_instance_creation_t node_creation; + node_instance_connection_t node_update; + struct aclk_bin_payload bin_payload; json_object *alarm_update; } data; }; @@ -67,5 +91,14 @@ aclk_query_t aclk_queue_pop(void); void aclk_queue_flush(void); void aclk_queue_lock(void); +void aclk_queue_unlock(void); + +#define QUEUE_IF_PAYLOAD_PRESENT(query) \ + if (likely(query->data.bin_payload.payload)) { \ + aclk_queue_query(query); \ + } else { \ + error("Failed to generate payload (%s)", 
__FUNCTION__); \ + aclk_query_free(query); \ + } #endif /* NETDATA_ACLK_QUERY_QUEUE_H */ diff --git a/aclk/aclk_rrdhost_state.h b/aclk/aclk_rrdhost_state.h new file mode 100644 index 000000000..73925b330 --- /dev/null +++ b/aclk/aclk_rrdhost_state.h @@ -0,0 +1,44 @@ +#ifndef ACLK_RRDHOST_STATE_H +#define ACLK_RRDHOST_STATE_H + +#include "libnetdata/libnetdata.h" + +#ifdef ACLK_LEGACY +typedef enum aclk_cmd { + ACLK_CMD_CLOUD, + ACLK_CMD_ONCONNECT, + ACLK_CMD_INFO, + ACLK_CMD_CHART, + ACLK_CMD_CHARTDEL, + ACLK_CMD_ALARM, + ACLK_CMD_CLOUD_QUERY_2, + ACLK_CMD_CHILD_CONNECT, + ACLK_CMD_CHILD_DISCONNECT +} ACLK_CMD; + +typedef enum aclk_metadata_state { + ACLK_METADATA_REQUIRED, + ACLK_METADATA_CMD_QUEUED, + ACLK_METADATA_SENT +} ACLK_METADATA_STATE; +#endif + +typedef enum aclk_agent_state { + ACLK_HOST_INITIALIZING, + ACLK_HOST_STABLE +} ACLK_AGENT_STATE; + +typedef struct aclk_rrdhost_state { + char *claimed_id; // Claimed ID if host has one otherwise NULL + +#ifdef ACLK_LEGACY + // per child popcorning + ACLK_AGENT_STATE state; + ACLK_METADATA_STATE metadata; + + time_t timestamp_created; + time_t t_last_popcorn_update; +#endif /* ACLK_LEGACY */ +} aclk_rrdhost_state; + +#endif /* ACLK_RRDHOST_STATE_H */ diff --git a/aclk/aclk_rx_msgs.c b/aclk/aclk_rx_msgs.c index 3d3ab5e2c..e7ce932ea 100644 --- a/aclk/aclk_rx_msgs.c +++ b/aclk/aclk_rx_msgs.c @@ -4,9 +4,12 @@ #include "aclk_stats.h" #include "aclk_query_queue.h" +#include "aclk.h" #define ACLK_V2_PAYLOAD_SEPARATOR "\x0D\x0A\x0D\x0A" -#define ACLK_CLOUD_REQ_V2_PREFIX "GET /api/v1/" +#define ACLK_CLOUD_REQ_V2_PREFIX "GET /" + +#define ACLK_V_COMPRESSION 2 struct aclk_request { char *type_id; @@ -18,7 +21,7 @@ struct aclk_request { int max_version; }; -int cloud_to_agent_parse(JSON_ENTRY *e) +static int cloud_to_agent_parse(JSON_ENTRY *e) { struct aclk_request *data = e->callback_data; @@ -88,6 +91,7 @@ static inline int aclk_v2_payload_get_query(const char *payload, char **query_ur { const char *start, *end; + // TODO better check of URL if(strncmp(payload, ACLK_CLOUD_REQ_V2_PREFIX, strlen(ACLK_CLOUD_REQ_V2_PREFIX))) { errno = 0; error("Only accepting requests that start with \"%s\" from CLOUD.", ACLK_CLOUD_REQ_V2_PREFIX); @@ -108,7 +112,7 @@ static inline int aclk_v2_payload_get_query(const char *payload, char **query_ur } #define HTTP_CHECK_AGENT_INITIALIZED() ACLK_SHARED_STATE_LOCK;\ - if (unlikely(aclk_shared_state.agent_state == AGENT_INITIALIZING)) {\ + if (unlikely(aclk_shared_state.agent_state == ACLK_HOST_INITIALIZING)) {\ debug(D_ACLK, "Ignoring \"http\" cloud request; agent not in stable state");\ ACLK_SHARED_STATE_UNLOCK;\ return 1;\ @@ -117,7 +121,9 @@ static inline int aclk_v2_payload_get_query(const char *payload, char **query_ur static int aclk_handle_cloud_request_v2(struct aclk_request *cloud_to_agent, char *raw_payload) { - HTTP_CHECK_AGENT_INITIALIZED(); + if (!aclk_use_new_cloud_arch) { + HTTP_CHECK_AGENT_INITIALIZED(); + } aclk_query_t query; @@ -253,3 +259,182 @@ err_cleanup_nojson: return 1; } + +#ifdef ENABLE_NEW_CLOUD_PROTOCOL +void aclk_handle_new_cloud_msg(const char *message_type, const char *msg, size_t msg_len) +{ + // TODO do the look up table with hashes to optimize when there are more + // than few + if (!strcmp(message_type, "cmd")) { + // msg is binary payload in all other cases + // however in this message from old legacy cloud + // we have to convert it to C string + char *str = mallocz(msg_len+1); + memcpy(str, msg, msg_len); + str[msg_len] = 0; + aclk_handle_cloud_message(str); + freez(str); + return; + 
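The "cmd" branch above bridges the new transport to the legacy JSON path: MQTT payloads are length-delimited rather than NUL-terminated, so the handler copies the bytes into a fresh buffer and terminates it before calling aclk_handle_cloud_message(). The same idiom as a standalone helper (a minimal sketch in plain C; netdata's code uses mallocz()/freez() rather than bare malloc()):

    #include <stdlib.h>
    #include <string.h>

    /* turn a length-delimited buffer into the C string the legacy parser expects */
    static char *to_cstring(const char *msg, size_t msg_len)
    {
        char *str = malloc(msg_len + 1);
        if (!str)
            return NULL;
        memcpy(str, msg, msg_len);
        str[msg_len] = '\0';
        return str;
    }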
} + if (!strcmp(message_type, "CreateNodeInstanceResult")) { + node_instance_creation_result_t res = parse_create_node_instance_result(msg, msg_len); + if (!res.machine_guid || !res.node_id) { + error_report("Error parsing CreateNodeInstanceResult"); + freez(res.machine_guid); + freez(res.node_id); + return; + } + + debug(D_ACLK, "CreateNodeInstanceResult: guid:%s nodeid:%s", res.machine_guid, res.node_id); + + uuid_t host_id, node_id; + if (uuid_parse(res.machine_guid, host_id)) { + error("Error parsing machine_guid provided by CreateNodeInstanceResult"); + freez(res.machine_guid); + freez(res.node_id); + return; + } + if (uuid_parse(res.node_id, node_id)) { + error("Error parsing node_id provided by CreateNodeInstanceResult"); + freez(res.machine_guid); + freez(res.node_id); + return; + } + update_node_id(&host_id, &node_id); + + aclk_query_t query = aclk_query_new(NODE_STATE_UPDATE); + query->data.node_update.hops = 1; //TODO - real hop count instead of hardcoded + rrdhost_aclk_state_lock(localhost); + query->data.node_update.claim_id = strdupz(localhost->aclk_state.claimed_id); + rrdhost_aclk_state_unlock(localhost); + + RRDHOST *host = rrdhost_find_by_guid(res.machine_guid, 0); + query->data.node_update.live = 0; + + if (host) { + // not all host must have RRDHOST struct created for them + // if they never connected during runtime of agent + if (host == localhost) { + query->data.node_update.live = 1; + query->data.node_update.hops = 0; + } else { + netdata_mutex_lock(&host->receiver_lock); + query->data.node_update.live = (host->receiver != NULL); + netdata_mutex_unlock(&host->receiver_lock); + query->data.node_update.hops = host->system_info->hops; + } + } + + query->data.node_update.node_id = res.node_id; // aclk_query_free will free it + query->data.node_update.queryable = 1; + query->data.node_update.session_id = aclk_session_newarch; + aclk_queue_query(query); + freez(res.machine_guid); + return; + } + if (!strcmp(message_type, "SendNodeInstances")) { + debug(D_ACLK, "Got SendNodeInstances"); + aclk_send_node_instances(); + return; + } + + if (!strcmp(message_type, "StreamChartsAndDimensions")) { + stream_charts_and_dims_t res = parse_stream_charts_and_dims(msg, msg_len); + if (!res.claim_id || !res.node_id) { + error("Error parsing StreamChartsAndDimensions msg"); + freez(res.claim_id); + freez(res.node_id); + return; + } + chart_batch_id = res.batch_id; + aclk_start_streaming(res.node_id, res.seq_id, res.seq_id_created_at.tv_sec, res.batch_id); + freez(res.claim_id); + freez(res.node_id); + return; + } + if (!strcmp(message_type, "ChartsAndDimensionsAck")) { + chart_and_dim_ack_t res = parse_chart_and_dimensions_ack(msg, msg_len); + if (!res.claim_id || !res.node_id) { + error("Error parsing StreamChartsAndDimensions msg"); + freez(res.claim_id); + freez(res.node_id); + return; + } + aclk_ack_chart_sequence_id(res.node_id, res.last_seq_id); + freez(res.claim_id); + freez(res.node_id); + return; + } + if (!strcmp(message_type, "UpdateChartConfigs")) { + struct update_chart_config res = parse_update_chart_config(msg, msg_len); + if (!res.claim_id || !res.node_id || !res.hashes) + error("Error parsing UpdateChartConfigs msg"); + else + aclk_get_chart_config(res.hashes); + destroy_update_chart_config(&res); + return; + } + if (!strcmp(message_type, "StartAlarmStreaming")) { + struct start_alarm_streaming res = parse_start_alarm_streaming(msg, msg_len); + if (!res.node_id || !res.batch_id) { + error("Error parsing StartAlarmStreaming"); + freez(res.node_id); + return; + } + 
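aclk_handle_new_cloud_msg() dispatches on the message type with a linear strcmp() chain; the TODO at its top notes that a lookup table (or hashing) becomes worthwhile once the set of types grows. One table-driven shape that refactor could take (hypothetical sketch, not part of this patch; the handler names are invented):

    #include <string.h>

    struct rx_handler {
        const char *name;
        void (*fn)(const char *msg, size_t msg_len);
    };

    /* one row per message type, terminated by a NULL name */
    static const struct rx_handler rx_handlers[] = {
        /* { "CreateNodeInstanceResult", handle_create_node_instance_result }, ... */
        { NULL, NULL }
    };

    static int rx_dispatch(const char *type, const char *msg, size_t msg_len)
    {
        for (const struct rx_handler *h = rx_handlers; h->name; h++)
            if (!strcmp(h->name, type)) {
                h->fn(msg, msg_len);
                return 0;
            }
        return -1; /* unknown message type */
    }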
aclk_start_alert_streaming(res.node_id, res.batch_id, res.start_seq_id); + freez(res.node_id); + return; + } + if (!strcmp(message_type, "SendAlarmLogHealth")) { + char *node_id = parse_send_alarm_log_health(msg, msg_len); + if (!node_id) { + error("Error parsing SendAlarmLogHealth"); + return; + } + aclk_send_alarm_health_log(node_id); + freez(node_id); + return; + } + if (!strcmp(message_type, "SendAlarmConfiguration")) { + char *config_hash = parse_send_alarm_configuration(msg, msg_len); + if (!config_hash || !*config_hash) { + error("Error parsing SendAlarmConfiguration"); + freez(config_hash); + return; + } + aclk_send_alarm_configuration(config_hash); + freez(config_hash); + return; + } + if (!strcmp(message_type, "SendAlarmSnapshot")) { + struct send_alarm_snapshot *sas = parse_send_alarm_snapshot(msg, msg_len); + if (!sas->node_id || !sas->claim_id) { + error("Error parsing SendAlarmSnapshot"); + destroy_send_alarm_snapshot(sas); + return; + } + aclk_process_send_alarm_snapshot(sas->node_id, sas->claim_id, sas->snapshot_id, sas->sequence_id); + destroy_send_alarm_snapshot(sas); + return; + } + if (!strcmp(message_type, "DisconnectReq")) { + struct disconnect_cmd *cmd = parse_disconnect_cmd(msg, msg_len); + if (!cmd) + return; + if (cmd->permaban) { + error ("Cloud Banned This Agent!"); + aclk_disable_runtime = 1; + } + info ("Cloud requested disconnect (EC=%u, \"%s\")", (unsigned int)cmd->error_code, cmd->error_description); + if (cmd->reconnect_after_s > 0) { + aclk_block_until = now_monotonic_sec() + cmd->reconnect_after_s; + info ("Cloud asks not to reconnect for %u seconds. We shall honor that request", (unsigned int)cmd->reconnect_after_s); + } + disconnect_req = 1; + freez(cmd->error_description); + freez(cmd); + return; + } + error ("Unknown new cloud arch message type received \"%s\"", message_type); +} +#endif diff --git a/aclk/aclk_rx_msgs.h b/aclk/aclk_rx_msgs.h index e24252bee..074dc004a 100644 --- a/aclk/aclk_rx_msgs.h +++ b/aclk/aclk_rx_msgs.h @@ -5,9 +5,13 @@ #ifndef ACLK_RX_MSGS_H #define ACLK_RX_MSGS_H -#include "../daemon/common.h" +#include "daemon/common.h" #include "libnetdata/libnetdata.h" int aclk_handle_cloud_message(char *payload); +#ifdef ENABLE_NEW_CLOUD_PROTOCOL +void aclk_handle_new_cloud_msg(const char *message_type, const char *msg, size_t msg_len); +#endif + #endif /* ACLK_RX_MSGS_H */ diff --git a/aclk/aclk_stats.c b/aclk/aclk_stats.c index a599cfda5..765c6a333 100644 --- a/aclk/aclk_stats.c +++ b/aclk/aclk_stats.c @@ -4,8 +4,6 @@ netdata_mutex_t aclk_stats_mutex = NETDATA_MUTEX_INITIALIZER; -int aclk_stats_enabled; - int query_thread_count; // data ACLK stats need per query thread @@ -112,7 +110,87 @@ static void aclk_stats_cloud_req(struct aclk_metrics_per_sample *per_sample) rrdset_done(st); } -#define MAX_DIM_NAME 16 +static void aclk_stats_cloud_req_type(struct aclk_metrics_per_sample *per_sample) +{ + static RRDSET *st = NULL; + static RRDDIM *rd_type_http = NULL; + static RRDDIM *rd_type_alarm_upd = NULL; + static RRDDIM *rd_type_metadata_info = NULL; + static RRDDIM *rd_type_metadata_alarms = NULL; + static RRDDIM *rd_type_chart_new = NULL; + static RRDDIM *rd_type_chart_del = NULL; + static RRDDIM *rd_type_register_node = NULL; + static RRDDIM *rd_type_node_upd = NULL; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "netdata", "aclk_cloud_req_type", NULL, "aclk", NULL, "Requests received from cloud by their type", "req/s", + "netdata", "stats", 200006, localhost->rrd_update_every, RRDSET_TYPE_STACKED); + + rd_type_http = 
rrddim_add(st, "http", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); + rd_type_alarm_upd = rrddim_add(st, "alarm update", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); + rd_type_metadata_info = rrddim_add(st, "info metadata", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); + rd_type_metadata_alarms = rrddim_add(st, "alarms metadata", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); + rd_type_chart_new = rrddim_add(st, "chart new", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); + rd_type_chart_del = rrddim_add(st, "chart delete", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); + rd_type_register_node = rrddim_add(st, "register node", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); + rd_type_node_upd = rrddim_add(st, "node update", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(st); + + rrddim_set_by_pointer(st, rd_type_http, per_sample->cloud_req_type_http); + rrddim_set_by_pointer(st, rd_type_alarm_upd, per_sample->cloud_req_type_alarm_upd); + rrddim_set_by_pointer(st, rd_type_metadata_info, per_sample->cloud_req_type_metadata_info); + rrddim_set_by_pointer(st, rd_type_metadata_alarms, per_sample->cloud_req_type_metadata_alarms); + rrddim_set_by_pointer(st, rd_type_chart_new, per_sample->cloud_req_type_chart_new); + rrddim_set_by_pointer(st, rd_type_chart_del, per_sample->cloud_req_type_chart_del); + rrddim_set_by_pointer(st, rd_type_register_node, per_sample->cloud_req_type_register_node); + rrddim_set_by_pointer(st, rd_type_node_upd, per_sample->cloud_req_type_node_upd); + + rrdset_done(st); +} + +static char *cloud_req_http_type_names[ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT] = { + "other", + "info", + "data", + "alarms", + "alarm_log", + "chart", + "charts" + // if you change then update `ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT`. 
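"other" deliberately sits at index 0 of cloud_req_http_type_names[] as the catch-all: the lookup in aclk_cloud_req_http_type_to_idx() just below scans from index 1 and returns 0 on a miss, so requests for unrecognized endpoints are still counted. A plausible caller (illustrative; the increment mirrors how the per-type counters are fed elsewhere in the agent):

    int idx = aclk_cloud_req_http_type_to_idx("charts");  /* known endpoint -> 6    */
    idx     = aclk_cloud_req_http_type_to_idx("badge");   /* unknown -> 0 ("other") */
    ACLK_STATS_LOCK;
    aclk_metrics_per_sample.cloud_req_http_by_type[idx]++;
    ACLK_STATS_UNLOCK;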
+}; + +int aclk_cloud_req_http_type_to_idx(const char *name) +{ + for (int i = 1; i < ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT; i++) + if (!strcmp(cloud_req_http_type_names[i], name)) + return i; + return 0; +} + +static void aclk_stats_cloud_req_http_type(struct aclk_metrics_per_sample *per_sample) +{ + static RRDSET *st = NULL; + static RRDDIM *rd_rq_types[ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT]; + + if (unlikely(!st)) { + st = rrdset_create_localhost( + "netdata", "aclk_cloud_req_http_type", NULL, "aclk", NULL, "Requests received from cloud via HTTP by their type", "req/s", + "netdata", "stats", 200007, localhost->rrd_update_every, RRDSET_TYPE_STACKED); + + for (int i = 0; i < ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT; i++) + rd_rq_types[i] = rrddim_add(st, cloud_req_http_type_names[i], NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(st); + + for (int i = 0; i < ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT; i++) + rrddim_set_by_pointer(st, rd_rq_types[i], per_sample->cloud_req_http_by_type[i]); + + rrdset_done(st); +} + +#define MAX_DIM_NAME 22 static void aclk_stats_query_threads(uint32_t *queries_per_thread) { static RRDSET *st = NULL; @@ -122,10 +200,10 @@ static void aclk_stats_query_threads(uint32_t *queries_per_thread) if (unlikely(!st)) { st = rrdset_create_localhost( "netdata", "aclk_query_threads", NULL, "aclk", NULL, "Queries Processed Per Thread", "req/s", - "netdata", "stats", 200007, localhost->rrd_update_every, RRDSET_TYPE_STACKED); + "netdata", "stats", 200009, localhost->rrd_update_every, RRDSET_TYPE_STACKED); for (int i = 0; i < query_thread_count; i++) { - if (snprintf(dim_name, MAX_DIM_NAME, "Query %d", i) < 0) + if (snprintfz(dim_name, MAX_DIM_NAME, "Query %d", i) < 0) error("snprintf encoding error"); aclk_qt_data[i].dim = rrddim_add(st, dim_name, NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); } @@ -149,7 +227,7 @@ static void aclk_stats_query_time(struct aclk_metrics_per_sample *per_sample) if (unlikely(!st)) { st = rrdset_create_localhost( "netdata", "aclk_query_time", NULL, "aclk", NULL, "Time it took to process cloud requested DB queries", "us", - "netdata", "stats", 200006, localhost->rrd_update_every, RRDSET_TYPE_LINE); + "netdata", "stats", 200008, localhost->rrd_update_every, RRDSET_TYPE_LINE); rd_rq_avg = rrddim_add(st, "avg", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); rd_rq_max = rrddim_add(st, "max", NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); @@ -218,6 +296,9 @@ void *aclk_stats_main_thread(void *ptr) #endif aclk_stats_cloud_req(&per_sample); + aclk_stats_cloud_req_type(&per_sample); + aclk_stats_cloud_req_http_type(&per_sample); + aclk_stats_query_threads(aclk_queries_per_thread_sample); aclk_stats_query_time(&per_sample); diff --git a/aclk/aclk_stats.h b/aclk/aclk_stats.h index 33d016965..317a34ba4 100644 --- a/aclk/aclk_stats.h +++ b/aclk/aclk_stats.h @@ -3,7 +3,7 @@ #ifndef NETDATA_ACLK_STATS_H #define NETDATA_ACLK_STATS_H -#include "../daemon/common.h" +#include "daemon/common.h" #include "libnetdata/libnetdata.h" #define ACLK_STATS_THREAD_NAME "ACLK_Stats" @@ -13,7 +13,10 @@ extern netdata_mutex_t aclk_stats_mutex; #define ACLK_STATS_LOCK netdata_mutex_lock(&aclk_stats_mutex) #define ACLK_STATS_UNLOCK netdata_mutex_unlock(&aclk_stats_mutex) -extern int aclk_stats_enabled; +// if you change update `cloud_req_http_type_names`. 
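The paired reminders on cloud_req_http_type_names[] and ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT keep the two in sync by convention only; edit one side alone and the chart loops either run past the array or silently ignore the new names. A compile-time guard is possible, sketched here as a suggestion rather than as part of the patch (the negative-array-size typedef trick is plain C89/C99):

    /* let the array define its own length ... */
    static char *cloud_req_http_type_names[] = {
        "other", "info", "data", "alarms", "alarm_log", "chart", "charts"
    };
    /* ... and fail the build if it drifts from the header's constant */
    typedef char req_type_cnt_check[
        (sizeof(cloud_req_http_type_names) / sizeof(cloud_req_http_type_names[0])
         == ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT) ? 1 : -1];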
+#define ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT 7 + +int aclk_cloud_req_http_type_to_idx(const char *name); struct aclk_stats_thread { netdata_thread_t *thread; @@ -45,6 +48,19 @@ extern struct aclk_metrics_per_sample { volatile uint32_t cloud_req_recvd; volatile uint32_t cloud_req_err; + // request types. + volatile uint32_t cloud_req_type_http; + volatile uint32_t cloud_req_type_alarm_upd; + volatile uint32_t cloud_req_type_metadata_info; + volatile uint32_t cloud_req_type_metadata_alarms; + volatile uint32_t cloud_req_type_chart_new; + volatile uint32_t cloud_req_type_chart_del; + volatile uint32_t cloud_req_type_register_node; + volatile uint32_t cloud_req_type_node_upd; + + // HTTP-specific request types. + volatile uint32_t cloud_req_http_by_type[ACLK_STATS_CLOUD_HTTP_REQ_TYPE_CNT]; + volatile uint32_t cloud_q_process_total; volatile uint32_t cloud_q_process_count; volatile uint32_t cloud_q_process_max; diff --git a/aclk/aclk_tx_msgs.c b/aclk/aclk_tx_msgs.c index 144008e4d..237c1bdd2 100644 --- a/aclk/aclk_tx_msgs.c +++ b/aclk/aclk_tx_msgs.c @@ -1,14 +1,18 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "aclk_tx_msgs.h" -#include "../daemon/common.h" +#include "daemon/common.h" #include "aclk_util.h" #include "aclk_stats.h" +#include "aclk.h" #ifndef __GNUC__ #pragma region aclk_tx_msgs helper functions #endif +// version for aclk legacy (old cloud arch) +#define ACLK_VERSION 2 + static void aclk_send_message_subtopic(mqtt_wss_client client, json_object *msg, enum aclk_topics subtopic) { uint16_t packet_id; @@ -16,7 +20,7 @@ static void aclk_send_message_subtopic(mqtt_wss_client client, json_object *msg, const char *topic = aclk_get_topic(subtopic); if (unlikely(!topic)) { - error("Couldn't get topic. Aborting mesage send"); + error("Couldn't get topic. Aborting message send"); return; } @@ -32,6 +36,37 @@ static void aclk_send_message_subtopic(mqtt_wss_client client, json_object *msg, #endif } +uint16_t aclk_send_bin_message_subtopic_pid(mqtt_wss_client client, char *msg, size_t msg_len, enum aclk_topics subtopic, const char *msgname) +{ +#ifndef ACLK_LOG_CONVERSATION_DIR + UNUSED(msgname); +#endif + uint16_t packet_id; + const char *topic = aclk_get_topic(subtopic); + + if (unlikely(!topic)) { + error("Couldn't get topic. Aborting message send."); + return 0; + } + + mqtt_wss_publish_pid(client, topic, msg, msg_len, MQTT_WSS_PUB_QOS1, &packet_id); +#ifdef NETDATA_INTERNAL_CHECKS + aclk_stats_msg_published(packet_id); +#endif +#ifdef ACLK_LOG_CONVERSATION_DIR +#define FN_MAX_LEN 1024 + char filename[FN_MAX_LEN]; + snprintf(filename, FN_MAX_LEN, ACLK_LOG_CONVERSATION_DIR "/%010d-tx-%s.bin", ACLK_GET_CONV_LOG_NEXT(), msgname); + FILE *fptr; + if (fptr = fopen(filename,"w")) { + fwrite(msg, msg_len, 1, fptr); + fclose(fptr); + } +#endif + + return packet_id; +} + static uint16_t aclk_send_message_subtopic_pid(mqtt_wss_client client, json_object *msg, enum aclk_topics subtopic) { uint16_t packet_id; @@ -39,7 +74,7 @@ static uint16_t aclk_send_message_subtopic_pid(mqtt_wss_client client, json_obje const char *topic = aclk_get_topic(subtopic); if (unlikely(!topic)) { - error("Couldn't get topic. Aborting mesage send"); + error("Couldn't get topic. 
Aborting message send"); return 0; } @@ -368,6 +403,87 @@ int aclk_send_app_layer_disconnect(mqtt_wss_client client, const char *message) return pid; } +#ifdef ENABLE_NEW_CLOUD_PROTOCOL +// new protobuf msgs +uint16_t aclk_send_agent_connection_update(mqtt_wss_client client, int reachable) { + size_t len; + uint16_t pid; + update_agent_connection_t conn = { + .reachable = (reachable ? 1 : 0), + .lwt = 0, + .session_id = aclk_session_newarch + }; + + rrdhost_aclk_state_lock(localhost); + if (unlikely(!localhost->aclk_state.claimed_id)) { + error("Internal error. Should not come here if not claimed"); + rrdhost_aclk_state_unlock(localhost); + return 0; + } + conn.claim_id = localhost->aclk_state.claimed_id; + + char *msg = generate_update_agent_connection(&len, &conn); + rrdhost_aclk_state_unlock(localhost); + + if (!msg) { + error("Error generating agent::v1::UpdateAgentConnection payload"); + return 0; + } + + pid = aclk_send_bin_message_subtopic_pid(client, msg, len, ACLK_TOPICID_AGENT_CONN, "UpdateAgentConnection"); + freez(msg); + return pid; +} + +char *aclk_generate_lwt(size_t *size) { + update_agent_connection_t conn = { + .reachable = 0, + .lwt = 1, + .session_id = aclk_session_newarch + }; + + rrdhost_aclk_state_lock(localhost); + if (unlikely(!localhost->aclk_state.claimed_id)) { + error("Internal error. Should not come here if not claimed"); + rrdhost_aclk_state_unlock(localhost); + return NULL; + } + conn.claim_id = localhost->aclk_state.claimed_id; + + char *msg = generate_update_agent_connection(size, &conn); + rrdhost_aclk_state_unlock(localhost); + + if (!msg) + error("Error generating agent::v1::UpdateAgentConnection payload for LWT"); + + return msg; +} + +void aclk_generate_node_registration(mqtt_wss_client client, node_instance_creation_t *node_creation) { + size_t len; + char *msg = generate_node_instance_creation(&len, node_creation); + if (!msg) { + error("Error generating nodeinstance::create::v1::CreateNodeInstance"); + return; + } + + aclk_send_bin_message_subtopic_pid(client, msg, len, ACLK_TOPICID_CREATE_NODE, "CreateNodeInstance"); + freez(msg); +} + +void aclk_generate_node_state_update(mqtt_wss_client client, node_instance_connection_t *node_connection) { + size_t len; + char *msg = generate_node_instance_connection(&len, node_connection); + if (!msg) { + error("Error generating nodeinstance::v1::UpdateNodeInstanceConnection"); + return; + } + + aclk_send_bin_message_subtopic_pid(client, msg, len, ACLK_TOPICID_NODE_CONN, "UpdateNodeInstanceConnection"); + freez(msg); +} +#endif /* ENABLE_NEW_CLOUD_PROTOCOL */ + #ifndef __GNUC__ #pragma endregion #endif diff --git a/aclk/aclk_tx_msgs.h b/aclk/aclk_tx_msgs.h index 50c981696..da29a4a32 100644 --- a/aclk/aclk_tx_msgs.h +++ b/aclk/aclk_tx_msgs.h @@ -4,8 +4,12 @@ #include #include "libnetdata/libnetdata.h" -#include "../daemon/common.h" +#include "daemon/common.h" #include "mqtt_wss_client.h" +#include "schema-wrappers/schema_wrappers.h" +#include "aclk_util.h" + +uint16_t aclk_send_bin_message_subtopic_pid(mqtt_wss_client client, char *msg, size_t msg_len, enum aclk_topics subtopic, const char *msgname); void aclk_send_info_metadata(mqtt_wss_client client, int metadata_submitted, RRDHOST *host); void aclk_send_alarm_metadata(mqtt_wss_client client, int metadata_submitted); @@ -19,4 +23,13 @@ void aclk_alarm_state_msg(mqtt_wss_client client, json_object *msg); json_object *aclk_generate_disconnect(const char *message); int aclk_send_app_layer_disconnect(mqtt_wss_client client, const char *message); +#ifdef 
ENABLE_NEW_CLOUD_PROTOCOL +// new protobuf msgs +uint16_t aclk_send_agent_connection_update(mqtt_wss_client client, int reachable); +char *aclk_generate_lwt(size_t *size); + +void aclk_generate_node_registration(mqtt_wss_client client, node_instance_creation_t *node_creation); +void aclk_generate_node_state_update(mqtt_wss_client client, node_instance_connection_t *node_connection); +#endif + #endif diff --git a/aclk/aclk_util.c b/aclk/aclk_util.c index b8ac66756..ee8fcaf94 100644 --- a/aclk/aclk_util.c +++ b/aclk/aclk_util.c @@ -2,15 +2,14 @@ #include "aclk_util.h" -#include +#include "daemon/common.h" -#include "../daemon/common.h" +int aclk_use_new_cloud_arch = 0; +usec_t aclk_session_newarch = 0; -// CentOS 7 has older version that doesn't define this -// same goes for MacOS -#ifndef UUID_STR_LEN -#define UUID_STR_LEN 37 -#endif +aclk_env_t *aclk_env = NULL; + +int chart_batch_id; aclk_encoding_type_t aclk_encoding_type_t_from_str(const char *str) { if (!strcmp(str, "json")) { @@ -54,6 +53,15 @@ void aclk_env_t_destroy(aclk_env_t *env) { } } +int aclk_env_has_capa(const char *capa) +{ + for (int i = 0; i < (int) aclk_env->capability_count; i++) { + if (!strcasecmp(capa, aclk_env->capabilities[i])) + return 1; + } + return 0; +} + #ifdef ACLK_LOG_CONVERSATION_DIR volatile int aclk_conversation_log_counter = 0; #if !defined(HAVE_C___ATOMIC) || defined(NETDATA_NO_ATOMIC_INSTRUCTIONS) @@ -109,18 +117,53 @@ struct topic_name { // in answer to /password endpoint const char *name; } topic_names[] = { - { .id = ACLK_TOPICID_CHART, .name = "chart" }, - { .id = ACLK_TOPICID_ALARMS, .name = "alarms" }, - { .id = ACLK_TOPICID_METADATA, .name = "meta" }, - { .id = ACLK_TOPICID_COMMAND, .name = "inbox-cmd" }, - { .id = ACLK_TOPICID_UNKNOWN, .name = NULL } + { .id = ACLK_TOPICID_CHART, .name = "chart" }, + { .id = ACLK_TOPICID_ALARMS, .name = "alarms" }, + { .id = ACLK_TOPICID_METADATA, .name = "meta" }, + { .id = ACLK_TOPICID_COMMAND, .name = "inbox-cmd" }, + { .id = ACLK_TOPICID_AGENT_CONN, .name = "agent-connection" }, + { .id = ACLK_TOPICID_CMD_NG_V1, .name = "inbox-cmd-v1" }, + { .id = ACLK_TOPICID_CREATE_NODE, .name = "create-node-instance" }, + { .id = ACLK_TOPICID_NODE_CONN, .name = "node-instance-connection" }, + { .id = ACLK_TOPICID_CHART_DIMS, .name = "chart-and-dims-updated" }, + { .id = ACLK_TOPICID_CHART_CONFIGS_UPDATED, .name = "chart-configs-updated" }, + { .id = ACLK_TOPICID_CHART_RESET, .name = "reset-charts" }, + { .id = ACLK_TOPICID_RETENTION_UPDATED, .name = "chart-retention-updated" }, + { .id = ACLK_TOPICID_NODE_INFO, .name = "node-instance-info" }, + { .id = ACLK_TOPICID_ALARM_LOG, .name = "alarm-log" }, + { .id = ACLK_TOPICID_ALARM_HEALTH, .name = "alarm-health" }, + { .id = ACLK_TOPICID_ALARM_CONFIG, .name = "alarm-config" }, + { .id = ACLK_TOPICID_ALARM_SNAPSHOT, .name = "alarm-snapshot" }, + { .id = ACLK_TOPICID_UNKNOWN, .name = NULL } +}; + +enum aclk_topics compulsory_topics_legacy[] = { + ACLK_TOPICID_CHART, + ACLK_TOPICID_ALARMS, + ACLK_TOPICID_METADATA, + ACLK_TOPICID_COMMAND, + ACLK_TOPICID_UNKNOWN }; -enum aclk_topics compulsory_topics[] = { +enum aclk_topics compulsory_topics_new_cloud_arch[] = { +// TODO remove old topics once not needed anymore ACLK_TOPICID_CHART, ACLK_TOPICID_ALARMS, ACLK_TOPICID_METADATA, ACLK_TOPICID_COMMAND, + ACLK_TOPICID_AGENT_CONN, + ACLK_TOPICID_CMD_NG_V1, + ACLK_TOPICID_CREATE_NODE, + ACLK_TOPICID_NODE_CONN, + ACLK_TOPICID_CHART_DIMS, + ACLK_TOPICID_CHART_CONFIGS_UPDATED, + ACLK_TOPICID_CHART_RESET, + ACLK_TOPICID_RETENTION_UPDATED, + 
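aclk_env_has_capa() above does a case-insensitive scan of the capability strings the cloud handed back in its environment response; note that it dereferences the global `aclk_env` without a NULL check, so callers must only invoke it once the environment has actually been received. A plausible call site (the capability string here is illustrative, not taken from this patch):

    if (aclk_env && aclk_env_has_capa("proto")) {
        /* the cloud advertises the capability; enable the dependent code path */
    }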
ACLK_TOPICID_NODE_INFO, + ACLK_TOPICID_ALARM_LOG, + ACLK_TOPICID_ALARM_HEALTH, + ACLK_TOPICID_ALARM_CONFIG, + ACLK_TOPICID_ALARM_SNAPSHOT, ACLK_TOPICID_UNKNOWN }; @@ -188,7 +231,7 @@ static int topic_cache_add_topic(struct json_object *json, struct aclk_topic *to } topic->topic_id = topic_name_to_id(json_object_get_string(json_object_iter_peek_value(&it))); if (topic->topic_id == ACLK_TOPICID_UNKNOWN) { - info("topic dictionary has unknown topic name \"%s\"", json_object_get_string(json_object_iter_peek_value(&it))); + debug(D_ACLK, "topic dictionary has unknown topic name \"%s\"", json_object_get_string(json_object_iter_peek_value(&it))); } json_object_iter_next(&it); continue; @@ -246,6 +289,8 @@ int aclk_generate_topic_cache(struct json_object *json) } } + enum aclk_topics *compulsory_topics = aclk_use_new_cloud_arch ? compulsory_topics_new_cloud_arch : compulsory_topics_legacy; + for (int i = 0; compulsory_topics[i] != ACLK_TOPICID_UNKNOWN; i++) { if (!aclk_get_topic(compulsory_topics[i])) { error("missing compulsory topic \"%s\" in password response from cloud", topic_id_to_name(compulsory_topics[i])); @@ -315,189 +360,6 @@ unsigned long int aclk_tbeb_delay(int reset, int base, unsigned long int min, un return delay; } -#define ACLK_PROXY_PROTO_ADDR_SEPARATOR "://" -#define ACLK_PROXY_ENV "env" -#define ACLK_PROXY_CONFIG_VAR "proxy" - -struct { - ACLK_PROXY_TYPE type; - const char *url_str; -} supported_proxy_types[] = { - { .type = PROXY_TYPE_SOCKS5, .url_str = "socks5" ACLK_PROXY_PROTO_ADDR_SEPARATOR }, - { .type = PROXY_TYPE_SOCKS5, .url_str = "socks5h" ACLK_PROXY_PROTO_ADDR_SEPARATOR }, - { .type = PROXY_TYPE_HTTP, .url_str = "http" ACLK_PROXY_PROTO_ADDR_SEPARATOR }, - { .type = PROXY_TYPE_UNKNOWN, .url_str = NULL }, -}; - -const char *aclk_proxy_type_to_s(ACLK_PROXY_TYPE *type) -{ - switch (*type) { - case PROXY_DISABLED: - return "disabled"; - case PROXY_TYPE_HTTP: - return "HTTP"; - case PROXY_TYPE_SOCKS5: - return "SOCKS"; - default: - return "Unknown"; - } -} - -static inline ACLK_PROXY_TYPE aclk_find_proxy(const char *string) -{ - int i = 0; - while (supported_proxy_types[i].url_str) { - if (!strncmp(supported_proxy_types[i].url_str, string, strlen(supported_proxy_types[i].url_str))) - return supported_proxy_types[i].type; - i++; - } - return PROXY_TYPE_UNKNOWN; -} - -ACLK_PROXY_TYPE aclk_verify_proxy(const char *string) -{ - if (!string) - return PROXY_TYPE_UNKNOWN; - - while (*string == 0x20 && *string!=0) // Help coverity (compiler will remove) - string++; - - if (!*string) - return PROXY_TYPE_UNKNOWN; - - return aclk_find_proxy(string); -} - -// helper function to censor user&password -// for logging purposes -void safe_log_proxy_censor(char *proxy) -{ - size_t length = strlen(proxy); - char *auth = proxy + length - 1; - char *cur; - - while ((auth >= proxy) && (*auth != '@')) - auth--; - - //if not found or @ is first char do nothing - if (auth <= proxy) - return; - - cur = strstr(proxy, ACLK_PROXY_PROTO_ADDR_SEPARATOR); - if (!cur) - cur = proxy; - else - cur += strlen(ACLK_PROXY_PROTO_ADDR_SEPARATOR); - - while (cur < auth) { - *cur = 'X'; - cur++; - } -} - -static inline void safe_log_proxy_error(char *str, const char *proxy) -{ - char *log = strdupz(proxy); - safe_log_proxy_censor(log); - error("%s Provided Value:\"%s\"", str, log); - freez(log); -} - -static inline int check_socks_enviroment(const char **proxy) -{ - char *tmp = getenv("socks_proxy"); - - if (!tmp) - return 1; - - if (aclk_verify_proxy(tmp) == PROXY_TYPE_SOCKS5) { - *proxy = tmp; - return 0; - } - - 
safe_log_proxy_error( - "Environment var \"socks_proxy\" defined but of unknown format. Supported syntax: \"socks5[h]://[user:pass@]host:ip\".", - tmp); - return 1; -} - -static inline int check_http_enviroment(const char **proxy) -{ - char *tmp = getenv("http_proxy"); - - if (!tmp) - return 1; - - if (aclk_verify_proxy(tmp) == PROXY_TYPE_HTTP) { - *proxy = tmp; - return 0; - } - - safe_log_proxy_error( - "Environment var \"http_proxy\" defined but of unknown format. Supported syntax: \"http[s]://[user:pass@]host:ip\".", - tmp); - return 1; -} - -const char *aclk_lws_wss_get_proxy_setting(ACLK_PROXY_TYPE *type) -{ - const char *proxy = config_get(CONFIG_SECTION_CLOUD, ACLK_PROXY_CONFIG_VAR, ACLK_PROXY_ENV); - *type = PROXY_DISABLED; - - if (strcmp(proxy, "none") == 0) - return proxy; - - if (strcmp(proxy, ACLK_PROXY_ENV) == 0) { - if (check_socks_enviroment(&proxy) == 0) { -#ifdef LWS_WITH_SOCKS5 - *type = PROXY_TYPE_SOCKS5; - return proxy; -#else - safe_log_proxy_error("socks_proxy environment variable set to use SOCKS5 proxy " - "but Libwebsockets used doesn't have SOCKS5 support built in. " - "Ignoring and checking for other options.", - proxy); -#endif - } - if (check_http_enviroment(&proxy) == 0) - *type = PROXY_TYPE_HTTP; - return proxy; - } - - *type = aclk_verify_proxy(proxy); -#ifndef LWS_WITH_SOCKS5 - if (*type == PROXY_TYPE_SOCKS5) { - safe_log_proxy_error( - "Config var \"" ACLK_PROXY_CONFIG_VAR - "\" set to use SOCKS5 proxy but Libwebsockets used is built without support for SOCKS proxy. ACLK will be disabled.", - proxy); - } -#endif - if (*type == PROXY_TYPE_UNKNOWN) { - *type = PROXY_DISABLED; - safe_log_proxy_error( - "Config var \"" ACLK_PROXY_CONFIG_VAR - "\" defined but of unknown format. Supported syntax: \"socks5[h]://[user:pass@]host:ip\".", - proxy); - } - - return proxy; -} - -// helper function to read settings only once (static) -// as claiming, challenge/response and ACLK -// read the same thing, no need to parse again -const char *aclk_get_proxy(ACLK_PROXY_TYPE *type) -{ - static const char *proxy = NULL; - static ACLK_PROXY_TYPE proxy_type = PROXY_NOT_SET; - - if (proxy_type == PROXY_NOT_SET) - proxy = aclk_lws_wss_get_proxy_setting(&proxy_type); - - *type = proxy_type; - return proxy; -} #define HTTP_PROXY_PREFIX "http://" void aclk_set_proxy(char **ohost, int *port, enum mqtt_wss_proxy_type *type) diff --git a/aclk/aclk_util.h b/aclk/aclk_util.h index 03b22e40c..07de5c58a 100644 --- a/aclk/aclk_util.h +++ b/aclk/aclk_util.h @@ -8,6 +8,11 @@ // Helper stuff which should not have any further inside ACLK dependency // and are supposed not to be needed outside of ACLK +extern int aclk_use_new_cloud_arch; +extern usec_t aclk_session_newarch; + +extern int chart_batch_id; + typedef enum { ACLK_ENC_UNKNOWN = 0, ACLK_ENC_JSON, @@ -44,18 +49,34 @@ typedef struct { aclk_backoff_t backoff; } aclk_env_t; +extern aclk_env_t *aclk_env; + aclk_encoding_type_t aclk_encoding_type_t_from_str(const char *str); aclk_transport_type_t aclk_transport_type_t_from_str(const char *str); void aclk_transport_desc_t_destroy(aclk_transport_desc_t *trp_desc); void aclk_env_t_destroy(aclk_env_t *env); +int aclk_env_has_capa(const char *capa); enum aclk_topics { - ACLK_TOPICID_UNKNOWN = 0, - ACLK_TOPICID_CHART = 1, - ACLK_TOPICID_ALARMS = 2, - ACLK_TOPICID_METADATA = 3, - ACLK_TOPICID_COMMAND = 4 + ACLK_TOPICID_UNKNOWN = 0, + ACLK_TOPICID_CHART = 1, + ACLK_TOPICID_ALARMS = 2, + ACLK_TOPICID_METADATA = 3, + ACLK_TOPICID_COMMAND = 4, + ACLK_TOPICID_AGENT_CONN = 5, + ACLK_TOPICID_CMD_NG_V1 = 6, 
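Throughout the new tx path, topic ids stay fixed small integers while the actual MQTT topic strings arrive in the cloud's topic dictionary at connect time; aclk_get_topic() maps one to the other and yields NULL for topics the dictionary did not supply. The producer-side pattern, as already used by aclk_send_bin_message_subtopic_pid() earlier in this patch (fragment; `client`, `msg`, `msg_len` and `packet_id` as in that function):

    const char *topic = aclk_get_topic(ACLK_TOPICID_AGENT_CONN);
    if (unlikely(!topic)) {
        error("Couldn't get topic. Aborting message send.");
        return 0;
    }
    mqtt_wss_publish_pid(client, topic, msg, msg_len, MQTT_WSS_PUB_QOS1, &packet_id);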
+ ACLK_TOPICID_CREATE_NODE = 7, + ACLK_TOPICID_NODE_CONN = 8, + ACLK_TOPICID_CHART_DIMS = 9, + ACLK_TOPICID_CHART_CONFIGS_UPDATED = 10, + ACLK_TOPICID_CHART_RESET = 11, + ACLK_TOPICID_RETENTION_UPDATED = 12, + ACLK_TOPICID_NODE_INFO = 13, + ACLK_TOPICID_ALARM_LOG = 14, + ACLK_TOPICID_ALARM_HEALTH = 15, + ACLK_TOPICID_ALARM_CONFIG = 16, + ACLK_TOPICID_ALARM_SNAPSHOT = 17 }; const char *aclk_get_topic(enum aclk_topics topic); @@ -78,20 +99,6 @@ int aclk_get_conv_log_next(); unsigned long int aclk_tbeb_delay(int reset, int base, unsigned long int min, unsigned long int max); #define aclk_tbeb_reset(x) aclk_tbeb_delay(1, 0, 0, 0) -typedef enum aclk_proxy_type { - PROXY_TYPE_UNKNOWN = 0, - PROXY_TYPE_SOCKS5, - PROXY_TYPE_HTTP, - PROXY_DISABLED, - PROXY_NOT_SET, -} ACLK_PROXY_TYPE; - -const char *aclk_proxy_type_to_s(ACLK_PROXY_TYPE *type); -ACLK_PROXY_TYPE aclk_verify_proxy(const char *string); -const char *aclk_lws_wss_get_proxy_setting(ACLK_PROXY_TYPE *type); -void safe_log_proxy_censor(char *proxy); -const char *aclk_get_proxy(ACLK_PROXY_TYPE *type); - void aclk_set_proxy(char **ohost, int *port, enum mqtt_wss_proxy_type *type); #endif /* ACLK_UTIL_H */ diff --git a/aclk/https_client.c b/aclk/https_client.c index 907f512ba..470c3fdf3 100644 --- a/aclk/https_client.c +++ b/aclk/https_client.c @@ -4,7 +4,7 @@ #include "https_client.h" -#include "../mqtt_websockets/c-rbuf/include/ringbuffer.h" +#include "mqtt_websockets/c-rbuf/include/ringbuffer.h" enum http_parse_state { HTTP_PARSE_INITIAL = 0, @@ -47,7 +47,7 @@ static inline void http_parse_ctx_clear(http_parse_ctx *ctx) { #define RESP_PROTO "HTTP/1.1 " #define HTTP_KEYVAL_SEPARATOR ": " #define HTTP_HDR_BUFFER_SIZE 256 -#define PORT_STR_MAX_BYTES 7 +#define PORT_STR_MAX_BYTES 12 static void process_http_hdr(http_parse_ctx *parse_ctx, const char *key, const char *val) { @@ -303,7 +303,8 @@ static int read_parse_response(https_req_ctx_t *ctx) { error("Poll timed out"); return 2; } - continue; + if (!ctx->ssl_ctx) + continue; } ctx->poll_fd.events = 0; @@ -421,6 +422,35 @@ err_exit: return rc; } +static int cert_verify_callback(int preverify_ok, X509_STORE_CTX *ctx) +{ + X509 *err_cert; + int err, depth; + char *err_str; + + if (!preverify_ok) { + err = X509_STORE_CTX_get_error(ctx); + depth = X509_STORE_CTX_get_error_depth(ctx); + err_cert = X509_STORE_CTX_get_current_cert(ctx); + err_str = X509_NAME_oneline(X509_get_subject_name(err_cert), NULL, 0); + + error("Cert Chain verify error:num=%d:%s:depth=%d:%s", err, + X509_verify_cert_error_string(err), depth, err_str); + + free(err_str); + } + +#ifdef ACLK_SSL_ALLOW_SELF_SIGNED + if (!preverify_ok && err == X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT) + { + preverify_ok = 1; + error("Self Signed Certificate Accepted as the agent was built with ACLK_SSL_ALLOW_SELF_SIGNED"); + } +#endif + + return preverify_ok; +} + int https_request(https_req_t *request, https_req_response_t *response) { int rc = 1, ret; char connect_port_str[PORT_STR_MAX_BYTES]; @@ -438,7 +468,7 @@ int https_request(https_req_t *request, https_req_response_t *response) { goto exit_req_ctx; } - snprintf(connect_port_str, PORT_STR_MAX_BYTES, "%d", connect_port); + snprintfz(connect_port_str, PORT_STR_MAX_BYTES, "%d", connect_port); ctx->sock = connect_to_this_ip46(IPPROTO_TCP, SOCK_STREAM, connect_host, 0, connect_port_str, &timeout); if (ctx->sock < 0) { @@ -480,6 +510,12 @@ int https_request(https_req_t *request, https_req_response_t *response) { goto exit_sock; } + if (!SSL_CTX_set_default_verify_paths(ctx->ssl_ctx)) { + 
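    /* note: SSL_CTX_set_default_verify_paths() loads OpenSSL's default CA
       file/directory (the system trust store); without a trust anchor the
       SSL_VERIFY_PEER mode installed just below, together with
       cert_verify_callback() above, would reject every server certificate,
       save for self-signed certificates explicitly tolerated in
       ACLK_SSL_ALLOW_SELF_SIGNED builds. */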
error("Error setting default verify paths"); + goto exit_CTX; + } + SSL_CTX_set_verify(ctx->ssl_ctx, SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE, cert_verify_callback); + ctx->ssl = SSL_new(ctx->ssl_ctx); if (ctx->ssl==NULL) { error("Cannot allocate SSL"); @@ -570,7 +606,7 @@ static int parse_host_port(url_t *url) { error(URL_PARSER_LOG_PREFIX ": specified but no port number"); return 1; } - if (port_len > 5 /* MAX port lenght is 5digit long in decimal */) { + if (port_len > 5 /* MAX port length is 5digit long in decimal */) { error(URL_PARSER_LOG_PREFIX "port # is too long"); return 1; } diff --git a/aclk/legacy/aclk_common.c b/aclk/legacy/aclk_common.c index 96f955451..7f8368e44 100644 --- a/aclk/legacy/aclk_common.c +++ b/aclk/legacy/aclk_common.c @@ -1,201 +1,18 @@ #include "aclk_common.h" -#include "../../daemon/common.h" +#include "daemon/common.h" #ifdef ENABLE_ACLK #include #endif -netdata_mutex_t aclk_shared_state_mutex = NETDATA_MUTEX_INITIALIZER; +netdata_mutex_t legacy_aclk_shared_state_mutex = NETDATA_MUTEX_INITIALIZER; -int aclk_disable_runtime = 0; -int aclk_kill_link = 0; - -struct aclk_shared_state aclk_shared_state = { +struct legacy_aclk_shared_state legacy_aclk_shared_state = { .version_neg = 0, .version_neg_wait_till = 0 }; -struct { - ACLK_PROXY_TYPE type; - const char *url_str; -} supported_proxy_types[] = { - { .type = PROXY_TYPE_SOCKS5, .url_str = "socks5" ACLK_PROXY_PROTO_ADDR_SEPARATOR }, - { .type = PROXY_TYPE_SOCKS5, .url_str = "socks5h" ACLK_PROXY_PROTO_ADDR_SEPARATOR }, - { .type = PROXY_TYPE_HTTP, .url_str = "http" ACLK_PROXY_PROTO_ADDR_SEPARATOR }, - { .type = PROXY_TYPE_UNKNOWN, .url_str = NULL }, -}; - -const char *aclk_proxy_type_to_s(ACLK_PROXY_TYPE *type) -{ - switch (*type) { - case PROXY_DISABLED: - return "disabled"; - case PROXY_TYPE_HTTP: - return "HTTP"; - case PROXY_TYPE_SOCKS5: - return "SOCKS"; - default: - return "Unknown"; - } -} - -static inline ACLK_PROXY_TYPE aclk_find_proxy(const char *string) -{ - int i = 0; - while (supported_proxy_types[i].url_str) { - if (!strncmp(supported_proxy_types[i].url_str, string, strlen(supported_proxy_types[i].url_str))) - return supported_proxy_types[i].type; - i++; - } - return PROXY_TYPE_UNKNOWN; -} - -ACLK_PROXY_TYPE aclk_verify_proxy(const char *string) -{ - if (!string) - return PROXY_TYPE_UNKNOWN; - - while (*string == 0x20 && *string!=0) // Help coverity (compiler will remove) - string++; - - if (!*string) - return PROXY_TYPE_UNKNOWN; - - return aclk_find_proxy(string); -} - -// helper function to censor user&password -// for logging purposes -void safe_log_proxy_censor(char *proxy) -{ - size_t length = strlen(proxy); - char *auth = proxy + length - 1; - char *cur; - - while ((auth >= proxy) && (*auth != '@')) - auth--; - - //if not found or @ is first char do nothing - if (auth <= proxy) - return; - - cur = strstr(proxy, ACLK_PROXY_PROTO_ADDR_SEPARATOR); - if (!cur) - cur = proxy; - else - cur += strlen(ACLK_PROXY_PROTO_ADDR_SEPARATOR); - - while (cur < auth) { - *cur = 'X'; - cur++; - } -} - -static inline void safe_log_proxy_error(char *str, const char *proxy) -{ - char *log = strdupz(proxy); - safe_log_proxy_censor(log); - error("%s Provided Value:\"%s\"", str, log); - freez(log); -} - -static inline int check_socks_environment(const char **proxy) -{ - char *tmp = getenv("socks_proxy"); - - if (!tmp) - return 1; - - if (aclk_verify_proxy(tmp) == PROXY_TYPE_SOCKS5) { - *proxy = tmp; - return 0; - } - - safe_log_proxy_error( - "Environment var \"socks_proxy\" defined but of unknown format. 
Supported syntax: \"socks5[h]://[user:pass@]host:ip\".", - tmp); - return 1; -} - -static inline int check_http_environment(const char **proxy) -{ - char *tmp = getenv("http_proxy"); - - if (!tmp) - return 1; - - if (aclk_verify_proxy(tmp) == PROXY_TYPE_HTTP) { - *proxy = tmp; - return 0; - } - - safe_log_proxy_error( - "Environment var \"http_proxy\" defined but of unknown format. Supported syntax: \"http[s]://[user:pass@]host:ip\".", - tmp); - return 1; -} - -const char *aclk_lws_wss_get_proxy_setting(ACLK_PROXY_TYPE *type) -{ - const char *proxy = config_get(CONFIG_SECTION_CLOUD, ACLK_PROXY_CONFIG_VAR, ACLK_PROXY_ENV); - *type = PROXY_DISABLED; - - if (strcmp(proxy, "none") == 0) - return proxy; - - if (strcmp(proxy, ACLK_PROXY_ENV) == 0) { - if (check_socks_environment(&proxy) == 0) { -#ifdef LWS_WITH_SOCKS5 - *type = PROXY_TYPE_SOCKS5; - return proxy; -#else - safe_log_proxy_error("socks_proxy environment variable set to use SOCKS5 proxy " - "but Libwebsockets used doesn't have SOCKS5 support built in. " - "Ignoring and checking for other options.", - proxy); -#endif - } - if (check_http_environment(&proxy) == 0) - *type = PROXY_TYPE_HTTP; - return proxy; - } - - *type = aclk_verify_proxy(proxy); -#ifndef LWS_WITH_SOCKS5 - if (*type == PROXY_TYPE_SOCKS5) { - safe_log_proxy_error( - "Config var \"" ACLK_PROXY_CONFIG_VAR - "\" set to use SOCKS5 proxy but Libwebsockets used is built without support for SOCKS proxy. ACLK will be disabled.", - proxy); - } -#endif - if (*type == PROXY_TYPE_UNKNOWN) { - *type = PROXY_DISABLED; - safe_log_proxy_error( - "Config var \"" ACLK_PROXY_CONFIG_VAR - "\" defined but of unknown format. Supported syntax: \"socks5[h]://[user:pass@]host:ip\".", - proxy); - } - - return proxy; -} - -// helper function to read settings only once (static) -// as claiming, challenge/response and ACLK -// read the same thing, no need to parse again -const char *aclk_get_proxy(ACLK_PROXY_TYPE *type) -{ - static const char *proxy = NULL; - static ACLK_PROXY_TYPE proxy_type = PROXY_NOT_SET; - - if (proxy_type == PROXY_NOT_SET) - proxy = aclk_lws_wss_get_proxy_setting(&proxy_type); - - *type = proxy_type; - return proxy; -} - int aclk_decode_base_url(char *url, char **aclk_hostname, int *aclk_port) { int pos = 0; @@ -234,27 +51,3 @@ int aclk_decode_base_url(char *url, char **aclk_hostname, int *aclk_port) info("Setting ACLK target host=%s port=%d from %s", *aclk_hostname, *aclk_port, url); return 0; } - -struct label *add_aclk_host_labels(struct label *label) { -#ifdef ENABLE_ACLK - ACLK_PROXY_TYPE aclk_proxy; - char *proxy_str; - aclk_get_proxy(&aclk_proxy); - - switch(aclk_proxy) { - case PROXY_TYPE_SOCKS5: - proxy_str = "SOCKS5"; - break; - case PROXY_TYPE_HTTP: - proxy_str = "HTTP"; - break; - default: - proxy_str = "none"; - break; - } - label = add_label_to_list(label, "_aclk_impl", "Legacy", LABEL_SOURCE_AUTO); - return add_label_to_list(label, "_aclk_proxy", proxy_str, LABEL_SOURCE_AUTO); -#else - return label; -#endif -} diff --git a/aclk/legacy/aclk_common.h b/aclk/legacy/aclk_common.h index eedb5b51c..080680ff1 100644 --- a/aclk/legacy/aclk_common.h +++ b/aclk/legacy/aclk_common.h @@ -1,12 +1,12 @@ #ifndef ACLK_COMMON_H #define ACLK_COMMON_H -#include "aclk_rrdhost_state.h" -#include "../../daemon/common.h" +#include "../aclk_rrdhost_state.h" +#include "daemon/common.h" -extern netdata_mutex_t aclk_shared_state_mutex; -#define ACLK_SHARED_STATE_LOCK netdata_mutex_lock(&aclk_shared_state_mutex) -#define ACLK_SHARED_STATE_UNLOCK 
netdata_mutex_unlock(&aclk_shared_state_mutex) +extern netdata_mutex_t legacy_aclk_shared_state_mutex; +#define legacy_aclk_shared_state_LOCK netdata_mutex_lock(&legacy_aclk_shared_state_mutex) +#define legacy_aclk_shared_state_UNLOCK netdata_mutex_unlock(&legacy_aclk_shared_state_mutex) // minimum and maximum supported version of ACLK // in this version of agent @@ -33,8 +33,8 @@ extern netdata_mutex_t aclk_shared_state_mutex; #define ACLK_IS_HOST_INITIALIZING(host) (host->aclk_state.state == ACLK_HOST_INITIALIZING) #define ACLK_IS_HOST_POPCORNING(host) (ACLK_IS_HOST_INITIALIZING(host) && host->aclk_state.t_last_popcorn_update) -extern struct aclk_shared_state { - // optimization to avoid looping trough hosts +extern struct legacy_aclk_shared_state { + // optimization to avoid looping through hosts // every time Query Thread wakes up RRDHOST *next_popcorn_host; @@ -42,31 +42,10 @@ extern struct aclk_shared_state { // protect by lock otherwise int version_neg; usec_t version_neg_wait_till; -} aclk_shared_state; - -typedef enum aclk_proxy_type { - PROXY_TYPE_UNKNOWN = 0, - PROXY_TYPE_SOCKS5, - PROXY_TYPE_HTTP, - PROXY_DISABLED, - PROXY_NOT_SET, -} ACLK_PROXY_TYPE; - -extern int aclk_kill_link; // Tells the agent to tear down the link -extern int aclk_disable_runtime; +} legacy_aclk_shared_state; const char *aclk_proxy_type_to_s(ACLK_PROXY_TYPE *type); -#define ACLK_PROXY_PROTO_ADDR_SEPARATOR "://" -#define ACLK_PROXY_ENV "env" -#define ACLK_PROXY_CONFIG_VAR "proxy" - -ACLK_PROXY_TYPE aclk_verify_proxy(const char *string); -const char *aclk_lws_wss_get_proxy_setting(ACLK_PROXY_TYPE *type); -void safe_log_proxy_censor(char *proxy); int aclk_decode_base_url(char *url, char **aclk_hostname, int *aclk_port); -const char *aclk_get_proxy(ACLK_PROXY_TYPE *type); - -struct label *add_aclk_host_labels(struct label *label); #endif //ACLK_COMMON_H diff --git a/aclk/legacy/aclk_lws_https_client.c b/aclk/legacy/aclk_lws_https_client.c index f41a230db..8a490c6f4 100644 --- a/aclk/legacy/aclk_lws_https_client.c +++ b/aclk/legacy/aclk_lws_https_client.c @@ -2,13 +2,7 @@ #define ACLK_LWS_HTTPS_CLIENT_INTERNAL #include "aclk_lws_https_client.h" - -#ifndef ACLK_NG #include "aclk_common.h" -#else -#include "../aclk.h" -#endif - #include "aclk_lws_wss_client.h" #define SMALL_BUFFER 16 diff --git a/aclk/legacy/aclk_lws_https_client.h b/aclk/legacy/aclk_lws_https_client.h index 811809dd1..5f30a37fd 100644 --- a/aclk/legacy/aclk_lws_https_client.h +++ b/aclk/legacy/aclk_lws_https_client.h @@ -3,7 +3,7 @@ #ifndef NETDATA_LWS_HTTPS_CLIENT_H #define NETDATA_LWS_HTTPS_CLIENT_H -#include "../../daemon/common.h" +#include "daemon/common.h" #include "libnetdata/libnetdata.h" #define DATAMAXLEN 1024*16 diff --git a/aclk/legacy/aclk_lws_wss_client.c b/aclk/legacy/aclk_lws_wss_client.c index f73902b30..012f2a8cc 100644 --- a/aclk/legacy/aclk_lws_wss_client.c +++ b/aclk/legacy/aclk_lws_wss_client.c @@ -3,9 +3,10 @@ #include "aclk_lws_wss_client.h" #include "libnetdata/libnetdata.h" -#include "../../daemon/common.h" +#include "daemon/common.h" #include "aclk_common.h" #include "aclk_stats.h" +#include "../aclk_proxy.h" extern int aclk_shutting_down; @@ -450,9 +451,9 @@ static int aclk_lws_wss_callback(struct lws *wsi, enum lws_callback_reasons reas if (n>=0) { data->written += n; if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.write_q_consumed += n; - ACLK_STATS_UNLOCK; + LEGACY_ACLK_STATS_LOCK; + legacy_aclk_metrics_per_sample.write_q_consumed += n; + LEGACY_ACLK_STATS_UNLOCK; } } 
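    /* editorial note: the LEGACY_ACLK_STATS_* lock macros and the separate
       legacy_aclk_metrics_per_sample instance give the legacy stack its own
       private stats state; the legacy_* renames in this patch appear intended
       to let ACLK Legacy and ACLK-NG be compiled into the same binary without
       colliding symbols. */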
//error("lws_write(req=%u,written=%u) %zu of %zu",bytes_left, rc, data->written,data->data_size,rc); @@ -473,9 +474,9 @@ static int aclk_lws_wss_callback(struct lws *wsi, enum lws_callback_reasons reas retval = 1; aclk_lws_mutex_unlock(&engine_instance->read_buf_mutex); if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.read_q_added += len; - ACLK_STATS_UNLOCK; + LEGACY_ACLK_STATS_LOCK; + legacy_aclk_metrics_per_sample.read_q_added += len; + LEGACY_ACLK_STATS_UNLOCK; } // to future myself -> do not call this while read lock is active as it will eventually @@ -553,9 +554,9 @@ int aclk_lws_wss_client_write(void *buf, size_t count) aclk_lws_mutex_unlock(&engine_instance->write_buf_mutex); if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.write_q_added += count; - ACLK_STATS_UNLOCK; + LEGACY_ACLK_STATS_LOCK; + legacy_aclk_metrics_per_sample.write_q_added += count; + LEGACY_ACLK_STATS_UNLOCK; } lws_callback_on_writable(engine_instance->lws_wsi); @@ -584,9 +585,9 @@ int aclk_lws_wss_client_read(void *buf, size_t count) engine_instance->data_to_read = 0; if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.read_q_consumed += data_to_be_read; - ACLK_STATS_UNLOCK; + LEGACY_ACLK_STATS_LOCK; + legacy_aclk_metrics_per_sample.read_q_consumed += data_to_be_read; + LEGACY_ACLK_STATS_UNLOCK; } abort: diff --git a/aclk/legacy/aclk_lws_wss_client.h b/aclk/legacy/aclk_lws_wss_client.h index eb99ee024..c68649cf3 100644 --- a/aclk/legacy/aclk_lws_wss_client.h +++ b/aclk/legacy/aclk_lws_wss_client.h @@ -58,7 +58,7 @@ struct aclk_lws_wss_engine_instance { struct lws_wss_packet_buffer *write_buffer_head; struct lws_ring *read_ringbuffer; - //flags to be readed by engine user + //flags to be read by engine user int websocket_connection_up; // currently this is by default disabled diff --git a/aclk/legacy/aclk_query.c b/aclk/legacy/aclk_query.c index 040068e87..21eae11fd 100644 --- a/aclk/legacy/aclk_query.c +++ b/aclk/legacy/aclk_query.c @@ -2,15 +2,16 @@ #include "aclk_query.h" #include "aclk_stats.h" #include "aclk_rx_msgs.h" +#include "agent_cloud_link.h" #define WEB_HDR_ACCEPT_ENC "Accept-Encoding:" -pthread_cond_t query_cond_wait = PTHREAD_COND_INITIALIZER; -pthread_mutex_t query_lock_wait = PTHREAD_MUTEX_INITIALIZER; -#define QUERY_THREAD_LOCK pthread_mutex_lock(&query_lock_wait) -#define QUERY_THREAD_UNLOCK pthread_mutex_unlock(&query_lock_wait) +#define ACLK_QUERY_THREAD_NAME "ACLK_Query" -volatile int aclk_connected = 0; +pthread_cond_t legacy_query_cond_wait = PTHREAD_COND_INITIALIZER; +pthread_mutex_t legacy_query_lock_wait = PTHREAD_MUTEX_INITIALIZER; +#define LEGACY_QUERY_THREAD_LOCK pthread_mutex_lock(&legacy_query_lock_wait) +#define LEGACY_QUERY_THREAD_UNLOCK pthread_mutex_unlock(&legacy_query_lock_wait) #ifndef __GNUC__ #pragma region ACLK_QUEUE @@ -188,7 +189,7 @@ aclk_query_find(char *topic, void *data, char *msg_id, char *query, ACLK_CMD cmd * Add a query to execute, the result will be send to the specified topic */ -int aclk_queue_query(char *topic, void *data, char *msg_id, char *query, int run_after, int internal, ACLK_CMD aclk_cmd) +int legacy_aclk_queue_query(char *topic, void *data, char *msg_id, char *query, int run_after, int internal, ACLK_CMD aclk_cmd) { struct aclk_query *new_query, *tmp_query; @@ -205,7 +206,7 @@ int aclk_queue_query(char *topic, void *data, char *msg_id, char *query, int run if (unlikely(tmp_query)) { if (tmp_query->run_after == run_after) { ACLK_QUEUE_UNLOCK; - QUERY_THREAD_WAKEUP; + 
LEGACY_QUERY_THREAD_WAKEUP; return 0; } @@ -220,9 +221,9 @@ int aclk_queue_query(char *topic, void *data, char *msg_id, char *query, int run } if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.queries_queued++; - ACLK_STATS_UNLOCK; + LEGACY_ACLK_STATS_LOCK; + legacy_aclk_metrics_per_sample.queries_queued++; + LEGACY_ACLK_STATS_UNLOCK; } new_query = callocz(1, sizeof(struct aclk_query)); @@ -255,7 +256,7 @@ int aclk_queue_query(char *topic, void *data, char *msg_id, char *query, int run aclk_queue.aclk_query_tail = new_query; aclk_queue.count++; ACLK_QUEUE_UNLOCK; - QUERY_THREAD_WAKEUP; + LEGACY_QUERY_THREAD_WAKEUP; return 0; } @@ -264,7 +265,7 @@ int aclk_queue_query(char *topic, void *data, char *msg_id, char *query, int run aclk_queue.count++; ACLK_QUEUE_UNLOCK; - QUERY_THREAD_WAKEUP; + LEGACY_QUERY_THREAD_WAKEUP; return 0; } @@ -332,12 +333,12 @@ static char *aclk_encode_response(char *src, size_t content_size, int keep_newli static usec_t aclk_web_api_request_v1(RRDHOST *host, struct web_client *w, char *url, usec_t q_created) { usec_t t = now_boottime_usec(); - aclk_metric_mat_update(&aclk_metrics_per_sample.cloud_q_recvd_to_processed, t - q_created); + legacy_aclk_metric_mat_update(&legacy_aclk_metrics_per_sample.cloud_q_recvd_to_processed, t - q_created); w->response.code = web_client_api_request_v1(host, w, url); t = now_boottime_usec() - t; - aclk_metric_mat_update(&aclk_metrics_per_sample.cloud_q_db_query_time, t); + legacy_aclk_metric_mat_update(&legacy_aclk_metrics_per_sample.cloud_q_db_query_time, t); return t; } @@ -375,7 +376,7 @@ static int aclk_execute_query(struct aclk_query *this_query) buffer_flush(local_buffer); local_buffer->contenttype = CT_APPLICATION_JSON; - aclk_create_header(local_buffer, "http", this_query->msg_id, 0, 0, aclk_shared_state.version_neg); + aclk_create_header(local_buffer, "http", this_query->msg_id, 0, 0, legacy_aclk_shared_state.version_neg); buffer_strcat(local_buffer, ",\n\t\"payload\": "); char *encoded_response = aclk_encode_response(w->response.data->buffer, w->response.data->len, 0); char *encoded_header = aclk_encode_response(w->response.header_output->buffer, w->response.header_output->len, 1); @@ -510,7 +511,7 @@ static int aclk_execute_query_v2(struct aclk_query *this_query) local_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE); local_buffer->contenttype = CT_APPLICATION_JSON; - aclk_create_header(local_buffer, "http", this_query->msg_id, 0, 0, aclk_shared_state.version_neg); + aclk_create_header(local_buffer, "http", this_query->msg_id, 0, 0, legacy_aclk_shared_state.version_neg); buffer_sprintf(local_buffer, ",\"t-exec\": %llu,\"t-rx\": %llu,\"http-code\": %d", t, this_query->created, w->response.code); buffer_strcat(local_buffer, "}\x0D\x0A\x0D\x0A"); buffer_strcat(local_buffer, w->response.header_output->buffer); @@ -607,7 +608,7 @@ static int aclk_process_query(struct aclk_query_thread *t_info) case ACLK_CMD_ONCONNECT: ACLK_HOST_PTR_COMPULSORY("ACLK_CMD_ONCONNECT"); #if ACLK_VERSION_MIN < ACLK_V_CHILDRENSTATE - if (host != localhost && aclk_shared_state.version_neg < ACLK_V_CHILDRENSTATE) { + if (host != localhost && legacy_aclk_shared_state.version_neg < ACLK_V_CHILDRENSTATE) { error("We are not allowed to send connect message in ACLK version before %d", ACLK_V_CHILDRENSTATE); break; } @@ -638,7 +639,7 @@ static int aclk_process_query(struct aclk_query_thread *t_info) debug(D_ACLK, "EXECUTING a chart delete command"); //TODO: This send the info metadata for now - 
aclk_send_info_metadata(ACLK_METADATA_SENT, host); + legacy_aclk_send_info_metadata(ACLK_METADATA_SENT, host); break; case ACLK_CMD_ALARM: @@ -673,10 +674,10 @@ static int aclk_process_query(struct aclk_query_thread *t_info) debug(D_ACLK, "Query #%ld (%s) done", query_count, this_query->topic); if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.queries_dispatched++; - aclk_queries_per_thread[t_info->idx]++; - ACLK_STATS_UNLOCK; + LEGACY_ACLK_STATS_LOCK; + legacy_aclk_metrics_per_sample.queries_dispatched++; + legacy_aclk_queries_per_thread[t_info->idx]++; + LEGACY_ACLK_STATS_UNLOCK; if (likely(getrusage_called_this_tick[t_info->idx] < MAX_GETRUSAGE_CALLS_PER_TICK)) { getrusage(RUSAGE_THREAD, &rusage_per_thread[t_info->idx]); @@ -690,7 +691,7 @@ static int aclk_process_query(struct aclk_query_thread *t_info) return 1; } -void aclk_query_threads_cleanup(struct aclk_query_threads *query_threads) +void legacy_aclk_query_threads_cleanup(struct aclk_query_threads *query_threads) { if (query_threads && query_threads->thread_list) { for (int i = 0; i < query_threads->count; i++) { @@ -707,8 +708,8 @@ void aclk_query_threads_cleanup(struct aclk_query_threads *query_threads) } while (this_query); } -#define TASK_LEN_MAX 16 -void aclk_query_threads_start(struct aclk_query_threads *query_threads) +#define TASK_LEN_MAX 22 +void legacy_aclk_query_threads_start(struct aclk_query_threads *query_threads) { info("Starting %d query threads.", query_threads->count); @@ -717,10 +718,10 @@ void aclk_query_threads_start(struct aclk_query_threads *query_threads) for (int i = 0; i < query_threads->count; i++) { query_threads->thread_list[i].idx = i; //thread needs to know its index for statistics - if(unlikely(snprintf(thread_name, TASK_LEN_MAX, "%s_%d", ACLK_THREAD_NAME, i) < 0)) + if(unlikely(snprintfz(thread_name, TASK_LEN_MAX, "%s_%d", ACLK_QUERY_THREAD_NAME, i) < 0)) error("snprintf encoding error"); netdata_thread_create( - &query_threads->thread_list[i].thread, thread_name, NETDATA_THREAD_OPTION_JOINABLE, aclk_query_main_thread, + &query_threads->thread_list[i].thread, thread_name, NETDATA_THREAD_OPTION_JOINABLE, legacy_aclk_query_main_thread, &query_threads->thread_list[i]); } } @@ -730,10 +731,10 @@ void aclk_query_threads_start(struct aclk_query_threads *query_threads) * returns actual/updated popcorning state */ -ACLK_POPCORNING_STATE aclk_host_popcorn_check(RRDHOST *host) +ACLK_AGENT_STATE aclk_host_popcorn_check(RRDHOST *host) { rrdhost_aclk_state_lock(host); - ACLK_POPCORNING_STATE ret = host->aclk_state.state; + ACLK_AGENT_STATE ret = host->aclk_state.state; if (host->aclk_state.state != ACLK_HOST_INITIALIZING){ rrdhost_aclk_state_unlock(host); return ret; @@ -766,7 +767,7 @@ ACLK_POPCORNING_STATE aclk_host_popcorn_check(RRDHOST *host) * of no new collectors coming in in order to mark the agent * as stable (set agent_state = AGENT_STABLE) */ -void *aclk_query_main_thread(void *ptr) +void *legacy_aclk_query_main_thread(void *ptr) { struct aclk_query_thread *info = ptr; @@ -785,25 +786,24 @@ void *aclk_query_main_thread(void *ptr) sleep(1); continue; } - ACLK_SHARED_STATE_LOCK; - if (unlikely(!aclk_shared_state.version_neg)) { - if (!aclk_shared_state.version_neg_wait_till || aclk_shared_state.version_neg_wait_till > now_monotonic_usec()) { - ACLK_SHARED_STATE_UNLOCK; + legacy_aclk_shared_state_LOCK; + if (unlikely(!legacy_aclk_shared_state.version_neg)) { + if (!legacy_aclk_shared_state.version_neg_wait_till || legacy_aclk_shared_state.version_neg_wait_till > now_monotonic_usec()) 
{ + legacy_aclk_shared_state_UNLOCK; info("Waiting for ACLK Version Negotiation message from Cloud"); sleep(1); continue; } - errno = 0; - error("ACLK version negotiation failed. No reply to \"hello\" with \"version\" from cloud in time of %ds." + info("ACLK version negotiation failed (this is expected). No reply to \"hello\" with \"version\" from cloud within %ds." " Reverting to default ACLK version of %d.", VERSION_NEG_TIMEOUT, ACLK_VERSION_MIN); - aclk_shared_state.version_neg = ACLK_VERSION_MIN; - aclk_set_rx_handlers(aclk_shared_state.version_neg); + legacy_aclk_shared_state.version_neg = ACLK_VERSION_MIN; + aclk_set_rx_handlers(legacy_aclk_shared_state.version_neg); } - ACLK_SHARED_STATE_UNLOCK; + legacy_aclk_shared_state_UNLOCK; rrdhost_aclk_state_lock(localhost); if (unlikely(localhost->aclk_state.metadata == ACLK_METADATA_REQUIRED)) { - if (unlikely(aclk_queue_query("on_connect", localhost, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT))) { + if (unlikely(legacy_aclk_queue_query("on_connect", localhost, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT))) { rrdhost_aclk_state_unlock(localhost); errno = 0; error("ACLK failed to queue on_connect command"); @@ -814,25 +814,25 @@ void *aclk_query_main_thread(void *ptr) } rrdhost_aclk_state_unlock(localhost); - ACLK_SHARED_STATE_LOCK; - if (aclk_shared_state.next_popcorn_host && aclk_host_popcorn_check(aclk_shared_state.next_popcorn_host) == ACLK_HOST_STABLE) { - aclk_queue_query("on_connect", aclk_shared_state.next_popcorn_host, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT); - aclk_shared_state.next_popcorn_host = NULL; + legacy_aclk_shared_state_LOCK; + if (legacy_aclk_shared_state.next_popcorn_host && aclk_host_popcorn_check(legacy_aclk_shared_state.next_popcorn_host) == ACLK_HOST_STABLE) { + legacy_aclk_queue_query("on_connect", legacy_aclk_shared_state.next_popcorn_host, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT); + legacy_aclk_shared_state.next_popcorn_host = NULL; aclk_update_next_child_to_popcorn(); } - ACLK_SHARED_STATE_UNLOCK; + legacy_aclk_shared_state_UNLOCK; while (aclk_process_query(info)) { // Process all commands }; - QUERY_THREAD_LOCK; + LEGACY_QUERY_THREAD_LOCK; // TODO: Need to check if there are queries awaiting already - if (unlikely(pthread_cond_wait(&query_cond_wait, &query_lock_wait))) + if (unlikely(pthread_cond_wait(&legacy_query_cond_wait, &legacy_query_lock_wait))) sleep_usec(USEC_PER_SEC * 1); - QUERY_THREAD_UNLOCK; + LEGACY_QUERY_THREAD_UNLOCK; } return NULL; diff --git a/aclk/legacy/aclk_query.h b/aclk/legacy/aclk_query.h index 026985c8d..622b66e2c 100644 --- a/aclk/legacy/aclk_query.h +++ b/aclk/legacy/aclk_query.h @@ -10,14 +10,11 @@ #define MAX_GETRUSAGE_CALLS_PER_TICK 5 // Maximum number of times getrusage can be called per tick, per thread.
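/*
 * A minimal, self-contained sketch of the wakeup/wait handshake behind the
 * condition-variable externs and the LEGACY_QUERY_THREAD_* macros declared
 * just below. The demo_* names are illustrative stand-ins, not Netdata
 * symbols; the sketch also adds a predicate loop, which is standard pthread
 * practice, where the code above uses a timed fallback instead.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_cond_t  demo_cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_work_queued = 0;

static void *demo_worker(void *arg)             /* query-thread side */
{
    (void)arg;
    pthread_mutex_lock(&demo_lock);             /* cf. LEGACY_QUERY_THREAD_LOCK */
    while (!demo_work_queued)                   /* guards against spurious wakeups */
        pthread_cond_wait(&demo_cond, &demo_lock);
    demo_work_queued = 0;
    pthread_mutex_unlock(&demo_lock);           /* cf. LEGACY_QUERY_THREAD_UNLOCK */
    puts("worker: woke up, draining queue");
    return NULL;
}

static void demo_enqueue(void)                  /* producer side */
{
    pthread_mutex_lock(&demo_lock);
    demo_work_queued = 1;
    pthread_mutex_unlock(&demo_lock);
    pthread_cond_signal(&demo_cond);            /* cf. LEGACY_QUERY_THREAD_WAKEUP */
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, demo_worker, NULL);
    demo_enqueue();
    pthread_join(t, NULL);
    return 0;
}
/* At shutdown the real code broadcasts instead (LEGACY_QUERY_THREAD_WAKEUP_ALL)
   so that every sleeping query thread exits, as seen in legacy_aclk_main(). */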
-extern pthread_cond_t query_cond_wait; -extern pthread_mutex_t query_lock_wait; +extern pthread_cond_t legacy_query_cond_wait; +extern pthread_mutex_t legacy_query_lock_wait; extern uint8_t *getrusage_called_this_tick; -#define QUERY_THREAD_WAKEUP pthread_cond_signal(&query_cond_wait) -#define QUERY_THREAD_WAKEUP_ALL pthread_cond_broadcast(&query_cond_wait) - -extern volatile int aclk_connected; - +#define LEGACY_QUERY_THREAD_WAKEUP pthread_cond_signal(&legacy_query_cond_wait) +#define LEGACY_QUERY_THREAD_WAKEUP_ALL pthread_cond_broadcast(&legacy_query_cond_wait) struct aclk_query_thread { netdata_thread_t thread; int idx; @@ -34,11 +31,11 @@ struct aclk_cloud_req_v2 { char *query_endpoint; }; -void *aclk_query_main_thread(void *ptr); -int aclk_queue_query(char *token, void *data, char *msg_type, char *query, int run_after, int internal, ACLK_CMD cmd); +void *legacy_aclk_query_main_thread(void *ptr); +int legacy_aclk_queue_query(char *token, void *data, char *msg_type, char *query, int run_after, int internal, ACLK_CMD cmd); -void aclk_query_threads_start(struct aclk_query_threads *query_threads); -void aclk_query_threads_cleanup(struct aclk_query_threads *query_threads); +void legacy_aclk_query_threads_start(struct aclk_query_threads *query_threads); +void legacy_aclk_query_threads_cleanup(struct aclk_query_threads *query_threads); unsigned int aclk_query_size(); #endif //NETDATA_AGENT_CLOUD_LINK_H diff --git a/aclk/legacy/aclk_rrdhost_state.h b/aclk/legacy/aclk_rrdhost_state.h deleted file mode 100644 index 7ab3a502e..000000000 --- a/aclk/legacy/aclk_rrdhost_state.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef ACLK_RRDHOST_STATE_H -#define ACLK_RRDHOST_STATE_H - -#include "../../libnetdata/libnetdata.h" - -typedef enum aclk_cmd { - ACLK_CMD_CLOUD, - ACLK_CMD_ONCONNECT, - ACLK_CMD_INFO, - ACLK_CMD_CHART, - ACLK_CMD_CHARTDEL, - ACLK_CMD_ALARM, - ACLK_CMD_CLOUD_QUERY_2, - ACLK_CMD_CHILD_CONNECT, - ACLK_CMD_CHILD_DISCONNECT -} ACLK_CMD; - -typedef enum aclk_metadata_state { - ACLK_METADATA_REQUIRED, - ACLK_METADATA_CMD_QUEUED, - ACLK_METADATA_SENT -} ACLK_METADATA_STATE; - -typedef enum aclk_agent_state { - ACLK_HOST_INITIALIZING, - ACLK_HOST_STABLE -} ACLK_POPCORNING_STATE; - -typedef struct aclk_rrdhost_state { - char *claimed_id; // Claimed ID if host has one otherwise NULL - -#ifdef ENABLE_ACLK - // per child popcorning - ACLK_POPCORNING_STATE state; - ACLK_METADATA_STATE metadata; - - time_t timestamp_created; - time_t t_last_popcorn_update; -#endif /* ENABLE_ACLK */ -} aclk_rrdhost_state; - -#endif /* ACLK_RRDHOST_STATE_H */ diff --git a/aclk/legacy/aclk_rx_msgs.c b/aclk/legacy/aclk_rx_msgs.c index 68dad81e0..d4778bbcf 100644 --- a/aclk/legacy/aclk_rx_msgs.c +++ b/aclk/legacy/aclk_rx_msgs.c @@ -4,6 +4,7 @@ #include "aclk_common.h" #include "aclk_stats.h" #include "aclk_query.h" +#include "agent_cloud_link.h" #ifndef UUID_STR_LEN #define UUID_STR_LEN 37 @@ -107,7 +108,7 @@ static int aclk_handle_cloud_request_v1(struct aclk_request *cloud_to_agent, cha error( "Received \"http\" message from Cloud with version %d, but ACLK version %d is used", cloud_to_agent->version, - aclk_shared_state.version_neg); + legacy_aclk_shared_state.version_neg); return 1; } @@ -126,14 +127,14 @@ static int aclk_handle_cloud_request_v1(struct aclk_request *cloud_to_agent, cha return 1; } - if (unlikely(aclk_queue_query(cloud_to_agent->callback_topic, NULL, cloud_to_agent->msg_id, cloud_to_agent->payload, 0, 0, ACLK_CMD_CLOUD))) + if (unlikely(legacy_aclk_queue_query(cloud_to_agent->callback_topic, NULL, 
cloud_to_agent->msg_id, cloud_to_agent->payload, 0, 0, ACLK_CMD_CLOUD))) debug(D_ACLK, "ACLK failed to queue incoming \"http\" message"); if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.cloud_req_v1++; - aclk_metrics_per_sample.cloud_req_ok++; - ACLK_STATS_UNLOCK; + LEGACY_ACLK_STATS_LOCK; + legacy_aclk_metrics_per_sample.cloud_req_v1++; + legacy_aclk_metrics_per_sample.cloud_req_ok++; + LEGACY_ACLK_STATS_UNLOCK; } return 0; @@ -181,11 +182,11 @@ static int aclk_handle_cloud_request_v2(struct aclk_request *cloud_to_agent, cha } // we do this here due to cloud_req being taken over by query thread - // which if crazy quick can free it after aclk_queue_query + // which if crazy quick can free it after legacy_aclk_queue_query stat_idx = aclk_cloud_req_type_to_idx(cloud_req->query_endpoint); - // aclk_queue_query takes ownership of data pointer - if (unlikely(aclk_queue_query( + // legacy_aclk_queue_query takes ownership of data pointer + if (unlikely(legacy_aclk_queue_query( cloud_to_agent->callback_topic, cloud_req, cloud_to_agent->msg_id, cloud_to_agent->payload, 0, 0, ACLK_CMD_CLOUD_QUERY_2))) { error("ACLK failed to queue incoming \"http\" v2 message"); @@ -193,11 +194,11 @@ static int aclk_handle_cloud_request_v2(struct aclk_request *cloud_to_agent, cha } if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.cloud_req_v2++; - aclk_metrics_per_sample.cloud_req_ok++; - aclk_metrics_per_sample.cloud_req_by_type[stat_idx]++; - ACLK_STATS_UNLOCK; + LEGACY_ACLK_STATS_LOCK; + legacy_aclk_metrics_per_sample.cloud_req_v2++; + legacy_aclk_metrics_per_sample.cloud_req_ok++; + legacy_aclk_metrics_per_sample.cloud_req_by_type[stat_idx]++; + LEGACY_ACLK_STATS_UNLOCK; } return 0; @@ -258,19 +259,19 @@ static int aclk_handle_version_response(struct aclk_request *cloud_to_agent, cha version = MIN(cloud_to_agent->max_version, ACLK_VERSION_MAX); - ACLK_SHARED_STATE_LOCK; - if (unlikely(now_monotonic_usec() > aclk_shared_state.version_neg_wait_till)) { + legacy_aclk_shared_state_LOCK; + if (unlikely(now_monotonic_usec() > legacy_aclk_shared_state.version_neg_wait_till)) { errno = 0; error("The \"version\" message came too late ignoring."); goto err_cleanup; } - if (unlikely(aclk_shared_state.version_neg)) { + if (unlikely(legacy_aclk_shared_state.version_neg)) { errno = 0; - error("Version has already been set to %d", aclk_shared_state.version_neg); + error("Version has already been set to %d", legacy_aclk_shared_state.version_neg); goto err_cleanup; } - aclk_shared_state.version_neg = version; - ACLK_SHARED_STATE_UNLOCK; + legacy_aclk_shared_state.version_neg = version; + legacy_aclk_shared_state_UNLOCK; info("Choosing version %d of ACLK", version); @@ -279,7 +280,7 @@ static int aclk_handle_version_response(struct aclk_request *cloud_to_agent, cha return 0; err_cleanup: - ACLK_SHARED_STATE_UNLOCK; + legacy_aclk_shared_state_UNLOCK; return 1; } @@ -288,31 +289,31 @@ typedef struct aclk_incoming_msg_type{ int(*fnc)(struct aclk_request *, char *); }aclk_incoming_msg_type; -aclk_incoming_msg_type aclk_incoming_msg_types_v1[] = { +aclk_incoming_msg_type legacy_aclk_incoming_msg_types_v1[] = { { .name = "http", .fnc = aclk_handle_cloud_request_v1 }, { .name = "version", .fnc = aclk_handle_version_response }, { .name = NULL, .fnc = NULL } }; -aclk_incoming_msg_type aclk_incoming_msg_types_compression[] = { +aclk_incoming_msg_type legacy_aclk_incoming_msg_types_compression[] = { { .name = "http", .fnc = aclk_handle_cloud_request_v2 }, { .name = "version", .fnc = 
aclk_handle_version_response }, { .name = NULL, .fnc = NULL } }; -struct aclk_incoming_msg_type *aclk_incoming_msg_types = aclk_incoming_msg_types_v1; +struct aclk_incoming_msg_type *legacy_aclk_incoming_msg_types = legacy_aclk_incoming_msg_types_v1; void aclk_set_rx_handlers(int version) { if(version >= ACLK_V_COMPRESSION) { - aclk_incoming_msg_types = aclk_incoming_msg_types_compression; + legacy_aclk_incoming_msg_types = legacy_aclk_incoming_msg_types_compression; return; } - aclk_incoming_msg_types = aclk_incoming_msg_types_v1; + legacy_aclk_incoming_msg_types = legacy_aclk_incoming_msg_types_v1; } -int aclk_handle_cloud_message(char *payload) +int legacy_aclk_handle_cloud_message(char *payload) { struct aclk_request cloud_to_agent; memset(&cloud_to_agent, 0, sizeof(struct aclk_request)); @@ -325,7 +326,7 @@ int aclk_handle_cloud_message(char *payload) debug(D_ACLK, "ACLK incoming message (%s)", payload); - int rc = json_parse(payload, &cloud_to_agent, cloud_to_agent_parse); + int rc = json_parse(payload, &cloud_to_agent, legacy_cloud_to_agent_parse); if (unlikely(rc != JSON_OK)) { errno = 0; @@ -339,22 +340,22 @@ int aclk_handle_cloud_message(char *payload) goto err_cleanup; } - if (!aclk_shared_state.version_neg && strcmp(cloud_to_agent.type_id, "version")) { + if (!legacy_aclk_shared_state.version_neg && strcmp(cloud_to_agent.type_id, "version")) { error("Only \"version\" message is allowed before popcorning and version negotiation is finished. Ignoring"); goto err_cleanup; } - for (int i = 0; aclk_incoming_msg_types[i].name; i++) { - if (strcmp(cloud_to_agent.type_id, aclk_incoming_msg_types[i].name) == 0) { - if (likely(!aclk_incoming_msg_types[i].fnc(&cloud_to_agent, payload))) { + for (int i = 0; legacy_aclk_incoming_msg_types[i].name; i++) { + if (strcmp(cloud_to_agent.type_id, legacy_aclk_incoming_msg_types[i].name) == 0) { + if (likely(!legacy_aclk_incoming_msg_types[i].fnc(&cloud_to_agent, payload))) { // in case of success handler is supposed to clean up after itself // or as in the case of aclk_handle_cloud_request take // ownership of the pointers (done to avoid copying) - // see what `aclk_queue_query` parameter `internal` does + // see what `legacy_aclk_queue_query` parameter `internal` does // NEVER CONTINUE THIS LOOP AFTER CALLING FUNCTION!!! 
// msg handlers (namely aclk_handle_version_response) - // can freely change what aclk_incoming_msg_types points to + // can freely change what legacy_aclk_incoming_msg_types points to // so either exit or restart this for loop freez(cloud_to_agent.type_id); return 0; @@ -378,9 +379,9 @@ err_cleanup: err_cleanup_nojson: if (aclk_stats_enabled) { - ACLK_STATS_LOCK; - aclk_metrics_per_sample.cloud_req_err++; - ACLK_STATS_UNLOCK; + LEGACY_ACLK_STATS_LOCK; + legacy_aclk_metrics_per_sample.cloud_req_err++; + LEGACY_ACLK_STATS_UNLOCK; } return 1; diff --git a/aclk/legacy/aclk_rx_msgs.h b/aclk/legacy/aclk_rx_msgs.h index 3095e41a7..f1f99114f 100644 --- a/aclk/legacy/aclk_rx_msgs.h +++ b/aclk/legacy/aclk_rx_msgs.h @@ -3,10 +3,10 @@ #ifndef NETDATA_ACLK_RX_MSGS_H #define NETDATA_ACLK_RX_MSGS_H -#include "../../daemon/common.h" +#include "daemon/common.h" #include "libnetdata/libnetdata.h" -int aclk_handle_cloud_message(char *payload); +int legacy_aclk_handle_cloud_message(char *payload); void aclk_set_rx_handlers(int version); diff --git a/aclk/legacy/aclk_stats.c b/aclk/legacy/aclk_stats.c index 88679cb3c..fbbb322a1 100644 --- a/aclk/legacy/aclk_stats.c +++ b/aclk/legacy/aclk_stats.c @@ -1,33 +1,31 @@ #include "aclk_stats.h" -netdata_mutex_t aclk_stats_mutex = NETDATA_MUTEX_INITIALIZER; +netdata_mutex_t legacy_aclk_stats_mutex = NETDATA_MUTEX_INITIALIZER; -int aclk_stats_enabled; - -int query_thread_count; +int legacy_query_thread_count; // data ACLK stats need per query thread -struct aclk_qt_data { +struct legacy_aclk_qt_data { RRDDIM *dim; -} *aclk_qt_data = NULL; +} *legacy_aclk_qt_data = NULL; // ACLK per query thread cpu stats -struct aclk_cpu_data { +struct legacy_aclk_cpu_data { RRDDIM *user; RRDDIM *system; RRDSET *st; -} *aclk_cpu_data = NULL; +} *legacy_aclk_cpu_data = NULL; -uint32_t *aclk_queries_per_thread = NULL; -uint32_t *aclk_queries_per_thread_sample = NULL; +uint32_t *legacy_aclk_queries_per_thread = NULL; +uint32_t *legacy_aclk_queries_per_thread_sample = NULL; struct rusage *rusage_per_thread; uint8_t *getrusage_called_this_tick = NULL; -struct aclk_metrics aclk_metrics = { +static struct legacy_aclk_metrics legacy_aclk_metrics = { .online = 0, }; -struct aclk_metrics_per_sample aclk_metrics_per_sample; +struct legacy_aclk_metrics_per_sample legacy_aclk_metrics_per_sample; struct aclk_mat_metrics aclk_mat_metrics = { #ifdef NETDATA_INTERNAL_CHECKS @@ -61,20 +59,20 @@ struct aclk_mat_metrics aclk_mat_metrics = { "by query thread (just before passing to the database)." 
} }; -void aclk_metric_mat_update(struct aclk_metric_mat_data *metric, usec_t measurement) +void legacy_aclk_metric_mat_update(struct aclk_metric_mat_data *metric, usec_t measurement) { if (aclk_stats_enabled) { - ACLK_STATS_LOCK; + LEGACY_ACLK_STATS_LOCK; if (metric->max < measurement) metric->max = measurement; metric->total += measurement; metric->count++; - ACLK_STATS_UNLOCK; + LEGACY_ACLK_STATS_UNLOCK; } } -static void aclk_stats_collect(struct aclk_metrics_per_sample *per_sample, struct aclk_metrics *permanent) +static void aclk_stats_collect(struct legacy_aclk_metrics_per_sample *per_sample, struct legacy_aclk_metrics *permanent) { static RRDSET *st_aclkstats = NULL; static RRDDIM *rd_online_status = NULL; @@ -93,7 +91,7 @@ static void aclk_stats_collect(struct aclk_metrics_per_sample *per_sample, struc rrdset_done(st_aclkstats); } -static void aclk_stats_query_queue(struct aclk_metrics_per_sample *per_sample) +static void aclk_stats_query_queue(struct legacy_aclk_metrics_per_sample *per_sample) { static RRDSET *st_query_thread = NULL; static RRDDIM *rd_queued = NULL; @@ -115,7 +113,7 @@ static void aclk_stats_query_queue(struct aclk_metrics_per_sample *per_sample) rrdset_done(st_query_thread); } -static void aclk_stats_write_q(struct aclk_metrics_per_sample *per_sample) +static void aclk_stats_write_q(struct legacy_aclk_metrics_per_sample *per_sample) { static RRDSET *st = NULL; static RRDDIM *rd_wq_add = NULL; @@ -137,7 +135,7 @@ static void aclk_stats_write_q(struct aclk_metrics_per_sample *per_sample) rrdset_done(st); } -static void aclk_stats_read_q(struct aclk_metrics_per_sample *per_sample) +static void aclk_stats_read_q(struct legacy_aclk_metrics_per_sample *per_sample) { static RRDSET *st = NULL; static RRDDIM *rd_rq_add = NULL; @@ -159,7 +157,7 @@ static void aclk_stats_read_q(struct aclk_metrics_per_sample *per_sample) rrdset_done(st); } -static void aclk_stats_cloud_req(struct aclk_metrics_per_sample *per_sample) +static void aclk_stats_cloud_req(struct legacy_aclk_metrics_per_sample *per_sample) { static RRDSET *st = NULL; static RRDDIM *rd_rq_ok = NULL; @@ -181,7 +179,7 @@ static void aclk_stats_cloud_req(struct aclk_metrics_per_sample *per_sample) rrdset_done(st); } -static void aclk_stats_cloud_req_version(struct aclk_metrics_per_sample *per_sample) +static void aclk_stats_cloud_req_version(struct legacy_aclk_metrics_per_sample *per_sample) { static RRDSET *st = NULL; static RRDDIM *rd_rq_v1 = NULL; @@ -223,7 +221,7 @@ int aclk_cloud_req_type_to_idx(const char *name) return 0; } -static void aclk_stats_cloud_req_cmd(struct aclk_metrics_per_sample *per_sample) +static void aclk_stats_cloud_req_cmd(struct legacy_aclk_metrics_per_sample *per_sample) { static RRDSET *st; static int initialized = 0; @@ -246,7 +244,7 @@ static void aclk_stats_cloud_req_cmd(struct aclk_metrics_per_sample *per_sample) rrdset_done(st); } -#define MAX_DIM_NAME 16 +#define MAX_DIM_NAME 22 static void aclk_stats_query_threads(uint32_t *queries_per_thread) { static RRDSET *st = NULL; @@ -258,16 +256,16 @@ static void aclk_stats_query_threads(uint32_t *queries_per_thread) "netdata", "aclk_query_threads", NULL, "aclk", NULL, "Queries Processed Per Thread", "req/s", "netdata", "stats", 200008, localhost->rrd_update_every, RRDSET_TYPE_STACKED); - for (int i = 0; i < query_thread_count; i++) { - if (snprintf(dim_name, MAX_DIM_NAME, "Query %d", i) < 0) + for (int i = 0; i < legacy_query_thread_count; i++) { + if (snprintfz(dim_name, MAX_DIM_NAME, "Query %d", i) < 0) error("snprintf encoding error"); - 
aclk_qt_data[i].dim = rrddim_add(st, dim_name, NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); + legacy_aclk_qt_data[i].dim = rrddim_add(st, dim_name, NULL, 1, localhost->rrd_update_every, RRD_ALGORITHM_ABSOLUTE); } } else rrdset_next(st); - for (int i = 0; i < query_thread_count; i++) { - rrddim_set_by_pointer(st, aclk_qt_data[i].dim, queries_per_thread[i]); + for (int i = 0; i < legacy_query_thread_count; i++) { + rrddim_set_by_pointer(st, legacy_aclk_qt_data[i].dim, queries_per_thread[i]); } rrdset_done(st); @@ -301,59 +299,59 @@ static void aclk_stats_cpu_threads(void) char id[100 + 1]; char title[100 + 1]; - for (int i = 0; i < query_thread_count; i++) { - if (unlikely(!aclk_cpu_data[i].st)) { + for (int i = 0; i < legacy_query_thread_count; i++) { + if (unlikely(!legacy_aclk_cpu_data[i].st)) { snprintfz(id, 100, "aclk_thread%d_cpu", i); snprintfz(title, 100, "Cpu Usage For Thread No %d", i); - aclk_cpu_data[i].st = rrdset_create_localhost( + legacy_aclk_cpu_data[i].st = rrdset_create_localhost( "netdata", id, NULL, "aclk", NULL, title, "milliseconds/s", "netdata", "stats", 200020 + i, localhost->rrd_update_every, RRDSET_TYPE_STACKED); - aclk_cpu_data[i].user = rrddim_add(aclk_cpu_data[i].st, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); - aclk_cpu_data[i].system = rrddim_add(aclk_cpu_data[i].st, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + legacy_aclk_cpu_data[i].user = rrddim_add(legacy_aclk_cpu_data[i].st, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); + legacy_aclk_cpu_data[i].system = rrddim_add(legacy_aclk_cpu_data[i].st, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL); } else - rrdset_next(aclk_cpu_data[i].st); + rrdset_next(legacy_aclk_cpu_data[i].st); } - for (int i = 0; i < query_thread_count; i++) { - rrddim_set_by_pointer(aclk_cpu_data[i].st, aclk_cpu_data[i].user, rusage_per_thread[i].ru_utime.tv_sec * 1000000ULL + rusage_per_thread[i].ru_utime.tv_usec); - rrddim_set_by_pointer(aclk_cpu_data[i].st, aclk_cpu_data[i].system, rusage_per_thread[i].ru_stime.tv_sec * 1000000ULL + rusage_per_thread[i].ru_stime.tv_usec); - rrdset_done(aclk_cpu_data[i].st); + for (int i = 0; i < legacy_query_thread_count; i++) { + rrddim_set_by_pointer(legacy_aclk_cpu_data[i].st, legacy_aclk_cpu_data[i].user, rusage_per_thread[i].ru_utime.tv_sec * 1000000ULL + rusage_per_thread[i].ru_utime.tv_usec); + rrddim_set_by_pointer(legacy_aclk_cpu_data[i].st, legacy_aclk_cpu_data[i].system, rusage_per_thread[i].ru_stime.tv_sec * 1000000ULL + rusage_per_thread[i].ru_stime.tv_usec); + rrdset_done(legacy_aclk_cpu_data[i].st); } } -void aclk_stats_thread_cleanup() +void legacy_aclk_stats_thread_cleanup() { - freez(aclk_qt_data); - freez(aclk_queries_per_thread); - freez(aclk_queries_per_thread_sample); - freez(aclk_cpu_data); + freez(legacy_aclk_qt_data); + freez(legacy_aclk_queries_per_thread); + freez(legacy_aclk_queries_per_thread_sample); + freez(legacy_aclk_cpu_data); freez(rusage_per_thread); } -void *aclk_stats_main_thread(void *ptr) +void *legacy_aclk_stats_main_thread(void *ptr) { struct aclk_stats_thread *args = ptr; - query_thread_count = args->query_thread_count; - aclk_qt_data = callocz(query_thread_count, sizeof(struct aclk_qt_data)); - aclk_cpu_data = callocz(query_thread_count, sizeof(struct aclk_cpu_data)); - aclk_queries_per_thread = callocz(query_thread_count, sizeof(uint32_t)); - aclk_queries_per_thread_sample = callocz(query_thread_count, sizeof(uint32_t)); - rusage_per_thread = callocz(query_thread_count, sizeof(struct rusage)); - 
getrusage_called_this_tick = callocz(query_thread_count, sizeof(uint8_t)); + legacy_query_thread_count = args->query_thread_count; + legacy_aclk_qt_data = callocz(legacy_query_thread_count, sizeof(struct legacy_aclk_qt_data)); + legacy_aclk_cpu_data = callocz(legacy_query_thread_count, sizeof(struct legacy_aclk_cpu_data)); + legacy_aclk_queries_per_thread = callocz(legacy_query_thread_count, sizeof(uint32_t)); + legacy_aclk_queries_per_thread_sample = callocz(legacy_query_thread_count, sizeof(uint32_t)); + rusage_per_thread = callocz(legacy_query_thread_count, sizeof(struct rusage)); + getrusage_called_this_tick = callocz(legacy_query_thread_count, sizeof(uint8_t)); heartbeat_t hb; heartbeat_init(&hb); usec_t step_ut = localhost->rrd_update_every * USEC_PER_SEC; - memset(&aclk_metrics_per_sample, 0, sizeof(struct aclk_metrics_per_sample)); + memset(&legacy_aclk_metrics_per_sample, 0, sizeof(struct legacy_aclk_metrics_per_sample)); - struct aclk_metrics_per_sample per_sample; - struct aclk_metrics permanent; + struct legacy_aclk_metrics_per_sample per_sample; + struct legacy_aclk_metrics permanent; while (!netdata_exit) { netdata_thread_testcancel(); @@ -363,17 +361,17 @@ void *aclk_stats_main_thread(void *ptr) heartbeat_next(&hb, step_ut); if (netdata_exit) break; - ACLK_STATS_LOCK; + LEGACY_ACLK_STATS_LOCK; // to not hold lock longer than necessary, especially not to hold it // during database rrd* operations - memcpy(&per_sample, &aclk_metrics_per_sample, sizeof(struct aclk_metrics_per_sample)); - memcpy(&permanent, &aclk_metrics, sizeof(struct aclk_metrics)); - memset(&aclk_metrics_per_sample, 0, sizeof(struct aclk_metrics_per_sample)); + memcpy(&per_sample, &legacy_aclk_metrics_per_sample, sizeof(struct legacy_aclk_metrics_per_sample)); + memcpy(&permanent, &legacy_aclk_metrics, sizeof(struct legacy_aclk_metrics)); + memset(&legacy_aclk_metrics_per_sample, 0, sizeof(struct legacy_aclk_metrics_per_sample)); - memcpy(aclk_queries_per_thread_sample, aclk_queries_per_thread, sizeof(uint32_t) * query_thread_count); - memset(aclk_queries_per_thread, 0, sizeof(uint32_t) * query_thread_count); - memset(getrusage_called_this_tick, 0, sizeof(uint8_t) * query_thread_count); - ACLK_STATS_UNLOCK; + memcpy(legacy_aclk_queries_per_thread_sample, legacy_aclk_queries_per_thread, sizeof(uint32_t) * legacy_query_thread_count); + memset(legacy_aclk_queries_per_thread, 0, sizeof(uint32_t) * legacy_query_thread_count); + memset(getrusage_called_this_tick, 0, sizeof(uint8_t) * legacy_query_thread_count); + LEGACY_ACLK_STATS_UNLOCK; aclk_stats_collect(&per_sample, &permanent); aclk_stats_query_queue(&per_sample); @@ -386,7 +384,7 @@ void *aclk_stats_main_thread(void *ptr) aclk_stats_cloud_req_cmd(&per_sample); - aclk_stats_query_threads(aclk_queries_per_thread_sample); + aclk_stats_query_threads(legacy_aclk_queries_per_thread_sample); aclk_stats_cpu_threads(); @@ -400,14 +398,14 @@ void *aclk_stats_main_thread(void *ptr) return 0; } -void aclk_stats_upd_online(int online) { +void legacy_aclk_stats_upd_online(int online) { if(!aclk_stats_enabled) return; - ACLK_STATS_LOCK; - aclk_metrics.online = online; + LEGACY_ACLK_STATS_LOCK; + legacy_aclk_metrics.online = online; if(!online) - aclk_metrics_per_sample.offline_during_sample = 1; - ACLK_STATS_UNLOCK; + legacy_aclk_metrics_per_sample.offline_during_sample = 1; + LEGACY_ACLK_STATS_UNLOCK; } diff --git a/aclk/legacy/aclk_stats.h b/aclk/legacy/aclk_stats.h index 5e50a2272..560de3b5e 100644 --- a/aclk/legacy/aclk_stats.h +++ b/aclk/legacy/aclk_stats.h @@ -3,18 
+3,16 @@ #ifndef NETDATA_ACLK_STATS_H #define NETDATA_ACLK_STATS_H -#include "../../daemon/common.h" +#include "daemon/common.h" #include "libnetdata/libnetdata.h" #include "aclk_common.h" #define ACLK_STATS_THREAD_NAME "ACLK_Stats" -extern netdata_mutex_t aclk_stats_mutex; +extern netdata_mutex_t legacy_aclk_stats_mutex; -#define ACLK_STATS_LOCK netdata_mutex_lock(&aclk_stats_mutex) -#define ACLK_STATS_UNLOCK netdata_mutex_unlock(&aclk_stats_mutex) - -extern int aclk_stats_enabled; +#define LEGACY_ACLK_STATS_LOCK netdata_mutex_lock(&legacy_aclk_stats_mutex) +#define LEGACY_ACLK_STATS_UNLOCK netdata_mutex_unlock(&legacy_aclk_stats_mutex) struct aclk_stats_thread { netdata_thread_t *thread; @@ -22,7 +20,7 @@ struct aclk_stats_thread { }; // preserve between samples -struct aclk_metrics { +struct legacy_aclk_metrics { volatile uint8_t online; }; @@ -53,7 +51,7 @@ extern struct aclk_mat_metrics { struct aclk_metric_mat cloud_q_recvd_to_processed; } aclk_mat_metrics; -void aclk_metric_mat_update(struct aclk_metric_mat_data *metric, usec_t measurement); +void legacy_aclk_metric_mat_update(struct aclk_metric_mat_data *metric, usec_t measurement); #define ACLK_STATS_CLOUD_REQ_TYPE_CNT 7 // if you change update cloud_req_type_names @@ -61,7 +59,7 @@ void aclk_metric_mat_update(struct aclk_metric_mat_data *metric, usec_t measurem int aclk_cloud_req_type_to_idx(const char *name); // reset to 0 on every sample -extern struct aclk_metrics_per_sample { +extern struct legacy_aclk_metrics_per_sample { /* in the unlikely event of ACLK disconnecting and reconnecting under 1 sampling rate we want to make sure we record the disconnection @@ -90,13 +88,13 @@ extern struct aclk_metrics_per_sample { #endif struct aclk_metric_mat_data cloud_q_db_query_time; struct aclk_metric_mat_data cloud_q_recvd_to_processed; -} aclk_metrics_per_sample; +} legacy_aclk_metrics_per_sample; -extern uint32_t *aclk_queries_per_thread; +extern uint32_t *legacy_aclk_queries_per_thread; extern struct rusage *rusage_per_thread; -void *aclk_stats_main_thread(void *ptr); -void aclk_stats_thread_cleanup(); -void aclk_stats_upd_online(int online); +void *legacy_aclk_stats_main_thread(void *ptr); +void legacy_aclk_stats_thread_cleanup(); +void legacy_aclk_stats_upd_online(int online); #endif /* NETDATA_ACLK_STATS_H */ diff --git a/aclk/legacy/agent_cloud_link.c b/aclk/legacy/agent_cloud_link.c index 5ed7e66af..80ca23971 100644 --- a/aclk/legacy/agent_cloud_link.c +++ b/aclk/legacy/agent_cloud_link.c @@ -6,6 +6,7 @@ #include "aclk_query.h" #include "aclk_common.h" #include "aclk_stats.h" +#include "../aclk_collector_list.h" #ifdef ENABLE_ACLK #include @@ -15,46 +16,20 @@ int aclk_shutting_down = 0; // Other global state static int aclk_subscribed = 0; -static int aclk_disable_single_updates = 0; static char *aclk_username = NULL; static char *aclk_password = NULL; static char *global_base_topic = NULL; static int aclk_connecting = 0; int aclk_force_reconnect = 0; // Indication from lower layers -usec_t aclk_session_us = 0; // Used by the mqtt layer -time_t aclk_session_sec = 0; // Used by the mqtt layer static netdata_mutex_t aclk_mutex = NETDATA_MUTEX_INITIALIZER; -static netdata_mutex_t collector_mutex = NETDATA_MUTEX_INITIALIZER; #define ACLK_LOCK netdata_mutex_lock(&aclk_mutex) #define ACLK_UNLOCK netdata_mutex_unlock(&aclk_mutex) -#define COLLECTOR_LOCK netdata_mutex_lock(&collector_mutex) -#define COLLECTOR_UNLOCK netdata_mutex_unlock(&collector_mutex) - void lws_wss_check_queues(size_t *write_len, size_t *write_len_bytes, size_t 
*read_len); void aclk_lws_wss_destroy_context(); -/* - * Maintain a list of collectors and chart count - * If all the charts of a collector are deleted - * then a new metadata dataset must be send to the cloud - * - */ -struct _collector { - time_t created; - uint32_t count; //chart count - uint32_t hostname_hash; - uint32_t plugin_hash; - uint32_t module_hash; - char *hostname; - char *plugin_name; - char *module_name; - struct _collector *next; -}; - -struct _collector *collector_list = NULL; char *create_uuid() { @@ -67,7 +42,7 @@ char *create_uuid() return uuid_str; } -int cloud_to_agent_parse(JSON_ENTRY *e) +int legacy_cloud_to_agent_parse(JSON_ENTRY *e) { struct aclk_request *data = e->callback_data; @@ -247,202 +222,10 @@ char *get_topic(char *sub_topic, char *final_topic, int max_size) return final_topic; } -#ifndef __GNUC__ -#pragma region ACLK Internal Collector Tracking -#endif - -/* - * Free a collector structure - */ - -static void _free_collector(struct _collector *collector) -{ - if (likely(collector->plugin_name)) - freez(collector->plugin_name); - - if (likely(collector->module_name)) - freez(collector->module_name); - - if (likely(collector->hostname)) - freez(collector->hostname); - - freez(collector); -} - -/* - * This will report the collector list - * - */ -#ifdef ACLK_DEBUG -static void _dump_collector_list() -{ - struct _collector *tmp_collector; - - COLLECTOR_LOCK; - - info("DUMPING ALL COLLECTORS"); - - if (unlikely(!collector_list || !collector_list->next)) { - COLLECTOR_UNLOCK; - info("DUMPING ALL COLLECTORS -- nothing found"); - return; - } - - // Note that the first entry is "dummy" - tmp_collector = collector_list->next; - - while (tmp_collector) { - info( - "COLLECTOR %s : [%s:%s] count = %u", tmp_collector->hostname, - tmp_collector->plugin_name ? tmp_collector->plugin_name : "", - tmp_collector->module_name ? tmp_collector->module_name : "", tmp_collector->count); - - tmp_collector = tmp_collector->next; - } - info("DUMPING ALL COLLECTORS DONE"); - COLLECTOR_UNLOCK; -} -#endif - -/* - * This will cleanup the collector list - * - */ -static void _reset_collector_list() -{ - struct _collector *tmp_collector, *next_collector; - - COLLECTOR_LOCK; - - if (unlikely(!collector_list || !collector_list->next)) { - COLLECTOR_UNLOCK; - return; - } - - // Note that the first entry is "dummy" - tmp_collector = collector_list->next; - collector_list->count = 0; - collector_list->next = NULL; - - // We broke the link; we can unlock - COLLECTOR_UNLOCK; - - while (tmp_collector) { - next_collector = tmp_collector->next; - _free_collector(tmp_collector); - tmp_collector = next_collector; - } -} - -/* - * Find a collector (if it exists) - * Must lock before calling this - * If last_collector is not null, it will return the previous collector in the linked - * list (used in collector delete) - */ -static struct _collector *_find_collector( - const char *hostname, const char *plugin_name, const char *module_name, struct _collector **last_collector) -{ - struct _collector *tmp_collector, *prev_collector; - uint32_t plugin_hash; - uint32_t module_hash; - uint32_t hostname_hash; - - if (unlikely(!collector_list)) { - collector_list = callocz(1, sizeof(struct _collector)); - return NULL; - } - - if (unlikely(!collector_list->next)) - return NULL; - - plugin_hash = plugin_name ? simple_hash(plugin_name) : 1; - module_hash = module_name ? 
simple_hash(module_name) : 1; - hostname_hash = simple_hash(hostname); - - // Note that the first entry is "dummy" - tmp_collector = collector_list->next; - prev_collector = collector_list; - while (tmp_collector) { - if (plugin_hash == tmp_collector->plugin_hash && module_hash == tmp_collector->module_hash && - hostname_hash == tmp_collector->hostname_hash && (!strcmp(hostname, tmp_collector->hostname)) && - (!plugin_name || !tmp_collector->plugin_name || !strcmp(plugin_name, tmp_collector->plugin_name)) && - (!module_name || !tmp_collector->module_name || !strcmp(module_name, tmp_collector->module_name))) { - if (unlikely(last_collector)) - *last_collector = prev_collector; - - return tmp_collector; - } - - prev_collector = tmp_collector; - tmp_collector = tmp_collector->next; - } - - return tmp_collector; -} - -/* - * Called to delete a collector - * It will reduce the count (chart_count) and will remove it - * from the linked list if the count reaches zero - * The structure will be returned to the caller to free - * the resources - * - */ -static struct _collector *_del_collector(const char *hostname, const char *plugin_name, const char *module_name) -{ - struct _collector *tmp_collector, *prev_collector = NULL; - - tmp_collector = _find_collector(hostname, plugin_name, module_name, &prev_collector); - - if (likely(tmp_collector)) { - --tmp_collector->count; - if (unlikely(!tmp_collector->count)) - prev_collector->next = tmp_collector->next; - } - return tmp_collector; -} - -/* - * Add a new collector (plugin / module) to the list - * If it already exists just update the chart count - * - * Lock before calling - */ -static struct _collector *_add_collector(const char *hostname, const char *plugin_name, const char *module_name) -{ - struct _collector *tmp_collector; - - tmp_collector = _find_collector(hostname, plugin_name, module_name, NULL); - - if (unlikely(!tmp_collector)) { - tmp_collector = callocz(1, sizeof(struct _collector)); - tmp_collector->hostname_hash = simple_hash(hostname); - tmp_collector->plugin_hash = plugin_name ? simple_hash(plugin_name) : 1; - tmp_collector->module_hash = module_name ? simple_hash(module_name) : 1; - - tmp_collector->hostname = strdupz(hostname); - tmp_collector->plugin_name = plugin_name ? strdupz(plugin_name) : NULL; - tmp_collector->module_name = module_name ? strdupz(module_name) : NULL; - - tmp_collector->next = collector_list->next; - collector_list->next = tmp_collector; - } - tmp_collector->count++; - debug( - D_ACLK, "ADD COLLECTOR %s [%s:%s] -- chart %u", hostname, plugin_name ? plugin_name : "*", - module_name ? 
module_name : "*", tmp_collector->count); - return tmp_collector; -} - -#ifndef __GNUC__ -#pragma endregion -#endif - -/* Avoids the need to scan trough all RRDHOSTS +/* Avoids the need to scan through all RRDHOSTS * every time any Query Thread Wakes Up * (every time we need to check child popcorn expiry) - * call with ACLK_SHARED_STATE_LOCK held + * call with legacy_aclk_shared_state_LOCK held */ void aclk_update_next_child_to_popcorn(void) { @@ -462,19 +245,19 @@ void aclk_update_next_child_to_popcorn(void) any = 1; - if (unlikely(!aclk_shared_state.next_popcorn_host)) { - aclk_shared_state.next_popcorn_host = host; + if (unlikely(!legacy_aclk_shared_state.next_popcorn_host)) { + legacy_aclk_shared_state.next_popcorn_host = host; rrdhost_aclk_state_unlock(host); continue; } - if (aclk_shared_state.next_popcorn_host->aclk_state.t_last_popcorn_update > host->aclk_state.t_last_popcorn_update) - aclk_shared_state.next_popcorn_host = host; + if (legacy_aclk_shared_state.next_popcorn_host->aclk_state.t_last_popcorn_update > host->aclk_state.t_last_popcorn_update) + legacy_aclk_shared_state.next_popcorn_host = host; rrdhost_aclk_state_unlock(host); } if(!any) - aclk_shared_state.next_popcorn_host = NULL; + legacy_aclk_shared_state.next_popcorn_host = NULL; rrd_unlock(); } @@ -487,7 +270,7 @@ static int aclk_popcorn_check_bump(RRDHOST *host) { time_t now = now_monotonic_sec(); int updated = 0, ret; - ACLK_SHARED_STATE_LOCK; + legacy_aclk_shared_state_LOCK; rrdhost_aclk_state_lock(host); ret = ACLK_IS_HOST_INITIALIZING(host); @@ -502,12 +285,12 @@ static int aclk_popcorn_check_bump(RRDHOST *host) if (host != localhost && updated) aclk_update_next_child_to_popcorn(); - ACLK_SHARED_STATE_UNLOCK; + legacy_aclk_shared_state_UNLOCK; return ret; } rrdhost_aclk_state_unlock(host); - ACLK_SHARED_STATE_UNLOCK; + legacy_aclk_shared_state_UNLOCK; return ret; } @@ -523,13 +306,13 @@ static void aclk_start_host_popcorning(RRDHOST *host) { usec_t now = now_monotonic_sec(); info("Starting ACLK popcorn timer for host \"%s\" with GUID \"%s\"", host->hostname, host->machine_guid); - ACLK_SHARED_STATE_LOCK; + legacy_aclk_shared_state_LOCK; rrdhost_aclk_state_lock(host); if (host == localhost && !ACLK_IS_HOST_INITIALIZING(host)) { errno = 0; error("Localhost is allowed to do popcorning only once after startup!"); rrdhost_aclk_state_unlock(host); - ACLK_SHARED_STATE_UNLOCK; + legacy_aclk_shared_state_UNLOCK; return; } @@ -539,16 +322,16 @@ static void aclk_start_host_popcorning(RRDHOST *host) rrdhost_aclk_state_unlock(host); if (host != localhost) aclk_update_next_child_to_popcorn(); - ACLK_SHARED_STATE_UNLOCK; + legacy_aclk_shared_state_UNLOCK; } static void aclk_stop_host_popcorning(RRDHOST *host) { - ACLK_SHARED_STATE_LOCK; + legacy_aclk_shared_state_LOCK; rrdhost_aclk_state_lock(host); if (!ACLK_IS_HOST_POPCORNING(host)) { rrdhost_aclk_state_unlock(host); - ACLK_SHARED_STATE_UNLOCK; + legacy_aclk_shared_state_UNLOCK; return; } @@ -557,18 +340,18 @@ static void aclk_stop_host_popcorning(RRDHOST *host) host->aclk_state.metadata = ACLK_METADATA_REQUIRED; rrdhost_aclk_state_unlock(host); - if(host == aclk_shared_state.next_popcorn_host) { - aclk_shared_state.next_popcorn_host = NULL; + if(host == legacy_aclk_shared_state.next_popcorn_host) { + legacy_aclk_shared_state.next_popcorn_host = NULL; aclk_update_next_child_to_popcorn(); } - ACLK_SHARED_STATE_UNLOCK; + legacy_aclk_shared_state_UNLOCK; } /* * Add a new collector to the list * If it exists, update the chart count */ -void aclk_add_collector(RRDHOST *host, 
const char *plugin_name, const char *module_name) +void legacy_aclk_add_collector(RRDHOST *host, const char *plugin_name, const char *module_name) { struct _collector *tmp_collector; if (unlikely(!netdata_ready)) { @@ -589,7 +372,7 @@ void aclk_add_collector(RRDHOST *host, const char *plugin_name, const char *modu if(aclk_popcorn_check_bump(host)) return; - if (unlikely(aclk_queue_query("collector", host, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT))) + if (unlikely(legacy_aclk_queue_query("collector", host, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT))) debug(D_ACLK, "ACLK failed to queue on_connect command on collector addition"); } @@ -601,7 +384,7 @@ void aclk_add_collector(RRDHOST *host, const char *plugin_name, const char *modu * This function will release the memory used and schedule * a cloud update */ -void aclk_del_collector(RRDHOST *host, const char *plugin_name, const char *module_name) +void legacy_aclk_del_collector(RRDHOST *host, const char *plugin_name, const char *module_name) { struct _collector *tmp_collector; if (unlikely(!netdata_ready)) { @@ -628,7 +411,7 @@ void aclk_del_collector(RRDHOST *host, const char *plugin_name, const char *modu if (aclk_popcorn_check_bump(host)) return; - if (unlikely(aclk_queue_query("collector", host, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT))) + if (unlikely(legacy_aclk_queue_query("collector", host, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT))) debug(D_ACLK, "ACLK failed to queue on_connect command on collector deletion"); } @@ -639,7 +422,7 @@ static void aclk_graceful_disconnect() // Send a graceful disconnect message BUFFER *b = buffer_create(512); - aclk_create_header(b, "disconnect", NULL, 0, 0, aclk_shared_state.version_neg); + aclk_create_header(b, "disconnect", NULL, 0, 0, legacy_aclk_shared_state.version_neg); buffer_strcat(b, ",\n\t\"payload\": \"graceful\"}"); aclk_send_message(ACLK_METADATA_TOPIC, (char*)buffer_tostring(b), NULL); buffer_free(b); @@ -963,10 +746,10 @@ static void aclk_try_to_connect(char *hostname, int port) aclk_connecting = 1; create_publish_base_topic(); - ACLK_SHARED_STATE_LOCK; - aclk_shared_state.version_neg = 0; - aclk_shared_state.version_neg_wait_till = 0; - ACLK_SHARED_STATE_UNLOCK; + legacy_aclk_shared_state_LOCK; + legacy_aclk_shared_state.version_neg = 0; + legacy_aclk_shared_state.version_neg_wait_till = 0; + legacy_aclk_shared_state_UNLOCK; rc = mqtt_attempt_connection(hostname, port, aclk_username, aclk_password); if (unlikely(rc)) { @@ -981,10 +764,10 @@ static inline void aclk_hello_msg() char *msg_id = create_uuid(); - ACLK_SHARED_STATE_LOCK; - aclk_shared_state.version_neg = 0; - aclk_shared_state.version_neg_wait_till = now_monotonic_usec() + USEC_PER_SEC * VERSION_NEG_TIMEOUT; - ACLK_SHARED_STATE_UNLOCK; + legacy_aclk_shared_state_LOCK; + legacy_aclk_shared_state.version_neg = 0; + legacy_aclk_shared_state.version_neg_wait_till = now_monotonic_usec() + USEC_PER_SEC * VERSION_NEG_TIMEOUT; + legacy_aclk_shared_state_UNLOCK; //Hello message is versioned separately from the rest of the protocol aclk_create_header(buf, "hello", msg_id, 0, 0, ACLK_VERSION_NEG_VERSION); @@ -1004,7 +787,7 @@ static inline void aclk_hello_msg() * * @return It always returns NULL */ -void *aclk_main(void *ptr) +void *legacy_aclk_main(void *ptr) { struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; struct aclk_query_threads query_threads; @@ -1065,7 +848,7 @@ void *aclk_main(void *ptr) stats_thread->thread = mallocz(sizeof(netdata_thread_t)); stats_thread->query_thread_count = query_threads.count; 
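/*
 * The stats thread created just below samples the shared per-sample counters
 * with a copy-and-reset while holding the stats mutex, then updates charts
 * from the private copy outside the lock. A minimal sketch of that pattern
 * follows; the demo_* names are illustrative stand-ins, not Netdata symbols.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct demo_metrics {
    unsigned queries_dispatched;
    unsigned cloud_req_ok;
};

static pthread_mutex_t demo_stats_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_metrics demo_per_sample;     /* incremented by query threads */

static void demo_sample_tick(void)
{
    struct demo_metrics snapshot;

    pthread_mutex_lock(&demo_stats_lock);       /* cf. LEGACY_ACLK_STATS_LOCK */
    memcpy(&snapshot, &demo_per_sample, sizeof(snapshot));
    memset(&demo_per_sample, 0, sizeof(demo_per_sample));
    pthread_mutex_unlock(&demo_stats_lock);     /* cf. LEGACY_ACLK_STATS_UNLOCK */

    /* chart updates (the rrdset_* calls in the real code) run on the snapshot,
       so database work never happens while the stats mutex is held */
    printf("dispatched=%u ok=%u\n", snapshot.queries_dispatched, snapshot.cloud_req_ok);
}

int main(void)
{
    demo_per_sample.queries_dispatched = 3;
    demo_per_sample.cloud_req_ok = 2;
    demo_sample_tick();
    return 0;
}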
netdata_thread_create( - stats_thread->thread, ACLK_STATS_THREAD_NAME, NETDATA_THREAD_OPTION_JOINABLE, aclk_stats_main_thread, + stats_thread->thread, ACLK_STATS_THREAD_NAME, NETDATA_THREAD_OPTION_JOINABLE, legacy_aclk_stats_main_thread, stats_thread); } @@ -1165,20 +948,20 @@ void *aclk_main(void *ptr) } if (unlikely(!query_threads.thread_list)) { - aclk_query_threads_start(&query_threads); + legacy_aclk_query_threads_start(&query_threads); } time_t now = now_monotonic_sec(); if(aclk_connected && last_periodic_query_wakeup < now) { - // to make `aclk_queue_query()` param `run_after` work + // to make `legacy_aclk_queue_query()` param `run_after` work // also makes per child popcorning work last_periodic_query_wakeup = now; - QUERY_THREAD_WAKEUP; + LEGACY_QUERY_THREAD_WAKEUP; } } // forever exited: // Wakeup query thread to cleanup - QUERY_THREAD_WAKEUP_ALL; + LEGACY_QUERY_THREAD_WAKEUP_ALL; freez(aclk_username); freez(aclk_password); @@ -1192,18 +975,18 @@ exited: if (agent_id && aclk_connected) { freez(agent_id); // Wakeup thread to cleanup - QUERY_THREAD_WAKEUP; + LEGACY_QUERY_THREAD_WAKEUP; aclk_graceful_disconnect(); } - aclk_query_threads_cleanup(&query_threads); + legacy_aclk_query_threads_cleanup(&query_threads); _reset_collector_list(); freez(collector_list); if(aclk_stats_enabled) { netdata_thread_join(*stats_thread->thread, NULL); - aclk_stats_thread_cleanup(); + legacy_aclk_stats_thread_cleanup(); freez(stats_thread->thread); freez(stats_thread); } @@ -1306,12 +1089,12 @@ void aclk_connect() { info("Connection detected (%u queued queries)", aclk_query_size()); - aclk_stats_upd_online(1); + legacy_aclk_stats_upd_online(1); aclk_connected = 1; aclk_reconnect_delay(0); - QUERY_THREAD_WAKEUP; + LEGACY_QUERY_THREAD_WAKEUP; return; } @@ -1321,7 +1104,7 @@ void aclk_disconnect() if (likely(aclk_connected)) info("Disconnect detected (%u queued queries)", aclk_query_size()); - aclk_stats_upd_online(0); + legacy_aclk_stats_upd_online(0); aclk_subscribed = 0; rrdhost_aclk_state_lock(localhost); @@ -1372,7 +1155,7 @@ inline void aclk_create_header(BUFFER *dest, char *type, char *msg_id, time_t ts */ void health_active_log_alarms_2json(RRDHOST *host, BUFFER *wb); -void aclk_send_alarm_metadata(ACLK_METADATA_STATE metadata_submitted) +void legacy_aclk_send_alarm_metadata(ACLK_METADATA_STATE metadata_submitted) { BUFFER *local_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE); @@ -1388,9 +1171,9 @@ void aclk_send_alarm_metadata(ACLK_METADATA_STATE metadata_submitted) // session. 
if (metadata_submitted == ACLK_METADATA_SENT) - aclk_create_header(local_buffer, "connect_alarms", msg_id, 0, 0, aclk_shared_state.version_neg); + aclk_create_header(local_buffer, "connect_alarms", msg_id, 0, 0, legacy_aclk_shared_state.version_neg); else - aclk_create_header(local_buffer, "connect_alarms", msg_id, aclk_session_sec, aclk_session_us, aclk_shared_state.version_neg); + aclk_create_header(local_buffer, "connect_alarms", msg_id, aclk_session_sec, aclk_session_us, legacy_aclk_shared_state.version_neg); buffer_strcat(local_buffer, ",\n\t\"payload\": "); @@ -1418,7 +1201,7 @@ void aclk_send_alarm_metadata(ACLK_METADATA_STATE metadata_submitted) * /api/v1/info * charts */ -int aclk_send_info_metadata(ACLK_METADATA_STATE metadata_submitted, RRDHOST *host) +int legacy_aclk_send_info_metadata(ACLK_METADATA_STATE metadata_submitted, RRDHOST *host) { BUFFER *local_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE); @@ -1433,9 +1216,9 @@ int aclk_send_info_metadata(ACLK_METADATA_STATE metadata_submitted, RRDHOST *hos // a fake on_connect message then use the real timestamp to indicate it is within the existing // session. if (metadata_submitted == ACLK_METADATA_SENT) - aclk_create_header(local_buffer, "update", msg_id, 0, 0, aclk_shared_state.version_neg); + aclk_create_header(local_buffer, "update", msg_id, 0, 0, legacy_aclk_shared_state.version_neg); else - aclk_create_header(local_buffer, "connect", msg_id, aclk_session_sec, aclk_session_us, aclk_shared_state.version_neg); + aclk_create_header(local_buffer, "connect", msg_id, aclk_session_sec, aclk_session_us, legacy_aclk_shared_state.version_neg); buffer_strcat(local_buffer, ",\n\t\"payload\": "); buffer_sprintf(local_buffer, "{\n\t \"info\" : "); @@ -1459,14 +1242,14 @@ int aclk_send_info_child_connection(RRDHOST *host, ACLK_CMD cmd) BUFFER *local_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE); local_buffer->contenttype = CT_APPLICATION_JSON; - if(aclk_shared_state.version_neg < ACLK_V_CHILDRENSTATE) - fatal("This function should not be called if ACLK version is less than %d (current %d)", ACLK_V_CHILDRENSTATE, aclk_shared_state.version_neg); + if(legacy_aclk_shared_state.version_neg < ACLK_V_CHILDRENSTATE) + fatal("This function should not be called if ACLK version is less than %d (current %d)", ACLK_V_CHILDRENSTATE, legacy_aclk_shared_state.version_neg); debug(D_ACLK, "Sending Child Disconnect"); char *msg_id = create_uuid(); - aclk_create_header(local_buffer, cmd == ACLK_CMD_CHILD_CONNECT ? "child_connect" : "child_disconnect", msg_id, 0, 0, aclk_shared_state.version_neg); + aclk_create_header(local_buffer, cmd == ACLK_CMD_CHILD_CONNECT ? "child_connect" : "child_disconnect", msg_id, 0, 0, legacy_aclk_shared_state.version_neg); buffer_strcat(local_buffer, ",\"payload\":"); @@ -1486,10 +1269,10 @@ int aclk_send_info_child_connection(RRDHOST *host, ACLK_CMD cmd) return 0; } -void aclk_host_state_update(RRDHOST *host, ACLK_CMD cmd) +void legacy_aclk_host_state_update(RRDHOST *host, int connect) { #if ACLK_VERSION_MIN < ACLK_V_CHILDRENSTATE - if (aclk_shared_state.version_neg < ACLK_V_CHILDRENSTATE) + if (legacy_aclk_shared_state.version_neg < ACLK_V_CHILDRENSTATE) return; #else #warning "This check became unnecessary. 
Remove" @@ -1498,19 +1281,14 @@ void aclk_host_state_update(RRDHOST *host, ACLK_CMD cmd) if (unlikely(aclk_host_initializing(localhost))) return; - switch (cmd) { - case ACLK_CMD_CHILD_CONNECT: - debug(D_ACLK, "Child Connected %s %s.", host->hostname, host->machine_guid); - aclk_start_host_popcorning(host); - aclk_queue_query("add_child", host, NULL, NULL, 0, 1, ACLK_CMD_CHILD_CONNECT); - break; - case ACLK_CMD_CHILD_DISCONNECT: - debug(D_ACLK, "Child Disconnected %s %s.", host->hostname, host->machine_guid); - aclk_stop_host_popcorning(host); - aclk_queue_query("del_child", host, NULL, NULL, 0, 1, ACLK_CMD_CHILD_DISCONNECT); - break; - default: - error("Unknown command for aclk_host_state_update %d.", (int)cmd); + if (connect) { + debug(D_ACLK, "Child Connected %s %s.", host->hostname, host->machine_guid); + aclk_start_host_popcorning(host); + legacy_aclk_queue_query("add_child", host, NULL, NULL, 0, 1, ACLK_CMD_CHILD_CONNECT); + } else { + debug(D_ACLK, "Child Disconnected %s %s.", host->hostname, host->machine_guid); + aclk_stop_host_popcorning(host); + legacy_aclk_queue_query("del_child", host, NULL, NULL, 0, 1, ACLK_CMD_CHILD_DISCONNECT); } } @@ -1537,31 +1315,21 @@ void aclk_send_stress_test(size_t size) // or on request int aclk_send_metadata(ACLK_METADATA_STATE state, RRDHOST *host) { - aclk_send_info_metadata(state, host); + legacy_aclk_send_info_metadata(state, host); if(host == localhost) - aclk_send_alarm_metadata(state); + legacy_aclk_send_alarm_metadata(state); return 0; } -void aclk_single_update_disable() -{ - aclk_disable_single_updates = 1; -} - -void aclk_single_update_enable() -{ - aclk_disable_single_updates = 0; -} - // Triggered by a health reload, sends the alarm metadata -void aclk_alarm_reload() +void legacy_aclk_alarm_reload() { if (unlikely(aclk_host_initializing(localhost))) return; - if (unlikely(aclk_queue_query("on_connect", localhost, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT))) { + if (unlikely(legacy_aclk_queue_query("on_connect", localhost, NULL, NULL, 0, 1, ACLK_CMD_ONCONNECT))) { if (likely(aclk_connected)) { errno = 0; error("ACLK failed to queue on_connect command on alarm reload"); @@ -1585,7 +1353,7 @@ int aclk_send_single_chart(RRDHOST *host, char *chart) buffer_flush(local_buffer); local_buffer->contenttype = CT_APPLICATION_JSON; - aclk_create_header(local_buffer, "chart", msg_id, 0, 0, aclk_shared_state.version_neg); + aclk_create_header(local_buffer, "chart", msg_id, 0, 0, legacy_aclk_shared_state.version_neg); buffer_strcat(local_buffer, ",\n\t\"payload\": "); rrdset2json(st, local_buffer, NULL, NULL, 1); @@ -1598,7 +1366,7 @@ int aclk_send_single_chart(RRDHOST *host, char *chart) return 0; } -int aclk_update_chart(RRDHOST *host, char *chart_name, ACLK_CMD aclk_cmd) +int legacy_aclk_update_chart(RRDHOST *host, char *chart_name, int create) { #ifndef ENABLE_ACLK UNUSED(host); @@ -1611,7 +1379,7 @@ int aclk_update_chart(RRDHOST *host, char *chart_name, ACLK_CMD aclk_cmd) if (!netdata_cloud_setting) return 0; - if (aclk_shared_state.version_neg < ACLK_V_CHILDRENSTATE && host != localhost) + if (legacy_aclk_shared_state.version_neg < ACLK_V_CHILDRENSTATE && host != localhost) return 0; if (aclk_host_initializing(localhost)) @@ -1623,7 +1391,7 @@ int aclk_update_chart(RRDHOST *host, char *chart_name, ACLK_CMD aclk_cmd) if (aclk_popcorn_check_bump(host)) return 0; - if (unlikely(aclk_queue_query("_chart", host, NULL, chart_name, 0, 1, aclk_cmd))) { + if (unlikely(legacy_aclk_queue_query("_chart", host, NULL, chart_name, 0, 1, create ? 
ACLK_CMD_CHART : ACLK_CMD_CHARTDEL))) { if (likely(aclk_connected)) { errno = 0; error("ACLK failed to queue chart_update command"); @@ -1634,7 +1402,7 @@ int aclk_update_chart(RRDHOST *host, char *chart_name, ACLK_CMD aclk_cmd) #endif } -int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae) +int legacy_aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae) { BUFFER *local_buffer = NULL; @@ -1661,7 +1429,7 @@ int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae) char *msg_id = create_uuid(); buffer_flush(local_buffer); - aclk_create_header(local_buffer, "status-change", msg_id, 0, 0, aclk_shared_state.version_neg); + aclk_create_header(local_buffer, "status-change", msg_id, 0, 0, legacy_aclk_shared_state.version_neg); buffer_strcat(local_buffer, ",\n\t\"payload\": "); netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock); @@ -1670,7 +1438,7 @@ int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae) buffer_sprintf(local_buffer, "\n}"); - if (unlikely(aclk_queue_query(ACLK_ALARMS_TOPIC, NULL, msg_id, local_buffer->buffer, 0, 1, ACLK_CMD_ALARM))) { + if (unlikely(legacy_aclk_queue_query(ACLK_ALARMS_TOPIC, NULL, msg_id, local_buffer->buffer, 0, 1, ACLK_CMD_ALARM))) { if (likely(aclk_connected)) { errno = 0; error("ACLK failed to queue alarm_command on alarm_update"); @@ -1682,3 +1450,53 @@ int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae) return 0; } + +char *legacy_aclk_state(void) +{ + BUFFER *wb = buffer_create(1024); + char *ret; + + buffer_strcat(wb, + "ACLK Available: Yes\n" + "ACLK Implementation: Legacy\n" + "Claimed: " + ); + + char *agent_id = is_agent_claimed(); + if (agent_id == NULL) + buffer_strcat(wb, "No\n"); + else { + buffer_sprintf(wb, "Yes\nClaimed Id: %s\n", agent_id); + freez(agent_id); + } + + buffer_sprintf(wb, "Online: %s", aclk_connected ? "Yes" : "No"); + + ret = strdupz(buffer_tostring(wb)); + buffer_free(wb); + return ret; +} + +char *legacy_aclk_state_json(void) +{ + BUFFER *wb = buffer_create(1024); + char *agent_id = is_agent_claimed(); + + buffer_sprintf(wb, + "{\"aclk-available\":true," + "\"aclk-implementation\":\"Legacy\"," + "\"agent-claimed\":%s," + "\"claimed-id\":", + agent_id ? "true" : "false" + ); + + if (agent_id) { + buffer_sprintf(wb, "\"%s\"", agent_id); + freez(agent_id); + } else + buffer_strcat(wb, "null"); + + buffer_sprintf(wb, ",\"online\":%s}", aclk_connected ? 
"true" : "false"); + + return strdupz(buffer_tostring(wb)); +} diff --git a/aclk/legacy/agent_cloud_link.h b/aclk/legacy/agent_cloud_link.h index bfcfef8e9..8954a337a 100644 --- a/aclk/legacy/agent_cloud_link.h +++ b/aclk/legacy/agent_cloud_link.h @@ -3,11 +3,10 @@ #ifndef NETDATA_AGENT_CLOUD_LINK_H #define NETDATA_AGENT_CLOUD_LINK_H -#include "../../daemon/common.h" +#include "daemon/common.h" #include "mqtt.h" #include "aclk_common.h" -#define ACLK_THREAD_NAME "ACLK_Query" #define ACLK_CHART_TOPIC "outbound/meta" #define ACLK_ALARMS_TOPIC "outbound/alarms" #define ACLK_METADATA_TOPIC "outbound/meta" @@ -18,7 +17,6 @@ #define ACLK_INITIALIZATION_WAIT 60 // Wait for link to initialize in seconds (per msg) #define ACLK_INITIALIZATION_SLEEP_WAIT 1 // Wait time @ spin lock for MQTT initialization in seconds -#define ACLK_QOS 1 #define ACLK_PING_INTERVAL 60 #define ACLK_LOOP_TIMEOUT 5 // seconds to wait for operations in the library loop @@ -42,16 +40,7 @@ struct aclk_request { typedef enum aclk_init_action { ACLK_INIT, ACLK_REINIT } ACLK_INIT_ACTION; -void *aclk_main(void *ptr); - -#define NETDATA_ACLK_HOOK \ - { .name = "ACLK_Main", \ - .config_section = NULL, \ - .config_name = NULL, \ - .enabled = 1, \ - .thread = NULL, \ - .init_routine = NULL, \ - .start_routine = aclk_main }, +void *legacy_aclk_main(void *ptr); extern int aclk_send_message(char *sub_topic, char *message, char *msg_id); extern int aclk_send_message_bin(char *sub_topic, const void *message, size_t len, char *msg_id); @@ -62,32 +51,35 @@ char *create_uuid(); // callbacks for agent cloud link int aclk_subscribe(char *topic, int qos); -int cloud_to_agent_parse(JSON_ENTRY *e); +int legacy_cloud_to_agent_parse(JSON_ENTRY *e); void aclk_disconnect(); void aclk_connect(); +#ifdef ENABLE_ACLK int aclk_send_metadata(ACLK_METADATA_STATE state, RRDHOST *host); -int aclk_send_info_metadata(ACLK_METADATA_STATE metadata_submitted, RRDHOST *host); -void aclk_send_alarm_metadata(ACLK_METADATA_STATE metadata_submitted); +int legacy_aclk_send_info_metadata(ACLK_METADATA_STATE metadata_submitted, RRDHOST *host); +void legacy_aclk_send_alarm_metadata(ACLK_METADATA_STATE metadata_submitted); int aclk_wait_for_initialization(); char *create_publish_base_topic(); int aclk_send_single_chart(RRDHOST *host, char *chart); -int aclk_update_chart(RRDHOST *host, char *chart_name, ACLK_CMD aclk_cmd); -int aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae); +int legacy_aclk_update_chart(RRDHOST *host, char *chart_name, int create); +int legacy_aclk_update_alarm(RRDHOST *host, ALARM_ENTRY *ae); void aclk_create_header(BUFFER *dest, char *type, char *msg_id, time_t ts_secs, usec_t ts_us, int version); -int aclk_handle_cloud_message(char *payload); -void aclk_add_collector(RRDHOST *host, const char *plugin_name, const char *module_name); -void aclk_del_collector(RRDHOST *host, const char *plugin_name, const char *module_name); -void aclk_alarm_reload(); +int legacy_aclk_handle_cloud_message(char *payload); +void legacy_aclk_add_collector(RRDHOST *host, const char *plugin_name, const char *module_name); +void legacy_aclk_del_collector(RRDHOST *host, const char *plugin_name, const char *module_name); +void legacy_aclk_alarm_reload(void); unsigned long int aclk_reconnect_delay(int mode); extern void health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, RRDHOST *host); -void aclk_single_update_enable(); -void aclk_single_update_disable(); -void aclk_host_state_update(RRDHOST *host, ACLK_CMD cmd); +void legacy_aclk_host_state_update(RRDHOST *host, int 
connect); int aclk_send_info_child_connection(RRDHOST *host, ACLK_CMD cmd); void aclk_update_next_child_to_popcorn(void); +char *legacy_aclk_state(void); +char *legacy_aclk_state_json(void); +#endif + #endif //NETDATA_AGENT_CLOUD_LINK_H diff --git a/aclk/legacy/mqtt.c b/aclk/legacy/mqtt.c index 74f774555..0e4bb2ec9 100644 --- a/aclk/legacy/mqtt.c +++ b/aclk/legacy/mqtt.c @@ -1,12 +1,16 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include -#include "../../daemon/common.h" +#include "daemon/common.h" #include "mqtt.h" #include "aclk_lws_wss_client.h" #include "aclk_stats.h" #include "aclk_rx_msgs.h" +#include "agent_cloud_link.h" + +#define ACLK_QOS 1 + extern usec_t aclk_session_us; extern time_t aclk_session_sec; @@ -27,7 +31,7 @@ void mqtt_message_callback(struct mosquitto *mosq, void *obj, const struct mosqu UNUSED(mosq); UNUSED(obj); - aclk_handle_cloud_message(msg->payload); + legacy_aclk_handle_cloud_message(msg->payload); } void publish_callback(struct mosquitto *mosq, void *obj, int rc) @@ -44,7 +48,7 @@ void publish_callback(struct mosquitto *mosq, void *obj, int rc) info("Publish_callback: mid=%d latency=%" PRId64 "ms", rc, diff); - aclk_metric_mat_update(&aclk_metrics_per_sample.latency, diff); + legacy_aclk_metric_mat_update(&legacy_aclk_metrics_per_sample.latency, diff); #endif return; } diff --git a/aclk/legacy/mqtt.h b/aclk/legacy/mqtt.h index cc4765d62..98d599f51 100644 --- a/aclk/legacy/mqtt.h +++ b/aclk/legacy/mqtt.h @@ -19,7 +19,7 @@ const char *_link_strerror(int rc); int _link_set_lwt(char *topic, int qos); -int aclk_handle_cloud_message(char *); +int legacy_aclk_handle_cloud_message(char *); extern char *get_topic(char *sub_topic, char *final_topic, int max_size); #endif //NETDATA_MQTT_H diff --git a/aclk/schema-wrappers/alarm_config.cc b/aclk/schema-wrappers/alarm_config.cc new file mode 100644 index 000000000..56d7e6f39 --- /dev/null +++ b/aclk/schema-wrappers/alarm_config.cc @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "alarm_config.h" + +#include "proto/alarm/v1/config.pb.h" + +#include "libnetdata/libnetdata.h" + +#include "schema_wrapper_utils.h" + +using namespace alarms::v1; + +void destroy_aclk_alarm_configuration(struct aclk_alarm_configuration *cfg) +{ + freez(cfg->alarm); + freez(cfg->tmpl); + freez(cfg->on_chart); + + freez(cfg->classification); + freez(cfg->type); + freez(cfg->component); + + freez(cfg->os); + freez(cfg->hosts); + freez(cfg->plugin); + freez(cfg->module); + freez(cfg->charts); + freez(cfg->families); + freez(cfg->lookup); + freez(cfg->every); + freez(cfg->units); + + freez(cfg->green); + freez(cfg->red); + + freez(cfg->calculation_expr); + freez(cfg->warning_expr); + freez(cfg->critical_expr); + + freez(cfg->recipient); + freez(cfg->exec); + freez(cfg->delay); + freez(cfg->repeat); + freez(cfg->info); + freez(cfg->options); + freez(cfg->host_labels); + + freez(cfg->p_db_lookup_dimensions); + freez(cfg->p_db_lookup_method); + freez(cfg->p_db_lookup_options); +} + +char *generate_provide_alarm_configuration(size_t *len, struct provide_alarm_configuration *data) +{ + ProvideAlarmConfiguration msg; + AlarmConfiguration *cfg = msg.mutable_config(); + + msg.set_config_hash(data->cfg_hash); + + if (data->cfg.alarm) + cfg->set_alarm(data->cfg.alarm); + if (data->cfg.tmpl) + cfg->set_template_(data->cfg.tmpl); + if(data->cfg.on_chart) + cfg->set_on_chart(data->cfg.on_chart); + + if (data->cfg.classification) + cfg->set_classification(data->cfg.classification); + if (data->cfg.type) + 
cfg->set_type(data->cfg.type); + if (data->cfg.component) + cfg->set_component(data->cfg.component); + + if (data->cfg.os) + cfg->set_os(data->cfg.os); + if (data->cfg.hosts) + cfg->set_hosts(data->cfg.hosts); + if (data->cfg.plugin) + cfg->set_plugin(data->cfg.plugin); + if(data->cfg.module) + cfg->set_module(data->cfg.module); + if(data->cfg.charts) + cfg->set_charts(data->cfg.charts); + if(data->cfg.families) + cfg->set_families(data->cfg.families); + if(data->cfg.lookup) + cfg->set_lookup(data->cfg.lookup); + if(data->cfg.every) + cfg->set_every(data->cfg.every); + if(data->cfg.units) + cfg->set_units(data->cfg.units); + + if (data->cfg.green) + cfg->set_green(data->cfg.green); + if (data->cfg.red) + cfg->set_red(data->cfg.red); + + if (data->cfg.calculation_expr) + cfg->set_calculation_expr(data->cfg.calculation_expr); + if (data->cfg.warning_expr) + cfg->set_warning_expr(data->cfg.warning_expr); + if (data->cfg.critical_expr) + cfg->set_critical_expr(data->cfg.critical_expr); + + if (data->cfg.recipient) + cfg->set_recipient(data->cfg.recipient); + if (data->cfg.exec) + cfg->set_exec(data->cfg.exec); + if (data->cfg.delay) + cfg->set_delay(data->cfg.delay); + if (data->cfg.repeat) + cfg->set_repeat(data->cfg.repeat); + if (data->cfg.info) + cfg->set_info(data->cfg.info); + if (data->cfg.options) + cfg->set_options(data->cfg.options); + if (data->cfg.host_labels) + cfg->set_host_labels(data->cfg.host_labels); + + cfg->set_p_db_lookup_after(data->cfg.p_db_lookup_after); + cfg->set_p_db_lookup_before(data->cfg.p_db_lookup_before); + if (data->cfg.p_db_lookup_dimensions) + cfg->set_p_db_lookup_dimensions(data->cfg.p_db_lookup_dimensions); + if (data->cfg.p_db_lookup_method) + cfg->set_p_db_lookup_method(data->cfg.p_db_lookup_method); + if (data->cfg.p_db_lookup_options) + cfg->set_p_db_lookup_options(data->cfg.p_db_lookup_options); + cfg->set_p_update_every(data->cfg.p_update_every); + + *len = PROTO_COMPAT_MSG_SIZE(msg); + char *bin = (char*)mallocz(*len); + if (!msg.SerializeToArray(bin, *len)) + return NULL; + + return bin; +} + +char *parse_send_alarm_configuration(const char *data, size_t len) +{ + SendAlarmConfiguration msg; + if (!msg.ParseFromArray(data, len)) + return NULL; + if (!msg.config_hash().c_str()) + return NULL; + return strdupz(msg.config_hash().c_str()); +} + diff --git a/aclk/schema-wrappers/alarm_config.h b/aclk/schema-wrappers/alarm_config.h new file mode 100644 index 000000000..157fbc60f --- /dev/null +++ b/aclk/schema-wrappers/alarm_config.h @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef ACLK_SCHEMA_WRAPPER_ALARM_CONFIG_H +#define ACLK_SCHEMA_WRAPPER_ALARM_CONFIG_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct aclk_alarm_configuration { + char *alarm; + char *tmpl; + char *on_chart; + + char *classification; + char *type; + char *component; + + char *os; + char *hosts; + char *plugin; + char *module; + char *charts; + char *families; + char *lookup; + char *every; + char *units; + + char *green; + char *red; + + char *calculation_expr; + char *warning_expr; + char *critical_expr; + + char *recipient; + char *exec; + char *delay; + char *repeat; + char *info; + char *options; + char *host_labels; + + int32_t p_db_lookup_after; + int32_t p_db_lookup_before; + char *p_db_lookup_dimensions; + char *p_db_lookup_method; + char *p_db_lookup_options; + int32_t p_update_every; +}; + +void destroy_aclk_alarm_configuration(struct aclk_alarm_configuration *cfg); + +struct provide_alarm_configuration { + char 
*cfg_hash; + struct aclk_alarm_configuration cfg; +}; + +char *generate_provide_alarm_configuration(size_t *len, struct provide_alarm_configuration *data); +char *parse_send_alarm_configuration(const char *data, size_t len); + +#ifdef __cplusplus +} +#endif + +#endif /* ACLK_SCHEMA_WRAPPER_ALARM_CONFIG_H */ diff --git a/aclk/schema-wrappers/alarm_stream.cc b/aclk/schema-wrappers/alarm_stream.cc new file mode 100644 index 000000000..5868e5d67 --- /dev/null +++ b/aclk/schema-wrappers/alarm_stream.cc @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "alarm_stream.h" + +#include "proto/alarm/v1/stream.pb.h" + +#include "libnetdata/libnetdata.h" + +#include "schema_wrapper_utils.h" + +using namespace alarms::v1; + +struct start_alarm_streaming parse_start_alarm_streaming(const char *data, size_t len) +{ + struct start_alarm_streaming ret; + memset(&ret, 0, sizeof(ret)); + + StartAlarmStreaming msg; + + if (!msg.ParseFromArray(data, len)) + return ret; + + ret.node_id = strdupz(msg.node_id().c_str()); + ret.batch_id = msg.batch_id(); + ret.start_seq_id = msg.start_sequnce_id(); + + return ret; +} + +char *parse_send_alarm_log_health(const char *data, size_t len) +{ + SendAlarmLogHealth msg; + if (!msg.ParseFromArray(data, len)) + return NULL; + return strdupz(msg.node_id().c_str()); +} + +char *generate_alarm_log_health(size_t *len, struct alarm_log_health *data) +{ + AlarmLogHealth msg; + LogEntries *entries; + + msg.set_claim_id(data->claim_id); + msg.set_node_id(data->node_id); + msg.set_enabled(data->enabled); + + switch (data->status) { + case alarm_log_status_aclk::ALARM_LOG_STATUS_IDLE: + msg.set_status(alarms::v1::ALARM_LOG_STATUS_IDLE); + break; + case alarm_log_status_aclk::ALARM_LOG_STATUS_RUNNING: + msg.set_status(alarms::v1::ALARM_LOG_STATUS_RUNNING); + break; + case alarm_log_status_aclk::ALARM_LOG_STATUS_UNSPECIFIED: + msg.set_status(alarms::v1::ALARM_LOG_STATUS_UNSPECIFIED); + break; + default: + error("Unknown status of AlarmLogHealth LogEntry"); + return NULL; + } + + entries = msg.mutable_log_entries(); + entries->set_first_sequence_id(data->log_entries.first_seq_id); + entries->set_last_sequence_id(data->log_entries.last_seq_id); + + set_google_timestamp_from_timeval(data->log_entries.first_when, entries->mutable_first_when()); + set_google_timestamp_from_timeval(data->log_entries.last_when, entries->mutable_last_when()); + + *len = PROTO_COMPAT_MSG_SIZE(msg); + char *bin = (char*)mallocz(*len); + if (!msg.SerializeToArray(bin, *len)) + return NULL; + + return bin; +} + +static alarms::v1::AlarmStatus aclk_alarm_status_to_proto(enum aclk_alarm_status status) +{ + switch (status) { + case aclk_alarm_status::ALARM_STATUS_NULL: + return alarms::v1::ALARM_STATUS_NULL; + case aclk_alarm_status::ALARM_STATUS_UNKNOWN: + return alarms::v1::ALARM_STATUS_UNKNOWN; + case aclk_alarm_status::ALARM_STATUS_REMOVED: + return alarms::v1::ALARM_STATUS_REMOVED; + case aclk_alarm_status::ALARM_STATUS_NOT_A_NUMBER: + return alarms::v1::ALARM_STATUS_NOT_A_NUMBER; + case aclk_alarm_status::ALARM_STATUS_CLEAR: + return alarms::v1::ALARM_STATUS_CLEAR; + case aclk_alarm_status::ALARM_STATUS_WARNING: + return alarms::v1::ALARM_STATUS_WARNING; + case aclk_alarm_status::ALARM_STATUS_CRITICAL: + return alarms::v1::ALARM_STATUS_CRITICAL; + default: + error("Unknown alarm status"); + return alarms::v1::ALARM_STATUS_UNKNOWN; + } +} + +void destroy_alarm_log_entry(struct alarm_log_entry *entry) +{ + //freez(entry->node_id); + //freez(entry->claim_id); + + freez(entry->chart); + 
freez(entry->name); + freez(entry->family); + + freez(entry->config_hash); + + freez(entry->timezone); + + freez(entry->exec_path); + freez(entry->conf_source); + freez(entry->command); + + freez(entry->value_string); + freez(entry->old_value_string); + + freez(entry->rendered_info); +} + +static void fill_alarm_log_entry(struct alarm_log_entry *data, AlarmLogEntry *proto) +{ + proto->set_node_id(data->node_id); + proto->set_claim_id(data->claim_id); + + proto->set_chart(data->chart); + proto->set_name(data->name); + if (data->family) + proto->set_family(data->family); + + proto->set_batch_id(data->batch_id); + proto->set_sequence_id(data->sequence_id); + proto->set_when(data->when); + + proto->set_config_hash(data->config_hash); + + proto->set_utc_offset(data->utc_offset); + proto->set_timezone(data->timezone); + + proto->set_exec_path(data->exec_path); + proto->set_conf_source(data->conf_source); + proto->set_command(data->command); + + proto->set_duration(data->duration); + proto->set_non_clear_duration(data->non_clear_duration); + + + proto->set_status(aclk_alarm_status_to_proto(data->status)); + proto->set_old_status(aclk_alarm_status_to_proto(data->old_status)); + proto->set_delay(data->delay); + proto->set_delay_up_to_timestamp(data->delay_up_to_timestamp); + + proto->set_last_repeat(data->last_repeat); + proto->set_silenced(data->silenced); + + if (data->value_string) + proto->set_value_string(data->value_string); + if (data->old_value_string) + proto->set_old_value_string(data->old_value_string); + + proto->set_value(data->value); + proto->set_old_value(data->old_value); + + proto->set_updated(data->updated); + + proto->set_rendered_info(data->rendered_info); +} + +char *generate_alarm_log_entry(size_t *len, struct alarm_log_entry *data) +{ + AlarmLogEntry le; + + fill_alarm_log_entry(data, &le); + + *len = PROTO_COMPAT_MSG_SIZE(le); + char *bin = (char*)mallocz(*len); + if (!le.SerializeToArray(bin, *len)) + return NULL; + + return bin; +} + +struct send_alarm_snapshot *parse_send_alarm_snapshot(const char *data, size_t len) +{ + SendAlarmSnapshot msg; + if (!msg.ParseFromArray(data, len)) + return NULL; + + struct send_alarm_snapshot *ret = (struct send_alarm_snapshot*)callocz(1, sizeof(struct send_alarm_snapshot)); + if (msg.claim_id().c_str()) + ret->claim_id = strdupz(msg.claim_id().c_str()); + if (msg.node_id().c_str()) + ret->node_id = strdupz(msg.node_id().c_str()); + ret->snapshot_id = msg.snapshot_id(); + ret->sequence_id = msg.sequence_id(); + + return ret; +} + +void destroy_send_alarm_snapshot(struct send_alarm_snapshot *ptr) +{ + freez(ptr->claim_id); + freez(ptr->node_id); + freez(ptr); +} + +alarm_snapshot_proto_ptr_t generate_alarm_snapshot_proto(struct alarm_snapshot *data) +{ + AlarmSnapshot *msg = new AlarmSnapshot; + if (unlikely(!msg)) fatal("Cannot allocate memory for AlarmSnapshot"); + + msg->set_node_id(data->node_id); + msg->set_claim_id(data->claim_id); + msg->set_snapshot_id(data->snapshot_id); + msg->set_chunks(data->chunks); + msg->set_chunk(data->chunk); + + // this is handled automatically by add_alarm_log_entry2snapshot function + msg->set_chunk_size(0); + + return msg; +} + +void add_alarm_log_entry2snapshot(alarm_snapshot_proto_ptr_t snapshot, struct alarm_log_entry *data) +{ + AlarmSnapshot *alarm_snapshot = (AlarmSnapshot *)snapshot; + AlarmLogEntry *alarm_log_entry = alarm_snapshot->add_alarms(); + + fill_alarm_log_entry(data, alarm_log_entry); + + alarm_snapshot->set_chunk_size(alarm_snapshot->chunk_size() + 1); +} + +char 
*generate_alarm_snapshot_bin(size_t *len, alarm_snapshot_proto_ptr_t snapshot) +{ + AlarmSnapshot *alarm_snapshot = (AlarmSnapshot *)snapshot; + *len = PROTO_COMPAT_MSG_SIZE_PTR(alarm_snapshot); + char *bin = (char*)mallocz(*len); + if (!alarm_snapshot->SerializeToArray(bin, *len)) { + delete alarm_snapshot; + return NULL; + } + + delete alarm_snapshot; + return bin; +} diff --git a/aclk/schema-wrappers/alarm_stream.h b/aclk/schema-wrappers/alarm_stream.h new file mode 100644 index 000000000..2932bb192 --- /dev/null +++ b/aclk/schema-wrappers/alarm_stream.h @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef ACLK_SCHEMA_WRAPPER_ALARM_STREAM_H +#define ACLK_SCHEMA_WRAPPER_ALARM_STREAM_H + +#include + +#include "database/rrd.h" + +#ifdef __cplusplus +extern "C" { +#endif + +enum alarm_log_status_aclk { + ALARM_LOG_STATUS_UNSPECIFIED = 0, + ALARM_LOG_STATUS_RUNNING = 1, + ALARM_LOG_STATUS_IDLE = 2 +}; + +struct alarm_log_entries { + int64_t first_seq_id; + struct timeval first_when; + + int64_t last_seq_id; + struct timeval last_when; +}; + +struct alarm_log_health { + char *claim_id; + char *node_id; + int enabled; + enum alarm_log_status_aclk status; + struct alarm_log_entries log_entries; +}; + +struct start_alarm_streaming { + char *node_id; + uint64_t batch_id; + uint64_t start_seq_id; +}; + +struct start_alarm_streaming parse_start_alarm_streaming(const char *data, size_t len); +char *parse_send_alarm_log_health(const char *data, size_t len); + +char *generate_alarm_log_health(size_t *len, struct alarm_log_health *data); + +enum aclk_alarm_status { + ALARM_STATUS_NULL = 0, + ALARM_STATUS_UNKNOWN = 1, + ALARM_STATUS_REMOVED = 2, + ALARM_STATUS_NOT_A_NUMBER = 3, + ALARM_STATUS_CLEAR = 4, + ALARM_STATUS_WARNING = 5, + ALARM_STATUS_CRITICAL = 6 +}; + +struct alarm_log_entry { + char *node_id; + char *claim_id; + + char *chart; + char *name; + char *family; + + uint64_t batch_id; + uint64_t sequence_id; + uint64_t when; + + char *config_hash; + + int32_t utc_offset; + char *timezone; + + char *exec_path; + char *conf_source; + char *command; + + uint32_t duration; + uint32_t non_clear_duration; + + enum aclk_alarm_status status; + enum aclk_alarm_status old_status; + uint64_t delay; + uint64_t delay_up_to_timestamp; + + uint64_t last_repeat; + int silenced; + + char *value_string; + char *old_value_string; + + double value; + double old_value; + + // updated alarm entry, when the status of the alarm has been updated by a later entry + int updated; + + // rendered_info + char *rendered_info; +}; + +struct send_alarm_snapshot { + char *node_id; + char *claim_id; + uint64_t snapshot_id; + uint64_t sequence_id; +}; + +struct alarm_snapshot { + char *node_id; + char *claim_id; + uint64_t snapshot_id; + uint32_t chunks; + uint32_t chunk; +}; + +typedef void* alarm_snapshot_proto_ptr_t; + +void destroy_alarm_log_entry(struct alarm_log_entry *entry); + +char *generate_alarm_log_entry(size_t *len, struct alarm_log_entry *data); + +struct send_alarm_snapshot *parse_send_alarm_snapshot(const char *data, size_t len); +void destroy_send_alarm_snapshot(struct send_alarm_snapshot *ptr); + +alarm_snapshot_proto_ptr_t generate_alarm_snapshot_proto(struct alarm_snapshot *data); +void add_alarm_log_entry2snapshot(alarm_snapshot_proto_ptr_t snapshot, struct alarm_log_entry *data); +char *generate_alarm_snapshot_bin(size_t *len, alarm_snapshot_proto_ptr_t snapshot); + +#ifdef __cplusplus +} +#endif + +#endif /* ACLK_SCHEMA_WRAPPER_ALARM_STREAM_H */ diff --git 
a/aclk/schema-wrappers/chart_config.cc b/aclk/schema-wrappers/chart_config.cc new file mode 100644 index 000000000..87e34e0df --- /dev/null +++ b/aclk/schema-wrappers/chart_config.cc @@ -0,0 +1,105 @@ +#include "chart_config.h" + +#include "proto/chart/v1/config.pb.h" + +#include "libnetdata/libnetdata.h" + +#include "schema_wrapper_utils.h" + +void destroy_update_chart_config(struct update_chart_config *cfg) +{ + freez(cfg->claim_id); + freez(cfg->node_id); + freez(cfg->hashes); +} + +void destroy_chart_config_updated(struct chart_config_updated *cfg) +{ + freez(cfg->type); + freez(cfg->family); + freez(cfg->context); + freez(cfg->title); + freez(cfg->plugin); + freez(cfg->module); + freez(cfg->units); + freez(cfg->config_hash); +} + +struct update_chart_config parse_update_chart_config(const char *data, size_t len) +{ + chart::v1::UpdateChartConfigs cfgs; + update_chart_config res; + memset(&res, 0, sizeof(res)); + + if (!cfgs.ParseFromArray(data, len)) + return res; + + res.claim_id = strdupz(cfgs.claim_id().c_str()); + res.node_id = strdupz(cfgs.node_id().c_str()); + + // to not do bazillion tiny allocations for individual strings + // we calculate how much memory we will need for all of them + // and allocate at once + int hash_count = cfgs.config_hashes_size(); + size_t total_strlen = 0; + for (int i = 0; i < hash_count; i++) + total_strlen += cfgs.config_hashes(i).length(); + total_strlen += hash_count; //null bytes + + res.hashes = (char**)callocz( 1, + (hash_count+1) * sizeof(char*) + //char * array incl. terminating NULL at the end + total_strlen //strings themselves incl. 1 null byte each + ); + + char* dest = ((char*)res.hashes) + (hash_count + 1 /* NULL ptr */) * sizeof(char *); + // now copy them strings + // null bytes handled by callocz + for (int i = 0; i < hash_count; i++) { + strcpy(dest, cfgs.config_hashes(i).c_str()); + res.hashes[i] = dest; + dest += strlen(dest) + 1 /* end string null */; + } + + return res; +} + +char *generate_chart_configs_updated(size_t *len, const struct chart_config_updated *config_list, int list_size) +{ + chart::v1::ChartConfigsUpdated configs; + for (int i = 0; i < list_size; i++) { + chart::v1::ChartConfigUpdated *config = configs.add_configs(); + config->set_type(config_list[i].type); + if (config_list[i].family) + config->set_family(config_list[i].family); + config->set_context(config_list[i].context); + config->set_title(config_list[i].title); + config->set_priority(config_list[i].priority); + config->set_plugin(config_list[i].plugin); + + if (config_list[i].module) + config->set_module(config_list[i].module); + + switch (config_list[i].chart_type) { + case RRDSET_TYPE_LINE: + config->set_chart_type(chart::v1::LINE); + break; + case RRDSET_TYPE_AREA: + config->set_chart_type(chart::v1::AREA); + break; + case RRDSET_TYPE_STACKED: + config->set_chart_type(chart::v1::STACKED); + break; + default: + return NULL; + } + + config->set_units(config_list[i].units); + config->set_config_hash(config_list[i].config_hash); + } + + *len = PROTO_COMPAT_MSG_SIZE(configs); + char *bin = (char*)mallocz(*len); + configs.SerializeToArray(bin, *len); + + return bin; +} diff --git a/aclk/schema-wrappers/chart_config.h b/aclk/schema-wrappers/chart_config.h new file mode 100644 index 000000000..f08f76b61 --- /dev/null +++ b/aclk/schema-wrappers/chart_config.h @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef ACLK_SCHEMA_WRAPPER_CHART_CONFIG_H +#define ACLK_SCHEMA_WRAPPER_CHART_CONFIG_H + +#include + +#include "database/rrd.h" + +#ifdef 
__cplusplus +extern "C" { +#endif + +struct update_chart_config { + char *claim_id; + char *node_id; + char **hashes; +}; + +enum chart_config_chart_type { + LINE, + AREA, + STACKED +}; + +struct chart_config_updated { + char *type; + char *family; + char *context; + char *title; + uint64_t priority; + char *plugin; + char *module; + RRDSET_TYPE chart_type; + char *units; + char *config_hash; +}; + +void destroy_update_chart_config(struct update_chart_config *cfg); +void destroy_chart_config_updated(struct chart_config_updated *cfg); + +struct update_chart_config parse_update_chart_config(const char *data, size_t len); + +char *generate_chart_configs_updated(size_t *len, const struct chart_config_updated *config_list, int list_size); + +#ifdef __cplusplus +} +#endif + +#endif /* ACLK_SCHEMA_WRAPPER_CHART_CONFIG_H */ diff --git a/aclk/schema-wrappers/chart_stream.cc b/aclk/schema-wrappers/chart_stream.cc new file mode 100644 index 000000000..7d820e533 --- /dev/null +++ b/aclk/schema-wrappers/chart_stream.cc @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "aclk/aclk_util.h" + +#include "proto/chart/v1/stream.pb.h" +#include "chart_stream.h" + +#include "schema_wrapper_utils.h" + +#include +#include + +stream_charts_and_dims_t parse_stream_charts_and_dims(const char *data, size_t len) +{ + chart::v1::StreamChartsAndDimensions msg; + stream_charts_and_dims_t res; + memset(&res, 0, sizeof(res)); + + if (!msg.ParseFromArray(data, len)) + return res; + + res.node_id = strdup(msg.node_id().c_str()); + res.claim_id = strdup(msg.claim_id().c_str()); + res.seq_id = msg.sequence_id(); + res.batch_id = msg.batch_id(); + set_timeval_from_google_timestamp(msg.seq_id_created_at(), &res.seq_id_created_at); + + return res; +} + +chart_and_dim_ack_t parse_chart_and_dimensions_ack(const char *data, size_t len) +{ + chart::v1::ChartsAndDimensionsAck msg; + chart_and_dim_ack_t res = { .claim_id = NULL, .node_id = NULL, .last_seq_id = 0 }; + + if (!msg.ParseFromArray(data, len)) + return res; + + res.node_id = strdup(msg.node_id().c_str()); + res.claim_id = strdup(msg.claim_id().c_str()); + res.last_seq_id = msg.last_sequence_id(); + + return res; +} + +char *generate_reset_chart_messages(size_t *len, chart_reset_t reset) +{ + chart::v1::ResetChartMessages msg; + + msg.set_claim_id(reset.claim_id); + msg.set_node_id(reset.node_id); + switch (reset.reason) { + case DB_EMPTY: + msg.set_reason(chart::v1::ResetReason::DB_EMPTY); + break; + case SEQ_ID_NOT_EXISTS: + msg.set_reason(chart::v1::ResetReason::SEQ_ID_NOT_EXISTS); + break; + case TIMESTAMP_MISMATCH: + msg.set_reason(chart::v1::ResetReason::TIMESTAMP_MISMATCH); + break; + default: + return NULL; + } + + *len = PROTO_COMPAT_MSG_SIZE(msg); + char *bin = (char*)malloc(*len); + if (bin) + msg.SerializeToArray(bin, *len); + + return bin; +} + +void chart_instance_updated_destroy(struct chart_instance_updated *instance) +{ + freez((char*)instance->id); + freez((char*)instance->claim_id); + + free_label_list(instance->label_head); + + freez((char*)instance->config_hash); +} + +static int set_chart_instance_updated(chart::v1::ChartInstanceUpdated *chart, const struct chart_instance_updated *update) +{ + google::protobuf::Map *map; + aclk_lib::v1::ACLKMessagePosition *pos; + struct label *label; + + chart->set_id(update->id); + chart->set_claim_id(update->claim_id); + chart->set_node_id(update->node_id); + chart->set_name(update->name); + + map = chart->mutable_chart_labels(); + label = update->label_head; + while (label) { + 
map->insert({label->key, label->value}); + label = label->next; + } + + switch (update->memory_mode) { + case RRD_MEMORY_MODE_NONE: + chart->set_memory_mode(chart::v1::NONE); + break; + case RRD_MEMORY_MODE_RAM: + chart->set_memory_mode(chart::v1::RAM); + break; + case RRD_MEMORY_MODE_MAP: + chart->set_memory_mode(chart::v1::MAP); + break; + case RRD_MEMORY_MODE_SAVE: + chart->set_memory_mode(chart::v1::SAVE); + break; + case RRD_MEMORY_MODE_ALLOC: + chart->set_memory_mode(chart::v1::ALLOC); + break; + case RRD_MEMORY_MODE_DBENGINE: + chart->set_memory_mode(chart::v1::DB_ENGINE); + break; + default: + return 1; + break; + } + + chart->set_update_every_interval(update->update_every); + chart->set_config_hash(update->config_hash); + + pos = chart->mutable_position(); + pos->set_sequence_id(update->position.sequence_id); + pos->set_previous_sequence_id(update->position.previous_sequence_id); + set_google_timestamp_from_timeval(update->position.seq_id_creation_time, pos->mutable_seq_id_created_at()); + + return 0; +} + +static int set_chart_dim_updated(chart::v1::ChartDimensionUpdated *dim, const struct chart_dimension_updated *c_dim) +{ + aclk_lib::v1::ACLKMessagePosition *pos; + + dim->set_id(c_dim->id); + dim->set_chart_id(c_dim->chart_id); + dim->set_node_id(c_dim->node_id); + dim->set_claim_id(c_dim->claim_id); + dim->set_name(c_dim->name); + + set_google_timestamp_from_timeval(c_dim->created_at, dim->mutable_created_at()); + set_google_timestamp_from_timeval(c_dim->last_timestamp, dim->mutable_last_timestamp()); + + pos = dim->mutable_position(); + pos->set_sequence_id(c_dim->position.sequence_id); + pos->set_previous_sequence_id(c_dim->position.previous_sequence_id); + set_google_timestamp_from_timeval(c_dim->position.seq_id_creation_time, pos->mutable_seq_id_created_at()); + + return 0; +} + +char *generate_charts_and_dimensions_updated(size_t *len, char **payloads, size_t *payload_sizes, int *is_dim, struct aclk_message_position *new_positions, uint64_t batch_id) +{ + chart::v1::ChartsAndDimensionsUpdated msg; + chart::v1::ChartInstanceUpdated db_chart; + chart::v1::ChartDimensionUpdated db_dim; + aclk_lib::v1::ACLKMessagePosition *pos; + + msg.set_batch_id(batch_id); + + for (int i = 0; payloads[i]; i++) { + if (is_dim[i]) { + if (!db_dim.ParseFromArray(payloads[i], payload_sizes[i])) { + error("[ACLK] Could not parse chart::v1::chart_dimension_updated"); + return NULL; + } + + pos = db_dim.mutable_position(); + pos->set_sequence_id(new_positions[i].sequence_id); + pos->set_previous_sequence_id(new_positions[i].previous_sequence_id); + set_google_timestamp_from_timeval(new_positions[i].seq_id_creation_time, pos->mutable_seq_id_created_at()); + + chart::v1::ChartDimensionUpdated *dim = msg.add_dimensions(); + *dim = db_dim; + } else { + if (!db_chart.ParseFromArray(payloads[i], payload_sizes[i])) { + error("[ACLK] Could not parse chart::v1::ChartInstanceUpdated"); + return NULL; + } + + pos = db_chart.mutable_position(); + pos->set_sequence_id(new_positions[i].sequence_id); + pos->set_previous_sequence_id(new_positions[i].previous_sequence_id); + set_google_timestamp_from_timeval(new_positions[i].seq_id_creation_time, pos->mutable_seq_id_created_at()); + + chart::v1::ChartInstanceUpdated *chart = msg.add_charts(); + *chart = db_chart; + } + } + + *len = PROTO_COMPAT_MSG_SIZE(msg); + char *bin = (char*)mallocz(*len); + msg.SerializeToArray(bin, *len); + + return bin; +} + +char *generate_charts_updated(size_t *len, char **payloads, size_t *payload_sizes, struct aclk_message_position 
*new_positions) +{ + chart::v1::ChartsAndDimensionsUpdated msg; + + msg.set_batch_id(chart_batch_id); + + for (int i = 0; payloads[i]; i++) { + chart::v1::ChartInstanceUpdated db_msg; + chart::v1::ChartInstanceUpdated *chart; + aclk_lib::v1::ACLKMessagePosition *pos; + + if (!db_msg.ParseFromArray(payloads[i], payload_sizes[i])) { + error("[ACLK] Could not parse chart::v1::ChartInstanceUpdated"); + return NULL; + } + + pos = db_msg.mutable_position(); + pos->set_sequence_id(new_positions[i].sequence_id); + pos->set_previous_sequence_id(new_positions[i].previous_sequence_id); + set_google_timestamp_from_timeval(new_positions[i].seq_id_creation_time, pos->mutable_seq_id_created_at()); + + chart = msg.add_charts(); + *chart = db_msg; + } + + *len = PROTO_COMPAT_MSG_SIZE(msg); + char *bin = (char*)mallocz(*len); + msg.SerializeToArray(bin, *len); + + return bin; +} + +char *generate_chart_dimensions_updated(size_t *len, char **payloads, size_t *payload_sizes, struct aclk_message_position *new_positions) +{ + chart::v1::ChartsAndDimensionsUpdated msg; + + msg.set_batch_id(chart_batch_id); + + for (int i = 0; payloads[i]; i++) { + chart::v1::ChartDimensionUpdated db_msg; + chart::v1::ChartDimensionUpdated *dim; + aclk_lib::v1::ACLKMessagePosition *pos; + + if (!db_msg.ParseFromArray(payloads[i], payload_sizes[i])) { + error("[ACLK] Could not parse chart::v1::chart_dimension_updated"); + return NULL; + } + + pos = db_msg.mutable_position(); + pos->set_sequence_id(new_positions[i].sequence_id); + pos->set_previous_sequence_id(new_positions[i].previous_sequence_id); + set_google_timestamp_from_timeval(new_positions[i].seq_id_creation_time, pos->mutable_seq_id_created_at()); + + dim = msg.add_dimensions(); + *dim = db_msg; + } + + *len = PROTO_COMPAT_MSG_SIZE(msg); + char *bin = (char*)mallocz(*len); + msg.SerializeToArray(bin, *len); + + return bin; +} + +char *generate_chart_instance_updated(size_t *len, const struct chart_instance_updated *update) +{ + chart::v1::ChartInstanceUpdated *chart = new chart::v1::ChartInstanceUpdated(); + + if (set_chart_instance_updated(chart, update)) + return NULL; + + *len = PROTO_COMPAT_MSG_SIZE_PTR(chart); + char *bin = (char*)mallocz(*len); + chart->SerializeToArray(bin, *len); + + delete chart; + return bin; +} + +char *generate_chart_dimension_updated(size_t *len, const struct chart_dimension_updated *dim) +{ + chart::v1::ChartDimensionUpdated *proto_dim = new chart::v1::ChartDimensionUpdated(); + + if (set_chart_dim_updated(proto_dim, dim)) + return NULL; + + *len = PROTO_COMPAT_MSG_SIZE_PTR(proto_dim); + char *bin = (char*)mallocz(*len); + proto_dim->SerializeToArray(bin, *len); + + delete proto_dim; + return bin; +} + +using namespace google::protobuf; + +char *generate_retention_updated(size_t *len, struct retention_updated *data) +{ + chart::v1::RetentionUpdated msg; + + msg.set_claim_id(data->claim_id); + msg.set_node_id(data->node_id); + + switch (data->memory_mode) { + case RRD_MEMORY_MODE_NONE: + msg.set_memory_mode(chart::v1::NONE); + break; + case RRD_MEMORY_MODE_RAM: + msg.set_memory_mode(chart::v1::RAM); + break; + case RRD_MEMORY_MODE_MAP: + msg.set_memory_mode(chart::v1::MAP); + break; + case RRD_MEMORY_MODE_SAVE: + msg.set_memory_mode(chart::v1::SAVE); + break; + case RRD_MEMORY_MODE_ALLOC: + msg.set_memory_mode(chart::v1::ALLOC); + break; + case RRD_MEMORY_MODE_DBENGINE: + msg.set_memory_mode(chart::v1::DB_ENGINE); + break; + default: + return NULL; + } + + for (int i = 0; i < data->interval_duration_count; i++) { + Map *map = 
msg.mutable_interval_durations(); + map->insert({data->interval_durations[i].update_every, data->interval_durations[i].retention}); + } + + set_google_timestamp_from_timeval(data->rotation_timestamp, msg.mutable_rotation_timestamp()); + + *len = PROTO_COMPAT_MSG_SIZE(msg); + char *bin = (char*)mallocz(*len); + msg.SerializeToArray(bin, *len); + + return bin; +} diff --git a/aclk/schema-wrappers/chart_stream.h b/aclk/schema-wrappers/chart_stream.h new file mode 100644 index 000000000..7a46ecd8e --- /dev/null +++ b/aclk/schema-wrappers/chart_stream.h @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef ACLK_SCHEMA_WRAPPER_CHART_STREAM_H +#define ACLK_SCHEMA_WRAPPER_CHART_STREAM_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "database/rrd.h" + +typedef struct { + char* claim_id; + char* node_id; + + uint64_t seq_id; + uint64_t batch_id; + + struct timeval seq_id_created_at; +} stream_charts_and_dims_t; + +stream_charts_and_dims_t parse_stream_charts_and_dims(const char *data, size_t len); + +typedef struct { + char* claim_id; + char* node_id; + + uint64_t last_seq_id; +} chart_and_dim_ack_t; + +chart_and_dim_ack_t parse_chart_and_dimensions_ack(const char *data, size_t len); + +enum chart_reset_reason { + DB_EMPTY, + SEQ_ID_NOT_EXISTS, + TIMESTAMP_MISMATCH +}; + +typedef struct { + char *claim_id; + char *node_id; + + enum chart_reset_reason reason; +} chart_reset_t; + +char *generate_reset_chart_messages(size_t *len, const chart_reset_t reset); + +struct aclk_message_position { + uint64_t sequence_id; + struct timeval seq_id_creation_time; + uint64_t previous_sequence_id; +}; + +struct chart_instance_updated { + const char *id; + const char *claim_id; + const char *node_id; + const char *name; + + struct label *label_head; + + RRD_MEMORY_MODE memory_mode; + + uint32_t update_every; + const char * config_hash; + + struct aclk_message_position position; +}; + +void chart_instance_updated_destroy(struct chart_instance_updated *instance); + +struct chart_dimension_updated { + const char *id; + const char *chart_id; + const char *node_id; + const char *claim_id; + const char *name; + struct timeval created_at; + struct timeval last_timestamp; + struct aclk_message_position position; +}; + +typedef struct { + struct chart_instance_updated *charts; + uint16_t chart_count; + + struct chart_dimension_updated *dims; + uint16_t dim_count; + + uint64_t batch_id; +} charts_and_dims_updated_t; + +struct interval_duration { + uint32_t update_every; + uint32_t retention; +}; + +struct retention_updated { + char *claim_id; + char *node_id; + + RRD_MEMORY_MODE memory_mode; + + struct interval_duration *interval_durations; + int interval_duration_count; + + struct timeval rotation_timestamp; +}; + +char *generate_charts_and_dimensions_updated(size_t *len, char **payloads, size_t *payload_sizes, int *is_dim, struct aclk_message_position *new_positions, uint64_t batch_id); +char *generate_charts_updated(size_t *len, char **payloads, size_t *payload_sizes, struct aclk_message_position *new_positions); +char *generate_chart_instance_updated(size_t *len, const struct chart_instance_updated *update); +char *generate_chart_dimensions_updated(size_t *len, char **payloads, size_t *payload_sizes, struct aclk_message_position *new_positions); +char *generate_chart_dimension_updated(size_t *len, const struct chart_dimension_updated *dim); +char *generate_retention_updated(size_t *len, struct retention_updated *data); + +#ifdef __cplusplus +} +#endif + +#endif /* 
ACLK_SCHEMA_WRAPPER_CHART_STREAM_H */ diff --git a/aclk/schema-wrappers/connection.cc b/aclk/schema-wrappers/connection.cc new file mode 100644 index 000000000..e3bbfe31f --- /dev/null +++ b/aclk/schema-wrappers/connection.cc @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "proto/agent/v1/connection.pb.h" +#include "proto/agent/v1/disconnect.pb.h" +#include "connection.h" + +#include "schema_wrapper_utils.h" + +#include +#include + +using namespace agent::v1; + +char *generate_update_agent_connection(size_t *len, const update_agent_connection_t *data) +{ + UpdateAgentConnection connupd; + + connupd.set_claim_id(data->claim_id); + connupd.set_reachable(data->reachable); + connupd.set_session_id(data->session_id); + + connupd.set_update_source((data->lwt) ? CONNECTION_UPDATE_SOURCE_LWT : CONNECTION_UPDATE_SOURCE_AGENT); + + struct timeval tv; + gettimeofday(&tv, NULL); + + google::protobuf::Timestamp *timestamp = connupd.mutable_updated_at(); + timestamp->set_seconds(tv.tv_sec); + timestamp->set_nanos(tv.tv_usec * 1000); + + *len = PROTO_COMPAT_MSG_SIZE(connupd); + char *msg = (char*)malloc(*len); + if (msg) + connupd.SerializeToArray(msg, *len); + + return msg; +} + +struct disconnect_cmd *parse_disconnect_cmd(const char *data, size_t len) { + DisconnectReq req; + struct disconnect_cmd *res; + + if (!req.ParseFromArray(data, len)) + return NULL; + + res = (struct disconnect_cmd *)calloc(1, sizeof(struct disconnect_cmd)); + + if (!res) + return NULL; + + res->reconnect_after_s = req.reconnect_after_seconds(); + res->permaban = req.permaban(); + res->error_code = req.error_code(); + if (req.error_description().c_str()) { + res->error_description = strdup(req.error_description().c_str()); + if (!res->error_description) { + free(res); + return NULL; + } + } + + return res; +} diff --git a/aclk/schema-wrappers/connection.h b/aclk/schema-wrappers/connection.h new file mode 100644 index 000000000..8c223869a --- /dev/null +++ b/aclk/schema-wrappers/connection.h @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef ACLK_SCHEMA_WRAPPER_CONNECTION_H +#define ACLK_SCHEMA_WRAPPER_CONNECTION_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + const char *claim_id; + unsigned int reachable:1; + + int64_t session_id; + + unsigned int lwt:1; + +// TODO in future optional fields +// > 15 optional fields: +// How long the system was running until connection (only applicable when reachable=true) +// google.protobuf.Duration system_uptime = 15; +// How long the netdata agent was running until connection (only applicable when reachable=true) +// google.protobuf.Duration agent_uptime = 16; + + +} update_agent_connection_t; + +char *generate_update_agent_connection(size_t *len, const update_agent_connection_t *data); + +struct disconnect_cmd { + uint64_t reconnect_after_s; + int permaban; + uint32_t error_code; + char *error_description; +}; + +struct disconnect_cmd *parse_disconnect_cmd(const char *data, size_t len); + +#ifdef __cplusplus +} +#endif + +#endif /* ACLK_SCHEMA_WRAPPER_CONNECTION_H */ diff --git a/aclk/schema-wrappers/node_connection.cc b/aclk/schema-wrappers/node_connection.cc new file mode 100644 index 000000000..0a4c8ece1 --- /dev/null +++ b/aclk/schema-wrappers/node_connection.cc @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "proto/nodeinstance/connection/v1/connection.pb.h" +#include "node_connection.h" + +#include "schema_wrapper_utils.h" + +#include +#include + +char 
*generate_node_instance_connection(size_t *len, const node_instance_connection_t *data) { + nodeinstance::v1::UpdateNodeInstanceConnection msg; + + if(data->claim_id) + msg.set_claim_id(data->claim_id); + msg.set_node_id(data->node_id); + + msg.set_liveness(data->live); + msg.set_queryable(data->queryable); + + msg.set_session_id(data->session_id); + msg.set_hops(data->hops); + + struct timeval tv; + gettimeofday(&tv, NULL); + + google::protobuf::Timestamp *timestamp = msg.mutable_updated_at(); + timestamp->set_seconds(tv.tv_sec); + timestamp->set_nanos(tv.tv_usec * 1000); + + *len = PROTO_COMPAT_MSG_SIZE(msg); + char *bin = (char*)malloc(*len); + if (bin) + msg.SerializeToArray(bin, *len); + + return bin; +} diff --git a/aclk/schema-wrappers/node_connection.h b/aclk/schema-wrappers/node_connection.h new file mode 100644 index 000000000..3fd207213 --- /dev/null +++ b/aclk/schema-wrappers/node_connection.h @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef ACLK_SCHEMA_WRAPPER_NODE_CONNECTION_H +#define ACLK_SCHEMA_WRAPPER_NODE_CONNECTION_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + const char* claim_id; + const char* node_id; + + unsigned int live:1; + unsigned int queryable:1; + + int64_t session_id; + + int32_t hops; +} node_instance_connection_t; + +char *generate_node_instance_connection(size_t *len, const node_instance_connection_t *data); + + +#ifdef __cplusplus +} +#endif + +#endif /* ACLK_SCHEMA_WRAPPER_NODE_CONNECTION_H */ diff --git a/aclk/schema-wrappers/node_creation.cc b/aclk/schema-wrappers/node_creation.cc new file mode 100644 index 000000000..c696bb27b --- /dev/null +++ b/aclk/schema-wrappers/node_creation.cc @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "proto/nodeinstance/create/v1/creation.pb.h" +#include "node_creation.h" + +#include "schema_wrapper_utils.h" + +#include + +char *generate_node_instance_creation(size_t *len, const node_instance_creation_t *data) +{ + nodeinstance::create::v1::CreateNodeInstance msg; + + if (data->claim_id) + msg.set_claim_id(data->claim_id); + msg.set_machine_guid(data->machine_guid); + msg.set_hostname(data->hostname); + msg.set_hops(data->hops); + + *len = PROTO_COMPAT_MSG_SIZE(msg); + char *bin = (char*)malloc(*len); + if (bin) + msg.SerializeToArray(bin, *len); + + return bin; +} + +node_instance_creation_result_t parse_create_node_instance_result(const char *data, size_t len) +{ + nodeinstance::create::v1::CreateNodeInstanceResult msg; + node_instance_creation_result_t res = { .node_id = NULL, .machine_guid = NULL }; + + if (!msg.ParseFromArray(data, len)) + return res; + + res.node_id = strdup(msg.node_id().c_str()); + res.machine_guid = strdup(msg.machine_guid().c_str()); + return res; +} diff --git a/aclk/schema-wrappers/node_creation.h b/aclk/schema-wrappers/node_creation.h new file mode 100644 index 000000000..71e45ef55 --- /dev/null +++ b/aclk/schema-wrappers/node_creation.h @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef ACLK_SCHEMA_WRAPPER_NODE_CREATION_H +#define ACLK_SCHEMA_WRAPPER_NODE_CREATION_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + const char* claim_id; + const char* machine_guid; + const char* hostname; + + int32_t hops; +} node_instance_creation_t; + +typedef struct { + char *node_id; + char *machine_guid; +} node_instance_creation_result_t; + +char *generate_node_instance_creation(size_t *len, const node_instance_creation_t *data); +node_instance_creation_result_t 
parse_create_node_instance_result(const char *data, size_t len); + + +#ifdef __cplusplus +} +#endif + +#endif /* ACLK_SCHEMA_WRAPPER_NODE_CREATION_H */ diff --git a/aclk/schema-wrappers/node_info.cc b/aclk/schema-wrappers/node_info.cc new file mode 100644 index 000000000..f6638aa5f --- /dev/null +++ b/aclk/schema-wrappers/node_info.cc @@ -0,0 +1,95 @@ +#include "node_info.h" + +#include "proto/nodeinstance/info/v1/info.pb.h" + +#include "schema_wrapper_utils.h" + +static int generate_node_info(nodeinstance::info::v1::NodeInfo *info, struct aclk_node_info *data) +{ + struct label *label; + google::protobuf::Map *map; + + if (data->name) + info->set_name(data->name); + + if (data->os) + info->set_os(data->os); + if (data->os_name) + info->set_os_name(data->os_name); + if (data->os_version) + info->set_os_version(data->os_version); + + if (data->kernel_name) + info->set_kernel_name(data->kernel_name); + if (data->kernel_version) + info->set_kernel_version(data->kernel_version); + + if (data->architecture) + info->set_architecture(data->architecture); + + info->set_cpus(data->cpus); + + if (data->cpu_frequency) + info->set_cpu_frequency(data->cpu_frequency); + + if (data->memory) + info->set_memory(data->memory); + + if (data->disk_space) + info->set_disk_space(data->disk_space); + + if (data->version) + info->set_version(data->version); + + if (data->release_channel) + info->set_release_channel(data->release_channel); + + if (data->timezone) + info->set_timezone(data->timezone); + + if (data->virtualization_type) + info->set_virtualization_type(data->virtualization_type); + + if (data->container_type) + info->set_container_type(data->container_type); + + if (data->custom_info) + info->set_custom_info(data->custom_info); + + for (size_t i = 0; i < data->service_count; i++) + info->add_services(data->services[i]); + + if (data->machine_guid) + info->set_machine_guid(data->machine_guid); + + map = info->mutable_host_labels(); + label = data->host_labels_head; + while (label) { + map->insert({label->key, label->value}); + label = label->next; + } + + return 0; +} + +char *generate_update_node_info_message(size_t *len, struct update_node_info *info) +{ + nodeinstance::info::v1::UpdateNodeInfo msg; + + msg.set_node_id(info->node_id); + msg.set_claim_id(info->claim_id); + + if (generate_node_info(msg.mutable_data(), &info->data)) + return NULL; + + set_google_timestamp_from_timeval(info->updated_at, msg.mutable_updated_at()); + msg.set_machine_guid(info->machine_guid); + msg.set_child(info->child); + + *len = PROTO_COMPAT_MSG_SIZE(msg); + char *bin = (char*)malloc(*len); + if (bin) + msg.SerializeToArray(bin, *len); + + return bin; +} diff --git a/aclk/schema-wrappers/node_info.h b/aclk/schema-wrappers/node_info.h new file mode 100644 index 000000000..4acb671a5 --- /dev/null +++ b/aclk/schema-wrappers/node_info.h @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef ACLK_SCHEMA_WRAPPER_NODE_INFO_H +#define ACLK_SCHEMA_WRAPPER_NODE_INFO_H + +#include + +#include "database/rrd.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct aclk_node_info { + char *name; + + char *os; + char *os_name; + char *os_version; + + char *kernel_name; + char *kernel_version; + + char *architecture; + + uint32_t cpus; + + char *cpu_frequency; + + char *memory; + + char *disk_space; + + char *version; + + char *release_channel; + + char *timezone; + + char *virtualization_type; + + char *container_type; + + char *custom_info; + + char **services; + size_t service_count; + + char *machine_guid; + + 
struct label *host_labels_head; +}; + +struct update_node_info { + char *node_id; + char *claim_id; + struct aclk_node_info data; + struct timeval updated_at; + char *machine_guid; + int child; +}; + +char *generate_update_node_info_message(size_t *len, struct update_node_info *info); + +#ifdef __cplusplus +} +#endif + +#endif /* ACLK_SCHEMA_WRAPPER_NODE_INFO_H */ diff --git a/aclk/schema-wrappers/schema_wrapper_utils.cc b/aclk/schema-wrappers/schema_wrapper_utils.cc new file mode 100644 index 000000000..b100e20c3 --- /dev/null +++ b/aclk/schema-wrappers/schema_wrapper_utils.cc @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "schema_wrapper_utils.h" + +void set_google_timestamp_from_timeval(struct timeval tv, google::protobuf::Timestamp *ts) +{ + ts->set_nanos(tv.tv_usec*1000); + ts->set_seconds(tv.tv_sec); +} + +void set_timeval_from_google_timestamp(const google::protobuf::Timestamp &ts, struct timeval *tv) +{ + tv->tv_sec = ts.seconds(); + tv->tv_usec = ts.nanos()/1000; +} diff --git a/aclk/schema-wrappers/schema_wrapper_utils.h b/aclk/schema-wrappers/schema_wrapper_utils.h new file mode 100644 index 000000000..494855f82 --- /dev/null +++ b/aclk/schema-wrappers/schema_wrapper_utils.h @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef SCHEMA_WRAPPER_UTILS_H +#define SCHEMA_WRAPPER_UTILS_H + +#include +#include + +#if GOOGLE_PROTOBUF_VERSION < 3001000 +#define PROTO_COMPAT_MSG_SIZE(msg) (size_t)msg.ByteSize(); +#define PROTO_COMPAT_MSG_SIZE_PTR(msg) (size_t)msg->ByteSize(); +#else +#define PROTO_COMPAT_MSG_SIZE(msg) msg.ByteSizeLong(); +#define PROTO_COMPAT_MSG_SIZE_PTR(msg) msg->ByteSizeLong(); +#endif + +void set_google_timestamp_from_timeval(struct timeval tv, google::protobuf::Timestamp *ts); +void set_timeval_from_google_timestamp(const google::protobuf::Timestamp &ts, struct timeval *tv); + +#endif /* SCHEMA_WRAPPER_UTILS_H */ diff --git a/aclk/schema-wrappers/schema_wrappers.h b/aclk/schema-wrappers/schema_wrappers.h new file mode 100644 index 000000000..a3975fca3 --- /dev/null +++ b/aclk/schema-wrappers/schema_wrappers.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +// utility header to include all the message wrappers at once + +#ifndef SCHEMA_WRAPPERS_H +#define SCHEMA_WRAPPERS_H + +#include "connection.h" +#include "node_connection.h" +#include "node_creation.h" +#include "chart_config.h" +#include "chart_stream.h" +#include "alarm_config.h" +#include "alarm_stream.h" +#include "node_info.h" + +#endif /* SCHEMA_WRAPPERS_H */ diff --git a/backends/WALKTHROUGH.md b/backends/WALKTHROUGH.md index 76dd62f83..12eea2ee1 100644 --- a/backends/WALKTHROUGH.md +++ b/backends/WALKTHROUGH.md @@ -176,14 +176,14 @@ Prometheus’s homepage and begin to type ‘netdata\_’ Prometheus should auto ![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.13.43%20PM.png) -Let’s now start exploring how we can graph some metrics. Back in our NetData container lets get the CPU spinning with a +Let’s now start exploring how we can graph some metrics. Back in our Netdata container lets get the CPU spinning with a pointless busy loop. On the shell do the following: ```sh [root@netdata /]# while true; do echo "HOT HOT HOT CPU"; done ``` -Our NetData cpu graph should be showing some activity. Let’s represent this in Prometheus. In order to do this let’s +Our Netdata cpu graph should be showing some activity. Let’s represent this in Prometheus. 
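Before switching to the Prometheus UI, it can help to confirm what the agent is actually exposing. A minimal check from the shell — assuming the agent is reachable as `netdata:19999`, matching the instance used in the example queries below — is to pull the exposition page directly:

```sh
# Fetch the Prometheus-format metrics page and keep only the system.cpu series
curl -s 'http://netdata:19999/api/v1/allmetrics?format=prometheus' | grep 'system_cpu'
```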
In order to do this let’s keep our metrics page open for reference: We are setting out to graph the data in the CPU chart so let’s search for “system.cpu”in the metrics page above. We come across a section of metrics with the first comments `# COMMENT homogeneous chart "system.cpu", context "system.cpu", family @@ -209,18 +209,18 @@ query the dimension also. Place this into our query text box. ![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.54.40%20PM.png) -Awesome, this is exactly what we wanted. If you haven’t caught on yet we can emulate entire charts from NetData by using +Awesome, this is exactly what we wanted. If you haven’t caught on yet we can emulate entire charts from Netdata by using the `chart` dimension. If you’d like you can combine the ‘chart’ and ‘instance’ dimension to create per-instance charts. Let’s give this a try: `netdata_system_cpu_percentage_average{chart="system.cpu", instance="netdata:19999"}` -This is the basics of using Prometheus to query NetData. I’d advise everyone at this point to read [this -page](../backends/prometheus/#using-netdata-with-prometheus). The key point here is that NetData can export metrics from +This is the basics of using Prometheus to query Netdata. I’d advise everyone at this point to read [this +page](../backends/prometheus/#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from its internal DB or can send metrics “as-collected” by specifying the ‘source=as-collected’ url parameter like so. If you choose to use this method you will need to use Prometheus's set of functions here: to obtain useful metrics as you are now dealing with raw counters from the system. For example you will have to use the `irate()` function over a counter to get that metric's rate per second. If your graphing needs are met by using the -metrics returned by NetData's internal database (not specifying any source= url parameter) then use that. If you find +metrics returned by Netdata's internal database (not specifying any source= url parameter) then use that. If you find limitations then consider re-writing your queries using the raw data and using Prometheus functions to get the desired chart. diff --git a/backends/prometheus/README.md b/backends/prometheus/README.md index 10275fa20..a0460d1d8 100644 --- a/backends/prometheus/README.md +++ b/backends/prometheus/README.md @@ -110,7 +110,7 @@ scrape_configs: # You can use `prometheus_all_hosts` if you want Prometheus to set the `instance` to your hostname instead of IP format: [prometheus] # - # sources: as-collected | raw | average | sum | volume + # source: as-collected | raw | average | sum | volume # default is: average #source: [as-collected] # @@ -125,46 +125,46 @@ scrape_configs: #### Install nodes.yml -The following is completely optional, it will enable Prometheus to generate alerts from some NetData sources. Tweak the +The following is completely optional, it will enable Prometheus to generate alerts from some Netdata sources. Tweak the values to your own needs. We will use the following `nodes.yml` file below. Save it at `/opt/prometheus/nodes.yml`, and add a _- "nodes.yml"_ entry under the _rule_files:_ section in the example prometheus.yml file above. 
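Before reloading Prometheus, it is worth validating the rule file syntax — a quick check, assuming `promtool` (shipped alongside the Prometheus binary) is on your `PATH`:

```sh
# Parse the rule groups and report any syntax or PromQL expression errors
promtool check rules /opt/prometheus/nodes.yml
```

The `nodes.yml` contents follow: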
```yaml groups: -- name: nodes - - rules: - - alert: node_high_cpu_usage_70 - expr: avg(rate(netdata_cpu_cpu_percentage_average{dimension="idle"}[1m])) by (job) > 70 - for: 1m - annotations: - description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.' - summary: CPU alert for container node '{{ $labels.job }}' - - - alert: node_high_memory_usage_70 - expr: 100 / sum(netdata_system_ram_MB_average) by (job) - * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30 - for: 1m - annotations: - description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.' - summary: Memory alert for container node '{{ $labels.job }}' - - - alert: node_low_root_filesystem_space_20 - expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job) - * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20 - for: 1m - annotations: - description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.' - summary: Root filesystem alert for container node '{{ $labels.job }}' - - - alert: node_root_filesystem_fill_rate_6h - expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0 - for: 1h - labels: - severity: critical - annotations: - description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h. - summary: Disk fill alert for Swarm node '{{ $labels.job }}' + - name: nodes + + rules: + - alert: node_high_cpu_usage_70 + expr: sum(sum_over_time(netdata_system_cpu_percentage_average{dimension=~"(user|system|softirq|irq|guest)"}[10m])) by (job) / sum(count_over_time(netdata_system_cpu_percentage_average{dimension="idle"}[10m])) by (job) > 70 + for: 1m + annotations: + description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.' + summary: CPU alert for container node '{{ $labels.job }}' + + - alert: node_high_memory_usage_70 + expr: 100 / sum(netdata_system_ram_MB_average) by (job) + * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30 + for: 1m + annotations: + description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.' + summary: Memory alert for container node '{{ $labels.job }}' + + - alert: node_low_root_filesystem_space_20 + expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job) + * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20 + for: 1m + annotations: + description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.' + summary: Root filesystem alert for container node '{{ $labels.job }}' + + - alert: node_root_filesystem_fill_rate_6h + expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0 + for: 1h + labels: + severity: critical + annotations: + description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h. + summary: Disk fill alert for Swarm node '{{ $labels.job }}' ``` #### Install prometheus.service diff --git a/build_external/scenarios/aclk-testing/agent-compose.yml b/build_external/scenarios/aclk-testing/agent-compose.yml index c05c97cdf..5f0f19af5 100644 --- a/build_external/scenarios/aclk-testing/agent-compose.yml +++ b/build_external/scenarios/aclk-testing/agent-compose.yml @@ -5,8 +5,8 @@ services: context: ../../.. 
dockerfile: build_external/make-install.Dockerfile args: - - DISTRO=arch - - VERSION=current + - DISTRO=arch + - VERSION=current image: arch_current_dev:latest command: > sh -c "echo -n 00000000-0000-0000-0000-000000000000 >/var/lib/netdata/cloud.d/claimed_id && @@ -15,5 +15,4 @@ services: echo ' agent cloud link port = 9002' >>/etc/netdata/netdata.conf && /usr/sbin/netdata -D" ports: - - 20000:19999 - + - 20000:19999 diff --git a/build_external/scenarios/aclk-testing/agent-valgrind-compose.yml b/build_external/scenarios/aclk-testing/agent-valgrind-compose.yml index d404ed41b..3173e81e4 100644 --- a/build_external/scenarios/aclk-testing/agent-valgrind-compose.yml +++ b/build_external/scenarios/aclk-testing/agent-valgrind-compose.yml @@ -5,8 +5,8 @@ services: context: ../../.. dockerfile: build_external/make-install.Dockerfile args: - - DISTRO=arch - - VERSION=extras + - DISTRO=arch + - VERSION=extras image: arch_extras_dev:latest command: > sh -c "echo -n 00000000-0000-0000-0000-000000000000 >/var/lib/netdata/cloud.d/claimed_id && @@ -15,5 +15,4 @@ services: echo ' agent cloud link port = 9002' >>/etc/netdata/netdata.conf && /usr/sbin/valgrind --leak-check=full /usr/sbin/netdata -D -W debug_flags=0x200000000" ports: - - 20000:19999 - + - 20000:19999 diff --git a/build_external/scenarios/aclk-testing/agent_netdata.conf b/build_external/scenarios/aclk-testing/agent_netdata.conf index d13e51453..774005f76 100644 --- a/build_external/scenarios/aclk-testing/agent_netdata.conf +++ b/build_external/scenarios/aclk-testing/agent_netdata.conf @@ -1124,7 +1124,7 @@ # context = netdata.statsd_cpu # priority = 132001 # name = netdata.plugin_statsd_charting_cpu - # title = NetData statsd charting thread CPU usage + # title = Netdata statsd charting thread CPU usage # dim user name = user # dim user algorithm = incremental # dim user multiplier = 1 @@ -1145,7 +1145,7 @@ # context = netdata.statsd_cpu # priority = 132002 # name = netdata.plugin_statsd_collector1_cpu - # title = NetData statsd collector thread No 1 CPU usage + # title = Netdata statsd collector thread No 1 CPU usage # dim user name = user # dim user algorithm = incremental # dim user multiplier = 1 @@ -1955,7 +1955,7 @@ # context = netdata.plugin_cgroups_cpu # priority = 132000 # name = netdata.plugin_cgroups_cpu - # title = NetData CGroups Plugin CPU usage + # title = Netdata CGroups Plugin CPU usage # dim user name = user # dim user algorithm = incremental # dim user multiplier = 1 @@ -1976,7 +1976,7 @@ # context = netdata.plugin_diskspace # priority = 132020 # name = netdata.plugin_diskspace - # title = NetData Disk Space Plugin CPU usage + # title = Netdata Disk Space Plugin CPU usage # dim user name = user # dim user algorithm = incremental # dim user multiplier = 1 @@ -1997,7 +1997,7 @@ # context = netdata.plugin_diskspace_dt # priority = 132021 # name = netdata.plugin_diskspace_dt - # title = NetData Disk Space Plugin Duration + # title = Netdata Disk Space Plugin Duration # dim duration name = duration # dim duration algorithm = absolute # dim duration multiplier = 1 @@ -4844,7 +4844,7 @@ # context = netdata.plugin_proc_modules # priority = 132001 # name = netdata.plugin_proc_modules - # title = NetData Proc Plugin Modules Durations + # title = Netdata Proc Plugin Modules Durations # dim stat name = stat # dim stat algorithm = absolute # dim stat multiplier = 1 @@ -4949,7 +4949,7 @@ # context = netdata.plugin_proc_cpu # priority = 132000 # name = netdata.plugin_proc_cpu - # title = NetData Proc Plugin CPU usage + # title = Netdata 
Proc Plugin CPU usage # dim user name = user # dim user algorithm = incremental # dim user multiplier = 1 @@ -4970,7 +4970,7 @@ # context = netdata.server_cpu # priority = 130000 # name = netdata.server_cpu - # title = NetData CPU usage + # title = Netdata CPU usage # dim user name = user # dim user algorithm = incremental # dim user multiplier = 1 @@ -4991,7 +4991,7 @@ # context = netdata.clients # priority = 130200 # name = netdata.clients - # title = NetData Web Clients + # title = Netdata Web Clients # dim clients name = clients # dim clients algorithm = absolute # dim clients multiplier = 1 @@ -5008,7 +5008,7 @@ # context = netdata.requests # priority = 130300 # name = netdata.requests - # title = NetData Web Requests + # title = Netdata Web Requests # dim requests name = requests # dim requests algorithm = incremental # dim requests multiplier = 1 @@ -5025,7 +5025,7 @@ # context = netdata.net # priority = 130000 # name = netdata.net - # title = NetData Network Traffic + # title = Netdata Network Traffic # dim in name = in # dim in algorithm = incremental # dim in multiplier = 8 @@ -5046,7 +5046,7 @@ # context = netdata.response_time # priority = 130400 # name = netdata.response_time - # title = NetData API Response Time + # title = Netdata API Response Time # dim average name = average # dim average algorithm = absolute # dim average multiplier = 1 @@ -5067,7 +5067,7 @@ # context = netdata.compression_ratio # priority = 130500 # name = netdata.compression_ratio - # title = NetData API Responses Compression Savings Ratio + # title = Netdata API Responses Compression Savings Ratio # dim savings name = savings # dim savings algorithm = absolute # dim savings multiplier = 1 @@ -5084,7 +5084,7 @@ # context = netdata.dbengine_compression_ratio # priority = 130502 # name = netdata.dbengine_compression_ratio - # title = NetData DB engine data extents' compression savings ratio + # title = Netdata DB engine data extents' compression savings ratio # dim savings name = savings # dim savings algorithm = absolute # dim savings multiplier = 1 @@ -5101,7 +5101,7 @@ # context = netdata.page_cache_hit_ratio # priority = 130503 # name = netdata.page_cache_hit_ratio - # title = NetData DB engine page cache hit ratio + # title = Netdata DB engine page cache hit ratio # dim ratio name = ratio # dim ratio algorithm = absolute # dim ratio multiplier = 1 @@ -5118,7 +5118,7 @@ # context = netdata.page_cache_stats # priority = 130504 # name = netdata.page_cache_stats - # title = NetData dbengine page cache statistics + # title = Netdata dbengine page cache statistics # dim descriptors name = descriptors # dim descriptors algorithm = absolute # dim descriptors multiplier = 1 @@ -5155,7 +5155,7 @@ # context = netdata.dbengine_long_term_page_stats # priority = 130505 # name = netdata.dbengine_long_term_page_stats - # title = NetData dbengine long-term page statistics + # title = Netdata dbengine long-term page statistics # dim total name = total # dim total algorithm = absolute # dim total multiplier = 1 @@ -5184,7 +5184,7 @@ # context = netdata.dbengine_io_throughput # priority = 130506 # name = netdata.dbengine_io_throughput - # title = NetData DB engine I/O throughput + # title = Netdata DB engine I/O throughput # dim reads name = reads # dim reads algorithm = incremental # dim reads multiplier = 1 @@ -5205,7 +5205,7 @@ # context = netdata.dbengine_io_operations # priority = 130507 # name = netdata.dbengine_io_operations - # title = NetData DB engine I/O operations + # title = Netdata DB engine I/O operations 
# dim reads name = reads # dim reads algorithm = incremental # dim reads multiplier = 1 @@ -5226,7 +5226,7 @@ # context = netdata.dbengine_global_errors # priority = 130508 # name = netdata.dbengine_global_errors - # title = NetData DB engine errors + # title = Netdata DB engine errors # dim io_errors name = io_errors # dim io_errors algorithm = incremental # dim io_errors multiplier = 1 @@ -5251,7 +5251,7 @@ # context = netdata.dbengine_global_file_descriptors # priority = 130509 # name = netdata.dbengine_global_file_descriptors - # title = NetData DB engine File Descriptors + # title = Netdata DB engine File Descriptors # dim current name = current # dim current algorithm = absolute # dim current multiplier = 1 @@ -5272,7 +5272,7 @@ # context = netdata.dbengine_ram # priority = 130510 # name = netdata.dbengine_ram - # title = NetData DB engine RAM usage + # title = Netdata DB engine RAM usage # dim cache name = cache # dim cache algorithm = absolute # dim cache multiplier = 1 @@ -5297,7 +5297,7 @@ # context = netdata.web_cpu # priority = 132003 # name = netdata.web_thread4_cpu - # title = NetData web server thread No 4 CPU usage + # title = Netdata web server thread No 4 CPU usage # dim user name = user # dim user algorithm = incremental # dim user multiplier = 1 @@ -5318,7 +5318,7 @@ # context = netdata.web_cpu # priority = 132000 # name = netdata.web_thread1_cpu - # title = NetData web server thread No 1 CPU usage + # title = Netdata web server thread No 1 CPU usage # dim user name = user # dim user algorithm = incremental # dim user multiplier = 1 @@ -5339,7 +5339,7 @@ # context = netdata.web_cpu # priority = 132005 # name = netdata.web_thread6_cpu - # title = NetData web server thread No 6 CPU usage + # title = Netdata web server thread No 6 CPU usage # dim user name = user # dim user algorithm = incremental # dim user multiplier = 1 @@ -5360,7 +5360,7 @@ # context = netdata.web_cpu # priority = 132002 # name = netdata.web_thread3_cpu - # title = NetData web server thread No 3 CPU usage + # title = Netdata web server thread No 3 CPU usage # dim user name = user # dim user algorithm = incremental # dim user multiplier = 1 @@ -5381,7 +5381,7 @@ # context = netdata.web_cpu # priority = 132001 # name = netdata.web_thread2_cpu - # title = NetData web server thread No 2 CPU usage + # title = Netdata web server thread No 2 CPU usage # dim user name = user # dim user algorithm = incremental # dim user multiplier = 1 @@ -5520,7 +5520,7 @@ # context = netdata.web_cpu # priority = 132004 # name = netdata.web_thread5_cpu - # title = NetData web server thread No 5 CPU usage + # title = Netdata web server thread No 5 CPU usage # dim user name = user # dim user algorithm = incremental # dim user multiplier = 1 diff --git a/build_external/scenarios/aclk-testing/paho-compose.yml b/build_external/scenarios/aclk-testing/paho-compose.yml index 4fc6ce2c4..8c39e20fc 100644 --- a/build_external/scenarios/aclk-testing/paho-compose.yml +++ b/build_external/scenarios/aclk-testing/paho-compose.yml @@ -3,4 +3,4 @@ services: paho_inspect: build: context: . - dockerfile: paho.Dockerfile \ No newline at end of file + dockerfile: paho.Dockerfile diff --git a/build_external/scenarios/aclk-testing/vernemq-compose.yml b/build_external/scenarios/aclk-testing/vernemq-compose.yml index a9f07a546..3ec805a74 100644 --- a/build_external/scenarios/aclk-testing/vernemq-compose.yml +++ b/build_external/scenarios/aclk-testing/vernemq-compose.yml @@ -4,4 +4,3 @@ services: build: dockerfile: configureVerneMQ.Dockerfile context: . 
- diff --git a/build_external/scenarios/gaps_hi/child-compose.yml b/build_external/scenarios/gaps_hi/child-compose.yml index e50736093..2ca306f37 100644 --- a/build_external/scenarios/gaps_hi/child-compose.yml +++ b/build_external/scenarios/gaps_hi/child-compose.yml @@ -10,5 +10,4 @@ services: #- ./child_guid:/var/lib/netdata/registry/netdata.public.unique.id:ro - ./min.conf:/etc/netdata/netdata.conf:ro cap_add: - - SYS_PTRACE - + - SYS_PTRACE diff --git a/build_external/scenarios/gaps_hi/middle-compose.yml b/build_external/scenarios/gaps_hi/middle-compose.yml index cb4a04555..c316164dd 100644 --- a/build_external/scenarios/gaps_hi/middle-compose.yml +++ b/build_external/scenarios/gaps_hi/middle-compose.yml @@ -10,4 +10,4 @@ services: - ./middle_guid:/var/lib/netdata/registry/netdata.public.unique.id:ro - ./min.conf:/etc/netdata/netdata.conf:ro cap_add: - - SYS_PTRACE + - SYS_PTRACE diff --git a/build_external/scenarios/gaps_lo/child-compose.yml b/build_external/scenarios/gaps_lo/child-compose.yml index dca900c2e..dee06f2ee 100644 --- a/build_external/scenarios/gaps_lo/child-compose.yml +++ b/build_external/scenarios/gaps_lo/child-compose.yml @@ -12,4 +12,3 @@ services: - ./mostly_off.conf:/etc/netdata/netdata.conf:ro cap_add: - SYS_PTRACE - diff --git a/build_external/scenarios/only-agent/docker-compose.yml b/build_external/scenarios/only-agent/docker-compose.yml index 74bdef8e3..eb1386fd5 100644 --- a/build_external/scenarios/only-agent/docker-compose.yml +++ b/build_external/scenarios/only-agent/docker-compose.yml @@ -4,5 +4,5 @@ services: image: ${Distro}_${Version}_dev command: /usr/sbin/netdata -D ports: - - 80 - - 443 + - 80 + - 443 diff --git a/claim/README.md b/claim/README.md index b3ebb8221..bbccaac19 100644 --- a/claim/README.md +++ b/claim/README.md @@ -1,12 +1,12 @@ -# Agent claiming +# Connect Agent to Cloud -Agent claiming allows a Netdata Agent, running on a distributed node, to securely connect to Netdata Cloud. A Space's +You can securely connect a Netdata Agent, running on a distributed node, to Netdata Cloud. A Space's administrator creates a **claiming token**, which is used to add an Agent to their Space via the [Agent-Cloud link (ACLK)](/aclk/README.md). @@ -14,54 +14,83 @@ Are you just starting out with Netdata Cloud? See our [get started with Cloud](https://learn.netdata.cloud/docs/cloud/get-started) guide for a walkthrough of the process and simplified instructions. -Claiming nodes is a security feature in Netdata Cloud. Through the process of claiming, you demonstrate in a few ways -that you have administrative access to that node and the configuration settings for its Agent. By logging into the node, -you prove you have access, and by using the claiming script or the Netdata command line, you prove you have write access -and administrative privileges. +When connecting an agent (also referred to as a node) to Netdata Cloud, you must complete a verification process that proves you have some level of authorization to manage the node itself. This verification is a security feature that helps prevent unauthorized users from seeing the data on your node. Only the administrators of a Space in Netdata Cloud can view the claiming token and accompanying script generated by Netdata Cloud. 
-> The claiming process ensures no third party can add your node, and then view your node's metrics, in a Cloud account,
+> The connection process ensures no third party can add your node, and then view your node's metrics, in a Cloud account,
> Space, or War Room that you did not authorize.

-By claiming a node, you opt-in to sending data from your Agent to Netdata Cloud via the [ACLK](/aclk/README.md). This
-data is encrypted by TLS while it is in transit. We use the RSA keypair created during claiming to authenticate the
-identity of the Agent when it connects to the Cloud. While the data does flow through Netdata Cloud servers on its way
+By connecting a node, you opt-in to sending data from your Agent to Netdata Cloud via the [ACLK](/aclk/README.md). This
+data is encrypted by TLS while it is in transit. We use the RSA keypair created during the connection process to authenticate the
+identity of the Netdata Agent when it connects to the Cloud. While the data does flow through Netdata Cloud servers on its way
from Agents to the browser, we do not store or log it.

-You can claim a node during the Netdata Cloud onboarding process, or after you created a Space by clicking on **Claim
+You can connect a node during the Netdata Cloud onboarding process, or after you have created a Space, by clicking on **Connect
Nodes** in the [Spaces management area](https://learn.netdata.cloud/docs/cloud/spaces#manage-spaces).

-There are two important notes regarding claiming:
+There are two important notes regarding connecting nodes:

-- _You can only claim any given node in a single Space_. You can, however, add that claimed node to multiple War Rooms
+- _You can only connect any given node in a single Space_. You can, however, add that connected node to multiple War Rooms
within that one Space.
-- You must repeat the claiming process on every node you want to add to Netdata Cloud.
+- You must repeat the connection process on every node you want to add to Netdata Cloud.

-## How to claim a node
+## How to connect a node

-To claim a node, select which War Rooms you want to add this node to with the dropdown, then copy and paste the script
-given by Cloud into your node's terminal. Hit **Enter**.
+There are three main flows from which you might want to connect a node to Netdata Cloud:
+* when you are in an [empty War Room](#empty-war-room) and you want to connect your first node
+* when you are in the [Manage Space](#manage-space-or-war-room) area and you select **Connect Nodes** to connect a node, coming from Manage Space or Manage War Room
+* when you are on the [Nodes view page](https://learn.netdata.cloud/docs/cloud/visualize/nodes) and want to connect a node - this process falls into the [Manage Space](#manage-space-or-war-room) flow

-```bash
-sudo netdata-claim.sh -token=TOKEN -rooms=ROOM1,ROOM2 -url=https://app.netdata.cloud
-```
+Please note that only the administrators of a Space in Netdata Cloud can view the claiming token and accompanying script, generated by Netdata Cloud, that triggers the connection process.
+
+### Empty War Room
+
+Whether it is your first sign-in or a later one, when you enter Netdata Cloud and land on a War Room that doesn't have any nodes added to it, you will be able to:
+* connect a new node to Netdata Cloud and add it to the War Room
+* add a previously connected node to the War Room

-The script should return `Agent was successfully claimed.`. If the claiming script returns errors, or if you don't see
-the node in your Space after 60 seconds, see the [troubleshooting information](#troubleshooting). If you prefer not to
-use root privileges via `sudo` to run the claiming script, see the next section.
+If your case is to connect a new node and add it to the War Room, you will need to tell us what environment the node is running on (Linux, Docker, macOS, Kubernetes) and we will then provide you with a script to initiate the connection process. You will just need to copy and paste it into your node's terminal. See one of the following sections depending on your case:
+* [Linux](#connect-an-agent-running-in-linux)
+* [Docker](#connect-an-agent-running-in-docker)
+* [macOS](#connect-an-agent-running-in-macos)
+* [Kubernetes](#connect-a-kubernetes-clusters-parent-netdata-pod)

-Repeat this process with every node you want to add to Cloud during onboarding. You can also add more nodes once you've
+Repeat this process with every node you want to add to Netdata Cloud during onboarding. You can also add more nodes once you've
finished onboarding.

-### Claim an agent without root privileges
+### Manage Space or War Room
+
+To connect a node, select which War Rooms you want to add this node to with the dropdown, then copy and paste the script
+given by Netdata Cloud into your node's terminal.
+
+When coming from the [Nodes view page](https://learn.netdata.cloud/docs/cloud/visualize/nodes), the room parameter is already set to the current War Room.
+
+### Connect an agent running in Linux
+
+If you want to connect a node running on Linux, Netdata Cloud provides the [kickstart](/packaging/installer/#automatic-one-line-installation-script) script, which installs the Netdata Agent on your node, if it isn't already installed, and connects the node to Netdata Cloud. It should be similar to:

-If you don't want to run the claiming script with root privileges, you can discover which user is running the Agent,
-switch to that user, and run the claiming script.
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --claim-token TOKEN --claim-rooms ROOM1,ROOM2 --claim-url https://app.netdata.cloud
+```
+The script should return `Agent was successfully claimed.`. If the connection process returns errors, or if you don't see
+the node in your Space after 60 seconds, see the [troubleshooting information](#troubleshooting).
+
+Please note that to run it you will either need root privileges, or to run it as the user that is running the agent; more details in the [Connect an agent without root privileges](#connect-an-agent-without-root-privileges) section.
+
+For more details on the extra parameters `claim-token`, `claim-rooms` and `claim-url`, please refer to [Connect node to Netdata Cloud during installation](/packaging/installer/methods/kickstart#connect-node-to-netdata-cloud-during-installation).
+
+### Connect an agent without root privileges
+
+If you don't want to run the installation script to connect your nodes to Netdata Cloud with root privileges, you can discover which user is running the Agent,
+switch to that user, and run the script.

Use `grep` to search your `netdata.conf` file, which is typically located at `/etc/netdata/netdata.conf`, for the `run as user` setting. For example:
```bash
grep "run as user" /etc/netdata/netdata.conf
@@ -69,22 +98,21 @@ grep "run as user" /etc/netdata/netdata.conf
```

The default user is `netdata`. Yours may be different, so pay attention to the output from `grep`. Switch to that user
-and run the claiming script.
+and run the script.

```bash
-netdata-claim.sh -token=TOKEN -rooms=ROOM1,ROOM2 -url=https://app.netdata.cloud
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --claim-token TOKEN --claim-rooms ROOM1,ROOM2 --claim-url https://app.netdata.cloud
```
+### Connect an agent running in Docker

-Hit **Enter**. The script should return `Agent was successfully claimed.`. If the claiming script returns errors, or if
-you don't see the node in your Space after 60 seconds, see the [troubleshooting information](#troubleshooting).
+To connect an instance of the Netdata Agent running inside of a Docker container, it is recommended that you follow
+the instructions and use the commands provided either in the `Nodes` tab of an [empty War Room](#empty-war-room) on Netdata Cloud or
+in the shelf that appears when you click **Connect Nodes** and select **Docker**.

-### Claim an Agent running in Docker
+However, users can also connect a new node by setting the claiming environment variables in the container to have it automatically
+connected on startup or restart.

-To claim an instance of the Netdata Agent running inside of a Docker container, either set claiming environment
-variables in the container to have it automatically claimed on startup or restart, or use `docker exec` to manually
-claim an already running container.
-
-For claiming to work, the contents of `/var/lib/netdata` _must_ be preserved across container
+For the connection process to work, the contents of `/var/lib/netdata` _must_ be preserved across container
restarts using a persistent volume. See our [recommended `docker run` and Docker Compose
examples](/packaging/docker/README.md#create-a-new-netdata-agent-container) for details.

@@ -97,17 +125,21 @@ The Netdata Docker container looks for the following environment variables on st
- `NETDATA_CLAIM_ROOMS`
- `NETDATA_CLAIM_PROXY`

-If the token and URL are specified in their corresponding variables _and_ the container is not already claimed,
-it will use these values to attempt to claim the container, automatically adding the node to the specified War
-Rooms. If a proxy is specified, it will be used for the claiming process and for connecting to Netdata Cloud.
+If the token and URL are specified in their corresponding variables _and_ the container is not already connected,
+it will use these values to attempt to connect the container, automatically adding the node to the specified War
+Rooms. If a proxy is specified, it will be used for the connection process and for connecting to Netdata Cloud.

These variables can be specified using any mechanism supported by your container tooling for setting environment
-variables inside containers. For example, when creating a new Netdata continer using `docker run`, the following
-modified version of the command can be used to set the variables:
+variables inside containers.

-```bash
+When using the `docker run` command, if you have an agent container already running, it is important to know that there will be a short period of downtime. This is due to the process of recreating the new agent container.
+
+The command to connect a new node to Netdata Cloud is:
+
+```bash
docker run -d --name=netdata \
-p 19999:19999 \
+  -v netdataconfig:/etc/netdata \
-v netdatalib:/var/lib/netdata \
-v netdatacache:/var/cache/netdata \
-v /etc/passwd:/host/etc/passwd:ro \
@@ -115,40 +147,99 @@ docker run -d --name=netdata \
-v /proc:/host/proc:ro \
-v /sys:/host/sys:ro \
-v /etc/os-release:/host/etc/os-release:ro \
-  -e NETDATA_CLAIM_TOKEN=TOKEN \
-  -e NETDATA_CLAIM_URL="https://app.netdata.cloud" \
-  -e NETDATA_CLAIM_ROOMS=ROOM1,ROOM2 \
--restart unless-stopped \
--cap-add SYS_PTRACE \
--security-opt apparmor=unconfined \
-  netdata/netdata
+  -e NETDATA_CLAIM_TOKEN=TOKEN \
+  -e NETDATA_CLAIM_URL="https://app.netdata.cloud" \
+  -e NETDATA_CLAIM_ROOMS=ROOM1,ROOM2 \
+  -e NETDATA_CLAIM_PROXY=PROXY \
+  netdata/netdata
```
+>Note: This command is suggested for connecting a new container. Using this command for an existing container recreates the container, though data
+and configuration of the old container may be preserved. If you are connecting an existing container that cannot be recreated,
+you can add the container by going to Netdata Cloud, clicking the **Nodes** tab, clicking **Connect Nodes**, selecting **Docker**, and following
+the instructions and commands provided or by following the instructions in an [empty War Room](#empty-war-room).

-Output that would be seen from the claiming script when using other methods will be present in the container logs.
+The output that would be seen from the connection process when using other methods will be present in the container logs.

-Using the environment variables like this to handle claiming is the preferred method of claiming Docker containers
+Using the environment variables like this to handle the connection process is the preferred method of connecting Docker containers
as it works in the widest variety of situations and simplifies configuration management.

+#### Using Docker compose
+
+If you use `docker compose`, you can copy the config provided by Netdata Cloud, which should be the same as the one below:
+
+```yaml
+version: '3'
+services:
+  netdata:
+    image: netdata/netdata
+    container_name: netdata
+    hostname: example.com # set to fqdn of host
+    ports:
+      - 19999:19999
+    restart: unless-stopped
+    cap_add:
+      - SYS_PTRACE
+    security_opt:
+      - apparmor:unconfined
+    volumes:
+      - netdataconfig:/etc/netdata
+      - netdatalib:/var/lib/netdata
+      - netdatacache:/var/cache/netdata
+      - /etc/passwd:/host/etc/passwd:ro
+      - /etc/group:/host/etc/group:ro
+      - /proc:/host/proc:ro
+      - /sys:/host/sys:ro
+      - /etc/os-release:/host/etc/os-release:ro
+    environment:
+      - NETDATA_CLAIM_TOKEN=TOKEN
+      - NETDATA_CLAIM_URL="https://app.netdata.cloud"
+      - NETDATA_CLAIM_ROOMS=ROOM1,ROOM2
+
+volumes:
+  netdataconfig:
+  netdatalib:
+  netdatacache:
+```
+
+Then run the following command in the same directory as the `docker-compose.yml` file to start the container.
+
+```bash
+docker-compose up -d
+```

#### Using docker exec

-Claim a _running Netdata Agent container_ by appending the script offered by Cloud to a `docker exec ...` command, replacing
+To connect a _running Netdata Agent container_ without recreating it, append the script offered by Netdata Cloud to a `docker exec ...` command, replacing
`netdata` with the name of your running container:

```bash
docker exec -it netdata netdata-claim.sh -token=TOKEN -rooms=ROOM1,ROOM2 -url=https://app.netdata.cloud
```
+The values for `ROOM1,ROOM2` can be found by going to Netdata Cloud, clicking the **Nodes** tab, clicking **Connect Nodes**, selecting **Docker**, and copying the `rooms=` value in the command provided.

-The script should return `Agent was successfully claimed.`. If the claiming script returns errors, or if
+The script should return `Agent was successfully claimed.`. If the connection process returns errors, or if
you don't see the node in your Space after 60 seconds, see the [troubleshooting information](#troubleshooting).

-### Claim a Kubernetes cluster's parent Netdata pod
+### Connect an agent running in macOS

-Read our [Kubernetes installation](/packaging/installer/methods/kubernetes.md#claim-a-kubernetes-clusters-parent-pod)
-for details on claiming a parent Netdata pod.
+To connect a node running on macOS, Netdata Cloud provides the [kickstart](/packaging/installer/methods/macos#install-netdata-with-kickstart) script, which installs the Netdata Agent on your node, if it isn't already installed, and connects the node to Netdata Cloud. It should be similar to:

-### Claim through a proxy
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --install /usr/local/ --claim-token TOKEN --claim-rooms ROOM1,ROOM2 --claim-url https://app.netdata.cloud
+```
+The script should return `Agent was successfully claimed.`. If the connection process returns errors, or if you don't see
+the node in your Space after 60 seconds, see the [troubleshooting information](#troubleshooting).
+
+### Connect a Kubernetes cluster's parent Netdata pod
+
+Read our [Kubernetes installation](/packaging/installer/methods/kubernetes.md#connect-a-kubernetes-clusters-parent-pod)
+for details on connecting a parent Netdata pod.

-A Space's administrator can claim a node through a SOCKS5 or HTTP(S) proxy.
+### Connect through a proxy
+
+A Space's administrator can connect a node through a SOCKS5 or HTTP(S) proxy.

You should first configure the proxy in the `[cloud]` section of `netdata.conf`. The proxy settings you specify here
will also be used to tunnel the ACLK. The default `proxy` setting is `none`.

@@ -162,8 +253,8 @@ The `proxy` setting can take one of the following values:

- `none`: Do not use a proxy, even if the system configured otherwise.
- `env`: Try to read proxy settings from set environment variables `http_proxy`/`socks_proxy`.
-- `socks5[h]://[user:pass@]host:ip`: The ACLK and claiming will use the specified SOCKS5 proxy.
-- `http://[user:pass@]host:ip`: The ACLK and claiming will use the specified HTTP(S) proxy.
+- `socks5[h]://[user:pass@]host:ip`: The ACLK and connection process will use the specified SOCKS5 proxy.
+- `http://[user:pass@]host:ip`: The ACLK and connection process will use the specified HTTP(S) proxy.
For example, a SOCKS5 proxy setting may look like the following:

@@ -173,23 +264,23 @@ For example, a SOCKS5 proxy setting may look like the following:
proxy = socks5h://proxy.example.com:1080 # With a URL
```

-You can now move on to claiming. When you claim with the `netdata-claim.sh` script, add the `-proxy=` parameter and
+You can now move on to connecting. When you connect with the [kickstart](/packaging/installer/#automatic-one-line-installation-script) script, add the `--claim-proxy=` parameter and
append the same proxy setting you added to `netdata.conf`.

```bash
-sudo netdata-claim.sh -token=MYTOKEN1234567 -rooms=room1,room2 -url=https://app.netdata.cloud -proxy=socks5h://203.0.113.0:1080
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --claim-token TOKEN --claim-rooms ROOM1,ROOM2 --claim-url https://app.netdata.cloud --claim-proxy socks5h://203.0.113.0:1080
```

-Hit **Enter**. The script should return `Agent was successfully claimed.`. If the claiming script returns errors, or if
+Hit **Enter**. The script should return `Agent was successfully claimed.`. If the connection process returns errors, or if
you don't see the node in your Space after 60 seconds, see the [troubleshooting information](#troubleshooting).

### Troubleshooting

-If you're having trouble claiming a node, this may be because the [ACLK](/aclk/README.md) cannot connect to Cloud.
+If you're having trouble connecting a node, this may be because the [ACLK](/aclk/README.md) cannot connect to Cloud.

With the Netdata Agent running, visit `http://NODE:19999/api/v1/info` in your browser, replacing `NODE` with the IP
address or hostname of your Agent. The returned JSON contains four keys that will be helpful to diagnose any issues you
-might be having with the ACLK or claiming process.
+might be having with the ACLK or connection process.

```json
"cloud-enabled"
@@ -200,6 +291,23 @@ might be having with the ACLK or connection process.

Use these keys and the information below to troubleshoot the ACLK.

+#### kickstart: unsupported Netdata installation
+
+If you run the kickstart script and get the error `Existing install appears to be handled manually or through the system package manager.`, you most likely installed Netdata using an unsupported package.
+
+If you are using an unsupported package, such as a third-party `.deb`/`.rpm` package provided by your distribution,
+please remove that package and reinstall using our [recommended kickstart
+script](/docs/get-started.mdx#install-on-linux-with-one-line-installer-recommended).
+
+#### kickstart: Failed to write new machine GUID
+
+If you run the kickstart script without the privileges required for the connection process, you will get the following error:
+
+```bash
+Failed to write new machine GUID. Please make sure you have rights to write to /var/lib/netdata/registry/netdata.public.unique.id.
+```
+For a successful execution, you will need to run the script with root privileges, or as the user that is running the agent; more details in the [Connect an agent without root privileges](#connect-an-agent-without-root-privileges) section.
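To make that fix concrete, here is a minimal sketch of running the connection script as the Agent's own user instead of root. It assumes the default `netdata` user and the example kickstart parameters shown earlier; adjust the user to whatever `grep "run as user"` reports on your system.

```bash
# Confirm which user the Agent runs as (default: netdata)
grep "run as user" /etc/netdata/netdata.conf

# Download the kickstart script, then run it as that user so it can
# write to /var/lib/netdata/registry/netdata.public.unique.id
curl -Ss https://my-netdata.io/kickstart.sh -o /tmp/kickstart.sh
sudo -u netdata bash /tmp/kickstart.sh --claim-token TOKEN --claim-rooms ROOM1,ROOM2 --claim-url https://app.netdata.cloud
```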
+
#### bash: netdata-claim.sh: command not found

If you run the claiming script and see a `command not found` error, you either installed Netdata in a non-standard
@@ -211,7 +319,7 @@ If you are using an unsupported package, such as a third-party `.deb`/`.rpm` pac
please remove that package and reinstall using our [recommended kickstart
script](/docs/get-started.mdx#install-on-linux-with-one-line-installer-recommended).

-#### Claiming on older distributions (Ubuntu 14.04, Debian 8, CentOS 6)
+#### Connecting on older distributions (Ubuntu 14.04, Debian 8, CentOS 6)

If you're running an older Linux distribution or one that has reached EOL, such as Ubuntu 14.04 LTS, Debian 8, or CentOS
6, your Agent may not be able to securely connect to Netdata Cloud due to an outdated version of OpenSSL. These old
@@ -285,7 +393,7 @@ with details about your system and relevant output from `error.log`.

#### agent-claimed is false

-You must [claim your node](#how-to-claim-a-node).
+You must [connect your node](#how-to-connect-a-node).

#### aclk-available is false

@@ -293,14 +401,14 @@ If `aclk-available` is `false` and all other keys are `true`, your Agent is havi
through the ACLK. Please check your system's firewall.

If your Agent needs to use a proxy to access the internet, you must [set up a proxy for
-claiming](#claim-through-a-proxy).
+connecting](#connect-through-a-proxy).

If you are certain firewall and proxy settings are not the issue, you should consult the Agent's `error.log` at
`/var/log/netdata/error.log` and contact us by [creating an issue on
GitHub](https://github.com/netdata/netdata/issues/new?labels=bug%2C+needs+triage%2C+ACLK&template=bug_report.md&title=ACLK-available-is-false)
with details about your system and relevant output from `error.log`.

-### Remove and reclaim a node
+### Remove and reconnect a node

To remove a node from your Space in Netdata Cloud, delete the `cloud.d/` directory in your Netdata library directory.

@@ -309,11 +417,13 @@ cd /var/lib/netdata # Replace with your Netdata library directory, if not /var
sudo rm -rf cloud.d/
```

-This node no longer has access to the credentials it was claimed with and cannot connect to Netdata Cloud via the ACLK.
+This node no longer has access to the credentials that were used when connecting it to Netdata Cloud via the ACLK.
You will still be able to see this node in your War Rooms in an **unreachable** state.

-If you want to reclaim this node into a different Space, you need to create a new identity by adding `-id=$(uuidgen)` to
-the claiming script parameters. Make sure that you have the `uuidgen-runtime` package installed, as it is used to run the command `uuidgen`. For example, using the default claiming script:
+If you want to reconnect this node, you need to create a new identity by adding `-id=$(uuidgen)` to
+the claiming script parameters (not yet supported by the kickstart script). Make sure that you have the `uuidgen-runtime` package installed, as it is used to run the command `uuidgen`. For example:
+
+**Claiming script**

```bash
sudo netdata-claim.sh -token=TOKEN -rooms=ROOM1,ROOM2 -url=https://app.netdata.cloud -id=$(uuidgen)
@@ -321,9 +431,8 @@ sudo netdata-claim.sh -token=TOKEN -rooms=ROOM1,ROOM2 -url=https://app.netdata.c

The agent _must be restarted_ after this change.
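As an illustration (assuming a systemd-based system; other init systems differ), the restart would typically be:

```bash
sudo systemctl restart netdata
```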
-## Claiming reference
-
-In the sections below, you can find reference material for the claiming script, claiming via the Agent's command line
+## Connecting reference
+
+In the sections below, you can find reference material for the kickstart script, the claiming script, connecting via the Agent's command line
tool, and details about the files found in `cloud.d`.

### The `cloud.conf` file

@@ -336,9 +445,35 @@ using the [ACLK](/aclk/README.md).
| cloud base url | https://app.netdata.cloud | The URL for the Netdata Cloud web application. You should not change this. If you want to disable Cloud, change the `enabled` setting. |
| enabled | yes | The runtime option to disable the [Agent-Cloud link](/aclk/README.md) and prevent your Agent from connecting to Netdata Cloud. |

+### kickstart script
+
+The best way to install Netdata and connect your nodes to Netdata Cloud is with our automatic one-line installation script, [kickstart](/packaging/installer/#automatic-one-line-installation-script). This script will install the Netdata Agent, in case it isn't already installed, and connect your node to Netdata Cloud.
+
+This works with:
+* all Linux distributions, see the [Netdata distribution support matrix](https://learn.netdata.cloud/docs/agent/packaging/distributions)
+* macOS
+
+For details on how to run this script, please check [How to connect a node](#how-to-connect-a-node) and choose your environment.
+
+If the Netdata Agent is already installed and you run this script to connect a node to Netdata Cloud, it will not upgrade your agent automatically. If you also want to upgrade the Agent installation, you'll need to run the script again without the connection options.
+
+Our suggestion is to first upgrade your agent by running the kickstart command below, and then follow [How to connect a node](#how-to-connect-a-node).
+
+**Linux**
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+
+**macOS**
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --install /usr/local/
+```

### Claiming script

-A Space's administrator can claim an Agent by directly calling the `netdata-claim.sh` script either with root privileges
+A Space's administrator can also connect an Agent by directly calling the `netdata-claim.sh` script either with root privileges
using `sudo`, or as the user running the Agent (typically `netdata`), and passing the following arguments:

```sh
@@ -356,7 +491,7 @@ using `sudo`, or as the user running the Agent (typically `netdata`), and passin

where PROXY_URL is the endpoint of a SOCKS5 proxy.
```

-For example, the following command claims an Agent and adds it to rooms `room1` and `room2`:
+For example, the following command connects an Agent and adds it to rooms `room1` and `room2`:

```sh
netdata-claim.sh -token=MYTOKEN1234567 -rooms=room1,room2
@@ -368,11 +503,13 @@ You should then update the `netdata` service about the result with `netdatacli`:

netdatacli reload-claiming-state
```

-This reloads the Agent claiming state from disk.
+This reloads the Agent connection state from disk.
+
+Our recommendation is to trigger the connection process using the [kickstart](/packaging/installer/#automatic-one-line-installation-script) whenever possible.
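If you do take the manual path, the two steps of this section combine into the following short sequence, a sketch reusing the example token and rooms from above:

```bash
# Claim the Agent and add it to two rooms...
sudo netdata-claim.sh -token=MYTOKEN1234567 -rooms=room1,room2

# ...then tell the running netdata service to reload the connection state
netdatacli reload-claiming-state
```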
### Netdata Agent command line -If a Netdata Agent is running, the Space's administrator can claim a node using the `netdata` service binary with +If a Netdata Agent is running, the Space's administrator can connect a node using the `netdata` service binary with additional command line parameters: ```sh @@ -388,9 +525,9 @@ For example: If need be, the user can override the Agent's defaults by providing additional arguments like those described [here](#claiming-script). -### Claiming directory +### Connection directory -Netdata stores the Agent's claiming-related state in the Netdata library directory under `cloud.d`. For a default +Netdata stores the Agent's connection-related state in the Netdata library directory under `cloud.d`. For a default installation, this directory exists at `/var/lib/netdata/cloud.d`. The directory and its files should be owned by the user that runs the Agent, which is typically the `netdata` user. diff --git a/claim/claim.c b/claim/claim.c index ce3f0803d..2007b637a 100644 --- a/claim/claim.c +++ b/claim/claim.c @@ -1,12 +1,8 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "claim.h" -#include "../registry/registry_internals.h" -#ifndef ACLK_NG -#include "../aclk/legacy/aclk_common.h" -#else -#include "../aclk/aclk.h" -#endif +#include "registry/registry_internals.h" +#include "aclk/aclk_api.h" char *claiming_pending_arguments = NULL; diff --git a/claim/claim.h b/claim/claim.h index 2fd8d3e9b..171cc1fab 100644 --- a/claim/claim.h +++ b/claim/claim.h @@ -3,7 +3,7 @@ #ifndef NETDATA_CLAIM_H #define NETDATA_CLAIM_H 1 -#include "../daemon/common.h" +#include "daemon/common.h" extern char *claiming_pending_arguments; extern struct config cloud_config; diff --git a/claim/netdata-claim.sh.in b/claim/netdata-claim.sh.in index 137d112fc..18813a6ba 100755 --- a/claim/netdata-claim.sh.in +++ b/claim/netdata-claim.sh.in @@ -418,7 +418,7 @@ HERE_DOC exit $EXIT_CODE fi echo >&2 "${PROXYMSG}The claim was successful but the agent could not be notified ($?)- it requires a restart to connect to the cloud." - exit 5 + [ "$NETDATA_RUNNING" -eq 0 ] && exit 0 || exit 5 fi echo >&2 "Failed to claim node with the following error message:\"${ERROR_MESSAGES[$EXIT_CODE]}\"" diff --git a/cli/README.md b/cli/README.md index 93812372a..1962b2ede 100644 --- a/cli/README.md +++ b/cli/README.md @@ -26,7 +26,7 @@ shutdown-agent fatal-agent Log the state and halt the netdata agent. reload-claiming-state - Reload agent claiming state from disk. + Reload agent connection state from disk. ``` Those commands are the same that can be sent to netdata via [signals](/daemon/README.md#command-line-options). 
diff --git a/cli/cli.c b/cli/cli.c index 4df020178..229ad2952 100644 --- a/cli/cli.c +++ b/cli/cli.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "cli.h" -#include "../libnetdata/required_dummies.h" +#include "libnetdata/required_dummies.h" static uv_pipe_t client_pipe; static uv_write_t write_req; @@ -198,4 +198,4 @@ int main(int argc, char **argv) uv_close((uv_handle_t *)&client_pipe, NULL); return exit_status; -} \ No newline at end of file +} diff --git a/cli/cli.h b/cli/cli.h index 9e730a301..056e99862 100644 --- a/cli/cli.h +++ b/cli/cli.h @@ -3,6 +3,6 @@ #ifndef NETDATA_CLI_H #define NETDATA_CLI_H 1 -#include "../daemon/common.h" +#include "daemon/common.h" #endif //NETDATA_CLI_H diff --git a/collectors/COLLECTORS.md b/collectors/COLLECTORS.md index 3325049e7..5f37dfc34 100644 --- a/collectors/COLLECTORS.md +++ b/collectors/COLLECTORS.md @@ -135,6 +135,7 @@ configure any of these collectors according to your setup and infrastructure. - [Riak KV](/collectors/python.d.plugin/riakkv/README.md): Collect database stats from the `/stats` endpoint. - [Zookeeper](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/zookeeper/): Monitor application health metrics reading the server's response to the `mntr` command. +- [Memcached](/collectors/python.d.plugin/memcached/README.md): Collect memory-caching system performance metrics. ### Distributed computing @@ -417,7 +418,6 @@ The Netdata Agent can collect these system- and hardware-level metrics using a v `proc.plugin` collector. - [KSM](/collectors/proc.plugin/README.md): Measure the amount of merging, savings, and effectiveness using the `proc.plugin` collector. -- [Memcached](/collectors/python.d.plugin/memcached/README.md): Collect memory-caching system performance metrics. - [Numa](/collectors/proc.plugin/README.md): Gather metrics on the number of non-uniform memory access (NUMA) events every second using the `proc.plugin` collector. - [Page faults](/collectors/proc.plugin/README.md): Collect the number of memory page faults per second using the @@ -496,7 +496,7 @@ collectors are described only in code and associated charts in Netdata dashboard - [ACLK (code only)](https://github.com/netdata/netdata/blob/master/aclk/legacy/aclk_stats.c): View whether a Netdata Agent is connected to Netdata Cloud via the [ACLK](/aclk/README.md), the volume of queries, process times, and more. - [Alarms](https://learn.netdata.cloud/docs/agent/collectors/python.d.plugin/alarms): This collector creates an - Alarms menu with one line plot showing the alarm states of a Netdata Agent over time. + **Alarms** menu with one line plot showing the alarm states of a Netdata Agent over time. - [Anomalies](https://learn.netdata.cloud/docs/agent/collectors/python.d.plugin/anomalies): This collector uses the Python PyOD library to perform unsupervised anomaly detection on your Netdata charts and/or dimensions. 
- [Exporting (code only)](https://github.com/netdata/netdata/blob/master/exporting/send_internal_metrics.c): Gather diff --git a/collectors/all.h b/collectors/all.h index bbb395691..647ee9774 100644 --- a/collectors/all.h +++ b/collectors/all.h @@ -3,7 +3,7 @@ #ifndef NETDATA_ALL_H #define NETDATA_ALL_H 1 -#include "../daemon/common.h" +#include "daemon/common.h" // netdata internal data collection plugins @@ -30,40 +30,41 @@ // - for each FAMILY +100 // - for each CHART +10 -#define NETDATA_CHART_PRIO_SYSTEM_CPU 100 -#define NETDATA_CHART_PRIO_SYSTEM_LOAD 100 -#define NETDATA_CHART_PRIO_SYSTEM_IO 150 -#define NETDATA_CHART_PRIO_SYSTEM_PGPGIO 151 -#define NETDATA_CHART_PRIO_SYSTEM_RAM 200 -#define NETDATA_CHART_PRIO_SYSTEM_SWAP 201 -#define NETDATA_CHART_PRIO_SYSTEM_SWAPIO 250 -#define NETDATA_CHART_PRIO_SYSTEM_NET 500 -#define NETDATA_CHART_PRIO_SYSTEM_IPV4 500 // freebsd only -#define NETDATA_CHART_PRIO_SYSTEM_IP 501 -#define NETDATA_CHART_PRIO_SYSTEM_IPV6 502 -#define NETDATA_CHART_PRIO_SYSTEM_PROCESSES 600 -#define NETDATA_CHART_PRIO_SYSTEM_FORKS 700 -#define NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES 750 -#define NETDATA_CHART_PRIO_SYSTEM_CTXT 800 -#define NETDATA_CHART_PRIO_SYSTEM_IDLEJITTER 800 -#define NETDATA_CHART_PRIO_SYSTEM_INTR 900 -#define NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS 950 -#define NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT 955 -#define NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS 1000 -#define NETDATA_CHART_PRIO_SYSTEM_DEV_INTR 1000 // freebsd only -#define NETDATA_CHART_PRIO_SYSTEM_SOFT_INTR 1100 // freebsd only -#define NETDATA_CHART_PRIO_SYSTEM_ENTROPY 1000 -#define NETDATA_CHART_PRIO_SYSTEM_UPTIME 1000 -#define NETDATA_CHART_PRIO_CLOCK_SYNC_STATE 1100 -#define NETDATA_CHART_PRIO_CLOCK_SYNC_OFFSET 1110 -#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES 1200 // freebsd only -#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES 1201 -#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE 1202 -#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES 1203 -#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS 1204 -#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS 1205 -#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE 1206 -#define NETDATA_CHART_PRIO_SYSTEM_PACKETS 7001 // freebsd only +#define NETDATA_CHART_PRIO_SYSTEM_CPU 100 +#define NETDATA_CHART_PRIO_SYSTEM_LOAD 100 +#define NETDATA_CHART_PRIO_SYSTEM_IO 150 +#define NETDATA_CHART_PRIO_SYSTEM_PGPGIO 151 +#define NETDATA_CHART_PRIO_SYSTEM_RAM 200 +#define NETDATA_CHART_PRIO_SYSTEM_SWAP 201 +#define NETDATA_CHART_PRIO_SYSTEM_SWAPIO 250 +#define NETDATA_CHART_PRIO_SYSTEM_NET 500 +#define NETDATA_CHART_PRIO_SYSTEM_IPV4 500 // freebsd only +#define NETDATA_CHART_PRIO_SYSTEM_IP 501 +#define NETDATA_CHART_PRIO_SYSTEM_IPV6 502 +#define NETDATA_CHART_PRIO_SYSTEM_PROCESSES 600 +#define NETDATA_CHART_PRIO_SYSTEM_FORKS 700 +#define NETDATA_CHART_PRIO_SYSTEM_ACTIVE_PROCESSES 750 +#define NETDATA_CHART_PRIO_SYSTEM_CTXT 800 +#define NETDATA_CHART_PRIO_SYSTEM_IDLEJITTER 800 +#define NETDATA_CHART_PRIO_SYSTEM_INTR 900 +#define NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS 950 +#define NETDATA_CHART_PRIO_SYSTEM_SOFTNET_STAT 955 +#define NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS 1000 +#define NETDATA_CHART_PRIO_SYSTEM_DEV_INTR 1000 // freebsd only +#define NETDATA_CHART_PRIO_SYSTEM_SOFT_INTR 1100 // freebsd only +#define NETDATA_CHART_PRIO_SYSTEM_ENTROPY 1000 +#define NETDATA_CHART_PRIO_SYSTEM_UPTIME 1000 +#define NETDATA_CHART_PRIO_CLOCK_SYNC_STATE 1100 +#define NETDATA_CHART_PRIO_CLOCK_SYNC_OFFSET 1110 +#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_QUEUES 1200 // freebsd only 
+#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_MESSAGES 1201 +#define NETDATA_CHART_PRIO_SYSTEM_IPC_MSQ_SIZE 1202 +#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEMAPHORES 1203 +#define NETDATA_CHART_PRIO_SYSTEM_IPC_SEM_ARRAYS 1204 +#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SEGS 1205 +#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_SIZE 1206 +#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_CALLS 1207 +#define NETDATA_CHART_PRIO_SYSTEM_PACKETS 7001 // freebsd only // CPU per core @@ -116,6 +117,7 @@ #define NETDATA_CHART_PRIO_DISK_SVCTM 2070 #define NETDATA_CHART_PRIO_DISK_MOPS 2080 #define NETDATA_CHART_PRIO_DISK_IOTIME 2090 +#define NETDATA_CHART_PRIO_DISK_LATENCY 2095 #define NETDATA_CHART_PRIO_BCACHE_CACHE_ALLOC 2120 #define NETDATA_CHART_PRIO_BCACHE_HIT_RATIO 2120 #define NETDATA_CHART_PRIO_BCACHE_RATES 2121 @@ -128,36 +130,68 @@ #define NETDATA_CHART_PRIO_DISKSPACE_SPACE 2023 #define NETDATA_CHART_PRIO_DISKSPACE_INODES 2024 +// MDSTAT + +#define NETDATA_CHART_PRIO_MDSTAT_HEALTH 2100 +#define NETDATA_CHART_PRIO_MDSTAT_FLUSH 2101 +#define NETDATA_CHART_PRIO_MDSTAT_NONREDUNDANT 2105 +#define NETDATA_CHART_PRIO_MDSTAT_DISKS 2106 // 5 charts per raid +#define NETDATA_CHART_PRIO_MDSTAT_MISMATCH 2107 +#define NETDATA_CHART_PRIO_MDSTAT_OPERATION 2108 +#define NETDATA_CHART_PRIO_MDSTAT_FINISH 2109 +#define NETDATA_CHART_PRIO_MDSTAT_SPEED 2110 + +// Filesystem +#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_CLEAN 2150 +#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_COUNT 2151 +#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_BYTES 2152 +#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EBYTES 2153 +#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_FSYNC 2154 +#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EFSYNC 2155 +#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_OPEN 2156 +#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EOPEN 2157 +#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_CREATE 2158 +#define NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_ECREATE 2159 + +#define NETDATA_CHART_PRIO_EBPF_FILESYSTEM_CHARTS 2160 + +// Mount Points +#define NETDATA_CHART_PRIO_EBPF_MOUNT_CHARTS 2190 + +// File descriptor +#define NETDATA_CHART_PRIO_EBPF_FD_CHARTS 2195 + + // NFS (server) -#define NETDATA_CHART_PRIO_NFSD_READCACHE 2100 -#define NETDATA_CHART_PRIO_NFSD_FILEHANDLES 2101 -#define NETDATA_CHART_PRIO_NFSD_IO 2102 -#define NETDATA_CHART_PRIO_NFSD_THREADS 2103 -#define NETDATA_CHART_PRIO_NFSD_THREADS_FULLCNT 2104 -#define NETDATA_CHART_PRIO_NFSD_THREADS_HISTOGRAM 2105 -#define NETDATA_CHART_PRIO_NFSD_READAHEAD 2105 -#define NETDATA_CHART_PRIO_NFSD_NET 2107 -#define NETDATA_CHART_PRIO_NFSD_RPC 2108 -#define NETDATA_CHART_PRIO_NFSD_PROC2 2109 -#define NETDATA_CHART_PRIO_NFSD_PROC3 2110 -#define NETDATA_CHART_PRIO_NFSD_PROC4 2111 -#define NETDATA_CHART_PRIO_NFSD_PROC4OPS 2112 +#define NETDATA_CHART_PRIO_NFSD_READCACHE 2200 +#define NETDATA_CHART_PRIO_NFSD_FILEHANDLES 2201 +#define NETDATA_CHART_PRIO_NFSD_IO 2202 +#define NETDATA_CHART_PRIO_NFSD_THREADS 2203 +#define NETDATA_CHART_PRIO_NFSD_THREADS_FULLCNT 2204 +#define NETDATA_CHART_PRIO_NFSD_THREADS_HISTOGRAM 2205 +#define NETDATA_CHART_PRIO_NFSD_READAHEAD 2205 +#define NETDATA_CHART_PRIO_NFSD_NET 2207 +#define NETDATA_CHART_PRIO_NFSD_RPC 2208 +#define NETDATA_CHART_PRIO_NFSD_PROC2 2209 +#define NETDATA_CHART_PRIO_NFSD_PROC3 2210 +#define NETDATA_CHART_PRIO_NFSD_PROC4 2211 +#define NETDATA_CHART_PRIO_NFSD_PROC4OPS 2212 // NFS (client) -#define NETDATA_CHART_PRIO_NFS_NET 2207 -#define NETDATA_CHART_PRIO_NFS_RPC 2208 -#define NETDATA_CHART_PRIO_NFS_PROC2 2209 -#define NETDATA_CHART_PRIO_NFS_PROC3 
2210 -#define NETDATA_CHART_PRIO_NFS_PROC4 2211 +#define NETDATA_CHART_PRIO_NFS_NET 2307 +#define NETDATA_CHART_PRIO_NFS_RPC 2308 +#define NETDATA_CHART_PRIO_NFS_PROC2 2309 +#define NETDATA_CHART_PRIO_NFS_PROC3 2310 +#define NETDATA_CHART_PRIO_NFS_PROC4 2311 // BTRFS -#define NETDATA_CHART_PRIO_BTRFS_DISK 2300 -#define NETDATA_CHART_PRIO_BTRFS_DATA 2301 -#define NETDATA_CHART_PRIO_BTRFS_METADATA 2302 -#define NETDATA_CHART_PRIO_BTRFS_SYSTEM 2303 +#define NETDATA_CHART_PRIO_BTRFS_DISK 2400 +#define NETDATA_CHART_PRIO_BTRFS_DATA 2401 +#define NETDATA_CHART_PRIO_BTRFS_METADATA 2402 +#define NETDATA_CHART_PRIO_BTRFS_SYSTEM 2403 // ZFS @@ -182,6 +216,9 @@ #define NETDATA_CHART_PRIO_ZFS_POOL_STATE 2820 +// HARDIRQS + +#define NETDATA_CHART_PRIO_HARDIRQ_LATENCY 2900 // SOFTIRQs @@ -308,16 +345,6 @@ #define NETDATA_CHART_PRIO_SYNPROXY_CONN_OPEN 8753 #define NETDATA_CHART_PRIO_SYNPROXY_ENTRIES 8754 -// MDSTAT - -#define NETDATA_CHART_PRIO_MDSTAT_HEALTH 9000 -#define NETDATA_CHART_PRIO_MDSTAT_NONREDUNDANT 9001 -#define NETDATA_CHART_PRIO_MDSTAT_DISKS 9002 // 5 charts per raid -#define NETDATA_CHART_PRIO_MDSTAT_MISMATCH 9003 -#define NETDATA_CHART_PRIO_MDSTAT_OPERATION 9004 -#define NETDATA_CHART_PRIO_MDSTAT_FINISH 9005 -#define NETDATA_CHART_PRIO_MDSTAT_SPEED 9006 - // Linux Power Supply #define NETDATA_CHART_PRIO_POWER_SUPPLY_CAPACITY 9500 // 4 charts per power supply diff --git a/collectors/apps.plugin/apps_groups.conf b/collectors/apps.plugin/apps_groups.conf index cffd26c95..a36cae50b 100644 --- a/collectors/apps.plugin/apps_groups.conf +++ b/collectors/apps.plugin/apps_groups.conf @@ -25,6 +25,9 @@ # To add process names with spaces, enclose them in quotes (single or double) # example: 'Plex Media Serv' "my other process". # +# Note that spaces are not supported for process groups. Use a dash "-" instead. 
+# example-process-group: process1 process2 +# # Wildcard support: # You can add an asterisk (*) at the beginning and/or the end of a process: # @@ -176,6 +179,12 @@ kube-proxy: kube-proxy metrics-server: metrics-server heapster: heapster +# ----------------------------------------------------------------------------- +# AWS + +aws-s3: '*aws s3*' +aws: aws + # ----------------------------------------------------------------------------- # containers & virtual machines @@ -298,6 +307,8 @@ kernel: fsnotify_mark kthrotld deferwq scsi_* # ----------------------------------------------------------------------------- # other application servers +consul: consul + kafka: *kafka.Kafka* rabbitmq: *rabbitmq* @@ -314,3 +325,16 @@ p4: p4* git-services: gitea gitlab-runner freeswitch: freeswitch* + +# -------- web3 / blockchains ---------- + +go-ethereum: geth* +nethermind-ethereum: nethermind* +besu-ethereum: besu* +openEthereum: openethereum* +urbit: urbit* +bitcoin-node: *bitcoind* lnd* +filecoin: lotus* lotus-miner* lotus-worker* +solana: solana* +web3: *hardhat* *ganache* *truffle* *brownie* *waffle* +terra: terra* mantle* diff --git a/collectors/apps.plugin/apps_plugin.c b/collectors/apps.plugin/apps_plugin.c index 4d4626e6b..3bed4bb6b 100644 --- a/collectors/apps.plugin/apps_plugin.c +++ b/collectors/apps.plugin/apps_plugin.c @@ -6,7 +6,7 @@ * Released under GPL v3+ */ -#include "../../libnetdata/libnetdata.h" +#include "libnetdata/libnetdata.h" // ---------------------------------------------------------------------------- @@ -128,6 +128,7 @@ static int enable_file_charts = 1, max_fds_cache_seconds = 60, #endif + enable_detailed_uptime_charts = 0, enable_users_charts = 1, enable_groups_charts = 1, include_exited_childs = 1; @@ -3354,7 +3355,7 @@ static void normalize_utilization(struct target *root) { cgtime_fix_ratio = 1.0; //(double)(global_utime + global_stime) / (double)(utime + cutime + stime + cstime); } else if((global_utime + global_stime > utime + stime) && (cutime || cstime)) { - // childrens resources are too high + // children resources are too high // lower only the children resources utime_fix_ratio = stime_fix_ratio = @@ -3519,26 +3520,28 @@ static void send_collected_data_to_netdata(struct target *root, const char *type } send_END(); - send_BEGIN(type, "uptime_min", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, w->uptime_min); - } - send_END(); + if (enable_detailed_uptime_charts) { + send_BEGIN(type, "uptime_min", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed && w->processes)) + send_SET(w->name, w->uptime_min); + } + send_END(); - send_BEGIN(type, "uptime_avg", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, w->uptime_sum / w->processes); - } - send_END(); + send_BEGIN(type, "uptime_avg", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed && w->processes)) + send_SET(w->name, w->uptime_sum / w->processes); + } + send_END(); - send_BEGIN(type, "uptime_max", dt); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed && w->processes)) - send_SET(w->name, w->uptime_max); + send_BEGIN(type, "uptime_max", dt); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed && w->processes)) + send_SET(w->name, w->uptime_max); + } + send_END(); } - send_END(); #endif send_BEGIN(type, "mem", dt); @@ -3710,22 +3713,24 @@ static void send_charts_updates_to_netdata(struct target *root, const char *type fprintf(stdout, "DIMENSION %s '' 
absolute 1 1\n", w->name); } - fprintf(stdout, "CHART %s.uptime_min '' '%s Minimum Uptime' 'seconds' processes %s.uptime_min line 20009 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } + if (enable_detailed_uptime_charts) { + fprintf(stdout, "CHART %s.uptime_min '' '%s Minimum Uptime' 'seconds' processes %s.uptime_min line 20009 %d\n", type, title, type, update_every); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); + } - fprintf(stdout, "CHART %s.uptime_avg '' '%s Average Uptime' 'seconds' processes %s.uptime_avg line 20010 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); - } + fprintf(stdout, "CHART %s.uptime_avg '' '%s Average Uptime' 'seconds' processes %s.uptime_avg line 20010 %d\n", type, title, type, update_every); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); + } - fprintf(stdout, "CHART %s.uptime_max '' '%s Maximum Uptime' 'seconds' processes %s.uptime_max line 20011 %d\n", type, title, type, update_every); - for (w = root; w ; w = w->next) { - if(unlikely(w->exposed)) - fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); + fprintf(stdout, "CHART %s.uptime_max '' '%s Maximum Uptime' 'seconds' processes %s.uptime_max line 20011 %d\n", type, title, type, update_every); + for (w = root; w ; w = w->next) { + if(unlikely(w->exposed)) + fprintf(stdout, "DIMENSION %s '' absolute 1 1\n", w->name); + } } #endif @@ -3939,6 +3944,11 @@ static void parse_args(int argc, char **argv) continue; } + if(strcmp("with-detailed-uptime", argv[i]) == 0) { + enable_detailed_uptime_charts = 1; + continue; + } + if(strcmp("-h", argv[i]) == 0 || strcmp("--help", argv[i]) == 0) { fprintf(stderr, "\n" @@ -3951,34 +3961,36 @@ static void parse_args(int argc, char **argv) "\n" " Available command line options:\n" "\n" - " SECONDS set the data collection frequency\n" + " SECONDS set the data collection frequency\n" "\n" - " debug enable debugging (lot of output)\n" + " debug enable debugging (lot of output)\n" "\n" " with-childs\n" - " without-childs enable / disable aggregating exited\n" - " children resources into parents\n" - " (default is enabled)\n" + " without-childs enable / disable aggregating exited\n" + " children resources into parents\n" + " (default is enabled)\n" "\n" " with-guest\n" - " without-guest enable / disable reporting guest charts\n" - " (default is disabled)\n" + " without-guest enable / disable reporting guest charts\n" + " (default is disabled)\n" "\n" " with-files\n" - " without-files enable / disable reporting files, sockets, pipes\n" - " (default is enabled)\n" + " without-files enable / disable reporting files, sockets, pipes\n" + " (default is enabled)\n" + "\n" + " without-users disable reporting per user charts\n" "\n" - " without-users disable reporting per user charts\n" + " without-groups disable reporting per user group charts\n" "\n" - " without-groups disable reporting per user group charts\n" + " with-detailed-uptime enable reporting min/avg/max uptime charts\n" "\n" #ifndef __FreeBSD__ - " fds-cache-secs N cache the files of processed for N seconds\n" - " caching is adaptive per file (when a file\n" - " is found, it starts at 0 and while the file\n" - " remains open, it is 
incremented up to the\n" - " max given)\n" - " (default is %d seconds)\n" + " fds-cache-secs N cache the files of processed for N seconds\n" + " caching is adaptive per file (when a file\n" + " is found, it starts at 0 and while the file\n" + " remains open, it is incremented up to the\n" + " max given)\n" + " (default is %d seconds)\n" "\n" #endif " version or -v or -V print program version and exit\n" diff --git a/collectors/cgroups.plugin/cgroup-name.sh.in b/collectors/cgroups.plugin/cgroup-name.sh.in index 8ef8ab58e..1f31c49a7 100755 --- a/collectors/cgroups.plugin/cgroup-name.sh.in +++ b/collectors/cgroups.plugin/cgroup-name.sh.in @@ -118,24 +118,37 @@ function add_lbl_prefix() { # pod level cgroup name format: 'pod__' # container level cgroup name format: 'cntr___' function k8s_get_kubepod_name() { - # GKE /sys/fs/cgroup/*/ tree: + # GKE /sys/fs/cgroup/*/ (cri=docker, cgroups=v1): # |-- kubepods # | |-- burstable # | | |-- pod98cee708-023b-11eb-933d-42010a800193 # | | | |-- 922161c98e6ea450bf665226cdc64ca2aa3e889934c2cff0aec4325f8f78ac03 - # | | `-- a5d223eec35e00f5a1c6fa3e3a5faac6148cdc1f03a2e762e873b7efede012d7 # | `-- pode314bbac-d577-11ea-a171-42010a80013b # | |-- 7d505356b04507de7b710016d540b2759483ed5f9136bb01a80872b08f771930 - # | `-- 88ab4683b99cfa7cc8c5f503adf7987dd93a3faa7c4ce0d17d419962b3220d50 # - # Minikube (v1.8.2) /sys/fs/cgroup/*/ tree: + # GKE /sys/fs/cgroup/*/ (cri=containerd, cgroups=v1): + # |-- kubepods.slice + # | |-- kubepods-besteffort.slice + # | | |-- kubepods-besteffort-pode1465238_4518_4c21_832f_fd9f87033dad.slice + # | | | |-- cri-containerd-66be9b2efdf4d85288c319b8c1a2f50d2439b5617e36f45d9d0d0be1381113be.scope + # | `-- kubepods-pod91f5b561_369f_4103_8015_66391059996a.slice + # | |-- cri-containerd-24c53b774a586f06abc058619b47f71d9d869ac50c92898adbd199106fd0aaeb.scope + # + # GKE /sys/fs/cgroup/*/ (cri=crio, cgroups=v1): + # |-- kubepods.slice + # | |-- kubepods-besteffort.slice + # | | |-- kubepods-besteffort-podad412dfe_3589_4056_965a_592356172968.slice + # | | | |-- crio-77b019312fd9825828b70214b2c94da69c30621af2a7ee06f8beace4bc9439e5.scope + # + # Minikube (v1.8.2) /sys/fs/cgroup/*/ (cri=docker, cgroups=v1): # |-- kubepods.slice # | |-- kubepods-besteffort.slice # | | |-- kubepods-besteffort-pod10fb5647_c724_400c_b9cc_0e6eae3110e7.slice # | | | |-- docker-36e5eb5056dfdf6dbb75c0c44a1ecf23217fe2c50d606209d8130fcbb19fb5a7.scope - # | | | `-- docker-87e18c2323621cf0f635c53c798b926e33e9665c348c60d489eef31ee1bd38d7.scope # - # NOTE: cgroups plugin uses '_' to join dir names, so it is ___... + # NOTE: cgroups plugin + # - uses '_' to join dir names (so it is ___...) + # - replaces '.' 
with '-' local fn="${FUNCNAME[0]}" local id="${1}" @@ -157,9 +170,9 @@ function k8s_get_kubepod_name() { # kubepods_kubepods- name=${clean_id//-/_} name=${name/#kubepods_kubepods/kubepods} - elif [[ $clean_id =~ .+pod[a-f0-9_-]+_docker-([a-f0-9]+)$ ]]; then - # ...pod_docker- (POD_UID w/ "_") - cntr_id=${BASH_REMATCH[1]} + elif [[ $clean_id =~ .+pod[a-f0-9_-]+_(docker|crio|cri-containerd)-([a-f0-9]+)$ ]]; then + # ...pod_(docker|crio|cri-containerd)- (POD_UID w/ "_") + cntr_id=${BASH_REMATCH[2]} elif [[ $clean_id =~ .+pod[a-f0-9-]+_([a-f0-9]+)$ ]]; then # ...pod_ cntr_id=${BASH_REMATCH[1]} @@ -252,7 +265,7 @@ function k8s_get_kubepod_name() { jq_filter+='container_name=\"\(.name)\",' jq_filter+='container_id=\"\(.containerID)\"' jq_filter+='") | ' - jq_filter+='sub("docker://";"")' # containerID: docker://a346da9bc0e3eaba6b295f64ac16e02f2190db2cef570835706a9e7a36e2c722 + jq_filter+='sub("(docker|cri-o|containerd)://";"")' # containerID: docker://a346da9bc0e3eaba6b295f64ac16e02f2190db2cef570835706a9e7a36e2c722 local containers if ! containers=$(jq -r "${jq_filter}" <<< "$pods" 2>&1); then diff --git a/collectors/cgroups.plugin/cgroup-network-helper.sh b/collectors/cgroups.plugin/cgroup-network-helper.sh index 1b60f452a..f355480b8 100755 --- a/collectors/cgroups.plugin/cgroup-network-helper.sh +++ b/collectors/cgroups.plugin/cgroup-network-helper.sh @@ -76,7 +76,7 @@ debug() { pid= cgroup= -while [ ! -z "${1}" ] +while [ -n "${1}" ] do case "${1}" in --cgroup) cgroup="${2}"; shift 1;; @@ -164,7 +164,7 @@ virsh_find_all_interfaces_for_cgroup() { # shellcheck disable=SC2230 virsh="$(which virsh 2>/dev/null || command -v virsh 2>/dev/null)" - if [ ! -z "${virsh}" ] + if [ -n "${virsh}" ] then local d d="$(virsh_cgroup_to_domain_name "${c}")" @@ -172,7 +172,7 @@ virsh_find_all_interfaces_for_cgroup() { # e.g.: vm01\x2dweb => vm01-web (https://github.com/netdata/netdata/issues/11088#issuecomment-832618149) d="$(printf '%b' "${d}")" - if [ ! -z "${d}" ] + if [ -n "${d}" ] then debug "running: virsh domiflist ${d}; to find the network interfaces" @@ -203,8 +203,11 @@ netnsid_find_all_interfaces_for_pid() { local pid="${1}" [ -z "${pid}" ] && return 1 - local nsid=$(lsns -t net -p ${pid} -o NETNSID -nr) - [ -z "${nsid}" -o "${nsid}" = "unassigned" ] && return 1 + local nsid + nsid=$(lsns -t net -p "${pid}" -o NETNSID -nr 2>/dev/null) + if [ -z "${nsid}" ] || [ "${nsid}" = "unassigned" ]; then + return 1 + fi set_source "netnsid" ip link show |\ @@ -234,14 +237,14 @@ netnsid_find_all_interfaces_for_cgroup() { find_all_interfaces_of_pid_or_cgroup() { local p="${1}" c="${2}" # the pid and the cgroup path - if [ ! -z "${pid}" ] + if [ -n "${pid}" ] then # we have been called with a pid proc_pid_fdinfo_iff "${p}" netnsid_find_all_interfaces_for_pid "${p}" - elif [ ! 
-z "${c}" ] + elif [ -n "${c}" ] then # we have been called with a cgroup diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.c b/collectors/cgroups.plugin/sys_fs_cgroup.c index eea4d9ae7..92aa22c77 100644 --- a/collectors/cgroups.plugin/sys_fs_cgroup.c +++ b/collectors/cgroups.plugin/sys_fs_cgroup.c @@ -94,6 +94,11 @@ static struct cgroups_systemd_config_setting cgroups_systemd_options[] = { { .name = NULL, .setting = SYSTEMD_CGROUP_ERR }, }; +// Shared memory with information from detected cgroups +netdata_ebpf_cgroup_shm_t shm_cgroup_ebpf = {NULL, NULL}; +static int shm_fd_cgroup_ebpf = -1; +sem_t *shm_mutex_cgroup_ebpf = SEM_FAILED; + /* on Fed systemd is not in PATH for some reason */ #define SYSTEMD_CMD_RHEL "/usr/lib/systemd/systemd --version" #define SYSTEMD_HIERARCHY_STRING "default-hierarchy=" @@ -168,8 +173,6 @@ static enum cgroups_type cgroups_try_detect_version() if (!statfs(filename, &fsinfo)) { if (fsinfo.f_type == CGROUP2_SUPER_MAGIC) return CGROUPS_V2; - if (fsinfo.f_type == CGROUP_SUPER_MAGIC) - return CGROUPS_V1; } #endif @@ -463,6 +466,61 @@ void read_cgroup_plugin_configuration() { mountinfo_free_all(root); } +void netdata_cgroup_ebpf_set_values(size_t length) +{ + sem_wait(shm_mutex_cgroup_ebpf); + + shm_cgroup_ebpf.header->cgroup_max = cgroup_root_max; + shm_cgroup_ebpf.header->systemd_enabled = cgroup_enable_systemd_services | + cgroup_enable_systemd_services_detailed_memory | + cgroup_used_memory; + shm_cgroup_ebpf.header->body_length = length; + + sem_post(shm_mutex_cgroup_ebpf); +} + +void netdata_cgroup_ebpf_initialize_shm() +{ + shm_fd_cgroup_ebpf = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_CREAT | O_RDWR, 0660); + if (shm_fd_cgroup_ebpf < 0) { + error("Cannot initialize shared memory used by cgroup and eBPF, integration won't happen."); + return; + } + + size_t length = sizeof(netdata_ebpf_cgroup_shm_header_t) + cgroup_root_max * sizeof(netdata_ebpf_cgroup_shm_body_t); + if (ftruncate(shm_fd_cgroup_ebpf, length)) { + error("Cannot set size for shared memory."); + goto end_init_shm; + } + + shm_cgroup_ebpf.header = (netdata_ebpf_cgroup_shm_header_t *) mmap(NULL, length, + PROT_READ | PROT_WRITE, MAP_SHARED, + shm_fd_cgroup_ebpf, 0); + + if (!shm_cgroup_ebpf.header) { + error("Cannot map shared memory used between cgroup and eBPF, integration won't happen"); + goto end_init_shm; + } + shm_cgroup_ebpf.body = (netdata_ebpf_cgroup_shm_body_t *) ((char *)shm_cgroup_ebpf.header + + sizeof(netdata_ebpf_cgroup_shm_header_t)); + + shm_mutex_cgroup_ebpf = sem_open(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME, O_CREAT, + S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH, 1); + + if (shm_mutex_cgroup_ebpf != SEM_FAILED) { + netdata_cgroup_ebpf_set_values(length); + return; + } + + error("Cannot create semaphore, integration between eBPF and cgroup won't happen"); + munmap(shm_cgroup_ebpf.header, length); + +end_init_shm: + close(shm_fd_cgroup_ebpf); + shm_fd_cgroup_ebpf = -1; + shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME); +} + // ---------------------------------------------------------------------------- // cgroup objects @@ -597,10 +655,6 @@ struct cgroup_network_interface { struct cgroup_network_interface *next; }; -#define CGROUP_OPTIONS_DISABLED_DUPLICATE 0x00000001 -#define CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE 0x00000002 -#define CGROUP_OPTIONS_IS_UNIFIED 0x00000004 - // *** WARNING *** The fields are not thread safe. Take care of safe usage. 
struct cgroup { uint32_t options; @@ -609,6 +663,7 @@ struct cgroup { char enabled; // enabled in the config char pending_renames; + char *intermediate_id; // TODO: remove it when the renaming script is fixed char *id; uint32_t hash; @@ -1313,13 +1368,16 @@ static inline char *cgroup_chart_id_strdupz(const char *s) { char *r = strdupz(s); netdata_fix_chart_id(r); + return r; +} + +// TODO: move the code to cgroup_chart_id_strdupz() when the renaming script is fixed +static inline void substitute_dots_in_id(char *s) { // dots are used to distinguish chart type and id in streaming, so we should replace them - for (char *d = r; *d; d++) { + for (char *d = s; *d; d++) { if (*d == '.') *d = '-'; } - - return r; } char *parse_k8s_data(struct label **labels, char *data) @@ -1357,7 +1415,8 @@ static inline void cgroup_get_chart_name(struct cgroup *cg) { pid_t cgroup_pid; char command[CGROUP_CHARTID_LINE_MAX + 1]; - snprintfz(command, CGROUP_CHARTID_LINE_MAX, "exec %s '%s'", cgroups_rename_script, cg->chart_id); + // TODO: use cg->id when the renaming script is fixed + snprintfz(command, CGROUP_CHARTID_LINE_MAX, "exec %s '%s'", cgroups_rename_script, cg->intermediate_id); debug(D_CGROUP, "executing command \"%s\" for cgroup '%s'", command, cg->chart_id); FILE *fp = mypopen(command, &cgroup_pid); @@ -1394,6 +1453,7 @@ static inline void cgroup_get_chart_name(struct cgroup *cg) { freez(cg->chart_id); cg->chart_id = cgroup_chart_id_strdupz(name); + substitute_dots_in_id(cg->chart_id); cg->hash_chart = simple_hash(cg->chart_id); } } @@ -1420,7 +1480,10 @@ static inline struct cgroup *cgroup_add(const char *id) { cg->chart_title = cgroup_title_strdupz(id); + cg->intermediate_id = cgroup_chart_id_strdupz(id); + cg->chart_id = cgroup_chart_id_strdupz(id); + substitute_dots_in_id(cg->chart_id); cg->hash_chart = simple_hash(cg->chart_id); if(cgroup_use_unified_cgroups) cg->options |= CGROUP_OPTIONS_IS_UNIFIED; @@ -1461,10 +1524,6 @@ static inline struct cgroup *cgroup_add(const char *id) { strncpy(buffer, cg->id, CGROUP_CHARTID_LINE_MAX); char *s = buffer; - //freez(cg->chart_id); - //cg->chart_id = cgroup_chart_id_strdupz(s); - //cg->hash_chart = simple_hash(cg->chart_id); - // skip to the last slash size_t len = strlen(s); while(len--) if(unlikely(s[len] == '/')) break; @@ -1588,6 +1647,7 @@ static inline void cgroup_free(struct cgroup *cg) { free_pressure(&cg->memory_pressure); freez(cg->id); + freez(cg->intermediate_id); freez(cg->chart_id); freez(cg->chart_title); @@ -2056,6 +2116,69 @@ static inline void copy_discovered_cgroups() cgroup_root = discovered_cgroup_root; } +static void is_there_cgroup_procs(netdata_ebpf_cgroup_shm_body_t *out, char *id) +{ + struct stat buf; + + snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_cpuset_base, id); + if (likely(stat(out->path, &buf) == 0)) { + return; + } + + snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_blkio_base, id); + if (likely(stat(out->path, &buf) == 0)) { + return; + } + + snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_memory_base, id); + if (likely(stat(out->path, &buf) == 0)) { + return; + } + + snprintfz(out->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_devices_base, id); + if (likely(stat(out->path, &buf) == 0)) { + return; + } + + out->path[0] = '\0'; + out->enabled = 0; +} + +static inline void share_cgroups() +{ + struct cgroup *cg; + int count; + struct stat buf; + + if (shm_mutex_cgroup_ebpf == SEM_FAILED) { + return; + } + sem_wait(shm_mutex_cgroup_ebpf); + + for (cg = cgroup_root, count = 
0; cg ; cg = cg->next, count++) { + netdata_ebpf_cgroup_shm_body_t *ptr = &shm_cgroup_ebpf.body[count]; + char *prefix = (cg->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE) ? "" : "cgroup_"; + snprintfz(ptr->name, CGROUP_EBPF_NAME_SHARED_LENGTH - 1, "%s%s", prefix, cg->chart_title); + ptr->hash = simple_hash(ptr->name); + ptr->options = cg->options; + ptr->enabled = cg->enabled; + if (cgroup_use_unified_cgroups) { + snprintfz(ptr->path, FILENAME_MAX, "%s%s/cgroup.procs", cgroup_unified_base, cg->id); + if (likely(stat(ptr->path, &buf) == -1)) { + ptr->path[0] = '\0'; + ptr->enabled = 0; + } + } else { + is_there_cgroup_procs(ptr, cg->id); + } + + debug(D_CGROUP, "cgroup shared: NAME=%s, ENABLED=%d", ptr->name, ptr->enabled); + } + + shm_cgroup_ebpf.header->cgroup_root_count = count; + sem_post(shm_mutex_cgroup_ebpf); +} + static inline void find_all_cgroups() { debug(D_CGROUP, "searching for cgroups"); @@ -2112,6 +2235,8 @@ static inline void find_all_cgroups() { copy_discovered_cgroups(); uv_mutex_unlock(&cgroup_root_mutex); + share_cgroups(); + debug(D_CGROUP, "done searching for cgroups"); } @@ -2743,7 +2868,7 @@ void update_systemd_services_charts( if(unlikely(!cg->rd_mem_detailed_rss)) cg->rd_mem_detailed_rss = rrddim_add(st_mem_detailed_rss, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_set_by_pointer(st_mem_detailed_rss, cg->rd_mem_detailed_rss, cg->memory.total_rss + cg->memory.total_rss_huge); + rrddim_set_by_pointer(st_mem_detailed_rss, cg->rd_mem_detailed_rss, cg->memory.total_rss); if(unlikely(!cg->rd_mem_detailed_mapped)) cg->rd_mem_detailed_mapped = rrddim_add(st_mem_detailed_mapped, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); @@ -2792,7 +2917,15 @@ void update_systemd_services_charts( if(unlikely(!cg->rd_swap_usage)) cg->rd_swap_usage = rrddim_add(st_swap_usage, cg->chart_id, cg->chart_title, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE); - rrddim_set_by_pointer(st_swap_usage, cg->rd_swap_usage, cg->memory.msw_usage_in_bytes); + if(!(cg->options & CGROUP_OPTIONS_IS_UNIFIED)) { + rrddim_set_by_pointer( + st_swap_usage, + cg->rd_swap_usage, + cg->memory.msw_usage_in_bytes > (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) ? + cg->memory.msw_usage_in_bytes - (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) : 0); + } else { + rrddim_set_by_pointer(st_swap_usage, cg->rd_swap_usage, cg->memory.msw_usage_in_bytes); + } } if(likely(do_io && cg->io_service_bytes.updated)) { @@ -3482,8 +3615,8 @@ void update_cgroup_charts(int update_every) { rrddim_set( cg->st_mem_usage, "swap", - (cg->memory.msw_usage_in_bytes > cg->memory.usage_in_bytes) ? - cg->memory.msw_usage_in_bytes - cg->memory.usage_in_bytes : 0); + cg->memory.msw_usage_in_bytes > (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) ? 
+ cg->memory.msw_usage_in_bytes - (cg->memory.usage_in_bytes + cg->memory.total_inactive_file) : 0); } else { rrddim_set(cg->st_mem_usage, "swap", cg->memory.msw_usage_in_bytes); } @@ -4022,6 +4155,18 @@ static void cgroup_main_cleanup(void *ptr) { sleep_usec(step); } + if (shm_mutex_cgroup_ebpf != SEM_FAILED) { + sem_close(shm_mutex_cgroup_ebpf); + } + + if (shm_cgroup_ebpf.header) { + munmap(shm_cgroup_ebpf.header, shm_cgroup_ebpf.header->body_length); + } + + if (shm_fd_cgroup_ebpf > 0) { + close(shm_fd_cgroup_ebpf); + } + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; } @@ -4034,6 +4179,7 @@ void *cgroups_main(void *ptr) { int vdo_cpu_netdata = config_get_boolean("plugin:cgroups", "cgroups plugin resource charts", 1); read_cgroup_plugin_configuration(); + netdata_cgroup_ebpf_initialize_shm(); RRDSET *stcpu_thread = NULL; @@ -4057,7 +4203,7 @@ void *cgroups_main(void *ptr) { int error = uv_thread_create(&discovery_thread.thread, cgroup_discovery_worker, NULL); if (error) { - error("CGROUP: cannot create tread worker. uv_thread_create(): %s", uv_strerror(error)); + error("CGROUP: cannot create thread worker. uv_thread_create(): %s", uv_strerror(error)); goto exit; } uv_thread_set_name_np(discovery_thread.thread, "PLUGIN[cgroups]"); diff --git a/collectors/cgroups.plugin/sys_fs_cgroup.h b/collectors/cgroups.plugin/sys_fs_cgroup.h index 155330ff1..017aa8fb5 100644 --- a/collectors/cgroups.plugin/sys_fs_cgroup.h +++ b/collectors/cgroups.plugin/sys_fs_cgroup.h @@ -3,7 +3,7 @@ #ifndef NETDATA_SYS_FS_CGROUP_H #define NETDATA_SYS_FS_CGROUP_H 1 -#include "../../daemon/common.h" +#include "daemon/common.h" #if (TARGET_OS == OS_LINUX) @@ -20,6 +20,38 @@ extern void *cgroups_main(void *ptr); +#define CGROUP_OPTIONS_DISABLED_DUPLICATE 0x00000001 +#define CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE 0x00000002 +#define CGROUP_OPTIONS_IS_UNIFIED 0x00000004 + +typedef struct netdata_ebpf_cgroup_shm_header { + int cgroup_root_count; + int cgroup_max; + int systemd_enabled; + int __pad; + size_t body_length; +} netdata_ebpf_cgroup_shm_header_t; + +#define CGROUP_EBPF_NAME_SHARED_LENGTH 256 + +typedef struct netdata_ebpf_cgroup_shm_body { + // Considering what is exposed in this link https://en.wikipedia.org/wiki/Comparison_of_file_systems#Limits + // this length is enough to store what we want. 
+ char name[CGROUP_EBPF_NAME_SHARED_LENGTH]; + uint32_t hash; + uint32_t options; + int enabled; + char path[FILENAME_MAX + 1]; +} netdata_ebpf_cgroup_shm_body_t; + +typedef struct netdata_ebpf_cgroup_shm { + netdata_ebpf_cgroup_shm_header_t *header; + netdata_ebpf_cgroup_shm_body_t *body; +} netdata_ebpf_cgroup_shm_t; + +#define NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME "netdata_shm_cgroup_ebpf" +#define NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME "/netdata_sem_cgroup_ebpf" + #include "../proc.plugin/plugin_proc.h" #else // (TARGET_OS == OS_LINUX) diff --git a/collectors/charts.d.plugin/ap/ap.chart.sh b/collectors/charts.d.plugin/ap/ap.chart.sh index 5dd787835..80c9dc602 100644 --- a/collectors/charts.d.plugin/ap/ap.chart.sh +++ b/collectors/charts.d.plugin/ap/ap.chart.sh @@ -61,25 +61,25 @@ ap_create() { # create the chart with 3 dimensions cat << EOF -CHART ap_clients.${dev} '' "Connected clients to ${ssid} on ${dev}" "clients" ${dev} ap.clients line $((ap_priority + 1)) $ap_update_every +CHART ap_clients.${dev} '' "Connected clients to ${ssid} on ${dev}" "clients" ${dev} ap.clients line $((ap_priority + 1)) $ap_update_every '' '' 'ap' DIMENSION clients '' absolute 1 1 -CHART ap_bandwidth.${dev} '' "Bandwidth for ${ssid} on ${dev}" "kilobits/s" ${dev} ap.net area $((ap_priority + 2)) $ap_update_every +CHART ap_bandwidth.${dev} '' "Bandwidth for ${ssid} on ${dev}" "kilobits/s" ${dev} ap.net area $((ap_priority + 2)) $ap_update_every '' '' 'ap' DIMENSION received '' incremental 8 1024 DIMENSION sent '' incremental -8 1024 -CHART ap_packets.${dev} '' "Packets for ${ssid} on ${dev}" "packets/s" ${dev} ap.packets line $((ap_priority + 3)) $ap_update_every +CHART ap_packets.${dev} '' "Packets for ${ssid} on ${dev}" "packets/s" ${dev} ap.packets line $((ap_priority + 3)) $ap_update_every '' '' 'ap' DIMENSION received '' incremental 1 1 DIMENSION sent '' incremental -1 1 -CHART ap_issues.${dev} '' "Transmit Issues for ${ssid} on ${dev}" "issues/s" ${dev} ap.issues line $((ap_priority + 4)) $ap_update_every +CHART ap_issues.${dev} '' "Transmit Issues for ${ssid} on ${dev}" "issues/s" ${dev} ap.issues line $((ap_priority + 4)) $ap_update_every '' '' 'ap' DIMENSION retries 'tx retries' incremental 1 1 DIMENSION failures 'tx failures' incremental -1 1 -CHART ap_signal.${dev} '' "Average Signal for ${ssid} on ${dev}" "dBm" ${dev} ap.signal line $((ap_priority + 5)) $ap_update_every +CHART ap_signal.${dev} '' "Average Signal for ${ssid} on ${dev}" "dBm" ${dev} ap.signal line $((ap_priority + 5)) $ap_update_every '' '' 'ap' DIMENSION signal 'average signal' absolute 1 1000 -CHART ap_bitrate.${dev} '' "Bitrate for ${ssid} on ${dev}" "Mbps" ${dev} ap.bitrate line $((ap_priority + 6)) $ap_update_every +CHART ap_bitrate.${dev} '' "Bitrate for ${ssid} on ${dev}" "Mbps" ${dev} ap.bitrate line $((ap_priority + 6)) $ap_update_every '' '' 'ap' DIMENSION receive '' absolute 1 1000 DIMENSION transmit '' absolute -1 1000 DIMENSION expected 'expected throughput' absolute 1 1000 @@ -92,7 +92,7 @@ EOF # _update is called continuously, to collect the values ap_update() { # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). + # pass this parameter to the BEGIN statement (see below). 
# do all the work to collect / calculate the values # for each dimension diff --git a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh index 014a9c1de..e78d99e7d 100644 --- a/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh +++ b/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh @@ -80,35 +80,35 @@ apcupsd_create() { # create the charts cat << EOF -CHART apcupsd_${host}.charge '' "UPS Charge for ${host} on ${src}" "percentage" ups apcupsd.charge area $((apcupsd_priority + 1)) $apcupsd_update_every +CHART apcupsd_${host}.charge '' "UPS Charge for ${host} on ${src}" "percentage" ups apcupsd.charge area $((apcupsd_priority + 1)) $apcupsd_update_every '' '' 'apcupsd' DIMENSION battery_charge charge absolute 1 100 -CHART apcupsd_${host}.battery_voltage '' "UPS Battery Voltage for ${host} on ${src}" "Volts" ups apcupsd.battery.voltage line $((apcupsd_priority + 3)) $apcupsd_update_every +CHART apcupsd_${host}.battery_voltage '' "UPS Battery Voltage for ${host} on ${src}" "Volts" ups apcupsd.battery.voltage line $((apcupsd_priority + 3)) $apcupsd_update_every '' '' 'apcupsd' DIMENSION battery_voltage voltage absolute 1 100 DIMENSION battery_voltage_nominal nominal absolute 1 100 -CHART apcupsd_${host}.input_voltage '' "UPS Input Voltage for ${host} on ${src}" "Volts" input apcupsd.input.voltage line $((apcupsd_priority + 4)) $apcupsd_update_every +CHART apcupsd_${host}.input_voltage '' "UPS Input Voltage for ${host} on ${src}" "Volts" input apcupsd.input.voltage line $((apcupsd_priority + 4)) $apcupsd_update_every '' '' 'apcupsd' DIMENSION input_voltage voltage absolute 1 100 DIMENSION input_voltage_min min absolute 1 100 DIMENSION input_voltage_max max absolute 1 100 -CHART apcupsd_${host}.input_frequency '' "UPS Input Frequency for ${host} on ${src}" "Hz" input apcupsd.input.frequency line $((apcupsd_priority + 5)) $apcupsd_update_every +CHART apcupsd_${host}.input_frequency '' "UPS Input Frequency for ${host} on ${src}" "Hz" input apcupsd.input.frequency line $((apcupsd_priority + 5)) $apcupsd_update_every '' '' 'apcupsd' DIMENSION input_frequency frequency absolute 1 100 -CHART apcupsd_${host}.output_voltage '' "UPS Output Voltage for ${host} on ${src}" "Volts" output apcupsd.output.voltage line $((apcupsd_priority + 6)) $apcupsd_update_every +CHART apcupsd_${host}.output_voltage '' "UPS Output Voltage for ${host} on ${src}" "Volts" output apcupsd.output.voltage line $((apcupsd_priority + 6)) $apcupsd_update_every '' '' 'apcupsd' DIMENSION output_voltage voltage absolute 1 100 DIMENSION output_voltage_nominal nominal absolute 1 100 -CHART apcupsd_${host}.load '' "UPS Load for ${host} on ${src}" "percentage" ups apcupsd.load area $((apcupsd_priority)) $apcupsd_update_every +CHART apcupsd_${host}.load '' "UPS Load for ${host} on ${src}" "percentage" ups apcupsd.load area $((apcupsd_priority)) $apcupsd_update_every '' '' 'apcupsd' DIMENSION load load absolute 1 100 -CHART apcupsd_${host}.temp '' "UPS Temperature for ${host} on ${src}" "Celsius" ups apcupsd.temperature line $((apcupsd_priority + 7)) $apcupsd_update_every +CHART apcupsd_${host}.temp '' "UPS Temperature for ${host} on ${src}" "Celsius" ups apcupsd.temperature line $((apcupsd_priority + 7)) $apcupsd_update_every '' '' 'apcupsd' DIMENSION temp temp absolute 1 100 -CHART apcupsd_${host}.time '' "UPS Time Remaining for ${host} on ${src}" "Minutes" ups apcupsd.time area $((apcupsd_priority + 2)) $apcupsd_update_every +CHART apcupsd_${host}.time '' "UPS Time Remaining for 
${host} on ${src}" "Minutes" ups apcupsd.time area $((apcupsd_priority + 2)) $apcupsd_update_every '' '' 'apcupsd' DIMENSION time time absolute 1 100 -CHART apcupsd_${host}.online '' "UPS ONLINE flag for ${host} on ${src}" "boolean" ups apcupsd.online line $((apcupsd_priority + 8)) $apcupsd_update_every +CHART apcupsd_${host}.online '' "UPS ONLINE flag for ${host} on ${src}" "boolean" ups apcupsd.online line $((apcupsd_priority + 8)) $apcupsd_update_every '' '' 'apcupsd' DIMENSION online online absolute 0 1 EOF @@ -118,7 +118,7 @@ EOF apcupsd_update() { # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). + # pass this parameter to the BEGIN statement (see below). # do all the work to collect / calculate the values # for each dimension @@ -154,7 +154,7 @@ BEGIN { /^LOADPCT.*/ { load = \$3 * 100 }; /^ITEMP.*/ { temp = \$3 * 100 }; /^TIMELEFT.*/ { time = \$3 * 100 }; -/^STATUS.*/ { online=(\$3 == \"ONLINE\" || \$3 == \"ONBATT\")?1:0 }; +/^STATUS.*/ { online=(\$3 != \"COMMLOST\" && !(\$3 == \"SHUTTING\" && \$4 == \"DOWN\"))?1:0 }; END { print \"BEGIN apcupsd_${host}.online $1\"; print \"SET online = \" online; diff --git a/collectors/charts.d.plugin/charts.d.conf b/collectors/charts.d.plugin/charts.d.conf index 0872d39e6..2d32f73ea 100644 --- a/collectors/charts.d.plugin/charts.d.conf +++ b/collectors/charts.d.plugin/charts.d.conf @@ -45,3 +45,4 @@ # Nothing useful. # Just an example charts.d plugin you can use as a template. # example=force +# sensors=force diff --git a/collectors/charts.d.plugin/charts.d.plugin.in b/collectors/charts.d.plugin/charts.d.plugin.in index 1b5c3f337..9187fc25d 100755 --- a/collectors/charts.d.plugin/charts.d.plugin.in +++ b/collectors/charts.d.plugin/charts.d.plugin.in @@ -634,7 +634,7 @@ global_update() { charts_run_counter[$chart]=0 charts_serial_failures[$chart]=0 - echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]}" + echo "CHART netdata.plugin_chartsd_$chart '' 'Execution time for $chart plugin' 'milliseconds / run' charts.d netdata.plugin_charts area 145000 ${charts_update_every[$chart]} '' '' '$chart'" echo "DIMENSION run_time 'run time' absolute 1 1" done diff --git a/collectors/charts.d.plugin/example/example.chart.sh b/collectors/charts.d.plugin/example/example.chart.sh index 5ff51a579..6bbbcf1d7 100644 --- a/collectors/charts.d.plugin/example/example.chart.sh +++ b/collectors/charts.d.plugin/example/example.chart.sh @@ -89,11 +89,11 @@ example_check() { example_create() { # create the chart with 3 dimensions cat << EOF -CHART example.random '' "Random Numbers Stacked Chart" "% of random numbers" random random stacked $((example_priority)) $example_update_every +CHART example.random '' "Random Numbers Stacked Chart" "% of random numbers" random random stacked $((example_priority)) $example_update_every '' '' 'example' DIMENSION random1 '' percentage-of-absolute-row 1 1 DIMENSION random2 '' percentage-of-absolute-row 1 1 DIMENSION random3 '' percentage-of-absolute-row 1 1 -CHART example.random2 '' "A random number" "random number" random random area $((example_priority + 1)) $example_update_every +CHART example.random2 '' "A random number" "random number" random random area $((example_priority + 1)) $example_update_every '' '' 'example' DIMENSION random '' absolute 1 1 EOF @@ -103,7 +103,7 @@ EOF # _update is called continuously, to collect the 
values example_update() { # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). + # pass this parameter to the BEGIN statement (see below). example_get || return 1 diff --git a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh index a3a56b26d..d526f7a91 100644 --- a/collectors/charts.d.plugin/libreswan/libreswan.chart.sh +++ b/collectors/charts.d.plugin/libreswan/libreswan.chart.sh @@ -124,10 +124,10 @@ libreswan_create_one() { libreswan_tunnel_charts[${name}]="$(fixid "${name}")" cat << EOF -CHART libreswan.${libreswan_tunnel_charts[${name}]}_net '${name}_net' "LibreSWAN Tunnel ${name} Traffic" "kilobits/s" "${name}" libreswan.net area $((libreswan_priority)) $libreswan_update_every +CHART libreswan.${libreswan_tunnel_charts[${name}]}_net '${name}_net' "LibreSWAN Tunnel ${name} Traffic" "kilobits/s" "${name}" libreswan.net area $((libreswan_priority)) $libreswan_update_every '' '' 'libreswan' DIMENSION in '' incremental 8 1000 DIMENSION out '' incremental -8 1000 -CHART libreswan.${libreswan_tunnel_charts[${name}]}_uptime '${name}_uptime' "LibreSWAN Tunnel ${name} Uptime" "seconds" "${name}" libreswan.uptime line $((libreswan_priority + 1)) $libreswan_update_every +CHART libreswan.${libreswan_tunnel_charts[${name}]}_uptime '${name}_uptime' "LibreSWAN Tunnel ${name} Uptime" "seconds" "${name}" libreswan.uptime line $((libreswan_priority + 1)) $libreswan_update_every '' '' 'libreswan' DIMENSION uptime '' absolute 1 1 EOF @@ -173,7 +173,7 @@ VALUESEOF # _update is called continuously, to collect the values libreswan_update() { # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). + # pass this parameter to the BEGIN statement (see below). libreswan_get || return 1 libreswan_now=$(date +%s) diff --git a/collectors/charts.d.plugin/nut/nut.chart.sh b/collectors/charts.d.plugin/nut/nut.chart.sh index 60233361e..2f7e3f336 100644 --- a/collectors/charts.d.plugin/nut/nut.chart.sh +++ b/collectors/charts.d.plugin/nut/nut.chart.sh @@ -129,7 +129,7 @@ EOF2 nut_update() { # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). + # pass this parameter to the BEGIN statement (see below). 
# do all the work to collect / calculate the values # for each dimension diff --git a/collectors/charts.d.plugin/opensips/opensips.chart.sh b/collectors/charts.d.plugin/opensips/opensips.chart.sh index d3a2118ce..02401fd59 100644 --- a/collectors/charts.d.plugin/opensips/opensips.chart.sh +++ b/collectors/charts.d.plugin/opensips/opensips.chart.sh @@ -49,61 +49,61 @@ opensips_check() { opensips_create() { # create the charts cat << EOF -CHART opensips.dialogs_active '' "OpenSIPS Active Dialogs" "dialogs" dialogs '' area $((opensips_priority + 1)) $opensips_update_every +CHART opensips.dialogs_active '' "OpenSIPS Active Dialogs" "dialogs" dialogs '' area $((opensips_priority + 1)) $opensips_update_every '' '' 'opensips' DIMENSION dialog_active_dialogs active absolute 1 1 DIMENSION dialog_early_dialogs early absolute -1 1 -CHART opensips.users '' "OpenSIPS Users" "users" users '' line $((opensips_priority + 2)) $opensips_update_every +CHART opensips.users '' "OpenSIPS Users" "users" users '' line $((opensips_priority + 2)) $opensips_update_every '' '' 'opensips' DIMENSION usrloc_registered_users registered absolute 1 1 DIMENSION usrloc_location_users location absolute 1 1 DIMENSION usrloc_location_contacts contacts absolute 1 1 DIMENSION usrloc_location_expires expires incremental -1 1 -CHART opensips.registrar '' "OpenSIPS Registrar" "registrations/s" registrar '' line $((opensips_priority + 3)) $opensips_update_every +CHART opensips.registrar '' "OpenSIPS Registrar" "registrations/s" registrar '' line $((opensips_priority + 3)) $opensips_update_every '' '' 'opensips' DIMENSION registrar_accepted_regs accepted incremental 1 1 DIMENSION registrar_rejected_regs rejected incremental -1 1 -CHART opensips.transactions '' "OpenSIPS Transactions" "transactions/s" transactions '' line $((opensips_priority + 4)) $opensips_update_every +CHART opensips.transactions '' "OpenSIPS Transactions" "transactions/s" transactions '' line $((opensips_priority + 4)) $opensips_update_every '' '' 'opensips' DIMENSION tm_UAS_transactions UAS incremental 1 1 DIMENSION tm_UAC_transactions UAC incremental -1 1 -CHART opensips.core_rcv '' "OpenSIPS Core Receives" "queries/s" core '' line $((opensips_priority + 5)) $opensips_update_every +CHART opensips.core_rcv '' "OpenSIPS Core Receives" "queries/s" core '' line $((opensips_priority + 5)) $opensips_update_every '' '' 'opensips' DIMENSION core_rcv_requests requests incremental 1 1 DIMENSION core_rcv_replies replies incremental -1 1 -CHART opensips.core_fwd '' "OpenSIPS Core Forwards" "queries/s" core '' line $((opensips_priority + 6)) $opensips_update_every +CHART opensips.core_fwd '' "OpenSIPS Core Forwards" "queries/s" core '' line $((opensips_priority + 6)) $opensips_update_every '' '' 'opensips' DIMENSION core_fwd_requests requests incremental 1 1 DIMENSION core_fwd_replies replies incremental -1 1 -CHART opensips.core_drop '' "OpenSIPS Core Drops" "queries/s" core '' line $((opensips_priority + 7)) $opensips_update_every +CHART opensips.core_drop '' "OpenSIPS Core Drops" "queries/s" core '' line $((opensips_priority + 7)) $opensips_update_every '' '' 'opensips' DIMENSION core_drop_requests requests incremental 1 1 DIMENSION core_drop_replies replies incremental -1 1 -CHART opensips.core_err '' "OpenSIPS Core Errors" "queries/s" core '' line $((opensips_priority + 8)) $opensips_update_every +CHART opensips.core_err '' "OpenSIPS Core Errors" "queries/s" core '' line $((opensips_priority + 8)) $opensips_update_every '' '' 'opensips' DIMENSION core_err_requests 
requests incremental 1 1 DIMENSION core_err_replies replies incremental -1 1 -CHART opensips.core_bad '' "OpenSIPS Core Bad" "queries/s" core '' line $((opensips_priority + 9)) $opensips_update_every +CHART opensips.core_bad '' "OpenSIPS Core Bad" "queries/s" core '' line $((opensips_priority + 9)) $opensips_update_every '' '' 'opensips' DIMENSION core_bad_URIs_rcvd bad_URIs_rcvd incremental 1 1 DIMENSION core_unsupported_methods unsupported_methods incremental 1 1 DIMENSION core_bad_msg_hdr bad_msg_hdr incremental 1 1 -CHART opensips.tm_replies '' "OpenSIPS TM Replies" "replies/s" transactions '' line $((opensips_priority + 10)) $opensips_update_every +CHART opensips.tm_replies '' "OpenSIPS TM Replies" "replies/s" transactions '' line $((opensips_priority + 10)) $opensips_update_every '' '' 'opensips' DIMENSION tm_received_replies received incremental 1 1 DIMENSION tm_relayed_replies relayed incremental 1 1 DIMENSION tm_local_replies local incremental 1 1 -CHART opensips.transactions_status '' "OpenSIPS Transactions Status" "transactions/s" transactions '' line $((opensips_priority + 11)) $opensips_update_every +CHART opensips.transactions_status '' "OpenSIPS Transactions Status" "transactions/s" transactions '' line $((opensips_priority + 11)) $opensips_update_every '' '' 'opensips' DIMENSION tm_2xx_transactions 2xx incremental 1 1 DIMENSION tm_3xx_transactions 3xx incremental 1 1 DIMENSION tm_4xx_transactions 4xx incremental 1 1 DIMENSION tm_5xx_transactions 5xx incremental 1 1 DIMENSION tm_6xx_transactions 6xx incremental 1 1 -CHART opensips.transactions_inuse '' "OpenSIPS InUse Transactions" "transactions" transactions '' line $((opensips_priority + 12)) $opensips_update_every +CHART opensips.transactions_inuse '' "OpenSIPS InUse Transactions" "transactions" transactions '' line $((opensips_priority + 12)) $opensips_update_every '' '' 'opensips' DIMENSION tm_inuse_transactions inuse absolute 1 1 -CHART opensips.sl_replies '' "OpenSIPS SL Replies" "replies/s" core '' line $((opensips_priority + 13)) $opensips_update_every +CHART opensips.sl_replies '' "OpenSIPS SL Replies" "replies/s" core '' line $((opensips_priority + 13)) $opensips_update_every '' '' 'opensips' DIMENSION sl_1xx_replies 1xx incremental 1 1 DIMENSION sl_2xx_replies 2xx incremental 1 1 DIMENSION sl_3xx_replies 3xx incremental 1 1 @@ -114,31 +114,31 @@ DIMENSION sl_sent_replies sent incremental 1 1 DIMENSION sl_sent_err_replies error incremental 1 1 DIMENSION sl_received_ACKs ACKed incremental 1 1 -CHART opensips.dialogs '' "OpenSIPS Dialogs" "dialogs/s" dialogs '' line $((opensips_priority + 14)) $opensips_update_every +CHART opensips.dialogs '' "OpenSIPS Dialogs" "dialogs/s" dialogs '' line $((opensips_priority + 14)) $opensips_update_every '' '' 'opensips' DIMENSION dialog_processed_dialogs processed incremental 1 1 DIMENSION dialog_expired_dialogs expired incremental 1 1 DIMENSION dialog_failed_dialogs failed incremental -1 1 -CHART opensips.net_waiting '' "OpenSIPS Network Waiting" "kilobytes" net '' line $((opensips_priority + 15)) $opensips_update_every +CHART opensips.net_waiting '' "OpenSIPS Network Waiting" "kilobytes" net '' line $((opensips_priority + 15)) $opensips_update_every '' '' 'opensips' DIMENSION net_waiting_udp UDP absolute 1 1024 DIMENSION net_waiting_tcp TCP absolute 1 1024 -CHART opensips.uri_checks '' "OpenSIPS URI Checks" "checks / sec" uri '' line $((opensips_priority + 16)) $opensips_update_every +CHART opensips.uri_checks '' "OpenSIPS URI Checks" "checks / sec" uri '' line 
$((opensips_priority + 16)) $opensips_update_every '' '' 'opensips' DIMENSION uri_positive_checks positive incremental 1 1 DIMENSION uri_negative_checks negative incremental -1 1 -CHART opensips.traces '' "OpenSIPS Traces" "traces / sec" traces '' line $((opensips_priority + 17)) $opensips_update_every +CHART opensips.traces '' "OpenSIPS Traces" "traces / sec" traces '' line $((opensips_priority + 17)) $opensips_update_every '' '' 'opensips' DIMENSION siptrace_traced_requests requests incremental 1 1 DIMENSION siptrace_traced_replies replies incremental -1 1 -CHART opensips.shmem '' "OpenSIPS Shared Memory" "kilobytes" mem '' line $((opensips_priority + 18)) $opensips_update_every +CHART opensips.shmem '' "OpenSIPS Shared Memory" "kilobytes" mem '' line $((opensips_priority + 18)) $opensips_update_every '' '' 'opensips' DIMENSION shmem_total_size total absolute 1 1024 DIMENSION shmem_used_size used absolute 1 1024 DIMENSION shmem_real_used_size real_used absolute 1 1024 DIMENSION shmem_max_used_size max_used absolute 1 1024 DIMENSION shmem_free_size free absolute 1 1024 -CHART opensips.shmem_fragments '' "OpenSIPS Shared Memory Fragmentation" "fragments" mem '' line $((opensips_priority + 19)) $opensips_update_every +CHART opensips.shmem_fragments '' "OpenSIPS Shared Memory Fragmentation" "fragments" mem '' line $((opensips_priority + 19)) $opensips_update_every '' '' 'opensips' DIMENSION shmem_fragments fragments absolute 1 1 EOF @@ -147,7 +147,7 @@ EOF opensips_update() { # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). + # pass this parameter to the BEGIN statement (see below). # do all the work to collect / calculate the values # for each dimension @@ -158,7 +158,7 @@ opensips_update() { # local opensips_client_http_ then one or more of these a-z 0-9 _ then = and one of more of 0-9 # local opensips_server_all_ then one or more of these a-z 0-9 _ then = and one of more of 0-9 # 4. then execute this as a script with the eval - # be very carefull with eval: + # be very careful with eval: # prepare the script and always grep at the end the lines that are useful, so that # even if something goes wrong, no other code can be executed diff --git a/collectors/charts.d.plugin/sensors/README.md b/collectors/charts.d.plugin/sensors/README.md index cee3f601c..2a1cdb676 100644 --- a/collectors/charts.d.plugin/sensors/README.md +++ b/collectors/charts.d.plugin/sensors/README.md @@ -19,20 +19,37 @@ The plugin will provide charts for all configured system sensors The plugin will create Netdata charts for: -1. **Temperature** -2. **Voltage** -3. **Current** -4. **Power** -5. **Fans Speed** -6. **Energy** -7. **Humidity** +1. **Temperature** +2. **Voltage** +3. **Current** +4. **Power** +5. **Fans Speed** +6. **Energy** +7. **Humidity** One chart for every sensor chip found and each of the above will be created. +## Enable the collector + +The `sensors` collector is disabled by default. To enable it, edit the `charts.d.conf` file using `edit-config` from the +Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`. 
+ +```bash +cd /etc/netdata # Replace this path with your Netdata config directory, if different +sudo ./edit-config charts.d.conf +``` + +It also needs to be set to "force" to be enabled: + +```shell +# example=force +sensors=force +``` + ## Configuration -Edit the `charts.d/sensors.conf` configuration file using `edit-config` from the Netdata [config -directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`. +Edit the `charts.d/sensors.conf` configuration file using `edit-config` from the +Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`. ```bash cd /etc/netdata # Replace this path with your Netdata config directory, if different diff --git a/collectors/charts.d.plugin/sensors/sensors.chart.sh b/collectors/charts.d.plugin/sensors/sensors.chart.sh index b92187772..bff381f1c 100644 --- a/collectors/charts.d.plugin/sensors/sensors.chart.sh +++ b/collectors/charts.d.plugin/sensors/sensors.chart.sh @@ -143,7 +143,7 @@ sensors_create() { files="$(sensors_check_files "$files")" files="$(sensors_check_temp_type "$files")" [ -z "$files" ] && continue - echo "CHART sensors.temp_$id '' '$name Temperature' 'Celsius' 'temperature' 'sensors.temp' line $((sensors_priority + 1)) $sensors_update_every" + echo "CHART sensors.temp_$id '' '$name Temperature' 'Celsius' 'temperature' 'sensors.temp' line $((sensors_priority + 1)) $sensors_update_every '' '' 'sensors'" echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.temp_$id \$1\"" divisor=1000 ;; @@ -152,7 +152,7 @@ sensors_create() { files="$(ls "$path"/in*_input 2>/dev/null)" files="$(sensors_check_files "$files")" [ -z "$files" ] && continue - echo "CHART sensors.volt_$id '' '$name Voltage' 'Volts' 'voltage' 'sensors.volt' line $((sensors_priority + 2)) $sensors_update_every" + echo "CHART sensors.volt_$id '' '$name Voltage' 'Volts' 'voltage' 'sensors.volt' line $((sensors_priority + 2)) $sensors_update_every '' '' 'sensors'" echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.volt_$id \$1\"" divisor=1000 ;; @@ -161,7 +161,7 @@ sensors_create() { files="$(ls "$path"/curr*_input 2>/dev/null)" files="$(sensors_check_files "$files")" [ -z "$files" ] && continue - echo "CHART sensors.curr_$id '' '$name Current' 'Ampere' 'current' 'sensors.curr' line $((sensors_priority + 3)) $sensors_update_every" + echo "CHART sensors.curr_$id '' '$name Current' 'Ampere' 'current' 'sensors.curr' line $((sensors_priority + 3)) $sensors_update_every '' '' 'sensors'" echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.curr_$id \$1\"" divisor=1000 ;; @@ -170,7 +170,7 @@ sensors_create() { files="$(ls "$path"/power*_input 2>/dev/null)" files="$(sensors_check_files "$files")" [ -z "$files" ] && continue - echo "CHART sensors.power_$id '' '$name Power' 'Watt' 'power' 'sensors.power' line $((sensors_priority + 4)) $sensors_update_every" + echo "CHART sensors.power_$id '' '$name Power' 'Watt' 'power' 'sensors.power' line $((sensors_priority + 4)) $sensors_update_every '' '' 'sensors'" echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.power_$id \$1\"" divisor=1000000 ;; @@ -179,7 +179,7 @@ sensors_create() { files="$(ls "$path"/fan*_input 2>/dev/null)" files="$(sensors_check_files "$files")" [ -z "$files" ] && continue - echo "CHART sensors.fan_$id '' '$name Fans Speed' 'Rotations / Minute' 'fans' 'sensors.fans' line $((sensors_priority + 5)) $sensors_update_every" + echo "CHART sensors.fan_$id '' '$name Fans Speed' 'Rotations / Minute' 'fans' 'sensors.fans' line $((sensors_priority + 5)) $sensors_update_every '' '' 
'sensors'" echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.fan_$id \$1\"" ;; @@ -187,7 +187,7 @@ sensors_create() { files="$(ls "$path"/energy*_input 2>/dev/null)" files="$(sensors_check_files "$files")" [ -z "$files" ] && continue - echo "CHART sensors.energy_$id '' '$name Energy' 'Joule' 'energy' 'sensors.energy' areastack $((sensors_priority + 6)) $sensors_update_every" + echo "CHART sensors.energy_$id '' '$name Energy' 'Joule' 'energy' 'sensors.energy' areastack $((sensors_priority + 6)) $sensors_update_every '' '' 'sensors'" echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.energy_$id \$1\"" algorithm="incremental" divisor=1000000 @@ -197,7 +197,7 @@ sensors_create() { files="$(ls "$path"/humidity*_input 2>/dev/null)" files="$(sensors_check_files "$files")" [ -z "$files" ] && continue - echo "CHART sensors.humidity_$id '' '$name Humidity' 'Percent' 'humidity' 'sensors.humidity' line $((sensors_priority + 7)) $sensors_update_every" + echo "CHART sensors.humidity_$id '' '$name Humidity' 'Percent' 'humidity' 'sensors.humidity' line $((sensors_priority + 7)) $sensors_update_every '' '' 'sensors'" echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN sensors.humidity_$id \$1\"" divisor=1000 ;; @@ -237,7 +237,7 @@ sensors_create() { # _update is called continuously, to collect the values sensors_update() { # the first argument to this function is the microseconds since last update - # pass this parameter to the BEGIN statement (see bellow). + # pass this parameter to the BEGIN statement (see below). # do all the work to collect / calculate the values # for each dimension diff --git a/collectors/checks.plugin/plugin_checks.h b/collectors/checks.plugin/plugin_checks.h index 93494765d..c8057253c 100644 --- a/collectors/checks.plugin/plugin_checks.h +++ b/collectors/checks.plugin/plugin_checks.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGIN_CHECKS_H #define NETDATA_PLUGIN_CHECKS_H 1 -#include "../../daemon/common.h" +#include "daemon/common.h" #ifdef NETDATA_INTERNAL_CHECKS diff --git a/collectors/cups.plugin/cups_plugin.c b/collectors/cups.plugin/cups_plugin.c index 25d6f8cb5..1d493619e 100644 --- a/collectors/cups.plugin/cups_plugin.c +++ b/collectors/cups.plugin/cups_plugin.c @@ -6,7 +6,7 @@ * Released under GPL v3+ */ -#include "../../libnetdata/libnetdata.h" +#include "libnetdata/libnetdata.h" #include #include diff --git a/collectors/diskspace.plugin/plugin_diskspace.c b/collectors/diskspace.plugin/plugin_diskspace.c index 311b55adf..2e7d18909 100644 --- a/collectors/diskspace.plugin/plugin_diskspace.c +++ b/collectors/diskspace.plugin/plugin_diskspace.c @@ -83,6 +83,28 @@ int mount_point_cleanup(void *entry, void *data) { return 0; } +// for the full list of protected mount points look at +// https://github.com/systemd/systemd/blob/1eb3ef78b4df28a9e9f464714208f2682f957e36/src/core/namespace.c#L142-L149 +// https://github.com/systemd/systemd/blob/1eb3ef78b4df28a9e9f464714208f2682f957e36/src/core/namespace.c#L180-L194 +static const char *systemd_protected_mount_points[] = { + "/home", + "/root", + "/usr", + "/boot", + "/efi", + "/etc", + NULL +}; + +int mount_point_is_protected(char *mount_point) +{ + for (size_t i = 0; systemd_protected_mount_points[i] != NULL; i++) + if (!strcmp(mount_point, systemd_protected_mount_points[i])) + return 1; + + return 0; +} + static inline void do_disk_space_stats(struct mountinfo *mi, int update_every) { const char *family = mi->mount_point; const char *disk = mi->persistent_id; @@ -190,7 +212,12 @@ static inline void do_disk_space_stats(struct mountinfo *mi, int 
update_every) { if(unlikely(m->do_space == CONFIG_BOOLEAN_NO && m->do_inodes == CONFIG_BOOLEAN_NO)) return; - if(unlikely(mi->flags & MOUNTINFO_READONLY && !m->collected && m->do_space != CONFIG_BOOLEAN_YES && m->do_inodes != CONFIG_BOOLEAN_YES)) + if (unlikely( + mi->flags & MOUNTINFO_READONLY && + !mount_point_is_protected(mi->mount_point) && + !m->collected && + m->do_space != CONFIG_BOOLEAN_YES && + m->do_inodes != CONFIG_BOOLEAN_YES)) return; struct statvfs buff_statvfs; @@ -389,6 +416,10 @@ void *diskspace_main(void *ptr) { if(unlikely(mi->flags & (MOUNTINFO_IS_DUMMY | MOUNTINFO_IS_BIND))) continue; + // exclude mounts made by ProtectHome and ProtectSystem systemd hardening options + if(mi->flags & MOUNTINFO_READONLY && !strcmp(mi->root, mi->mount_point)) + continue; + do_disk_space_stats(mi, update_every); if(unlikely(netdata_exit)) break; } diff --git a/collectors/diskspace.plugin/plugin_diskspace.h b/collectors/diskspace.plugin/plugin_diskspace.h index 7c9df9d13..1f9670879 100644 --- a/collectors/diskspace.plugin/plugin_diskspace.h +++ b/collectors/diskspace.plugin/plugin_diskspace.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGIN_PROC_DISKSPACE_H #define NETDATA_PLUGIN_PROC_DISKSPACE_H -#include "../../daemon/common.h" +#include "daemon/common.h" #if (TARGET_OS == OS_LINUX) diff --git a/collectors/ebpf.plugin/Makefile.am b/collectors/ebpf.plugin/Makefile.am index 18b1fc6c8..2d5f92a6b 100644 --- a/collectors/ebpf.plugin/Makefile.am +++ b/collectors/ebpf.plugin/Makefile.am @@ -3,10 +3,6 @@ AUTOMAKE_OPTIONS = subdir-objects MAINTAINERCLEANFILES = $(srcdir)/Makefile.in -CLEANFILES = \ - reset_netdata_trace.sh \ - $(NULL) - include $(top_srcdir)/build/subst.inc SUFFIXES = .in @@ -16,12 +12,7 @@ userebpfconfigdir=$(configdir)/ebpf.d install-exec-local: $(INSTALL) -d $(DESTDIR)$(userebpfconfigdir) -dist_plugins_SCRIPTS = \ - reset_netdata_trace.sh \ - $(NULL) - dist_noinst_DATA = \ - reset_netdata_trace.sh.in \ README.md \ $(NULL) @@ -34,7 +25,18 @@ dist_ebpfconfig_DATA = \ ebpf.d/ebpf_kernel_reject_list.txt \ ebpf.d/cachestat.conf \ ebpf.d/dcstat.conf \ + ebpf.d/disk.conf \ + ebpf.d/fd.conf \ + ebpf.d/filesystem.conf \ + ebpf.d/hardirq.conf \ + ebpf.d/mdflush.conf \ + ebpf.d/mount.conf \ ebpf.d/network.conf \ + ebpf.d/oomkill.conf \ ebpf.d/process.conf \ + ebpf.d/shm.conf \ + ebpf.d/softirq.conf \ ebpf.d/sync.conf \ + ebpf.d/swap.conf \ + ebpf.d/vfs.conf \ $(NULL) diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md index 1e593786b..60f1fd742 100644 --- a/collectors/ebpf.plugin/README.md +++ b/collectors/ebpf.plugin/README.md @@ -1,35 +1,52 @@ # eBPF monitoring with Netdata -Netdata's extended Berkeley Packet Filter (eBPF) collector monitors kernel-level metrics for file descriptors, virtual -filesystem IO, and process management on Linux systems. You can use our eBPF collector to analyze how and when a process -accesses files, when it makes system calls, whether it leaks memory or creating zombie processes, and more. +eBPF consists of a wide toolchain that ultimately outputs a set of bytecode that will run inside the eBPF virtual +machine (VM) which lives inside the Linux kernel. The program in particular is executed in response to a [tracepoint +or kprobe](#probes-and-tracepoints) activation. -Netdata's eBPF monitoring toolkit uses two custom eBPF programs. The default, called `entry`, monitors calls to a -variety of kernel functions, such as `do_sys_open`, `__close_fd`, `vfs_read`, `vfs_write`, `_do_fork`, and more. 
The -`return` program also monitors the return of each kernel functions to deliver more granular metrics about how your -system and its applications interact with the Linux kernel. +Netdata has written many eBPF programs, which, when compiled and integrated into the Netdata Agent, are able to collect +a wide array of data about the host that would otherwise be impossible. The data eBPF programs can collect is truly unique, +which gives the Netdata Agent access to data that is high value but normally hard to capture. -eBPF monitoring can help you troubleshoot and debug how applications interact with the Linux kernel. See our [guide on -troubleshooting apps with eBPF metrics](/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md) for configuration -and troubleshooting tips. +eBPF monitoring can help you troubleshoot and debug how applications interact with the Linux kernel. See +our [guide on troubleshooting apps with eBPF metrics](/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md) for +configuration and troubleshooting tips.

[Figure: An example of VFS charts made possible by the eBPF collector plugin.]
-## Enable the collector on Linux
+## Probes and Tracepoints
+
+The following two features from the Linux kernel are used by Netdata to run eBPF programs:
+
+- Kprobes and return probes (kretprobe): Probes can be inserted at virtually any kernel instruction. When eBPF runs in
+  `entry` mode, it attaches only `kprobes` to internal functions, monitoring calls and some arguments every time a
+  function is called. The user can also change the configuration to use [`return`](#global) mode, which allows users
+  to monitor the return of these functions and detect possible failures.
+- Tracepoints are hooks to call specific functions. Tracepoints are more stable than `kprobes` and are preferred when
+  both options are available.
+
+In each case, wherever a normal kprobe, kretprobe, or tracepoint would have run its hook function, an eBPF program is
+run instead, performing various collection logic before letting the kernel continue its normal control flow.
+
+There are more methods by which eBPF programs can be triggered, but they are not currently supported; one example is
+uprobes, which allow hooking into arbitrary user-space functions in a similar manner to kprobes.
+
+## Manually enable the collector on Linux

**The eBPF collector is installed and enabled by default on most new installations of the Agent**. The eBPF collector
-does not currently work with [static build installations](/packaging/installer/methods/kickstart-64.md), but improved
-support is in active development.
+does not currently work with [static build installations](/packaging/installer/methods/kickstart-64.md) for kernels older
+than `4.11`, but improved support is in active development.

eBPF monitoring only works on Linux systems and with specific Linux kernels, including all kernels newer than `4.11.0`,
and all kernels on CentOS 7.6 or later.

@@ -39,72 +56,403 @@ section for details.

## Charts

-The eBPF collector creates an **eBPF** menu in the Agent's dashboard along with three sub-menus: **File**, **VFS**, and
-**Process**. All the charts in this section update every second. The collector stores the actual value inside of its
-process, but charts only show the difference between the values collected in the previous and current seconds.
+The eBPF collector creates charts on different menus, like System Overview, Memory, MD arrays, Disks, Filesystem,
+Mount Points, Networking Stack, systemd Services, and Applications.
+
+The collector stores the actual value inside of its process, but charts only show the difference between the values
+collected in the previous and current seconds.
+
+### System overview
+
+Not all charts within the System Overview menu are enabled by default, because they add around 100ns of overhead per
+function call. That is negligible on a human scale, but these functions are called many times, so the cumulative impact
+on the host adds up. See the [configuration](#configuration) section for details about how to enable them.
+
+#### Processes
+
+Internally, the Linux kernel treats both processes and threads as `tasks`. To create a thread, the kernel offers a few
+system calls: `fork(2)`, `vfork(2)`, and `clone(2)`. To generate this chart, the eBPF
+collector uses the following `tracepoints` and `kprobe`:
+
+- `sched/sched_process_fork`: Tracepoint called after a call to `fork(2)`, `vfork(2)`, or `clone(2)`.
+- `sched/sched_process_exec`: Tracepoint called after an exec-family syscall.
+- `kprobe/kernel_clone`: This is the main [`fork()`](https://elixir.bootlin.com/linux/v5.10/source/kernel/fork.c#L2415)
+  routine since kernel `5.10.0` was released.
+- `kprobe/_do_fork`: Like `kernel_clone`, but this was the main function between kernels `4.2.0` and `5.9.16`.
+- `kprobe/do_fork`: This was the main function before kernel `4.2.0`.
+
+#### Process Exit
+
+Ending a task requires two steps. The first is a call to the internal function `do_exit`, which notifies the operating
+system that the task is finishing its work. The second step is to release the kernel information with the internal
+function `release_task`. The difference between the two dimensions can help you discover
+[zombie processes](https://en.wikipedia.org/wiki/Zombie_process). To get the metrics, the collector uses:
+
+- `sched/sched_process_exit`: Tracepoint called after a task exits.
+- `kprobe/release_task`: This function is called when a process exits, as the kernel still needs to remove the process
+  descriptor.
+
+#### Task error
+
+The functions responsible for ending tasks do not return values, so this chart contains information about failures on
+process and thread creation only.
+
+#### Swap
+
+Inside the swap submenu the eBPF plugin creates the chart `swapcalls`, which shows when processes call the functions
+[`swap_readpage` and `swap_writepage`](https://hzliu123.github.io/linux-kernel/Page%20Cache%20in%20Linux%202.6.pdf),
+which are responsible for doing IO in swap memory. To capture the exact moment an access to swap happens,
+the collector attaches `kprobes` to the cited functions.
+
+#### Soft IRQ
+
+The following `tracepoints` are used to measure time usage for soft IRQs:
+
+- [`irq/softirq_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_entry): Called
+  before the softirq handler runs.
+- [`irq/softirq_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_exit): Called when
+  the softirq handler returns.
+
+#### Hard IRQ
+
+The following tracepoints are used to measure the latency of servicing a
+hardware interrupt request (hard IRQ):
+
+- [`irq/irq_handler_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_entry):
+  Called immediately before the IRQ action handler.
+- [`irq/irq_handler_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_exit):
+  Called immediately after the IRQ action handler returns.
+- `irq_vectors`: These are traces from `irq_handler_entry` and
+  `irq_handler_exit` when an IRQ is handled.
+  - `irq_vectors/local_timer_entry`
+  - `irq_vectors/local_timer_exit`
+  - `irq_vectors/reschedule_entry`
+  - `irq_vectors/reschedule_exit`
+  - `irq_vectors/call_function_entry`
+  - `irq_vectors/call_function_exit`
+  - `irq_vectors/call_function_single_entry`
+  - `irq_vectors/call_function_single_exit`
+  - `irq_vectors/irq_work_entry`
+  - `irq_vectors/irq_work_exit`
+  - `irq_vectors/error_apic_entry`
+  - `irq_vectors/error_apic_exit`
+  - `irq_vectors/thermal_apic_entry`
+  - `irq_vectors/thermal_apic_exit`
+  - `irq_vectors/threshold_apic_entry`
+  - `irq_vectors/threshold_apic_exit`
+  - `irq_vectors/deferred_error_entry`
+  - `irq_vectors/deferred_error_exit`
+  - `irq_vectors/spurious_apic_entry`
+  - `irq_vectors/spurious_apic_exit`
+  - `irq_vectors/x86_platform_ipi_entry`
+  - `irq_vectors/x86_platform_ipi_exit`
+
+#### IPC shared memory
+
+To monitor shared memory system call counts, the following `kprobes` are used:
+
+- `shmget`: Runs when [`shmget`](https://man7.org/linux/man-pages/man2/shmget.2.html) is called.
+- `shmat`: Runs when [`shmat`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
+- `shmdt`: Runs when [`shmdt`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
+- `shmctl`: Runs when [`shmctl`](https://man7.org/linux/man-pages/man2/shmctl.2.html) is called.
+
+### Memory
+
+In the memory submenu the eBPF plugin creates two submenus, **page cache** and **synchronization**, with the following
+organization:
+
+* Page Cache
+  * Page cache ratio
+  * Dirty pages
+  * Page cache hits
+  * Page cache misses
+* Synchronization
+  * File sync
+  * Memory map sync
+  * File system sync
+  * File range sync
+
+#### Page cache ratio
+
+The chart `cachestat_ratio` shows how processes are accessing page cache. In a normal scenario, we expect values around
+100%, which means that the majority of the work on the machine is processed in memory. To calculate the ratio, Netdata
+attaches `kprobes` to the following kernel functions:
+
+- `add_to_page_cache_lru`: Page addition.
+- `mark_page_accessed`: Access to cache.
+- `account_page_dirtied`: Dirty (modified) pages.
+- `mark_buffer_dirty`: Writes to page cache.
+
+#### Dirty pages
+
+The chart `cachestat_dirties` shows the number of pages that were modified, measured as the number of calls to the
+function `mark_buffer_dirty`.
+
+#### Page cache hits
+
+A page cache hit is when the page cache is successfully accessed with a read operation. We do not count pages that were
+added relatively recently.
+
+#### Page cache misses
+
+A page cache miss means that a page was not inside memory when the process tried to access it. This chart shows the
+difference between the number of calls to the functions `add_to_page_cache_lru` and `account_page_dirtied`.
+
+#### File sync
+
+This chart shows calls to the synchronization methods [`fsync(2)`](https://man7.org/linux/man-pages/man2/fdatasync.2.html)
+and [`fdatasync(2)`](https://man7.org/linux/man-pages/man2/fdatasync.2.html), which transfer all modified page caches
+for the files on disk devices. These calls block until the disk reports that the transfer has been completed. They flush
+data for specific file descriptors.
+
+#### Memory map sync
+
+The chart shows calls to the [`msync(2)`](https://man7.org/linux/man-pages/man2/msync.2.html) syscall. This syscall flushes
+changes to a file that was mapped into memory using [`mmap(2)`](https://man7.org/linux/man-pages/man2/mmap.2.html).
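+
+As an illustration only (this program is not part of Netdata, and the file name is hypothetical), the following
+minimal C program performs exactly one `msync(2)` call of the kind counted on this chart:
+
+```c
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+int main(void)
+{
+    int fd = open("data.bin", O_RDWR | O_CREAT, 0644);
+    if (fd == -1 || ftruncate(fd, 4096) == -1) {
+        perror("open/ftruncate");
+        return 1;
+    }
+
+    /* mmap(2): map the file into memory */
+    char *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+    if (map == MAP_FAILED) {
+        perror("mmap");
+        return 1;
+    }
+
+    memcpy(map, "dirty data", 10); /* dirty the mapping */
+
+    /* msync(2): flush the dirty mapping back to the file; this is the
+     * call that the memory map sync chart counts. */
+    if (msync(map, 4096, MS_SYNC) == -1)
+        perror("msync");
+
+    munmap(map, 4096);
+    close(fd);
+    return 0;
+}
+```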
+
+#### File system sync
+
+This chart monitors calls that commit filesystem caches to disk. Netdata attaches `kprobes` to
+[`sync(2)`](https://man7.org/linux/man-pages/man2/sync.2.html) and [`syncfs(2)`](https://man7.org/linux/man-pages/man2/sync.2.html).
+
+#### File range sync
+
+This chart shows calls to [`sync_file_range(2)`](https://man7.org/linux/man-pages/man2/sync_file_range.2.html), which
+synchronizes file segments with disk.
+
+> Note: This is the most dangerous syscall to synchronize data, according to its manual.
+
+### Multiple Device (MD) arrays
+
+The eBPF plugin shows multi-device flushes happening in real time. This can be used to explain some spikes happening
+in [disk latency](#disk) charts.
+
+By default, MD flush is disabled. To enable it, configure your
+`/etc/netdata/ebpf.d.conf` file as:
+
+```conf
+[global]
+    mdflush = yes
+```
+
+#### MD flush
+
+To collect data related to Linux multi-device (MD) flushing, the following kprobe is used:
+
+- `kprobe/md_flush_request`: Called whenever a request for flushing multi-device data is made.
+
+### Disk
+
+The eBPF plugin also shows a chart in the Disk section when the `disk` thread is enabled. This will create the
+chart `disk_latency_io` for each disk on the host. The following tracepoints are used:
+
+- [`block/block_rq_issue`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_issue):
+  IO request operation sent to a device driver.
+- [`block/block_rq_complete`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_complete):
+  IO operation completed by the device.
+
+### Filesystem
+
+This group has charts demonstrating how applications interact with the Linux
+kernel to open and close file descriptors. It also provides latency charts for
+several different filesystems.
 
-### File
+#### ext4
 
-This group has two charts demonstrating how software interacts with the Linux kernel to open and close file descriptors.
+To measure the latency of executing some actions in an
+[ext4](https://elixir.bootlin.com/linux/latest/source/fs/ext4) filesystem, the
+collector needs to attach `kprobes` and `kretprobes` for each of the following
+functions:
+
+- `ext4_file_read_iter`: Function used to measure read latency.
+- `ext4_file_write_iter`: Function used to measure write latency.
+- `ext4_file_open`: Function used to measure open latency.
+- `ext4_sync_file`: Function used to measure sync latency.
+
+#### ZFS
+
+To measure the latency of executing some actions in a ZFS filesystem, the
+collector needs to attach `kprobes` and `kretprobes` for each of the following
+functions:
+
+- `zpl_iter_read`: Function used to measure read latency.
+- `zpl_iter_write`: Function used to measure write latency.
+- `zpl_open`: Function used to measure open latency.
+- `zpl_fsync`: Function used to measure sync latency.
+
+#### XFS
+
+To measure the latency of executing some actions in an
+[xfs](https://elixir.bootlin.com/linux/latest/source/fs/xfs) filesystem, the
+collector needs to attach `kprobes` and `kretprobes` for each of the following
+functions:
+
+- `xfs_file_read_iter`: Function used to measure read latency.
+- `xfs_file_write_iter`: Function used to measure write latency.
+- `xfs_file_open`: Function used to measure open latency.
+- `xfs_file_fsync`: Function used to measure sync latency.
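+
+For readers curious how such latency measurements are typically implemented, below is a condensed sketch of the
+general kprobe/kretprobe timing pattern: record a timestamp on entry, compute the delta on return. This is a
+simplified illustration written against libbpf conventions, not Netdata's actual eBPF program, and the final
+histogram step is only indicated by a comment:
+
+```c
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <linux/ptrace.h>
+#include <bpf/bpf_helpers.h>
+
+/* Map holding the entry timestamp for each PID currently inside the call. */
+struct {
+    __uint(type, BPF_MAP_TYPE_HASH);
+    __type(key, __u32);
+    __type(value, __u64);
+    __uint(max_entries, 10240);
+} start SEC(".maps");
+
+SEC("kprobe/ext4_file_read_iter")
+int probe_entry(struct pt_regs *ctx)
+{
+    __u32 pid = (__u32)(bpf_get_current_pid_tgid() >> 32);
+    __u64 ts = bpf_ktime_get_ns();
+
+    /* remember when this PID entered the function */
+    bpf_map_update_elem(&start, &pid, &ts, BPF_ANY);
+    return 0;
+}
+
+SEC("kretprobe/ext4_file_read_iter")
+int probe_return(struct pt_regs *ctx)
+{
+    __u32 pid = (__u32)(bpf_get_current_pid_tgid() >> 32);
+    __u64 *tsp = bpf_map_lookup_elem(&start, &pid);
+
+    if (!tsp)
+        return 0;
+
+    __u64 delta = bpf_ktime_get_ns() - *tsp;
+    bpf_map_delete_elem(&start, &pid);
+    /* here `delta` would be bucketed into a latency histogram map */
+    return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+```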
+
+#### NFS
+
+To measure the latency of executing some actions in an
+[nfs](https://elixir.bootlin.com/linux/latest/source/fs/nfs) filesystem, the
+collector needs to attach `kprobes` and `kretprobes` for each of the following
+functions:
+
+- `nfs_file_read`: Function used to measure read latency.
+- `nfs_file_write`: Function used to measure write latency.
+- `nfs_file_open`: Function used to measure open latency.
+- `nfs4_file_open`: Function used to measure open latency for NFS v4.
+- `nfs_getattr`: Function used to measure sync latency.
+
+#### btrfs
+
+To measure the latency of executing some actions in a [btrfs](https://elixir.bootlin.com/linux/latest/source/fs/btrfs/file.c)
+filesystem, the collector needs to attach `kprobes` and `kretprobes` for each of the following functions:
+
+> Note: We list two functions used to measure `read` latency, but only one of them, either `btrfs_file_read_iter` or
+> `generic_file_read_iter`, is used, depending on the kernel version.
+
+- `btrfs_file_read_iter`: Function used to measure read latency since kernel `5.10.0`.
+- `generic_file_read_iter`: Like `btrfs_file_read_iter`, but this function was used before kernel `5.10.0`.
+- `btrfs_file_write_iter`: Function used to write data.
+- `btrfs_file_open`: Function used to open files.
+- `btrfs_sync_file`: Function used to synchronize data to the filesystem.
 
 #### File descriptor
 
-This chart contains two dimensions that show the number of calls to the functions `do_sys_open` and `__close_fd`. Most
-software do not commonly call these functions directly, but they are behind the system calls `open(2)`, `openat(2)`,
-and `close(2)`.
+To provide metrics related to `open` and `close` events, the collector does not attach `kprobes` to every syscall used
+to perform these events. Instead, it attaches them to the common kernel functions behind those syscalls:
+
+- [`do_sys_open`](https://0xax.gitbooks.io/linux-insides/content/SysCall/linux-syscall-5.html): Internal function used to
+  open files.
+- [`do_sys_openat2`](https://elixir.bootlin.com/linux/v5.6/source/fs/open.c#L1162):
+  Function called from `do_sys_open` since version `5.6.0`.
+- [`close_fd`](https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg2271761.html): Function used to close file
+  descriptors since kernel `5.11.0`.
+- `__close_fd`: Function used to close files before version `5.11.0`.
 
 #### File error
 
 This chart shows the number of times some software tried and failed to open or close a file descriptor.
 
-### VFS
+#### VFS
+
+The Linux Virtual File System (VFS) is an abstraction layer on top of a
+concrete filesystem like the ones listed in the parent section, e.g. `ext4`.
 
-A [virtual file system](https://en.wikipedia.org/wiki/Virtual_file_system) (VFS) is a layer on top of regular
-filesystems. The functions present inside this API are used for all filesystems, so it's possible the charts in this
-group won't show _all_ the actions that occurred on your system.
+In this section we list the mechanisms by which we gather VFS data, and the
+charts that are consequently created.
 
-#### Deleted objects
+##### VFS eBPF Hooks
 
-This chart monitors calls for `vfs_unlink`. This function is responsible for removing objects from the file system.
+To measure the latency and total quantity of executing some VFS-level
+functions, ebpf.plugin needs to attach kprobes and kretprobes for each of the
+following functions:
 
-#### IO
+- `vfs_write`: Function used for monitoring the number of successful & failed
+  filesystem write calls, as well as the total number of written bytes.
+- `vfs_writev`: Same function as `vfs_write` but for vector writes (i.e. a
+  single write operation using a group of buffers rather than one).
+- `vfs_read`: Function used for monitoring the number of successful & failed
+  filesystem read calls, as well as the total number of read bytes.
+- `vfs_readv`: Same function as `vfs_read` but for vector reads (i.e. a single
+  read operation using a group of buffers rather than one).
+- `vfs_unlink`: Function used for monitoring the number of successful & failed
+  filesystem unlink calls.
+- `vfs_fsync`: Function used for monitoring the number of successful & failed
+  filesystem fsync calls.
+- `vfs_open`: Function used for monitoring the number of successful & failed
+  filesystem open calls.
+- `vfs_create`: Function used for monitoring the number of successful & failed
+  filesystem create calls.
+
+##### VFS Deleted objects
+
+This chart monitors calls to `vfs_unlink`. This function is responsible for removing objects from the file system.
+
+##### VFS IO
 
 This chart shows the number of calls to the functions `vfs_read` and `vfs_write`.
 
-#### IO bytes
+##### VFS IO bytes
 
-This chart also monitors `vfs_read` and `vfs_write`, but instead shows the total of bytes read and written with these
-functions.
+This chart also monitors `vfs_read` and `vfs_write` but, instead of the number of calls, it shows the total amount of
+bytes read and written with these functions.
 
 The Agent displays the number of bytes written as negative because they are moving down to disk.
 
-#### IO errors
+##### VFS IO errors
 
 The Agent counts and shows the number of instances where a running program experiences a read or write error.
 
-### Process
+##### VFS Create
 
-For this group, the eBPF collector monitors process/thread creation and process end, and then displays any errors in the
-following charts.
+This chart shows the number of calls to `vfs_create`. This function is responsible for creating files.
 
-#### Process thread
+##### VFS Synchronization
 
-Internally, the Linux kernel treats both processes and threads as `tasks`. To create a thread, the kernel offers a few
-system calls: `fork(2)`, `vfork(2)` and `clone(2)`. In turn, each of these system calls use the function `_do_fork`. To
-generate this chart, the eBPF collector monitors `_do_fork` to populate the `process` dimension, and monitors
-`sys_clone` to identify threads.
+This chart shows the number of calls to `vfs_fsync`. This function is responsible for calling `fsync(2)` or
+`fdatasync(2)` on a file. You can see more details in the Synchronization section.
 
-#### Exit
+##### VFS Open
 
-Ending a task requires two steps. The first is a call to the internal function `do_exit`, which notifies the operating
-system that the task is finishing its work. The second step is to release the kernel information with the internal
-function `release_task`. The difference between the two dimensions can help you discover [zombie
-processes](https://en.wikipedia.org/wiki/Zombie_process).
+This chart shows the number of calls to `vfs_open`. This function is responsible for opening files.
 
-#### Task error
+#### Directory Cache
 
-The functions responsible for ending tasks do not return values, so this chart contains information about failures on
-process and thread creation.
+Metrics for the directory cache are collected using a kprobe for `lookup_fast`, because we are only interested in the
+number of times this function is accessed.
+On the other hand, for `d_lookup` we are not only interested in the number of times it
+is accessed, but also in possible errors, so we need to attach a `kretprobe`. For this reason, the following are used:
+
+- [`lookup_fast`](https://lwn.net/Articles/649115/): Called to look at data inside the directory cache.
+- [`d_lookup`](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/fs/dcache.c?id=052b398a43a7de8c68c13e7fa05d6b3d16ce6801#n2223):
+  Called when the desired file is not inside the directory cache.
+
+### Mount Points
+
+The following `kprobes` are used to collect `mount` & `unmount` call counts:
+
+- [`mount`](https://man7.org/linux/man-pages/man2/mount.2.html): Mounts a filesystem on the host.
+- [`umount`](https://man7.org/linux/man-pages/man2/umount.2.html): Unmounts a filesystem from the host.
+
+### Networking Stack
+
+Netdata monitors socket bandwidth by attaching `kprobes` to internal functions.
+
+#### TCP functions
+
+This chart demonstrates calls to the functions `tcp_sendmsg`, `tcp_cleanup_rbuf`, and `tcp_close`, which are used
+to send & receive data and to close connections when the `TCP` protocol is used.
+
+#### TCP bandwidth
+
+Like the previous chart, this one also monitors `tcp_sendmsg` and `tcp_cleanup_rbuf`, but instead of showing the number
+of calls, it demonstrates the number of bytes sent and received.
+
+#### TCP retransmit
+
+This chart demonstrates calls to the function `tcp_retransmit_skb`, which is responsible for executing TCP
+retransmission when the receiver did not acknowledge the packet within the expected time.
+
+#### UDP functions
+
+This chart demonstrates calls to the functions `udp_sendmsg` and `udp_recvmsg`, which are responsible for sending &
+receiving data for connections when the `UDP` protocol is used.
+
+#### UDP bandwidth
+
+Like the previous chart, this one also monitors `udp_sendmsg` and `udp_recvmsg`, but instead of showing the number of
+calls, it monitors the number of bytes sent and received.
+
+### Apps
+
+#### OOM Killing
+
+These are tracepoints related to [OOM](https://en.wikipedia.org/wiki/Out_of_memory) killing processes:
+
+- `oom/mark_victim`: Monitors when an oomkill event happens.
 
 ## Configuration
 
@@ -134,7 +482,7 @@ cd /etc/netdata/   # Replace with your Netdata configuration directory, if not /
 
 The `[global]` section defines settings for the whole eBPF collector.
 
-#### ebpf load mode
+#### eBPF load mode
 
 The collector has two different eBPF programs. These programs monitor the same functions inside the kernel, but they
 monitor, process, and display different kinds of information.
 
@@ -143,43 +491,20 @@ By default, this plugin uses the `entry` mode. Changing this mode can create sig
 system, but also offer valuable information if you are developing or debugging software. The `ebpf load mode` option
 accepts the following values:
 
-- `entry`: This is the default mode. In this mode, the eBPF collector only monitors calls for the functions described
-  in the sections above, and does not show charts related to errors.
-- `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-  new charts for the return of these functions, such as errors. Monitoring function returns can help in debugging
-  software, such as failing to close file descriptors or creating zombie processes.
-- `update every`: Number of seconds used for eBPF to send data for Netdata.
-- `pid table size`: Defines the maximum number of PIDs stored inside the application hash table.
-
+- `entry`: This is the default mode. In this mode, the eBPF collector only monitors calls for the functions described in
+  the sections above, and does not show charts related to errors.
+- `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates new
+  charts for the return of these functions, such as errors. Monitoring function returns can help in debugging software,
+  such as failing to close file descriptors or creating zombie processes.
+
+The following `[global]` options are not load modes, but are configured in the same section:
+
+- `update every`: Number of seconds between each batch of data eBPF sends to Netdata.
+- `pid table size`: Defines the maximum number of PIDs stored inside the application hash table.
+
 #### Integration with `apps.plugin`
 
 The eBPF collector also creates charts for each running application through an integration with the
 [`apps.plugin`](/collectors/apps.plugin/README.md). This integration helps you understand how specific applications
 interact with the Linux kernel.
 
-When the integration is enabled, your dashboard will also show the following charts using low-level Linux metrics:
-
-- eBPF file
-  - Number of calls to open files. (`apps.file_open`)
-  - Number of files closed. (`apps.file_closed`)
-  - Number of calls to open files that returned errors.
-  - Number of calls to close files that returned errors.
-- eBPF syscall
-  - Number of calls to delete files. (`apps.file_deleted`)
-  - Number of calls to `vfs_write`. (`apps.vfs_write_call`)
-  - Number of calls to `vfs_read`. (`apps.vfs_read_call`)
-  - Number of bytes written with `vfs_write`. (`apps.vfs_write_bytes`)
-  - Number of bytes read with `vfs_read`. (`apps.vfs_read_bytes`)
-  - Number of calls to write a file that returned errors.
-  - Number of calls to read a file that returned errors.
-- eBPF process
-  - Number of process created with `do_fork`. (`apps.process_create`)
-  - Number of threads created with `do_fork` or `__x86_64_sys_clone`, depending on your system's kernel version. (`apps.thread_create`)
-  - Number of times that a process called `do_exit`. (`apps.task_close`)
-- eBPF net
-  - Number of bytes sent. (`apps.bandwidth_sent`)
-  - Number of bytes received. (`apps.bandwidth_recv`)
-
 If you want to _disable_ the integration with `apps.plugin` along with the above charts, change the setting `apps` to
 `no`.
 
@@ -188,30 +513,129 @@ If you want to _disable_ the integration with `apps.plugin` along with the above
    apps = yes
 ```
 
-When the integration is enabled, eBPF collector allocates memory for each process running. The total
- allocated memory has direct relationship with the kernel version. When the eBPF plugin is running on kernels newer than `4.15`,
- it uses per-cpu maps to speed up the update of hash tables. This also implies storing data for the same PID
- for each processor it runs.
+When the integration is enabled, the eBPF collector allocates memory for each running process. The total allocated
+memory has a direct relationship with the kernel version. When the eBPF plugin is running on kernels newer than `4.15`,
+it uses per-cpu maps to speed up the update of hash tables. This also implies storing data for the same PID for each
+processor it runs on.
+
+#### Integration with `cgroups.plugin`
 
-#### `[ebpf programs]`
+The eBPF collector also creates charts for each cgroup through an integration with the
+[`cgroups.plugin`](/collectors/cgroups.plugin/README.md). This integration helps you understand how a specific cgroup
+interacts with the Linux kernel.
+
+The integration with `cgroups.plugin` is disabled by default to avoid creating overhead on your system.
+If you want to _enable_ the integration with `cgroups.plugin`, change the `cgroups` setting to `yes`.
+
+```conf
+[global]
+   cgroups = yes
+```
+
+If you do not need to monitor every metric for your `cgroups`, you can enable `cgroups` inside
+`ebpf.d.conf`, and then disable the collection for a specific `thread` by following the steps in the
+[Configuration](#configuration) section.
+
+#### Integration Dashboard Elements
+
+When an integration is enabled, your dashboard will also show the following cgroups and apps charts using low-level
+Linux metrics:
+
+> Note: The parenthetical accompanying each bulleted item provides the chart name.
+
+- mem
+  - Number of processes killed due to out-of-memory conditions. (`oomkills`)
+- process
+  - Number of processes created with `do_fork`. (`process_create`)
+  - Number of threads created with `do_fork` or `clone(2)`, depending on your system's kernel
+    version. (`thread_create`)
+  - Number of times that a process called `do_exit`. (`task_exit`)
+  - Number of times that a process called `release_task`. (`task_close`)
+  - Number of times that an error happened when creating a thread or process. (`task_error`)
+- swap
+  - Number of calls to `swap_readpage`. (`swap_read_call`)
+  - Number of calls to `swap_writepage`. (`swap_write_call`)
+- network
+  - Number of bytes sent. (`total_bandwidth_sent`)
+  - Number of bytes received. (`total_bandwidth_recv`)
+  - Number of calls to `tcp_sendmsg`. (`bandwidth_tcp_send`)
+  - Number of calls to `tcp_cleanup_rbuf`. (`bandwidth_tcp_recv`)
+  - Number of calls to `tcp_retransmit_skb`. (`bandwidth_tcp_retransmit`)
+  - Number of calls to `udp_sendmsg`. (`bandwidth_udp_send`)
+  - Number of calls to `udp_recvmsg`. (`bandwidth_udp_recv`)
+- file access
+  - Number of calls to open files. (`file_open`)
+  - Number of calls to open files that returned errors. (`open_error`)
+  - Number of files closed. (`file_closed`)
+  - Number of calls to close files that returned errors. (`file_error_closed`)
+- vfs
+  - Number of calls to `vfs_unlink`. (`file_deleted`)
+  - Number of calls to `vfs_write`. (`vfs_write_call`)
+  - Number of calls to write to a file that returned errors. (`vfs_write_error`)
+  - Number of calls to `vfs_read`. (`vfs_read_call`)
+  - Number of bytes written with `vfs_write`. (`vfs_write_bytes`)
+  - Number of bytes read with `vfs_read`. (`vfs_read_bytes`)
+  - Number of calls to read from a file that returned errors. (`vfs_read_error`)
+  - Number of calls to `vfs_fsync`. (`vfs_fsync`)
+  - Number of calls to sync a file that returned errors. (`vfs_fsync_error`)
+  - Number of calls to `vfs_open`. (`vfs_open`)
+  - Number of calls to open a file that returned errors. (`vfs_open_error`)
+  - Number of calls to `vfs_create`. (`vfs_create`)
+  - Number of calls to create a file that returned errors. (`vfs_create_error`)
- page cache
+  - Ratio of pages accessed. (`cachestat_ratio`)
+  - Number of modified pages ("dirty"). (`cachestat_dirties`)
+  - Number of accessed pages. (`cachestat_hits`)
+  - Number of pages brought from disk. (`cachestat_misses`)
+- directory cache
+  - Ratio of files available in directory cache. (`dc_hit_ratio`)
+  - Number of files accessed. (`dc_reference`)
+  - Number of files accessed that were not in cache. (`dc_not_cache`)
+  - Number of files not found. (`dc_not_found`)
+- ipc shm
+  - Number of calls to `shmget`. (`shmget_call`)
+  - Number of calls to `shmat`. (`shmat_call`)
+  - Number of calls to `shmdt`. (`shmdt_call`)
+  - Number of calls to `shmctl`. (`shmctl_call`)
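+
+As an illustration of the `ipc shm` charts above, the following standalone C program (not part of Netdata) would
+increment each of the four counters exactly once when run:
+
+```c
+#include <stdio.h>
+#include <string.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+
+int main(void)
+{
+    /* shmget(2): create a private 4 KiB segment -> counted as `shmget_call` */
+    int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
+    if (id == -1) {
+        perror("shmget");
+        return 1;
+    }
+
+    /* shmat(2): attach the segment -> counted as `shmat_call` */
+    void *p = shmat(id, NULL, 0);
+    if (p == (void *)-1) {
+        perror("shmat");
+        return 1;
+    }
+    strcpy(p, "hello");
+
+    /* shmdt(2): detach the segment -> counted as `shmdt_call` */
+    shmdt(p);
+
+    /* shmctl(2): remove the segment -> counted as `shmctl_call` */
+    shmctl(id, IPC_RMID, NULL);
+    return 0;
+}
+```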
+
+### `[ebpf programs]`
 
 The eBPF collector enables and runs the following eBPF programs by default:
 
-- `cachestat`: Netdata's eBPF data collector creates charts about the memory page cache. When the integration with
-  [`apps.plugin`](/collectors/apps.plugin/README.md) is enabled, this collector creates charts for the whole host _and_
-  for each application.
-- `dcstat` : This eBPF program creates charts that show information about file access using directory cache. It appends
-  `kprobes` for `lookup_fast()` and `d_lookup()` to identify if files are inside directory cache, outside and
-  files are not found.
-- `process`: This eBPF program creates charts that show information about process creation, VFS IO, and files removed.
-  When in `return` mode, it also creates charts showing errors when these operations are executed.
-- `network viewer`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
-  bandwidth consumed by each.
-- `sync`: Montitor calls for syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
+- `fd`: This eBPF program creates charts that show information about calls to open files.
+- `mount`: This eBPF program creates charts that show calls to the syscalls mount(2) and umount(2).
+- `shm`: This eBPF program creates charts that show calls to the syscalls shmget(2), shmat(2), shmdt(2) and shmctl(2).
+- `sync`: Monitors calls to the syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
+- `network viewer`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
+  bandwidth consumed by each.
+- `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions.
+- `process`: This eBPF program creates charts that show information about the process life cycle. When in `return` mode,
+  it also creates charts showing errors when these operations are executed.
+- `hardirq`: This eBPF program creates charts that show information about time spent servicing individual hardware
+  interrupt requests (hard IRQs).
+- `softirq`: This eBPF program creates charts that show information about time spent servicing individual software
+  interrupt requests (soft IRQs).
+- `oomkill`: This eBPF program creates a chart that shows OOM kills for all applications recognized via
+  the `apps.plugin` integration. Note that this program will show application charts regardless of whether apps
+  integration is turned on or off.
+
+You can also enable the following eBPF programs:
+
+- `cachestat`: Netdata's eBPF data collector creates charts about the memory page cache. When the integration with
+  [`apps.plugin`](/collectors/apps.plugin/README.md) is enabled, this collector creates charts for the whole host _and_
+  for each application.
+- `dcstat`: This eBPF program creates charts that show information about file access using the directory cache. It
+  attaches `kprobes` to `lookup_fast()` and `d_lookup()` to identify whether files are inside the directory cache,
+  outside of it, or not found.
+- `disk`: This eBPF program creates charts that show information about disk latency independent of the filesystem.
+- `filesystem`: This eBPF program creates charts that show information about latency for supported filesystems.
+- `swap`: This eBPF program creates charts that show information about swap access.
+- `mdflush`: This eBPF program creates charts that show information about
+  multi-device software flushes.
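+
+For example, assuming the default configuration layout described in this document, the optional programs could be
+enabled from the `[ebpf programs]` section of `ebpf.d.conf` (the values shown here are illustrative, not defaults):
+
+```conf
+[ebpf programs]
+    cachestat = yes
+    dcstat = yes
+    disk = yes
+    filesystem = yes
+    swap = yes
+    mdflush = yes
+```
+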
 ## Thread configuration
 
-You can configure each thread of the eBPF data collector by editing either the `cachestat.conf`, `process.conf`,
+You can configure each thread of the eBPF data collector by editing either the `cachestat.conf`, `process.conf`, or
 `network.conf` files. Use [`edit-config`](/docs/configure/nodes.md) from your Netdata config directory:
 
@@ -225,10 +649,16 @@ The following configuration files are available:
 
 - `cachestat.conf`: Configuration for the `cachestat` thread.
 - `dcstat.conf`: Configuration for the `dcstat` thread.
+- `disk.conf`: Configuration for the `disk` thread.
+- `fd.conf`: Configuration for the `file descriptor` thread.
+- `filesystem.conf`: Configuration for the `filesystem` thread.
+- `hardirq.conf`: Configuration for the `hardirq` thread.
 - `process.conf`: Configuration for the `process` thread.
-- `network.conf`: Configuration for the `network viewer` thread. This config file overwrites the global options and
-  also lets you specify which network the eBPF collector monitors.
+- `network.conf`: Configuration for the `network viewer` thread. This config file overwrites the global options and also
+  lets you specify which networks the eBPF collector monitors.
+- `softirq.conf`: Configuration for the `softirq` thread.
 - `sync.conf`: Configuration for the `sync` thread.
+- `vfs.conf`: Configuration for the `vfs` thread.
 
 ### Network configuration
 
The network configuration options are divided in the following sections:
 
 #### `[network connections]`
 
-You can configure the information shown on `outbound` and `inbound` charts with the settings in this section.
+You can configure the information shown on `outbound` and `inbound` charts with the settings in this section.
 
 ```conf
 [network connections]
 ```
 
 When you define a `ports` setting, Netdata will collect network metrics for that specific port. For example, if you
-write `ports = 19999`, Netdata will collect only connections for itself. The `hostnames` setting accepts
-[simple patterns](/libnetdata/simple_pattern/README.md). The `ports`, and `ips` settings accept negation (`!`) to
- deny specific values or asterisk alone to define all values.
+write `ports = 19999`, Netdata will collect only connections for itself. The `hostnames` setting accepts
+[simple patterns](/libnetdata/simple_pattern/README.md). The `ports` and `ips` settings accept negation (`!`) to deny
+specific values, or an asterisk alone to match all values.
 
 In the above example, Netdata will collect metrics for all ports between 1 and 443, with the exception of 53 (domain)
 and 145.
 
 The following options are available:
 
-- `ports`: Define the destination ports for Netdata to monitor.
-- `hostnames`: The list of hostnames that can be resolved to an IP address.
+- `ports`: Define the destination ports for Netdata to monitor.
+- `hostnames`: The list of hostnames that can be resolved to an IP address.
+- `ips`: The IP or range of IPs that you want to monitor. You can use IPv4 or IPv6 addresses, use dashes to define a
+  range of IPs, or use CIDR values. The default behavior is to only collect data for private IP addresses, but this can
+  be changed with the `ips` setting.
+
+By default, Netdata displays up to 500 dimensions on network connection charts. If there are more possible dimensions,
+they will be bundled into the `other` dimension. You can increase the number of shown dimensions by changing
+the `maximum dimensions` setting.
 
 The dimensions for the traffic charts are created using the destination IPs of the sockets by default. This can be
 changed setting `resolve hostname ips = yes` and restarting Netdata, after this Netdata will create dimensions using
@@ -274,8 +704,9 @@ the `hostnames` every time that is possible to resolve IPs to their hostnames.
 
 #### `[service name]`
 
-Netdata uses the list of services in `/etc/services` to plot network connection charts. If this file does not contain the
-name for a particular service you use in your infrastructure, you will need to add it to the `[service name]` section.
+Netdata uses the list of services in `/etc/services` to plot network connection charts. If this file does not contain
+the name for a particular service you use in your infrastructure, you will need to add it to the `[service name]`
+section.
 
 For example, Netdata's default port (`19999`) is not listed in `/etc/services`. To associate that port with the Netdata
 service in network connection charts, and thus see the name of the service instead of its port, define it:
 
@@ -287,7 +718,7 @@ service in network connection charts, and thus see the name of the service inste
 
 ### Sync configuration
 
-The sync configuration has specific options to disable monitoring for syscalls, as default option all syscalls are
+The sync configuration has specific options to disable monitoring for syscalls; by default, all syscalls are
 monitored.
 
 ```conf
@@ -300,6 +731,22 @@ monitored.
     sync_file_range = yes
 ```
 
+### Filesystem configuration
+
+The filesystem configuration has specific options to disable monitoring for filesystems; by default, all filesystems
+are monitored.
+
+```conf
+[filesystem]
+  btrfsdist = yes
+  ext4dist = yes
+  nfsdist = yes
+  xfsdist = yes
+  zfsdist = yes
+```
+
+The eBPF program `nfsdist` monitors only `nfs` mount points.
+
 ## Troubleshooting
 
 If the eBPF collector does not work, you can troubleshoot it by running the `ebpf.plugin` command and investigating its
@@ -330,17 +777,18 @@ curl -sSL https://raw.githubusercontent.com/netdata/kernel-collector/master/tool
 
 If this script returns no output, your system is ready to compile and run the eBPF collector.
 
-If you see a warning about a missing kernel configuration (`KPROBES KPROBES_ON_FTRACE HAVE_KPROBES BPF BPF_SYSCALL
-BPF_JIT`), you will need to recompile your kernel to support this configuration. The process of recompiling Linux
-kernels varies based on your distribution and version. Read the documentation for your system's distribution to learn
-more about the specific workflow for recompiling the kernel, ensuring that you set all the necessary
+If you see a warning about a missing kernel
+configuration (`KPROBES KPROBES_ON_FTRACE HAVE_KPROBES BPF BPF_SYSCALL BPF_JIT`), you will need to recompile your kernel
+to support this configuration. The process of recompiling Linux kernels varies based on your distribution and version.
+Read the documentation for your system's distribution to learn more about the specific workflow for recompiling the
+kernel, ensuring that you set all the necessary kernel configuration options listed above:
 
-- [Ubuntu](https://wiki.ubuntu.com/Kernel/BuildYourOwnKernel)
-- [Debian](https://kernel-team.pages.debian.net/kernel-handbook/ch-common-tasks.html#s-common-official)
-- [Fedora](https://fedoraproject.org/wiki/Building_a_custom_kernel)
-- [CentOS](https://wiki.centos.org/HowTos/Custom_Kernel)
-- [Arch Linux](https://wiki.archlinux.org/index.php/Kernel/Traditional_compilation)
-- [Slackware](https://docs.slackware.com/howtos:slackware_admin:kernelbuilding)
+- [Ubuntu](https://wiki.ubuntu.com/Kernel/BuildYourOwnKernel)
+- [Debian](https://kernel-team.pages.debian.net/kernel-handbook/ch-common-tasks.html#s-common-official)
+- [Fedora](https://fedoraproject.org/wiki/Building_a_custom_kernel)
+- [CentOS](https://wiki.centos.org/HowTos/Custom_Kernel)
+- [Arch Linux](https://wiki.archlinux.org/index.php/Kernel/Traditional_compilation)
+- [Slackware](https://docs.slackware.com/howtos:slackware_admin:kernelbuilding)
 
 ### Mount `debugfs` and `tracefs`
 
sudo mount -t tracefs nodev /sys/kernel/tracing
```
 
 If they are already mounted, you will see an error. You can also configure your system's `/etc/fstab` configuration to
-mount these filesystems on startup. More information can be found in the [ftrace documentation](https://www.kernel.org/doc/Documentation/trace/ftrace.txt).
+mount these filesystems on startup. More information can be found in
+the [ftrace documentation](https://www.kernel.org/doc/Documentation/trace/ftrace.txt).
 
 ## Performance
 
-eBPF monitoring is complex and produces a large volume of metrics. We've discovered scenarios where the eBPF plugin
+eBPF monitoring is complex and produces a large volume of metrics. We've discovered scenarios where the eBPF plugin
 significantly increases kernel memory usage by several hundred MB.
 
-If your node is experiencing high memory usage and there is no obvious culprit to be found in the `apps.mem` chart,
-consider testing for high kernel memory usage by [disabling eBPF monitoring](#configuration). Next,
-[restart Netdata](/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata` to see if system
-memory usage (see the `system.ram` chart) has dropped significantly.
+If your node is experiencing high memory usage and there is no obvious culprit to be found in the `apps.mem` chart,
+consider testing for high kernel memory usage by [disabling eBPF monitoring](#configuration). Next,
+[restart Netdata](/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata` to see if system memory
+usage (see the `system.ram` chart) has dropped significantly.
 
-Beginning with `v1.31`, kernel memory usage is configurable via the [`pid table size` setting](#ebpf-load-mode)
+Beginning with `v1.31`, kernel memory usage is configurable via the [`pid table size` setting](#ebpf-load-mode)
 in `ebpf.conf`.
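+
+For example, both options live in the `[global]` section of `ebpf.conf`; the values below are illustrative, not
+recommendations:
+
+```conf
+[global]
+    update every = 2
+    pid table size = 32768
+```
+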
## SELinux @@ -423,7 +872,7 @@ allow unconfined_service_t self:bpf { map_create map_read map_write prog_load pr Then compile your `netdata_ebpf.te` file with the following commands to create a binary that loads the new policies: ```bash -# checkmodule -M -m -o netdata_ebpf.mod netdata_ebpf.te +# checkmodule -M -m -o netdata_ebpf.mod netdata_ebpf.te # semodule_package -o netdata_ebpf.pp -m netdata_ebpf.mod ``` @@ -450,9 +899,4 @@ shows how the lockdown module impacts `ebpf.plugin` based on the selected option If you or your distribution compiled the kernel with the last combination, your system cannot load shared libraries required to run `ebpf.plugin`. -## Cleaning `kprobe_events` -The eBPF collector adds entries to the file `/sys/kernel/debug/tracing/kprobe_events`, and cleans them on exit, unless -another process prevents it. If you need to clean the eBPF entries safely, you can manually run the script -`/usr/libexec/netdata/plugins.d/reset_netdata_trace.sh`. - [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Febpf.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>) diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c index 5cc005f30..71a13e84f 100644 --- a/collectors/ebpf.plugin/ebpf.c +++ b/collectors/ebpf.plugin/ebpf.c @@ -55,7 +55,6 @@ char *ebpf_plugin_dir = PLUGINS_DIR; static char *ebpf_configured_log_dir = LOG_DIR; char *ebpf_algorithms[] = {"absolute", "incremental"}; -int update_every = 1; static int thread_finished = 0; int close_ebpf_plugin = 0; struct config collector_config = { .first_section = NULL, @@ -67,7 +66,7 @@ struct config collector_config = { .first_section = NULL, int running_on_kernel = 0; char kernel_string[64]; int ebpf_nprocs; -static int isrh; +int isrh = 0; uint32_t finalized_threads = 1; pthread_mutex_t lock; @@ -76,32 +75,109 @@ pthread_cond_t collect_data_cond_var; ebpf_module_t ebpf_modules[] = { { .thread_name = "process", .config_name = "process", .enabled = 0, .start_routine = ebpf_process_thread, - .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY, - .optional = 0, .apps_routine = ebpf_process_create_apps_charts, .maps = NULL, - .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL}, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = ebpf_process_create_apps_charts, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &process_config, + .config_file = NETDATA_PROCESS_CONFIG_FILE}, { .thread_name = "socket", .config_name = "socket", .enabled = 0, .start_routine = ebpf_socket_thread, - .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY, - .optional = 0, .apps_routine = ebpf_socket_create_apps_charts, .maps = NULL, - .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL}, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = ebpf_socket_create_apps_charts, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &socket_config, + .config_file = NETDATA_NETWORK_CONFIG_FILE}, { .thread_name = "cachestat", .config_name = "cachestat", .enabled = 0, .start_routine = 
ebpf_cachestat_thread, - .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY, - .optional = 0, .apps_routine = ebpf_cachestat_create_apps_charts, .maps = NULL, - .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL}, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = ebpf_cachestat_create_apps_charts, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &cachestat_config, + .config_file = NETDATA_CACHESTAT_CONFIG_FILE}, { .thread_name = "sync", .config_name = "sync", .enabled = 0, .start_routine = ebpf_sync_thread, - .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY, - .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL }, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &sync_config, + .config_file = NETDATA_SYNC_CONFIG_FILE}, { .thread_name = "dc", .config_name = "dc", .enabled = 0, .start_routine = ebpf_dcstat_thread, - .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY, - .optional = 0, .apps_routine = ebpf_dcstat_create_apps_charts, .maps = NULL, - .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE }, - { .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_time = 1, - .global_charts = 0, .apps_charts = 1, .mode = MODE_ENTRY, - .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = 0, .names = NULL }, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = ebpf_dcstat_create_apps_charts, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &dcstat_config, + .config_file = NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE}, + { .thread_name = "swap", .config_name = "swap", .enabled = 0, .start_routine = ebpf_swap_thread, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = ebpf_swap_create_apps_charts, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &swap_config, + .config_file = NETDATA_DIRECTORY_SWAP_CONFIG_FILE}, + { .thread_name = "vfs", .config_name = "vfs", .enabled = 0, .start_routine = ebpf_vfs_thread, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = ebpf_vfs_create_apps_charts, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &vfs_config, + .config_file = NETDATA_DIRECTORY_VFS_CONFIG_FILE }, + { .thread_name = "filesystem", .config_name = "filesystem", .enabled = 0, .start_routine = ebpf_filesystem_thread, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fs_config, + .config_file = NETDATA_FILESYSTEM_CONFIG_FILE}, + { .thread_name = "disk", 
.config_name = "disk", .enabled = 0, .start_routine = ebpf_disk_thread, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &disk_config, + .config_file = NETDATA_DISK_CONFIG_FILE}, + { .thread_name = "mount", .config_name = "mount", .enabled = 0, .start_routine = ebpf_mount_thread, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mount_config, + .config_file = NETDATA_MOUNT_CONFIG_FILE}, + { .thread_name = "fd", .config_name = "fd", .enabled = 0, .start_routine = ebpf_fd_thread, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = ebpf_fd_create_apps_charts, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fd_config, + .config_file = NETDATA_FD_CONFIG_FILE}, + { .thread_name = "hardirq", .config_name = "hardirq", .enabled = 0, .start_routine = ebpf_hardirq_thread, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &hardirq_config, + .config_file = NETDATA_HARDIRQ_CONFIG_FILE}, + { .thread_name = "softirq", .config_name = "softirq", .enabled = 0, .start_routine = ebpf_softirq_thread, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &softirq_config, + .config_file = NETDATA_SOFTIRQ_CONFIG_FILE}, + { .thread_name = "oomkill", .config_name = "oomkill", .enabled = 0, .start_routine = ebpf_oomkill_thread, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = ebpf_oomkill_create_apps_charts, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &oomkill_config, + .config_file = NETDATA_OOMKILL_CONFIG_FILE}, + { .thread_name = "shm", .config_name = "shm", .enabled = 0, .start_routine = ebpf_shm_thread, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, + .apps_routine = ebpf_shm_create_apps_charts, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &shm_config, + .config_file = NETDATA_DIRECTORY_SHM_CONFIG_FILE}, + { .thread_name = "mdflush", .config_name = "mdflush", .enabled = 0, .start_routine = ebpf_mdflush_thread, + .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO, + .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, + .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mdflush_config, + .config_file = 
NETDATA_DIRECTORY_MDFLUSH_CONFIG_FILE}, + { .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_every = EBPF_DEFAULT_UPDATE_EVERY, + .global_charts = 0, .apps_charts = CONFIG_BOOLEAN_NO, .cgroup_charts = CONFIG_BOOLEAN_NO, + .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = 0, .names = NULL, + .cfg = NULL, .config_name = NULL}, }; // Link with apps.plugin ebpf_process_stat_t *global_process_stat = NULL; +// Link with cgroup.plugin +netdata_ebpf_cgroup_shm_t shm_ebpf_cgroup = {NULL, NULL}; +int shm_fd_ebpf_cgroup = -1; +sem_t *shm_sem_ebpf_cgroup = SEM_FAILED; +pthread_mutex_t mutex_cgroup_shm; + //Network viewer ebpf_network_viewer_options_t network_viewer_opt; @@ -155,6 +231,33 @@ static void ebpf_exit(int sig) freez(dcstat_pid); } + if (ebpf_modules[EBPF_MODULE_SWAP_IDX].enabled) { + ebpf_modules[EBPF_MODULE_SWAP_IDX].enabled = 0; + clean_swap_pid_structures(); + freez(swap_pid); + } + + if (ebpf_modules[EBPF_MODULE_VFS_IDX].enabled) { + ebpf_modules[EBPF_MODULE_VFS_IDX].enabled = 0; + clean_vfs_pid_structures(); + freez(vfs_pid); + } + + if (ebpf_modules[EBPF_MODULE_FD_IDX].enabled) { + ebpf_modules[EBPF_MODULE_FD_IDX].enabled = 0; + clean_fd_pid_structures(); + freez(fd_pid); + } + + if (ebpf_modules[EBPF_MODULE_SHM_IDX].enabled) { + ebpf_modules[EBPF_MODULE_SHM_IDX].enabled = 0; + clean_shm_pid_structures(); + freez(shm_pid); + } + + ebpf_close_cgroup_shm(); + + ebpf_clean_cgroup_pids(); /* int ret = fork(); if (ret < 0) // error @@ -241,8 +344,7 @@ inline void write_end_chart() */ void write_chart_dimension(char *dim, long long value) { - int ret = printf("SET %s = %lld\n", dim, value); - UNUSED(ret); + printf("SET %s = %lld\n", dim, value); } /** @@ -253,7 +355,7 @@ void write_chart_dimension(char *dim, long long value) * @param move the pointer with the values that will be published * @param end the number of values that will be written on standard output * - * @return It returns a variable tha maps the charts that did not have zero values. + * @return It returns a variable that maps the charts that did not have zero values. */ void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end) { @@ -322,7 +424,7 @@ void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long * @param dread the dimension name * @param vread the value for previous dimension * - * @return It returns a variable tha maps the charts that did not have zero values. + * @return It returns a variable that maps the charts that did not have zero values. */ void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, char *dread, long long vread) { @@ -334,6 +436,36 @@ void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, c write_end_chart(); } +/** + * Write chart cmd on standard output + * + * @param type chart type + * @param id chart id + * @param title chart title + * @param units units label + * @param family group name used to attach the chart on dashboard + * @param charttype chart type + * @param context chart context + * @param order chart order + * @param update_every update interval used by plugin + * @param module chart module name, this is the eBPF thread. 
+ */ +void ebpf_write_chart_cmd(char *type, char *id, char *title, char *units, char *family, + char *charttype, char *context, int order, int update_every, char *module) +{ + printf("CHART %s.%s '' '%s' '%s' '%s' '%s' '%s' %d %d '' 'ebpf.plugin' '%s'\n", + type, + id, + title, + units, + (family)?family:"", + (context)?context:"", + (charttype)?charttype:"", + order, + update_every, + module); +} + /** * Write chart cmd on standard output * @@ -345,11 +477,12 @@ void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, c * @param charttype chart type * @param context chart context * @param order chart order + * @param update_every value to overwrite the update frequency set by the server. */ -void ebpf_write_chart_cmd(char *type, char *id, char *title, char *units, char *family, - char *charttype, char *context, int order) +void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *units, char *family, + char *charttype, char *context, int order, int update_every) { - printf("CHART %s.%s '' '%s' '%s' '%s' '%s' '%s' %d %d\n", + printf("CHART %s.%s '' '%s' '%s' '%s' '%s' '%s' %d %d 'obsolete'\n", type, id, title, @@ -395,17 +528,19 @@ void ebpf_create_global_dimension(void *ptr, int end) /** * Call write_chart_cmd to create the charts * - * @param type chart type - * @param id chart id - * @param title chart title - * @param units axis label - * @param family group name used to attach the chart on dashboard - * @param context chart context - * @param charttype chart type - * @param order order number of the specified chart - * @param ncd a pointer to a function called to create dimensions - * @param move a pointer for a structure that has the dimensions - * @param end number of dimensions for the chart created + * @param type chart type + * @param id chart id + * @param title chart title + * @param units axis label + * @param family group name used to attach the chart on dashboard + * @param context chart context + * @param charttype chart type + * @param order order number of the specified chart + * @param ncd a pointer to a function called to create dimensions + * @param move a pointer for a structure that has the dimensions + * @param end number of dimensions for the chart created + * @param update_every update interval used with chart. + * @param module chart module name, this is the eBPF thread. */ void ebpf_create_chart(char *type, char *id, @@ -417,11 +552,15 @@ void ebpf_create_chart(char *type, int order, void (*ncd)(void *, int), void *move, - int end) + int end, + int update_every, + char *module) { - ebpf_write_chart_cmd(type, id, title, units, family, charttype, context, order); + ebpf_write_chart_cmd(type, id, title, units, family, charttype, context, order, update_every, module); - ncd(move, end); + if (ncd) { + ncd(move, end); + } } /** @@ -435,12 +574,15 @@ void ebpf_create_chart(char *type, * @param order the chart order * @param algorithm the algorithm used by dimension * @param root structure used to create the dimensions. + * @param update_every update interval used by plugin + * @param module chart module name, this is the eBPF thread. 
 */
 void ebpf_create_charts_on_apps(char *id, char *title, char *units, char *family, char *charttype, int order,
-                                char *algorithm, struct target *root)
+                                char *algorithm, struct target *root, int update_every, char *module)
 {
     struct target *w;
-    ebpf_write_chart_cmd(NETDATA_APPS_FAMILY, id, title, units, family, charttype, NULL, order);
+    ebpf_write_chart_cmd(NETDATA_APPS_FAMILY, id, title, units, family, charttype, NULL, order,
+                         update_every, module);
 
     for (w = root; w; w = w->next) {
         if (unlikely(w->exposed))
@@ -448,6 +590,31 @@ void ebpf_create_charts_on_apps(char *id, char *title, char *units, char *family
     }
 }
 
+/**
+ * Call the necessary functions to write a histogram chart.
+ *
+ * @param family     family name
+ * @param name       chart name
+ * @param hist       histogram values
+ * @param dimensions dimension values.
+ * @param end        number of bins that will be sent to Netdata.
+ */
+void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end)
+{
+    write_begin_chart(family, name);
+
+    uint32_t i;
+    for (i = 0; i < end; i++) {
+        write_chart_dimension(dimensions[i], (long long) hist[i]);
+    }
+
+    write_end_chart();
+
+    fflush(stdout);
+}
+
 /*****************************************************************
  *
  *  FUNCTIONS TO DEFINE OPTIONS
 *
@@ -503,43 +670,68 @@ static inline void ebpf_set_thread_mode(netdata_run_mode_t lmode)
 /**
 * Enable specific charts selected by user.
 *
- * @param em     the structure that will be changed
- * @param enable the status about the apps charts.
+ * @param em             the structure that will be changed
+ * @param disable_apps   the status about the apps charts.
+ * @param disable_cgroup the status about the cgroups charts.
 */
-static inline void ebpf_enable_specific_chart(struct ebpf_module *em, int enable)
+static inline void ebpf_enable_specific_chart(struct ebpf_module *em, int disable_apps, int disable_cgroup)
 {
-    em->enabled = 1;
-    if (!enable) {
-        em->apps_charts = 1;
+    em->enabled = CONFIG_BOOLEAN_YES;
+
+    // oomkill stores data inside the apps submenu, so it always needs apps enabled for the plugin to create
+    // its chart; without this comparison eBPF.plugin would try to store invalid data when apps is disabled.
+    if (!disable_apps || !strcmp(em->thread_name, "oomkill")) {
+        em->apps_charts = CONFIG_BOOLEAN_YES;
     }
-    em->global_charts = 1;
+
+    if (!disable_cgroup) {
+        em->cgroup_charts = CONFIG_BOOLEAN_YES;
+    }
+
+    em->global_charts = CONFIG_BOOLEAN_YES;
 }
 
 /**
 * Enable all charts
 *
- * @param apps what is the current status of apps
+ * @param apps    what is the current status of apps
+ * @param cgroups what is the current status of cgroups
 */
-static inline void ebpf_enable_all_charts(int apps)
+static inline void ebpf_enable_all_charts(int apps, int cgroups)
 {
     int i;
     for (i = 0; ebpf_modules[i].thread_name; i++) {
-        ebpf_enable_specific_chart(&ebpf_modules[i], apps);
+        ebpf_enable_specific_chart(&ebpf_modules[i], apps, cgroups);
     }
 }
 
+/**
+ * Disable all global charts
+ *
+ * Disable the global charts for all modules.
+ */
+static inline void disable_all_global_charts()
+{
+    int i;
+    for (i = 0; ebpf_modules[i].thread_name; i++) {
+        ebpf_modules[i].enabled = 0;
+        ebpf_modules[i].global_charts = 0;
+    }
+}
+
 /**
 * Enable the specified chart group
 *
 * @param idx            the index of ebpf_modules that I am enabling
 * @param disable_apps   should I keep apps charts?
 */ -static inline void ebpf_enable_chart(int idx, int disable_apps) +static inline void ebpf_enable_chart(int idx, int disable_apps, int disable_cgroup) { int i; for (i = 0; ebpf_modules[i].thread_name; i++) { if (i == idx) { - ebpf_enable_specific_chart(&ebpf_modules[i], disable_apps); + ebpf_enable_specific_chart(&ebpf_modules[i], disable_apps, disable_cgroup); break; } } } @@ -558,6 +750,19 @@ static inline void ebpf_disable_apps() } } +/** + * Disable cgroups + * + * Disable cgroup charts, leaving the other chart groups untouched. + */ +static inline void ebpf_disable_cgroups() +{ + int i; + for (i = 0; ebpf_modules[i].thread_name; i++) { + ebpf_modules[i].cgroup_charts = 0; + } +} + /** * Print help on standard error so the user knows how to use the collector. */ @@ -579,39 +784,140 @@ void ebpf_print_help() " Released under GNU General Public License v3 or later.\n" " All rights reserved.\n" "\n" - " This program is a data collector plugin for netdata.\n" + " This eBPF.plugin is a data collector plugin for netdata.\n" "\n" - " Available command line options:\n" + " This plugin only accepts long options with one or two dashes. The available command line options are:\n" "\n" - " SECONDS Set the data collection frequency.\n" + " SECONDS Set the data collection frequency.\n" "\n" - " --help or -h Show this help.\n" + " [-]-help Show this help.\n" "\n" - " --version or -v Show software version.\n" + " [-]-version Show software version.\n" "\n" - " --global or -g Disable charts per application.\n" + " [-]-global Disable charts per application and cgroup.\n" "\n" - " --all or -a Enable all chart groups (global and apps), unless -g is also given.\n" + " [-]-all Enable all chart groups (global, apps, and cgroup), unless -g is also given.\n" "\n" - " --cachestat or -c Enable charts related to process run time.\n" + " [-]-cachestat Enable charts related to page cache.\n" "\n" - " --dcstat or -d Enable charts related to directory cache.\n" + " [-]-dcstat Enable charts related to directory cache.\n" "\n" - " --net or -n Enable network viewer charts.\n" + " [-]-disk Enable charts related to disk monitoring.\n" "\n" - " --process or -p Enable charts related to process run time.\n" + " [-]-filesystem Enable charts related to filesystem run time.\n" "\n" - " --return or -r Run the collector in return mode.\n" - "\n", - " --sync or -s Enable chart related to sync run time.\n" + " [-]-hardirq Enable charts related to hard IRQ latency.\n" + "\n" + " [-]-mdflush Enable charts related to multi-device flush.\n" + "\n" + " [-]-mount Enable charts related to mount monitoring.\n" "\n" + " [-]-net Enable network viewer charts.\n" + "\n" + " [-]-oomkill Enable charts related to OOM kill tracking.\n" + "\n" + " [-]-process Enable charts related to process run time.\n" + "\n" + " [-]-return Run the collector in return mode.\n" + "\n" + " [-]-shm Enable charts related to shared memory tracking.\n" + "\n" + " [-]-softirq Enable charts related to soft IRQ latency.\n" + "\n" + " [-]-sync Enable charts related to sync run time.\n" + "\n" + " [-]-swap Enable charts related to swap run time.\n" + "\n" + " [-]-vfs Enable charts related to vfs run time.\n" + "\n", VERSION, (year >= 116) ? year + 1900 : 2020); } /***************************************************************** * - * AUXILIAR FUNCTIONS USED DURING INITIALIZATION + * TRACEPOINT MANAGEMENT FUNCTIONS + * + *****************************************************************/ + +/** + * Enable a tracepoint. + * + * @param tp the tracepoint to enable. + * + * @return 0 on success, -1 on error.
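+ *
+ * Threads typically declare their tracepoints as an array terminated by a
+ * NULL class and enable the whole set with ebpf_enable_tracepoints(); a
+ * minimal sketch (the class/event pair is illustrative):
+ *
+ *   static ebpf_tracepoint_t tps[] = {
+ *       {.enabled = false, .class = "block", .event = "block_rq_issue"},
+ *       {.enabled = false, .class = NULL, .event = NULL}
+ *   };
+ *   if (ebpf_enable_tracepoints(tps) != 1)
+ *       return;   // the tracepoint could not be enabled, abort thread setup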
 + */ +int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp) +{ + int test = ebpf_is_tracepoint_enabled(tp->class, tp->event); + + // err? + if (test == -1) { + return -1; + } + // disabled? + else if (test == 0) { + // enable it then. + if (ebpf_enable_tracing_values(tp->class, tp->event)) { + return -1; + } + } + + // enabled now or already was. + tp->enabled = true; + + return 0; +} + +/** + * Disable a tracepoint if it's enabled. + * + * @param tp the tracepoint to disable. + * + * @return 0 on success, -1 on error. + */ +int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp) +{ + int test = ebpf_is_tracepoint_enabled(tp->class, tp->event); + + // err? + if (test == -1) { + return -1; + } + // enabled? + else if (test == 1) { + // disable it then. + if (ebpf_disable_tracing_values(tp->class, tp->event)) { + return -1; + } + } + + // disabled now or already was. + tp->enabled = false; + + return 0; +} + +/** + * Enable every tracepoint in a list terminated by an entry whose class is NULL. + * + * @param tps the list of tracepoints. + * + * @return the number of tracepoints enabled successfully. + */ +uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps) +{ + uint32_t cnt = 0; + for (int i = 0; tps[i].class != NULL; i++) { + if (ebpf_enable_tracepoint(&tps[i]) == -1) { + infoerr("failed to enable tracepoint %s:%s", + tps[i].class, tps[i].event); + } + else { + cnt += 1; + } + } + return cnt; +} + +/***************************************************************** + * + * AUXILIARY FUNCTIONS USED DURING INITIALIZATION + * + *****************************************************************/ @@ -745,20 +1051,6 @@ static void ebpf_allocate_common_vectors() global_process_stat = callocz((size_t)ebpf_nprocs, sizeof(ebpf_process_stat_t)); } -/** - * Fill the ebpf_data structure with default values - * - * @param ef the pointer to set default values - */ -void fill_ebpf_data(ebpf_data_t *ef) -{ - memset(ef, 0, sizeof(ebpf_data_t)); - ef->kernel_string = kernel_string; - ef->running_on_kernel = running_on_kernel; - ef->map_fd = callocz(EBPF_MAX_MAPS, sizeof(int)); - ef->isrh = isrh; -} - /** * Define how to load the ebpf programs * @@ -778,13 +1070,16 @@ static inline void how_to_load(char *ptr) * Update interval * * Update the default interval with the value supplied by the user + * + * @param update_every value to overwrite the update frequency set by the server. */ -static void ebpf_update_interval() +static void ebpf_update_interval(int update_every) { int i; - int value = (int) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_UPDATE_EVERY, 1); + int value = (int) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_UPDATE_EVERY, + update_every); for (i = 0; ebpf_modules[i].thread_name; i++) { - ebpf_modules[i].update_time = value; + ebpf_modules[i].update_every = value; } } @@ -807,9 +1102,11 @@ static void ebpf_update_table_size() /** * Read collector values * - * @param disable_apps variable to store information related to apps. + * @param disable_apps variable to store information related to apps. + * @param disable_cgroups variable to store information related to cgroups. + * @param update_every value to overwrite the update frequency set by the server.
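+ *
+ * The values read here live in ebpf.d.conf; a minimal example of the global
+ * section this function consumes (values are illustrative):
+ *
+ *   [global]
+ *       ebpf load mode = entry
+ *       apps = yes
+ *       cgroups = no
+ *       update every = 5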
 */ -static void read_collector_values(int *disable_apps) +static void read_collector_values(int *disable_apps, int *disable_cgroups, int update_every) { // Read global section char *value; @@ -822,7 +1119,7 @@ static void read_collector_values(int *disable_apps) how_to_load(value); - ebpf_update_interval(); + ebpf_update_interval(update_every); ebpf_update_table_size(); @@ -837,12 +1134,17 @@ static void read_collector_values(int *disable_apps) } *disable_apps = (int)enabled; + // The `cgroups` option is stated positively in the configuration, so its value must be inverted to obtain + // the disable flag; the same pattern is used for cgroups and apps. + enabled = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_CGROUP, CONFIG_BOOLEAN_NO); + *disable_cgroups = (enabled == CONFIG_BOOLEAN_NO)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_NO; + // Read ebpf programs section enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, ebpf_modules[EBPF_MODULE_PROCESS_IDX].config_name, CONFIG_BOOLEAN_YES); int started = 0; if (enabled) { - ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, *disable_apps); + ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, *disable_apps, *disable_cgroups); started++; } @@ -855,7 +1157,7 @@ static void read_collector_values(int *disable_apps) CONFIG_BOOLEAN_NO); if (enabled) { - ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_apps); + ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_apps, *disable_cgroups); // Read network viewer section if network viewer is enabled // This is kept here for backward compatibility parse_network_viewer_section(&collector_config); @@ -869,13 +1171,13 @@ static void read_collector_values(int *disable_apps) if (!enabled) enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network connections", CONFIG_BOOLEAN_NO); - ebpf_modules[EBPF_MODULE_SOCKET_IDX].optional = enabled; + ebpf_modules[EBPF_MODULE_SOCKET_IDX].optional = (int)enabled; enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "cachestat", CONFIG_BOOLEAN_NO); if (enabled) { - ebpf_enable_chart(EBPF_MODULE_CACHESTAT_IDX, *disable_apps); + ebpf_enable_chart(EBPF_MODULE_CACHESTAT_IDX, *disable_apps, *disable_cgroups); started++; } @@ -883,19 +1185,96 @@ static void read_collector_values(int *disable_apps) CONFIG_BOOLEAN_YES); if (enabled) { - ebpf_enable_chart(EBPF_MODULE_SYNC_IDX, *disable_apps); + ebpf_enable_chart(EBPF_MODULE_SYNC_IDX, *disable_apps, *disable_cgroups); started++; } enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "dcstat", CONFIG_BOOLEAN_NO); if (enabled) { - ebpf_enable_chart(EBPF_MODULE_DCSTAT_IDX, *disable_apps); + ebpf_enable_chart(EBPF_MODULE_DCSTAT_IDX, *disable_apps, *disable_cgroups); + started++; + } + + enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "swap", + CONFIG_BOOLEAN_NO); + if (enabled) { + ebpf_enable_chart(EBPF_MODULE_SWAP_IDX, *disable_apps, *disable_cgroups); + started++; + } + + enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "vfs", + CONFIG_BOOLEAN_NO); + if (enabled) { + ebpf_enable_chart(EBPF_MODULE_VFS_IDX, *disable_apps, *disable_cgroups); + started++; + } + + enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "filesystem", + CONFIG_BOOLEAN_NO); + if (enabled) { + ebpf_enable_chart(EBPF_MODULE_FILESYSTEM_IDX, *disable_apps, *disable_cgroups); + started++; + } + + enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "disk", + CONFIG_BOOLEAN_NO); + if (enabled) { + 
ebpf_enable_chart(EBPF_MODULE_DISK_IDX, *disable_apps, *disable_cgroups); + started++; + } + + enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "mount", + CONFIG_BOOLEAN_YES); + if (enabled) { + ebpf_enable_chart(EBPF_MODULE_MOUNT_IDX, *disable_apps, *disable_cgroups); + started++; + } + + enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "fd", + CONFIG_BOOLEAN_YES); + if (enabled) { + ebpf_enable_chart(EBPF_MODULE_FD_IDX, *disable_apps, *disable_cgroups); + started++; + } + + enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "hardirq", + CONFIG_BOOLEAN_YES); + if (enabled) { + ebpf_enable_chart(EBPF_MODULE_HARDIRQ_IDX, *disable_apps, *disable_cgroups); + started++; + } + + enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "softirq", + CONFIG_BOOLEAN_YES); + if (enabled) { + ebpf_enable_chart(EBPF_MODULE_SOFTIRQ_IDX, *disable_apps, *disable_cgroups); + started++; + } + + enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "oomkill", + CONFIG_BOOLEAN_YES); + if (enabled) { + ebpf_enable_chart(EBPF_MODULE_OOMKILL_IDX, *disable_apps, *disable_cgroups); + started++; + } + + enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "shm", + CONFIG_BOOLEAN_YES); + if (enabled) { + ebpf_enable_chart(EBPF_MODULE_SHM_IDX, *disable_apps, *disable_cgroups); + started++; + } + + enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "mdflush", + CONFIG_BOOLEAN_NO); + if (enabled) { + ebpf_enable_chart(EBPF_MODULE_MDFLUSH_IDX, *disable_apps, *disable_cgroups); started++; } if (!started) { - ebpf_enable_all_charts(*disable_apps); + ebpf_enable_all_charts(*disable_apps, *disable_cgroups); // Read network viewer section // This is kept here for backward compatibility parse_network_viewer_section(&collector_config); @@ -906,12 +1285,14 @@ static void read_collector_values(int *disable_apps) /** * Load collector config * - * @param path the path where the file ebpf.conf is stored. - * @param disable_apps variable to store the information about apps plugin status. + * @param path the path where the file ebpf.conf is stored. + * @param disable_apps variable to store the information about apps plugin status. + * @param disable_cgroups variable to store the information about cgroups plugin status. + * @param update_every value to overwrite the update frequency set by the server. * * @return 0 on success and -1 otherwise. */ -static int load_collector_config(char *path, int *disable_apps) +static int load_collector_config(char *path, int *disable_apps, int *disable_cgroups, int update_every) { char lpath[4096]; @@ -923,7 +1304,7 @@ static int load_collector_config(char *path, int *disable_apps) } } - read_collector_values(disable_apps); + read_collector_values(disable_apps, disable_cgroups, update_every); return 0; } @@ -957,6 +1338,21 @@ void set_global_variables() isrh = get_redhat_release(); pid_max = get_system_pid_max(); + running_on_kernel = ebpf_get_kernel_version(); + ebpf_update_kernel(kernel_string, 63, isrh, running_on_kernel); +} + +/** + * Load thread configuration + * + * Apply the per-thread configuration file of each module.
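+ *
+ * Each thread owns a file under ebpf.d/ (cachestat.conf, process.conf, ...);
+ * a per-thread override can be as small as (illustrative values):
+ *
+ *   [global]
+ *       update every = 10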
 + */ +static inline void ebpf_load_thread_config() +{ + int i; + for (i = 0; ebpf_modules[i].thread_name; i++) { + ebpf_update_module(&ebpf_modules[i]); + } } /** @@ -965,23 +1361,36 @@ void set_global_variables() * @param argc the number of arguments * @param argv the pointer to the arguments */ -static void parse_args(int argc, char **argv) +static void ebpf_parse_args(int argc, char **argv) { - int enabled = 0; int disable_apps = 0; + int disable_cgroups = 1; int freq = 0; int option_index = 0; + uint64_t select_threads = 0; static struct option long_options[] = { - {"help", no_argument, 0, 'h' }, - {"version", no_argument, 0, 'v' }, - {"global", no_argument, 0, 'g' }, - {"all", no_argument, 0, 'a' }, - {"cachestat", no_argument, 0, 'c' }, - {"dcstat", no_argument, 0, 'd' }, - {"net", no_argument, 0, 'n' }, - {"process", no_argument, 0, 'p' }, - {"return", no_argument, 0, 'r' }, - {"sync", no_argument, 0, 's' }, + {"process", no_argument, 0, 0 }, + {"net", no_argument, 0, 0 }, + {"cachestat", no_argument, 0, 0 }, + {"sync", no_argument, 0, 0 }, + {"dcstat", no_argument, 0, 0 }, + {"swap", no_argument, 0, 0 }, + {"vfs", no_argument, 0, 0 }, + {"filesystem", no_argument, 0, 0 }, + {"disk", no_argument, 0, 0 }, + {"mount", no_argument, 0, 0 }, + {"filedescriptor", no_argument, 0, 0 }, + {"hardirq", no_argument, 0, 0 }, + {"softirq", no_argument, 0, 0 }, + {"oomkill", no_argument, 0, 0 }, + {"shm", no_argument, 0, 0 }, + {"mdflush", no_argument, 0, 0 }, + /* INSERT NEW THREADS BEFORE THIS COMMENT TO KEEP COMPATIBILITY WITH enum ebpf_module_indexes */ + {"all", no_argument, 0, 0 }, + {"version", no_argument, 0, 0 }, + {"help", no_argument, 0, 0 }, + {"global", no_argument, 0, 0 }, + {"return", no_argument, 0, 0 }, {0, 0, 0, 0} }; @@ -995,83 +1404,166 @@ static void parse_args(int argc, char **argv) } } + if (!freq) + freq = EBPF_DEFAULT_UPDATE_EVERY; + + if (load_collector_config(ebpf_user_config_dir, &disable_apps, &disable_cgroups, freq)) { + info( + "Does not have a configuration file inside `%s/ebpf.d.conf`. It will try to load the stock file.", + ebpf_user_config_dir); + if (load_collector_config(ebpf_stock_config_dir, &disable_apps, &disable_cgroups, freq)) { + info("Does not have a stock file. It is starting with default options."); + } + } + + ebpf_load_thread_config(); + while (1) { - int c = getopt_long(argc, argv, "hvgacdnprs", long_options, &option_index); + int c = getopt_long_only(argc, argv, "", long_options, &option_index); if (c == -1) break; - switch (c) { - case 'h': { - ebpf_print_help(); - exit(0); + switch (option_index) { + case EBPF_MODULE_PROCESS_IDX: { + select_threads |= 1<<EBPF_MODULE_PROCESS_IDX; + break; + } + case EBPF_MODULE_SOCKET_IDX: { + select_threads |= 1<<EBPF_MODULE_SOCKET_IDX; + break; + } + case EBPF_MODULE_CACHESTAT_IDX: { + select_threads |= 1<<EBPF_MODULE_CACHESTAT_IDX; + break; + } + case EBPF_MODULE_SYNC_IDX: { + select_threads |= 1<<EBPF_MODULE_SYNC_IDX; + break; + } + case EBPF_MODULE_DCSTAT_IDX: { + select_threads |= 1<<EBPF_MODULE_DCSTAT_IDX; + break; + } + case EBPF_MODULE_SWAP_IDX: { + select_threads |= 1<<EBPF_MODULE_SWAP_IDX; + break; + } + case EBPF_MODULE_VFS_IDX: { + select_threads |= 1<<EBPF_MODULE_VFS_IDX; + break; + } + case EBPF_MODULE_FILESYSTEM_IDX: { + select_threads |= 1<<EBPF_MODULE_FILESYSTEM_IDX; + break; + } + case EBPF_MODULE_DISK_IDX: { + select_threads |= 1<<EBPF_MODULE_DISK_IDX; + break; + } + case EBPF_MODULE_MOUNT_IDX: { + select_threads |= 1<<EBPF_MODULE_MOUNT_IDX; + break; + } + case EBPF_MODULE_FD_IDX: { + select_threads |= 1<<EBPF_MODULE_FD_IDX; + break; + } + case EBPF_MODULE_HARDIRQ_IDX: { + select_threads |= 1<<EBPF_MODULE_HARDIRQ_IDX; + break; + } + case EBPF_MODULE_SOFTIRQ_IDX: { + select_threads |= 1<<EBPF_MODULE_SOFTIRQ_IDX; + break; + } + case EBPF_MODULE_OOMKILL_IDX: { + select_threads |= 1<<EBPF_MODULE_OOMKILL_IDX; + break; + } + case EBPF_MODULE_SHM_IDX: { + select_threads |= 1<<EBPF_MODULE_SHM_IDX; + break; + } + case EBPF_MODULE_MDFLUSH_IDX: { + select_threads |= 1<<EBPF_MODULE_MDFLUSH_IDX; + break; + } + case EBPF_OPTION_ALL_CHARTS: { + disable_apps = 0; + disable_cgroups = 0; + break; + } + case EBPF_OPTION_VERSION: { + printf("ebpf.plugin %s\n", VERSION); + exit(0); + } + case EBPF_OPTION_HELP: { + ebpf_print_help(); + exit(0); + } + case EBPF_OPTION_GLOBAL_CHART: { + disable_apps = 1; + disable_cgroups = 1; + break; + } + case EBPF_OPTION_RETURN_MODE: { + ebpf_set_thread_mode(MODE_RETURN); + break; + } + default: { + break; + } + } + } - if (freq > 0) { - update_every = freq; - } + if (disable_apps || disable_cgroups) { + if (disable_apps) + ebpf_disable_apps(); - if (load_collector_config(ebpf_user_config_dir, &disable_apps)) { - info( - "Does not have a configuration file inside `%s/ebpf.d.conf. It will try to load stock file.", - ebpf_user_config_dir); - if (load_collector_config(ebpf_stock_config_dir, &disable_apps)) { - info("Does not have a stock file. 
It is starting with default options."); - } else { - enabled = 1; - } - } else { - enabled = 1; - } + if (disable_cgroups) + ebpf_disable_cgroups(); - if (!enabled) { - ebpf_enable_all_charts(disable_apps); -#ifdef NETDATA_INTERNAL_CHECKS - info("EBPF running with all charts, because neither \"-n\" or \"-p\" was given."); -#endif + ebpf_enable_all_charts(disable_apps, disable_cgroups); } - if (disable_apps) - return; + if (select_threads) { + disable_all_global_charts(); + uint64_t idx; + for (idx = 0; idx < EBPF_OPTION_ALL_CHARTS; idx++) { + if (select_threads & 1<<idx) + ebpf_enable_chart((int)idx, disable_apps, disable_cgroups); + } + } +} + +/** + * Update PID file + * + * Write the current PID inside the PID file. + * + * @param filename the file to be updated. + * @param pid the current PID. + */ +static void ebpf_update_pid_file(char *filename, pid_t pid) +{ + FILE *fp = fopen(filename, "w"); + if (!fp) + return; + + fprintf(fp, "%d", (int)pid); + + fclose(fp); +} + +/** + * Read previous PID + * + * Read the PID stored in the PID file by a previous instance. + * + * @param filename the file that contains the PID. + * + * @return It returns the PID read, or zero when no value could be read. + */ +static pid_t ebpf_read_previous_pid(char *filename) +{ + FILE *fp = fopen(filename, "r"); + if (!fp) + return 0; + + char buffer[64]; + size_t length = fread(buffer, sizeof(*buffer), 63, fp); + pid_t old_pid = 0; + if (length) { + if (length > 63) + length = 63; + + buffer[length] = '\0'; + old_pid = (pid_t)str2uint32_t(buffer); + } + fclose(fp); + + return old_pid; +} + +/** + * Kill previous process + * + * Kill the previous process if it was not properly closed. + * + * @param filename the full name of the PID file. + * @param pid the PID of the current process + */ +static void ebpf_kill_previous_process(char *filename, pid_t pid) +{ + pid_t old_pid = ebpf_read_previous_pid(filename); + if (!old_pid) + return; + + // If no name can be resolved, the previous process is not running and there is nothing to kill + char *prev_name = ebpf_get_process_name(old_pid); + if (!prev_name) + return; + + char *current_name = ebpf_get_process_name(pid); + + if (!strcmp(prev_name, current_name)) + kill(old_pid, SIGKILL); + + freez(prev_name); + freez(current_name); + + // wait 300 milliseconds before starting the new plugin + sleep_usec(USEC_PER_MS * 300); +} + +/** + * Manage PID + * + * This function kills another eBPF instance when necessary and updates the PID file. + * + * @param pid the PID of the current process + */ +static void ebpf_manage_pid(pid_t pid) +{ + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s/ebpf.d/ebpf.pid", netdata_configured_host_prefix, ebpf_plugin_dir); + + ebpf_kill_previous_process(filename, pid); + ebpf_update_pid_file(filename, pid); +} + /** * Entry point * @@ -1143,9 +1755,9 @@ int main(int argc, char **argv) { set_global_variables(); - parse_args(argc, argv); + ebpf_parse_args(argc, argv); + ebpf_manage_pid(getpid()); - running_on_kernel = get_kernel_version(kernel_string, 63); if (!has_condition_to_run(running_on_kernel)) { error("The current collector cannot run on this kernel."); return 2; @@ -1203,6 +1815,28 @@ int main(int argc, char **argv) NULL, NULL, ebpf_modules[EBPF_MODULE_SYNC_IDX].start_routine}, {"EBPF DCSTAT" , NULL, NULL, 1, NULL, NULL, ebpf_modules[EBPF_MODULE_DCSTAT_IDX].start_routine}, + {"EBPF SWAP" , NULL, NULL, 1, + NULL, NULL, ebpf_modules[EBPF_MODULE_SWAP_IDX].start_routine}, + {"EBPF VFS" , NULL, NULL, 1, + NULL, NULL, ebpf_modules[EBPF_MODULE_VFS_IDX].start_routine}, + {"EBPF FILESYSTEM" , NULL, NULL, 1, + NULL, NULL, ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].start_routine}, + {"EBPF DISK" , NULL, NULL, 1, + NULL, NULL, ebpf_modules[EBPF_MODULE_DISK_IDX].start_routine}, + {"EBPF MOUNT" , NULL, NULL, 1, + NULL, NULL, ebpf_modules[EBPF_MODULE_MOUNT_IDX].start_routine}, + {"EBPF FD" , NULL, NULL, 1, + NULL, NULL, ebpf_modules[EBPF_MODULE_FD_IDX].start_routine}, + {"EBPF HARDIRQ" , NULL, NULL, 1, + NULL, NULL, ebpf_modules[EBPF_MODULE_HARDIRQ_IDX].start_routine}, + {"EBPF SOFTIRQ" , NULL, NULL, 1, + NULL, NULL, ebpf_modules[EBPF_MODULE_SOFTIRQ_IDX].start_routine}, + {"EBPF OOMKILL" , NULL, NULL, 1, + NULL, NULL, ebpf_modules[EBPF_MODULE_OOMKILL_IDX].start_routine}, + {"EBPF SHM" , NULL, NULL, 1, + NULL, NULL, ebpf_modules[EBPF_MODULE_SHM_IDX].start_routine}, + {"EBPF MDFLUSH" , NULL, NULL, 1, + NULL, NULL, 
ebpf_modules[EBPF_MODULE_MDFLUSH_IDX].start_routine}, {NULL , NULL, NULL, 0, NULL, NULL, NULL} }; diff --git a/collectors/ebpf.plugin/ebpf.d.conf b/collectors/ebpf.plugin/ebpf.d.conf index ef6ff8145..845b711c9 100644 --- a/collectors/ebpf.plugin/ebpf.d.conf +++ b/collectors/ebpf.plugin/ebpf.d.conf @@ -6,9 +6,10 @@ # `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates # new charts for the return of these functions, such as errors. # -# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`. -# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to -# 'no'. +# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin` +# or `cgroups.plugin`. +# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change the settings +# `apps` and `cgroups` to 'no'. # # The `update every` option defines the number of seconds used to read data from kernel and send to netdata # @@ -17,7 +18,8 @@ [global] ebpf load mode = entry apps = yes - update every = 1 + cgroups = no + update every = 5 pid table size = 32768 # @@ -25,17 +27,39 @@ # # The eBPF collector enables and runs the following eBPF programs by default: # -# `cachestat`: Make charts for kernel functions related to page cache. -# `process` : This eBPF program creates charts that show information about process creation, VFS IO, and +# `cachestat` : Make charts for kernel functions related to page cache. +# `dcstat` : Make charts for kernel functions related to directory cache. +# `disk` : Monitor I/O latencies for disks. +# `fd` : This eBPF program creates charts that show information about file manipulation. +# `mdflush` : Monitors flush counts for multi-devices. +# `mount` : Monitor calls for the syscalls mount and umount. +# `filesystem`: Monitor calls for functions used to manipulate specific filesystems. +# `hardirq` : Monitor latency of serving hardware interrupt requests (hard IRQs). +# `oomkill` : This eBPF program creates a chart that shows which process got OOM killed and when. +# `process` : This eBPF program creates charts that show information about process life. +# `shm` : Monitor calls for the syscalls shmget, shmat, shmdt and shmctl. +# `socket` : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the +# bandwidth consumed by each. +# `softirq` : Monitor latency of serving software interrupt requests (soft IRQs). +# `sync` : Monitor calls for the syscall sync(2). +# `swap` : Monitor calls for internal swap functions. +# `vfs` : This eBPF program creates charts that show information about process VFS IO, VFS file manipulation and # files removed. -# `socket` : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the -# bandwidth consumed by each. -# `sync` : Montitor calls for syscall sync(2). 
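+#
+# For example, to keep the defaults below but skip loading the VFS thread, a
+# user only needs to flip its switch in this section (an illustrative edit,
+# not a shipped default):
+#
+#   [ebpf programs]
+#       vfs = no
+#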
[ebpf programs] cachestat = no dcstat = no + disk = no + fd = yes + filesystem = no + hardirq = yes + mdflush = no + mount = yes + oomkill = yes process = yes + shm = yes socket = yes + softirq = yes sync = yes + swap = no + vfs = yes network connections = no - diff --git a/collectors/ebpf.plugin/ebpf.d/cachestat.conf b/collectors/ebpf.plugin/ebpf.d/cachestat.conf index 0c4d991df..41205930a 100644 --- a/collectors/ebpf.plugin/ebpf.d/cachestat.conf +++ b/collectors/ebpf.plugin/ebpf.d/cachestat.conf @@ -3,14 +3,17 @@ # `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates # new charts for the return of these functions, such as errors. # -# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`. -# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to -# 'no'. +# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin` +# or `cgroups.plugin`. +# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change +# the setting `apps` and `cgroups` to 'no'. # # The `pid table size` defines the maximum number of PIDs stored inside the application hash table. -# -[global] - ebpf load mode = entry - apps = yes - update every = 2 - pid table size = 32768 +# +# Uncomment lines to define specific options for thread. +#[global] +# ebpf load mode = entry +# apps = yes +# cgroups = no +# update every = 10 +# pid table size = 32768 diff --git a/collectors/ebpf.plugin/ebpf.d/dcstat.conf b/collectors/ebpf.plugin/ebpf.d/dcstat.conf index 2607b98fd..a65e0acbc 100644 --- a/collectors/ebpf.plugin/ebpf.d/dcstat.conf +++ b/collectors/ebpf.plugin/ebpf.d/dcstat.conf @@ -3,11 +3,15 @@ # `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates # new charts for the return of these functions, such as errors. # -# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`. -# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to -# 'no'. +# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin` +# or `cgroups.plugin`. +# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change +# the setting `apps` and `cgroups` to 'no'. # -[global] - ebpf load mode = entry - apps = yes - update every = 2 +# Uncomment lines to define specific options for thread. +#[global] +# ebpf load mode = entry +# apps = yes +# cgroups = no +# update every = 10 +# pid table size = 32768 diff --git a/collectors/ebpf.plugin/ebpf.d/disk.conf b/collectors/ebpf.plugin/ebpf.d/disk.conf new file mode 100644 index 000000000..4adf88e74 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf.d/disk.conf @@ -0,0 +1,9 @@ +# The `ebpf load mode` option accepts the following values : +# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors. +# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates +# new charts for the return of these functions, such as errors. 
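+#
+# `return` mode costs roughly one extra probe per monitored function, so it
+# is usually enabled only while investigating errors; an illustrative
+# override (not a shipped default):
+#
+#   [global]
+#       ebpf load mode = return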
+# +#[global] +# ebpf load mode = entry +# update every = 10 + diff --git a/collectors/ebpf.plugin/ebpf.d/fd.conf b/collectors/ebpf.plugin/ebpf.d/fd.conf new file mode 100644 index 000000000..f6edd3d93 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf.d/fd.conf @@ -0,0 +1,19 @@ +# The `ebpf load mode` option accepts the following values : +# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors. +# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates +# new charts for the return of these functions, such as errors. +# +# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin` +# or `cgroups.plugin`. +# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change +# the setting `apps` and `cgroups` to 'no'. +# +# The `pid table size` defines the maximum number of PIDs stored inside the hash table. +# +# Uncomment lines to define specific options for thread. +#[global] +# ebpf load mode = entry +# apps = yes +# cgroups = no +# update every = 10 +# pid table size = 32768 diff --git a/collectors/ebpf.plugin/ebpf.d/filesystem.conf b/collectors/ebpf.plugin/ebpf.d/filesystem.conf new file mode 100644 index 000000000..c5eb01e54 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf.d/filesystem.conf @@ -0,0 +1,20 @@ +# The `ebpf load mode` option accepts the following values : +# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors. +# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates +# new charts for the return of these functions, such as errors. +# +# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`. +# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to +# 'no'. +# +#[global] +# ebpf load mode = entry +# update every = 10 + +# All filesystems are named as 'NAMEdist' where NAME is the filesystem name while 'dist' is a reference for distribution. +[filesystem] + btrfsdist = yes + ext4dist = yes + nfsdist = yes + xfsdist = yes + zfsdist = yes diff --git a/collectors/ebpf.plugin/ebpf.d/hardirq.conf b/collectors/ebpf.plugin/ebpf.d/hardirq.conf new file mode 100644 index 000000000..f2bae1d57 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf.d/hardirq.conf @@ -0,0 +1,8 @@ +# The `ebpf load mode` option accepts the following values : +# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors. +# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates +# new charts for the return of these functions, such as errors. +# +#[global] +# ebpf load mode = entry +# update every = 10 diff --git a/collectors/ebpf.plugin/ebpf.d/mdflush.conf b/collectors/ebpf.plugin/ebpf.d/mdflush.conf new file mode 100644 index 000000000..e65e8672c --- /dev/null +++ b/collectors/ebpf.plugin/ebpf.d/mdflush.conf @@ -0,0 +1,7 @@ +# The `ebpf load mode` option accepts the following values : +# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors. 
+# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates +# new charts for the return of these functions, such as errors. +#[global] +# ebpf load mode = entry +# update every = 1 diff --git a/collectors/ebpf.plugin/ebpf.d/mount.conf b/collectors/ebpf.plugin/ebpf.d/mount.conf new file mode 100644 index 000000000..9d3174755 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf.d/mount.conf @@ -0,0 +1,8 @@ +# The `ebpf load mode` option accepts the following values : +# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors. +# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates +# new charts for the return of these functions, such as errors. +# +#[global] +# ebpf load mode = entry +# update every = 1 diff --git a/collectors/ebpf.plugin/ebpf.d/network.conf b/collectors/ebpf.plugin/ebpf.d/network.conf index 6bbd49a49..e692622a9 100644 --- a/collectors/ebpf.plugin/ebpf.d/network.conf +++ b/collectors/ebpf.plugin/ebpf.d/network.conf @@ -3,9 +3,10 @@ # `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates # new charts for the return of these functions, such as errors. # -# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`. -# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to -# 'no'. +# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin` +# or `cgroups.plugin`. +# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change +# the setting `apps` and `cgroups` to 'no'. # # The following options change the hash table size: # `bandwidth table size`: Maximum number of connections monitored @@ -14,9 +15,10 @@ # `udp connection table size`: Maximum number of UDP connections monitored # [global] - ebpf load mode = entry - apps = yes - update every = 1 +# ebpf load mode = entry +# apps = yes +# cgroups = no +# update every = 10 bandwidth table size = 16384 ipv4 connection table size = 16384 ipv6 connection table size = 16384 diff --git a/collectors/ebpf.plugin/ebpf.d/oomkill.conf b/collectors/ebpf.plugin/ebpf.d/oomkill.conf new file mode 100644 index 000000000..e65e8672c --- /dev/null +++ b/collectors/ebpf.plugin/ebpf.d/oomkill.conf @@ -0,0 +1,7 @@ +# The `ebpf load mode` option accepts the following values : +# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors. +# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates +# new charts for the return of these functions, such as errors. +#[global] +# ebpf load mode = entry +# update every = 1 diff --git a/collectors/ebpf.plugin/ebpf.d/process.conf b/collectors/ebpf.plugin/ebpf.d/process.conf index 511da95ad..f6edd3d93 100644 --- a/collectors/ebpf.plugin/ebpf.d/process.conf +++ b/collectors/ebpf.plugin/ebpf.d/process.conf @@ -3,14 +3,17 @@ # `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates # new charts for the return of these functions, such as errors. # -# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`. 
-# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to -# 'no'. +# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin` +# or `cgroups.plugin`. +# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change +# the setting `apps` and `cgroups` to 'no'. # # The `pid table size` defines the maximum number of PIDs stored inside the hash table. -# -[global] - ebpf load mode = entry - apps = yes - update every = 1 - pid table size = 32768 +# +# Uncomment lines to define specific options for thread. +#[global] +# ebpf load mode = entry +# apps = yes +# cgroups = no +# update every = 10 +# pid table size = 32768 diff --git a/collectors/ebpf.plugin/ebpf.d/shm.conf b/collectors/ebpf.plugin/ebpf.d/shm.conf new file mode 100644 index 000000000..c0a10c98e --- /dev/null +++ b/collectors/ebpf.plugin/ebpf.d/shm.conf @@ -0,0 +1,24 @@ +# The `ebpf load mode` option accepts the following values : +# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors. +# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates +# new charts for the return of these functions, such as errors. +# +# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin` +# or `cgroups.plugin`. +# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change +# the setting `apps` and `cgroups` to 'no'. +# +# Uncomment lines to define specific options for thread. +#[global] +# ebpf load mode = entry +# apps = yes +# cgroups = no +# update every = 10 +# pid table size = 32768 + +# List of monitored syscalls +[syscalls] + shmget = yes + shmat = yes + shmdt = yes + shmctl = yes diff --git a/collectors/ebpf.plugin/ebpf.d/softirq.conf b/collectors/ebpf.plugin/ebpf.d/softirq.conf new file mode 100644 index 000000000..f2bae1d57 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf.d/softirq.conf @@ -0,0 +1,8 @@ +# The `ebpf load mode` option accepts the following values : +# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors. +# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates +# new charts for the return of these functions, such as errors. +# +#[global] +# ebpf load mode = entry +# update every = 10 diff --git a/collectors/ebpf.plugin/ebpf.d/swap.conf b/collectors/ebpf.plugin/ebpf.d/swap.conf new file mode 100644 index 000000000..a65e0acbc --- /dev/null +++ b/collectors/ebpf.plugin/ebpf.d/swap.conf @@ -0,0 +1,17 @@ +# The `ebpf load mode` option accepts the following values : +# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors. +# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates +# new charts for the return of these functions, such as errors. +# +# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin` +# or `cgroups.plugin`. +# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change +# the setting `apps` and `cgroups` to 'no'. +# +# Uncomment lines to define specific options for thread. 
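+#
+# For example, to account swap activity per application and per cgroup while
+# keeping the other defaults (illustrative values, not shipped defaults):
+#
+#   [global]
+#       apps = yes
+#       cgroups = yes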
+#[global] +# ebpf load mode = entry +# apps = yes +# cgroups = no +# update every = 10 +# pid table size = 32768 diff --git a/collectors/ebpf.plugin/ebpf.d/sync.conf b/collectors/ebpf.plugin/ebpf.d/sync.conf index de28f3394..03c469f68 100644 --- a/collectors/ebpf.plugin/ebpf.d/sync.conf +++ b/collectors/ebpf.plugin/ebpf.d/sync.conf @@ -3,15 +3,17 @@ # `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates # new charts for the return of these functions, such as errors. # -# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`. -# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to -# 'no'. +# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin` +# or `cgroups.plugin`. +# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change +# the setting `apps` and `cgroups` to 'no'. # # -[global] - ebpf load mode = entry - apps = yes - update every = 2 +#[global] +# ebpf load mode = entry +# apps = yes +# cgroups = no +# update every = 10 # List of monitored syscalls [syscalls] diff --git a/collectors/ebpf.plugin/ebpf.d/vfs.conf b/collectors/ebpf.plugin/ebpf.d/vfs.conf new file mode 100644 index 000000000..a65e0acbc --- /dev/null +++ b/collectors/ebpf.plugin/ebpf.d/vfs.conf @@ -0,0 +1,17 @@ +# The `ebpf load mode` option accepts the following values : +# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors. +# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates +# new charts for the return of these functions, such as errors. +# +# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin` +# or `cgroups.plugin`. +# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change +# the setting `apps` and `cgroups` to 'no'. +# +# Uncomment lines to define specific options for thread. 
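+#
+# For example, hosts that run many short-lived processes may need a larger
+# PID hash table (an illustrative value, not a shipped default):
+#
+#   [global]
+#       pid table size = 65536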
+#[global] +# ebpf load mode = entry +# apps = yes +# cgroups = no +# update every = 10 +# pid table size = 32768 diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h index 841701e20..a59bad031 100644 --- a/collectors/ebpf.plugin/ebpf.h +++ b/collectors/ebpf.plugin/ebpf.h @@ -30,6 +30,7 @@ #include "daemon/main.h" #include "ebpf_apps.h" +#include "ebpf_cgroup.h" #define NETDATA_EBPF_OLD_CONFIG_FILE "ebpf.conf" #define NETDATA_EBPF_CONFIG_FILE "ebpf.d.conf" @@ -73,14 +74,37 @@ typedef struct netdata_error_report { } netdata_error_report_t; extern ebpf_module_t ebpf_modules[]; -enum ebpf_module_indexes { +enum ebpf_main_index { EBPF_MODULE_PROCESS_IDX, EBPF_MODULE_SOCKET_IDX, EBPF_MODULE_CACHESTAT_IDX, EBPF_MODULE_SYNC_IDX, - EBPF_MODULE_DCSTAT_IDX + EBPF_MODULE_DCSTAT_IDX, + EBPF_MODULE_SWAP_IDX, + EBPF_MODULE_VFS_IDX, + EBPF_MODULE_FILESYSTEM_IDX, + EBPF_MODULE_DISK_IDX, + EBPF_MODULE_MOUNT_IDX, + EBPF_MODULE_FD_IDX, + EBPF_MODULE_HARDIRQ_IDX, + EBPF_MODULE_SOFTIRQ_IDX, + EBPF_MODULE_OOMKILL_IDX, + EBPF_MODULE_SHM_IDX, + EBPF_MODULE_MDFLUSH_IDX, + /* THREADS MUST BE INCLUDED BEFORE THIS COMMENT */ + EBPF_OPTION_ALL_CHARTS, + EBPF_OPTION_VERSION, + EBPF_OPTION_HELP, + EBPF_OPTION_GLOBAL_CHART, + EBPF_OPTION_RETURN_MODE }; +typedef struct ebpf_tracepoint { + bool enabled; + char *class; + char *event; +} ebpf_tracepoint_t; + // Copied from musl header #ifndef offsetof #if __GNUC__ > 3 @@ -92,10 +116,16 @@ enum ebpf_module_indexes { // Chart definitions #define NETDATA_EBPF_FAMILY "ebpf" +#define NETDATA_EBPF_IP_FAMILY "ip" #define NETDATA_FILESYSTEM_FAMILY "filesystem" +#define NETDATA_EBPF_MOUNT_GLOBAL_FAMILY "mount_points" #define NETDATA_EBPF_CHART_TYPE_LINE "line" #define NETDATA_EBPF_CHART_TYPE_STACKED "stacked" #define NETDATA_EBPF_MEMORY_GROUP "mem" +#define NETDATA_EBPF_SYSTEM_GROUP "system" +#define NETDATA_SYSTEM_SWAP_SUBMENU "swap" +#define NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU "swap (eBPF)" +#define NETDATA_SYSTEM_IPC_SHM_SUBMENU "ipc shared memory" // Log file #define NETDATA_DEVELOPER_LOG_FILE "developer.log" @@ -111,6 +141,8 @@ enum ebpf_module_indexes { #define EBPF_SYS_CLONE_IDX 11 #define EBPF_MAX_MAPS 32 +#define EBPF_DEFAULT_UPDATE_EVERY 10 + enum ebpf_algorithms_list { NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_INCREMENTAL_IDX @@ -125,6 +157,7 @@ extern pthread_mutex_t lock; extern int close_ebpf_plugin; extern int ebpf_nprocs; extern int running_on_kernel; +extern int isrh; extern char *ebpf_plugin_dir; extern char kernel_string[64]; @@ -146,7 +179,9 @@ extern void ebpf_write_chart_cmd(char *type, char *family, char *charttype, char *context, - int order); + int order, + int update_every, + char *module); extern void ebpf_write_global_dimension(char *name, char *id, char *algorithm); @@ -162,7 +197,9 @@ extern void ebpf_create_chart(char *type, int order, void (*ncd)(void *, int), void *move, - int end); + int end, + int update_every, + char *module); extern void write_begin_chart(char *family, char *name); @@ -175,8 +212,6 @@ extern void write_err_chart(char *name, char *family, netdata_publish_syscall_t extern void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, char *dread, long long vread); -extern void fill_ebpf_data(ebpf_data_t *ef); - extern void ebpf_create_charts_on_apps(char *name, char *title, char *units, @@ -184,12 +219,18 @@ extern void ebpf_create_charts_on_apps(char *name, char *charttype, int order, char *algorithm, - struct target *root); + struct target *root, + int update_every, + char *module); extern void 
write_end_chart(); extern void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps); +extern int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp); +extern int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp); +extern uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps); + #define EBPF_PROGRAMS_SECTION "ebpf programs" #define EBPF_COMMON_DIMENSION_PERCENTAGE "%" @@ -199,16 +240,21 @@ extern void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps); #define EBPF_COMMON_DIMENSION_DIFFERENCE "difference" #define EBPF_COMMON_DIMENSION_PACKETS "packets" #define EBPF_COMMON_DIMENSION_FILES "files" +#define EBPF_COMMON_DIMENSION_MILLISECONDS "milliseconds" +#define EBPF_COMMON_DIMENSION_KILLS "kills" // Common variables extern int debug_enabled; extern struct pid_stat *root_of_pids; +extern ebpf_cgroup_target_t *ebpf_cgroup_pids; extern char *ebpf_algorithms[]; extern struct config collector_config; -extern struct pid_stat *root_of_pids; extern ebpf_process_stat_t *global_process_stat; +extern netdata_ebpf_cgroup_shm_t shm_ebpf_cgroup; +extern int shm_fd_ebpf_cgroup; +extern sem_t *shm_sem_ebpf_cgroup; +extern pthread_mutex_t mutex_cgroup_shm; extern size_t all_pids_count; -extern int update_every; extern uint32_t finalized_threads; // Socket functions and variables @@ -219,6 +265,9 @@ extern void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *root extern void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long long v1); extern collected_number get_value_from_structure(char *basis, size_t offset); extern void ebpf_update_pid_table(ebpf_local_maps_t *pid, ebpf_module_t *em); +extern void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *units, char *family, + char *charttype, char *context, int order, int update_every); +extern void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end); #define EBPF_MAX_SYNCHRONIZATION_TIME 300 diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c index 6459bad0d..015d1bf21 100644 --- a/collectors/ebpf.plugin/ebpf_apps.c +++ b/collectors/ebpf.plugin/ebpf_apps.c @@ -116,9 +116,9 @@ int am_i_running_as_root() /** * Reset the target values * - * @param root the pointer to the chain that will be reseted. + * @param root the pointer to the chain that will be reset. * - * @return it returns the number of structures that was reseted. + * @return it returns the number of structures that was reset. */ size_t zero_all_targets(struct target *root) { @@ -909,6 +909,33 @@ static inline void del_pid_entry(pid_t pid) all_pids_count--; } +/** + * Get command string associated with a PID. + * This can only safely be used when holding the `collect_data_mutex` lock. + * + * @param pid the pid to search the data. + * @param n the maximum amount of bytes to copy into dest. + * if this is greater than the size of the command, it is clipped. + * @param dest the target memory buffer to write the command into. + * @return -1 if the PID hasn't been scraped yet, 0 otherwise. 
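+ *
+ * A minimal usage sketch (the buffer size mirrors the kernel's 16-byte comm
+ * field and is an assumption for illustration):
+ *
+ *   char comm[16];
+ *   if (!get_pid_comm(pid, sizeof(comm) - 1, comm))
+ *       info("PID %d runs %s", pid, comm);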
+ */ +int get_pid_comm(pid_t pid, size_t n, char *dest) +{ + struct pid_stat *stat; + + stat = all_pids[pid]; + if (unlikely(stat == NULL)) { + return -1; + } + + if (unlikely(n > sizeof(stat->comm))) { + n = sizeof(stat->comm); + } + + strncpyz(dest, stat->comm, n); + return 0; +} + /** * Cleanup variable from other threads * @@ -922,7 +949,7 @@ void cleanup_variables_from_other_threads(uint32_t pid) socket_bandwidth_curr[pid] = NULL; } - // Clean cachestat strcture + // Clean cachestat structure if (cachestat_pid) { freez(cachestat_pid[pid]); cachestat_pid[pid] = NULL; @@ -933,6 +960,30 @@ void cleanup_variables_from_other_threads(uint32_t pid) freez(dcstat_pid[pid]); dcstat_pid[pid] = NULL; } + + // Clean swap structure + if (swap_pid) { + freez(swap_pid[pid]); + swap_pid[pid] = NULL; + } + + // Clean vfs structure + if (vfs_pid) { + freez(vfs_pid[pid]); + vfs_pid[pid] = NULL; + } + + // Clean fd structure + if (fd_pid) { + freez(fd_pid[pid]); + fd_pid[pid] = NULL; + } + + // Clean shm structure + if (shm_pid) { + freez(shm_pid[pid]); + shm_pid[pid] = NULL; + } } /** diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/collectors/ebpf.plugin/ebpf_apps.h index edcdef605..0c72b8782 100644 --- a/collectors/ebpf.plugin/ebpf_apps.h +++ b/collectors/ebpf.plugin/ebpf_apps.h @@ -11,17 +11,28 @@ #include "libnetdata/ebpf/ebpf.h" #define NETDATA_APPS_FAMILY "apps" -#define NETDATA_APPS_FILE_GROUP "file (eBPF)" -#define NETDATA_APPS_VFS_GROUP "vfs (eBPF)" +#define NETDATA_APPS_FILE_GROUP "file_access" +#define NETDATA_APPS_FILE_CGROUP_GROUP "file_access (eBPF)" #define NETDATA_APPS_PROCESS_GROUP "process (eBPF)" -#define NETDATA_APPS_NET_GROUP "net (eBPF)" -#define NETDATA_APPS_CACHESTAT_GROUP "page cache (eBPF)" -#define NETDATA_APPS_DCSTAT_GROUP "directory cache (eBPF)" +#define NETDATA_APPS_NET_GROUP "net" +#define NETDATA_APPS_IPC_SHM_GROUP "ipc shm (eBPF)" #include "ebpf_process.h" #include "ebpf_dcstat.h" +#include "ebpf_disk.h" +#include "ebpf_fd.h" +#include "ebpf_filesystem.h" +#include "ebpf_hardirq.h" #include "ebpf_cachestat.h" +#include "ebpf_mdflush.h" +#include "ebpf_mount.h" +#include "ebpf_oomkill.h" +#include "ebpf_shm.h" +#include "ebpf_socket.h" +#include "ebpf_softirq.h" #include "ebpf_sync.h" +#include "ebpf_swap.h" +#include "ebpf_vfs.h" #define MAX_COMPARE_NAME 100 #define MAX_NAME 100 @@ -113,6 +124,10 @@ struct target { // Changes made to simplify integration between apps and eBPF. 
netdata_publish_cachestat_t cachestat; netdata_publish_dcstat_t dcstat; + netdata_publish_swap_t swap; + netdata_publish_vfs_t vfs; + netdata_fd_stat_t fd; + netdata_publish_shm_t shm; /* These variables are not necessary for eBPF collector kernel_uint_t minflt; @@ -341,34 +356,13 @@ typedef struct ebpf_process_stat { uint32_t pid; //Counter - uint32_t open_call; - uint32_t write_call; - uint32_t writev_call; - uint32_t read_call; - uint32_t readv_call; - uint32_t unlink_call; uint32_t exit_call; uint32_t release_call; - uint32_t fork_call; - uint32_t clone_call; - uint32_t close_call; - - //Accumulator - uint64_t write_bytes; - uint64_t writev_bytes; - uint64_t readv_bytes; - uint64_t read_bytes; + uint32_t create_process; + uint32_t create_thread; //Counter - uint32_t open_err; - uint32_t write_err; - uint32_t writev_err; - uint32_t read_err; - uint32_t readv_err; - uint32_t unlink_err; - uint32_t fork_err; - uint32_t clone_err; - uint32_t close_err; + uint32_t task_err; uint8_t removeme; } ebpf_process_stat_t; @@ -425,6 +419,8 @@ extern void cleanup_exited_pids(); extern int ebpf_read_hash_table(void *ep, int fd, uint32_t pid); +extern int get_pid_comm(pid_t pid, size_t n, char *dest); + extern size_t read_processes_statistic_using_pid_on_target(ebpf_process_stat_t **ep, int fd, struct pid_on_target *pids); diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c index cdeac6951..7ba8c01ae 100644 --- a/collectors/ebpf.plugin/ebpf_cachestat.c +++ b/collectors/ebpf.plugin/ebpf_cachestat.c @@ -3,7 +3,6 @@ #include "ebpf.h" #include "ebpf_cachestat.h" -static ebpf_data_t cachestat_data; netdata_publish_cachestat_t **cachestat_pid; static struct bpf_link **probe_links = NULL; @@ -16,7 +15,8 @@ static netdata_publish_syscall_t cachestat_counter_publish_aggregated[NETDATA_CA netdata_cachestat_pid_t *cachestat_vector = NULL; -static netdata_idx_t *cachestat_hash_values = NULL; +static netdata_idx_t cachestat_hash_values[NETDATA_CACHESTAT_END]; +static netdata_idx_t *cachestat_values = NULL; static int read_thread_closed = 1; @@ -24,11 +24,20 @@ struct netdata_static_thread cachestat_threads = {"CACHESTAT KERNEL", NULL, NULL, 1, NULL, NULL, NULL}; -static ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE, - .user_input = 0}, - {.name = NULL, .internal_input = 0, .user_input = 0}}; - -static int *map_fd = NULL; +static ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_global", .internal_input = NETDATA_CACHESTAT_END, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "cstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE, + .user_input = 0, + .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "cstat_ctrl", .internal_input = NETDATA_CONTROLLER_END, + .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = NULL, .internal_input = 0, .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}}; struct config cachestat_config = { .first_section = NULL, .last_section = NULL, @@ -78,15 +87,17 @@ static void ebpf_cachestat_cleanup(void *ptr) ebpf_cleanup_publish_syscall(cachestat_counter_publish_aggregated); freez(cachestat_vector); - freez(cachestat_hash_values); - - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - 
i++; + freez(cachestat_values); + + if (probe_links) { + struct bpf_program *prog; + size_t i = 0 ; + bpf_object__for_each_program(prog, objects) { + bpf_link__destroy(probe_links[i]); + i++; + } + bpf_object__close(objects); } - bpf_object__close(objects); } /***************************************************************** @@ -100,7 +111,7 @@ static void ebpf_cachestat_cleanup(void *ptr) * * Update publish values before writing a dimension. * - * @param out strcuture that will receive data. + * @param out structure that will receive data. * @param mpa calls for mark_page_accessed during the last second. * @param mbd calls for mark_buffer_dirty during the last second. * @param apcl calls for add_to_page_cache_lru during the last second. @@ -247,7 +258,7 @@ static void read_apps_table() netdata_cachestat_pid_t *cv = cachestat_vector; uint32_t key; struct pid_stat *pids = root_of_pids; - int fd = map_fd[NETDATA_CACHESTAT_PID_STATS]; + int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd; size_t length = sizeof(netdata_cachestat_pid_t)*ebpf_nprocs; while (pids) { key = pids->pid; @@ -268,6 +279,43 @@ static void read_apps_table() } } +/** + * Update cgroup + * + * Update cgroup targets with the per-PID cachestat data collected for their processes. + */ +static void ebpf_update_cachestat_cgroup() +{ + netdata_cachestat_pid_t *cv = cachestat_vector; + int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd; + size_t length = sizeof(netdata_cachestat_pid_t) * ebpf_nprocs; + + ebpf_cgroup_target_t *ect; + pthread_mutex_lock(&mutex_cgroup_shm); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + struct pid_on_target2 *pids; + for (pids = ect->pids; pids; pids = pids->next) { + int pid = pids->pid; + netdata_cachestat_pid_t *out = &pids->cachestat; + if (likely(cachestat_pid) && cachestat_pid[pid]) { + netdata_publish_cachestat_t *in = cachestat_pid[pid]; + + memcpy(out, &in->current, sizeof(netdata_cachestat_pid_t)); + } else { + memset(cv, 0, length); + if (bpf_map_lookup_elem(fd, &pid, cv)) { + continue; + } + + cachestat_apps_accumulator(cv); + + memcpy(out, cv, sizeof(netdata_cachestat_pid_t)); + } + } + } + pthread_mutex_unlock(&mutex_cgroup_shm); +} + /** * Create apps charts * @@ -277,43 +325,42 @@ static void read_apps_table() */ void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr) { - UNUSED(em); struct target *root = ptr; ebpf_create_charts_on_apps(NETDATA_CACHESTAT_HIT_RATIO_CHART, "The ratio is calculated by dividing hit pages by total cache accesses, without counting dirty pages.", EBPF_COMMON_DIMENSION_PERCENTAGE, - NETDATA_APPS_CACHESTAT_GROUP, + NETDATA_CACHESTAT_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, 20090, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); ebpf_create_charts_on_apps(NETDATA_CACHESTAT_DIRTY_CHART, "Number of pages marked as dirty. 
When a page is marked dirty, this means that the data stored inside the page needs to be written back to the device.", EBPF_CACHESTAT_DIMENSION_PAGE, - NETDATA_APPS_CACHESTAT_GROUP, + NETDATA_CACHESTAT_SUBMENU, NETDATA_EBPF_CHART_TYPE_STACKED, 20091, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); ebpf_create_charts_on_apps(NETDATA_CACHESTAT_HIT_CHART, "Number of cache accesses, without counting dirty pages and page additions.", EBPF_CACHESTAT_DIMENSION_HITS, - NETDATA_APPS_CACHESTAT_GROUP, + NETDATA_CACHESTAT_SUBMENU, NETDATA_EBPF_CHART_TYPE_STACKED, 20092, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); ebpf_create_charts_on_apps(NETDATA_CACHESTAT_MISSES_CHART, "Pages added to the cache, without counting dirty pages.", EBPF_CACHESTAT_DIMENSION_MISSES, - NETDATA_APPS_CACHESTAT_GROUP, + NETDATA_CACHESTAT_SUBMENU, NETDATA_EBPF_CHART_TYPE_STACKED, 20093, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); } /***************************************************************** @@ -331,12 +378,18 @@ static void read_global_table() { uint32_t idx; netdata_idx_t *val = cachestat_hash_values; - netdata_idx_t stored; - int fd = map_fd[NETDATA_CACHESTAT_GLOBAL_STATS]; + netdata_idx_t *stored = cachestat_values; + int fd = cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd; for (idx = NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU; idx < NETDATA_CACHESTAT_END; idx++) { - if (!bpf_map_lookup_elem(fd, &idx, &stored)) { - val[idx] = stored; + if (!bpf_map_lookup_elem(fd, &idx, stored)) { + int i; + int end = ebpf_nprocs; + netdata_idx_t total = 0; + for (i = 0; i < end; i++) + total += stored[i]; + + val[idx] = total; } } } @@ -360,7 +413,7 @@ void *ebpf_cachestat_read_hash(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; - usec_t step = NETDATA_LATENCY_CACHESTAT_SLEEP_MS * em->update_time; + usec_t step = NETDATA_LATENCY_CACHESTAT_SLEEP_MS * em->update_every; while (!close_ebpf_plugin) { usec_t dt = heartbeat_next(&hb, step); (void)dt; @@ -428,7 +481,7 @@ void ebpf_cachestat_sum_pids(netdata_publish_cachestat_t *publish, struct pid_on } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param root the target list. */ @@ -446,7 +499,7 @@ void ebpf_cache_send_apps_data(struct target *root) uint64_t mpa = current->mark_page_accessed - prev->mark_page_accessed; uint64_t mbd = current->mark_buffer_dirty - prev->mark_buffer_dirty; - w->cachestat.dirty = current->mark_buffer_dirty; + w->cachestat.dirty = mbd; uint64_t apcl = current->add_to_page_cache_lru - prev->add_to_page_cache_lru; uint64_t apd = current->account_page_dirtied - prev->account_page_dirtied; @@ -486,6 +539,297 @@ void ebpf_cache_send_apps_data(struct target *root) write_end_chart(); } +/** + * Cachestat sum PIDs + * + * Sum values for all PIDs associated with a group + * + * @param publish output structure. 
+ * @param root structure with the list of PIDs + */ +void ebpf_cachestat_sum_cgroup_pids(netdata_publish_cachestat_t *publish, struct pid_on_target2 *root) +{ + memcpy(&publish->prev, &publish->current, sizeof(publish->current)); + memset(&publish->current, 0, sizeof(publish->current)); + + netdata_cachestat_pid_t *dst = &publish->current; + while (root) { + netdata_cachestat_pid_t *src = &root->cachestat; + + dst->account_page_dirtied += src->account_page_dirtied; + dst->add_to_page_cache_lru += src->add_to_page_cache_lru; + dst->mark_buffer_dirty += src->mark_buffer_dirty; + dst->mark_page_accessed += src->mark_page_accessed; + + root = root->next; + } +} + +/** + * Calc chart values + * + * Do necessary math to plot charts. + */ +void ebpf_cachestat_calc_chart_values() +{ + ebpf_cgroup_target_t *ect; + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + ebpf_cachestat_sum_cgroup_pids(&ect->publish_cachestat, ect->pids); + + netdata_cachestat_pid_t *current = &ect->publish_cachestat.current; + netdata_cachestat_pid_t *prev = &ect->publish_cachestat.prev; + + uint64_t mpa = current->mark_page_accessed - prev->mark_page_accessed; + uint64_t mbd = current->mark_buffer_dirty - prev->mark_buffer_dirty; + ect->publish_cachestat.dirty = mbd; + uint64_t apcl = current->add_to_page_cache_lru - prev->add_to_page_cache_lru; + uint64_t apd = current->account_page_dirtied - prev->account_page_dirtied; + + cachestat_update_publish(&ect->publish_cachestat, mpa, mbd, apcl, apd); + } +} + +/** + * Create systemd cachestat charts + * + * Create charts when systemd is enabled + * + * @param update_every value to overwrite the update frequency set by the server. + **/ +static void ebpf_create_systemd_cachestat_charts(int update_every) +{ + ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_HIT_RATIO_CHART, + "Hit is calculating using total cache added without dirties per total added because of red misses.", + EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, 21100, + ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], + NETDATA_SYSTEMD_CACHESTAT_HIT_RATIO_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT, + update_every); + + ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_DIRTY_CHART, + "Number of dirty pages added to the page cache.", + EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, 21101, + ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], + NETDATA_SYSTEMD_CACHESTAT_MODIFIED_CACHE_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT, + update_every); + + ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_HIT_CHART, "Hits are function calls that Netdata counts.", + EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, 21102, + ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], + NETDATA_SYSTEMD_CACHESTAT_HIT_FILE_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT, + update_every); + + ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_MISSES_CHART, "Misses are function calls that Netdata counts.", + EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, 21103, + ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], + NETDATA_SYSTEMD_CACHESTAT_MISS_FILES_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT, + update_every); +} + +/** + * Send Cache Stat charts + * + * Send collected data to Netdata. 
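+ * Dimensions are written only for services flagged as systemd and updated; when a service has disappeared, the function reports it so the caller can recreate the charts without it.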
+ * + * @return It returns the status for chart creation: zero is returned when a specific dimension must be removed, + * otherwise the function returns 1 to avoid chart recreation + */ +static int ebpf_send_systemd_cachestat_charts() +{ + int ret = 1; + ebpf_cgroup_target_t *ect; + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_RATIO_CHART); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, (long long)ect->publish_cachestat.ratio); + } else + ret = 0; + } + write_end_chart(); + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_DIRTY_CHART); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, (long long)ect->publish_cachestat.dirty); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_CHART); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, (long long)ect->publish_cachestat.hit); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_MISSES_CHART); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, (long long)ect->publish_cachestat.miss); + } + } + write_end_chart(); + + return ret; +} + +/** + * Send specific Cache Stat data + * + * Send collected data to Netdata for a single cgroup. + */ +static void ebpf_send_specific_cachestat_data(char *type, netdata_publish_cachestat_t *npc) +{ + write_begin_chart(type, NETDATA_CACHESTAT_HIT_RATIO_CHART); + write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_RATIO].name, (long long)npc->ratio); + write_end_chart(); + + write_begin_chart(type, NETDATA_CACHESTAT_DIRTY_CHART); + write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_DIRTY].name, (long long)npc->dirty); + write_end_chart(); + + write_begin_chart(type, NETDATA_CACHESTAT_HIT_CHART); + write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_HIT].name, (long long)npc->hit); + write_end_chart(); + + write_begin_chart(type, NETDATA_CACHESTAT_MISSES_CHART); + write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_MISS].name, (long long)npc->miss); + write_end_chart(); +} + +/** + * Create specific Cache Stat charts + * + * Create charts for cgroup/application. + * + * @param type the chart type. + * @param update_every value to overwrite the update frequency set by the server. 
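+ * + * Charts are created with fixed priorities, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5200 up to + 5203, so every cgroup shows them in the same order.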
+ */ +static void ebpf_create_specific_cachestat_charts(char *type, int update_every) +{ + ebpf_create_chart(type, NETDATA_CACHESTAT_HIT_RATIO_CHART, + "Hit is calculating using total cache added without dirties per total added because of red misses.", + EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_CGROUP_SUBMENU, + NETDATA_CGROUP_CACHESTAT_HIT_RATIO_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5200, + ebpf_create_global_dimension, + cachestat_counter_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); + + ebpf_create_chart(type, NETDATA_CACHESTAT_DIRTY_CHART, + "Number of dirty pages added to the page cache.", + EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_CGROUP_SUBMENU, + NETDATA_CGROUP_CACHESTAT_MODIFIED_CACHE_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5201, + ebpf_create_global_dimension, + &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_DIRTY], 1, + update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); + + ebpf_create_chart(type, NETDATA_CACHESTAT_HIT_CHART, + "Hits are function calls that Netdata counts.", + EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_CGROUP_SUBMENU, + NETDATA_CGROUP_CACHESTAT_HIT_FILES_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5202, + ebpf_create_global_dimension, + &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_HIT], 1, + update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); + + ebpf_create_chart(type, NETDATA_CACHESTAT_MISSES_CHART, + "Misses are function calls that Netdata counts.", + EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_CGROUP_SUBMENU, + NETDATA_CGROUP_CACHESTAT_MISS_FILES_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5203, + ebpf_create_global_dimension, + &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_MISS], 1, + update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); +} + +/** + * Obsolete specific cache stat charts + * + * Obsolete charts for cgroup/application. + * + * @param type the chart type. + * @param update_every value to overwrite the update frequency set by the server. 
+ */ +static void ebpf_obsolete_specific_cachestat_charts(char *type, int update_every) +{ + ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_HIT_RATIO_CHART, + "Hit is calculating using total cache added without dirties per total added because of red misses.", + EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_HIT_RATIO_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5200, update_every); + + ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_DIRTY_CHART, + "Number of dirty pages added to the page cache.", + EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_MODIFIED_CACHE_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5201, update_every); + + ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_HIT_CHART, + "Hits are function calls that Netdata counts.", + EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_HIT_FILES_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5202, update_every); + + ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_MISSES_CHART, + "Misses are function calls that Netdata counts.", + EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_MISS_FILES_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5203, update_every); +} + +/** + * Send data to Netdata calling auxiliary functions. + * + * @param update_every value to overwrite the update frequency set by the server. +*/ +void ebpf_cachestat_send_cgroup_data(int update_every) +{ + if (!ebpf_cgroup_pids) + return; + + pthread_mutex_lock(&mutex_cgroup_shm); + ebpf_cgroup_target_t *ect; + ebpf_cachestat_calc_chart_values(); + + int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; + if (has_systemd) { + static int systemd_charts = 0; + if (!systemd_charts) { + ebpf_create_systemd_cachestat_charts(update_every); + systemd_charts = 1; + } + + systemd_charts = ebpf_send_systemd_cachestat_charts(); + } + + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (ect->systemd) + continue; + + if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART) && ect->updated) { + ebpf_create_specific_cachestat_charts(ect->name, update_every); + ect->flags |= NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART; + } + + if (ect->flags & NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART) { + if (ect->updated) { + ebpf_send_specific_cachestat_data(ect->name, &ect->publish_cachestat); + } else { + ebpf_obsolete_specific_cachestat_charts(ect->name, update_every); + ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART; + } + } + } + + pthread_mutex_unlock(&mutex_cgroup_shm); +} + /** * Main loop for this collector. 
*/ @@ -494,29 +838,40 @@ static void cachestat_collector(ebpf_module_t *em) cachestat_threads.thread = mallocz(sizeof(netdata_thread_t)); cachestat_threads.start_routine = ebpf_cachestat_read_hash; - map_fd = cachestat_data.map_fd; - netdata_thread_create(cachestat_threads.thread, cachestat_threads.name, NETDATA_THREAD_OPTION_JOINABLE, ebpf_cachestat_read_hash, em); netdata_publish_cachestat_t publish; memset(&publish, 0, sizeof(publish)); int apps = em->apps_charts; + int cgroups = em->cgroup_charts; + int update_every = em->update_every; + int counter = update_every - 1; while (!close_ebpf_plugin) { pthread_mutex_lock(&collect_data_mutex); pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); - if (apps) - read_apps_table(); + if (++counter == update_every) { + counter = 0; + if (apps) + read_apps_table(); - pthread_mutex_lock(&lock); + if (cgroups) + ebpf_update_cachestat_cgroup(); - cachestat_send_global(&publish); + pthread_mutex_lock(&lock); - if (apps) - ebpf_cache_send_apps_data(apps_groups_root_target); + cachestat_send_global(&publish); + + if (apps) + ebpf_cache_send_apps_data(apps_groups_root_target); + + if (cgroups) + ebpf_cachestat_send_cgroup_data(update_every); + + pthread_mutex_unlock(&lock); + } - pthread_mutex_unlock(&lock); pthread_mutex_unlock(&collect_data_mutex); } } @@ -531,8 +886,10 @@ static void cachestat_collector(ebpf_module_t *em) * Create global charts * * Call ebpf_create_chart to create the charts for the collector. + * + * @param em a pointer to `struct ebpf_module` */ -static void ebpf_create_memory_charts() +static void ebpf_create_memory_charts(ebpf_module_t *em) { ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_RATIO_CHART, "Hit is calculating using total cache added without dirties per total added because of red misses.", @@ -541,7 +898,7 @@ static void ebpf_create_memory_charts() NETDATA_EBPF_CHART_TYPE_LINE, 21100, ebpf_create_global_dimension, - cachestat_counter_publish_aggregated, 1); + cachestat_counter_publish_aggregated, 1, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_DIRTY_CHART, "Number of dirty pages added to the page cache.", @@ -550,7 +907,8 @@ static void ebpf_create_memory_charts() NETDATA_EBPF_CHART_TYPE_LINE, 21101, ebpf_create_global_dimension, - &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_DIRTY], 1); + &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_DIRTY], 1, + em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_CHART, "Hits are function calls that Netdata counts.", @@ -559,7 +917,8 @@ static void ebpf_create_memory_charts() NETDATA_EBPF_CHART_TYPE_LINE, 21102, ebpf_create_global_dimension, - &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_HIT], 1); + &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_HIT], 1, + em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_MISSES_CHART, "Misses are function calls that Netdata counts.", @@ -568,7 +927,8 @@ static void ebpf_create_memory_charts() NETDATA_EBPF_CHART_TYPE_LINE, 21103, ebpf_create_global_dimension, - &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_MISS], 1); + &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_MISS], 1, + em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT); fflush(stdout); } @@ -579,17 +939,20 @@ static void ebpf_create_memory_charts() * We are not testing 
the return, because callocz does this and shutdown the software * case it was not possible to allocate. * - * @param length is the length for the vectors used inside the collector. + * @param apps is apps enabled? */ -static void ebpf_cachestat_allocate_global_vectors(size_t length) +static void ebpf_cachestat_allocate_global_vectors(int apps) { - cachestat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_cachestat_t *)); + if (apps) + cachestat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_cachestat_t *)); + cachestat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_cachestat_pid_t)); - cachestat_hash_values = callocz(length, sizeof(netdata_idx_t)); + cachestat_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t)); - memset(cachestat_counter_aggregated_data, 0, length * sizeof(netdata_syscall_stat_t)); - memset(cachestat_counter_publish_aggregated, 0, length * sizeof(netdata_publish_syscall_t)); + memset(cachestat_hash_values, 0, NETDATA_CACHESTAT_END * sizeof(netdata_idx_t)); + memset(cachestat_counter_aggregated_data, 0, NETDATA_CACHESTAT_END * sizeof(netdata_syscall_stat_t)); + memset(cachestat_counter_publish_aggregated, 0, NETDATA_CACHESTAT_END * sizeof(netdata_publish_syscall_t)); } /***************************************************************** @@ -613,22 +976,16 @@ void *ebpf_cachestat_thread(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = cachestat_maps; - fill_ebpf_data(&cachestat_data); - ebpf_update_module(em, &cachestat_config, NETDATA_CACHESTAT_CONFIG_FILE); - ebpf_update_pid_table(&cachestat_maps[0], em); + ebpf_update_pid_table(&cachestat_maps[NETDATA_CACHESTAT_PID_STATS], em); if (!em->enabled) goto endcachestat; pthread_mutex_lock(&lock); - ebpf_cachestat_allocate_global_vectors(NETDATA_CACHESTAT_END); - if (ebpf_update_kernel(&cachestat_data)) { - pthread_mutex_unlock(&lock); - goto endcachestat; - } + ebpf_cachestat_allocate_global_vectors(em->apps_charts); - probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, cachestat_data.map_fd); + probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects); if (!probe_links) { pthread_mutex_unlock(&lock); goto endcachestat; @@ -642,7 +999,7 @@ void *ebpf_cachestat_thread(void *ptr) cachestat_counter_dimension_name, cachestat_counter_dimension_name, algorithms, NETDATA_CACHESTAT_END); - ebpf_create_memory_charts(); + ebpf_create_memory_charts(em); pthread_mutex_unlock(&lock); diff --git a/collectors/ebpf.plugin/ebpf_cachestat.h b/collectors/ebpf.plugin/ebpf_cachestat.h index 694933e0c..7904c8113 100644 --- a/collectors/ebpf.plugin/ebpf_cachestat.h +++ b/collectors/ebpf.plugin/ebpf_cachestat.h @@ -3,13 +3,17 @@ #ifndef NETDATA_EBPF_CACHESTAT_H #define NETDATA_EBPF_CACHESTAT_H 1 +// Module name +#define NETDATA_EBPF_MODULE_NAME_CACHESTAT "cachestat" + // charts #define NETDATA_CACHESTAT_HIT_RATIO_CHART "cachestat_ratio" #define NETDATA_CACHESTAT_DIRTY_CHART "cachestat_dirties" #define NETDATA_CACHESTAT_HIT_CHART "cachestat_hits" #define NETDATA_CACHESTAT_MISSES_CHART "cachestat_misses" -#define NETDATA_CACHESTAT_SUBMENU "page cache (eBPF)" +#define NETDATA_CACHESTAT_SUBMENU "page_cache" +#define NETDATA_CACHESTAT_CGROUP_SUBMENU "page cache (eBPF)" #define EBPF_CACHESTAT_DIMENSION_PAGE "pages/s" #define EBPF_CACHESTAT_DIMENSION_HITS "hits/s" @@ -20,6 +24,17 @@ // configuration file #define NETDATA_CACHESTAT_CONFIG_FILE "cachestat.conf" +// Contexts +#define NETDATA_CGROUP_CACHESTAT_HIT_RATIO_CONTEXT "cgroup.cachestat_ratio" +#define 
NETDATA_CGROUP_CACHESTAT_MODIFIED_CACHE_CONTEXT "cgroup.cachestat_dirties" +#define NETDATA_CGROUP_CACHESTAT_HIT_FILES_CONTEXT "cgroup.cachestat_hits" +#define NETDATA_CGROUP_CACHESTAT_MISS_FILES_CONTEXT "cgroup.cachestat_misses" + +#define NETDATA_SYSTEMD_CACHESTAT_HIT_RATIO_CONTEXT "services.cachestat_ratio" +#define NETDATA_SYSTEMD_CACHESTAT_MODIFIED_CACHE_CONTEXT "services.cachestat_dirties" +#define NETDATA_SYSTEMD_CACHESTAT_HIT_FILE_CONTEXT "services.cachestat_hits" +#define NETDATA_SYSTEMD_CACHESTAT_MISS_FILES_CONTEXT "services.cachestat_misses" + // variables enum cachestat_counters { NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU, @@ -62,4 +77,6 @@ typedef struct netdata_publish_cachestat { extern void *ebpf_cachestat_thread(void *ptr); extern void clean_cachestat_pid_structures(); +extern struct config cachestat_config; + #endif // NETDATA_EBPF_CACHESTAT_H diff --git a/collectors/ebpf.plugin/ebpf_cgroup.c b/collectors/ebpf.plugin/ebpf_cgroup.c new file mode 100644 index 000000000..ecdc46c0b --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_cgroup.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include + +#include "ebpf.h" +#include "ebpf_cgroup.h" + +ebpf_cgroup_target_t *ebpf_cgroup_pids = NULL; + +// -------------------------------------------------------------------------------------------------------------------- +// Map shared memory + +/** + * Map Shared Memory locally + * + * Map the shared memory for current process + * + * @param fd file descriptor returned after shm_open was called. + * @param length length of the shared memory + * + * @return It returns a pointer to the region mapped. + */ +static inline void *ebpf_cgroup_map_shm_locally(int fd, size_t length) +{ + void *value; + + value = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (!value) { + error("Cannot map shared memory used between eBPF and cgroup, integration between processes won't happen"); + close(shm_fd_ebpf_cgroup); + shm_fd_ebpf_cgroup = -1; + shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME); + } + + return value; +} + +/** + * Map cgroup shared memory + * + * Map cgroup shared memory from cgroup to plugin + */ +void ebpf_map_cgroup_shared_memory() +{ + static int limit_try = 0; + static time_t next_try = 0; + + if (shm_ebpf_cgroup.header || limit_try > NETDATA_EBPF_CGROUP_MAX_TRIES) + return; + + time_t curr_time = time(NULL); + if (curr_time < next_try) + return; + + limit_try++; + next_try = curr_time + NETDATA_EBPF_CGROUP_NEXT_TRY_SEC; + + shm_fd_ebpf_cgroup = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_RDWR, 0660); + if (shm_fd_ebpf_cgroup < 0) { + if (limit_try == NETDATA_EBPF_CGROUP_MAX_TRIES) + error("Shared memory was not initialized, integration between processes won't happen."); + + return; + } + + // Map only header + shm_ebpf_cgroup.header = (netdata_ebpf_cgroup_shm_header_t *) ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup, + sizeof(netdata_ebpf_cgroup_shm_header_t)); + if (!shm_ebpf_cgroup.header) { + limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1; + return; + } + + size_t length = shm_ebpf_cgroup.header->body_length; + + munmap(shm_ebpf_cgroup.header, sizeof(netdata_ebpf_cgroup_shm_header_t)); + + shm_ebpf_cgroup.header = (netdata_ebpf_cgroup_shm_header_t *)ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup, length); + if (!shm_ebpf_cgroup.header) { + limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1; + return; + } + shm_ebpf_cgroup.body = (netdata_ebpf_cgroup_shm_body_t *) ((char *)shm_ebpf_cgroup.header + + sizeof(netdata_ebpf_cgroup_shm_header_t)); 
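+ + // The region is intentionally mapped twice: the first, header-sized mapping exists only to read body_length; the second covers the header plus the whole body, so shm_ebpf_cgroup.body can point just past the header.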
+ + shm_sem_ebpf_cgroup = sem_open(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME, O_CREAT, 0660, 1); + + if (shm_sem_ebpf_cgroup == SEM_FAILED) { + error("Cannot create semaphore, integration between eBPF and cgroup won't happen"); + munmap(shm_ebpf_cgroup.header, length); + shm_ebpf_cgroup.header = NULL; + close(shm_fd_ebpf_cgroup); + shm_fd_ebpf_cgroup = -1; + shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME); + } +} + +// -------------------------------------------------------------------------------------------------------------------- +// Close and Cleanup + +/** + * Close shared memory + */ +void ebpf_close_cgroup_shm() +{ + if (shm_sem_ebpf_cgroup != SEM_FAILED) { + sem_close(shm_sem_ebpf_cgroup); + sem_unlink(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME); + shm_sem_ebpf_cgroup = SEM_FAILED; + } + + if (shm_fd_ebpf_cgroup > 0) { + close(shm_fd_ebpf_cgroup); + shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME); + shm_fd_ebpf_cgroup = -1; + } +} + +/** + * Clean specific cgroup PIDs + * + * Free all PIDs associated with a cgroup. + * + * @param pt the pid_on_target2 list whose entries will be freed + */ +static inline void ebpf_clean_specific_cgroup_pids(struct pid_on_target2 *pt) +{ + while (pt) { + struct pid_on_target2 *next_pid = pt->next; + + freez(pt); + pt = next_pid; + } +} + +/** + * Cleanup link list + */ +void ebpf_clean_cgroup_pids() +{ + if (!ebpf_cgroup_pids) + return; + + ebpf_cgroup_target_t *ect = ebpf_cgroup_pids; + while (ect) { + ebpf_cgroup_target_t *next_cgroup = ect->next; + + ebpf_clean_specific_cgroup_pids(ect->pids); + freez(ect); + + ect = next_cgroup; + } + ebpf_cgroup_pids = NULL; +} + +/** + * Remove cgroup targets from the update list + * + * Remove targets that were not updated and relink the list. + */ +static void ebpf_remove_cgroup_target_update_list() +{ + ebpf_cgroup_target_t *next, *ect = ebpf_cgroup_pids; + ebpf_cgroup_target_t *prev = ebpf_cgroup_pids; + while (ect) { + next = ect->next; + if (!ect->updated) { + if (ect == ebpf_cgroup_pids) { + ebpf_cgroup_pids = next; + prev = next; + } else { + prev->next = next; + } + + ebpf_clean_specific_cgroup_pids(ect->pids); + freez(ect); + } else { + prev = ect; + } + + ect = next; + } +} + +// -------------------------------------------------------------------------------------------------------------------- +// Fill variables + +/** + * Set Target Data + * + * Set local variable values according to shared memory information. + * + * @param out local output variable. + * @param ptr input from shared memory. + */ +static inline void ebpf_cgroup_set_target_data(ebpf_cgroup_target_t *out, netdata_ebpf_cgroup_shm_body_t *ptr) +{ + out->hash = ptr->hash; + snprintfz(out->name, 255, "%s", ptr->name); + out->systemd = ptr->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE; + out->updated = 1; +} + +/** + * Find or create + * + * Find the structure inside the link list or allocate and link it when it is not present. + * + * @param ptr Input from shared memory. + * + * @return It returns a pointer to the structure associated with the input. 
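+ * + * Call-site sketch, as used by ebpf_parse_cgroup_shm_data() below: + * + *    ebpf_cgroup_target_t *ect = ebpf_cgroup_find_or_create(ptr); + *    ebpf_update_pid_link_list(ect, ptr->path);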
+ */ +static ebpf_cgroup_target_t *ebpf_cgroup_find_or_create(netdata_ebpf_cgroup_shm_body_t *ptr) +{ + ebpf_cgroup_target_t *ect, *prev; + for (ect = ebpf_cgroup_pids, prev = ebpf_cgroup_pids; ect; prev = ect, ect = ect->next) { + if (ect->hash == ptr->hash && !strcmp(ect->name, ptr->name)) { + ect->updated = 1; + return ect; + } + } + + ebpf_cgroup_target_t *new_ect = callocz(1, sizeof(ebpf_cgroup_target_t)); + + ebpf_cgroup_set_target_data(new_ect, ptr); + if (!ebpf_cgroup_pids) { + ebpf_cgroup_pids = new_ect; + } else { + prev->next = new_ect; + } + + return new_ect; +} + +/** + * Update pid link list + * + * Update the list of PIDs associated with a specific cgroup. + * + * @param ect cgroup structure where pids will be stored + * @param path file with PIDs associated to cgroup. + */ +static void ebpf_update_pid_link_list(ebpf_cgroup_target_t *ect, char *path) +{ + procfile *ff = procfile_open(path, " \t:", PROCFILE_FLAG_DEFAULT); + if (!ff) + return; + + ff = procfile_readall(ff); + if (!ff) + return; + + size_t lines = procfile_lines(ff), l; + for (l = 0; l < lines ;l++) { + int pid = (int)str2l(procfile_lineword(ff, l, 0)); + if (pid) { + struct pid_on_target2 *pt, *prev; + for (pt = ect->pids, prev = ect->pids; pt; prev = pt, pt = pt->next) { + if (pt->pid == pid) + break; + } + + if (!pt) { + struct pid_on_target2 *w = callocz(1, sizeof(struct pid_on_target2)); + w->pid = pid; + if (!ect->pids) + ect->pids = w; + else + prev->next = w; + } + } + } + + procfile_close(ff); +} + +/** + * Reset updated variable + * + * Clear the updated flag for all targets. A target that does not set the flag again during the next parse is removed from the link list. + */ + void ebpf_reset_updated_var() + { + ebpf_cgroup_target_t *ect; + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + ect->updated = 0; + } + } + +/** + * Parse cgroup shared memory + * + * This function is responsible for copying the necessary data from shared memory to local memory. + */ +void ebpf_parse_cgroup_shm_data() +{ + if (shm_ebpf_cgroup.header) { + sem_wait(shm_sem_ebpf_cgroup); + int i, end = shm_ebpf_cgroup.header->cgroup_root_count; + + pthread_mutex_lock(&mutex_cgroup_shm); + + ebpf_remove_cgroup_target_update_list(); + + ebpf_reset_updated_var(); + + for (i = 0; i < end; i++) { + netdata_ebpf_cgroup_shm_body_t *ptr = &shm_ebpf_cgroup.body[i]; + if (ptr->enabled) { + ebpf_cgroup_target_t *ect = ebpf_cgroup_find_or_create(ptr); + ebpf_update_pid_link_list(ect, ptr->path); + } + } + pthread_mutex_unlock(&mutex_cgroup_shm); + + sem_post(shm_sem_ebpf_cgroup); + } +} + +// -------------------------------------------------------------------------------------------------------------------- +// Create charts + +/** + * Create charts on systemd submenu + * + * @param id the chart id + * @param title the chart title. + * @param units the value displayed on vertical axis. + * @param family submenu that the chart will be attached to on the dashboard. + * @param charttype chart type + * @param order the chart order + * @param algorithm the algorithm used by dimension + * @param context add context for chart + * @param module chart module name, this is the eBPF thread. + * @param update_every value to overwrite the update frequency set by the server. 
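+ * + * One dimension is written per systemd service currently flagged as updated.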
+ */ +void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order, + char *algorithm, char *context, char *module, int update_every) +{ + ebpf_cgroup_target_t *w; + ebpf_write_chart_cmd(NETDATA_SERVICE_FAMILY, id, title, units, family, charttype, context, + order, update_every, module); + + for (w = ebpf_cgroup_pids; w; w = w->next) { + if (unlikely(w->systemd) && unlikely(w->updated)) + fprintf(stdout, "DIMENSION %s '' %s 1 1\n", w->name, algorithm); + } +} diff --git a/collectors/ebpf.plugin/ebpf_cgroup.h b/collectors/ebpf.plugin/ebpf_cgroup.h new file mode 100644 index 000000000..03969194a --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_cgroup.h @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_EBPF_CGROUP_H +#define NETDATA_EBPF_CGROUP_H 1 + +#define NETDATA_EBPF_CGROUP_MAX_TRIES 3 +#define NETDATA_EBPF_CGROUP_NEXT_TRY_SEC 30 + +#include "ebpf.h" +#include "ebpf_apps.h" + +#define NETDATA_SERVICE_FAMILY "services" + +struct pid_on_target2 { + int32_t pid; + int updated; + + netdata_publish_swap_t swap; + netdata_fd_stat_t fd; + netdata_publish_vfs_t vfs; + ebpf_process_stat_t ps; + netdata_dcstat_pid_t dc; + netdata_publish_shm_t shm; + ebpf_bandwidth_t socket; + netdata_cachestat_pid_t cachestat; + + struct pid_on_target2 *next; +}; + +enum ebpf_cgroup_flags { + NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART = 1, + NETDATA_EBPF_CGROUP_HAS_SWAP_CHART = 1<<2, + NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART = 1<<3, + NETDATA_EBPF_CGROUP_HAS_FD_CHART = 1<<4, + NETDATA_EBPF_CGROUP_HAS_VFS_CHART = 1<<5, + NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART = 1<<6, + NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART = 1<<7, + NETDATA_EBPF_CGROUP_HAS_DC_CHART = 1<<8, + NETDATA_EBPF_CGROUP_HAS_SHM_CHART = 1<<9 +}; + +typedef struct ebpf_cgroup_target { + char name[256]; // title + uint32_t hash; + uint32_t flags; + uint32_t systemd; + uint32_t updated; + + netdata_publish_swap_t publish_systemd_swap; + netdata_fd_stat_t publish_systemd_fd; + netdata_publish_vfs_t publish_systemd_vfs; + ebpf_process_stat_t publish_systemd_ps; + netdata_publish_dcstat_t publish_dc; + int oomkill; + netdata_publish_shm_t publish_shm; + ebpf_socket_publish_apps_t publish_socket; + netdata_publish_cachestat_t publish_cachestat; + + struct pid_on_target2 *pids; + struct ebpf_cgroup_target *next; +} ebpf_cgroup_target_t; + +extern void ebpf_map_cgroup_shared_memory(); +extern void ebpf_parse_cgroup_shm_data(); +extern void ebpf_close_cgroup_shm(); +extern void ebpf_clean_cgroup_pids(); +extern void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order, + char *algorithm, char *context, char *module, int update_every); + +#endif /* NETDATA_EBPF_CGROUP_H */ diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c index 01fd97972..7ae821889 100644 --- a/collectors/ebpf.plugin/ebpf_dcstat.c +++ b/collectors/ebpf.plugin/ebpf_dcstat.c @@ -7,16 +7,14 @@ static char *dcstat_counter_dimension_name[NETDATA_DCSTAT_IDX_END] = { "ratio", static netdata_syscall_stat_t dcstat_counter_aggregated_data[NETDATA_DCSTAT_IDX_END]; static netdata_publish_syscall_t dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_END]; -static ebpf_data_t dcstat_data; - netdata_dcstat_pid_t *dcstat_vector = NULL; netdata_publish_dcstat_t **dcstat_pid = NULL; static struct bpf_link **probe_links = NULL; static struct bpf_object *objects = NULL; -static int *map_fd = NULL; static netdata_idx_t 
dcstat_hash_values[NETDATA_DCSTAT_IDX_END]; +static netdata_idx_t *dcstat_values = NULL; static int read_thread_closed = 1; @@ -30,9 +28,20 @@ struct netdata_static_thread dcstat_threads = {"DCSTAT KERNEL", NULL, NULL, 1, NULL, NULL, NULL}; -static ebpf_local_maps_t dcstat_maps[] = {{.name = "dcstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE, - .user_input = 0}, - {.name = NULL, .internal_input = 0, .user_input = 0}}; +static ebpf_local_maps_t dcstat_maps[] = {{.name = "dcstat_global", .internal_input = NETDATA_DIRECTORY_CACHE_END, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "dcstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE, + .user_input = 0, + .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "dcstat_ctrl", .internal_input = NETDATA_CONTROLLER_END, + .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = NULL, .internal_input = 0, .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}}; static ebpf_specify_name_t dc_optional_name[] = { {.program_name = "netdata_lookup_fast", .function_to_attach = "lookup_fast", @@ -51,7 +60,7 @@ static ebpf_specify_name_t dc_optional_name[] = { {.program_name = "netdata_look * * Update publish values before to write dimension. * - * @param out strcuture that will receive data. + * @param out structure that will receive data. * @param cache_access number of access to directory cache. * @param not_found number of files not found on the file system */ @@ -117,18 +126,21 @@ static void ebpf_dcstat_cleanup(void *ptr) } freez(dcstat_vector); + freez(dcstat_values); ebpf_cleanup_publish_syscall(dcstat_counter_publish_aggregated); ebpf_dcstat_clean_names(); - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; + if (probe_links) { + struct bpf_program *prog; + size_t i = 0 ; + bpf_object__for_each_program(prog, objects) { + bpf_link__destroy(probe_links[i]); + i++; + } + bpf_object__close(objects); } - bpf_object__close(objects); } /***************************************************************** @@ -146,43 +158,42 @@ static void ebpf_dcstat_cleanup(void *ptr) */ void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr) { - UNUSED(em); struct target *root = ptr; ebpf_create_charts_on_apps(NETDATA_DC_HIT_CHART, "Percentage of files listed inside directory cache", EBPF_COMMON_DIMENSION_PERCENTAGE, - NETDATA_APPS_DCSTAT_GROUP, + NETDATA_DIRECTORY_CACHE_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, 20100, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); ebpf_create_charts_on_apps(NETDATA_DC_REFERENCE_CHART, "Count file access.", EBPF_COMMON_DIMENSION_FILES, - NETDATA_APPS_DCSTAT_GROUP, + NETDATA_DIRECTORY_CACHE_SUBMENU, NETDATA_EBPF_CHART_TYPE_STACKED, 20101, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); ebpf_create_charts_on_apps(NETDATA_DC_REQUEST_NOT_CACHE_CHART, "Access to files that were not present inside directory cache.", EBPF_COMMON_DIMENSION_FILES, - NETDATA_APPS_DCSTAT_GROUP, + NETDATA_DIRECTORY_CACHE_SUBMENU, NETDATA_EBPF_CHART_TYPE_STACKED, 20102, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); 
ebpf_create_charts_on_apps(NETDATA_DC_REQUEST_NOT_FOUND_CHART, "Number of requests for files that were not found on filesystem.", EBPF_COMMON_DIMENSION_FILES, - NETDATA_APPS_DCSTAT_GROUP, + NETDATA_DIRECTORY_CACHE_SUBMENU, NETDATA_EBPF_CHART_TYPE_STACKED, 20103, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); } /***************************************************************** @@ -252,7 +263,7 @@ static void read_apps_table() netdata_dcstat_pid_t *cv = dcstat_vector; uint32_t key; struct pid_stat *pids = root_of_pids; - int fd = map_fd[NETDATA_DCSTAT_PID_STATS]; + int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd; size_t length = sizeof(netdata_dcstat_pid_t)*ebpf_nprocs; while (pids) { key = pids->pid; @@ -273,6 +284,43 @@ static void read_apps_table() } } +/** + * Update cgroup + * + * Update cgroup data based on the per-PID values collected by the thread. + */ +static void ebpf_update_dc_cgroup() +{ + netdata_dcstat_pid_t *cv = dcstat_vector; + int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd; + size_t length = sizeof(netdata_dcstat_pid_t)*ebpf_nprocs; + + ebpf_cgroup_target_t *ect; + pthread_mutex_lock(&mutex_cgroup_shm); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + struct pid_on_target2 *pids; + for (pids = ect->pids; pids; pids = pids->next) { + int pid = pids->pid; + netdata_dcstat_pid_t *out = &pids->dc; + if (likely(dcstat_pid) && dcstat_pid[pid]) { + netdata_publish_dcstat_t *in = dcstat_pid[pid]; + + memcpy(out, &in->curr, sizeof(netdata_dcstat_pid_t)); + } else { + memset(cv, 0, length); + if (bpf_map_lookup_elem(fd, &pid, cv)) { + continue; + } + + dcstat_apps_accumulator(cv); + + memcpy(out, cv, sizeof(netdata_dcstat_pid_t)); + } + } + } + pthread_mutex_unlock(&mutex_cgroup_shm); +} + /** * Read global table * * @@ -282,12 +330,18 @@ static void read_global_table() { uint32_t idx; netdata_idx_t *val = dcstat_hash_values; - netdata_idx_t stored; - int fd = map_fd[NETDATA_DCSTAT_GLOBAL_STATS]; + netdata_idx_t *stored = dcstat_values; + int fd = dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS].map_fd; for (idx = NETDATA_KEY_DC_REFERENCE; idx < NETDATA_DIRECTORY_CACHE_END; idx++) { - if (!bpf_map_lookup_elem(fd, &idx, &stored)) { - val[idx] = stored; + if (!bpf_map_lookup_elem(fd, &idx, stored)) { + int i; + int end = ebpf_nprocs; + netdata_idx_t total = 0; + for (i = 0; i < end; i++) + total += stored[i]; + + val[idx] = total; } } } @@ -311,7 +365,7 @@ void *ebpf_dcstat_read_hash(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; - usec_t step = NETDATA_LATENCY_DCSTAT_SLEEP_MS * em->update_time; + usec_t step = NETDATA_LATENCY_DCSTAT_SLEEP_MS * em->update_every; while (!close_ebpf_plugin) { usec_t dt = heartbeat_next(&hb, step); (void)dt; @@ -350,7 +404,7 @@ void ebpf_dcstat_sum_pids(netdata_publish_dcstat_t *publish, struct pid_on_targe } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param root the target list. */ @@ -456,6 +510,324 @@ static void dcstat_send_global(netdata_publish_dcstat_t *publish) &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_REFERENCE], 3); } +/** + * Create specific directory cache charts + * + * Create charts for cgroup/application. + * + * @param type the chart type. + * @param update_every value to overwrite the update frequency set by the server. 
+ */ +static void ebpf_create_specific_dc_charts(char *type, int update_every) +{ + ebpf_create_chart(type, NETDATA_DC_HIT_CHART, "Percentage of files listed inside directory cache.", + EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_CGROUP_DC_HIT_RATIO_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5700, + ebpf_create_global_dimension, + dcstat_counter_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); + + ebpf_create_chart(type, NETDATA_DC_REFERENCE_CHART, "Count file access.", + EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_CGROUP_DC_REFERENCE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5701, + ebpf_create_global_dimension, + &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_REFERENCE], 1, + update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); + + ebpf_create_chart(type, NETDATA_DC_REQUEST_NOT_CACHE_CHART, + "Access to files that were not present inside directory cache.", + EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_CGROUP_DC_NOT_CACHE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5702, + ebpf_create_global_dimension, + &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_SLOW], 1, + update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); + + ebpf_create_chart(type, NETDATA_DC_REQUEST_NOT_FOUND_CHART, + "Number of requests for files that were not found on filesystem.", + EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_CGROUP_DC_NOT_FOUND_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5703, + ebpf_create_global_dimension, + &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_MISS], 1, + update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); +} + +/** + * Obsolete specific directory cache charts + * + * Obsolete charts for cgroup/application. + * + * @param type the chart type. + * @param update_every value to overwrite the update frequency set by the server. + */ +static void ebpf_obsolete_specific_dc_charts(char *type, int update_every) +{ + ebpf_write_chart_obsolete(type, NETDATA_DC_HIT_CHART, + "Percentage of files listed inside directory cache.", + EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_HIT_RATIO_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5700, update_every); + + ebpf_write_chart_obsolete(type, NETDATA_DC_REFERENCE_CHART, + "Count file access.", + EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_REFERENCE_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5701, update_every); + + ebpf_write_chart_obsolete(type, NETDATA_DC_REQUEST_NOT_CACHE_CHART, + "Access to files that were not present inside directory cache.", + EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_NOT_CACHE_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5702, update_every); + + ebpf_write_chart_obsolete(type, NETDATA_DC_REQUEST_NOT_FOUND_CHART, + "Number of requests for files that were not found on filesystem.", + EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_NOT_FOUND_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5703, update_every); +} + +/** + * Directory Cache sum PIDs + * + * Sum values for all PIDs associated with a group + * + * @param publish output structure. 
+ * @param root structure with the list of PIDs + */ +void ebpf_dc_sum_cgroup_pids(netdata_publish_dcstat_t *publish, struct pid_on_target2 *root) +{ + memset(&publish->curr, 0, sizeof(netdata_dcstat_pid_t)); + netdata_dcstat_pid_t *dst = &publish->curr; + while (root) { + netdata_dcstat_pid_t *src = &root->dc; + + dst->cache_access += src->cache_access; + dst->file_system += src->file_system; + dst->not_found += src->not_found; + + root = root->next; + } +} + +/** + * Calc chart values + * + * Do necessary math to plot charts. + */ +void ebpf_dc_calc_chart_values() +{ + ebpf_cgroup_target_t *ect; + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + ebpf_dc_sum_cgroup_pids(&ect->publish_dc, ect->pids); + uint64_t cache = ect->publish_dc.curr.cache_access; + uint64_t not_found = ect->publish_dc.curr.not_found; + + dcstat_update_publish(&ect->publish_dc, cache, not_found); + + ect->publish_dc.cache_access = (long long)ect->publish_dc.curr.cache_access - + (long long)ect->publish_dc.prev.cache_access; + ect->publish_dc.prev.cache_access = ect->publish_dc.curr.cache_access; + + if (ect->publish_dc.curr.not_found < ect->publish_dc.prev.not_found) { + ect->publish_dc.prev.not_found = 0; + } + } +} + +/** + * Create systemd directory cache charts + * + * Create charts when systemd is enabled + * + * @param update_every value to overwrite the update frequency set by the server. + **/ +static void ebpf_create_systemd_dc_charts(int update_every) +{ + ebpf_create_charts_on_systemd(NETDATA_DC_HIT_CHART, + "Percentage of files listed inside directory cache.", + EBPF_COMMON_DIMENSION_PERCENTAGE, + NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, + 21200, + ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], + NETDATA_SYSTEMD_DC_HIT_RATIO_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT, + update_every); + + ebpf_create_charts_on_systemd(NETDATA_DC_REFERENCE_CHART, + "Count file access.", + EBPF_COMMON_DIMENSION_FILES, + NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, + 21201, + ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], + NETDATA_SYSTEMD_DC_REFERENCE_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT, + update_every); + + ebpf_create_charts_on_systemd(NETDATA_DC_REQUEST_NOT_CACHE_CHART, + "Access to files that were not present inside directory cache.", + EBPF_COMMON_DIMENSION_FILES, + NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, + 21202, + ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], + NETDATA_SYSTEMD_DC_NOT_CACHE_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT, + update_every); + + ebpf_create_charts_on_systemd(NETDATA_DC_REQUEST_NOT_FOUND_CHART, + "Number of requests for files that were not found on filesystem.", + EBPF_COMMON_DIMENSION_FILES, + NETDATA_DIRECTORY_CACHE_SUBMENU, + NETDATA_EBPF_CHART_TYPE_LINE, + 21203, + ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], + NETDATA_SYSTEMD_DC_NOT_FOUND_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT, + update_every); +} + +/** + * Send Directory Cache charts + * + * Send collected data to Netdata. 
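+ * The not-cached and not-found dimensions are sent as the difference between the current and previous readings, and the previous counters are updated in place.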
+ * + * @return It returns the status for chart creation: zero is returned when a specific dimension must be removed, + * otherwise the function returns 1 to avoid chart recreation + */ +static int ebpf_send_systemd_dc_charts() +{ + int ret = 1; + collected_number value; + ebpf_cgroup_target_t *ect; + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_HIT_CHART); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, (long long) ect->publish_dc.ratio); + } else + ret = 0; + } + write_end_chart(); + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REFERENCE_CHART); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, (long long) ect->publish_dc.cache_access); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REQUEST_NOT_CACHE_CHART); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + value = (collected_number) (!ect->publish_dc.cache_access) ? 0 : + (long long)ect->publish_dc.curr.file_system - (long long)ect->publish_dc.prev.file_system; + ect->publish_dc.prev.file_system = ect->publish_dc.curr.file_system; + + write_chart_dimension(ect->name, (long long) value); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REQUEST_NOT_FOUND_CHART); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + value = (collected_number) (!ect->publish_dc.cache_access) ? 0 : + (long long)ect->publish_dc.curr.not_found - (long long)ect->publish_dc.prev.not_found; + + ect->publish_dc.prev.not_found = ect->publish_dc.curr.not_found; + + write_chart_dimension(ect->name, (long long) value); + } + } + write_end_chart(); + + return ret; +} + +/** + * Send specific Directory Cache data + * + * Send collected data to Netdata for a single cgroup. + * + */ +static void ebpf_send_specific_dc_data(char *type, netdata_publish_dcstat_t *pdc) +{ + collected_number value; + write_begin_chart(type, NETDATA_DC_HIT_CHART); + write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_RATIO].name, + (long long) pdc->ratio); + write_end_chart(); + + write_begin_chart(type, NETDATA_DC_REFERENCE_CHART); + write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_REFERENCE].name, + (long long) pdc->cache_access); + write_end_chart(); + + value = (collected_number) (!pdc->cache_access) ? 0 : + (long long)pdc->curr.file_system - (long long)pdc->prev.file_system; + pdc->prev.file_system = pdc->curr.file_system; + + write_begin_chart(type, NETDATA_DC_REQUEST_NOT_CACHE_CHART); + write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_SLOW].name, (long long) value); + write_end_chart(); + + value = (collected_number) (!pdc->cache_access) ? 0 : + (long long)pdc->curr.not_found - (long long)pdc->prev.not_found; + pdc->prev.not_found = pdc->curr.not_found; + + write_begin_chart(type, NETDATA_DC_REQUEST_NOT_FOUND_CHART); + write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_MISS].name, (long long) value); + write_end_chart(); +} + +/** + * Send data to Netdata calling auxiliary functions. + * + * @param update_every value to overwrite the update frequency set by the server. 
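+ * + * Called once per update interval from dcstat_collector(); the function acquires mutex_cgroup_shm itself before walking the cgroup list.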
+*/ +void ebpf_dc_send_cgroup_data(int update_every) +{ + if (!ebpf_cgroup_pids) + return; + + pthread_mutex_lock(&mutex_cgroup_shm); + ebpf_cgroup_target_t *ect; + ebpf_dc_calc_chart_values(); + + int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; + if (has_systemd) { + static int systemd_charts = 0; + if (!systemd_charts) { + ebpf_create_systemd_dc_charts(update_every); + systemd_charts = 1; + } + + systemd_charts = ebpf_send_systemd_dc_charts(); + } + + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (ect->systemd) + continue; + + if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_DC_CHART) && ect->updated) { + ebpf_create_specific_dc_charts(ect->name, update_every); + ect->flags |= NETDATA_EBPF_CGROUP_HAS_DC_CHART; + } + + if (ect->flags & NETDATA_EBPF_CGROUP_HAS_DC_CHART) { + if (ect->updated) { + ebpf_send_specific_dc_data(ect->name, &ect->publish_dc); + } else { + ebpf_obsolete_specific_dc_charts(ect->name, update_every); + ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_DC_CHART; + } + } + } + + pthread_mutex_unlock(&mutex_cgroup_shm); +} + /** * Main loop for this collector. */ @@ -464,29 +836,40 @@ static void dcstat_collector(ebpf_module_t *em) dcstat_threads.thread = mallocz(sizeof(netdata_thread_t)); dcstat_threads.start_routine = ebpf_dcstat_read_hash; - map_fd = dcstat_data.map_fd; - netdata_thread_create(dcstat_threads.thread, dcstat_threads.name, NETDATA_THREAD_OPTION_JOINABLE, ebpf_dcstat_read_hash, em); netdata_publish_dcstat_t publish; memset(&publish, 0, sizeof(publish)); int apps = em->apps_charts; + int cgroups = em->cgroup_charts; + int update_every = em->update_every; + int counter = update_every - 1; while (!close_ebpf_plugin) { pthread_mutex_lock(&collect_data_mutex); pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); - if (apps) - read_apps_table(); + if (++counter == update_every) { + counter = 0; + if (apps) + read_apps_table(); - pthread_mutex_lock(&lock); + if (cgroups) + ebpf_update_dc_cgroup(); - dcstat_send_global(&publish); + pthread_mutex_lock(&lock); - if (apps) - ebpf_dcache_send_apps_data(apps_groups_root_target); + dcstat_send_global(&publish); + + if (apps) + ebpf_dcache_send_apps_data(apps_groups_root_target); + + if (cgroups) + ebpf_dc_send_cgroup_data(update_every); + + pthread_mutex_unlock(&lock); + } - pthread_mutex_unlock(&lock); pthread_mutex_unlock(&collect_data_mutex); } } @@ -501,26 +884,29 @@ static void dcstat_collector(ebpf_module_t *em) * Create filesystem charts * * Call ebpf_create_chart to create the charts for the collector. + * + * @param update_every value to overwrite the update frequency set by the server. 
*/ -static void ebpf_create_filesystem_charts() +static void ebpf_create_filesystem_charts(int update_every) { ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, NETDATA_DC_HIT_CHART, "Percentage of files listed inside directory cache", - EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_DIRECTORY_FILESYSTEM_SUBMENU, + EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_DIRECTORY_CACHE_SUBMENU, NULL, NETDATA_EBPF_CHART_TYPE_LINE, 21200, ebpf_create_global_dimension, - dcstat_counter_publish_aggregated, 1); + dcstat_counter_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, NETDATA_DC_REFERENCE_CHART, "Variables used to calculate hit ratio.", - EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_FILESYSTEM_SUBMENU, + EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU, NULL, NETDATA_EBPF_CHART_TYPE_LINE, 21201, ebpf_create_global_dimension, - &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_REFERENCE], 3); + &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_REFERENCE], 3, + update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT); fflush(stdout); } @@ -531,15 +917,18 @@ static void ebpf_create_filesystem_charts() * We are not testing the return, because callocz does this and shutdown the software * case it was not possible to allocate. * - * @param length is the length for the vectors used inside the collector. + * @param apps is apps enabled? */ -static void ebpf_dcstat_allocate_global_vectors(size_t length) +static void ebpf_dcstat_allocate_global_vectors(int apps) { - dcstat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_dcstat_t *)); + if (apps) + dcstat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_dcstat_t *)); + dcstat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_dcstat_pid_t)); + dcstat_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t)); - memset(dcstat_counter_aggregated_data, 0, length*sizeof(netdata_syscall_stat_t)); - memset(dcstat_counter_publish_aggregated, 0, length*sizeof(netdata_publish_syscall_t)); + memset(dcstat_counter_aggregated_data, 0, NETDATA_DCSTAT_IDX_END * sizeof(netdata_syscall_stat_t)); + memset(dcstat_counter_publish_aggregated, 0, NETDATA_DCSTAT_IDX_END * sizeof(netdata_publish_syscall_t)); } /***************************************************************** @@ -563,21 +952,19 @@ void *ebpf_dcstat_thread(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = dcstat_maps; - fill_ebpf_data(&dcstat_data); - ebpf_update_module(em, &dcstat_config, NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE); - ebpf_update_pid_table(&dcstat_maps[0], em); + ebpf_update_pid_table(&dcstat_maps[NETDATA_DCSTAT_PID_STATS], em); ebpf_update_names(dc_optional_name, em); if (!em->enabled) goto enddcstat; - ebpf_dcstat_allocate_global_vectors(NETDATA_DCSTAT_IDX_END); + ebpf_dcstat_allocate_global_vectors(em->apps_charts); pthread_mutex_lock(&lock); - probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, dcstat_data.map_fd); + probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects); if (!probe_links) { pthread_mutex_unlock(&lock); goto enddcstat; @@ -592,7 +979,7 @@ void *ebpf_dcstat_thread(void *ptr) dcstat_counter_dimension_name, dcstat_counter_dimension_name, algorithms, NETDATA_DCSTAT_IDX_END); - ebpf_create_filesystem_charts(); + ebpf_create_filesystem_charts(em->update_every); pthread_mutex_unlock(&lock); dcstat_collector(em); diff --git a/collectors/ebpf.plugin/ebpf_dcstat.h b/collectors/ebpf.plugin/ebpf_dcstat.h index ad4bd1992..c5e6e2bcf 100644 --- 
a/collectors/ebpf.plugin/ebpf_dcstat.h +++ b/collectors/ebpf.plugin/ebpf_dcstat.h @@ -3,6 +3,8 @@ #ifndef NETDATA_EBPF_DCSTAT_H #define NETDATA_EBPF_DCSTAT_H 1 +// Module name +#define NETDATA_EBPF_MODULE_NAME_DCSTAT "dcstat" // charts #define NETDATA_DC_HIT_CHART "dc_hit_ratio" @@ -11,11 +13,21 @@ #define NETDATA_DC_REQUEST_NOT_FOUND_CHART "dc_not_found" #define NETDATA_DIRECTORY_CACHE_SUBMENU "directory cache (eBPF)" -#define NETDATA_DIRECTORY_FILESYSTEM_SUBMENU "Directory Cache (eBPF)" // configuration file #define NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE "dcstat.conf" +// Contexts +#define NETDATA_CGROUP_DC_HIT_RATIO_CONTEXT "cgroup.dc_ratio" +#define NETDATA_CGROUP_DC_REFERENCE_CONTEXT "cgroup.dc_reference" +#define NETDATA_CGROUP_DC_NOT_CACHE_CONTEXT "cgroup.dc_not_cache" +#define NETDATA_CGROUP_DC_NOT_FOUND_CONTEXT "cgroup.dc_not_found" + +#define NETDATA_SYSTEMD_DC_HIT_RATIO_CONTEXT "services.dc_ratio" +#define NETDATA_SYSTEMD_DC_REFERENCE_CONTEXT "services.dc_reference" +#define NETDATA_SYSTEMD_DC_NOT_CACHE_CONTEXT "services.dc_not_cache" +#define NETDATA_SYSTEMD_DC_NOT_FOUND_CONTEXT "services.dc_not_found" + #define NETDATA_LATENCY_DCSTAT_SLEEP_MS 700000ULL enum directory_cache_indexes { @@ -60,5 +72,6 @@ typedef struct netdata_publish_dcstat { extern void *ebpf_dcstat_thread(void *ptr); extern void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr); extern void clean_dcstat_pid_structures(); +extern struct config dcstat_config; #endif // NETDATA_EBPF_DCSTAT_H diff --git a/collectors/ebpf.plugin/ebpf_disk.c b/collectors/ebpf.plugin/ebpf_disk.c new file mode 100644 index 000000000..6e139ec9f --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_disk.c @@ -0,0 +1,842 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include +#include + +#include "ebpf.h" +#include "ebpf_disk.h" + +struct config disk_config = { .first_section = NULL, + .last_section = NULL, + .mutex = NETDATA_MUTEX_INITIALIZER, + .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, + .rwlock = AVL_LOCK_INITIALIZER } }; + +static ebpf_local_maps_t disk_maps[] = {{.name = "tbl_disk_iocall", .internal_input = NETDATA_DISK_HISTOGRAM_LENGTH, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = NULL, .internal_input = 0, .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}}; +static avl_tree_lock disk_tree; +netdata_ebpf_disks_t *disk_list = NULL; + +char *tracepoint_block_type = { "block"} ; +char *tracepoint_block_issue = { "block_rq_issue" }; +char *tracepoint_block_rq_complete = { "block_rq_complete" }; + +static struct bpf_link **probe_links = NULL; +static struct bpf_object *objects = NULL; + +static int was_block_issue_enabled = 0; +static int was_block_rq_complete_enabled = 0; + +static char **dimensions = NULL; +static netdata_syscall_stat_t disk_aggregated_data[NETDATA_EBPF_HIST_MAX_BINS]; +static netdata_publish_syscall_t disk_publish_aggregated[NETDATA_EBPF_HIST_MAX_BINS]; + +static int read_thread_closed = 1; + +static netdata_idx_t *disk_hash_values = NULL; +static struct netdata_static_thread disk_threads = {"DISK KERNEL", + NULL, NULL, 1, NULL, + NULL, NULL }; + +ebpf_publish_disk_t *plot_disks = NULL; +pthread_mutex_t plot_mutex; + +/***************************************************************** + * + * FUNCTIONS TO MANIPULATE HARD DISKS + * + *****************************************************************/ + +/** + * Parse start + * + * Parse start address of disk + 
* + * @param w structure where data is stored + * @param filename variable used to store value + * + * @return It returns 0 on success and -1 otherwise + */ +static inline int ebpf_disk_parse_start(netdata_ebpf_disks_t *w, char *filename) +{ + char content[FILENAME_MAX + 1]; + int fd = open(filename, O_RDONLY, 0); + if (fd < 0) { + return -1; + } + + ssize_t file_length = read(fd, content, 4095); + if (file_length > 0) { + if (file_length > FILENAME_MAX) + file_length = FILENAME_MAX; + + content[file_length] = '\0'; + w->start = strtoul(content, NULL, 10); + } + close(fd); + + return 0; +} + +/** + * Parse uevent + * + * Parse uevent file + * + * @param w structure where data is stored + * @param filename variable used to store value + * + * @return It returns 0 on success and -1 otherwise + */ +static inline int ebpf_parse_uevent(netdata_ebpf_disks_t *w, char *filename) +{ + char content[FILENAME_MAX + 1]; + int fd = open(filename, O_RDONLY, 0); + if (fd < 0) { + return -1; + } + + ssize_t file_length = read(fd, content, FILENAME_MAX); + if (file_length > 0) { + if (file_length > FILENAME_MAX) + file_length = FILENAME_MAX; + + content[file_length] = '\0'; + + char *s = strstr(content, "PARTNAME=EFI"); + if (s) { + w->main->boot_partition = w; + w->flags |= NETDATA_DISK_HAS_EFI; + w->boot_chart = strdupz("disk_bootsector"); + } + } + close(fd); + + return 0; +} + +/** + * Parse Size + * + * @param w structure where data is stored + * @param filename variable used to store value + * + * @return It returns 0 on success and -1 otherwise + */ +static inline int ebpf_parse_size(netdata_ebpf_disks_t *w, char *filename) +{ + char content[FILENAME_MAX + 1]; + int fd = open(filename, O_RDONLY, 0); + if (fd < 0) { + return -1; + } + + ssize_t file_length = read(fd, content, FILENAME_MAX); + if (file_length > 0) { + if (file_length > FILENAME_MAX) + file_length = FILENAME_MAX; + + content[file_length] = '\0'; + w->end = w->start + strtoul(content, NULL, 10) -1; + } + close(fd); + + return 0; +} + +/** + * Read Disk information + * + * Read disk information from /sys/block + * + * @param w structure where data is stored + * @param name disk name + */ +static void ebpf_read_disk_info(netdata_ebpf_disks_t *w, char *name) +{ + static netdata_ebpf_disks_t *main_disk = NULL; + static uint32_t key = 0; + char *path = { "/sys/block" }; + char disk[NETDATA_DISK_NAME_LEN + 1]; + char filename[FILENAME_MAX + 1]; + snprintfz(disk, NETDATA_DISK_NAME_LEN, "%s", name); + size_t length = strlen(disk); + if (!length) { + return; + } + + length--; + size_t curr = length; + while (isdigit((int)disk[length])) { + disk[length--] = '\0'; + } + + // We are looking for partition information, if it is a device we will ignore it. 
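+    // Illustrative walk-through (comment added for clarity): for a partition name such as "sda1"
+    // the loop above strips the trailing digits, leaving disk = "sda" and curr != length, so the
+    // code below treats the entry as a partition of the most recently seen device; for a whole
+    // device such as "sda" nothing is stripped, curr == length, and the device becomes the new
+    // main_disk. Device names that themselves end in digits (e.g. "nvme0n1") are stripped by the
+    // same heuristic.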
+ if (curr == length) { + main_disk = w; + key = MKDEV(w->major, w->minor); + w->bootsector_key = key; + return; + } + w->bootsector_key = key; + w->main = main_disk; + + snprintfz(filename, FILENAME_MAX, "%s/%s/%s/uevent", path, disk, name); + if (ebpf_parse_uevent(w, filename)) + return; + + snprintfz(filename, FILENAME_MAX, "%s/%s/%s/start", path, disk, name); + if (ebpf_disk_parse_start(w, filename)) + return; + + snprintfz(filename, FILENAME_MAX, "%s/%s/%s/size", path, disk, name); + ebpf_parse_size(w, filename); +} + +/** + * New encode dev + * + * New encode algorithm extracted from https://elixir.bootlin.com/linux/v5.10.8/source/include/linux/kdev_t.h#L39 + * + * @param major driver major number + * @param minor driver minor number + * + * @return It returns the encoded device number. + */ +static inline uint32_t netdata_new_encode_dev(uint32_t major, uint32_t minor) { + return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); +} + +/** + * Compare disks + * + * Compare major and minor values to add disks to the tree. + * + * @param a pointer to netdata_ebpf_disks + * @param b pointer to netdata_ebpf_disks + * + * @return It returns 0 when the values are equal, 1 when a is bigger than b, and -1 when a is smaller than b. +*/ +static int ebpf_compare_disks(void *a, void *b) +{ + netdata_ebpf_disks_t *ptr1 = a; + netdata_ebpf_disks_t *ptr2 = b; + + if (ptr1->dev > ptr2->dev) + return 1; + if (ptr1->dev < ptr2->dev) + return -1; + + return 0; +} + +/** + * Update disk table + * + * Update the linked list when necessary. + * + * @param name disk name + * @param major major disk identifier + * @param minor minor disk identifier + * @param current_time current timestamp + */ +static void update_disk_table(char *name, int major, int minor, time_t current_time) +{ + netdata_ebpf_disks_t find; + netdata_ebpf_disks_t *w; + size_t length; + + uint32_t dev = netdata_new_encode_dev(major, minor); + find.dev = dev; + netdata_ebpf_disks_t *ret = (netdata_ebpf_disks_t *) avl_search_lock(&disk_tree, (avl_t *)&find); + if (ret) { // Disk is already present + ret->flags |= NETDATA_DISK_IS_HERE; + ret->last_update = current_time; + return; + } + + netdata_ebpf_disks_t *update_next = disk_list; + if (likely(disk_list)) { + netdata_ebpf_disks_t *move = disk_list; + while (move) { + if (dev == move->dev) + return; + + update_next = move; + move = move->next; + } + + w = callocz(1, sizeof(netdata_ebpf_disks_t)); + length = strlen(name); + if (length >= NETDATA_DISK_NAME_LEN) + length = NETDATA_DISK_NAME_LEN; + + memcpy(w->family, name, length); + w->family[length] = '\0'; + w->major = major; + w->minor = minor; + w->dev = netdata_new_encode_dev(major, minor); + update_next->next = w; + } else { + disk_list = callocz(1, sizeof(netdata_ebpf_disks_t)); + length = strlen(name); + if (length >= NETDATA_DISK_NAME_LEN) + length = NETDATA_DISK_NAME_LEN; + + memcpy(disk_list->family, name, length); + disk_list->family[length] = '\0'; + disk_list->major = major; + disk_list->minor = minor; + disk_list->dev = netdata_new_encode_dev(major, minor); + + w = disk_list; + } + + ebpf_read_disk_info(w, name); + + netdata_ebpf_disks_t *check; + check = (netdata_ebpf_disks_t *) avl_insert_lock(&disk_tree, (avl_t *)w); + if (check != w) + error("Internal error, cannot insert into the AVL tree."); + +#ifdef NETDATA_INTERNAL_CHECKS + info("The latency thread is monitoring the hard disk %s (Major = %d, Minor = %d, Device = %u)", name, major, minor, w->dev); +#endif + + w->flags |= NETDATA_DISK_IS_HERE; +} + +/** + * Read Local Disks + * + * Parse /proc/partitions to get block
disks used to measure latency. + * + * @return It returns 0 on success and -1 otherwise + */ +static int read_local_disks() +{ + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, NETDATA_EBPF_PROC_PARTITIONS); + procfile *ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT); + if (!ff) + return -1; + + ff = procfile_readall(ff); + if (!ff) + return -1; + + size_t lines = procfile_lines(ff), l; + time_t current_time = now_realtime_sec(); + for (l = 2; l < lines; l++) { + size_t words = procfile_linewords(ff, l); + // This is the header or the end of the file + if (unlikely(words < 4)) + continue; + + int major = (int)strtol(procfile_lineword(ff, l, 0), NULL, 10); + // The main goal of this thread is to measure block devices, so any block device with major number + // smaller than or equal to 7 according to /proc/devices is not "important". + if (major > 7) { + int minor = (int)strtol(procfile_lineword(ff, l, 1), NULL, 10); + update_disk_table(procfile_lineword(ff, l, 3), major, minor, current_time); + } + } + + procfile_close(ff); + + return 0; +} + +/** + * Update disks + * + * @param em main thread structure + */ +void ebpf_update_disks(ebpf_module_t *em) +{ + static time_t update_every = 0; + time_t curr = now_realtime_sec(); + if (curr < update_every) + return; + + update_every = curr + 5 * em->update_every; + + (void)read_local_disks(); +} + +/***************************************************************** + * + * FUNCTIONS TO CLOSE THE THREAD + * + *****************************************************************/ + +/** + * Disk disable tracepoints + * + * Disable tracepoints when the plugin was responsible for enabling them. + */ +static void ebpf_disk_disable_tracepoints() +{ + char *default_message = { "Cannot disable the tracepoint" }; + if (!was_block_issue_enabled) { + if (ebpf_disable_tracing_values(tracepoint_block_type, tracepoint_block_issue)) + error("%s %s/%s.", default_message, tracepoint_block_type, tracepoint_block_issue); + } + + if (!was_block_rq_complete_enabled) { + if (ebpf_disable_tracing_values(tracepoint_block_type, tracepoint_block_rq_complete)) + error("%s %s/%s.", default_message, tracepoint_block_type, tracepoint_block_rq_complete); + } +} + +/** + * Cleanup plot disks + * + * Clean the list of disks to plot. + */ +static void ebpf_cleanup_plot_disks() +{ + ebpf_publish_disk_t *move = plot_disks, *next; + while (move) { + next = move->next; + + freez(move); + + move = next; + } +} + +/** + * Cleanup Disk List + */ +static void ebpf_cleanup_disk_list() +{ + netdata_ebpf_disks_t *move = disk_list; + while (move) { + netdata_ebpf_disks_t *next = move->next; + + freez(move->histogram.name); + freez(move->boot_chart); + freez(move); + + move = next; + } +} + +/** + * Clean up the main thread. + * + * @param ptr thread data.
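+ *
+ * Tracepoints are only disabled here when the plugin itself enabled them
+ * (was_block_issue_enabled / was_block_rq_complete_enabled), so a tracepoint
+ * that was already active before the plugin started is left as it was found.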
+ */ +static void ebpf_disk_cleanup(void *ptr) +{ + ebpf_disk_disable_tracepoints(); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) + return; + + heartbeat_t hb; + heartbeat_init(&hb); + uint32_t tick = 2 * USEC_PER_MS; + while (!read_thread_closed) { + usec_t dt = heartbeat_next(&hb, tick); + UNUSED(dt); + } + + if (dimensions) + ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS); + + freez(disk_hash_values); + freez(disk_threads.thread); + pthread_mutex_destroy(&plot_mutex); + + ebpf_cleanup_plot_disks(); + ebpf_cleanup_disk_list(); + + if (probe_links) { + struct bpf_program *prog; + size_t i = 0; + bpf_object__for_each_program(prog, objects) { + bpf_link__destroy(probe_links[i]); + i++; + } + bpf_object__close(objects); + } +} + +/***************************************************************** + * + * MAIN LOOP + * + *****************************************************************/ + +/** + * Fill Plot list + * + * @param ptr a pointer to the current disk + */ +static void ebpf_fill_plot_disks(netdata_ebpf_disks_t *ptr) +{ + pthread_mutex_lock(&plot_mutex); + ebpf_publish_disk_t *w; + if (likely(plot_disks)) { + ebpf_publish_disk_t *move = plot_disks, *store = plot_disks; + while (move) { + if (move->plot == ptr) { + pthread_mutex_unlock(&plot_mutex); + return; + } + + store = move; + move = move->next; + } + + w = callocz(1, sizeof(ebpf_publish_disk_t)); + w->plot = ptr; + store->next = w; + } else { + plot_disks = callocz(1, sizeof(ebpf_publish_disk_t)); + plot_disks->plot = ptr; + } + pthread_mutex_unlock(&plot_mutex); + + ptr->flags |= NETDATA_DISK_ADDED_TO_PLOT_LIST; +} + +/** + * Read hard disk table + * + * @param table file descriptor of the table + * + * Read the table with the number of calls for all functions + */ +static void read_hard_disk_tables(int table) +{ + netdata_idx_t *values = disk_hash_values; + block_key_t key = {}; + block_key_t next_key = {}; + + netdata_ebpf_disks_t *ret = NULL; + + while (bpf_map_get_next_key(table, &key, &next_key) == 0) { + int test = bpf_map_lookup_elem(table, &key, values); + if (test < 0) { + key = next_key; + continue; + } + + netdata_ebpf_disks_t find; + find.dev = key.dev; + + if (likely(ret)) { + if (find.dev != ret->dev) + ret = (netdata_ebpf_disks_t *)avl_search_lock(&disk_tree, (avl_t *)&find); + } else + ret = (netdata_ebpf_disks_t *)avl_search_lock(&disk_tree, (avl_t *)&find); + + // Disk was inserted after we parsed /proc/partitions + if (!ret) { + if (read_local_disks()) { + key = next_key; + continue; + } + + ret = (netdata_ebpf_disks_t *)avl_search_lock(&disk_tree, (avl_t *)&find); + if (!ret) { + // We should never reach this point, but we keep this check to be safe + key = next_key; + continue; + } + } + + uint64_t total = 0; + int i; + int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs; + for (i = 0; i < end; i++) { + total += values[i]; + } + + ret->histogram.histogram[key.bin] = total; + + if (!(ret->flags & NETDATA_DISK_ADDED_TO_PLOT_LIST)) + ebpf_fill_plot_disks(ret); + + key = next_key; + } +} + +/** + * Disk read hash + * + * This is the thread callback. + * This thread is necessary, because we cannot freeze the whole plugin to read the data. + * + * @param ptr a pointer to `ebpf_module_t`. + * + * @return It always returns NULL.
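+ *
+ * On kernels >= 4.15 the table values are per-CPU, so read_hard_disk_tables()
+ * sums ebpf_nprocs slots into one total per histogram bin; on older kernels
+ * only the first slot is read.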
+ */ +void *ebpf_disk_read_hash(void *ptr) +{ + heartbeat_t hb; + heartbeat_init(&hb); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + + usec_t step = NETDATA_LATENCY_DISK_SLEEP_MS * em->update_every; + while (!close_ebpf_plugin) { + usec_t dt = heartbeat_next(&hb, step); + (void)dt; + + read_hard_disk_tables(disk_maps[NETDATA_DISK_READ].map_fd); + } + + return NULL; +} + +/** + * Obsolete Hard Disk charts + * + * Make Hard disk charts and fill chart name + * + * @param w the structure with necessary information to create the chart + * @param update_every value to overwrite the update frequency set by the server. + */ +static void ebpf_obsolete_hd_charts(netdata_ebpf_disks_t *w, int update_every) +{ + ebpf_write_chart_obsolete(w->histogram.name, w->family, w->histogram.title, EBPF_COMMON_DIMENSION_CALL, + w->family, NETDATA_EBPF_CHART_TYPE_STACKED, "disk.latency_io", + w->histogram.order, update_every); + + w->flags = 0; +} + +/** + * Create Hard Disk charts + * + * Make Hard disk charts and fill chart name + * + * @param w the structure with necessary information to create the chart + * @param update_every value to overwrite the update frequency set by the server. + */ +static void ebpf_create_hd_charts(netdata_ebpf_disks_t *w, int update_every) +{ + int order = NETDATA_CHART_PRIO_DISK_LATENCY; + char *family = w->family; + + w->histogram.name = strdupz("disk_latency_io"); + w->histogram.title = NULL; + w->histogram.order = order; + + ebpf_create_chart(w->histogram.name, family, "Disk latency", EBPF_COMMON_DIMENSION_CALL, + family, "disk.latency_io", NETDATA_EBPF_CHART_TYPE_STACKED, order, + ebpf_create_global_dimension, disk_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS, + update_every, NETDATA_EBPF_MODULE_NAME_DISK); + order++; + + w->flags |= NETDATA_DISK_CHART_CREATED; +} + +/** + * Remove pointer from plot + * + * Remove pointer from plot list when the disk is not present. + */ +static void ebpf_remove_pointer_from_plot_disk(ebpf_module_t *em) +{ + time_t current_time = now_realtime_sec(); + time_t limit = 10 * em->update_every; + pthread_mutex_lock(&plot_mutex); + ebpf_publish_disk_t *move = plot_disks, *prev = plot_disks; + int update_every = em->update_every; + while (move) { + netdata_ebpf_disks_t *ned = move->plot; + uint32_t flags = ned->flags; + + if (!(flags & NETDATA_DISK_IS_HERE) && ((current_time - ned->last_update) > limit)) { + ebpf_obsolete_hd_charts(ned, update_every); + avl_t *ret = (avl_t *)avl_remove_lock(&disk_tree, (avl_t *)ned); + UNUSED(ret); + if (move == plot_disks) { + freez(move); + plot_disks = NULL; + break; + } else { + prev->next = move->next; + ebpf_publish_disk_t *clean = move; + move = move->next; + freez(clean); + continue; + } + } + + prev = move; + move = move->next; + } + pthread_mutex_unlock(&plot_mutex); +} + +/** + * Send Hard disk data + * + * Send hard disk information to Netdata. + * + * @param update_every value to overwrite the update frequency set by the server. 
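+ *
+ * Charts are created lazily on the first publish (NETDATA_DISK_CHART_CREATED), and
+ * NETDATA_DISK_IS_HERE is cleared after every send, so a disk must show up again in
+ * /proc/partitions before the next cycle; otherwise ebpf_remove_pointer_from_plot_disk()
+ * obsoletes its charts once it has been absent for 10 * update_every seconds.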
+ */ +static void ebpf_latency_send_hd_data(int update_every) +{ + pthread_mutex_lock(&plot_mutex); + if (!plot_disks) { + pthread_mutex_unlock(&plot_mutex); + return; + } + + ebpf_publish_disk_t *move = plot_disks; + while (move) { + netdata_ebpf_disks_t *ned = move->plot; + uint32_t flags = ned->flags; + if (!(flags & NETDATA_DISK_CHART_CREATED)) { + ebpf_create_hd_charts(ned, update_every); + } + + if ((flags & NETDATA_DISK_CHART_CREATED)) { + write_histogram_chart(ned->histogram.name, ned->family, + ned->histogram.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS); + } + + ned->flags &= ~NETDATA_DISK_IS_HERE; + + move = move->next; + } + pthread_mutex_unlock(&plot_mutex); +} + +/** +* Main loop for this collector. +*/ +static void disk_collector(ebpf_module_t *em) +{ + disk_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t)); + disk_threads.thread = mallocz(sizeof(netdata_thread_t)); + disk_threads.start_routine = ebpf_disk_read_hash; + + netdata_thread_create(disk_threads.thread, disk_threads.name, NETDATA_THREAD_OPTION_JOINABLE, + ebpf_disk_read_hash, em); + + int update_every = em->update_every; + int counter = update_every - 1; + read_thread_closed = 0; + while (!close_ebpf_plugin) { + pthread_mutex_lock(&collect_data_mutex); + pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); + + if (++counter == update_every) { + counter = 0; + pthread_mutex_lock(&lock); + ebpf_remove_pointer_from_plot_disk(em); + ebpf_latency_send_hd_data(update_every); + + pthread_mutex_unlock(&lock); + } + + pthread_mutex_unlock(&collect_data_mutex); + + ebpf_update_disks(em); + } + read_thread_closed = 1; +} + +/***************************************************************** + * + * EBPF DISK THREAD + * + *****************************************************************/ + +/** + * Enable tracepoints + * + * Enable necessary tracepoints for thread. + * + * @return It returns 0 on success and -1 otherwise + */ +static int ebpf_disk_enable_tracepoints() +{ + int test = ebpf_is_tracepoint_enabled(tracepoint_block_type, tracepoint_block_issue); + if (test == -1) + return -1; + else if (!test) { + if (ebpf_enable_tracing_values(tracepoint_block_type, tracepoint_block_issue)) + return -1; + } + was_block_issue_enabled = test; + + test = ebpf_is_tracepoint_enabled(tracepoint_block_type, tracepoint_block_rq_complete); + if (test == -1) + return -1; + else if (!test) { + if (ebpf_enable_tracing_values(tracepoint_block_type, tracepoint_block_rq_complete)) + return -1; + } + was_block_rq_complete_enabled = test; + + return 0; +} + +/** + * Disk thread + * + * Thread used to generate disk charts. 
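* + * The thread enables the block tracepoints (restoring their previous state on exit), parses /proc/partitions, loads the eBPF program, and only then builds the histogram dimensions and enters disk_collector(). +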
* + * @param ptr a pointer to `struct ebpf_module` + * + * @return It always returns NULL + */ +void *ebpf_disk_thread(void *ptr) +{ + netdata_thread_cleanup_push(ebpf_disk_cleanup, ptr); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + em->maps = disk_maps; + + if (!em->enabled) + goto enddisk; + + if (ebpf_disk_enable_tracepoints()) { + em->enabled = CONFIG_BOOLEAN_NO; + goto enddisk; + } + + avl_init_lock(&disk_tree, ebpf_compare_disks); + if (read_local_disks()) { + em->enabled = CONFIG_BOOLEAN_NO; + goto enddisk; + } + + if (pthread_mutex_init(&plot_mutex, NULL)) { + error("Cannot initialize local mutex"); + goto enddisk; + } + + probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects); + if (!probe_links) { + goto enddisk; + } + + int algorithms[NETDATA_EBPF_HIST_MAX_BINS]; + ebpf_fill_algorithms(algorithms, NETDATA_EBPF_HIST_MAX_BINS, NETDATA_EBPF_INCREMENTAL_IDX); + dimensions = ebpf_fill_histogram_dimension(NETDATA_EBPF_HIST_MAX_BINS); + + ebpf_global_labels(disk_aggregated_data, disk_publish_aggregated, dimensions, dimensions, algorithms, + NETDATA_EBPF_HIST_MAX_BINS); + + disk_collector(em); + +enddisk: + netdata_thread_cleanup_pop(1); + + return NULL; +} diff --git a/collectors/ebpf.plugin/ebpf_disk.h b/collectors/ebpf.plugin/ebpf_disk.h new file mode 100644 index 000000000..8e58174b9 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_disk.h @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_EBPF_DISK_H +#define NETDATA_EBPF_DISK_H 1 + +// Module name +#define NETDATA_EBPF_MODULE_NAME_DISK "disk" + +#include "libnetdata/avl/avl.h" + +#include "libnetdata/ebpf/ebpf.h" + +#define NETDATA_EBPF_PROC_PARTITIONS "/proc/partitions" + +#define NETDATA_LATENCY_DISK_SLEEP_MS 650000ULL + +// Process configuration name +#define NETDATA_DISK_CONFIG_FILE "disk.conf" + +// Decode function extracted from: https://elixir.bootlin.com/linux/v5.10.8/source/include/linux/kdev_t.h#L7 +#define MINORBITS 20 +#define MKDEV(ma,mi) (((ma) << MINORBITS) | (mi)) + +enum netdata_latency_disks_flags { + NETDATA_DISK_ADDED_TO_PLOT_LIST = 1, + NETDATA_DISK_CHART_CREATED = 2, + NETDATA_DISK_IS_HERE = 4, + NETDATA_DISK_HAS_EFI = 8 +}; + +/* + * The definition (DISK_NAME_LEN) has been a stable value since kernel 3.0, + * so we bring it in as an internal definition to avoid including linux/genhd.h.
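+ *
+ * A worked example of the two device encodings (illustrative arithmetic): for
+ * major 8, minor 1, MKDEV(8, 1) = (8 << 20) | 1 = 0x800001, while
+ * netdata_new_encode_dev(8, 1) in ebpf_disk.c yields (1 & 0xff) | (8 << 8) = 0x801,
+ * matching the kernel's new_encode_dev(); the former keys boot sectors, the
+ * latter keys the AVL tree of monitored disks.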
+ */ +#define NETDATA_DISK_NAME_LEN 32 +typedef struct netdata_ebpf_disks { + // Search + avl_t avl; + uint32_t dev; + uint32_t major; + uint32_t minor; + uint32_t bootsector_key; + uint64_t start; // start sector + uint64_t end; // end sector + + // Print information + char family[NETDATA_DISK_NAME_LEN + 1]; + char *boot_chart; + + netdata_ebpf_histogram_t histogram; + + uint32_t flags; + time_t last_update; + + struct netdata_ebpf_disks *main; + struct netdata_ebpf_disks *boot_partition; + struct netdata_ebpf_disks *next; +} netdata_ebpf_disks_t; + +enum ebpf_disk_tables { + NETDATA_DISK_READ +}; + +typedef struct block_key { + uint32_t bin; + uint32_t dev; +} block_key_t; + +typedef struct netdata_ebpf_publish_disk { + netdata_ebpf_disks_t *plot; + struct netdata_ebpf_publish_disk *next; +} ebpf_publish_disk_t; + +extern struct config disk_config; + +extern void *ebpf_disk_thread(void *ptr); + +#endif /* NETDATA_EBPF_DISK_H */ + diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c new file mode 100644 index 000000000..6eecf5847 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_fd.c @@ -0,0 +1,865 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "ebpf.h" +#include "ebpf_fd.h" + +static char *fd_dimension_names[NETDATA_FD_SYSCALL_END] = { "open", "close" }; +static char *fd_id_names[NETDATA_FD_SYSCALL_END] = { "do_sys_open", "__close_fd" }; + +static netdata_syscall_stat_t fd_aggregated_data[NETDATA_FD_SYSCALL_END]; +static netdata_publish_syscall_t fd_publish_aggregated[NETDATA_FD_SYSCALL_END]; + +static ebpf_local_maps_t fd_maps[] = {{.name = "tbl_fd_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE, + .user_input = 0, + .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_fd_global", .internal_input = NETDATA_KEY_END_VECTOR, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "fd_ctrl", .internal_input = NETDATA_CONTROLLER_END, + .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = NULL, .internal_input = 0, .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}}; + + +struct config fd_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER, + .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare }, + .rwlock = AVL_LOCK_INITIALIZER } }; + +static struct bpf_link **probe_links = NULL; +static struct bpf_object *objects = NULL; + +struct netdata_static_thread fd_thread = {"FD KERNEL", NULL, NULL, 1, NULL, + NULL, NULL}; +static int read_thread_closed = 1; +static netdata_idx_t fd_hash_values[NETDATA_FD_COUNTER]; +static netdata_idx_t *fd_values = NULL; + +netdata_fd_stat_t *fd_vector = NULL; +netdata_fd_stat_t **fd_pid = NULL; + +/***************************************************************** + * + * FUNCTIONS TO CLOSE THE THREAD + * + *****************************************************************/ + +/** + * Clean PID structures + * + * Clean the allocated structures. + */ +void clean_fd_pid_structures() { + struct pid_stat *pids = root_of_pids; + while (pids) { + freez(fd_pid[pids->pid]); + + pids = pids->next; + } +} + +/** + * Clean up the main thread. + * + * @param ptr thread data. 
+ */ +static void ebpf_fd_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) + return; + + heartbeat_t hb; + heartbeat_init(&hb); + uint32_t tick = 2 * USEC_PER_MS; + while (!read_thread_closed) { + usec_t dt = heartbeat_next(&hb, tick); + UNUSED(dt); + } + + ebpf_cleanup_publish_syscall(fd_publish_aggregated); + freez(fd_thread.thread); + freez(fd_values); + freez(fd_vector); + + if (probe_links) { + struct bpf_program *prog; + size_t i = 0 ; + bpf_object__for_each_program(prog, objects) { + bpf_link__destroy(probe_links[i]); + i++; + } + bpf_object__close(objects); + } +} + +/***************************************************************** + * + * MAIN LOOP + * + *****************************************************************/ + +/** + * Send data to Netdata calling auxiliary functions. + * + * @param em the structure with thread information + */ +static void ebpf_fd_send_data(ebpf_module_t *em) +{ + fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].ncall = fd_hash_values[NETDATA_KEY_CALLS_DO_SYS_OPEN]; + fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].nerr = fd_hash_values[NETDATA_KEY_ERROR_DO_SYS_OPEN]; + + fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].ncall = fd_hash_values[NETDATA_KEY_CALLS_CLOSE_FD]; + fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].nerr = fd_hash_values[NETDATA_KEY_ERROR_CLOSE_FD]; + + write_count_chart(NETDATA_FILE_OPEN_CLOSE_COUNT, NETDATA_FILESYSTEM_FAMILY, fd_publish_aggregated, + NETDATA_FD_SYSCALL_END); + + if (em->mode < MODE_ENTRY) { + write_err_chart(NETDATA_FILE_OPEN_ERR_COUNT, NETDATA_FILESYSTEM_FAMILY, + fd_publish_aggregated, NETDATA_FD_SYSCALL_END); + } +} + +/** + * Read global counter + * + * Read the table with number of calls for all functions + */ +static void read_global_table() +{ + uint32_t idx; + netdata_idx_t *val = fd_hash_values; + netdata_idx_t *stored = fd_values; + int fd = fd_maps[NETDATA_FD_GLOBAL_STATS].map_fd; + + for (idx = NETDATA_KEY_CALLS_DO_SYS_OPEN; idx < NETDATA_FD_COUNTER; idx++) { + if (!bpf_map_lookup_elem(fd, &idx, stored)) { + int i; + int end = ebpf_nprocs; + netdata_idx_t total = 0; + for (i = 0; i < end; i++) + total += stored[i]; + + val[idx] = total; + } + } +} + +/** + * File descriptor read hash + * + * This is the thread callback. + * This thread is necessary, because we cannot freeze the whole plugin to read the data. + * + * @param ptr It is a NULL value for this thread. + * + * @return It always returns NULL. + */ +void *ebpf_fd_read_hash(void *ptr) +{ + read_thread_closed = 0; + + heartbeat_t hb; + heartbeat_init(&hb); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + usec_t step = NETDATA_FD_SLEEP_MS * em->update_every; + while (!close_ebpf_plugin) { + usec_t dt = heartbeat_next(&hb, step); + (void)dt; + + read_global_table(); + } + + read_thread_closed = 1; + return NULL; +} + +/** + * Apps Accumulator + * + * Sum all values read from kernel and store in the first address. + * + * @param out the vector with read values. + */ +static void fd_apps_accumulator(netdata_fd_stat_t *out) +{ + int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? 
ebpf_nprocs : 1; + netdata_fd_stat_t *total = &out[0]; + for (i = 1; i < end; i++) { + netdata_fd_stat_t *w = &out[i]; + total->open_call += w->open_call; + total->close_call += w->close_call; + total->open_err += w->open_err; + total->close_err += w->close_err; + } +} + +/** + * Fill PID + * + * Fill PID structures + * + * @param current_pid the PID for which we are collecting data + * @param publish values read from the hash tables. + */ +static void fd_fill_pid(uint32_t current_pid, netdata_fd_stat_t *publish) +{ + netdata_fd_stat_t *curr = fd_pid[current_pid]; + if (!curr) { + curr = callocz(1, sizeof(netdata_fd_stat_t)); + fd_pid[current_pid] = curr; + } + + memcpy(curr, &publish[0], sizeof(netdata_fd_stat_t)); +} + +/** + * Read APPS table + * + * Read the apps table and store data inside the structure. + */ +static void read_apps_table() +{ + netdata_fd_stat_t *fv = fd_vector; + uint32_t key; + struct pid_stat *pids = root_of_pids; + int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd; + size_t length = sizeof(netdata_fd_stat_t) * ebpf_nprocs; + while (pids) { + key = pids->pid; + + if (bpf_map_lookup_elem(fd, &key, fv)) { + pids = pids->next; + continue; + } + + fd_apps_accumulator(fv); + + fd_fill_pid(key, fv); + + // We are cleaning to avoid passing data read from one process to another. + memset(fv, 0, length); + + pids = pids->next; + } +} + +/** + * Update cgroup + * + * Update cgroup data based on the PIDs of each cgroup. + */ +static void ebpf_update_fd_cgroup() +{ + ebpf_cgroup_target_t *ect; + netdata_fd_stat_t *fv = fd_vector; + int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd; + size_t length = sizeof(netdata_fd_stat_t) * ebpf_nprocs; + + pthread_mutex_lock(&mutex_cgroup_shm); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + struct pid_on_target2 *pids; + for (pids = ect->pids; pids; pids = pids->next) { + int pid = pids->pid; + netdata_fd_stat_t *out = &pids->fd; + if (likely(fd_pid) && fd_pid[pid]) { + netdata_fd_stat_t *in = fd_pid[pid]; + + memcpy(out, in, sizeof(netdata_fd_stat_t)); + } else { + memset(fv, 0, length); + if (!bpf_map_lookup_elem(fd, &pid, fv)) { + fd_apps_accumulator(fv); + + memcpy(out, fv, sizeof(netdata_fd_stat_t)); + } + } + } + } + pthread_mutex_unlock(&mutex_cgroup_shm); +} + +/** + * Sum PIDs + * + * Sum values for all targets. + * + * @param fd the output + * @param root list of pids + */ +static void ebpf_fd_sum_pids(netdata_fd_stat_t *fd, struct pid_on_target *root) +{ + uint32_t open_call = 0; + uint32_t close_call = 0; + uint32_t open_err = 0; + uint32_t close_err = 0; + + while (root) { + int32_t pid = root->pid; + netdata_fd_stat_t *w = fd_pid[pid]; + if (w) { + open_call += w->open_call; + close_call += w->close_call; + open_err += w->open_err; + close_err += w->close_err; + } + + root = root->next; + } + + // These conditions were added, because we are using an incremental algorithm + fd->open_call = (open_call >= fd->open_call) ? open_call : fd->open_call; + fd->close_call = (close_call >= fd->close_call) ? close_call : fd->close_call; + fd->open_err = (open_err >= fd->open_err) ? open_err : fd->open_err; + fd->close_err = (close_err >= fd->close_err) ? close_err : fd->close_err; +} + +/** + * Send data to Netdata calling auxiliary functions. + * + * @param em the structure with thread information + * @param root the target list.
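+ *
+ * Because the chart dimensions use the incremental algorithm, ebpf_fd_sum_pids()
+ * clamps every counter to max(previous, current): when a PID exits its contribution
+ * vanishes from the sum, and letting the published value drop would render as a
+ * negative rate on the chart.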
+*/ +void ebpf_fd_send_apps_data(ebpf_module_t *em, struct target *root) +{ + struct target *w; + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + ebpf_fd_sum_pids(&w->fd, w->root_pid); + } + } + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->fd.open_call); + } + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->fd.open_err); + } + } + write_end_chart(); + } + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->fd.close_call); + } + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->fd.close_err); + } + } + write_end_chart(); + } +} + +/** + * Sum PIDs + * + * Sum values for all targets. + * + * @param fd structure used to store data + * @param pids input data + */ +static void ebpf_fd_sum_cgroup_pids(netdata_fd_stat_t *fd, struct pid_on_target2 *pids) +{ + netdata_fd_stat_t accumulator; + memset(&accumulator, 0, sizeof(accumulator)); + + while (pids) { + netdata_fd_stat_t *w = &pids->fd; + + accumulator.open_err += w->open_err; + accumulator.open_call += w->open_call; + accumulator.close_call += w->close_call; + accumulator.close_err += w->close_err; + + pids = pids->next; + } + + fd->open_call = (accumulator.open_call >= fd->open_call) ? accumulator.open_call : fd->open_call; + fd->open_err = (accumulator.open_err >= fd->open_err) ? accumulator.open_err : fd->open_err; + fd->close_call = (accumulator.close_call >= fd->close_call) ? accumulator.close_call : fd->close_call; + fd->close_err = (accumulator.close_err >= fd->close_err) ? accumulator.close_err : fd->close_err; +} + +/** + * Create specific file descriptor charts + * + * Create charts for cgroup/application. + * + * @param type the chart type. + * @param em the main thread structure. 
+ */ +static void ebpf_create_specific_fd_charts(char *type, ebpf_module_t *em) +{ + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN, "Number of open files", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP, + NETDATA_CGROUP_FD_OPEN_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5400, + ebpf_create_global_dimension, + &fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + if (em->mode < MODE_ENTRY) { + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "Fails to open files", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP, + NETDATA_CGROUP_FD_OPEN_ERR_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5401, + ebpf_create_global_dimension, + &fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN], + 1, em->update_every, + NETDATA_EBPF_MODULE_NAME_SWAP); + } + + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSED, "Files closed", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP, + NETDATA_CGROUP_FD_CLOSE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5402, + ebpf_create_global_dimension, + &fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + if (em->mode < MODE_ENTRY) { + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "Fails to close files", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP, + NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5403, + ebpf_create_global_dimension, + &fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE], + 1, em->update_every, + NETDATA_EBPF_MODULE_NAME_SWAP); + } +} + +/** + * Obsolete specific file descriptor charts + * + * Obsolete charts for cgroup/application. + * + * @param type the chart type. + * @param em the main thread structure. + */ +static void ebpf_obsolete_specific_fd_charts(char *type, ebpf_module_t *em) +{ + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_OPEN, "Number of open files", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_OPEN_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5400, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "Fails to open files", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_OPEN_ERR_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5401, em->update_every); + } + + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_CLOSED, "Files closed", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_CLOSE_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5402, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "Fails to close files", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5403, em->update_every); + } +} + +/* + * Send specific file descriptor data + * + * Send data for specific cgroup/apps. 
+ * + * @param type chart type + * @param values structure with values that will be sent to netdata + */ +static void ebpf_send_specific_fd_data(char *type, netdata_fd_stat_t *values, ebpf_module_t *em) +{ + write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN); + write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].name, (long long)values->open_call); + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR); + write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].name, (long long)values->open_err); + write_end_chart(); + } + + write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSED); + write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].name, (long long)values->close_call); + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR); + write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].name, (long long)values->close_err); + write_end_chart(); + } +} + +/** + * Create systemd file descriptor charts + * + * Create charts when systemd is enabled + * + * @param em the main collector structure + **/ +static void ebpf_create_systemd_fd_charts(ebpf_module_t *em) +{ + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_OPEN, "Number of open files", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20061, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_OPEN_CONTEXT, + NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "Fails to open files", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20062, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_OPEN_ERR_CONTEXT, + NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every); + } + + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_CLOSED, "Files closed", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20063, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_CLOSE_CONTEXT, + NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "Fails to close files", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20064, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_CLOSE_ERR_CONTEXT, + NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every); + } +} + +/** + * Send Systemd charts + * + * Send collected data to Netdata. 
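* + * Only the first write loop updates the return value: when a systemd cgroup has not been updated, zero is returned and the caller recreates the systemd charts on the next iteration. +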
* + * @param em the main collector structure + * + * @return It returns the status of chart creation: zero is returned when it is necessary to remove a specific dimension; + * otherwise it returns 1 to avoid chart recreation + */ +static int ebpf_send_systemd_fd_charts(ebpf_module_t *em) +{ + int ret = 1; + ebpf_cgroup_target_t *ect; + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_fd.open_call); + } else + ret = 0; + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_fd.open_err); + } + } + write_end_chart(); + } + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_fd.close_call); + } + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_fd.close_err); + } + } + write_end_chart(); + } + + return ret; +} + +/** + * Send data to Netdata calling auxiliary functions. + * + * @param em the main collector structure +*/ +static void ebpf_fd_send_cgroup_data(ebpf_module_t *em) +{ + if (!ebpf_cgroup_pids) + return; + + pthread_mutex_lock(&mutex_cgroup_shm); + ebpf_cgroup_target_t *ect; + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + ebpf_fd_sum_cgroup_pids(&ect->publish_systemd_fd, ect->pids); + } + + int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; + if (has_systemd) { + static int systemd_charts = 0; + if (!systemd_charts) { + ebpf_create_systemd_fd_charts(em); + systemd_charts = 1; + } + + systemd_charts = ebpf_send_systemd_fd_charts(em); + } + + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + if (ect->systemd) + continue; + + if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_FD_CHART) && ect->updated) { + ebpf_create_specific_fd_charts(ect->name, em); + ect->flags |= NETDATA_EBPF_CGROUP_HAS_FD_CHART; + } + + if (ect->flags & NETDATA_EBPF_CGROUP_HAS_FD_CHART) { + if (ect->updated) { + ebpf_send_specific_fd_data(ect->name, &ect->publish_systemd_fd, em); + } else { + ebpf_obsolete_specific_fd_charts(ect->name, em); + ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_FD_CHART; + } + } + } + + pthread_mutex_unlock(&mutex_cgroup_shm); +} + +/** +* Main loop for this collector.
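+*
+* The tick counter starts at update_every - 1, so the first wakeup publishes
+* immediately; after that, data is read and sent once every update_every wakeups
+* of collect_data_cond_var.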
+*/ +static void fd_collector(ebpf_module_t *em) +{ + fd_thread.thread = mallocz(sizeof(netdata_thread_t)); + fd_thread.start_routine = ebpf_fd_read_hash; + + netdata_thread_create(fd_thread.thread, fd_thread.name, NETDATA_THREAD_OPTION_JOINABLE, + ebpf_fd_read_hash, em); + + int apps = em->apps_charts; + int cgroups = em->cgroup_charts; + int update_every = em->update_every; + int counter = update_every - 1; + while (!close_ebpf_plugin) { + pthread_mutex_lock(&collect_data_mutex); + pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); + + if (++counter == update_every) { + counter = 0; + if (apps) + read_apps_table(); + + if (cgroups) + ebpf_update_fd_cgroup(); + + pthread_mutex_lock(&lock); + + ebpf_fd_send_data(em); + + if (apps) + ebpf_fd_send_apps_data(em, apps_groups_root_target); + + if (cgroups) + ebpf_fd_send_cgroup_data(em); + + pthread_mutex_unlock(&lock); + } + + pthread_mutex_unlock(&collect_data_mutex); + } +} + +/***************************************************************** + * + * CREATE CHARTS + * + *****************************************************************/ + +/** + * Create apps charts + * + * Call ebpf_create_chart to create the charts on apps submenu. + * + * @param em a pointer to the structure with the default values. + */ +void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr) +{ + struct target *root = ptr; + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN, + "Number of open files", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_FILE_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20061, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); + + if (em->mode < MODE_ENTRY) { + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, + "Fails to open files", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_FILE_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20062, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); + } + + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSED, + "Files closed", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_FILE_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20063, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); + + if (em->mode < MODE_ENTRY) { + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, + "Fails to close files", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_FILE_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20064, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); + } +} + +/** + * Create global charts + * + * Call ebpf_create_chart to create the charts for the collector. + * + * @param em a pointer to the structure with the default values. 
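+ *
+ * The error chart is created only when the collector runs in return mode
+ * (em->mode < MODE_ENTRY): entry probes do not observe syscall return values,
+ * so failure counts are only available in that mode.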
+ */ +static void ebpf_create_fd_global_charts(ebpf_module_t *em) +{ + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, + NETDATA_FILE_OPEN_CLOSE_COUNT, + "Open and close calls", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_FILE_GROUP, + NULL, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_EBPF_FD_CHARTS, + ebpf_create_global_dimension, + fd_publish_aggregated, + NETDATA_FD_SYSCALL_END, + em->update_every, NETDATA_EBPF_MODULE_NAME_FD); + + if (em->mode < MODE_ENTRY) { + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, + NETDATA_FILE_OPEN_ERR_COUNT, + "Open fails", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_FILE_GROUP, + NULL, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_EBPF_FD_CHARTS + 1, + ebpf_create_global_dimension, + fd_publish_aggregated, + NETDATA_FD_SYSCALL_END, + em->update_every, NETDATA_EBPF_MODULE_NAME_FD); + } +} + +/***************************************************************** + * + * MAIN THREAD + * + *****************************************************************/ + +/** + * Allocate vectors used with this thread. + * + * We do not test the return, because callocz does this and shuts down the software + * in case it is not possible to allocate. + * + * @param apps is apps enabled? + */ +static void ebpf_fd_allocate_global_vectors(int apps) +{ + if (apps) + fd_pid = callocz((size_t)pid_max, sizeof(netdata_fd_stat_t *)); + + fd_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_fd_stat_t)); + + fd_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t)); +} + +/** + * File descriptor thread + * + * Thread used to generate file descriptor charts. + * + * @param ptr a pointer to `struct ebpf_module` + * + * @return It always returns NULL + */ +void *ebpf_fd_thread(void *ptr) +{ + netdata_thread_cleanup_push(ebpf_fd_cleanup, ptr); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + em->maps = fd_maps; + + if (!em->enabled) + goto endfd; + + ebpf_fd_allocate_global_vectors(em->apps_charts); + + probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects); + if (!probe_links) { + goto endfd; + } + + int algorithms[NETDATA_FD_SYSCALL_END] = { + NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX + }; + + ebpf_global_labels(fd_aggregated_data, fd_publish_aggregated, fd_dimension_names, fd_id_names, + algorithms, NETDATA_FD_SYSCALL_END); + + pthread_mutex_lock(&lock); + ebpf_create_fd_global_charts(em); + pthread_mutex_unlock(&lock); + + fd_collector(em); + +endfd: + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/collectors/ebpf.plugin/ebpf_fd.h b/collectors/ebpf.plugin/ebpf_fd.h new file mode 100644 index 000000000..851e040e5 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_fd.h @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_EBPF_FD_H +#define NETDATA_EBPF_FD_H 1 + +// Module name +#define NETDATA_EBPF_MODULE_NAME_FD "filedescriptor" + +#define NETDATA_FD_SLEEP_MS 850000ULL + +// Menu group +#define NETDATA_FILE_GROUP "file_access" + +// Global chart name +#define NETDATA_FILE_OPEN_CLOSE_COUNT "file_descriptor" +#define NETDATA_FILE_OPEN_ERR_COUNT "file_error" + +// Charts created on Apps submenu +#define NETDATA_SYSCALL_APPS_FILE_OPEN "file_open" +#define NETDATA_SYSCALL_APPS_FILE_CLOSED "file_closed" +#define NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR "file_open_error" +#define NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR "file_close_error" + +// Process configuration name +#define NETDATA_FD_CONFIG_FILE "fd.conf" + +// Contexts +#define NETDATA_CGROUP_FD_OPEN_CONTEXT "cgroup.fd_open" +#define NETDATA_CGROUP_FD_OPEN_ERR_CONTEXT
"cgroup.fd_open_error" +#define NETDATA_CGROUP_FD_CLOSE_CONTEXT "cgroup.fd_close" +#define NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT "cgroup.fd_close_error" + +#define NETDATA_SYSTEMD_FD_OPEN_CONTEXT "services.fd_open" +#define NETDATA_SYSTEMD_FD_OPEN_ERR_CONTEXT "services.fd_open_error" +#define NETDATA_SYSTEMD_FD_CLOSE_CONTEXT "services.fd_close" +#define NETDATA_SYSTEMD_FD_CLOSE_ERR_CONTEXT "services.fd_close_error" + +typedef struct netdata_fd_stat { + uint64_t pid_tgid; // Unique identifier + uint32_t pid; // Process ID + + uint32_t open_call; // Open syscalls (open and openat) + uint32_t close_call; // Close syscall (close) + + // Errors + uint32_t open_err; + uint32_t close_err; +} netdata_fd_stat_t; + +enum fd_tables { + NETDATA_FD_PID_STATS, + NETDATA_FD_GLOBAL_STATS, + + // Keep this as last and don't skip numbers as it is used as element counter + NETDATA_FD_CONTROLLER +}; + +enum fd_counters { + NETDATA_KEY_CALLS_DO_SYS_OPEN, + NETDATA_KEY_ERROR_DO_SYS_OPEN, + + NETDATA_KEY_CALLS_CLOSE_FD, + NETDATA_KEY_ERROR_CLOSE_FD, + + // Keep this as last and don't skip numbers as it is used as element counter + NETDATA_FD_COUNTER +}; + +enum fd_syscalls { + NETDATA_FD_SYSCALL_OPEN, + NETDATA_FD_SYSCALL_CLOSE, + + // Do not insert nothing after this value + NETDATA_FD_SYSCALL_END +}; + + +extern void *ebpf_fd_thread(void *ptr); +extern void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr); +extern struct config fd_config; +extern netdata_fd_stat_t **fd_pid; +extern void clean_fd_pid_structures(); + +#endif /* NETDATA_EBPF_FD_H */ + diff --git a/collectors/ebpf.plugin/ebpf_filesystem.c b/collectors/ebpf.plugin/ebpf_filesystem.c new file mode 100644 index 000000000..ad2c9eff0 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_filesystem.c @@ -0,0 +1,661 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "ebpf_filesystem.h" + +struct config fs_config = { .first_section = NULL, + .last_section = NULL, + .mutex = NETDATA_MUTEX_INITIALIZER, + .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, + .rwlock = AVL_LOCK_INITIALIZER } }; + +static ebpf_local_maps_t fs_maps[] = {{.name = "tbl_ext4", .internal_input = NETDATA_KEY_CALLS_SYNC, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_xfs", .internal_input = NETDATA_KEY_CALLS_SYNC, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_nfs", .internal_input = NETDATA_KEY_CALLS_SYNC, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_zfs", .internal_input = NETDATA_KEY_CALLS_SYNC, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_btrfs", .internal_input = NETDATA_KEY_CALLS_SYNC, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_ext_addr", .internal_input = 1, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = NULL, .internal_input = 0, .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}}; + +ebpf_filesystem_partitions_t localfs[] = + {{.filesystem = "ext4", + .optional_filesystem = NULL, + .family = "ext4", + .objects = NULL, + .probe_links = NULL, + .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION, + .enabled = CONFIG_BOOLEAN_YES, + .addresses = {.function = NULL, .addr = 0}}, + {.filesystem = "xfs", + 
.optional_filesystem = NULL, + .family = "xfs", + .objects = NULL, + .probe_links = NULL, + .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION, + .enabled = CONFIG_BOOLEAN_YES, + .addresses = {.function = NULL, .addr = 0}}, + {.filesystem = "nfs", + .optional_filesystem = "nfs4", + .family = "nfs", + .objects = NULL, + .probe_links = NULL, + .flags = NETDATA_FILESYSTEM_ATTR_CHARTS, + .enabled = CONFIG_BOOLEAN_YES, + .addresses = {.function = NULL, .addr = 0}}, + {.filesystem = "zfs", + .optional_filesystem = NULL, + .family = "zfs", + .objects = NULL, + .probe_links = NULL, + .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION, + .enabled = CONFIG_BOOLEAN_YES, + .addresses = {.function = NULL, .addr = 0}}, + {.filesystem = "btrfs", + .optional_filesystem = NULL, + .family = "btrfs", + .objects = NULL, + .probe_links = NULL, + .flags = NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE, + .enabled = CONFIG_BOOLEAN_YES, + .addresses = {.function = "btrfs_file_operations", .addr = 0}}, + {.filesystem = NULL, + .optional_filesystem = NULL, + .family = NULL, + .objects = NULL, + .probe_links = NULL, + .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION, + .enabled = CONFIG_BOOLEAN_YES, + .addresses = {.function = NULL, .addr = 0}}}; + +struct netdata_static_thread filesystem_threads = {"EBPF FS READ", + NULL, NULL, 1, NULL, + NULL, NULL }; + +static int read_thread_closed = 1; +static netdata_syscall_stat_t filesystem_aggregated_data[NETDATA_EBPF_HIST_MAX_BINS]; +static netdata_publish_syscall_t filesystem_publish_aggregated[NETDATA_EBPF_HIST_MAX_BINS]; + +char **dimensions = NULL; +static netdata_idx_t *filesystem_hash_values = NULL; + +/***************************************************************** + * + * COMMON FUNCTIONS + * + *****************************************************************/ + +/** + * Create Filesystem chart + * + * Create latency charts + * + * @param update_every value to overwrite the update frequency set by the server. + */ +static void ebpf_obsolete_fs_charts(int update_every) +{ + int i; + uint32_t test = NETDATA_FILESYSTEM_FLAG_CHART_CREATED | NETDATA_FILESYSTEM_REMOVE_CHARTS; + for (i = 0; localfs[i].filesystem; i++) { + ebpf_filesystem_partitions_t *efp = &localfs[i]; + uint32_t flags = efp->flags; + if ((flags & test) == test) { + flags &= ~NETDATA_FILESYSTEM_FLAG_CHART_CREATED; + + ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hread.name, + efp->hread.title, + EBPF_COMMON_DIMENSION_CALL, efp->family_name, + NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hread.order, update_every); + + ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name, + efp->hwrite.title, + EBPF_COMMON_DIMENSION_CALL, efp->family_name, + NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hwrite.order, update_every); + + ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name, efp->hopen.title, + EBPF_COMMON_DIMENSION_CALL, efp->family_name, + NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hopen.order, update_every); + + ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hadditional.name, efp->hadditional.title, + EBPF_COMMON_DIMENSION_CALL, efp->family_name, + NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hadditional.order, + update_every); + } + efp->flags = flags; + } +} + +/** + * Create Filesystem chart + * + * Create latency charts + * + * @param update_every value to overwrite the update frequency set by the server. 
+ */ +static void ebpf_create_fs_charts(int update_every) +{ + static int order = NETDATA_CHART_PRIO_EBPF_FILESYSTEM_CHARTS; + char chart_name[64], title[256], family[64]; + int i; + uint32_t test = NETDATA_FILESYSTEM_FLAG_CHART_CREATED|NETDATA_FILESYSTEM_REMOVE_CHARTS; + for (i = 0; localfs[i].filesystem; i++) { + ebpf_filesystem_partitions_t *efp = &localfs[i]; + uint32_t flags = efp->flags; + if (flags & NETDATA_FILESYSTEM_FLAG_HAS_PARTITION && !(flags & test)) { + snprintfz(title, 255, "%s latency for each read request.", efp->filesystem); + snprintfz(family, 63, "%s_latency", efp->family); + snprintfz(chart_name, 63, "%s_read_latency", efp->filesystem); + efp->hread.name = strdupz(chart_name); + efp->hread.title = strdupz(title); + efp->hread.order = order; + efp->family_name = strdupz(family); + + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hread.name, + title, + EBPF_COMMON_DIMENSION_CALL, family, + NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension, + filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS, + update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM); + order++; + + snprintfz(title, 255, "%s latency for each write request.", efp->filesystem); + snprintfz(chart_name, 63, "%s_write_latency", efp->filesystem); + efp->hwrite.name = strdupz(chart_name); + efp->hwrite.title = strdupz(title); + efp->hwrite.order = order; + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name, + title, + EBPF_COMMON_DIMENSION_CALL, family, + NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension, + filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS, + update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM); + order++; + + snprintfz(title, 255, "%s latency for each open request.", efp->filesystem); + snprintfz(chart_name, 63, "%s_open_latency", efp->filesystem); + efp->hopen.name = strdupz(chart_name); + efp->hopen.title = strdupz(title); + efp->hopen.order = order; + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name, + title, + EBPF_COMMON_DIMENSION_CALL, family, + NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension, + filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS, + update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM); + order++; + + char *type = (efp->flags & NETDATA_FILESYSTEM_ATTR_CHARTS) ? "attribute" : "sync"; + snprintfz(title, 255, "%s latency for each %s request.", efp->filesystem, type); + snprintfz(chart_name, 63, "%s_%s_latency", efp->filesystem, type); + efp->hadditional.name = strdupz(chart_name); + efp->hadditional.title = strdupz(title); + efp->hadditional.order = order; + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hadditional.name, title, + EBPF_COMMON_DIMENSION_CALL, family, + NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension, + filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS, + update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM); + order++; + efp->flags |= NETDATA_FILESYSTEM_FLAG_CHART_CREATED; + } + } +} + +/** + * Initialize eBPF data + * + * @param em main thread structure. + * + * @return it returns 0 on success and -1 otherwise. 
+ */ +int ebpf_filesystem_initialize_ebpf_data(ebpf_module_t *em) +{ + int i; + const char *saved_name = em->thread_name; + for (i = 0; localfs[i].filesystem; i++) { + ebpf_filesystem_partitions_t *efp = &localfs[i]; + if (!efp->probe_links && efp->flags & NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM) { + em->thread_name = efp->filesystem; + efp->probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &efp->objects); + if (!efp->probe_links) { + em->thread_name = saved_name; + return -1; + } + efp->flags |= NETDATA_FILESYSTEM_FLAG_HAS_PARTITION; + + // Needed for filesystems like btrfs + if ((efp->flags & NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE) && (efp->addresses.function)) { + ebpf_load_addresses(&efp->addresses, fs_maps[i + 1].map_fd); + } + } + efp->flags &= ~NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM; + } + em->thread_name = saved_name; + + if (!dimensions) { + dimensions = ebpf_fill_histogram_dimension(NETDATA_EBPF_HIST_MAX_BINS); + + memset(filesystem_aggregated_data, 0, NETDATA_EBPF_HIST_MAX_BINS * sizeof(netdata_syscall_stat_t)); + memset(filesystem_publish_aggregated, 0, NETDATA_EBPF_HIST_MAX_BINS * sizeof(netdata_publish_syscall_t)); + + filesystem_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t)); + } + + return 0; +} + +/** + * Read local partitions + * + * @return the total number of partitions that will be monitored + */ +static int ebpf_read_local_partitions() +{ + char filename[FILENAME_MAX + 1]; + snprintfz(filename, FILENAME_MAX, "%s/proc/self/mountinfo", netdata_configured_host_prefix); + procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) { + snprintfz(filename, FILENAME_MAX, "%s/proc/1/mountinfo", netdata_configured_host_prefix); + ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT); + if(unlikely(!ff)) return 0; + } + + ff = procfile_readall(ff); + if(unlikely(!ff)) + return 0; + + int count = 0; + unsigned long l, i, lines = procfile_lines(ff); + for (i = 0; localfs[i].filesystem; i++) { + localfs[i].flags |= NETDATA_FILESYSTEM_REMOVE_CHARTS; + } + + for(l = 0; l < lines ; l++) { + // In the "normal" situation the filesystem is at column 7 + // When the `shared` option is added to the mount information, the filesystem is at column 8 + // Finally, when systemd starts netdata, it will be at column 9 + unsigned long index = procfile_linewords(ff, l) - 3; + + char *fs = procfile_lineword(ff, l, index); + + for (i = 0; localfs[i].filesystem; i++) { + ebpf_filesystem_partitions_t *w = &localfs[i]; + if (w->enabled && (!strcmp(fs, w->filesystem) || + (w->optional_filesystem && !strcmp(fs, w->optional_filesystem)))) { + localfs[i].flags |= NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM; + localfs[i].flags &= ~NETDATA_FILESYSTEM_REMOVE_CHARTS; + count++; + break; + } + } + } + procfile_close(ff); + + return count; +} + +/** + * Update partition + * + * Update the partition structures before plotting + * + * @param em main thread structure + * + * @return 0 on success and -1 otherwise.
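+ * + * @note The mount points are rescanned at most once every 5 * em->update_every seconds.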
+ */ +static int ebpf_update_partitions(ebpf_module_t *em) +{ + static time_t update_every = 0; + time_t curr = now_realtime_sec(); + if (curr < update_every) + return 0; + + update_every = curr + 5 * em->update_every; + if (!ebpf_read_local_partitions()) { + em->optional = -1; + return -1; + } + + if (ebpf_filesystem_initialize_ebpf_data(em)) { + return -1; + } + + return 0; +} + +/***************************************************************** + * + * CLEANUP FUNCTIONS + * + *****************************************************************/ + +/* + * Cleanup eBPF data + */ +void ebpf_filesystem_cleanup_ebpf_data() +{ + int i; + for (i = 0; localfs[i].filesystem; i++) { + ebpf_filesystem_partitions_t *efp = &localfs[i]; + if (efp->probe_links) { + freez(efp->family_name); + + freez(efp->hread.name); + freez(efp->hread.title); + + freez(efp->hwrite.name); + freez(efp->hwrite.title); + + freez(efp->hopen.name); + freez(efp->hopen.title); + + freez(efp->hadditional.name); + freez(efp->hadditional.title); + + struct bpf_link **probe_links = efp->probe_links; + size_t j = 0; + struct bpf_program *prog; + bpf_object__for_each_program(prog, efp->objects) { + bpf_link__destroy(probe_links[j]); + j++; + } + bpf_object__close(efp->objects); + } + } +} + +/** + * Clean up the main thread. + * + * @param ptr thread data. + */ +static void ebpf_filesystem_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) + return; + + heartbeat_t hb; + heartbeat_init(&hb); + uint32_t tick = 2 * USEC_PER_MS; + while (!read_thread_closed) { + usec_t dt = heartbeat_next(&hb, tick); + UNUSED(dt); + } + + freez(filesystem_threads.thread); + ebpf_cleanup_publish_syscall(filesystem_publish_aggregated); + + ebpf_filesystem_cleanup_ebpf_data(); + if (dimensions) + ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS); + freez(filesystem_hash_values); +} + +/***************************************************************** + * + * MAIN THREAD + * + *****************************************************************/ + +/** + * Select hist + * + * Select a histogram to store data. + * + * @param efp pointer to the filesystem structure that owns the histograms.
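+ * @param idx output parameter that receives the bin index inside the selected histogram (e.g. id = 30 selects hwrite with idx = 30 - NETDATA_KEY_CALLS_READ = 6).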
+ * @param id histogram selector + * + * @return It returns a pointer to the selected histogram, or NULL when the id does not match any histogram. + */ +static inline netdata_ebpf_histogram_t *select_hist(ebpf_filesystem_partitions_t *efp, uint32_t *idx, uint32_t id) +{ + if (id < NETDATA_KEY_CALLS_READ) { + *idx = id; + return &efp->hread; + } else if (id < NETDATA_KEY_CALLS_WRITE) { + *idx = id - NETDATA_KEY_CALLS_READ; + return &efp->hwrite; + } else if (id < NETDATA_KEY_CALLS_OPEN) { + *idx = id - NETDATA_KEY_CALLS_WRITE; + return &efp->hopen; + } else if (id < NETDATA_KEY_CALLS_SYNC) { + *idx = id - NETDATA_KEY_CALLS_OPEN; + return &efp->hadditional; + } + + return NULL; +} + +/** + * Read filesystem table + * + * Read the table with the number of calls for all functions. + * + * @param efp the filesystem structure that receives the histogram values + * @param fd file descriptor of the hash table + */ +static void read_filesystem_table(ebpf_filesystem_partitions_t *efp, int fd) +{ + netdata_idx_t *values = filesystem_hash_values; + uint32_t key; + uint32_t idx; + for (key = 0; key < NETDATA_KEY_CALLS_SYNC; key++) { + netdata_ebpf_histogram_t *w = select_hist(efp, &idx, key); + if (!w) { + continue; + } + + int test = bpf_map_lookup_elem(fd, &key, values); + if (test < 0) { + continue; + } + + uint64_t total = 0; + int i; + int end = ebpf_nprocs; + for (i = 0; i < end; i++) { + total += values[i]; + } + + if (idx >= NETDATA_EBPF_HIST_MAX_BINS) + idx = NETDATA_EBPF_HIST_MAX_BINS - 1; + w->histogram[idx] = total; + } +} + +/** + * Read filesystem tables + * + * Read the tables of all monitored filesystems. + */ +static void read_filesystem_tables() +{ + int i; + for (i = 0; localfs[i].filesystem; i++) { + ebpf_filesystem_partitions_t *efp = &localfs[i]; + if (efp->flags & NETDATA_FILESYSTEM_FLAG_HAS_PARTITION) { + read_filesystem_table(efp, fs_maps[i].map_fd); + } + } +} + +/** + * Filesystem read hash + * + * This is the thread callback. + * This thread is necessary, because we cannot freeze the whole plugin while reading data from very busy hash tables. + * + * @param ptr a pointer to `ebpf_module_t`. + * + * @return It always returns NULL. + */ +void *ebpf_filesystem_read_hash(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + read_thread_closed = 0; + + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = NETDATA_FILESYSTEM_READ_SLEEP_MS * em->update_every; + int update_every = em->update_every; + while (!close_ebpf_plugin) { + usec_t dt = heartbeat_next(&hb, step); + (void)dt; + + (void) ebpf_update_partitions(em); + ebpf_obsolete_fs_charts(update_every); + + // No more partitions, it is not necessary to read tables + if (em->optional) + continue; + + read_filesystem_tables(); + } + + read_thread_closed = 1; + return NULL; +} + +/** + * Send histogram data + * + * Send filesystem histogram data to Netdata.
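+ * + * @note Each chart receives one value per histogram bin (NETDATA_EBPF_HIST_MAX_BINS dimensions).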
+ */ +static void ebpf_histogram_send_data() +{ + uint32_t i; + uint32_t test = NETDATA_FILESYSTEM_FLAG_HAS_PARTITION | NETDATA_FILESYSTEM_REMOVE_CHARTS; + for (i = 0; localfs[i].filesystem; i++) { + ebpf_filesystem_partitions_t *efp = &localfs[i]; + if ((efp->flags & test) == NETDATA_FILESYSTEM_FLAG_HAS_PARTITION) { + write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hread.name, + efp->hread.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS); + + write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name, + efp->hwrite.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS); + + write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name, + efp->hopen.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS); + + write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hadditional.name, + efp->hadditional.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS); + } + } +} + +/** + * Main loop for this collector. + * + * @param em main structure for this thread + */ +static void filesystem_collector(ebpf_module_t *em) +{ + filesystem_threads.thread = mallocz(sizeof(netdata_thread_t)); + filesystem_threads.start_routine = ebpf_filesystem_read_hash; + + netdata_thread_create(filesystem_threads.thread, filesystem_threads.name, + NETDATA_THREAD_OPTION_JOINABLE, ebpf_filesystem_read_hash, em); + + int update_every = em->update_every; + int counter = update_every - 1; + while (!close_ebpf_plugin || em->optional) { + pthread_mutex_lock(&collect_data_mutex); + pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); + + if (++counter == update_every) { + counter = 0; + pthread_mutex_lock(&lock); + + ebpf_create_fs_charts(update_every); + ebpf_histogram_send_data(); + + pthread_mutex_unlock(&lock); + } + + pthread_mutex_unlock(&collect_data_mutex); + } +} + +/***************************************************************** + * + * ENTRY THREAD + * + *****************************************************************/ + +/** + * Update Filesystem + * + * Update the filesystem structure using the values read from the configuration file. + */ +static void ebpf_update_filesystem() +{ + char dist[NETDATA_FS_MAX_DIST_NAME + 1]; + int i; + for (i = 0; localfs[i].filesystem; i++) { + snprintfz(dist, NETDATA_FS_MAX_DIST_NAME, "%sdist", localfs[i].filesystem); + + localfs[i].enabled = appconfig_get_boolean(&fs_config, NETDATA_FILESYSTEM_CONFIG_NAME, dist, + CONFIG_BOOLEAN_YES); + } +} + +/** + * Filesystem thread + * + * Thread used to generate filesystem charts.
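+ * Loads one eBPF program per discovered filesystem and publishes their latency histograms.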
+ * + * @param ptr a pointer to `struct ebpf_module` + * + * @return It always returns NULL + */ +void *ebpf_filesystem_thread(void *ptr) +{ + netdata_thread_cleanup_push(ebpf_filesystem_cleanup, ptr); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + em->maps = fs_maps; + ebpf_update_filesystem(); + + if (!em->enabled) + goto endfilesystem; + + // Initialize optional as zero, to identify when there are no partitions to monitor + em->optional = 0; + + if (ebpf_update_partitions(em)) { + if (em->optional) + info("Netdata cannot monitor the filesystems used on this host."); + + em->enabled = 0; + goto endfilesystem; + } + + int algorithms[NETDATA_EBPF_HIST_MAX_BINS]; + ebpf_fill_algorithms(algorithms, NETDATA_EBPF_HIST_MAX_BINS, NETDATA_EBPF_INCREMENTAL_IDX); + ebpf_global_labels(filesystem_aggregated_data, filesystem_publish_aggregated, dimensions, dimensions, + algorithms, NETDATA_EBPF_HIST_MAX_BINS); + + pthread_mutex_lock(&lock); + ebpf_create_fs_charts(em->update_every); + pthread_mutex_unlock(&lock); + + filesystem_collector(em); + +endfilesystem: + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/collectors/ebpf.plugin/ebpf_filesystem.h b/collectors/ebpf.plugin/ebpf_filesystem.h new file mode 100644 index 000000000..295eec205 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_filesystem.h @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_EBPF_FILESYSTEM_H +#define NETDATA_EBPF_FILESYSTEM_H 1 + +// Module name +#define NETDATA_EBPF_MODULE_NAME_FILESYSTEM "filesystem" + +#include "ebpf.h" + +#define NETDATA_FS_MAX_DIST_NAME 64UL + +#define NETDATA_FILESYSTEM_CONFIG_NAME "filesystem" +#define NETDATA_FILESYSTEM_READ_SLEEP_MS 600000ULL + +// Filesystem configuration name +#define NETDATA_FILESYSTEM_CONFIG_FILE "filesystem.conf" + +typedef struct netdata_fs_hist { + uint32_t hist_id; + uint32_t bin; +} netdata_fs_hist_t; + +enum filesystem_limit { + NETDATA_KEY_CALLS_READ = 24, + NETDATA_KEY_CALLS_WRITE = 48, + NETDATA_KEY_CALLS_OPEN = 72, + NETDATA_KEY_CALLS_SYNC = 96 +}; + +enum netdata_filesystem_flags { + NETDATA_FILESYSTEM_FLAG_NO_PARTITION = 0, + NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM = 1, + NETDATA_FILESYSTEM_FLAG_HAS_PARTITION = 2, + NETDATA_FILESYSTEM_FLAG_CHART_CREATED = 4, + NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE = 8, + NETDATA_FILESYSTEM_REMOVE_CHARTS = 16, + NETDATA_FILESYSTEM_ATTR_CHARTS = 32 +}; + +enum netdata_filesystem_table { + NETDATA_MAIN_FS_TABLE, + NETDATA_ADDR_FS_TABLE +}; + +typedef struct ebpf_filesystem_partitions { + char *filesystem; + char *optional_filesystem; + char *family; + char *family_name; + struct bpf_object *objects; + struct bpf_link **probe_links; + + netdata_ebpf_histogram_t hread; + netdata_ebpf_histogram_t hwrite; + netdata_ebpf_histogram_t hopen; + netdata_ebpf_histogram_t hadditional; + + uint32_t flags; + uint32_t enabled; + + ebpf_addresses_t addresses; +} ebpf_filesystem_partitions_t; + +extern void *ebpf_filesystem_thread(void *ptr); +extern struct config fs_config; + +#endif /* NETDATA_EBPF_FILESYSTEM_H */ diff --git a/collectors/ebpf.plugin/ebpf_hardirq.c b/collectors/ebpf.plugin/ebpf_hardirq.c new file mode 100644 index 000000000..ff649e9cd --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_hardirq.c @@ -0,0 +1,494 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "ebpf.h" +#include "ebpf_hardirq.h" + +struct config hardirq_config = { .first_section = NULL, + .last_section = NULL, + .mutex = NETDATA_MUTEX_INITIALIZER, + .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare
}, + .rwlock = AVL_LOCK_INITIALIZER } }; + +#define HARDIRQ_MAP_LATENCY 0 +#define HARDIRQ_MAP_LATENCY_STATIC 1 +static ebpf_local_maps_t hardirq_maps[] = { + { + .name = "tbl_hardirq", + .internal_input = NETDATA_HARDIRQ_MAX_IRQS, + .user_input = 0, + .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED + }, + { + .name = "tbl_hardirq_static", + .internal_input = HARDIRQ_EBPF_STATIC_END, + .user_input = 0, + .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED + }, + /* end */ + { + .name = NULL, + .internal_input = 0, + .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED + } +}; + +#define HARDIRQ_TP_CLASS_IRQ "irq" +#define HARDIRQ_TP_CLASS_IRQ_VECTORS "irq_vectors" +static ebpf_tracepoint_t hardirq_tracepoints[] = { + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ, .event = "irq_handler_entry"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ, .event = "irq_handler_exit"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "thermal_apic_entry"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "thermal_apic_exit"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "threshold_apic_entry"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "threshold_apic_exit"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "error_apic_entry"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "error_apic_exit"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "deferred_error_apic_entry"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "deferred_error_apic_exit"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "spurious_apic_entry"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "spurious_apic_exit"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "call_function_entry"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "call_function_exit"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "call_function_single_entry"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "call_function_single_exit"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "reschedule_entry"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "reschedule_exit"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "local_timer_entry"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "local_timer_exit"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "irq_work_entry"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "irq_work_exit"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "x86_platform_ipi_entry"}, + {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "x86_platform_ipi_exit"}, + /* end */ + {.enabled = false, .class = NULL, .event = NULL} +}; + +static hardirq_static_val_t hardirq_static_vals[] = { + { + .idx = HARDIRQ_EBPF_STATIC_APIC_THERMAL, + .name = "apic_thermal", + .latency = 0 + }, + { + .idx = HARDIRQ_EBPF_STATIC_APIC_THRESHOLD, + .name = "apic_threshold", + .latency = 0 + }, + { + .idx = HARDIRQ_EBPF_STATIC_APIC_ERROR, + .name = "apic_error", + .latency = 0 + }, + { + .idx = HARDIRQ_EBPF_STATIC_APIC_DEFERRED_ERROR, + .name = "apic_deferred_error", + .latency = 0 + }, + { + .idx = 
HARDIRQ_EBPF_STATIC_APIC_SPURIOUS, + .name = "apic_spurious", + .latency = 0 + }, + { + .idx = HARDIRQ_EBPF_STATIC_FUNC_CALL, + .name = "func_call", + .latency = 0 + }, + { + .idx = HARDIRQ_EBPF_STATIC_FUNC_CALL_SINGLE, + .name = "func_call_single", + .latency = 0 + }, + { + .idx = HARDIRQ_EBPF_STATIC_RESCHEDULE, + .name = "reschedule", + .latency = 0 + }, + { + .idx = HARDIRQ_EBPF_STATIC_LOCAL_TIMER, + .name = "local_timer", + .latency = 0 + }, + { + .idx = HARDIRQ_EBPF_STATIC_IRQ_WORK, + .name = "irq_work", + .latency = 0 + }, + { + .idx = HARDIRQ_EBPF_STATIC_X86_PLATFORM_IPI, + .name = "x86_platform_ipi", + .latency = 0 + }, +}; + +static struct bpf_link **probe_links = NULL; +static struct bpf_object *objects = NULL; + +static int read_thread_closed = 1; + +// store for "published" data from the reader thread, which the collector +// thread will write to netdata agent. +static avl_tree_lock hardirq_pub; + +// tmp store for dynamic hard IRQ values we get from a per-CPU eBPF map. +static hardirq_ebpf_val_t *hardirq_ebpf_vals = NULL; + +// tmp store for static hard IRQ values we get from a per-CPU eBPF map. +static hardirq_ebpf_static_val_t *hardirq_ebpf_static_vals = NULL; + +static struct netdata_static_thread hardirq_threads = {"HARDIRQ KERNEL", + NULL, NULL, 1, NULL, + NULL, NULL }; + +/** + * Clean up the main thread. + * + * @param ptr thread data. + */ +static void hardirq_cleanup(void *ptr) +{ + for (int i = 0; hardirq_tracepoints[i].class != NULL; i++) { + ebpf_disable_tracepoint(&hardirq_tracepoints[i]); + } + + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + return; + } + + heartbeat_t hb; + heartbeat_init(&hb); + uint32_t tick = 1 * USEC_PER_MS; + while (!read_thread_closed) { + usec_t dt = heartbeat_next(&hb, tick); + UNUSED(dt); + } + + freez(hardirq_ebpf_vals); + freez(hardirq_ebpf_static_vals); + freez(hardirq_threads.thread); + + if (probe_links) { + struct bpf_program *prog; + size_t i = 0; + bpf_object__for_each_program(prog, objects) { + bpf_link__destroy(probe_links[i]); + i++; + } + bpf_object__close(objects); + } +} + +/***************************************************************** + * MAIN LOOP + *****************************************************************/ + +/** + * Compare hard IRQ values. + * + * @param a `hardirq_val_t *`. + * @param b `hardirq_val_t *`. + * + * @return 0 if a==b, 1 if a>b, -1 if a<b. +*/ +static int hardirq_val_cmp(void *a, void *b) +{ + hardirq_val_t *ptr1 = a; + hardirq_val_t *ptr2 = b; + + if (ptr1->irq > ptr2->irq) { + return 1; + } + else if (ptr1->irq < ptr2->irq) { + return -1; + } + else { + return 0; + } +} + +static void hardirq_read_latency_map(int mapfd) +{ + hardirq_ebpf_key_t key = {}; + hardirq_ebpf_key_t next_key = {}; + hardirq_val_t search_v = {}; + hardirq_val_t *v = NULL; + + while (bpf_map_get_next_key(mapfd, &key, &next_key) == 0) { + // get val for this key. + int test = bpf_map_lookup_elem(mapfd, &key, hardirq_ebpf_vals); + if (unlikely(test < 0)) { + key = next_key; + continue; + } + + // is this IRQ saved yet? + // + // if not, make a new one, mark it as unsaved for now, and continue; we + // will insert it at the end after all of its values are correctly set, + // so that we can safely publish it to the collector within a single, + // short locked operation. + // + // otherwise simply continue; we will only update the latency, which + // can be republished safely without a lock. + // + // NOTE: lock isn't strictly necessary for this initial search, as only + // this thread does writing, but the AVL is using a read-write lock so + // there is no congestion.
+ bool v_is_new = false; + search_v.irq = key.irq; + v = (hardirq_val_t *)avl_search_lock(&hardirq_pub, (avl_t *)&search_v); + if (unlikely(v == NULL)) { + // latency/name can only be added reliably at a later time. + // when they're added, only then will we AVL insert. + v = callocz(1, sizeof(hardirq_val_t)); + v->irq = key.irq; + v->dim_exists = false; + + v_is_new = true; + } + + // note two things: + // 1. we must add up latency value for this IRQ across all CPUs. + // 2. the name is unfortunately *not* available on all CPU maps - only + // a single map contains the name, so we must find it. we only need + // to copy it though if the IRQ is new for us. + bool name_saved = false; + uint64_t total_latency = 0; + int i; + int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs; + for (i = 0; i < end; i++) { + total_latency += hardirq_ebpf_vals[i].latency/1000; + + // copy name for new IRQs. + if (v_is_new && !name_saved && hardirq_ebpf_vals[i].name[0] != '\0') { + strncpyz( + v->name, + hardirq_ebpf_vals[i].name, + NETDATA_HARDIRQ_NAME_LEN + ); + name_saved = true; + } + } + + // can now safely publish latency for existing IRQs. + v->latency = total_latency; + + // can now safely publish new IRQ. + if (v_is_new) { + avl_t *check = avl_insert_lock(&hardirq_pub, (avl_t *)v); + if (check != (avl_t *)v) { + error("Internal error, cannot insert the AVL tree."); + } + } + + key = next_key; + } +} + +static void hardirq_read_latency_static_map(int mapfd) +{ + uint32_t i; + for (i = 0; i < HARDIRQ_EBPF_STATIC_END; i++) { + uint32_t map_i = hardirq_static_vals[i].idx; + int test = bpf_map_lookup_elem(mapfd, &map_i, hardirq_ebpf_static_vals); + if (unlikely(test < 0)) { + continue; + } + + uint64_t total_latency = 0; + int cpu_i; + int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs; + for (cpu_i = 0; cpu_i < end; cpu_i++) { + total_latency += hardirq_ebpf_static_vals[cpu_i].latency/1000; + } + + hardirq_static_vals[i].latency = total_latency; + } +} + +/** + * Read eBPF maps for hard IRQ. + */ +static void *hardirq_reader(void *ptr) +{ + read_thread_closed = 0; + + heartbeat_t hb; + heartbeat_init(&hb); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + + usec_t step = NETDATA_HARDIRQ_SLEEP_MS * em->update_every; + while (!close_ebpf_plugin) { + usec_t dt = heartbeat_next(&hb, step); + UNUSED(dt); + + hardirq_read_latency_map(hardirq_maps[HARDIRQ_MAP_LATENCY].map_fd); + hardirq_read_latency_static_map(hardirq_maps[HARDIRQ_MAP_LATENCY_STATIC].map_fd); + } + + read_thread_closed = 1; + return NULL; +} + +static void hardirq_create_charts(int update_every) +{ + ebpf_create_chart( + NETDATA_EBPF_SYSTEM_GROUP, + "hardirq_latency", + "Hardware IRQ latency", + EBPF_COMMON_DIMENSION_MILLISECONDS, + "interrupts", + NULL, + NETDATA_EBPF_CHART_TYPE_STACKED, + NETDATA_CHART_PRIO_HARDIRQ_LATENCY, + NULL, NULL, 0, update_every, + NETDATA_EBPF_MODULE_NAME_HARDIRQ + ); + + fflush(stdout); +} + +static void hardirq_create_static_dims() +{ + uint32_t i; + for (i = 0; i < HARDIRQ_EBPF_STATIC_END; i++) { + ebpf_write_global_dimension( + hardirq_static_vals[i].name, hardirq_static_vals[i].name, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX] + ); + } +} + +// callback for avl tree traversal on `hardirq_pub`. +static int hardirq_write_dims(void *entry, void *data) +{ + UNUSED(data); + + hardirq_val_t *v = entry; + + // IRQs get dynamically added in, so add the dimension if we haven't yet. 
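+ // the chart itself was created once at startup; the agent accepts dimensions appended to an existing chart at runtime.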
+ if (!v->dim_exists) { + ebpf_write_global_dimension( + v->name, v->name, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX] + ); + v->dim_exists = true; + } + + write_chart_dimension(v->name, v->latency); + + return 1; +} + +static inline void hardirq_write_static_dims() +{ + uint32_t i; + for (i = 0; i < HARDIRQ_EBPF_STATIC_END; i++) { + write_chart_dimension( + hardirq_static_vals[i].name, + hardirq_static_vals[i].latency + ); + } +} + +/** +* Main loop for this collector. +*/ +static void hardirq_collector(ebpf_module_t *em) +{ + hardirq_ebpf_vals = callocz( + (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs, + sizeof(hardirq_ebpf_val_t) + ); + hardirq_ebpf_static_vals = callocz( + (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs, + sizeof(hardirq_ebpf_static_val_t) + ); + + avl_init_lock(&hardirq_pub, hardirq_val_cmp); + + // create reader thread. + hardirq_threads.thread = mallocz(sizeof(netdata_thread_t)); + hardirq_threads.start_routine = hardirq_reader; + netdata_thread_create( + hardirq_threads.thread, + hardirq_threads.name, + NETDATA_THREAD_OPTION_JOINABLE, + hardirq_reader, + em + ); + + // create chart and static dims. + pthread_mutex_lock(&lock); + hardirq_create_charts(em->update_every); + hardirq_create_static_dims(); + pthread_mutex_unlock(&lock); + + // loop and read from published data until ebpf plugin is closed. + int update_every = em->update_every; + int counter = update_every - 1; + while (!close_ebpf_plugin) { + pthread_mutex_lock(&collect_data_mutex); + pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); + + if (++counter == update_every) { + counter = 0; + pthread_mutex_lock(&lock); + + // write dims now for all hitherto discovered IRQs. + write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "hardirq_latency"); + avl_traverse_lock(&hardirq_pub, hardirq_write_dims, NULL); + hardirq_write_static_dims(); + write_end_chart(); + + pthread_mutex_unlock(&lock); + } + + pthread_mutex_unlock(&collect_data_mutex); + } +} + +/***************************************************************** + * EBPF HARDIRQ THREAD + *****************************************************************/ + +/** + * Hard IRQ latency thread. + * + * @param ptr a `ebpf_module_t *`. + * @return always NULL. + */ +void *ebpf_hardirq_thread(void *ptr) +{ + netdata_thread_cleanup_push(hardirq_cleanup, ptr); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + em->maps = hardirq_maps; + + if (!em->enabled) { + goto endhardirq; + } + + if (ebpf_enable_tracepoints(hardirq_tracepoints) == 0) { + em->enabled = CONFIG_BOOLEAN_NO; + goto endhardirq; + } + + probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects); + if (!probe_links) { + goto endhardirq; + } + + hardirq_collector(em); + +endhardirq: + netdata_thread_cleanup_pop(1); + + return NULL; +} diff --git a/collectors/ebpf.plugin/ebpf_hardirq.h b/collectors/ebpf.plugin/ebpf_hardirq.h new file mode 100644 index 000000000..4c8a7a098 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_hardirq.h @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_EBPF_HARDIRQ_H +#define NETDATA_EBPF_HARDIRQ_H 1 + +/***************************************************************** + * copied from kernel-collectors repo, with modifications needed + * for inclusion here. 
+ *****************************************************************/ + +#define NETDATA_HARDIRQ_NAME_LEN 32 +#define NETDATA_HARDIRQ_MAX_IRQS 1024L + +typedef struct hardirq_ebpf_key { + int irq; +} hardirq_ebpf_key_t; + +typedef struct hardirq_ebpf_val { + uint64_t latency; + uint64_t ts; + char name[NETDATA_HARDIRQ_NAME_LEN]; +} hardirq_ebpf_val_t; + +enum hardirq_ebpf_static { + HARDIRQ_EBPF_STATIC_APIC_THERMAL, + HARDIRQ_EBPF_STATIC_APIC_THRESHOLD, + HARDIRQ_EBPF_STATIC_APIC_ERROR, + HARDIRQ_EBPF_STATIC_APIC_DEFERRED_ERROR, + HARDIRQ_EBPF_STATIC_APIC_SPURIOUS, + HARDIRQ_EBPF_STATIC_FUNC_CALL, + HARDIRQ_EBPF_STATIC_FUNC_CALL_SINGLE, + HARDIRQ_EBPF_STATIC_RESCHEDULE, + HARDIRQ_EBPF_STATIC_LOCAL_TIMER, + HARDIRQ_EBPF_STATIC_IRQ_WORK, + HARDIRQ_EBPF_STATIC_X86_PLATFORM_IPI, + + HARDIRQ_EBPF_STATIC_END +}; + +typedef struct hardirq_ebpf_static_val { + uint64_t latency; + uint64_t ts; +} hardirq_ebpf_static_val_t; + +/***************************************************************** + * below this is eBPF plugin-specific code. + *****************************************************************/ + +#define NETDATA_EBPF_MODULE_NAME_HARDIRQ "hardirq" +#define NETDATA_HARDIRQ_SLEEP_MS 650000ULL +#define NETDATA_HARDIRQ_CONFIG_FILE "hardirq.conf" + +typedef struct hardirq_val { + // must be at top for simplified AVL tree usage. + // if it's not at the top, we need to use `containerof` for almost all ops. + avl_t avl; + + int irq; + bool dim_exists; // keep this after `int irq` for alignment byte savings. + uint64_t latency; + char name[NETDATA_HARDIRQ_NAME_LEN]; +} hardirq_val_t; + +typedef struct hardirq_static_val { + enum hardirq_ebpf_static idx; + char *name; + uint64_t latency; +} hardirq_static_val_t; + +extern struct config hardirq_config; +extern void *ebpf_hardirq_thread(void *ptr); + +#endif /* NETDATA_EBPF_HARDIRQ_H */ diff --git a/collectors/ebpf.plugin/ebpf_mdflush.c b/collectors/ebpf.plugin/ebpf_mdflush.c new file mode 100644 index 000000000..e2420ec8e --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_mdflush.c @@ -0,0 +1,312 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "ebpf.h" +#include "ebpf_mdflush.h" + +struct config mdflush_config = { .first_section = NULL, + .last_section = NULL, + .mutex = NETDATA_MUTEX_INITIALIZER, + .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, + .rwlock = AVL_LOCK_INITIALIZER } }; + +#define MDFLUSH_MAP_COUNT 0 +static ebpf_local_maps_t mdflush_maps[] = { + { + .name = "tbl_mdflush", + .internal_input = 1024, + .user_input = 0, + .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED + }, + /* end */ + { + .name = NULL, + .internal_input = 0, + .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED + } +}; + +// store for "published" data from the reader thread, which the collector +// thread will write to netdata agent. +static avl_tree_lock mdflush_pub; + +// tmp store for mdflush values we get from a per-CPU eBPF map. +static mdflush_ebpf_val_t *mdflush_ebpf_vals = NULL; + +static struct bpf_link **probe_links = NULL; +static struct bpf_object *objects = NULL; + +static int read_thread_closed = 1; + +static struct netdata_static_thread mdflush_threads = {"MDFLUSH KERNEL", + NULL, NULL, 1, NULL, + NULL, NULL }; + +/** + * Clean up the main thread. + * + * @param ptr thread data. 
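+ * + * @note It waits for the reader thread to close before releasing the shared buffers.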
+ */ +static void mdflush_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + return; + } + + heartbeat_t hb; + heartbeat_init(&hb); + uint32_t tick = 1 * USEC_PER_MS; + while (!read_thread_closed) { + usec_t dt = heartbeat_next(&hb, tick); + UNUSED(dt); + } + + freez(mdflush_ebpf_vals); + freez(mdflush_threads.thread); + + if (probe_links) { + struct bpf_program *prog; + size_t i = 0; + bpf_object__for_each_program(prog, objects) { + bpf_link__destroy(probe_links[i]); + i++; + } + bpf_object__close(objects); + } +} + +/** + * Compare mdflush values. + * + * @param a `netdata_mdflush_t *`. + * @param b `netdata_mdflush_t *`. + * + * @return 0 if a==b, 1 if a>b, -1 if a<b. +*/ +static int mdflush_val_cmp(void *a, void *b) +{ + netdata_mdflush_t *ptr1 = a; + netdata_mdflush_t *ptr2 = b; + + if (ptr1->unit > ptr2->unit) { + return 1; + } + else if (ptr1->unit < ptr2->unit) { + return -1; + } + else { + return 0; + } +} + +static void mdflush_read_count_map() +{ + int mapfd = mdflush_maps[MDFLUSH_MAP_COUNT].map_fd; + mdflush_ebpf_key_t curr_key = (uint32_t)-1; + mdflush_ebpf_key_t key = (uint32_t)-1; + netdata_mdflush_t search_v; + netdata_mdflush_t *v = NULL; + + while (bpf_map_get_next_key(mapfd, &curr_key, &key) == 0) { + curr_key = key; + + // get val for this key. + int test = bpf_map_lookup_elem(mapfd, &key, mdflush_ebpf_vals); + if (unlikely(test < 0)) { + continue; + } + + // is this record saved yet? + // + // if not, make a new one, mark it as unsaved for now, and continue; we + // will insert it at the end after all of its values are correctly set, + // so that we can safely publish it to the collector within a single, + // short locked operation. + // + // otherwise simply continue; we will only update the flush count, + // which can be republished safely without a lock. + // + // NOTE: lock isn't strictly necessary for this initial search, as only + // this thread does writing, but the AVL is using a read-write lock so + // there is no congestion. + bool v_is_new = false; + search_v.unit = key; + v = (netdata_mdflush_t *)avl_search_lock( + &mdflush_pub, + (avl_t *)&search_v + ); + if (unlikely(v == NULL)) { + // flush count can only be added reliably at a later time. + // when they're added, only then will we AVL insert. + v = callocz(1, sizeof(netdata_mdflush_t)); + v->unit = key; + sprintf(v->disk_name, "md%u", key); + v->dim_exists = false; + + v_is_new = true; + } + + // we must add up count value for this record across all CPUs. + uint64_t total_cnt = 0; + int i; + int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs; + for (i = 0; i < end; i++) { + total_cnt += mdflush_ebpf_vals[i]; + } + + // can now safely publish count for existing records. + v->cnt = total_cnt; + + // can now safely publish new record. + if (v_is_new) { + avl_t *check = avl_insert_lock(&mdflush_pub, (avl_t *)v); + if (check != (avl_t *)v) { + error("Internal error, cannot insert the AVL tree."); + } + } + } +} + +/** + * Read eBPF maps for mdflush.
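+ * + * @note The count map is polled every NETDATA_MDFLUSH_SLEEP_MS * em->update_every microseconds.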
+ */ +static void *mdflush_reader(void *ptr) +{ + read_thread_closed = 0; + + heartbeat_t hb; + heartbeat_init(&hb); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + + usec_t step = NETDATA_MDFLUSH_SLEEP_MS * em->update_every; + while (!close_ebpf_plugin) { + usec_t dt = heartbeat_next(&hb, step); + UNUSED(dt); + + mdflush_read_count_map(); + } + + read_thread_closed = 1; + return NULL; +} + +static void mdflush_create_charts(int update_every) +{ + ebpf_create_chart( + "mdstat", + "mdstat_flush", + "MD flushes", + "flushes", + "flush (eBPF)", + "md.flush", + NETDATA_EBPF_CHART_TYPE_STACKED, + NETDATA_CHART_PRIO_MDSTAT_FLUSH, + NULL, NULL, 0, update_every, + NETDATA_EBPF_MODULE_NAME_MDFLUSH + ); + + fflush(stdout); +} + +// callback for avl tree traversal on `mdflush_pub`. +static int mdflush_write_dims(void *entry, void *data) +{ + UNUSED(data); + + netdata_mdflush_t *v = entry; + + // records get dynamically added in, so add the dim if we haven't yet. + if (!v->dim_exists) { + ebpf_write_global_dimension( + v->disk_name, v->disk_name, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX] + ); + v->dim_exists = true; + } + + write_chart_dimension(v->disk_name, v->cnt); + + return 1; +} + +/** +* Main loop for this collector. +*/ +static void mdflush_collector(ebpf_module_t *em) +{ + mdflush_ebpf_vals = callocz(ebpf_nprocs, sizeof(mdflush_ebpf_val_t)); + + avl_init_lock(&mdflush_pub, mdflush_val_cmp); + + // create reader thread. + mdflush_threads.thread = mallocz(sizeof(netdata_thread_t)); + mdflush_threads.start_routine = mdflush_reader; + netdata_thread_create( + mdflush_threads.thread, + mdflush_threads.name, + NETDATA_THREAD_OPTION_JOINABLE, + mdflush_reader, + em + ); + + // create chart and static dims. + pthread_mutex_lock(&lock); + mdflush_create_charts(em->update_every); + pthread_mutex_unlock(&lock); + + // loop and read from published data until ebpf plugin is closed. + int update_every = em->update_every; + int counter = update_every - 1; + while (!close_ebpf_plugin) { + pthread_mutex_lock(&collect_data_mutex); + pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); + + if (++counter == update_every) { + counter = 0; + pthread_mutex_lock(&lock); + + // write dims now for all hitherto discovered devices. + write_begin_chart("mdstat", "mdstat_flush"); + avl_traverse_lock(&mdflush_pub, mdflush_write_dims, NULL); + write_end_chart(); + + pthread_mutex_unlock(&lock); + } + + pthread_mutex_unlock(&collect_data_mutex); + } +} + +/** + * mdflush thread. + * + * @param ptr a `ebpf_module_t *`. + * @return always NULL. 
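+ * + * @note The thread exits early when the module is disabled or when the eBPF program cannot be loaded.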
+ */ +void *ebpf_mdflush_thread(void *ptr) +{ + netdata_thread_cleanup_push(mdflush_cleanup, ptr); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + em->maps = mdflush_maps; + + if (!em->enabled) { + goto endmdflush; + } + + probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects); + if (!probe_links) { + goto endmdflush; + } + + mdflush_collector(em); + +endmdflush: + netdata_thread_cleanup_pop(1); + + return NULL; +} diff --git a/collectors/ebpf.plugin/ebpf_mdflush.h b/collectors/ebpf.plugin/ebpf_mdflush.h new file mode 100644 index 000000000..59856ad67 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_mdflush.h @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_EBPF_MDFLUSH_H +#define NETDATA_EBPF_MDFLUSH_H 1 + +// Module name +#define NETDATA_EBPF_MODULE_NAME_MDFLUSH "mdflush" + +#define NETDATA_MDFLUSH_SLEEP_MS 850000ULL + +// charts +#define NETDATA_MDFLUSH_GLOBAL_CHART "mdflush" + +// configuration file +#define NETDATA_DIRECTORY_MDFLUSH_CONFIG_FILE "mdflush.conf" + +// copy of mdflush types from kernel-collectors repo. +typedef uint32_t mdflush_ebpf_key_t; +typedef uint64_t mdflush_ebpf_val_t; + +typedef struct netdata_mdflush { + // must be at top for simplified AVL tree usage. + // if it's not at the top, we need to use `containerof` for almost all ops. + avl_t avl; + + // key & name of device. + // the name is generated by the key, usually as `md<unit>`. + uint32_t unit; + char disk_name[32]; + + // have we defined the dimension for this device yet? + bool dim_exists; + + // incremental flush count value. + uint64_t cnt; +} netdata_mdflush_t; + +extern void *ebpf_mdflush_thread(void *ptr); + +extern struct config mdflush_config; + +#endif diff --git a/collectors/ebpf.plugin/ebpf_mount.c b/collectors/ebpf.plugin/ebpf_mount.c new file mode 100644 index 000000000..46f323471 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_mount.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "ebpf.h" +#include "ebpf_mount.h" + +static ebpf_local_maps_t mount_maps[] = {{.name = "tbl_mount", .internal_input = NETDATA_MOUNT_END, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = NULL, .internal_input = 0, .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}}; + +static char *mount_dimension_name[NETDATA_EBPF_MOUNT_SYSCALL] = { "mount", "umount" }; +static netdata_syscall_stat_t mount_aggregated_data[NETDATA_EBPF_MOUNT_SYSCALL]; +static netdata_publish_syscall_t mount_publish_aggregated[NETDATA_EBPF_MOUNT_SYSCALL]; + +struct config mount_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER, + .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare }, + .rwlock = AVL_LOCK_INITIALIZER } }; + +static int read_thread_closed = 1; +static netdata_idx_t *mount_values = NULL; + +static struct bpf_link **probe_links = NULL; +static struct bpf_object *objects = NULL; + +static netdata_idx_t mount_hash_values[NETDATA_MOUNT_END]; + +struct netdata_static_thread mount_thread = {"MOUNT KERNEL", + NULL, NULL, 1, NULL, + NULL, NULL}; + +/***************************************************************** + * + * FUNCTIONS TO CLOSE THE THREAD + * + *****************************************************************/ + +/** + * Clean up the main thread. + * + * @param ptr thread data.
+ */ +static void ebpf_mount_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) + return; + + freez(mount_thread.thread); + freez(mount_values); + + if (probe_links) { + struct bpf_program *prog; + size_t i = 0 ; + bpf_object__for_each_program(prog, objects) { + bpf_link__destroy(probe_links[i]); + i++; + } + bpf_object__close(objects); + } +} + +/***************************************************************** + * + * MAIN LOOP + * + *****************************************************************/ + +/** + * Read global table + * + * Read the table with number of calls for all functions + */ +static void read_global_table() +{ + uint32_t idx; + netdata_idx_t *val = mount_hash_values; + netdata_idx_t *stored = mount_values; + int fd = mount_maps[NETDATA_KEY_MOUNT_TABLE].map_fd; + + for (idx = NETDATA_KEY_MOUNT_CALL; idx < NETDATA_MOUNT_END; idx++) { + if (!bpf_map_lookup_elem(fd, &idx, stored)) { + int i; + int end = ebpf_nprocs; + netdata_idx_t total = 0; + for (i = 0; i < end; i++) + total += stored[i]; + + val[idx] = total; + } + } +} + +/** + * Mount read hash + * + * This is the thread callback. + * This thread is necessary, because we cannot freeze the whole plugin to read the data. + * + * @param ptr It is a NULL value for this thread. + * + * @return It always returns NULL. + */ +void *ebpf_mount_read_hash(void *ptr) +{ + read_thread_closed = 0; + + heartbeat_t hb; + heartbeat_init(&hb); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + + usec_t step = NETDATA_LATENCY_MOUNT_SLEEP_MS * em->update_every; + while (!close_ebpf_plugin) { + usec_t dt = heartbeat_next(&hb, step); + (void)dt; + + read_global_table(); + } + read_thread_closed = 1; + + return NULL; +} + +/** + * Send data to Netdata calling auxiliary functions. +*/ +static void ebpf_mount_send_data() +{ + int i, j; + int end = NETDATA_EBPF_MOUNT_SYSCALL; + for (i = NETDATA_KEY_MOUNT_CALL, j = NETDATA_KEY_MOUNT_ERROR; i < end; i++, j++) { + mount_publish_aggregated[i].ncall = mount_hash_values[i]; + mount_publish_aggregated[i].nerr = mount_hash_values[j]; + } + + write_count_chart(NETDATA_EBPF_MOUNT_CALLS, NETDATA_EBPF_MOUNT_GLOBAL_FAMILY, + mount_publish_aggregated, NETDATA_EBPF_MOUNT_SYSCALL); + + write_err_chart(NETDATA_EBPF_MOUNT_ERRORS, NETDATA_EBPF_MOUNT_GLOBAL_FAMILY, + mount_publish_aggregated, NETDATA_EBPF_MOUNT_SYSCALL); +} + +/** +* Main loop for this collector. +*/ +static void mount_collector(ebpf_module_t *em) +{ + mount_thread.thread = mallocz(sizeof(netdata_thread_t)); + mount_thread.start_routine = ebpf_mount_read_hash; + memset(mount_hash_values, 0, sizeof(mount_hash_values)); + + mount_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t)); + + netdata_thread_create(mount_thread.thread, mount_thread.name, NETDATA_THREAD_OPTION_JOINABLE, + ebpf_mount_read_hash, em); + + int update_every = em->update_every; + int counter = update_every - 1; + while (!close_ebpf_plugin) { + pthread_mutex_lock(&collect_data_mutex); + pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); + + if (++counter == update_every) { + counter = 0; + pthread_mutex_lock(&lock); + + ebpf_mount_send_data(); + + pthread_mutex_unlock(&lock); + } + + pthread_mutex_unlock(&collect_data_mutex); + } +} + +/***************************************************************** + * + * INITIALIZE THREAD + * + *****************************************************************/ + +/** + * Create mount charts + * + * Call ebpf_create_chart to create the charts for the collector. 
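+ * One chart is created for the number of calls and another one for the errors.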
+ * + * @param update_every value to overwrite the update frequency set by the server. + */ +static void ebpf_create_mount_charts(int update_every) +{ + ebpf_create_chart(NETDATA_EBPF_MOUNT_GLOBAL_FAMILY, NETDATA_EBPF_MOUNT_CALLS, + "Calls to mount and umount syscalls.", + EBPF_COMMON_DIMENSION_CALL, NETDATA_EBPF_MOUNT_FAMILY, + NULL, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_EBPF_MOUNT_CHARTS, + ebpf_create_global_dimension, + mount_publish_aggregated, NETDATA_EBPF_MOUNT_SYSCALL, + update_every, NETDATA_EBPF_MODULE_NAME_MOUNT); + + ebpf_create_chart(NETDATA_EBPF_MOUNT_GLOBAL_FAMILY, NETDATA_EBPF_MOUNT_ERRORS, + "Errors to mount and umount syscalls.", + EBPF_COMMON_DIMENSION_CALL, NETDATA_EBPF_MOUNT_FAMILY, + NULL, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_EBPF_MOUNT_CHARTS + 1, + ebpf_create_global_dimension, + mount_publish_aggregated, NETDATA_EBPF_MOUNT_SYSCALL, + update_every, NETDATA_EBPF_MODULE_NAME_MOUNT); + + fflush(stdout); +} + +/***************************************************************** + * + * MAIN THREAD + * + *****************************************************************/ + +/** + * Mount thread + * + * Thread that monitors calls to mount and umount syscalls + * + * @param ptr a pointer to `struct ebpf_module` + * + * @return It always returns NULL + */ +void *ebpf_mount_thread(void *ptr) +{ + netdata_thread_cleanup_push(ebpf_mount_cleanup, ptr); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + em->maps = mount_maps; + + if (!em->enabled) + goto endmount; + + probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects); + if (!probe_links) { + goto endmount; + } + + int algorithms[NETDATA_EBPF_MOUNT_SYSCALL] = { NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX }; + + ebpf_global_labels(mount_aggregated_data, mount_publish_aggregated, mount_dimension_name, mount_dimension_name, + algorithms, NETDATA_EBPF_MOUNT_SYSCALL); + + pthread_mutex_lock(&lock); + ebpf_create_mount_charts(em->update_every); + pthread_mutex_unlock(&lock); + + mount_collector(em); + +endmount: + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/collectors/ebpf.plugin/ebpf_mount.h b/collectors/ebpf.plugin/ebpf_mount.h new file mode 100644 index 000000000..700bea13b --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_mount.h @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_EBPF_MOUNT_H +#define NETDATA_EBPF_MOUNT_H 1 + +// Module name +#define NETDATA_EBPF_MODULE_NAME_MOUNT "mount" + +#define NETDATA_EBPF_MOUNT_SYSCALL 2 + +#define NETDATA_LATENCY_MOUNT_SLEEP_MS 700000ULL + +#define NETDATA_EBPF_MOUNT_CALLS "call" +#define NETDATA_EBPF_MOUNT_ERRORS "error" +#define NETDATA_EBPF_MOUNT_FAMILY "mount (eBPF)" + +// Mount configuration name +#define NETDATA_MOUNT_CONFIG_FILE "mount.conf" + +enum mount_counters { + NETDATA_KEY_MOUNT_CALL, + NETDATA_KEY_UMOUNT_CALL, + NETDATA_KEY_MOUNT_ERROR, + NETDATA_KEY_UMOUNT_ERROR, + + NETDATA_MOUNT_END +}; + +enum mount_tables { + NETDATA_KEY_MOUNT_TABLE +}; + +extern struct config mount_config; +extern void *ebpf_mount_thread(void *ptr); + +#endif /* NETDATA_EBPF_MOUNT_H */ diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c new file mode 100644 index 000000000..7f7df36f9 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_oomkill.c @@ -0,0 +1,400 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "ebpf.h" +#include "ebpf_oomkill.h" + +struct config oomkill_config = { .first_section = NULL, + .last_section = NULL, + .mutex = NETDATA_MUTEX_INITIALIZER, +
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, + .rwlock = AVL_LOCK_INITIALIZER } }; + +#define OOMKILL_MAP_KILLCNT 0 +static ebpf_local_maps_t oomkill_maps[] = { + { + .name = "tbl_oomkill", + .internal_input = NETDATA_OOMKILL_MAX_ENTRIES, + .user_input = 0, + .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED + }, + /* end */ + { + .name = NULL, + .internal_input = 0, + .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED + } +}; + +static ebpf_tracepoint_t oomkill_tracepoints[] = { + {.enabled = false, .class = "oom", .event = "mark_victim"}, + /* end */ + {.enabled = false, .class = NULL, .event = NULL} +}; + +static struct bpf_link **probe_links = NULL; +static struct bpf_object *objects = NULL; + +static netdata_publish_syscall_t oomkill_publish_aggregated = {.name = "oomkill", .dimension = "oomkill", + .algorithm = "absolute", + .next = NULL}; + +/** + * Clean up the main thread. + * + * @param ptr thread data. + */ +static void oomkill_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + return; + } + + if (probe_links) { + struct bpf_program *prog; + size_t i = 0 ; + bpf_object__for_each_program(prog, objects) { + bpf_link__destroy(probe_links[i]); + i++; + } + bpf_object__close(objects); + } +} + +static void oomkill_write_data(int32_t *keys, uint32_t total) +{ + // for each app, see if it was OOM killed. record as 1 if so otherwise 0. + struct target *w; + for (w = apps_groups_root_target; w != NULL; w = w->next) { + if (likely(w->exposed && w->processes)) { + bool was_oomkilled = false; + struct pid_on_target *pids = w->root_pid; + while (pids) { + uint32_t j; + for (j = 0; j < total; j++) { + if (pids->pid == keys[j]) { + was_oomkilled = true; + // set to 0 so we consider it "done". + keys[j] = 0; + goto write_dim; + } + } + pids = pids->next; + } + + write_dim:; + write_chart_dimension(w->name, was_oomkilled); + } + } + + // for any remaining keys for which we couldn't find a group, this could be + // for various reasons, but the primary one is that the PID has not yet + // been picked up by the process thread when parsing the proc filesystem. + // since it's been OOM killed, it will never be parsed in the future, so + // we have no choice but to dump it into `other`. + uint32_t j; + uint32_t rem_count = 0; + for (j = 0; j < total; j++) { + int32_t key = keys[j]; + if (key != 0) { + rem_count += 1; + } + } + if (rem_count > 0) { + write_chart_dimension("other", rem_count); + } +} + +/** + * Create specific OOMkill charts + * + * Create charts for cgroup/application. + * + * @param type the chart type. + * @param update_every value to overwrite the update frequency set by the server. + */ +static void ebpf_create_specific_oomkill_charts(char *type, int update_every) +{ + ebpf_create_chart(type, NETDATA_OOMKILL_CHART, "OOM kills. This chart is provided by eBPF plugin.", + EBPF_COMMON_DIMENSION_KILLS, NETDATA_EBPF_MEMORY_GROUP, + NULL, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5600, + ebpf_create_global_dimension, + &oomkill_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_OOMKILL); +} + +/** + * Create Systemd OOMkill Charts + * + * Create charts when systemd is enabled + * + * @param update_every value to overwrite the update frequency set by the server. + **/ +static void ebpf_create_systemd_oomkill_charts(int update_every) +{ + ebpf_create_charts_on_systemd(NETDATA_OOMKILL_CHART, "OOM kills. 
This chart is provided by eBPF plugin.", + EBPF_COMMON_DIMENSION_KILLS, NETDATA_EBPF_MEMORY_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, 20191, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL, + NETDATA_EBPF_MODULE_NAME_OOMKILL, update_every); +} + +/** + * Send Systemd charts + * + * Send collected data to Netdata. + * + * @return It returns the status of the chart creation: zero when a specific dimension must be removed, + * 1 otherwise, to avoid recreating the charts + */ +static int ebpf_send_systemd_oomkill_charts() +{ + int ret = 1; + ebpf_cgroup_target_t *ect; + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_OOMKILL_CHART); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, (long long) ect->oomkill); + ect->oomkill = 0; + } else + ret = 0; + } + write_end_chart(); + + return ret; +} + +/* + * Send Specific OOMkill data + * + * Send data for specific cgroup/apps. + * + * @param type chart type + * @param value value for oomkill + */ +static void ebpf_send_specific_oomkill_data(char *type, int value) +{ + write_begin_chart(type, NETDATA_OOMKILL_CHART); + write_chart_dimension(oomkill_publish_aggregated.name, (long long)value); + write_end_chart(); +} + +/** + * Obsolete specific OOMkill charts + * + * Mark the charts for cgroup/application as obsolete. + * + * @param type the chart type. + * @param update_every value to overwrite the update frequency set by the server. + */ +static void ebpf_obsolete_specific_oomkill_charts(char *type, int update_every) +{ + ebpf_write_chart_obsolete(type, NETDATA_OOMKILL_CHART, "OOM kills. This chart is provided by eBPF plugin.", + EBPF_COMMON_DIMENSION_KILLS, NETDATA_EBPF_MEMORY_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NULL, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5600, update_every); +} + +/** + * Send data to Netdata calling auxiliary functions. + * + * @param update_every value to overwrite the update frequency set by the server. +*/ +void ebpf_oomkill_send_cgroup_data(int update_every) +{ + if (!ebpf_cgroup_pids) + return; + + pthread_mutex_lock(&mutex_cgroup_shm); + ebpf_cgroup_target_t *ect; + + int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; + if (has_systemd) { + static int systemd_charts = 0; + if (!systemd_charts) { + ebpf_create_systemd_oomkill_charts(update_every); + systemd_charts = 1; + } + systemd_charts = ebpf_send_systemd_oomkill_charts(); + } + + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (ect->systemd) + continue; + + if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART) && ect->updated) { + ebpf_create_specific_oomkill_charts(ect->name, update_every); + ect->flags |= NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART; + } + + if (ect->flags & NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART && ect->updated) { + ebpf_send_specific_oomkill_data(ect->name, ect->oomkill); + } else { + ebpf_obsolete_specific_oomkill_charts(ect->name, update_every); + ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART; + } + } + + pthread_mutex_unlock(&mutex_cgroup_shm); +} + +/** + * Read data + * + * Read OOMKILL events from table. + * + * @param keys vector where data will be stored + * + * @return It returns the number of read elements + */ +static uint32_t oomkill_read_data(int32_t *keys) +{ + // the first `i` entries of `keys` will contain the currently active PIDs + // in the eBPF map.
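+ // the caller's buffer has NETDATA_OOMKILL_MAX_ENTRIES slots, matching the eBPF map size, so the loop cannot overflow it.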
+ uint32_t i = 0; + + uint32_t curr_key = 0; + uint32_t key = 0; + int mapfd = oomkill_maps[OOMKILL_MAP_KILLCNT].map_fd; + while (bpf_map_get_next_key(mapfd, &curr_key, &key) == 0) { + curr_key = key; + + keys[i] = (int32_t)key; + i += 1; + + // delete this key now that we've recorded its existence. there's no + // race here, as the same PID will only get OOM killed once. + int test = bpf_map_delete_elem(mapfd, &key); + if (unlikely(test < 0)) { + // since there's only 1 thread doing these deletions, it should be + // impossible to get this condition. + error("key unexpectedly not available for deletion."); + } + } + + return i; +} + +/** + * Update cgroup + * + * Update cgroup data based on the PIDs that had an OOM kill event. + * + * @param keys vector with PIDs that had an OOM kill event + * @param total number of elements in keys vector. + */ +static void ebpf_update_oomkill_cgroup(int32_t *keys, uint32_t total) +{ + ebpf_cgroup_target_t *ect; + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + ect->oomkill = 0; + struct pid_on_target2 *pids; + for (pids = ect->pids; pids; pids = pids->next) { + uint32_t j; + int32_t pid = pids->pid; + for (j = 0; j < total; j++) { + if (pid == keys[j]) { + ect->oomkill = 1; + break; + } + } + } + } +} + +/** +* Main loop for this collector. +*/ +static void oomkill_collector(ebpf_module_t *em) +{ + int cgroups = em->cgroup_charts; + int update_every = em->update_every; + int counter = update_every - 1; + int32_t keys[NETDATA_OOMKILL_MAX_ENTRIES]; + memset(keys, 0, sizeof(keys)); + + // loop and read until ebpf plugin is closed. + while (!close_ebpf_plugin) { + pthread_mutex_lock(&collect_data_mutex); + pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); + + if (++counter == update_every) { + counter = 0; + pthread_mutex_lock(&lock); + + uint32_t count = oomkill_read_data(keys); + if (cgroups && count) + ebpf_update_oomkill_cgroup(keys, count); + + // write everything from the ebpf map. + if (cgroups) + ebpf_oomkill_send_cgroup_data(update_every); + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_OOMKILL_CHART); + oomkill_write_data(keys, count); + write_end_chart(); + + pthread_mutex_unlock(&lock); + } + + pthread_mutex_unlock(&collect_data_mutex); + } +} + +/** + * Create apps charts + * + * Call ebpf_create_chart to create the charts on apps submenu. + * + * @param em a pointer to the structure with the default values. + */ +void ebpf_oomkill_create_apps_charts(struct ebpf_module *em, void *ptr) +{ + struct target *root = ptr; + ebpf_create_charts_on_apps(NETDATA_OOMKILL_CHART, + "OOM kills", + EBPF_COMMON_DIMENSION_KILLS, + "mem", + NETDATA_EBPF_CHART_TYPE_STACKED, + 20020, + ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_OOMKILL); +} + +/** + * OOM kill tracking thread. + * + * @param ptr a `ebpf_module_t *`. + * @return always NULL.
+ */ +void *ebpf_oomkill_thread(void *ptr) +{ + netdata_thread_cleanup_push(oomkill_cleanup, ptr); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + em->maps = oomkill_maps; + + if (!em->enabled) { + goto endoomkill; + } + + if (ebpf_enable_tracepoints(oomkill_tracepoints) == 0) { + em->enabled = CONFIG_BOOLEAN_NO; + goto endoomkill; + } + + probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects); + if (!probe_links) { + goto endoomkill; + } + + oomkill_collector(em); + +endoomkill: + netdata_thread_cleanup_pop(1); + + return NULL; +} diff --git a/collectors/ebpf.plugin/ebpf_oomkill.h b/collectors/ebpf.plugin/ebpf_oomkill.h new file mode 100644 index 000000000..86f9463dd --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_oomkill.h @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_EBPF_OOMKILL_H +#define NETDATA_EBPF_OOMKILL_H 1 + +/***************************************************************** + * copied from kernel-collectors repo, with modifications needed + * for inclusion here. + *****************************************************************/ + +#define NETDATA_OOMKILL_MAX_ENTRIES 64 + +typedef uint8_t oomkill_ebpf_val_t; + +/***************************************************************** + * below this is eBPF plugin-specific code. + *****************************************************************/ + +#define NETDATA_EBPF_MODULE_NAME_OOMKILL "oomkill" +#define NETDATA_OOMKILL_SLEEP_MS 650000ULL +#define NETDATA_OOMKILL_CONFIG_FILE "oomkill.conf" + +#define NETDATA_OOMKILL_CHART "oomkills" + +extern struct config oomkill_config; +extern void *ebpf_oomkill_thread(void *ptr); +extern void ebpf_oomkill_create_apps_charts(struct ebpf_module *em, void *ptr); + +#endif /* NETDATA_EBPF_OOMKILL_H */ diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c index 9b15c8407..a4a6709e8 100644 --- a/collectors/ebpf.plugin/ebpf_process.c +++ b/collectors/ebpf.plugin/ebpf_process.c @@ -11,29 +11,42 @@ * *****************************************************************/ -static char *process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "open", "close", "delete", "read", "write", - "process", "task", "process", "thread" }; -static char *process_id_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "do_sys_open", "__close_fd", "vfs_unlink", - "vfs_read", "vfs_write", "do_exit", - "release_task", "_do_fork", "sys_clone" }; +static char *process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "process", "task", "process", "thread" }; +static char *process_id_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "do_exit", "release_task", "_do_fork", "sys_clone" }; static char *status[] = { "process", "zombie" }; static ebpf_local_maps_t process_maps[] = {{.name = "tbl_pid_stats", .internal_input = ND_EBPF_DEFAULT_PID_SIZE, - .user_input = 0}, - {.name = NULL, .internal_input = 0, .user_input = 0}}; + .user_input = 0, + .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_total_stats", .internal_input = NETDATA_KEY_END_VECTOR, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "process_ctrl", .internal_input = NETDATA_CONTROLLER_END, + .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = NULL, .internal_input = 0, .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}}; + +char 
*tracepoint_sched_type = { "sched" }; +char *tracepoint_sched_process_exit = { "sched_process_exit" }; +char *tracepoint_sched_process_exec = { "sched_process_exec" }; +char *tracepoint_sched_process_fork = { "sched_process_fork" }; +static int was_sched_process_exit_enabled = 0; +static int was_sched_process_exec_enabled = 0; +static int was_sched_process_fork_enabled = 0; static netdata_idx_t *process_hash_values = NULL; static netdata_syscall_stat_t process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_END]; static netdata_publish_syscall_t process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_END]; -static ebpf_data_t process_data; - ebpf_process_stat_t **global_process_stats = NULL; ebpf_process_publish_apps_t **current_apps_data = NULL; int process_enabled = 0; -static int *map_fd = NULL; static struct bpf_object *objects = NULL; static struct bpf_link **probe_links = NULL; @@ -43,6 +56,8 @@ struct config process_config = { .first_section = NULL, .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, .rwlock = AVL_LOCK_INITIALIZER } }; +static struct netdata_static_thread cgroup_thread = {"EBPF CGROUP", NULL, NULL, + 1, NULL, NULL, NULL}; /***************************************************************** * * PROCESS DATA AND SEND TO NETDATA @@ -56,40 +71,30 @@ struct config process_config = { .first_section = NULL, * @param pvc the second output structure with correlated dimensions * @param input the structure with the input data. */ -static void ebpf_update_global_publish( - netdata_publish_syscall_t *publish, netdata_publish_vfs_common_t *pvc, netdata_syscall_stat_t *input) +static void ebpf_update_global_publish(netdata_publish_syscall_t *publish, netdata_publish_vfs_common_t *pvc, + netdata_syscall_stat_t *input) { netdata_publish_syscall_t *move = publish; - int selector = NETDATA_KEY_PUBLISH_PROCESS_OPEN; + int selector = NETDATA_KEY_PUBLISH_PROCESS_EXIT; while (move) { - // Until NETDATA_KEY_PUBLISH_PROCESS_READ we are creating accumulators, so it is possible - // to use incremental charts, but after this we will do some math with the values, so we are storing - // absolute values - if (selector < NETDATA_KEY_PUBLISH_PROCESS_READ) { - move->ncall = input->call; - move->nbyte = input->bytes; - move->nerr = input->ecall; - } else { - move->ncall = (input->call > move->pcall) ? input->call - move->pcall : move->pcall - input->call; - move->nbyte = (input->bytes > move->pbyte) ? input->bytes - move->pbyte : move->pbyte - input->bytes; - move->nerr = (input->ecall > move->nerr) ? input->ecall - move->perr : move->perr - input->ecall; + move->ncall = (input->call > move->pcall) ? input->call - move->pcall : move->pcall - input->call; + move->nbyte = (input->bytes > move->pbyte) ? input->bytes - move->pbyte : move->pbyte - input->bytes; + move->nerr = (input->ecall > move->perr) ? 
input->ecall - move->perr : move->perr - input->ecall; - move->pcall = input->call; - move->pbyte = input->bytes; - move->perr = input->ecall; - } + move->pcall = input->call; + move->pbyte = input->bytes; + move->perr = input->ecall; input = input->next; move = move->next; selector++; } - pvc->write = -((long)publish[NETDATA_KEY_PUBLISH_PROCESS_WRITE].nbyte); - pvc->read = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_READ].nbyte; - - pvc->running = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_FORK].ncall - (long)publish[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ncall; + pvc->running = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_FORK].ncall - + (long)publish[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ncall; publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall = -publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall; - pvc->zombie = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_EXIT].ncall + (long)publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall; + pvc->zombie = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_EXIT].ncall + + (long)publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall; } /** @@ -109,7 +114,7 @@ static void write_status_chart(char *family, netdata_publish_vfs_common_t *pvc) } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param em the structure with thread information */ @@ -118,33 +123,16 @@ static void ebpf_process_send_data(ebpf_module_t *em) netdata_publish_vfs_common_t pvc; ebpf_update_global_publish(process_publish_aggregated, &pvc, process_aggregated_data); - write_count_chart( - NETDATA_FILE_OPEN_CLOSE_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2); - - write_count_chart( - NETDATA_VFS_FILE_CLEAN_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_DEL_START], 1); - - write_count_chart( - NETDATA_VFS_FILE_IO_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_IN_START_BYTE], 2); - - write_count_chart( - NETDATA_EXIT_SYSCALL, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_EXIT_START], 2); - write_count_chart( - NETDATA_PROCESS_SYSCALL, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_PROCESS_START], 2); + write_count_chart(NETDATA_EXIT_SYSCALL, NETDATA_EBPF_SYSTEM_GROUP, + &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT], 2); + write_count_chart(NETDATA_PROCESS_SYSCALL, NETDATA_EBPF_SYSTEM_GROUP, + &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK], 2); - write_status_chart(NETDATA_EBPF_FAMILY, &pvc); + write_status_chart(NETDATA_EBPF_SYSTEM_GROUP, &pvc); if (em->mode < MODE_ENTRY) { - write_err_chart( - NETDATA_FILE_OPEN_ERR_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2); - write_err_chart( - NETDATA_VFS_FILE_ERR_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[2], NETDATA_VFS_ERRORS); - write_err_chart( - NETDATA_PROCESS_ERROR_NAME, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_PROCESS_START], 2); + write_err_chart(NETDATA_PROCESS_ERROR_NAME, NETDATA_EBPF_SYSTEM_GROUP, + &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK], 2); } - - write_io_chart(NETDATA_VFS_IO_FILE_BYTES, NETDATA_EBPF_FAMILY, - process_id_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE], (long long) pvc.write, - process_id_names[NETDATA_KEY_PUBLISH_PROCESS_READ], (long long)pvc.read); } /** @@ -180,7 +168,7 @@ long long ebpf_process_sum_values_for_pids(struct pid_on_target *root, size_t of void ebpf_process_remove_pids() { struct pid_stat *pids = root_of_pids; - int pid_fd = map_fd[0]; + int pid_fd = 
process_maps[NETDATA_PROCESS_PID_TABLE].map_fd; while (pids) { uint32_t pid = pids->pid; ebpf_process_stat_t *w = global_process_stats[pid]; @@ -197,163 +185,65 @@ void ebpf_process_remove_pids() } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * - * @param em the structure with thread information * @param root the target list. */ -void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root) +void ebpf_process_send_apps_data(struct target *root, ebpf_module_t *em) { struct target *w; collected_number value; - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_sys_open)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); - - if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_process_sum_values_for_pids( - w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_sys_open)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); - } - - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED); + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS); for (w = root; w; w = w->next) { if (unlikely(w->exposed && w->processes)) { - value = - ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_close_fd)); + value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, create_process)); write_chart_dimension(w->name, value); } } write_end_chart(); - if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_process_sum_values_for_pids( - w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_close_fd)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); - } - - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED); + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD); for (w = root; w; w = w->next) { if (unlikely(w->exposed && w->processes)) { - value = - ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_vfs_unlink)); + value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, create_thread)); write_chart_dimension(w->name, value); } } write_end_chart(); - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS); + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_EXIT); for (w = root; w; w = w->next) { if (unlikely(w->exposed && w->processes)) { - value = ebpf_process_sum_values_for_pids( - w->root_pid, offsetof(ebpf_process_publish_apps_t, call_write)); + value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, + call_do_exit)); write_chart_dimension(w->name, value); } } write_end_chart(); - if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_process_sum_values_for_pids( - w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_write)); 
- write_chart_dimension(w->name, value); - } - } - write_end_chart(); - } - - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS); + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE); for (w = root; w; w = w->next) { if (unlikely(w->exposed && w->processes)) { - value = - ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_read)); + value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, + call_release_task)); write_chart_dimension(w->name, value); } } write_end_chart(); if (em->mode < MODE_ENTRY) { - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR); + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_ERROR); for (w = root; w; w = w->next) { if (unlikely(w->exposed && w->processes)) { - value = ebpf_process_sum_values_for_pids( - w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_read)); + value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, + task_err)); write_chart_dimension(w->name, value); } } write_end_chart(); } - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_process_sum_values_for_pids( - w->root_pid, offsetof(ebpf_process_publish_apps_t, bytes_written)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); - - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_process_sum_values_for_pids( - w->root_pid, offsetof(ebpf_process_publish_apps_t, bytes_read)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); - - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = - ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_do_fork)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); - - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = - ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_sys_clone)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); - - write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE); - for (w = root; w; w = w->next) { - if (unlikely(w->exposed && w->processes)) { - value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, - call_release_task)); - write_chart_dimension(w->name, value); - } - } - write_end_chart(); - ebpf_process_remove_pids(); } @@ -369,14 +259,15 @@ void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root) static void read_hash_global_tables() { uint64_t idx; - netdata_idx_t res[NETDATA_GLOBAL_VECTOR]; + netdata_idx_t res[NETDATA_KEY_END_VECTOR]; netdata_idx_t *val = process_hash_values; - for (idx = 0; idx < NETDATA_GLOBAL_VECTOR; idx++) { - if (!bpf_map_lookup_elem(map_fd[1], &idx, val)) { + int fd = process_maps[NETDATA_PROCESS_GLOBAL_TABLE].map_fd; + for (idx = 0; idx < NETDATA_KEY_END_VECTOR; idx++) { + if (!bpf_map_lookup_elem(fd, &idx, val)) { uint64_t total = 0; int i; - int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 
1 : ebpf_nprocs; + int end = ebpf_nprocs; for (i = 0; i < end; i++) total += val[i]; @@ -386,28 +277,13 @@ static void read_hash_global_tables() } } - process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_OPEN].call = res[NETDATA_KEY_CALLS_DO_SYS_OPEN]; - process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLOSE].call = res[NETDATA_KEY_CALLS_CLOSE_FD]; - process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_UNLINK].call = res[NETDATA_KEY_CALLS_VFS_UNLINK]; - process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].call = res[NETDATA_KEY_CALLS_VFS_READ] + res[NETDATA_KEY_CALLS_VFS_READV]; - process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].call = res[NETDATA_KEY_CALLS_VFS_WRITE] + res[NETDATA_KEY_CALLS_VFS_WRITEV]; process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_EXIT].call = res[NETDATA_KEY_CALLS_DO_EXIT]; process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].call = res[NETDATA_KEY_CALLS_RELEASE_TASK]; process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_FORK].call = res[NETDATA_KEY_CALLS_DO_FORK]; process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLONE].call = res[NETDATA_KEY_CALLS_SYS_CLONE]; - process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_OPEN].ecall = res[NETDATA_KEY_ERROR_DO_SYS_OPEN]; - process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLOSE].ecall = res[NETDATA_KEY_ERROR_CLOSE_FD]; - process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_UNLINK].ecall = res[NETDATA_KEY_ERROR_VFS_UNLINK]; - process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].ecall = res[NETDATA_KEY_ERROR_VFS_READ] + res[NETDATA_KEY_ERROR_VFS_READV]; - process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].ecall = res[NETDATA_KEY_ERROR_VFS_WRITE] + res[NETDATA_KEY_ERROR_VFS_WRITEV]; process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_FORK].ecall = res[NETDATA_KEY_ERROR_DO_FORK]; process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ecall = res[NETDATA_KEY_ERROR_SYS_CLONE]; - - process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITE] + - (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITEV]; - process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_READ] + - (uint64_t)res[NETDATA_KEY_BYTES_VFS_READV]; } /** @@ -431,67 +307,53 @@ static void ebpf_process_update_apps_data() } //Read data - cad->call_sys_open = ps->open_call; - cad->call_close_fd = ps->close_call; - cad->call_vfs_unlink = ps->unlink_call; - cad->call_read = ps->read_call + ps->readv_call; - cad->call_write = ps->write_call + ps->writev_call; cad->call_do_exit = ps->exit_call; cad->call_release_task = ps->release_call; - cad->call_do_fork = ps->fork_call; - cad->call_sys_clone = ps->clone_call; + cad->create_process = ps->create_process; + cad->create_thread = ps->create_thread; - cad->ecall_sys_open = ps->open_err; - cad->ecall_close_fd = ps->close_err; - cad->ecall_vfs_unlink = ps->unlink_err; - cad->ecall_read = ps->read_err + ps->readv_err; - cad->ecall_write = ps->write_err + ps->writev_err; - cad->ecall_do_fork = ps->fork_err; - cad->ecall_sys_clone = ps->clone_err; - - cad->bytes_written = (uint64_t)ps->write_bytes + (uint64_t)ps->write_bytes; - cad->bytes_read = (uint64_t)ps->read_bytes + (uint64_t)ps->readv_bytes; + cad->task_err = ps->task_err; pids = pids->next; } } -/***************************************************************** - * - * FUNCTIONS TO CREATE CHARTS - * - *****************************************************************/ - /** - * Create IO chart + * Update cgroup * - * @param family the chart 
family - * @param name the chart name - * @param axis the axis label - * @param web the group name used to attach the chart on dashboard - * @param order the order number of the specified chart - * @param algorithm the algorithm used to make the charts. + * Update cgroup + * + * Update cgroup data based on the collected PIDs. */ -static void ebpf_create_io_chart(char *family, char *name, char *axis, char *web, int order, int algorithm) +static void ebpf_update_process_cgroup() { - printf("CHART %s.%s '' 'Bytes written and read' '%s' '%s' '' line %d %d\n", - family, - name, - axis, - web, - order, - update_every); - - printf("DIMENSION %s %s %s 1 1\n", - process_id_names[NETDATA_KEY_PUBLISH_PROCESS_READ], - process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_READ], - ebpf_algorithms[algorithm]); - printf("DIMENSION %s %s %s 1 1\n", - process_id_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE], - process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE], - ebpf_algorithms[algorithm]); + ebpf_cgroup_target_t *ect; + int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd; + + pthread_mutex_lock(&mutex_cgroup_shm); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + struct pid_on_target2 *pids; + for (pids = ect->pids; pids; pids = pids->next) { + int pid = pids->pid; + ebpf_process_stat_t *out = &pids->ps; + if (global_process_stats[pid]) { + ebpf_process_stat_t *in = global_process_stats[pid]; + + memcpy(out, in, sizeof(ebpf_process_stat_t)); + } else { + if (bpf_map_lookup_elem(pid_fd, &pid, out)) { + memset(out, 0, sizeof(ebpf_process_stat_t)); + } + } + } + } + pthread_mutex_unlock(&mutex_cgroup_shm); } +/***************************************************************** + * + * FUNCTIONS TO CREATE CHARTS + * + *****************************************************************/ + /** * Create process status chart * @@ -500,11 +362,12 @@ static void ebpf_create_io_chart(char *family, char *name, char *axis, char *web * @param axis the axis label * @param web the group name used to attach the chart on dashboard * @param order the order number of the specified chart + * @param update_every value to overwrite the update frequency set by the server. 
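 */
/*
 * A minimal sketch of the agent's external-plugin text protocol that the
 * printf() below (and write_begin_chart()/write_chart_dimension() elsewhere
 * in this file) emits on stdout. The chart and dimension names here are
 * invented for the illustration.
 */
#include <stdio.h>

int main(void)
{
    /* announce a chart once ... */
    printf("CHART demo.status '' 'Demo status' 'processes' 'demo' '' line 99999 1\n");
    printf("DIMENSION running '' absolute 1 1\n");
    /* ... then stream one value per collection cycle */
    printf("BEGIN demo.status\n");
    printf("SET running = 42\n");
    printf("END\n");
    fflush(stdout);
    return 0;
}
/*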
*/ static void ebpf_process_status_chart(char *family, char *name, char *axis, - char *web, char *algorithm, int order) + char *web, char *algorithm, int order, int update_every) { - printf("CHART %s.%s '' 'Process not closed' '%s' '%s' '' line %d %d ''\n", + printf("CHART %s.%s '' 'Process not closed' '%s' '%s' '' line %d %d '' 'ebpf.plugin' 'process'\n", family, name, axis, @@ -525,119 +388,49 @@ static void ebpf_process_status_chart(char *family, char *name, char *axis, */ static void ebpf_create_global_charts(ebpf_module_t *em) { - ebpf_create_chart(NETDATA_EBPF_FAMILY, - NETDATA_FILE_OPEN_CLOSE_COUNT, - "Open and close calls", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_FILE_GROUP, - NULL, - NETDATA_EBPF_CHART_TYPE_LINE, - 21000, - ebpf_create_global_dimension, - process_publish_aggregated, - 2); - - if (em->mode < MODE_ENTRY) { - ebpf_create_chart(NETDATA_EBPF_FAMILY, - NETDATA_FILE_OPEN_ERR_COUNT, - "Open fails", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_FILE_GROUP, - NULL, - NETDATA_EBPF_CHART_TYPE_LINE, - 21001, - ebpf_create_global_dimension, - process_publish_aggregated, - 2); - } - - ebpf_create_chart(NETDATA_EBPF_FAMILY, - NETDATA_VFS_FILE_CLEAN_COUNT, - "Remove files", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NULL, - NETDATA_EBPF_CHART_TYPE_LINE, - 21002, - ebpf_create_global_dimension, - &process_publish_aggregated[NETDATA_DEL_START], - 1); - - ebpf_create_chart(NETDATA_EBPF_FAMILY, - NETDATA_VFS_FILE_IO_COUNT, - "Calls to IO", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NULL, - NETDATA_EBPF_CHART_TYPE_LINE, - 21003, - ebpf_create_global_dimension, - &process_publish_aggregated[NETDATA_IN_START_BYTE], - 2); - - ebpf_create_io_chart(NETDATA_EBPF_FAMILY, - NETDATA_VFS_IO_FILE_BYTES, EBPF_COMMON_DIMENSION_BYTES, - NETDATA_VFS_GROUP, - 21004, - NETDATA_EBPF_ABSOLUTE_IDX); - - if (em->mode < MODE_ENTRY) { - ebpf_create_chart(NETDATA_EBPF_FAMILY, - NETDATA_VFS_FILE_ERR_COUNT, - "Fails to write or read", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_VFS_GROUP, - NULL, - NETDATA_EBPF_CHART_TYPE_LINE, - 21005, - ebpf_create_global_dimension, - &process_publish_aggregated[2], - NETDATA_VFS_ERRORS); - } - - ebpf_create_chart(NETDATA_EBPF_FAMILY, + ebpf_create_chart(NETDATA_EBPF_SYSTEM_GROUP, NETDATA_PROCESS_SYSCALL, "Start process", EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NULL, NETDATA_EBPF_CHART_TYPE_LINE, - 21006, + 21002, ebpf_create_global_dimension, - &process_publish_aggregated[NETDATA_PROCESS_START], - 2); + &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK], + 2, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); - ebpf_create_chart(NETDATA_EBPF_FAMILY, + ebpf_create_chart(NETDATA_EBPF_SYSTEM_GROUP, NETDATA_EXIT_SYSCALL, "Exit process", EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NULL, NETDATA_EBPF_CHART_TYPE_LINE, - 21007, + 21003, ebpf_create_global_dimension, - &process_publish_aggregated[NETDATA_EXIT_START], - 2); + &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT], + 2, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); - ebpf_process_status_chart(NETDATA_EBPF_FAMILY, + ebpf_process_status_chart(NETDATA_EBPF_SYSTEM_GROUP, NETDATA_PROCESS_STATUS_NAME, EBPF_COMMON_DIMENSION_DIFFERENCE, NETDATA_PROCESS_GROUP, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - 21008); + 21004, em->update_every); if (em->mode < MODE_ENTRY) { - ebpf_create_chart(NETDATA_EBPF_FAMILY, + ebpf_create_chart(NETDATA_EBPF_SYSTEM_GROUP, NETDATA_PROCESS_ERROR_NAME, "Fails to create process", EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NULL, 
NETDATA_EBPF_CHART_TYPE_LINE, - 21009, + 21005, ebpf_create_global_dimension, - &process_publish_aggregated[NETDATA_PROCESS_START], - 2); + &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK], + 2, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); } } @@ -652,137 +445,53 @@ static void ebpf_create_global_charts(ebpf_module_t *em) void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr) { struct target *root = ptr; - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN, - "Number of open files", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_FILE_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20061, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); - - if (em->mode < MODE_ENTRY) { - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, - "Fails to open files", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_FILE_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20062, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); - } - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSED, - "Files closed", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_FILE_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20063, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); - - if (em->mode < MODE_ENTRY) { - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, - "Fails to close files", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_FILE_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20064, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); - } - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_DELETED, - "Files deleted", + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_PROCESS, + "Process started", EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_VFS_GROUP, + NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, 20065, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); + ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, - "Write to disk", + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_THREAD, + "Threads started", EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_VFS_GROUP, + NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, 20066, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - apps_groups_root_target); - - if (em->mode < MODE_ENTRY) { - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, - "Fails to write", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20067, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); - } - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS, - "Read from disk", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20068, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); - - if (em->mode < MODE_ENTRY) { - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, - "Fails to read", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20069, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); - } - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, - "Bytes written on disk", EBPF_COMMON_DIMENSION_BYTES, - NETDATA_APPS_VFS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20070, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_BYTES, - "Bytes read from disk", EBPF_COMMON_DIMENSION_BYTES, - NETDATA_APPS_VFS_GROUP, 
- NETDATA_EBPF_CHART_TYPE_STACKED, - 20071, - ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); - - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_PROCESS, - "Process started", - EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_PROCESS_GROUP, - NETDATA_EBPF_CHART_TYPE_STACKED, - 20072, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); - ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_THREAD, - "Threads started", + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_EXIT, + "Tasks starts exit process.", EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_PROCESS_GROUP, + NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, - 20073, + 20067, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_CLOSE, "Tasks closed", EBPF_COMMON_DIMENSION_CALL, - NETDATA_APPS_PROCESS_GROUP, + NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_STACKED, - 20074, + 20068, ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); + + if (em->mode < MODE_ENTRY) { + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_ERROR, + "Errors to create process or threads.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20069, + ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX], + root, + em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); + } } /** @@ -790,10 +499,9 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr) * * Call ebpf_create_chart to create the charts on apps submenu. * - * @param em a pointer to the structure with the default values. * @param root a pointer for the targets. */ -static void ebpf_create_apps_charts(ebpf_module_t *em, struct target *root) +static void ebpf_create_apps_charts(struct target *root) { struct target *w; int newly_added = 0; @@ -831,7 +539,7 @@ static void ebpf_create_apps_charts(ebpf_module_t *em, struct target *root) for (counter = 0; ebpf_modules[counter].thread_name; counter++) { ebpf_module_t *current = &ebpf_modules[counter]; if (current->enabled && current->apps_charts && current->apps_routine) - current->apps_routine(em, root); + current->apps_routine(current, root); } } @@ -841,48 +549,441 @@ static void ebpf_create_apps_charts(ebpf_module_t *em, struct target *root) * *****************************************************************/ +/** + * Cgroup update shm + * + * This is the thread callback. + * This thread is necessary, because we cannot freeze the whole plugin to read the data from shared memory. + * + * @param ptr It is a NULL value for this thread. + * + * @return It always returns NULL. + */ +void *ebpf_cgroup_update_shm(void *ptr) +{ + UNUSED(ptr); + heartbeat_t hb; + heartbeat_init(&hb); + + usec_t step = 30 * USEC_PER_SEC; + while (!close_ebpf_plugin) { + usec_t dt = heartbeat_next(&hb, step); + (void)dt; + + if (close_ebpf_plugin) + break; + + if (!shm_ebpf_cgroup.header) + ebpf_map_cgroup_shared_memory(); + + ebpf_parse_cgroup_shm_data(); + } + + return NULL; +} + +/** + * Sum PIDs + * + * Sum values for all targets. 
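 */
/*
 * The summer below never lets a per-cgroup counter move backwards: the
 * freshly accumulated value replaces the stored one only when it is at
 * least as large. A small sketch of that monotonic merge; the names are
 * illustrative, not from the patch:
 */
static inline unsigned long long monotonic_merge(unsigned long long fresh,
                                                 unsigned long long stored)
{
    /* a per-PID sum can shrink when a PID exits; keeping the larger value
     * avoids counters that jump backwards between collection cycles */
    return (fresh >= stored) ? fresh : stored;
}
/*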
+ * + * @param ps structure used to store data + * @param pids input data + */ +static void ebpf_process_sum_cgroup_pids(ebpf_process_stat_t *ps, struct pid_on_target2 *pids) +{ + ebpf_process_stat_t accumulator; + memset(&accumulator, 0, sizeof(accumulator)); + + while (pids) { + ebpf_process_stat_t *w = &pids->ps; + + accumulator.exit_call += w->exit_call; + accumulator.release_call += w->release_call; + accumulator.create_process += w->create_process; + accumulator.create_thread += w->create_thread; + + accumulator.task_err += w->task_err; + + pids = pids->next; + } + + ps->exit_call = (accumulator.exit_call >= ps->exit_call) ? accumulator.exit_call : ps->exit_call; + ps->release_call = (accumulator.release_call >= ps->release_call) ? accumulator.release_call : ps->release_call; + ps->create_process = (accumulator.create_process >= ps->create_process) ? accumulator.create_process : ps->create_process; + ps->create_thread = (accumulator.create_thread >= ps->create_thread) ? accumulator.create_thread : ps->create_thread; + + ps->task_err = (accumulator.task_err >= ps->task_err) ? accumulator.task_err : ps->task_err; +} + +/** + * Send specific process data + * + * Send data for specific cgroup/apps. + * + * @param type chart type + * @param values structure with values that will be sent to netdata + * @param em the structure with thread information + */ +static void ebpf_send_specific_process_data(char *type, ebpf_process_stat_t *values, ebpf_module_t *em) +{ + write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_PROCESS); + write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK].name, + (long long) values->create_process); + write_end_chart(); + + write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_THREAD); + write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_CLONE].name, + (long long) values->create_thread); + write_end_chart(); + + write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_EXIT); + write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT].name, + (long long) values->exit_call); + write_end_chart(); + + write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_CLOSE); + write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].name, + (long long) values->release_call); + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_ERROR); + write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT].name, + (long long) values->task_err); + write_end_chart(); + } +} + +/** + * Create specific process charts + * + * Create charts for cgroup/application + * + * @param type the chart type. 
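 */
/*
 * Several senders in this file sum one struct field across all PIDs of a
 * target by passing offsetof(ebpf_process_publish_apps_t, field) to a
 * generic helper. A minimal sketch of that offset-based field access; the
 * struct and helper names are invented for the illustration:
 */
#include <stddef.h>
#include <stdint.h>

struct demo_stats { uint64_t calls; uint64_t errors; };

static uint64_t field_at(const struct demo_stats *s, size_t offset)
{
    /* read the uint64_t living `offset` bytes into the struct */
    return *(const uint64_t *)((const char *)s + offset);
}

/* usage: field_at(&stats, offsetof(struct demo_stats, errors)) */
/*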
+ * @param em the structure with thread information + */ +static void ebpf_create_specific_process_charts(char *type, ebpf_module_t *em) +{ + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_PROCESS, "Process started", + EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP, + NETDATA_CGROUP_PROCESS_CREATE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5000, + ebpf_create_global_dimension, &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); + + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_THREAD, "Threads started", + EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP, + NETDATA_CGROUP_THREAD_CREATE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5001, + ebpf_create_global_dimension, + &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_CLONE], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); + + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_EXIT, "Tasks starts exit process.", + EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP, + NETDATA_CGROUP_PROCESS_EXIT_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5002, + ebpf_create_global_dimension, + &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); + + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_CLOSE, "Tasks closed", + EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP, + NETDATA_CGROUP_PROCESS_CLOSE_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5003, + ebpf_create_global_dimension, + &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); + + if (em->mode < MODE_ENTRY) { + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_ERROR, "Errors to create process or threads.", + EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP, + NETDATA_CGROUP_PROCESS_ERROR_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5004, + ebpf_create_global_dimension, + &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS); + } +} + +/** + * Obsolete specific process charts + * + * Obsolete charts for cgroup/application + * + * @param type the chart type. 
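 */
/*
 * Retiring a chart, as the helpers below do, goes through the same text
 * protocol: the chart is re-announced with the "obsolete" option so the
 * agent hides it and stops expecting data. A hedged sketch; the field
 * layout mirrors the CHART lines printed elsewhere in this file:
 */
#include <stdio.h>

static void demo_obsolete_chart(const char *type, const char *id)
{
    printf("CHART %s.%s '' 'demo chart' 'calls' 'demo' '' line 99999 1 'obsolete'\n",
           type, id);
}
/*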
+ * @param em the structure with thread information + */ +static void ebpf_obsolete_specific_process_charts(char *type, ebpf_module_t *em) +{ + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_PROCESS, "Process started", + EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CGROUP_PROCESS_CREATE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5000, + em->update_every); + + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_THREAD, "Threads started", + EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CGROUP_THREAD_CREATE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5001, + em->update_every); + + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_EXIT, "Tasks starts exit process.", + EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CGROUP_PROCESS_EXIT_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5002, + em->update_every); + + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_CLOSE, "Tasks closed", + EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CGROUP_PROCESS_CLOSE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5003, + em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_ERROR, "Errors to create process or threads.", + EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CGROUP_PROCESS_ERROR_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5004, + em->update_every); + } +} + +/** + * Create Systemd process charts + * + * Create charts when systemd is enabled + * + * @param em the structure with thread information + **/ +static void ebpf_create_systemd_process_charts(ebpf_module_t *em) +{ + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_PROCESS, "Process started", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20065, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_CREATE_CONTEXT, + NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every); + + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_THREAD, "Threads started", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20066, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_THREAD_CREATE_CONTEXT, + NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every); + + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_EXIT, "Tasks starts exit process.", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20067, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_EXIT_CONTEXT, + NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every); + + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_CLOSE, "Tasks closed", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20068, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_CLOSE_CONTEXT, + NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_ERROR, "Errors to create process or threads.", + EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20069, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_ERROR_CONTEXT, + NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every); + } +} + +/** + * Send Systemd 
charts + * + * Send collected data to Netdata. + * + * @param em the structure with thread information + * + * @return It returns the status for chart creation, if it is necessary to remove a specific dimension, zero is returned + * otherwise function returns 1 to avoid chart recreation + */ +static int ebpf_send_systemd_process_charts(ebpf_module_t *em) +{ + int ret = 1; + ebpf_cgroup_target_t *ect; + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_ps.create_process); + } else + ret = 0; + } + write_end_chart(); + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_ps.create_thread); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_EXIT); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_ps.exit_call); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_ps.release_call); + } + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_ERROR); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_ps.task_err); + } + } + write_end_chart(); + } + + return ret; +} + +/** + * Send data to Netdata calling auxiliary functions. 
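 */
/*
 * The cgroup sender below pairs a static flag with the return value of the
 * systemd chart writer: charts are created once, then recreated whenever a
 * dimension drops out (the writer returns 0). A compact sketch of that
 * latch, with invented function names:
 */
static int systemd_charts_ok = 0;

static int write_systemd_dimensions(void)
{
    /* returns 0 when a dimension disappeared, 1 when all were written */
    return 1;
}

static void systemd_send_cycle(void)
{
    if (!systemd_charts_ok) {
        /* create_systemd_charts();  -- hypothetical creation step */
        systemd_charts_ok = 1;
    }
    systemd_charts_ok = write_systemd_dimensions(); /* 0 forces re-creation */
}
/*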
+ * + * @param em the structure with thread information +*/ +static void ebpf_process_send_cgroup_data(ebpf_module_t *em) +{ + if (!ebpf_cgroup_pids) + return; + + pthread_mutex_lock(&mutex_cgroup_shm); + ebpf_cgroup_target_t *ect; + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + ebpf_process_sum_cgroup_pids(&ect->publish_systemd_ps, ect->pids); + } + + int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; + + if (has_systemd) { + static int systemd_chart = 0; + if (!systemd_chart) { + ebpf_create_systemd_process_charts(em); + systemd_chart = 1; + } + + systemd_chart = ebpf_send_systemd_process_charts(em); + } + + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (ect->systemd) + continue; + + if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART) && ect->updated) { + ebpf_create_specific_process_charts(ect->name, em); + ect->flags |= NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART; + } + + if (ect->flags & NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART) { + if (ect->updated) { + ebpf_send_specific_process_data(ect->name, &ect->publish_systemd_ps, em); + } else { + ebpf_obsolete_specific_process_charts(ect->name, em); + ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART; + } + } + } + + pthread_mutex_unlock(&mutex_cgroup_shm); +} + +/** + * Update Cgroup algorithm + * + * Change algorithm from absolute to incremental + */ +void ebpf_process_update_cgroup_algorithm() +{ + int i; + for (i = 0; i < NETDATA_KEY_PUBLISH_PROCESS_END; i++) { + netdata_publish_syscall_t *ptr = &process_publish_aggregated[i]; + freez(ptr->algorithm); + ptr->algorithm = strdupz(ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]); + } +} + /** * Main loop for this collector. * - * @param step the number of microseconds used with heart beat * @param em the structure with thread information */ -static void process_collector(usec_t step, ebpf_module_t *em) +static void process_collector(ebpf_module_t *em) { + cgroup_thread.thread = mallocz(sizeof(netdata_thread_t)); + cgroup_thread.start_routine = ebpf_cgroup_update_shm; + + netdata_thread_create(cgroup_thread.thread, cgroup_thread.name, NETDATA_THREAD_OPTION_JOINABLE, + ebpf_cgroup_update_shm, em); + heartbeat_t hb; heartbeat_init(&hb); int publish_global = em->global_charts; int apps_enabled = em->apps_charts; - int pid_fd = map_fd[0]; + int cgroups = em->cgroup_charts; + int thread_enabled = em->enabled; + if (cgroups) + ebpf_process_update_cgroup_algorithm(); + + int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd; + int update_every = em->update_every; + int counter = update_every - 1; while (!close_ebpf_plugin) { - usec_t dt = heartbeat_next(&hb, step); + usec_t dt = heartbeat_next(&hb, USEC_PER_SEC); (void)dt; - read_hash_global_tables(); - pthread_mutex_lock(&collect_data_mutex); cleanup_exited_pids(); collect_data_for_all_processes(pid_fd); - ebpf_create_apps_charts(em, apps_groups_root_target); + ebpf_create_apps_charts(apps_groups_root_target); pthread_cond_broadcast(&collect_data_cond_var); pthread_mutex_unlock(&collect_data_mutex); - int publish_apps = 0; - if (apps_enabled && all_pids_count > 0) { - publish_apps = 1; - ebpf_process_update_apps_data(); - } + if (++counter == update_every) { + counter = 0; - pthread_mutex_lock(&lock); - if (publish_global) { - ebpf_process_send_data(em); - } + read_hash_global_tables(); + + int publish_apps = 0; + if (all_pids_count > 0) { + if (apps_enabled) { + publish_apps = 1; + ebpf_process_update_apps_data(); + } - if (publish_apps) { - ebpf_process_send_apps_data(em, apps_groups_root_target); + if 
(cgroups) { + ebpf_update_process_cgroup(); + } + } + + if (thread_enabled) { + pthread_mutex_lock(&lock); + if (publish_global) { + ebpf_process_send_data(em); + } + + if (publish_apps) { + ebpf_process_send_apps_data(apps_groups_root_target, em); + } + + if (cgroups) { + ebpf_process_send_cgroup_data(em); + } + pthread_mutex_unlock(&lock); + } } + pthread_mutex_unlock(&lock); fflush(stdout); @@ -896,7 +997,7 @@ static void process_collector(usec_t step, ebpf_module_t *em) *****************************************************************/ void clean_global_memory() { - int pid_fd = map_fd[0]; + int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd; struct pid_stat *pids = root_of_pids; while (pids) { uint32_t pid = pids->pid; @@ -909,6 +1010,30 @@ void clean_global_memory() { } } +/** + * Process disable tracepoints + * + * Disable tracepoints when the plugin was responsible to enable it. + */ +static void ebpf_process_disable_tracepoints() +{ + char *default_message = { "Cannot disable the tracepoint" }; + if (!was_sched_process_exit_enabled) { + if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exit)) + error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_exit); + } + + if (!was_sched_process_exec_enabled) { + if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exec)) + error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_exec); + } + + if (!was_sched_process_fork_enabled) { + if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_fork)) + error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_fork); + } +} + /** * Clean up the main thread. * @@ -920,7 +1045,7 @@ static void ebpf_process_cleanup(void *ptr) heartbeat_t hb; heartbeat_init(&hb); - uint32_t tick = 50*USEC_PER_MS; + uint32_t tick = 1 * USEC_PER_SEC; while (!finalized_threads) { usec_t dt = heartbeat_next(&hb, tick); UNUSED(dt); @@ -933,15 +1058,19 @@ static void ebpf_process_cleanup(void *ptr) freez(global_process_stats); freez(current_apps_data); - freez(process_data.map_fd); + ebpf_process_disable_tracepoints(); - struct bpf_program *prog; - size_t i = 0 ; - bpf_object__for_each_program(prog, objects) { - bpf_link__destroy(probe_links[i]); - i++; + if (probe_links) { + struct bpf_program *prog; + size_t i = 0 ; + bpf_object__for_each_program(prog, objects) { + bpf_link__destroy(probe_links[i]); + i++; + } + bpf_object__close(objects); } - bpf_object__close(objects); + + freez(cgroup_thread.thread); } /***************************************************************** @@ -970,7 +1099,7 @@ static void ebpf_process_allocate_global_vectors(size_t length) static void change_syscalls() { static char *lfork = { "do_fork" }; - process_id_names[7] = lfork; + process_id_names[NETDATA_KEY_PUBLISH_PROCESS_FORK] = lfork; } /** @@ -979,9 +1108,7 @@ static void change_syscalls() */ static void set_local_pointers() { - map_fd = process_data.map_fd; - - if (process_data.isrh >= NETDATA_MINIMUM_RH_VERSION && process_data.isrh < NETDATA_RH_8) + if (isrh >= NETDATA_MINIMUM_RH_VERSION && isrh < NETDATA_RH_8) change_syscalls(); } @@ -1019,6 +1146,45 @@ static void wait_for_all_threads_die() } } +/** + * Enable tracepoints + * + * Enable necessary tracepoints for thread. 
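 */
/*
 * A sketch of what the is-enabled probe used below boils down to: reading
 * the tracepoint's "enable" file in tracefs. The mount point is an
 * assumption; on newer systems tracefs may live at /sys/kernel/tracing
 * instead of under debugfs.
 */
#include <stdio.h>

static int demo_tracepoint_enabled(const char *type, const char *event)
{
    char path[512];
    snprintf(path, sizeof(path),
             "/sys/kernel/debug/tracing/events/%s/%s/enable", type, event);

    FILE *fp = fopen(path, "r");
    if (!fp)
        return -1;              /* cannot tell: missing mount or permissions */

    int c = fgetc(fp);
    fclose(fp);
    return (c == '1') ? 1 : 0;  /* 1 enabled, 0 disabled */
}
/*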
+ * + * @return It returns 0 on success and -1 otherwise + */ +static int ebpf_process_enable_tracepoints() +{ + int test = ebpf_is_tracepoint_enabled(tracepoint_sched_type, tracepoint_sched_process_exit); + if (test == -1) + return -1; + else if (!test) { + if (ebpf_enable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exit)) + return -1; + } + was_sched_process_exit_enabled = test; + + test = ebpf_is_tracepoint_enabled(tracepoint_sched_type, tracepoint_sched_process_exec); + if (test == -1) + return -1; + else if (!test) { + if (ebpf_enable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exec)) + return -1; + } + was_sched_process_exec_enabled = test; + + test = ebpf_is_tracepoint_enabled(tracepoint_sched_type, tracepoint_sched_process_fork); + if (test == -1) + return -1; + else if (!test) { + if (ebpf_enable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_fork)) + return -1; + } + was_sched_process_fork_enabled = test; + + return 0; +} + /** * Process thread * @@ -1034,31 +1200,26 @@ void *ebpf_process_thread(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = process_maps; + + if (ebpf_process_enable_tracepoints()) { + em->enabled = em->global_charts = em->apps_charts = em->cgroup_charts = CONFIG_BOOLEAN_NO; + } process_enabled = em->enabled; - fill_ebpf_data(&process_data); pthread_mutex_lock(&lock); ebpf_process_allocate_global_vectors(NETDATA_KEY_PUBLISH_PROCESS_END); - if (ebpf_update_kernel(&process_data)) { - pthread_mutex_unlock(&lock); - goto endprocess; - } - - ebpf_update_module(em, &process_config, NETDATA_PROCESS_CONFIG_FILE); ebpf_update_pid_table(&process_maps[0], em); set_local_pointers(); - probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, process_data.map_fd); + probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects); if (!probe_links) { pthread_mutex_unlock(&lock); goto endprocess; } int algorithms[NETDATA_KEY_PUBLISH_PROCESS_END] = { - NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX,NETDATA_EBPF_INCREMENTAL_IDX, //open, close, unlink - NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, - NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX + NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX }; ebpf_global_labels( @@ -1071,7 +1232,7 @@ void *ebpf_process_thread(void *ptr) pthread_mutex_unlock(&lock); - process_collector((usec_t)(em->update_time * USEC_PER_SEC), em); + process_collector(em); endprocess: wait_for_all_threads_die(); diff --git a/collectors/ebpf.plugin/ebpf_process.h b/collectors/ebpf.plugin/ebpf_process.h index a731227e1..73421049e 100644 --- a/collectors/ebpf.plugin/ebpf_process.h +++ b/collectors/ebpf.plugin/ebpf_process.h @@ -3,73 +3,44 @@ #ifndef NETDATA_EBPF_PROCESS_H #define NETDATA_EBPF_PROCESS_H 1 -// Groups used on Dashboard -#define NETDATA_FILE_GROUP "File" -#define NETDATA_VFS_GROUP "VFS" -#define NETDATA_PROCESS_GROUP "Process" - -// Internal constants -#define NETDATA_GLOBAL_VECTOR 24 -#define NETDATA_VFS_ERRORS 3 +// Module name +#define NETDATA_EBPF_MODULE_NAME_PROCESS "process" -// Map index -#define NETDATA_DEL_START 2 -#define NETDATA_IN_START_BYTE 3 -#define NETDATA_EXIT_START 5 -#define NETDATA_PROCESS_START 7 +// Groups used on Dashboard +#define NETDATA_PROCESS_GROUP "processes" +#define NETDATA_PROCESS_CGROUP_GROUP "processes (eBPF)" // Global chart name -#define NETDATA_FILE_OPEN_CLOSE_COUNT 
"file_descriptor" -#define NETDATA_FILE_OPEN_ERR_COUNT "file_error" -#define NETDATA_VFS_FILE_CLEAN_COUNT "deleted_objects" -#define NETDATA_VFS_FILE_IO_COUNT "io" -#define NETDATA_VFS_FILE_ERR_COUNT "io_error" - #define NETDATA_EXIT_SYSCALL "exit" #define NETDATA_PROCESS_SYSCALL "process_thread" #define NETDATA_PROCESS_ERROR_NAME "task_error" #define NETDATA_PROCESS_STATUS_NAME "process_status" -#define NETDATA_VFS_IO_FILE_BYTES "io_bytes" - // Charts created on Apps submenu -#define NETDATA_SYSCALL_APPS_FILE_OPEN "file_open" -#define NETDATA_SYSCALL_APPS_FILE_CLOSED "file_closed" -#define NETDATA_SYSCALL_APPS_FILE_DELETED "file_deleted" -#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS "vfs_write_call" -#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS "vfs_read_call" -#define NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES "vfs_write_bytes" -#define NETDATA_SYSCALL_APPS_VFS_READ_BYTES "vfs_read_bytes" #define NETDATA_SYSCALL_APPS_TASK_PROCESS "process_create" #define NETDATA_SYSCALL_APPS_TASK_THREAD "thread_create" +#define NETDATA_SYSCALL_APPS_TASK_EXIT "task_exit" #define NETDATA_SYSCALL_APPS_TASK_CLOSE "task_close" - -// Charts created on Apps submenu, if and only if, the return mode is active - -#define NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR "file_open_error" -#define NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR "file_close_error" -#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR "vfs_write_error" -#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR "vfs_read_error" +#define NETDATA_SYSCALL_APPS_TASK_ERROR "task_error" // Process configuration name #define NETDATA_PROCESS_CONFIG_FILE "process.conf" -// Index from kernel -typedef enum ebpf_process_index { - NETDATA_KEY_CALLS_DO_SYS_OPEN, - NETDATA_KEY_ERROR_DO_SYS_OPEN, - - NETDATA_KEY_CALLS_VFS_WRITE, - NETDATA_KEY_ERROR_VFS_WRITE, - NETDATA_KEY_BYTES_VFS_WRITE, - - NETDATA_KEY_CALLS_VFS_READ, - NETDATA_KEY_ERROR_VFS_READ, - NETDATA_KEY_BYTES_VFS_READ, +// Contexts +#define NETDATA_CGROUP_PROCESS_CREATE_CONTEXT "cgroup.process_create" +#define NETDATA_CGROUP_THREAD_CREATE_CONTEXT "cgroup.thread_create" +#define NETDATA_CGROUP_PROCESS_CLOSE_CONTEXT "cgroup.task_close" +#define NETDATA_CGROUP_PROCESS_EXIT_CONTEXT "cgroup.task_exit" +#define NETDATA_CGROUP_PROCESS_ERROR_CONTEXT "cgroup.task_error" - NETDATA_KEY_CALLS_VFS_UNLINK, - NETDATA_KEY_ERROR_VFS_UNLINK, +#define NETDATA_SYSTEMD_PROCESS_CREATE_CONTEXT "services.process_create" +#define NETDATA_SYSTEMD_THREAD_CREATE_CONTEXT "services.thread_create" +#define NETDATA_SYSTEMD_PROCESS_CLOSE_CONTEXT "services.task_close" +#define NETDATA_SYSTEMD_PROCESS_EXIT_CONTEXT "services.task_exit" +#define NETDATA_SYSTEMD_PROCESS_ERROR_CONTEXT "services.task_error" +// Index from kernel +typedef enum ebpf_process_index { NETDATA_KEY_CALLS_DO_EXIT, NETDATA_KEY_CALLS_RELEASE_TASK, @@ -77,20 +48,10 @@ typedef enum ebpf_process_index { NETDATA_KEY_CALLS_DO_FORK, NETDATA_KEY_ERROR_DO_FORK, - NETDATA_KEY_CALLS_CLOSE_FD, - NETDATA_KEY_ERROR_CLOSE_FD, - NETDATA_KEY_CALLS_SYS_CLONE, NETDATA_KEY_ERROR_SYS_CLONE, - NETDATA_KEY_CALLS_VFS_WRITEV, - NETDATA_KEY_ERROR_VFS_WRITEV, - NETDATA_KEY_BYTES_VFS_WRITEV, - - NETDATA_KEY_CALLS_VFS_READV, - NETDATA_KEY_ERROR_VFS_READV, - NETDATA_KEY_BYTES_VFS_READV - + NETDATA_KEY_END_VECTOR } ebpf_process_index_t; // This enum acts as an index for publish vector. @@ -99,11 +60,6 @@ typedef enum ebpf_process_index { // values (the three initial positions) and absolute values // (the remaining charts). 
typedef enum netdata_publish_process { - NETDATA_KEY_PUBLISH_PROCESS_OPEN, - NETDATA_KEY_PUBLISH_PROCESS_CLOSE, - NETDATA_KEY_PUBLISH_PROCESS_UNLINK, - NETDATA_KEY_PUBLISH_PROCESS_READ, - NETDATA_KEY_PUBLISH_PROCESS_WRITE, NETDATA_KEY_PUBLISH_PROCESS_EXIT, NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK, NETDATA_KEY_PUBLISH_PROCESS_FORK, @@ -114,28 +70,21 @@ typedef enum netdata_publish_process { typedef struct ebpf_process_publish_apps { // Number of calls during the last read - uint64_t call_sys_open; - uint64_t call_close_fd; - uint64_t call_vfs_unlink; - uint64_t call_read; - uint64_t call_write; uint64_t call_do_exit; uint64_t call_release_task; - uint64_t call_do_fork; - uint64_t call_sys_clone; + uint64_t create_process; + uint64_t create_thread; // Number of errors during the last read - uint64_t ecall_sys_open; - uint64_t ecall_close_fd; - uint64_t ecall_vfs_unlink; - uint64_t ecall_read; - uint64_t ecall_write; - uint64_t ecall_do_fork; - uint64_t ecall_sys_clone; - - // Number of bytes during the last read - uint64_t bytes_written; - uint64_t bytes_read; + uint64_t task_err; } ebpf_process_publish_apps_t; +enum ebpf_process_tables { + NETDATA_PROCESS_PID_TABLE, + NETDATA_PROCESS_GLOBAL_TABLE, + NETDATA_PROCESS_CTRL_TABLE +}; + +extern struct config process_config; + #endif /* NETDATA_EBPF_PROCESS_H */ diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c new file mode 100644 index 000000000..156ae9aa5 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_shm.c @@ -0,0 +1,855 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "ebpf.h" +#include "ebpf_shm.h" + +static char *shm_dimension_name[NETDATA_SHM_END] = { "get", "at", "dt", "ctl" }; +static netdata_syscall_stat_t shm_aggregated_data[NETDATA_SHM_END]; +static netdata_publish_syscall_t shm_publish_aggregated[NETDATA_SHM_END]; + +static int read_thread_closed = 1; +netdata_publish_shm_t *shm_vector = NULL; + +static netdata_idx_t shm_hash_values[NETDATA_SHM_END]; +static netdata_idx_t *shm_values = NULL; + +netdata_publish_shm_t **shm_pid = NULL; + +struct config shm_config = { .first_section = NULL, + .last_section = NULL, + .mutex = NETDATA_MUTEX_INITIALIZER, + .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, + .rwlock = AVL_LOCK_INITIALIZER } }; + +static ebpf_local_maps_t shm_maps[] = {{.name = "tbl_pid_shm", .internal_input = ND_EBPF_DEFAULT_PID_SIZE, + .user_input = 0, + .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "shm_ctrl", .internal_input = NETDATA_CONTROLLER_END, + .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_shm", .internal_input = NETDATA_SHM_END, + .user_input = 0, + .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = NULL, .internal_input = 0, .user_input = 0}}; + +static struct bpf_link **probe_links = NULL; +static struct bpf_object *objects = NULL; + +struct netdata_static_thread shm_threads = {"SHM KERNEL", NULL, NULL, 1, + NULL, NULL, NULL}; + +/***************************************************************** + * FUNCTIONS TO CLOSE THE THREAD + *****************************************************************/ + +/** + * Clean shm structure + */ +void clean_shm_pid_structures() { + struct pid_stat *pids = root_of_pids; + while (pids) { + freez(shm_pid[pids->pid]); + + pids = pids->next; + } +} + +/** + * Clean up the main thread. + * + * @param ptr thread data. 
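The shm_maps[] table above starts every entry with .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED; the descriptors are resolved only after the BPF object is loaded. A sketch of how such a table can be filled with stock libbpf, assuming the ebpf_local_maps_t layout used in this plugin; bpf_object__find_map_fd_by_name() is a real libbpf call, while resolve_map_fds() itself is illustrative (the actual wiring happens inside ebpf_load_program()):

    #include <bpf/libbpf.h>
    #include <bpf/bpf.h>

    /* Resolve the file descriptor of every named map after bpf_object__load(),
     * stopping at the {.name = NULL} sentinel that terminates these tables. */
    static int resolve_map_fds(struct bpf_object *obj, ebpf_local_maps_t *maps)
    {
        for (ebpf_local_maps_t *m = maps; m->name; m++) {
            int fd = bpf_object__find_map_fd_by_name(obj, m->name);
            if (fd < 0)
                return -1;    /* map not present in the loaded object */
            m->map_fd = fd;
        }
        return 0;
    }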
+ */ +static void ebpf_shm_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + return; + } + + heartbeat_t hb; + heartbeat_init(&hb); + uint32_t tick = 2 * USEC_PER_MS; + while (!read_thread_closed) { + usec_t dt = heartbeat_next(&hb, tick); + UNUSED(dt); + } + + ebpf_cleanup_publish_syscall(shm_publish_aggregated); + + freez(shm_vector); + freez(shm_values); + + if (probe_links) { + struct bpf_program *prog; + size_t i = 0 ; + bpf_object__for_each_program(prog, objects) { + bpf_link__destroy(probe_links[i]); + i++; + } + bpf_object__close(objects); + } +} + +/***************************************************************** + * COLLECTOR THREAD + *****************************************************************/ + +/** + * Apps Accumulator + * + * Sum all values read from kernel and store in the first address. + * + * @param out the vector with read values. + */ +static void shm_apps_accumulator(netdata_publish_shm_t *out) +{ + int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1; + netdata_publish_shm_t *total = &out[0]; + for (i = 1; i < end; i++) { + netdata_publish_shm_t *w = &out[i]; + total->get += w->get; + total->at += w->at; + total->dt += w->dt; + total->ctl += w->ctl; + } +} + +/** + * Fill PID + * + * Fill PID structures + * + * @param current_pid pid that we are collecting data + * @param out values read from hash tables; + */ +static void shm_fill_pid(uint32_t current_pid, netdata_publish_shm_t *publish) +{ + netdata_publish_shm_t *curr = shm_pid[current_pid]; + if (!curr) { + curr = callocz(1, sizeof(netdata_publish_shm_t)); + shm_pid[current_pid] = curr; + } + + memcpy(curr, publish, sizeof(netdata_publish_shm_t)); +} + +/** + * Update cgroup + * + * Update cgroup data based in + */ +static void ebpf_update_shm_cgroup() +{ + netdata_publish_shm_t *cv = shm_vector; + int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd; + size_t length = sizeof(netdata_publish_shm_t) * ebpf_nprocs; + ebpf_cgroup_target_t *ect; + + memset(cv, 0, length); + + pthread_mutex_lock(&mutex_cgroup_shm); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + struct pid_on_target2 *pids; + for (pids = ect->pids; pids; pids = pids->next) { + int pid = pids->pid; + netdata_publish_shm_t *out = &pids->shm; + if (likely(shm_pid) && shm_pid[pid]) { + netdata_publish_shm_t *in = shm_pid[pid]; + + memcpy(out, in, sizeof(netdata_publish_shm_t)); + } else { + if (!bpf_map_lookup_elem(fd, &pid, cv)) { + shm_apps_accumulator(cv); + + memcpy(out, cv, sizeof(netdata_publish_shm_t)); + + // now that we've consumed the value, zero it out in the map. + memset(cv, 0, length); + bpf_map_update_elem(fd, &pid, cv, BPF_EXIST); + } + } + } + } + pthread_mutex_unlock(&mutex_cgroup_shm); +} + +/** + * Read APPS table + * + * Read the apps table and store data inside the structure. + */ +static void read_apps_table() +{ + netdata_publish_shm_t *cv = shm_vector; + uint32_t key; + struct pid_stat *pids = root_of_pids; + int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd; + size_t length = sizeof(netdata_publish_shm_t)*ebpf_nprocs; + while (pids) { + key = pids->pid; + + if (bpf_map_lookup_elem(fd, &key, cv)) { + pids = pids->next; + continue; + } + + shm_apps_accumulator(cv); + + shm_fill_pid(key, cv); + + // now that we've consumed the value, zero it out in the map. + memset(cv, 0, length); + bpf_map_update_elem(fd, &key, cv, BPF_EXIST); + + pids = pids->next; + } +} + +/** +* Send global charts to netdata agent. 
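read_apps_table() above treats each per-PID entry as consume-once: it folds the per-CPU copies into slot zero, then rewrites the slot with zeros (BPF_EXIST) so the next interval starts clean. A condensed sketch of that read-reset cycle, assuming a per-CPU hash keyed by PID and the netdata_publish_shm_t layout from ebpf_shm.h; consume_pid_entry() is illustrative:

    #include <stdint.h>
    #include <string.h>
    #include <bpf/bpf.h>

    /* scratch must hold one netdata_publish_shm_t per CPU (nprocs entries). */
    static int consume_pid_entry(int map_fd, uint32_t pid,
                                 netdata_publish_shm_t *scratch, int nprocs,
                                 netdata_publish_shm_t *out)
    {
        if (bpf_map_lookup_elem(map_fd, &pid, scratch))
            return -1;                           /* no entry for this PID yet */

        for (int cpu = 1; cpu < nprocs; cpu++) { /* fold the per-CPU copies */
            scratch[0].get += scratch[cpu].get;
            scratch[0].at  += scratch[cpu].at;
            scratch[0].dt  += scratch[cpu].dt;
            scratch[0].ctl += scratch[cpu].ctl;
        }
        *out = scratch[0];                       /* interval total for the caller */

        /* Zero the slot in the kernel map so counters are per-interval. */
        memset(scratch, 0, sizeof(*scratch) * (size_t)nprocs);
        bpf_map_update_elem(map_fd, &pid, scratch, BPF_EXIST);
        return 0;
    }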
+*/ +static void shm_send_global() +{ + write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, NETDATA_SHM_GLOBAL_CHART); + write_chart_dimension( + shm_publish_aggregated[NETDATA_KEY_SHMGET_CALL].dimension, + (long long) shm_hash_values[NETDATA_KEY_SHMGET_CALL] + ); + write_chart_dimension( + shm_publish_aggregated[NETDATA_KEY_SHMAT_CALL].dimension, + (long long) shm_hash_values[NETDATA_KEY_SHMAT_CALL] + ); + write_chart_dimension( + shm_publish_aggregated[NETDATA_KEY_SHMDT_CALL].dimension, + (long long) shm_hash_values[NETDATA_KEY_SHMDT_CALL] + ); + write_chart_dimension( + shm_publish_aggregated[NETDATA_KEY_SHMCTL_CALL].dimension, + (long long) shm_hash_values[NETDATA_KEY_SHMCTL_CALL] + ); + write_end_chart(); +} + +/** + * Read global counter + * + * Read the table with number of calls for all functions + */ +static void read_global_table() +{ + netdata_idx_t *stored = shm_values; + netdata_idx_t *val = shm_hash_values; + int fd = shm_maps[NETDATA_SHM_GLOBAL_TABLE].map_fd; + + uint32_t i, end = NETDATA_SHM_END; + for (i = NETDATA_KEY_SHMGET_CALL; i < end; i++) { + if (!bpf_map_lookup_elem(fd, &i, stored)) { + int j; + int last = ebpf_nprocs; + netdata_idx_t total = 0; + for (j = 0; j < last; j++) + total += stored[j]; + + val[i] = total; + } + } +} + +/** + * Shared memory reader thread. + * + * @param ptr It is a NULL value for this thread. + * @return It always returns NULL. + */ +void *ebpf_shm_read_hash(void *ptr) +{ + read_thread_closed = 0; + + heartbeat_t hb; + heartbeat_init(&hb); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + usec_t step = NETDATA_SHM_SLEEP_MS * em->update_every; + while (!close_ebpf_plugin) { + usec_t dt = heartbeat_next(&hb, step); + (void)dt; + + read_global_table(); + } + + read_thread_closed = 1; + return NULL; +} + +/** + * Sum values for all targets. + */ +static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct pid_on_target *root) +{ + while (root) { + int32_t pid = root->pid; + netdata_publish_shm_t *w = shm_pid[pid]; + if (w) { + shm->get += w->get; + shm->at += w->at; + shm->dt += w->dt; + shm->ctl += w->ctl; + + // reset for next collection. + w->get = 0; + w->at = 0; + w->dt = 0; + w->ctl = 0; + } + root = root->next; + } +} + +/** + * Send data to Netdata calling auxiliary functions. + * + * @param root the target list. +*/ +void ebpf_shm_send_apps_data(struct target *root) +{ + struct target *w; + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + ebpf_shm_sum_pids(&w->shm, w->root_pid); + } + } + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMGET_CHART); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, (long long) w->shm.get); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMAT_CHART); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, (long long) w->shm.at); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMDT_CHART); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, (long long) w->shm.dt); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMCTL_CHART); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, (long long) w->shm.ctl); + } + } + write_end_chart(); +} + +/** + * Sum values for all targets. 
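The write_begin_chart()/write_chart_dimension()/write_end_chart() calls used throughout this file wrap Netdata's line-oriented external-plugin protocol on stdout: one BEGIN/SET/END block per chart per iteration. Roughly, one update of the global shm chart serializes as sketched below; the "system.shared_memory_calls" id is an assumption assembled from NETDATA_EBPF_SYSTEM_GROUP and NETDATA_SHM_GLOBAL_CHART:

    #include <stdio.h>

    /* One protocol block: the daemon parses these lines from the plugin pipe. */
    static void send_shm_sample(long long get, long long at, long long dt, long long ctl)
    {
        printf("BEGIN %s.%s\n", "system", "shared_memory_calls");
        printf("SET get = %lld\n", get);
        printf("SET at = %lld\n", at);
        printf("SET dt = %lld\n", dt);
        printf("SET ctl = %lld\n", ctl);
        printf("END\n");
        fflush(stdout);    /* the daemon reads the pipe incrementally */
    }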
+ */ +static void ebpf_shm_sum_cgroup_pids(netdata_publish_shm_t *shm, struct pid_on_target2 *root) +{ + netdata_publish_shm_t shmv; + memset(&shmv, 0, sizeof(shmv)); + while (root) { + netdata_publish_shm_t *w = &root->shm; + shmv.get += w->get; + shmv.at += w->at; + shmv.dt += w->dt; + shmv.ctl += w->ctl; + + root = root->next; + } + + memcpy(shm, &shmv, sizeof(shmv)); +} + +/** + * Create specific shared memory charts + * + * Create charts for cgroup/application. + * + * @param type the chart type. + * @param update_every value to overwrite the update frequency set by the server. + */ +static void ebpf_create_specific_shm_charts(char *type, int update_every) +{ + ebpf_create_chart(type, NETDATA_SHMGET_CHART, + "Calls to syscall shmget(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_CGROUP_SHM_GET_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5800, + ebpf_create_global_dimension, + &shm_publish_aggregated[NETDATA_KEY_SHMGET_CALL], + 1, + update_every, + NETDATA_EBPF_MODULE_NAME_SHM); + + ebpf_create_chart(type, NETDATA_SHMAT_CHART, + "Calls to syscall shmat(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_CGROUP_SHM_AT_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5801, + ebpf_create_global_dimension, + &shm_publish_aggregated[NETDATA_KEY_SHMAT_CALL], + 1, + update_every, + NETDATA_EBPF_MODULE_NAME_SHM); + + ebpf_create_chart(type, NETDATA_SHMDT_CHART, + "Calls to syscall shmdt(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_CGROUP_SHM_DT_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5802, + ebpf_create_global_dimension, + &shm_publish_aggregated[NETDATA_KEY_SHMDT_CALL], + 1, + update_every, + NETDATA_EBPF_MODULE_NAME_SHM); + + ebpf_create_chart(type, NETDATA_SHMCTL_CHART, + "Calls to syscall shmctl(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_CGROUP_SHM_CTL_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5803, + ebpf_create_global_dimension, + &shm_publish_aggregated[NETDATA_KEY_SHMCTL_CALL], + 1, + update_every, + NETDATA_EBPF_MODULE_NAME_SHM); +} + +/** + * Obsolete specific shared memory charts + * + * Obsolete charts for cgroup/application. + * + * @param type the chart type. + * @param update_every value to overwrite the update frequency set by the server. 
+ */
+static void ebpf_obsolete_specific_shm_charts(char *type, int update_every)
+{
+    ebpf_write_chart_obsolete(type, NETDATA_SHMGET_CHART,
+                              "Calls to syscall shmget(2).",
+                              EBPF_COMMON_DIMENSION_CALL,
+                              NETDATA_APPS_IPC_SHM_GROUP,
+                              NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_GET_CONTEXT,
+                              NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5800, update_every);
+
+    ebpf_write_chart_obsolete(type, NETDATA_SHMAT_CHART,
+                              "Calls to syscall shmat(2).",
+                              EBPF_COMMON_DIMENSION_CALL,
+                              NETDATA_APPS_IPC_SHM_GROUP,
+                              NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_AT_CONTEXT,
+                              NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5801, update_every);
+
+    ebpf_write_chart_obsolete(type, NETDATA_SHMDT_CHART,
+                              "Calls to syscall shmdt(2).",
+                              EBPF_COMMON_DIMENSION_CALL,
+                              NETDATA_APPS_IPC_SHM_GROUP,
+                              NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_DT_CONTEXT,
+                              NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5802, update_every);
+
+    ebpf_write_chart_obsolete(type, NETDATA_SHMCTL_CHART,
+                              "Calls to syscall shmctl(2).",
+                              EBPF_COMMON_DIMENSION_CALL,
+                              NETDATA_APPS_IPC_SHM_GROUP,
+                              NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_CTL_CONTEXT,
+                              NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5803, update_every);
+}
+
+/**
+ * Create Systemd SHM Charts
+ *
+ * Create charts when systemd is enabled.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ **/
+static void ebpf_create_systemd_shm_charts(int update_every)
+{
+    ebpf_create_charts_on_systemd(NETDATA_SHMGET_CHART,
+                                  "Calls to syscall shmget(2).",
+                                  EBPF_COMMON_DIMENSION_CALL,
+                                  NETDATA_APPS_IPC_SHM_GROUP,
+                                  NETDATA_EBPF_CHART_TYPE_STACKED,
+                                  20191,
+                                  ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+                                  NETDATA_SYSTEMD_SHM_GET_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+
+    ebpf_create_charts_on_systemd(NETDATA_SHMAT_CHART,
+                                  "Calls to syscall shmat(2).",
+                                  EBPF_COMMON_DIMENSION_CALL,
+                                  NETDATA_APPS_IPC_SHM_GROUP,
+                                  NETDATA_EBPF_CHART_TYPE_STACKED,
+                                  20192,
+                                  ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+                                  NETDATA_SYSTEMD_SHM_AT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+
+    ebpf_create_charts_on_systemd(NETDATA_SHMDT_CHART,
+                                  "Calls to syscall shmdt(2).",
+                                  EBPF_COMMON_DIMENSION_CALL,
+                                  NETDATA_APPS_IPC_SHM_GROUP,
+                                  NETDATA_EBPF_CHART_TYPE_STACKED,
+                                  20193,
+                                  ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+                                  NETDATA_SYSTEMD_SHM_DT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+
+    ebpf_create_charts_on_systemd(NETDATA_SHMCTL_CHART,
+                                  "Calls to syscall shmctl(2).",
+                                  EBPF_COMMON_DIMENSION_CALL,
+                                  NETDATA_APPS_IPC_SHM_GROUP,
+                                  NETDATA_EBPF_CHART_TYPE_STACKED,
+                                  20194,
+                                  ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+                                  NETDATA_SYSTEMD_SHM_CTL_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+}
+
+/**
+ * Send Systemd charts
+ *
+ * Send collected data to Netdata.
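ebpf_write_chart_obsolete() exists because cgroups come and go at runtime: when one disappears, the plugin re-announces the chart definition carrying the "obsolete" option so the agent retires the chart instead of leaving a flat line. Schematically (field order follows Netdata's CHART statement; the literal below is illustrative, not the verbatim emitter):

    #include <stdio.h>

    /* Re-announce a chart with the "obsolete" option so the agent archives it.
     * Fields: type.id name title units family context charttype priority
     * update_every options. */
    static void mark_chart_obsolete(const char *type, const char *id,
                                    const char *title, const char *units,
                                    const char *family, const char *context,
                                    int priority, int update_every)
    {
        printf("CHART %s.%s '' '%s' '%s' '%s' '%s' line %d %d 'obsolete'\n",
               type, id, title, units, family, context, priority, update_every);
    }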
+ * + * @return It returns the status for chart creation, if it is necessary to remove a specific dimension, zero is returned + * otherwise function returns 1 to avoid chart recreation + */ +static int ebpf_send_systemd_shm_charts() +{ + int ret = 1; + ebpf_cgroup_target_t *ect; + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMGET_CHART); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, (long long)ect->publish_shm.get); + } else + ret = 0; + } + write_end_chart(); + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMAT_CHART); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, (long long)ect->publish_shm.at); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMDT_CHART); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, (long long)ect->publish_shm.dt); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMCTL_CHART); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, (long long)ect->publish_shm.ctl); + } + } + write_end_chart(); + + return ret; +} + +/* + * Send Specific Shared memory data + * + * Send data for specific cgroup/apps. + * + * @param type chart type + * @param values structure with values that will be sent to netdata + */ +static void ebpf_send_specific_shm_data(char *type, netdata_publish_shm_t *values) +{ + write_begin_chart(type, NETDATA_SHMGET_CHART); + write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMGET_CALL].name, (long long)values->get); + write_end_chart(); + + write_begin_chart(type, NETDATA_SHMAT_CHART); + write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMAT_CALL].name, (long long)values->at); + write_end_chart(); + + write_begin_chart(type, NETDATA_SHMDT_CHART); + write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMDT_CALL].name, (long long)values->dt); + write_end_chart(); + + write_begin_chart(type, NETDATA_SHMCTL_CHART); + write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMCTL_CALL].name, (long long)values->ctl); + write_end_chart(); +} + +/** + * Send data to Netdata calling auxiliary functions. + * + * @param update_every value to overwrite the update frequency set by the server. 
+*/
+void ebpf_shm_send_cgroup_data(int update_every)
+{
+    if (!ebpf_cgroup_pids)
+        return;
+
+    pthread_mutex_lock(&mutex_cgroup_shm);
+    ebpf_cgroup_target_t *ect;
+    for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+        ebpf_shm_sum_cgroup_pids(&ect->publish_shm, ect->pids);
+    }
+
+    int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
+    if (has_systemd) {
+        static int systemd_charts = 0;
+        if (!systemd_charts) {
+            ebpf_create_systemd_shm_charts(update_every);
+            systemd_charts = 1;
+        }
+
+        systemd_charts = ebpf_send_systemd_shm_charts();
+    }
+
+    for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+        if (ect->systemd)
+            continue;
+
+        if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_SHM_CHART) && ect->updated) {
+            ebpf_create_specific_shm_charts(ect->name, update_every);
+            ect->flags |= NETDATA_EBPF_CGROUP_HAS_SHM_CHART;
+        }
+
+        if (ect->flags & NETDATA_EBPF_CGROUP_HAS_SHM_CHART) {
+            if (ect->updated) {
+                ebpf_send_specific_shm_data(ect->name, &ect->publish_shm);
+            } else {
+                ebpf_obsolete_specific_shm_charts(ect->name, update_every);
+                ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_SHM_CHART;
+            }
+        }
+    }
+
+    pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+ * Main loop for this collector.
+ */
+static void shm_collector(ebpf_module_t *em)
+{
+    shm_threads.thread = mallocz(sizeof(netdata_thread_t));
+    shm_threads.start_routine = ebpf_shm_read_hash;
+
+    netdata_thread_create(
+        shm_threads.thread,
+        shm_threads.name,
+        NETDATA_THREAD_OPTION_JOINABLE,
+        ebpf_shm_read_hash,
+        em
+    );
+
+    int apps = em->apps_charts;
+    int cgroups = em->cgroup_charts;
+    int update_every = em->update_every;
+    int counter = update_every - 1;
+    while (!close_ebpf_plugin) {
+        pthread_mutex_lock(&collect_data_mutex);
+        pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+        if (++counter == update_every) {
+            counter = 0;
+            if (apps) {
+                read_apps_table();
+            }
+
+            if (cgroups) {
+                ebpf_update_shm_cgroup();
+            }
+
+            pthread_mutex_lock(&lock);
+
+            shm_send_global();
+
+            if (apps) {
+                ebpf_shm_send_apps_data(apps_groups_root_target);
+            }
+
+            if (cgroups) {
+                ebpf_shm_send_cgroup_data(update_every);
+            }
+
+            pthread_mutex_unlock(&lock);
+        }
+
+        pthread_mutex_unlock(&collect_data_mutex);
+    }
+}
+
+/*****************************************************************
+ *  INITIALIZE THREAD
+ *****************************************************************/
+
+/**
+ * Create apps charts
+ *
+ * Call ebpf_create_chart to create the charts on apps submenu.
+ *
+ * @param em a pointer to the structure with the default values.
+ * @param ptr a pointer to the targets list (struct target *).
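ebpf_shm_send_cgroup_data() drives each cgroup's charts through a small flag-based lifecycle: create once when the cgroup first reports data, send while it keeps updating, obsolete and clear the flag when it stops, so the charts can be recreated if the cgroup returns. The state machine in isolation, with HAS_CHART standing in for NETDATA_EBPF_CGROUP_HAS_SHM_CHART and the real calls sketched as comments:

    #include <stdint.h>

    enum { HAS_CHART = 1u << 0 };   /* stand-in for the real flag bit */

    /* One interval of the per-cgroup chart lifecycle used above. */
    static void cgroup_chart_step(uint32_t *flags, int updated)
    {
        if (!(*flags & HAS_CHART) && updated) {
            /* ebpf_create_specific_shm_charts(name, update_every); */
            *flags |= HAS_CHART;
        }

        if (*flags & HAS_CHART) {
            if (updated) {
                /* ebpf_send_specific_shm_data(name, values); */
            } else {
                /* ebpf_obsolete_specific_shm_charts(name, update_every); */
                *flags &= ~HAS_CHART;
            }
        }
    }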
+ */ +void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr) +{ + struct target *root = ptr; + ebpf_create_charts_on_apps(NETDATA_SHMGET_CHART, + "Calls to syscall shmget(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20191, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_SHM); + + ebpf_create_charts_on_apps(NETDATA_SHMAT_CHART, + "Calls to syscall shmat(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20192, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_SHM); + + ebpf_create_charts_on_apps(NETDATA_SHMDT_CHART, + "Calls to syscall shmdt(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20193, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_SHM); + + ebpf_create_charts_on_apps(NETDATA_SHMCTL_CHART, + "Calls to syscall shmctl(2).", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_IPC_SHM_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20194, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_SHM); +} + +/** + * Allocate vectors used with this thread. + * + * We are not testing the return, because callocz does this and shutdown the software + * case it was not possible to allocate. + * + * @param apps is apps enabled? + */ +static void ebpf_shm_allocate_global_vectors(int apps) +{ + if (apps) + shm_pid = callocz((size_t)pid_max, sizeof(netdata_publish_shm_t *)); + + shm_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_shm_t)); + + shm_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t)); + + memset(shm_hash_values, 0, sizeof(shm_hash_values)); +} + +/***************************************************************** + * MAIN THREAD + *****************************************************************/ + +/** + * Create global charts + * + * Call ebpf_create_chart to create the charts for the collector. + * + * @param update_every value to overwrite the update frequency set by the server. + */ +static void ebpf_create_shm_charts(int update_every) +{ + ebpf_create_chart( + NETDATA_EBPF_SYSTEM_GROUP, + NETDATA_SHM_GLOBAL_CHART, + "Calls to shared memory system calls.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_SYSTEM_IPC_SHM_SUBMENU, + NULL, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_CALLS, + ebpf_create_global_dimension, + shm_publish_aggregated, + NETDATA_SHM_END, + update_every, NETDATA_EBPF_MODULE_NAME_SHM + ); + + fflush(stdout); +} + +/** + * Shared memory thread. 
+ * + * @param ptr a pointer to `struct ebpf_module` + * @return It always return NULL + */ +void *ebpf_shm_thread(void *ptr) +{ + netdata_thread_cleanup_push(ebpf_shm_cleanup, ptr); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + em->maps = shm_maps; + + ebpf_update_pid_table(&shm_maps[NETDATA_PID_SHM_TABLE], em); + + if (!em->enabled) { + goto endshm; + } + + probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects); + if (!probe_links) { + goto endshm; + } + + ebpf_shm_allocate_global_vectors(em->apps_charts); + + int algorithms[NETDATA_SHM_END] = { + NETDATA_EBPF_INCREMENTAL_IDX, + NETDATA_EBPF_INCREMENTAL_IDX, + NETDATA_EBPF_INCREMENTAL_IDX, + NETDATA_EBPF_INCREMENTAL_IDX + }; + ebpf_global_labels( + shm_aggregated_data, + shm_publish_aggregated, + shm_dimension_name, + shm_dimension_name, + algorithms, + NETDATA_SHM_END + ); + + pthread_mutex_lock(&lock); + ebpf_create_shm_charts(em->update_every); + pthread_mutex_unlock(&lock); + + shm_collector(em); + +endshm: + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/collectors/ebpf.plugin/ebpf_shm.h b/collectors/ebpf.plugin/ebpf_shm.h new file mode 100644 index 000000000..4e7e183a7 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_shm.h @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_EBPF_SHM_H +#define NETDATA_EBPF_SHM_H 1 + +// Module name +#define NETDATA_EBPF_MODULE_NAME_SHM "shm" + +#define NETDATA_SHM_SLEEP_MS 850000ULL + +// charts +#define NETDATA_SHM_GLOBAL_CHART "shared_memory_calls" +#define NETDATA_SHMGET_CHART "shmget_call" +#define NETDATA_SHMAT_CHART "shmat_call" +#define NETDATA_SHMDT_CHART "shmdt_call" +#define NETDATA_SHMCTL_CHART "shmctl_call" + +// configuration file +#define NETDATA_DIRECTORY_SHM_CONFIG_FILE "shm.conf" + +// Contexts +#define NETDATA_CGROUP_SHM_GET_CONTEXT "cgroup.shmget" +#define NETDATA_CGROUP_SHM_AT_CONTEXT "cgroup.shmat" +#define NETDATA_CGROUP_SHM_DT_CONTEXT "cgroup.shmdt" +#define NETDATA_CGROUP_SHM_CTL_CONTEXT "cgroup.shmctl" + +#define NETDATA_SYSTEMD_SHM_GET_CONTEXT "services.shmget" +#define NETDATA_SYSTEMD_SHM_AT_CONTEXT "services.shmat" +#define NETDATA_SYSTEMD_SHM_DT_CONTEXT "services.shmdt" +#define NETDATA_SYSTEMD_SHM_CTL_CONTEXT "services.shmctl" + +typedef struct netdata_publish_shm { + uint64_t get; + uint64_t at; + uint64_t dt; + uint64_t ctl; +} netdata_publish_shm_t; + +enum shm_tables { + NETDATA_PID_SHM_TABLE, + NETDATA_SHM_CONTROLLER, + NETDATA_SHM_GLOBAL_TABLE +}; + +enum shm_counters { + NETDATA_KEY_SHMGET_CALL, + NETDATA_KEY_SHMAT_CALL, + NETDATA_KEY_SHMDT_CALL, + NETDATA_KEY_SHMCTL_CALL, + + // Keep this as last and don't skip numbers as it is used as element counter + NETDATA_SHM_END +}; + +extern netdata_publish_shm_t **shm_pid; + +extern void *ebpf_shm_thread(void *ptr); +extern void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr); +extern void clean_shm_pid_structures(); + +extern struct config shm_config; + +#endif diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c index cbb4dded0..f7710ff22 100644 --- a/collectors/ebpf.plugin/ebpf_socket.c +++ b/collectors/ebpf.plugin/ebpf_socket.c @@ -11,31 +11,49 @@ * *****************************************************************/ -static char *socket_dimension_names[NETDATA_MAX_SOCKET_VECTOR] = { "sent", "received", "close", "sent", - "received", "retransmitted" }; -static char *socket_id_names[NETDATA_MAX_SOCKET_VECTOR] = { "tcp_sendmsg", "tcp_cleanup_rbuf", "tcp_close", - "udp_sendmsg", "udp_recvmsg", 
"tcp_retransmit_skb" }; +static char *socket_dimension_names[NETDATA_MAX_SOCKET_VECTOR] = { "received", "sent", "close", + "received", "sent", "retransmitted" }; +static char *socket_id_names[NETDATA_MAX_SOCKET_VECTOR] = { "tcp_cleanup_rbuf", "tcp_sendmsg", "tcp_close", + "udp_recvmsg", "udp_sendmsg", "tcp_retransmit_skb" }; static ebpf_local_maps_t socket_maps[] = {{.name = "tbl_bandwidth", .internal_input = NETDATA_COMPILED_CONNECTIONS_ALLOWED, - .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED}, + .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED, + .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_global_sock", + .internal_input = NETDATA_SOCKET_COUNTER, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_lports", + .internal_input = NETDATA_SOCKET_COUNTER, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, {.name = "tbl_conn_ipv4", .internal_input = NETDATA_COMPILED_CONNECTIONS_ALLOWED, - .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED}, + .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED, + .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, {.name = "tbl_conn_ipv6", .internal_input = NETDATA_COMPILED_CONNECTIONS_ALLOWED, - .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED}, - {.name = "tbl_nv_udp_conn_stats", + .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED, + .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_nv_udp", .internal_input = NETDATA_COMPILED_UDP_CONNECTIONS_ALLOWED, - .user_input = NETDATA_MAXIMUM_UDP_CONNECTIONS_ALLOWED}, + .user_input = NETDATA_MAXIMUM_UDP_CONNECTIONS_ALLOWED, + .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "socket_ctrl", .internal_input = NETDATA_CONTROLLER_END, + .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, {.name = NULL, .internal_input = 0, .user_input = 0}}; static netdata_idx_t *socket_hash_values = NULL; static netdata_syscall_stat_t socket_aggregated_data[NETDATA_MAX_SOCKET_VECTOR]; static netdata_publish_syscall_t socket_publish_aggregated[NETDATA_MAX_SOCKET_VECTOR]; -static ebpf_data_t socket_data; - ebpf_socket_publish_apps_t **socket_bandwidth_curr = NULL; static ebpf_bandwidth_t *bandwidth_vector = NULL; @@ -50,7 +68,6 @@ netdata_socket_t *socket_values; ebpf_network_viewer_port_list_t *listen_ports = NULL; -static int *map_fd = NULL; static struct bpf_object *objects = NULL; static struct bpf_link **probe_links = NULL; @@ -277,7 +294,7 @@ static void ebpf_socket_send_nv_data(netdata_vector_plot_t *ptr) } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param em the structure with thread information */ @@ -287,32 +304,26 @@ static void ebpf_socket_send_data(ebpf_module_t *em) netdata_publish_vfs_common_t common_udp; ebpf_update_global_publish(socket_publish_aggregated, &common_tcp, &common_udp, socket_aggregated_data); - // We read bytes from function arguments, but bandiwdth is given in bits, + // We read bytes from function arguments, but bandwidth is given in bits, // so we need to multiply by 8 to convert for the final value. 
- write_count_chart( - NETDATA_TCP_FUNCTION_COUNT, NETDATA_EBPF_FAMILY, socket_publish_aggregated, 3); - write_io_chart( - NETDATA_TCP_FUNCTION_BITS, NETDATA_EBPF_FAMILY, socket_id_names[0], common_tcp.write*8/1000, - socket_id_names[1], common_tcp.read*8/1000); + write_count_chart(NETDATA_TCP_FUNCTION_COUNT, NETDATA_EBPF_IP_FAMILY, socket_publish_aggregated, 3); + write_io_chart(NETDATA_TCP_FUNCTION_BITS, NETDATA_EBPF_IP_FAMILY, socket_id_names[0], + common_tcp.read * 8/BITS_IN_A_KILOBIT, socket_id_names[1], + common_tcp.write * 8/BITS_IN_A_KILOBIT); if (em->mode < MODE_ENTRY) { - write_err_chart( - NETDATA_TCP_FUNCTION_ERROR, NETDATA_EBPF_FAMILY, socket_publish_aggregated, 2); - } - write_count_chart( - NETDATA_TCP_RETRANSMIT, NETDATA_EBPF_FAMILY, &socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT], - 1); - - write_count_chart( - NETDATA_UDP_FUNCTION_COUNT, NETDATA_EBPF_FAMILY, &socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF], - 2); - write_io_chart( - NETDATA_UDP_FUNCTION_BITS, NETDATA_EBPF_FAMILY, - socket_id_names[3],(long long)common_udp.write*8/100, - socket_id_names[4], (long long)common_udp.read*8/1000); + write_err_chart(NETDATA_TCP_FUNCTION_ERROR, NETDATA_EBPF_IP_FAMILY, socket_publish_aggregated, 2); + } + write_count_chart(NETDATA_TCP_RETRANSMIT, NETDATA_EBPF_IP_FAMILY, + &socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT],1); + + write_count_chart(NETDATA_UDP_FUNCTION_COUNT, NETDATA_EBPF_IP_FAMILY, + &socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF],2); + write_io_chart(NETDATA_UDP_FUNCTION_BITS, NETDATA_EBPF_IP_FAMILY, + socket_id_names[3], (long long)common_udp.read * 8/BITS_IN_A_KILOBIT, + socket_id_names[4], (long long)common_udp.write * 8/BITS_IN_A_KILOBIT); if (em->mode < MODE_ENTRY) { - write_err_chart( - NETDATA_UDP_FUNCTION_ERROR, NETDATA_EBPF_FAMILY, &socket_publish_aggregated[NETDATA_UDP_START], - 2); + write_err_chart(NETDATA_UDP_FUNCTION_ERROR, NETDATA_EBPF_IP_FAMILY, + &socket_publish_aggregated[NETDATA_UDP_START], 2); } } @@ -342,7 +353,7 @@ long long ebpf_socket_sum_values_for_pids(struct pid_on_target *root, size_t off } /** - * Send data to Netdata calling auxiliar functions. + * Send data to Netdata calling auxiliary functions. * * @param em the structure with thread information * @param root the target list. 
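The following hunk threads two new arguments through every chart definition: em->update_every, so per-thread collection frequencies reach the dashboard, and the module name, so charts are attributed to the right ebpf.plugin sub-module. Both land on the CHART statement; schematically (a sketch of the protocol line, not the verbatim Netdata emitter):

    #include <stdio.h>

    /* Schematic CHART statement; fields: type.id name title units family
     * context charttype priority update_every options plugin module. */
    static void define_chart(const char *type, const char *id, const char *title,
                             const char *units, const char *family, int priority,
                             int update_every, const char *module)
    {
        printf("CHART %s.%s '' '%s' '%s' '%s' '' line %d %d '' 'ebpf.plugin' '%s'\n",
               type, id, title, units, family, priority, update_every, module);
    }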
@@ -445,88 +456,88 @@ void ebpf_socket_send_apps_data(ebpf_module_t *em, struct target *root) */ static void ebpf_create_global_charts(ebpf_module_t *em) { - ebpf_create_chart(NETDATA_EBPF_FAMILY, + ebpf_create_chart(NETDATA_EBPF_IP_FAMILY, NETDATA_TCP_FUNCTION_COUNT, "Calls to internal functions", EBPF_COMMON_DIMENSION_CALL, - NETDATA_SOCKET_GROUP, + NETDATA_SOCKET_KERNEL_FUNCTIONS, NULL, NETDATA_EBPF_CHART_TYPE_LINE, 21070, ebpf_create_global_dimension, socket_publish_aggregated, - 3); + 3, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); - ebpf_create_chart(NETDATA_EBPF_FAMILY, NETDATA_TCP_FUNCTION_BITS, + ebpf_create_chart(NETDATA_EBPF_IP_FAMILY, NETDATA_TCP_FUNCTION_BITS, "TCP bandwidth", EBPF_COMMON_DIMENSION_BITS, - NETDATA_SOCKET_GROUP, + NETDATA_SOCKET_KERNEL_FUNCTIONS, NULL, NETDATA_EBPF_CHART_TYPE_LINE, 21071, ebpf_create_global_dimension, socket_publish_aggregated, - 3); + 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); if (em->mode < MODE_ENTRY) { - ebpf_create_chart(NETDATA_EBPF_FAMILY, + ebpf_create_chart(NETDATA_EBPF_IP_FAMILY, NETDATA_TCP_FUNCTION_ERROR, "TCP errors", EBPF_COMMON_DIMENSION_CALL, - NETDATA_SOCKET_GROUP, + NETDATA_SOCKET_KERNEL_FUNCTIONS, NULL, NETDATA_EBPF_CHART_TYPE_LINE, 21072, ebpf_create_global_dimension, socket_publish_aggregated, - 2); + 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); } - ebpf_create_chart(NETDATA_EBPF_FAMILY, + ebpf_create_chart(NETDATA_EBPF_IP_FAMILY, NETDATA_TCP_RETRANSMIT, "Packages retransmitted", EBPF_COMMON_DIMENSION_CALL, - NETDATA_SOCKET_GROUP, + NETDATA_SOCKET_KERNEL_FUNCTIONS, NULL, NETDATA_EBPF_CHART_TYPE_LINE, 21073, ebpf_create_global_dimension, &socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT], - 1); + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); - ebpf_create_chart(NETDATA_EBPF_FAMILY, + ebpf_create_chart(NETDATA_EBPF_IP_FAMILY, NETDATA_UDP_FUNCTION_COUNT, "UDP calls", EBPF_COMMON_DIMENSION_CALL, - NETDATA_SOCKET_GROUP, + NETDATA_SOCKET_KERNEL_FUNCTIONS, NULL, NETDATA_EBPF_CHART_TYPE_LINE, 21074, ebpf_create_global_dimension, &socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF], - 2); + 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); - ebpf_create_chart(NETDATA_EBPF_FAMILY, NETDATA_UDP_FUNCTION_BITS, + ebpf_create_chart(NETDATA_EBPF_IP_FAMILY, NETDATA_UDP_FUNCTION_BITS, "UDP bandwidth", EBPF_COMMON_DIMENSION_BITS, - NETDATA_SOCKET_GROUP, + NETDATA_SOCKET_KERNEL_FUNCTIONS, NULL, NETDATA_EBPF_CHART_TYPE_LINE, 21075, ebpf_create_global_dimension, &socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF], - 2); + 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); if (em->mode < MODE_ENTRY) { - ebpf_create_chart(NETDATA_EBPF_FAMILY, + ebpf_create_chart(NETDATA_EBPF_IP_FAMILY, NETDATA_UDP_FUNCTION_ERROR, "UDP errors", EBPF_COMMON_DIMENSION_CALL, - NETDATA_SOCKET_GROUP, + NETDATA_SOCKET_KERNEL_FUNCTIONS, NULL, NETDATA_EBPF_CHART_TYPE_LINE, 21076, ebpf_create_global_dimension, &socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF], - 2); + 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); } } @@ -540,7 +551,6 @@ static void ebpf_create_global_charts(ebpf_module_t *em) */ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr) { - UNUSED(em); struct target *root = ptr;; ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_SENT, "Bytes sent", EBPF_COMMON_DIMENSION_BITS, @@ -548,7 +558,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr) NETDATA_EBPF_CHART_TYPE_STACKED, 20080, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); + root, 
em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_RECV, "bytes received", EBPF_COMMON_DIMENSION_BITS, @@ -556,7 +566,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr) NETDATA_EBPF_CHART_TYPE_STACKED, 20081, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, "Calls for tcp_sendmsg", @@ -565,7 +575,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr) NETDATA_EBPF_CHART_TYPE_STACKED, 20082, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, "Calls for tcp_cleanup_rbuf", @@ -574,7 +584,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr) NETDATA_EBPF_CHART_TYPE_STACKED, 20083, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, "Calls for tcp_retransmit", @@ -583,7 +593,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr) NETDATA_EBPF_CHART_TYPE_STACKED, 20084, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, "Calls for udp_sendmsg", @@ -592,7 +602,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr) NETDATA_EBPF_CHART_TYPE_STACKED, 20085, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, "Calls for udp_recvmsg", @@ -601,7 +611,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr) NETDATA_EBPF_CHART_TYPE_STACKED, 20086, ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], - root); + root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET); socket_apps_created = 1; } @@ -611,15 +621,16 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr) * * Create common charts. * - * @param id the chart id - * @param title the chart title - * @param units the units label - * @param family the group name used to attach the chart on dashboard - * @param order the chart order - * @param ptr the plot structure with values. + * @param id chart id + * @param title chart title + * @param units units label + * @param family group name used to attach the chart on dashboard + * @param order chart order + * @param update_every value to overwrite the update frequency set by the server. + * @param ptr plot structure with values. 
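Apps-submenu charts receive the target list root because they carry one dimension per process group rather than a fixed dimension set. A rough sketch of that walk, with struct target pared down to the fields this file actually touches and a schematic DIMENSION line:

    #include <stdio.h>

    /* Minimal stand-in for the fields of struct target used here. */
    struct target {
        char *name;
        int exposed;
        int processes;
        struct target *next;
    };

    /* One stacked dimension per exposed apps group; the algorithm string
     * ("incremental"/"absolute") comes from the ebpf_algorithms[] table. */
    static void emit_apps_dimensions(struct target *root, const char *algorithm)
    {
        for (struct target *w = root; w; w = w->next) {
            if (w->exposed && w->processes)
                printf("DIMENSION %s '' %s 1 1\n", w->name, algorithm);
        }
    }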
*/ static void ebpf_socket_create_nv_chart(char *id, char *title, char *units, - char *family, int order, netdata_vector_plot_t *ptr) + char *family, int order, int update_every, netdata_vector_plot_t *ptr) { ebpf_write_chart_cmd(NETDATA_EBPF_FAMILY, id, @@ -628,7 +639,9 @@ static void ebpf_socket_create_nv_chart(char *id, char *title, char *units, family, NETDATA_EBPF_CHART_TYPE_STACKED, NULL, - order); + order, + update_every, + NETDATA_EBPF_MODULE_NAME_SOCKET); uint32_t i; uint32_t end = ptr->last_plot; @@ -653,10 +666,11 @@ static void ebpf_socket_create_nv_chart(char *id, char *title, char *units, * @param units the units label * @param family the group name used to attach the chart on dashboard * @param order the chart order + * @param update_every value to overwrite the update frequency set by the server. * @param ptr the plot structure with values. */ static void ebpf_socket_create_nv_retransmit(char *id, char *title, char *units, - char *family, int order, netdata_vector_plot_t *ptr) + char *family, int order, int update_every, netdata_vector_plot_t *ptr) { ebpf_write_chart_cmd(NETDATA_EBPF_FAMILY, id, @@ -665,7 +679,9 @@ static void ebpf_socket_create_nv_retransmit(char *id, char *title, char *units, family, NETDATA_EBPF_CHART_TYPE_STACKED, NULL, - order); + order, + update_every, + NETDATA_EBPF_MODULE_NAME_SOCKET); uint32_t i; uint32_t end = ptr->last_plot; @@ -684,8 +700,9 @@ static void ebpf_socket_create_nv_retransmit(char *id, char *title, char *units, * Recreate the charts when new sockets are created. * * @param ptr a pointer for inbound or outbound vectors. + * @param update_every value to overwrite the update frequency set by the server. */ -static void ebpf_socket_create_nv_charts(netdata_vector_plot_t *ptr) +static void ebpf_socket_create_nv_charts(netdata_vector_plot_t *ptr, int update_every) { // We do not have new sockets, so we do not need move forward if (ptr->max_plot == ptr->last_plot) @@ -698,34 +715,34 @@ static void ebpf_socket_create_nv_charts(netdata_vector_plot_t *ptr) "Outbound connections (bytes).", EBPF_COMMON_DIMENSION_BYTES, NETDATA_NETWORK_CONNECTIONS_GROUP, 21080, - ptr); + update_every, ptr); ebpf_socket_create_nv_chart(NETDATA_NV_OUTBOUND_PACKETS, "Outbound connections (packets)", EBPF_COMMON_DIMENSION_PACKETS, NETDATA_NETWORK_CONNECTIONS_GROUP, 21082, - ptr); + update_every, ptr); ebpf_socket_create_nv_retransmit(NETDATA_NV_OUTBOUND_RETRANSMIT, "Retransmitted packets", EBPF_COMMON_DIMENSION_CALL, NETDATA_NETWORK_CONNECTIONS_GROUP, 21083, - ptr); + update_every, ptr); } else { ebpf_socket_create_nv_chart(NETDATA_NV_INBOUND_BYTES, "Inbound connections (bytes)", EBPF_COMMON_DIMENSION_BYTES, NETDATA_NETWORK_CONNECTIONS_GROUP, 21084, - ptr); + update_every, ptr); ebpf_socket_create_nv_chart(NETDATA_NV_INBOUND_PACKETS, "Inbound connections (packets)", EBPF_COMMON_DIMENSION_PACKETS, NETDATA_NETWORK_CONNECTIONS_GROUP, 21085, - ptr); + update_every, ptr); } ptr->flags |= NETWORK_VIEWER_CHARTS_CREATED; @@ -1437,7 +1454,7 @@ static void read_listen_table() uint16_t key = 0; uint16_t next_key = 0; - int fd = map_fd[NETDATA_SOCKET_LISTEN_TABLE]; + int fd = socket_maps[NETDATA_SOCKET_LPORTS].map_fd; uint8_t value; while (bpf_map_get_next_key(fd, &key, &next_key) == 0) { int test = bpf_map_lookup_elem(fd, &key, &value); @@ -1475,9 +1492,9 @@ void *ebpf_socket_read_hash(void *ptr) read_thread_closed = 0; heartbeat_t hb; heartbeat_init(&hb); - usec_t step = NETDATA_SOCKET_READ_SLEEP_MS * em->update_time; - int fd_ipv4 = map_fd[NETDATA_SOCKET_IPV4_HASH_TABLE]; - 
int fd_ipv6 = map_fd[NETDATA_SOCKET_IPV6_HASH_TABLE]; + usec_t step = NETDATA_SOCKET_READ_SLEEP_MS * em->update_every; + int fd_ipv4 = socket_maps[NETDATA_SOCKET_TABLE_IPV4].map_fd; + int fd_ipv6 = socket_maps[NETDATA_SOCKET_TABLE_IPV6].map_fd; int network_connection = em->optional; while (!close_ebpf_plugin) { usec_t dt = heartbeat_next(&hb, step); @@ -1504,12 +1521,12 @@ static void read_hash_global_tables() netdata_idx_t res[NETDATA_SOCKET_COUNTER]; netdata_idx_t *val = socket_hash_values; - int fd = map_fd[NETDATA_SOCKET_GLOBAL_HASH_TABLE]; + int fd = socket_maps[NETDATA_SOCKET_GLOBAL].map_fd; for (idx = 0; idx < NETDATA_SOCKET_COUNTER; idx++) { if (!bpf_map_lookup_elem(fd, &idx, val)) { uint64_t total = 0; int i; - int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs; + int end = ebpf_nprocs; for (i = 0; i < end; i++) total += val[i]; @@ -1586,7 +1603,7 @@ void ebpf_socket_bandwidth_accumulator(ebpf_bandwidth_t *out) */ static void ebpf_socket_update_apps_data() { - int fd = map_fd[NETDATA_SOCKET_APPS_HASH_TABLE]; + int fd = socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH].map_fd; ebpf_bandwidth_t *eb = bandwidth_vector; uint32_t key; struct pid_stat *pids = root_of_pids; @@ -1606,6 +1623,475 @@ static void ebpf_socket_update_apps_data() } } +/** + * Update cgroup + * + * Update cgroup data based in + */ +static void ebpf_update_socket_cgroup() +{ + ebpf_cgroup_target_t *ect ; + + ebpf_bandwidth_t *eb = bandwidth_vector; + int fd = socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH].map_fd; + + pthread_mutex_lock(&mutex_cgroup_shm); + for (ect = ebpf_cgroup_pids; ect; ect = ect->next) { + struct pid_on_target2 *pids; + for (pids = ect->pids; pids; pids = pids->next) { + int pid = pids->pid; + ebpf_bandwidth_t *out = &pids->socket; + ebpf_socket_publish_apps_t *publish = &ect->publish_socket; + if (likely(socket_bandwidth_curr) && socket_bandwidth_curr[pid]) { + ebpf_socket_publish_apps_t *in = socket_bandwidth_curr[pid]; + + publish->bytes_sent = in->bytes_sent; + publish->bytes_received = in->bytes_received; + publish->call_tcp_sent = in->call_tcp_sent; + publish->call_tcp_received = in->call_tcp_received; + publish->retransmit = in->retransmit; + publish->call_udp_sent = in->call_udp_sent; + publish->call_udp_received = in->call_udp_received; + } else { + if (!bpf_map_lookup_elem(fd, &pid, eb)) { + ebpf_socket_bandwidth_accumulator(eb); + + memcpy(out, eb, sizeof(ebpf_bandwidth_t)); + + publish->bytes_sent = out->bytes_sent; + publish->bytes_received = out->bytes_received; + publish->call_tcp_sent = out->call_tcp_sent; + publish->call_tcp_received = out->call_tcp_received; + publish->retransmit = out->retransmit; + publish->call_udp_sent = out->call_udp_sent; + publish->call_udp_received = out->call_udp_received; + } + } + } + } + pthread_mutex_unlock(&mutex_cgroup_shm); +} + +/** + * Sum PIDs + * + * Sum values for all targets. 
+ *
+ * @param socket structure used to store the accumulated data
+ * @param pids   input data
+ */
+static void ebpf_socket_sum_cgroup_pids(ebpf_socket_publish_apps_t *socket, struct pid_on_target2 *pids)
+{
+    ebpf_socket_publish_apps_t accumulator;
+    memset(&accumulator, 0, sizeof(accumulator));
+
+    while (pids) {
+        ebpf_bandwidth_t *w = &pids->socket;
+
+        accumulator.bytes_received += w->bytes_received;
+        accumulator.bytes_sent += w->bytes_sent;
+        accumulator.call_tcp_received += w->call_tcp_received;
+        accumulator.call_tcp_sent += w->call_tcp_sent;
+        accumulator.retransmit += w->retransmit;
+        accumulator.call_udp_received += w->call_udp_received;
+        accumulator.call_udp_sent += w->call_udp_sent;
+
+        pids = pids->next;
+    }
+
+    socket->bytes_sent = (accumulator.bytes_sent >= socket->bytes_sent) ? accumulator.bytes_sent : socket->bytes_sent;
+    socket->bytes_received = (accumulator.bytes_received >= socket->bytes_received) ? accumulator.bytes_received : socket->bytes_received;
+    socket->call_tcp_sent = (accumulator.call_tcp_sent >= socket->call_tcp_sent) ? accumulator.call_tcp_sent : socket->call_tcp_sent;
+    socket->call_tcp_received = (accumulator.call_tcp_received >= socket->call_tcp_received) ? accumulator.call_tcp_received : socket->call_tcp_received;
+    socket->retransmit = (accumulator.retransmit >= socket->retransmit) ? accumulator.retransmit : socket->retransmit;
+    socket->call_udp_sent = (accumulator.call_udp_sent >= socket->call_udp_sent) ? accumulator.call_udp_sent : socket->call_udp_sent;
+    socket->call_udp_received = (accumulator.call_udp_received >= socket->call_udp_received) ? accumulator.call_udp_received : socket->call_udp_received;
+}
+
+/**
+ * Create specific socket charts
+ *
+ * Create charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
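The ternaries in ebpf_socket_sum_cgroup_pids() are a monotonic guard: when a PID exits between reads the fresh sum can shrink, and publishing the smaller value would look like a backwards step on an incremental dimension, so the previous total is kept. Factored out:

    #include <stdint.h>

    /* Keep the running maximum so incremental dimensions never decrease. */
    static inline uint64_t monotonic_max(uint64_t previous, uint64_t fresh)
    {
        return (fresh >= previous) ? fresh : previous;
    }

With this helper each assignment above collapses to the form socket->retransmit = monotonic_max(socket->retransmit, accumulator.retransmit), and so on for the other counters.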
+ */ +static void ebpf_create_specific_socket_charts(char *type, int update_every) +{ + ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_RECV, + "Bytes received", + EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP, + NETDATA_CGROUP_SOCKET_BYTES_RECV_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5300, + ebpf_create_global_dimension, + &socket_publish_aggregated[NETDATA_IDX_TCP_CLEANUP_RBUF], 1, + update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_SENT, + "Bytes sent", + EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP, + NETDATA_CGROUP_SOCKET_BYTES_SEND_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5301, + ebpf_create_global_dimension, + socket_publish_aggregated, 1, + update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, + "Calls to tcp_cleanup_rbuf.", + EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP, + NETDATA_CGROUP_SOCKET_TCP_RECV_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5302, + ebpf_create_global_dimension, + &socket_publish_aggregated[NETDATA_IDX_TCP_CLEANUP_RBUF], 1, + update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, + "Calls to tcp_sendmsg.", + EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP, + NETDATA_CGROUP_SOCKET_TCP_SEND_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5303, + ebpf_create_global_dimension, + socket_publish_aggregated, 1, + update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, + "Calls to tcp_retransmit.", + EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP, + NETDATA_CGROUP_SOCKET_TCP_RETRANSMIT_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5304, + ebpf_create_global_dimension, + &socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT], 1, + update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, + "Calls to udp_sendmsg", + EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP, + NETDATA_CGROUP_SOCKET_UDP_SEND_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5305, + ebpf_create_global_dimension, + &socket_publish_aggregated[NETDATA_IDX_UDP_SENDMSG], 1, + update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, + "Calls to udp_recvmsg", + EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP, + NETDATA_CGROUP_SOCKET_UDP_RECV_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5306, + ebpf_create_global_dimension, + &socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF], 1, + update_every, NETDATA_EBPF_MODULE_NAME_SWAP); +} + +/** + * Obsolete specific socket charts + * + * Obsolete charts for cgroup/application. + * + * @param type the chart type. + * @param update_every value to overwrite the update frequency set by the server. 
+ */
+static void ebpf_obsolete_specific_socket_charts(char *type, int update_every)
+{
+    ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_RECV, "Bytes received",
+                              EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
+                              NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT,
+                              NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5300, update_every);
+
+    ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_SENT, "Bytes sent",
+                              EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
+                              NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT,
+                              NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5301, update_every);
+
+    ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, "Calls to tcp_cleanup_rbuf.",
+                              EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
+                              NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT,
+                              NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5302, update_every);
+
+    ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, "Calls to tcp_sendmsg.",
+                              EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
+                              NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT,
+                              NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5303, update_every);
+
+    ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, "Calls to tcp_retransmit.",
+                              EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
+                              NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT,
+                              NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5304, update_every);
+
+    ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, "Calls to udp_sendmsg",
+                              EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
+                              NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT,
+                              NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5305, update_every);
+
+    ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, "Calls to udp_recvmsg",
+                              EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP, NETDATA_EBPF_CHART_TYPE_LINE,
+                              NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT,
+                              NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5306, update_every);
+}
+
+/**
+ * Send specific socket data
+ *
+ * Send data for specific cgroup/apps.
+ * + * @param type chart type + * @param values structure with values that will be sent to netdata + */ +static void ebpf_send_specific_socket_data(char *type, ebpf_socket_publish_apps_t *values) +{ + write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_SENT); + write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_SENDMSG].name, + (long long) values->bytes_sent); + write_end_chart(); + + write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_RECV); + write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_CLEANUP_RBUF].name, + (long long) values->bytes_received); + write_end_chart(); + + write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS); + write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_SENDMSG].name, + (long long) values->call_tcp_sent); + write_end_chart(); + + write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS); + write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_CLEANUP_RBUF].name, + (long long) values->call_tcp_received); + write_end_chart(); + + write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT); + write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT].name, + (long long) values->retransmit); + write_end_chart(); + + write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS); + write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_UDP_SENDMSG].name, + (long long) values->call_udp_sent); + write_end_chart(); + + write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS); + write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF].name, + (long long) values->call_udp_received); + write_end_chart(); +} + +/** + * Create Systemd Socket Charts + * + * Create charts when systemd is enabled + * + * @param update_every value to overwrite the update frequency set by the server. 
+ **/ +static void ebpf_create_systemd_socket_charts(int update_every) +{ + ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_RECV, + "Bytes received", EBPF_COMMON_DIMENSION_BITS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20080, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET, + update_every); + + ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_SENT, + "Bytes sent", EBPF_COMMON_DIMENSION_BITS, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20081, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET, + update_every); + + ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, + "Calls to tcp_cleanup_rbuf.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20082, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET, + update_every); + + ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, + "Calls to tcp_sendmsg.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20083, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET, + update_every); + + ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, + "Calls to tcp_retransmit", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20084, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET, + update_every); + + ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, + "Calls to udp_sendmsg", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20085, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET, + update_every); + + ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, + "Calls to udp_recvmsg", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_APPS_NET_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20086, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET, + update_every); +} + +/** + * Send Systemd charts + * + * Send collected data to Netdata. 
+ *
+ * @return It returns the status of the charts: zero when a specific dimension must be removed
+ *         (so the charts have to be re-created), otherwise 1 to avoid re-creating the charts.
+ */
+static int ebpf_send_systemd_socket_charts()
+{
+    int ret = 1;
+    ebpf_cgroup_target_t *ect;
+    write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_SENT);
+    for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+        if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+            write_chart_dimension(ect->name, (long long)ect->publish_socket.bytes_sent);
+        } else
+            ret = 0;
+    }
+    write_end_chart();
+
+    write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_RECV);
+    for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+        if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+            write_chart_dimension(ect->name, (long long)ect->publish_socket.bytes_received);
+        }
+    }
+    write_end_chart();
+
+    write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS);
+    for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+        if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+            write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_sent);
+        }
+    }
+    write_end_chart();
+
+    write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS);
+    for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+        if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+            write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_received);
+        }
+    }
+    write_end_chart();
+
+    write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT);
+    for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+        if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+            write_chart_dimension(ect->name, (long long)ect->publish_socket.retransmit);
+        }
+    }
+    write_end_chart();
+
+    write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS);
+    for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+        if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+            write_chart_dimension(ect->name, (long long)ect->publish_socket.call_udp_sent);
+        }
+    }
+    write_end_chart();
+
+    write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS);
+    for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+        if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+            write_chart_dimension(ect->name, (long long)ect->publish_socket.call_udp_received);
+        }
+    }
+    write_end_chart();
+
+    return ret;
+}
+
+/**
+ * Update Cgroup algorithm
+ *
+ * Change the algorithm from absolute to incremental.
+ */
+void ebpf_socket_update_cgroup_algorithm()
+{
+    int i;
+    for (i = 0; i < NETDATA_MAX_SOCKET_VECTOR; i++) {
+        netdata_publish_syscall_t *ptr = &socket_publish_aggregated[i];
+        freez(ptr->algorithm);
+        ptr->algorithm = strdupz(ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
+    }
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
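+ *
+ * Flow sketch (condensed pseudo-code of the systemd branch below):
+ *
+ *     static int created = 0;
+ *     if (!created) { ebpf_create_systemd_socket_charts(update_every); created = 1; }
+ *     created = ebpf_send_systemd_socket_charts();  // 0 forces re-creation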
+*/ +static void ebpf_socket_send_cgroup_data(int update_every) +{ + if (!ebpf_cgroup_pids) + return; + + pthread_mutex_lock(&mutex_cgroup_shm); + ebpf_cgroup_target_t *ect; + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + ebpf_socket_sum_cgroup_pids(&ect->publish_socket, ect->pids); + } + + int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; + if (has_systemd) { + static int systemd_charts = 0; + if (!systemd_charts) { + ebpf_create_systemd_socket_charts(update_every); + systemd_charts = 1; + } + systemd_charts = ebpf_send_systemd_socket_charts(); + } + + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (ect->systemd) + continue; + + if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART)) { + ebpf_create_specific_socket_charts(ect->name, update_every); + ect->flags |= NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART; + } + + if (ect->flags & NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART && ect->updated) { + ebpf_send_specific_socket_data(ect->name, &ect->publish_socket); + } else { + ebpf_obsolete_specific_socket_charts(ect->name, update_every); + ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART; + } + } + + pthread_mutex_unlock(&mutex_cgroup_shm); +} + /***************************************************************** * * FUNCTIONS WITH THE MAIN LOOP @@ -1624,7 +2110,6 @@ struct netdata_static_thread socket_threads = {"EBPF SOCKET READ", */ static void socket_collector(usec_t step, ebpf_module_t *em) { - UNUSED(em); UNUSED(step); heartbeat_t hb; heartbeat_init(&hb); @@ -1634,49 +2119,63 @@ static void socket_collector(usec_t step, ebpf_module_t *em) netdata_thread_create(socket_threads.thread, socket_threads.name, NETDATA_THREAD_OPTION_JOINABLE, ebpf_socket_read_hash, em); + int cgroups = em->cgroup_charts; + if (cgroups) + ebpf_socket_update_cgroup_algorithm(); + int socket_apps_enabled = ebpf_modules[EBPF_MODULE_SOCKET_IDX].apps_charts; int socket_global_enabled = ebpf_modules[EBPF_MODULE_SOCKET_IDX].global_charts; int network_connection = em->optional; + int update_every = em->update_every; + int counter = update_every - 1; while (!close_ebpf_plugin) { pthread_mutex_lock(&collect_data_mutex); pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); - if (socket_global_enabled) - read_hash_global_tables(); + if (++counter == update_every) { + counter = 0; + if (socket_global_enabled) + read_hash_global_tables(); - if (socket_apps_enabled) - ebpf_socket_update_apps_data(); + if (socket_apps_enabled) + ebpf_socket_update_apps_data(); - calculate_nv_plot(); + if (cgroups) + ebpf_update_socket_cgroup(); - pthread_mutex_lock(&lock); - if (socket_global_enabled) - ebpf_socket_send_data(em); + calculate_nv_plot(); - if (socket_apps_enabled) - ebpf_socket_send_apps_data(em, apps_groups_root_target); + pthread_mutex_lock(&lock); + if (socket_global_enabled) + ebpf_socket_send_data(em); - fflush(stdout); + if (socket_apps_enabled) + ebpf_socket_send_apps_data(em, apps_groups_root_target); - if (network_connection) { - // We are calling fflush many times, because when we have a lot of dimensions - // we began to have not expected outputs and Netdata closed the plugin. 
-            pthread_mutex_lock(&nv_mutex);
-            ebpf_socket_create_nv_charts(&inbound_vectors);
-            fflush(stdout);
-            ebpf_socket_send_nv_data(&inbound_vectors);
+            if (cgroups)
+                ebpf_socket_send_cgroup_data(update_every);
 
-            ebpf_socket_create_nv_charts(&outbound_vectors);
             fflush(stdout);
-            ebpf_socket_send_nv_data(&outbound_vectors);
-            wait_to_plot = 0;
-            pthread_mutex_unlock(&nv_mutex);
 
+            if (network_connection) {
+                // We call fflush() many times because, with a large number of dimensions,
+                // the output started to arrive malformed and Netdata closed the plugin.
+                pthread_mutex_lock(&nv_mutex);
+                ebpf_socket_create_nv_charts(&inbound_vectors, update_every);
+                fflush(stdout);
+                ebpf_socket_send_nv_data(&inbound_vectors);
+
+                ebpf_socket_create_nv_charts(&outbound_vectors, update_every);
+                fflush(stdout);
+                ebpf_socket_send_nv_data(&outbound_vectors);
+                wait_to_plot = 0;
+                pthread_mutex_unlock(&nv_mutex);
+
+            }
+            pthread_mutex_unlock(&lock);
         }
         pthread_mutex_unlock(&collect_data_mutex);
-        pthread_mutex_unlock(&lock);
-
     }
 }
@@ -1885,17 +2384,18 @@ static void ebpf_socket_cleanup(void *ptr)
     clean_hostnames(network_viewer_opt.excluded_hostnames);
 
     pthread_mutex_destroy(&nv_mutex);
-    freez(socket_data.map_fd);
     freez(socket_threads.thread);
 
-    struct bpf_program *prog;
-    size_t i = 0 ;
-    bpf_object__for_each_program(prog, objects) {
-        bpf_link__destroy(probe_links[i]);
-        i++;
+    if (probe_links) {
+        struct bpf_program *prog;
+        size_t i = 0 ;
+        bpf_object__for_each_program(prog, objects) {
+            bpf_link__destroy(probe_links[i]);
+            i++;
+        }
+        bpf_object__close(objects);
     }
-    bpf_object__close(objects);
 
     finalized_threads = 1;
 }
@@ -1910,15 +2410,17 @@ static void ebpf_socket_cleanup(void *ptr)
  * We are not testing the return, because callocz does this and shutdown the software
  * case it was not possible to allocate.
  *
- * @param length is the length for the vectors used inside the collector.
+ * @param apps whether the apps charts are enabled
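+ *
+ * Sizing sketch (limits resolved at runtime, as allocated below):
+ *
+ *     socket_bandwidth_curr : pid_max pointers, only when apps != 0
+ *     bandwidth_vector      : one ebpf_bandwidth_t per CPU (ebpf_nprocs)
+ *     socket_values         : one netdata_socket_t per CPU (ebpf_nprocs)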
*/ -static void ebpf_socket_allocate_global_vectors(size_t length) +static void ebpf_socket_allocate_global_vectors(int apps) { - memset(socket_aggregated_data, 0 ,length * sizeof(netdata_syscall_stat_t)); - memset(socket_publish_aggregated, 0 ,length * sizeof(netdata_publish_syscall_t)); + memset(socket_aggregated_data, 0 ,NETDATA_MAX_SOCKET_VECTOR * sizeof(netdata_syscall_stat_t)); + memset(socket_publish_aggregated, 0 ,NETDATA_MAX_SOCKET_VECTOR * sizeof(netdata_publish_syscall_t)); socket_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t)); - socket_bandwidth_curr = callocz((size_t)pid_max, sizeof(ebpf_socket_publish_apps_t *)); + if (apps) + socket_bandwidth_curr = callocz((size_t)pid_max, sizeof(ebpf_socket_publish_apps_t *)); + bandwidth_vector = callocz((size_t)ebpf_nprocs, sizeof(ebpf_bandwidth_t)); socket_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_socket_t)); @@ -1926,14 +2428,6 @@ static void ebpf_socket_allocate_global_vectors(size_t length) outbound_vectors.plot = callocz(network_viewer_opt.max_dim, sizeof(netdata_socket_plot_t)); } -/** - * Set local function pointers, this function will never be compiled with static libraries - */ -static void set_local_pointers() -{ - map_fd = socket_data.map_fd; -} - /** * Initialize Inbound and Outbound * @@ -2860,9 +3354,7 @@ void *ebpf_socket_thread(void *ptr) ebpf_module_t *em = (ebpf_module_t *)ptr; em->maps = socket_maps; - fill_ebpf_data(&socket_data); - ebpf_update_module(em, &socket_config, NETDATA_NETWORK_CONFIG_FILE); parse_network_viewer_section(&socket_config); parse_service_name_section(&socket_config); parse_table_size_options(&socket_config); @@ -2876,16 +3368,13 @@ void *ebpf_socket_thread(void *ptr) } pthread_mutex_lock(&lock); - ebpf_socket_allocate_global_vectors(NETDATA_MAX_SOCKET_VECTOR); + ebpf_socket_allocate_global_vectors(em->apps_charts); initialize_inbound_outbound(); - if (ebpf_update_kernel(&socket_data)) { - pthread_mutex_unlock(&lock); - goto endsocket; - } + if (running_on_kernel < NETDATA_EBPF_KERNEL_5_0) + em->mode = MODE_ENTRY; - set_local_pointers(); - probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, socket_data.map_fd); + probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects); if (!probe_links) { pthread_mutex_unlock(&lock); goto endsocket; @@ -2904,7 +3393,7 @@ void *ebpf_socket_thread(void *ptr) finalized_threads = 0; pthread_mutex_unlock(&lock); - socket_collector((usec_t)(em->update_time * USEC_PER_SEC), em); + socket_collector((usec_t)(em->update_every * USEC_PER_SEC), em); endsocket: netdata_thread_cleanup_pop(1); diff --git a/collectors/ebpf.plugin/ebpf_socket.h b/collectors/ebpf.plugin/ebpf_socket.h index 8dd422507..e3c000c76 100644 --- a/collectors/ebpf.plugin/ebpf_socket.h +++ b/collectors/ebpf.plugin/ebpf_socket.h @@ -4,15 +4,11 @@ #include #include "libnetdata/avl/avl.h" +// Module name +#define NETDATA_EBPF_MODULE_NAME_SOCKET "socket" + // Vector indexes #define NETDATA_UDP_START 3 -#define NETDATA_RETRANSMIT_START 5 - -#define NETDATA_SOCKET_APPS_HASH_TABLE 0 -#define NETDATA_SOCKET_IPV4_HASH_TABLE 1 -#define NETDATA_SOCKET_IPV6_HASH_TABLE 2 -#define NETDATA_SOCKET_GLOBAL_HASH_TABLE 4 -#define NETDATA_SOCKET_LISTEN_TABLE 5 #define NETDATA_SOCKET_READ_SLEEP_MS 800000ULL @@ -32,9 +28,12 @@ enum ebpf_socket_table_list { NETDATA_SOCKET_TABLE_BANDWIDTH, + NETDATA_SOCKET_GLOBAL, + NETDATA_SOCKET_LPORTS, NETDATA_SOCKET_TABLE_IPV4, NETDATA_SOCKET_TABLE_IPV6, - NETDATA_SOCKET_TABLE_UDP + NETDATA_SOCKET_TABLE_UDP, + 
NETDATA_SOCKET_TABLE_CTRL }; enum ebpf_socket_publish_index { @@ -74,8 +73,9 @@ typedef enum ebpf_socket_idx { NETDATA_SOCKET_COUNTER } ebpf_socket_index_t; -#define NETDATA_SOCKET_GROUP "Socket" -#define NETDATA_NETWORK_CONNECTIONS_GROUP "Network connections" +#define NETDATA_SOCKET_KERNEL_FUNCTIONS "kernel" +#define NETDATA_NETWORK_CONNECTIONS_GROUP "network connections" +#define NETDATA_CGROUP_NET_GROUP "network (eBPF)" // Global chart name #define NETDATA_TCP_FUNCTION_COUNT "tcp_functions" @@ -113,6 +113,23 @@ typedef enum ebpf_socket_idx { #define NETDATA_MINIMUM_IPV4_CIDR 0 #define NETDATA_MAXIMUM_IPV4_CIDR 32 +// Contexts +#define NETDATA_CGROUP_SOCKET_BYTES_RECV_CONTEXT "cgroup.net_bytes_recv" +#define NETDATA_CGROUP_SOCKET_BYTES_SEND_CONTEXT "cgroup.net_bytes_send" +#define NETDATA_CGROUP_SOCKET_TCP_RECV_CONTEXT "cgroup.net_tcp_recv" +#define NETDATA_CGROUP_SOCKET_TCP_SEND_CONTEXT "cgroup.net_tcp_send" +#define NETDATA_CGROUP_SOCKET_TCP_RETRANSMIT_CONTEXT "cgroup.net_retransmit" +#define NETDATA_CGROUP_SOCKET_UDP_RECV_CONTEXT "cgroup.net_udp_recv" +#define NETDATA_CGROUP_SOCKET_UDP_SEND_CONTEXT "cgroup.net_udp_send" + +#define NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT "services.net_bytes_recv" +#define NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT "services.net_bytes_send" +#define NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT "services.net_tcp_recv" +#define NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT "services.net_tcp_send" +#define NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT "services.net_retransmit" +#define NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT "services.net_udp_recv" +#define NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT "services.net_udp_send" + typedef struct ebpf_socket_publish_apps { // Data read uint64_t bytes_sent; // Bytes sent @@ -312,5 +329,6 @@ extern void parse_service_name_section(struct config *cfg); extern void clean_socket_apps_structures(); extern ebpf_socket_publish_apps_t **socket_bandwidth_curr; +extern struct config socket_config; #endif diff --git a/collectors/ebpf.plugin/ebpf_softirq.c b/collectors/ebpf.plugin/ebpf_softirq.c new file mode 100644 index 000000000..119c1222a --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_softirq.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "ebpf.h" +#include "ebpf_softirq.h" + +struct config softirq_config = { .first_section = NULL, + .last_section = NULL, + .mutex = NETDATA_MUTEX_INITIALIZER, + .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, + .rwlock = AVL_LOCK_INITIALIZER } }; + +#define SOFTIRQ_MAP_LATENCY 0 +static ebpf_local_maps_t softirq_maps[] = { + { + .name = "tbl_softirq", + .internal_input = NETDATA_SOFTIRQ_MAX_IRQS, + .user_input = 0, + .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED + }, + /* end */ + { + .name = NULL, + .internal_input = 0, + .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED + } +}; + +#define SOFTIRQ_TP_CLASS_IRQ "irq" +static ebpf_tracepoint_t softirq_tracepoints[] = { + {.enabled = false, .class = SOFTIRQ_TP_CLASS_IRQ, .event = "softirq_entry"}, + {.enabled = false, .class = SOFTIRQ_TP_CLASS_IRQ, .event = "softirq_exit"}, + /* end */ + {.enabled = false, .class = NULL, .event = NULL} +}; + +// these must be in the order defined by the kernel: +// https://elixir.bootlin.com/linux/v5.12.19/source/include/trace/events/irq.h#L13 +static softirq_val_t softirq_vals[] = { + {.name = "HI", .latency = 0}, + {.name = "TIMER", .latency = 0}, + {.name = "NET_TX", 
.latency = 0}, + {.name = "NET_RX", .latency = 0}, + {.name = "BLOCK", .latency = 0}, + {.name = "IRQ_POLL", .latency = 0}, + {.name = "TASKLET", .latency = 0}, + {.name = "SCHED", .latency = 0}, + {.name = "HRTIMER", .latency = 0}, + {.name = "RCU", .latency = 0}, +}; + +// tmp store for soft IRQ values we get from a per-CPU eBPF map. +static softirq_ebpf_val_t *softirq_ebpf_vals = NULL; + +static struct bpf_link **probe_links = NULL; +static struct bpf_object *objects = NULL; + +static int read_thread_closed = 1; + +static struct netdata_static_thread softirq_threads = {"SOFTIRQ KERNEL", + NULL, NULL, 1, NULL, + NULL, NULL }; + +/** + * Clean up the main thread. + * + * @param ptr thread data. + */ +static void softirq_cleanup(void *ptr) +{ + for (int i = 0; softirq_tracepoints[i].class != NULL; i++) { + ebpf_disable_tracepoint(&softirq_tracepoints[i]); + } + + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) { + return; + } + + heartbeat_t hb; + heartbeat_init(&hb); + uint32_t tick = 1 * USEC_PER_MS; + while (!read_thread_closed) { + usec_t dt = heartbeat_next(&hb, tick); + UNUSED(dt); + } + + freez(softirq_ebpf_vals); + freez(softirq_threads.thread); + + if (probe_links) { + struct bpf_program *prog; + size_t i = 0 ; + bpf_object__for_each_program(prog, objects) { + bpf_link__destroy(probe_links[i]); + i++; + } + bpf_object__close(objects); + } +} + +/***************************************************************** + * MAIN LOOP + *****************************************************************/ + +static void softirq_read_latency_map() +{ + int fd = softirq_maps[SOFTIRQ_MAP_LATENCY].map_fd; + int i; + for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) { + int test = bpf_map_lookup_elem(fd, &i, softirq_ebpf_vals); + if (unlikely(test < 0)) { + continue; + } + + uint64_t total_latency = 0; + int cpu_i; + int end = ebpf_nprocs; + for (cpu_i = 0; cpu_i < end; cpu_i++) { + total_latency += softirq_ebpf_vals[cpu_i].latency/1000; + } + + softirq_vals[i].latency = total_latency; + } +} + +/** + * Read eBPF maps for soft IRQ. + */ +static void *softirq_reader(void *ptr) +{ + read_thread_closed = 0; + + heartbeat_t hb; + heartbeat_init(&hb); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + + usec_t step = NETDATA_SOFTIRQ_SLEEP_MS * em->update_every; + while (!close_ebpf_plugin) { + usec_t dt = heartbeat_next(&hb, step); + UNUSED(dt); + + softirq_read_latency_map(); + } + + read_thread_closed = 1; + return NULL; +} + +static void softirq_create_charts(int update_every) +{ + ebpf_create_chart( + NETDATA_EBPF_SYSTEM_GROUP, + "softirq_latency", + "Software IRQ latency", + EBPF_COMMON_DIMENSION_MILLISECONDS, + "softirqs", + NULL, + NETDATA_EBPF_CHART_TYPE_STACKED, + NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS+1, + NULL, NULL, 0, update_every, + NETDATA_EBPF_MODULE_NAME_SOFTIRQ + ); + + fflush(stdout); +} + +static void softirq_create_dims() +{ + uint32_t i; + for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) { + ebpf_write_global_dimension( + softirq_vals[i].name, softirq_vals[i].name, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX] + ); + } +} + +static inline void softirq_write_dims() +{ + uint32_t i; + for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) { + write_chart_dimension(softirq_vals[i].name, softirq_vals[i].latency); + } +} + +/** +* Main loop for this collector. +*/ +static void softirq_collector(ebpf_module_t *em) +{ + softirq_ebpf_vals = callocz(ebpf_nprocs, sizeof(softirq_ebpf_val_t)); + + // create reader thread. 
+ softirq_threads.thread = mallocz(sizeof(netdata_thread_t)); + softirq_threads.start_routine = softirq_reader; + netdata_thread_create( + softirq_threads.thread, + softirq_threads.name, + NETDATA_THREAD_OPTION_JOINABLE, + softirq_reader, + em + ); + + // create chart and static dims. + pthread_mutex_lock(&lock); + softirq_create_charts(em->update_every); + softirq_create_dims(); + pthread_mutex_unlock(&lock); + + // loop and read from published data until ebpf plugin is closed. + int update_every = em->update_every; + int counter = update_every - 1; + while (!close_ebpf_plugin) { + pthread_mutex_lock(&collect_data_mutex); + pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); + + if (++counter == update_every) { + counter = 0; + pthread_mutex_lock(&lock); + + // write dims now for all hitherto discovered IRQs. + write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "softirq_latency"); + softirq_write_dims(); + write_end_chart(); + + pthread_mutex_unlock(&lock); + } + pthread_mutex_unlock(&collect_data_mutex); + } +} + +/***************************************************************** + * EBPF SOFTIRQ THREAD + *****************************************************************/ + +/** + * Soft IRQ latency thread. + * + * @param ptr a `ebpf_module_t *`. + * @return always NULL. + */ +void *ebpf_softirq_thread(void *ptr) +{ + netdata_thread_cleanup_push(softirq_cleanup, ptr); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + em->maps = softirq_maps; + + if (!em->enabled) { + goto endsoftirq; + } + + if (ebpf_enable_tracepoints(softirq_tracepoints) == 0) { + em->enabled = CONFIG_BOOLEAN_NO; + goto endsoftirq; + } + + probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects); + if (!probe_links) { + goto endsoftirq; + } + + softirq_collector(em); + +endsoftirq: + netdata_thread_cleanup_pop(1); + + return NULL; +} diff --git a/collectors/ebpf.plugin/ebpf_softirq.h b/collectors/ebpf.plugin/ebpf_softirq.h new file mode 100644 index 000000000..a22751895 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_softirq.h @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_EBPF_SOFTIRQ_H +#define NETDATA_EBPF_SOFTIRQ_H 1 + +/***************************************************************** + * copied from kernel-collectors repo, with modifications needed + * for inclusion here. + *****************************************************************/ + +#define NETDATA_SOFTIRQ_MAX_IRQS 10 + +typedef struct softirq_ebpf_val { + uint64_t latency; + uint64_t ts; +} softirq_ebpf_val_t; + +/***************************************************************** + * below this is eBPF plugin-specific code. 
+ *****************************************************************/
+
+#define NETDATA_EBPF_MODULE_NAME_SOFTIRQ "softirq"
+#define NETDATA_SOFTIRQ_SLEEP_MS 650000ULL
+#define NETDATA_SOFTIRQ_CONFIG_FILE "softirq.conf"
+
+typedef struct softirq_val {
+    uint64_t latency;
+    char *name;
+} softirq_val_t;
+
+extern struct config softirq_config;
+extern void *ebpf_softirq_thread(void *ptr);
+
+#endif /* NETDATA_EBPF_SOFTIRQ_H */
diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c
new file mode 100644
index 000000000..34750c79d
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_swap.c
@@ -0,0 +1,698 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf.h"
+#include "ebpf_swap.h"
+
+static char *swap_dimension_name[NETDATA_SWAP_END] = { "read", "write" };
+static netdata_syscall_stat_t swap_aggregated_data[NETDATA_SWAP_END];
+static netdata_publish_syscall_t swap_publish_aggregated[NETDATA_SWAP_END];
+
+static int read_thread_closed = 1;
+netdata_publish_swap_t *swap_vector = NULL;
+
+static netdata_idx_t swap_hash_values[NETDATA_SWAP_END];
+static netdata_idx_t *swap_values = NULL;
+
+netdata_publish_swap_t **swap_pid = NULL;
+
+struct config swap_config = { .first_section = NULL,
+                              .last_section = NULL,
+                              .mutex = NETDATA_MUTEX_INITIALIZER,
+                              .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+                                         .rwlock = AVL_LOCK_INITIALIZER } };
+
+static ebpf_local_maps_t swap_maps[] = {{.name = "tbl_pid_swap", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
+                                         .user_input = 0,
+                                         .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
+                                         .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+                                        {.name = "swap_ctrl", .internal_input = NETDATA_CONTROLLER_END,
+                                         .user_input = 0,
+                                         .type = NETDATA_EBPF_MAP_CONTROLLER,
+                                         .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+                                        {.name = "tbl_swap", .internal_input = NETDATA_SWAP_END,
+                                         .user_input = 0,
+                                         .type = NETDATA_EBPF_MAP_STATIC,
+                                         .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+                                        {.name = NULL, .internal_input = 0, .user_input = 0}};
+
+static struct bpf_link **probe_links = NULL;
+static struct bpf_object *objects = NULL;
+
+struct netdata_static_thread swap_threads = {"SWAP KERNEL", NULL, NULL, 1,
+                                             NULL, NULL, NULL};
+
+/*****************************************************************
+ *
+ *  FUNCTIONS TO CLOSE THE THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Clean the allocated swap PID structures.
+ */
+void clean_swap_pid_structures() {
+    struct pid_stat *pids = root_of_pids;
+    while (pids) {
+        freez(swap_pid[pids->pid]);
+
+        pids = pids->next;
+    }
+}
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_swap_cleanup(void *ptr)
+{
+    ebpf_module_t *em = (ebpf_module_t *)ptr;
+    if (!em->enabled)
+        return;
+
+    heartbeat_t hb;
+    heartbeat_init(&hb);
+    uint32_t tick = 2 * USEC_PER_MS;
+    while (!read_thread_closed) {
+        usec_t dt = heartbeat_next(&hb, tick);
+        UNUSED(dt);
+    }
+
+    ebpf_cleanup_publish_syscall(swap_publish_aggregated);
+
+    freez(swap_vector);
+    freez(swap_values);
+
+    if (probe_links) {
+        struct bpf_program *prog;
+        size_t i = 0 ;
+        bpf_object__for_each_program(prog, objects) {
+            bpf_link__destroy(probe_links[i]);
+            i++;
+        }
+        bpf_object__close(objects);
+    }
+}
+
+/*****************************************************************
+ *
+ *  COLLECTOR THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Apps Accumulator
+ *
+ * Sum all values read from the kernel and store them in the first slot.
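+ *
+ * Fold sketch (assuming bpf_map_lookup_elem() filled one slot per CPU): all
+ * slots collapse into out[0], i.e.
+ *
+ *     for (i = 1; i < nprocs; i++) {
+ *         out[0].read  += out[i].read;
+ *         out[0].write += out[i].write;
+ *     }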
+ *
+ * @param out the vector with read values.
+ */
+static void swap_apps_accumulator(netdata_publish_swap_t *out)
+{
+    int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+    netdata_publish_swap_t *total = &out[0];
+    for (i = 1; i < end; i++) {
+        netdata_publish_swap_t *w = &out[i];
+        total->write += w->write;
+        total->read += w->read;
+    }
+}
+
+/**
+ * Fill PID
+ *
+ * Fill PID structures
+ *
+ * @param current_pid the pid for which we are collecting data
+ * @param publish     the values read from the hash tables
+ */
+static void swap_fill_pid(uint32_t current_pid, netdata_publish_swap_t *publish)
+{
+    netdata_publish_swap_t *curr = swap_pid[current_pid];
+    if (!curr) {
+        curr = callocz(1, sizeof(netdata_publish_swap_t));
+        swap_pid[current_pid] = curr;
+    }
+
+    memcpy(curr, publish, sizeof(netdata_publish_swap_t));
+}
+
+/**
+ * Update cgroup
+ *
+ * Update cgroup data based on the values read from the PID hash table.
+ */
+static void ebpf_update_swap_cgroup()
+{
+    ebpf_cgroup_target_t *ect ;
+    netdata_publish_swap_t *cv = swap_vector;
+    int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
+    size_t length = sizeof(netdata_publish_swap_t)*ebpf_nprocs;
+    pthread_mutex_lock(&mutex_cgroup_shm);
+    for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+        struct pid_on_target2 *pids;
+        for (pids = ect->pids; pids; pids = pids->next) {
+            int pid = pids->pid;
+            netdata_publish_swap_t *out = &pids->swap;
+            if (likely(swap_pid) && swap_pid[pid]) {
+                netdata_publish_swap_t *in = swap_pid[pid];
+
+                memcpy(out, in, sizeof(netdata_publish_swap_t));
+            } else {
+                memset(cv, 0, length);
+                if (!bpf_map_lookup_elem(fd, &pid, cv)) {
+                    swap_apps_accumulator(cv);
+
+                    memcpy(out, cv, sizeof(netdata_publish_swap_t));
+                }
+            }
+        }
+    }
+    pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+ * Read APPS table
+ *
+ * Read the apps table and store the data inside the structure.
+ */
+static void read_apps_table()
+{
+    netdata_publish_swap_t *cv = swap_vector;
+    uint32_t key;
+    struct pid_stat *pids = root_of_pids;
+    int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
+    size_t length = sizeof(netdata_publish_swap_t)*ebpf_nprocs;
+    while (pids) {
+        key = pids->pid;
+
+        if (bpf_map_lookup_elem(fd, &key, cv)) {
+            pids = pids->next;
+            continue;
+        }
+
+        swap_apps_accumulator(cv);
+
+        swap_fill_pid(key, cv);
+
+        // We clean the vector to avoid carrying data read for one process over to another.
+        memset(cv, 0, length);
+
+        pids = pids->next;
+    }
+}
+
+/**
+* Send global
+*
+* Send global charts to Netdata
+*/
+static void swap_send_global()
+{
+    write_io_chart(NETDATA_MEM_SWAP_CHART, NETDATA_EBPF_SYSTEM_GROUP,
+                   swap_publish_aggregated[NETDATA_KEY_SWAP_WRITEPAGE_CALL].dimension,
+                   (long long) swap_hash_values[NETDATA_KEY_SWAP_WRITEPAGE_CALL],
+                   swap_publish_aggregated[NETDATA_KEY_SWAP_READPAGE_CALL].dimension,
+                   (long long) swap_hash_values[NETDATA_KEY_SWAP_READPAGE_CALL]);
+}
+
+/**
+ * Read global counter
+ *
+ * Read the table with the number of calls to all functions.
+ */
+static void read_global_table()
+{
+    netdata_idx_t *stored = swap_values;
+    netdata_idx_t *val = swap_hash_values;
+    int fd = swap_maps[NETDATA_SWAP_GLOBAL_TABLE].map_fd;
+
+    uint32_t i, end = NETDATA_SWAP_END;
+    for (i = NETDATA_KEY_SWAP_READPAGE_CALL; i < end; i++) {
+        if (!bpf_map_lookup_elem(fd, &i, stored)) {
+            int j;
+            int last = ebpf_nprocs;
+            netdata_idx_t total = 0;
+            for (j = 0; j < last; j++)
+                total += stored[j];
+
+            val[i] = total;
+        }
+    }
+}
+
+/**
+ * Swap read hash
+ *
+ * This is the thread callback.
+ *
+ * @param ptr a pointer to the `ebpf_module_t` for this module.
+ *
+ * @return It always returns NULL.
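+ *
+ * Timing sketch: the reader wakes every NETDATA_SWAP_SLEEP_MS * update_every
+ * microseconds; e.g. with update_every = 5 this is 850000 * 5 us = 4.25 s
+ * between reads of the kernel table.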
+ */
+void *ebpf_swap_read_hash(void *ptr)
+{
+    read_thread_closed = 0;
+
+    heartbeat_t hb;
+    heartbeat_init(&hb);
+
+    ebpf_module_t *em = (ebpf_module_t *)ptr;
+    usec_t step = NETDATA_SWAP_SLEEP_MS * em->update_every;
+    while (!close_ebpf_plugin) {
+        usec_t dt = heartbeat_next(&hb, step);
+        (void)dt;
+
+        read_global_table();
+    }
+
+    read_thread_closed = 1;
+    return NULL;
+}
+
+/**
+ * Sum PIDs
+ *
+ * Sum the values for all PIDs of a target.
+ *
+ * @param swap the output structure.
+ * @param root the linked list of PIDs to accumulate.
+ */
+static void ebpf_swap_sum_pids(netdata_publish_swap_t *swap, struct pid_on_target *root)
+{
+    uint64_t local_read = 0;
+    uint64_t local_write = 0;
+
+    while (root) {
+        int32_t pid = root->pid;
+        netdata_publish_swap_t *w = swap_pid[pid];
+        if (w) {
+            local_write += w->write;
+            local_read += w->read;
+        }
+        root = root->next;
+    }
+
+    // These conditions were added because we are using the incremental algorithm:
+    // the published value must never move backwards.
+    swap->write = (local_write >= swap->write) ? local_write : swap->write;
+    swap->read = (local_read >= swap->read) ? local_read : swap->read;
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param root the target list.
+*/
+void ebpf_swap_send_apps_data(struct target *root)
+{
+    struct target *w;
+    for (w = root; w; w = w->next) {
+        if (unlikely(w->exposed && w->processes)) {
+            ebpf_swap_sum_pids(&w->swap, w->root_pid);
+        }
+    }
+
+    write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_MEM_SWAP_READ_CHART);
+    for (w = root; w; w = w->next) {
+        if (unlikely(w->exposed && w->processes)) {
+            write_chart_dimension(w->name, (long long) w->swap.read);
+        }
+    }
+    write_end_chart();
+
+    write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_MEM_SWAP_WRITE_CHART);
+    for (w = root; w; w = w->next) {
+        if (unlikely(w->exposed && w->processes)) {
+            write_chart_dimension(w->name, (long long) w->swap.write);
+        }
+    }
+    write_end_chart();
+}
+
+/**
+ * Sum PIDs
+ *
+ * Sum the values for all PIDs of a cgroup.
+ *
+ * @param swap the output structure.
+ * @param pids the linked list of PIDs to accumulate.
+ */
+static void ebpf_swap_sum_cgroup_pids(netdata_publish_swap_t *swap, struct pid_on_target2 *pids)
+{
+    uint64_t local_read = 0;
+    uint64_t local_write = 0;
+
+    while (pids) {
+        netdata_publish_swap_t *w = &pids->swap;
+        local_write += w->write;
+        local_read += w->read;
+
+        pids = pids->next;
+    }
+
+    // These conditions were added because we are using the incremental algorithm:
+    // the published value must never move backwards.
+    swap->write = (local_write >= swap->write) ? local_write : swap->write;
+    swap->read = (local_read >= swap->read) ? local_read : swap->read;
+}
+
+/**
+ * Send Systemd charts
+ *
+ * Send collected data to Netdata.
+ *
+ * @return It returns the status of the charts: zero when a specific dimension must be removed
+ *         (so the charts have to be re-created), otherwise 1 to avoid re-creating the charts.
+ */
+static int ebpf_send_systemd_swap_charts()
+{
+    int ret = 1;
+    ebpf_cgroup_target_t *ect;
+    write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_READ_CHART);
+    for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+        if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+            write_chart_dimension(ect->name, (long long) ect->publish_systemd_swap.read);
+        } else
+            ret = 0;
+    }
+    write_end_chart();
+
+    write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_WRITE_CHART);
+    for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+        if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+            write_chart_dimension(ect->name, (long long) ect->publish_systemd_swap.write);
+        }
+    }
+    write_end_chart();
+
+    return ret;
+}
+
+/**
+ * Create specific swap charts
+ *
+ * Create charts for cgroup/application.
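+ *
+ * Illustrative sketch (cgroup name and resolved strings are hypothetical):
+ * for a cgroup "mygroup" this registers two line charts, roughly
+ *
+ *     CHART mygroup.swap_read_call  ... 'Calls to function swap_readpage.'  ... line <prio_base + 5100>
+ *     CHART mygroup.swap_write_call ... 'Calls to function swap_writepage.' ... line <prio_base + 5101>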
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_create_specific_swap_charts(char *type, int update_every)
+{
+    ebpf_create_chart(type, NETDATA_MEM_SWAP_READ_CHART,
+                      "Calls to function swap_readpage.",
+                      EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+                      NETDATA_CGROUP_SWAP_READ_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
+                      NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5100,
+                      ebpf_create_global_dimension,
+                      swap_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+
+    ebpf_create_chart(type, NETDATA_MEM_SWAP_WRITE_CHART,
+                      "Calls to function swap_writepage.",
+                      EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+                      NETDATA_CGROUP_SWAP_WRITE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
+                      NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5101,
+                      ebpf_create_global_dimension,
+                      &swap_publish_aggregated[NETDATA_KEY_SWAP_WRITEPAGE_CALL], 1,
+                      update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+}
+
+/**
+ * Obsolete specific swap charts
+ *
+ * Mark the charts of a cgroup/application as obsolete.
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_obsolete_specific_swap_charts(char *type, int update_every)
+{
+    ebpf_write_chart_obsolete(type, NETDATA_MEM_SWAP_READ_CHART, "Calls to function swap_readpage.",
+                              EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+                              NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SWAP_READ_CONTEXT,
+                              NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5100, update_every);
+
+    ebpf_write_chart_obsolete(type, NETDATA_MEM_SWAP_WRITE_CHART, "Calls to function swap_writepage.",
+                              EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+                              NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SWAP_WRITE_CONTEXT,
+                              NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5101, update_every);
+}
+
+/**
+ * Send Specific Swap data
+ *
+ * Send data for a specific cgroup/apps target.
+ *
+ * @param type   chart type
+ * @param values structure with the values that will be sent to Netdata
+ */
+static void ebpf_send_specific_swap_data(char *type, netdata_publish_swap_t *values)
+{
+    write_begin_chart(type, NETDATA_MEM_SWAP_READ_CHART);
+    write_chart_dimension(swap_publish_aggregated[NETDATA_KEY_SWAP_READPAGE_CALL].name, (long long) values->read);
+    write_end_chart();
+
+    write_begin_chart(type, NETDATA_MEM_SWAP_WRITE_CHART);
+    write_chart_dimension(swap_publish_aggregated[NETDATA_KEY_SWAP_WRITEPAGE_CALL].name, (long long) values->write);
+    write_end_chart();
+}
+
+/**
+ * Create Systemd Swap Charts
+ *
+ * Create charts when systemd is enabled
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ **/
+static void ebpf_create_systemd_swap_charts(int update_every)
+{
+    ebpf_create_charts_on_systemd(NETDATA_MEM_SWAP_READ_CHART,
+                                  "Calls to swap_readpage.",
+                                  EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+                                  NETDATA_EBPF_CHART_TYPE_STACKED, 20191,
+                                  ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_SWAP_READ_CONTEXT,
+                                  NETDATA_EBPF_MODULE_NAME_SWAP, update_every);
+
+    ebpf_create_charts_on_systemd(NETDATA_MEM_SWAP_WRITE_CHART,
+                                  "Calls to function swap_writepage.",
+                                  EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+                                  NETDATA_EBPF_CHART_TYPE_STACKED, 20192,
+                                  ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_SWAP_WRITE_CONTEXT,
+                                  NETDATA_EBPF_MODULE_NAME_SWAP, update_every);
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
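+ *
+ * Bookkeeping sketch (condensed pseudo-code of the loop below): chart
+ * existence per cgroup is tracked in ect->flags, roughly
+ *
+ *     if (!(flags & HAS_SWAP_CHART) && updated) { create(); flags |= HAS_SWAP_CHART; }
+ *     if (flags & HAS_SWAP_CHART)
+ *         updated ? send() : (obsolete(), flags &= ~HAS_SWAP_CHART);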
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+*/
+void ebpf_swap_send_cgroup_data(int update_every)
+{
+    if (!ebpf_cgroup_pids)
+        return;
+
+    pthread_mutex_lock(&mutex_cgroup_shm);
+    ebpf_cgroup_target_t *ect;
+    for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+        ebpf_swap_sum_cgroup_pids(&ect->publish_systemd_swap, ect->pids);
+    }
+
+    int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
+
+    if (has_systemd) {
+        static int systemd_charts = 0;
+        if (!systemd_charts) {
+            ebpf_create_systemd_swap_charts(update_every);
+            systemd_charts = 1;
+            fflush(stdout);
+        }
+
+        systemd_charts = ebpf_send_systemd_swap_charts();
+    }
+
+    for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+        if (ect->systemd)
+            continue;
+
+        if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_SWAP_CHART) && ect->updated) {
+            ebpf_create_specific_swap_charts(ect->name, update_every);
+            ect->flags |= NETDATA_EBPF_CGROUP_HAS_SWAP_CHART;
+        }
+
+        if (ect->flags & NETDATA_EBPF_CGROUP_HAS_SWAP_CHART) {
+            if (ect->updated) {
+                ebpf_send_specific_swap_data(ect->name, &ect->publish_systemd_swap);
+            } else {
+                ebpf_obsolete_specific_swap_charts(ect->name, update_every);
+                ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_SWAP_CHART;
+            }
+        }
+    }
+
+    pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+* Main loop for this collector.
+*/
+static void swap_collector(ebpf_module_t *em)
+{
+    swap_threads.thread = mallocz(sizeof(netdata_thread_t));
+    swap_threads.start_routine = ebpf_swap_read_hash;
+
+    netdata_thread_create(swap_threads.thread, swap_threads.name, NETDATA_THREAD_OPTION_JOINABLE,
+                          ebpf_swap_read_hash, em);
+
+    int apps = em->apps_charts;
+    int cgroup = em->cgroup_charts;
+    int update_every = em->update_every;
+    int counter = update_every - 1;
+    while (!close_ebpf_plugin) {
+        pthread_mutex_lock(&collect_data_mutex);
+        pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+        if (++counter == update_every) {
+            counter = 0;
+            if (apps)
+                read_apps_table();
+
+            if (cgroup)
+                ebpf_update_swap_cgroup();
+
+            pthread_mutex_lock(&lock);
+
+            swap_send_global();
+
+            if (apps)
+                ebpf_swap_send_apps_data(apps_groups_root_target);
+
+            if (cgroup)
+                ebpf_swap_send_cgroup_data(update_every);
+
+            pthread_mutex_unlock(&lock);
+        }
+        pthread_mutex_unlock(&collect_data_mutex);
+    }
+}
+
+/*****************************************************************
+ *
+ *  INITIALIZE THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Create apps charts
+ *
+ * Call ebpf_create_charts_on_apps to create the charts on the apps submenu.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr)
+{
+    struct target *root = ptr;
+    ebpf_create_charts_on_apps(NETDATA_MEM_SWAP_READ_CHART,
+                               "Calls to function swap_readpage.",
+                               EBPF_COMMON_DIMENSION_CALL,
+                               NETDATA_SWAP_SUBMENU,
+                               NETDATA_EBPF_CHART_TYPE_STACKED,
+                               20191,
+                               ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+                               root, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+
+    ebpf_create_charts_on_apps(NETDATA_MEM_SWAP_WRITE_CHART,
+                               "Calls to function swap_writepage.",
+                               EBPF_COMMON_DIMENSION_CALL,
+                               NETDATA_SWAP_SUBMENU,
+                               NETDATA_EBPF_CHART_TYPE_STACKED,
+                               20192,
+                               ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+                               root, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+}
+
+/**
+ * Allocate vectors used with this thread.
+ *
+ * We do not check the return value because callocz() already does it and shuts the
+ * software down when the allocation is not possible.
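+ *
+ * Layout sketch (PID value hypothetical): swap_pid is a sparse, PID-indexed
+ * array of pointers filled lazily by swap_fill_pid(), e.g. after a lookup
+ * for PID 1234:
+ *
+ *     swap_pid[1234] -> { .read = N, .write = M }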
+ *
+ * @param apps whether the apps charts are enabled
+ */
+static void ebpf_swap_allocate_global_vectors(int apps)
+{
+    if (apps)
+        swap_pid = callocz((size_t)pid_max, sizeof(netdata_publish_swap_t *));
+
+    swap_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_swap_t));
+
+    swap_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
+
+    memset(swap_hash_values, 0, sizeof(swap_hash_values));
+}
+
+/*****************************************************************
+ *
+ *  MAIN THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Create global charts
+ *
+ * Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_create_swap_charts(int update_every)
+{
+    ebpf_create_chart(NETDATA_EBPF_SYSTEM_GROUP, NETDATA_MEM_SWAP_CHART,
+                      "Calls to internal functions used to access swap.",
+                      EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_SWAP_SUBMENU,
+                      NULL,
+                      NETDATA_EBPF_CHART_TYPE_LINE,
+                      202,
+                      ebpf_create_global_dimension,
+                      swap_publish_aggregated, NETDATA_SWAP_END,
+                      update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+}
+
+/**
+ * SWAP thread
+ *
+ * Thread that runs the swap module.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always returns NULL
+ */
+void *ebpf_swap_thread(void *ptr)
+{
+    netdata_thread_cleanup_push(ebpf_swap_cleanup, ptr);
+
+    ebpf_module_t *em = (ebpf_module_t *)ptr;
+    em->maps = swap_maps;
+
+    ebpf_update_pid_table(&swap_maps[NETDATA_PID_SWAP_TABLE], em);
+
+    if (!em->enabled)
+        goto endswap;
+
+    probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+    if (!probe_links) {
+        goto endswap;
+    }
+
+    ebpf_swap_allocate_global_vectors(em->apps_charts);
+
+    int algorithms[NETDATA_SWAP_END] = { NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX };
+    ebpf_global_labels(swap_aggregated_data, swap_publish_aggregated, swap_dimension_name, swap_dimension_name,
+                       algorithms, NETDATA_SWAP_END);
+
+    pthread_mutex_lock(&lock);
+    ebpf_create_swap_charts(em->update_every);
+    pthread_mutex_unlock(&lock);
+
+    swap_collector(em);
+
+endswap:
+    netdata_thread_cleanup_pop(1);
+    return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_swap.h b/collectors/ebpf.plugin/ebpf_swap.h
new file mode 100644
index 000000000..1dba9c17a
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_swap.h
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_SWAP_H
+#define NETDATA_EBPF_SWAP_H 1
+
+// Module name
+#define NETDATA_EBPF_MODULE_NAME_SWAP "swap"
+
+#define NETDATA_SWAP_SLEEP_MS 850000ULL
+
+// charts
+#define NETDATA_MEM_SWAP_CHART "swapcalls"
+#define NETDATA_MEM_SWAP_READ_CHART "swap_read_call"
+#define NETDATA_MEM_SWAP_WRITE_CHART "swap_write_call"
+#define NETDATA_SWAP_SUBMENU "swap"
+
+// configuration file
+#define NETDATA_DIRECTORY_SWAP_CONFIG_FILE "swap.conf"
+
+// Contexts
+#define NETDATA_CGROUP_SWAP_READ_CONTEXT "cgroup.swap_read"
+#define NETDATA_CGROUP_SWAP_WRITE_CONTEXT "cgroup.swap_write"
+#define NETDATA_SYSTEMD_SWAP_READ_CONTEXT "services.swap_read"
+#define NETDATA_SYSTEMD_SWAP_WRITE_CONTEXT "services.swap_write"
+
+typedef struct netdata_publish_swap {
+    uint64_t read;
+    uint64_t write;
+} netdata_publish_swap_t;
+
+enum swap_tables {
+    NETDATA_PID_SWAP_TABLE,
+    NETDATA_SWAP_CONTROLLER,
+    NETDATA_SWAP_GLOBAL_TABLE
+};
+
+enum swap_counters {
+    NETDATA_KEY_SWAP_READPAGE_CALL,
+    NETDATA_KEY_SWAP_WRITEPAGE_CALL,
+
+    // Keep this as last and don't skip numbers as it
is used as element counter + NETDATA_SWAP_END +}; + +extern netdata_publish_swap_t **swap_pid; + +extern void *ebpf_swap_thread(void *ptr); +extern void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr); +extern void clean_swap_pid_structures(); + +extern struct config swap_config; + +#endif diff --git a/collectors/ebpf.plugin/ebpf_sync.c b/collectors/ebpf.plugin/ebpf_sync.c index f0db1cc4a..4bd62bcae 100644 --- a/collectors/ebpf.plugin/ebpf_sync.c +++ b/collectors/ebpf.plugin/ebpf_sync.c @@ -3,8 +3,6 @@ #include "ebpf.h" #include "ebpf_sync.h" -static ebpf_data_t sync_data; - static char *sync_counter_dimension_name[NETDATA_SYNC_IDX_END] = { "sync", "syncfs", "msync", "fsync", "fdatasync", "sync_file_range" }; static netdata_syscall_stat_t sync_counter_aggregated_data[NETDATA_SYNC_IDX_END]; @@ -17,6 +15,28 @@ static netdata_idx_t sync_hash_values[NETDATA_SYNC_IDX_END]; struct netdata_static_thread sync_threads = {"SYNC KERNEL", NULL, NULL, 1, NULL, NULL, NULL}; +static ebpf_local_maps_t sync_maps[] = {{.name = "tbl_sync", .internal_input = NETDATA_SYNC_END, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_syncfs", .internal_input = NETDATA_SYNC_END, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_msync", .internal_input = NETDATA_SYNC_END, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_fsync", .internal_input = NETDATA_SYNC_END, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_fdatasync", .internal_input = NETDATA_SYNC_END, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_syncfr", .internal_input = NETDATA_SYNC_END, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = NULL, .internal_input = 0, .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}}; + struct config sync_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER, @@ -53,15 +73,8 @@ static int ebpf_sync_initialize_syscall(ebpf_module_t *em) for (i = 0; local_syscalls[i].syscall; i++) { ebpf_sync_syscalls_t *w = &local_syscalls[i]; if (!w->probe_links && w->enabled) { - fill_ebpf_data(&w->kernel_info); - if (ebpf_update_kernel(&w->kernel_info)) { - em->thread_name = saved_name; - error("Cannot update the kernel for eBPF module %s", w->syscall); - return -1; - } - em->thread_name = w->syscall; - w->probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &w->objects, w->kernel_info.map_fd); + w->probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &w->objects); if (!w->probe_links) { em->thread_name = saved_name; return -1; @@ -95,7 +108,7 @@ static void read_global_table() int i; for (i = 0; local_syscalls[i].syscall; i++) { if (local_syscalls[i].enabled) { - int fd = local_syscalls[i].kernel_info.map_fd[NETDATA_SYNC_GLOBLAL_TABLE]; + int fd = sync_maps[i].map_fd; if (!bpf_map_lookup_elem(fd, &idx, &stored)) { sync_hash_values[i] = stored; } @@ -119,7 +132,7 @@ void *ebpf_sync_read_hash(void *ptr) heartbeat_t hb; heartbeat_init(&hb); - usec_t step = NETDATA_EBPF_SYNC_SLEEP_MS * em->update_time; + usec_t step = NETDATA_EBPF_SYNC_SLEEP_MS * em->update_every; while (!close_ebpf_plugin) { usec_t dt = heartbeat_next(&hb, step); @@ -197,15 
+210,20 @@ static void sync_collector(ebpf_module_t *em) netdata_thread_create(sync_threads.thread, sync_threads.name, NETDATA_THREAD_OPTION_JOINABLE, ebpf_sync_read_hash, em); + int update_every = em->update_every; + int counter = update_every - 1; while (!close_ebpf_plugin) { pthread_mutex_lock(&collect_data_mutex); pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); - pthread_mutex_lock(&lock); + if (++counter == update_every) { + counter = 0; + pthread_mutex_lock(&lock); - sync_send_data(); + sync_send_data(); - pthread_mutex_unlock(&lock); + pthread_mutex_unlock(&lock); + } pthread_mutex_unlock(&collect_data_mutex); } } @@ -228,8 +246,6 @@ void ebpf_sync_cleanup_objects() for (i = 0; local_syscalls[i].syscall; i++) { ebpf_sync_syscalls_t *w = &local_syscalls[i]; if (w->probe_links) { - freez(w->kernel_info.map_fd); - struct bpf_program *prog; size_t j = 0 ; bpf_object__for_each_program(prog, w->objects) { @@ -280,15 +296,19 @@ static void ebpf_sync_cleanup(void *ptr) * @param order order number of the specified chart * @param idx the first index with data. * @param end the last index with data. + * @param update_every value to overwrite the update frequency set by the server. */ static void ebpf_create_sync_chart(char *id, char *title, int order, int idx, - int end) + int end, + int update_every) { ebpf_write_chart_cmd(NETDATA_EBPF_MEMORY_GROUP, id, title, EBPF_COMMON_DIMENSION_CALL, - NETDATA_EBPF_SYNC_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NULL, order); + NETDATA_EBPF_SYNC_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NULL, order, + update_every, + NETDATA_EBPF_MODULE_NAME_SYNC); netdata_publish_syscall_t *move = &sync_counter_publish_aggregated[idx]; @@ -305,28 +325,30 @@ static void ebpf_create_sync_chart(char *id, * Create global charts * * Call ebpf_create_chart to create the charts for the collector. + * + * @param update_every value to overwrite the update frequency set by the server. 
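+ *
+ * Selection sketch (indexes as in ebpf_sync.h): a chart is created only when
+ * at least one of the syscalls it groups is enabled, e.g.
+ *
+ *     if (enabled[NETDATA_SYNC_FSYNC_IDX] || enabled[NETDATA_SYNC_FDATASYNC_IDX])
+ *         create(NETDATA_EBPF_FILE_SYNC_CHART, 21300);   // pseudo-code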
*/ -static void ebpf_create_sync_charts() +static void ebpf_create_sync_charts(int update_every) { if (local_syscalls[NETDATA_SYNC_FSYNC_IDX].enabled || local_syscalls[NETDATA_SYNC_FDATASYNC_IDX].enabled) ebpf_create_sync_chart(NETDATA_EBPF_FILE_SYNC_CHART, "Monitor calls for fsync(2) and fdatasync(2).", 21300, - NETDATA_SYNC_FSYNC_IDX, NETDATA_SYNC_FDATASYNC_IDX); + NETDATA_SYNC_FSYNC_IDX, NETDATA_SYNC_FDATASYNC_IDX, update_every); if (local_syscalls[NETDATA_SYNC_MSYNC_IDX].enabled) ebpf_create_sync_chart(NETDATA_EBPF_MSYNC_CHART, "Monitor calls for msync(2).", 21301, - NETDATA_SYNC_MSYNC_IDX, NETDATA_SYNC_MSYNC_IDX); + NETDATA_SYNC_MSYNC_IDX, NETDATA_SYNC_MSYNC_IDX, update_every); if (local_syscalls[NETDATA_SYNC_SYNC_IDX].enabled || local_syscalls[NETDATA_SYNC_SYNCFS_IDX].enabled) ebpf_create_sync_chart(NETDATA_EBPF_SYNC_CHART, "Monitor calls for sync(2) and syncfs(2).", 21302, - NETDATA_SYNC_SYNC_IDX, NETDATA_SYNC_SYNCFS_IDX); + NETDATA_SYNC_SYNC_IDX, NETDATA_SYNC_SYNCFS_IDX, update_every); if (local_syscalls[NETDATA_SYNC_SYNC_FILE_RANGE_IDX].enabled) ebpf_create_sync_chart(NETDATA_EBPF_FILE_SEGMENT_CHART, "Monitor calls for sync_file_range(2).", 21303, - NETDATA_SYNC_SYNC_FILE_RANGE_IDX, NETDATA_SYNC_SYNC_FILE_RANGE_IDX); + NETDATA_SYNC_SYNC_FILE_RANGE_IDX, NETDATA_SYNC_SYNC_FILE_RANGE_IDX, update_every); } /** @@ -357,9 +379,8 @@ void *ebpf_sync_thread(void *ptr) netdata_thread_cleanup_push(ebpf_sync_cleanup, ptr); ebpf_module_t *em = (ebpf_module_t *)ptr; - fill_ebpf_data(&sync_data); + em->maps = sync_maps; - ebpf_update_module(em, &sync_config, NETDATA_SYNC_CONFIG_FILE); ebpf_sync_parse_syscalls(); if (!em->enabled) @@ -378,7 +399,7 @@ void *ebpf_sync_thread(void *ptr) algorithms, NETDATA_SYNC_IDX_END); pthread_mutex_lock(&lock); - ebpf_create_sync_charts(); + ebpf_create_sync_charts(em->update_every); pthread_mutex_unlock(&lock); sync_collector(em); diff --git a/collectors/ebpf.plugin/ebpf_sync.h b/collectors/ebpf.plugin/ebpf_sync.h index 458318218..1f811d341 100644 --- a/collectors/ebpf.plugin/ebpf_sync.h +++ b/collectors/ebpf.plugin/ebpf_sync.h @@ -3,6 +3,9 @@ #ifndef NETDATA_EBPF_SYNC_H #define NETDATA_EBPF_SYNC_H 1 +// Module name +#define NETDATA_EBPF_MODULE_NAME_SYNC "sync" + // charts #define NETDATA_EBPF_SYNC_CHART "sync" #define NETDATA_EBPF_MSYNC_CHART "memory_map" @@ -34,8 +37,6 @@ typedef struct ebpf_sync_syscalls { struct bpf_object *objects; struct bpf_link **probe_links; - - ebpf_data_t kernel_info; } ebpf_sync_syscalls_t; enum netdata_sync_charts { @@ -50,5 +51,6 @@ enum netdata_sync_table { }; extern void *ebpf_sync_thread(void *ptr); +extern struct config sync_config; #endif /* NETDATA_EBPF_SYNC_H */ diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/collectors/ebpf.plugin/ebpf_vfs.c new file mode 100644 index 000000000..060469ec5 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_vfs.c @@ -0,0 +1,1601 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include + +#include "ebpf.h" +#include "ebpf_vfs.h" + +static char *vfs_dimension_names[NETDATA_KEY_PUBLISH_VFS_END] = { "delete", "read", "write", + "fsync", "open", "create" }; +static char *vfs_id_names[NETDATA_KEY_PUBLISH_VFS_END] = { "vfs_unlink", "vfs_read", "vfs_write", + "vfs_fsync", "vfs_open", "vfs_create"}; + +static netdata_idx_t *vfs_hash_values = NULL; +static netdata_syscall_stat_t vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_END]; +static netdata_publish_syscall_t vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_END]; +netdata_publish_vfs_t **vfs_pid = NULL; +netdata_publish_vfs_t *vfs_vector = NULL; + 
+static ebpf_local_maps_t vfs_maps[] = {{.name = "tbl_vfs_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE, + .user_input = 0, .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "tbl_vfs_stats", .internal_input = NETDATA_VFS_COUNTER, + .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = "vfs_ctrl", .internal_input = NETDATA_CONTROLLER_END, + .user_input = 0, + .type = NETDATA_EBPF_MAP_CONTROLLER, + .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}, + {.name = NULL, .internal_input = 0, .user_input = 0}}; + +struct config vfs_config = { .first_section = NULL, + .last_section = NULL, + .mutex = NETDATA_MUTEX_INITIALIZER, + .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare }, + .rwlock = AVL_LOCK_INITIALIZER } }; + +static struct bpf_object *objects = NULL; +static struct bpf_link **probe_links = NULL; + +struct netdata_static_thread vfs_threads = {"VFS KERNEL", + NULL, NULL, 1, NULL, + NULL, NULL}; + +static int read_thread_closed = 1; + +/***************************************************************** + * + * FUNCTIONS TO CLOSE THE THREAD + * + *****************************************************************/ + +/** + * Clean PID structures + * + * Clean the allocated structures. + */ +void clean_vfs_pid_structures() { + struct pid_stat *pids = root_of_pids; + while (pids) { + freez(vfs_pid[pids->pid]); + + pids = pids->next; + } +} + +/** +* Clean up the main thread. +* +* @param ptr thread data. +**/ +static void ebpf_vfs_cleanup(void *ptr) +{ + ebpf_module_t *em = (ebpf_module_t *)ptr; + if (!em->enabled) + return; + + heartbeat_t hb; + heartbeat_init(&hb); + uint32_t tick = 50 * USEC_PER_MS; + while (!read_thread_closed) { + usec_t dt = heartbeat_next(&hb, tick); + UNUSED(dt); + } + + freez(vfs_hash_values); + freez(vfs_vector); + + if (probe_links) { + struct bpf_program *prog; + size_t i = 0 ; + bpf_object__for_each_program(prog, objects) { + bpf_link__destroy(probe_links[i]); + i++; + } + bpf_object__close(objects); + } +} + +/***************************************************************** + * + * FUNCTIONS WITH THE MAIN LOOP + * + *****************************************************************/ + +/** + * Send data to Netdata calling auxiliary functions. 
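+ *
+ * Note on modes (a reading of the checks below): the *_ERR charts are written
+ * only when em->mode < MODE_ENTRY, i.e. when return probes are attached and
+ * failed calls can be counted; entry-only mode skips them.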
+ * + * @param em the structure with thread information +*/ +static void ebpf_vfs_send_data(ebpf_module_t *em) +{ + netdata_publish_vfs_common_t pvc; + + pvc.write = (long)vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_WRITE].bytes; + pvc.read = (long)vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_READ].bytes; + + write_count_chart(NETDATA_VFS_FILE_CLEAN_COUNT, NETDATA_FILESYSTEM_FAMILY, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK], 1); + + write_count_chart(NETDATA_VFS_FILE_IO_COUNT, NETDATA_FILESYSTEM_FAMILY, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ], 2); + + if (em->mode < MODE_ENTRY) { + write_err_chart(NETDATA_VFS_FILE_ERR_COUNT, NETDATA_FILESYSTEM_FAMILY, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ], 2); + } + + write_io_chart(NETDATA_VFS_IO_FILE_BYTES, NETDATA_FILESYSTEM_FAMILY, vfs_id_names[NETDATA_KEY_PUBLISH_VFS_WRITE], + (long long)pvc.write, vfs_id_names[NETDATA_KEY_PUBLISH_VFS_READ], (long long)pvc.read); + + write_count_chart(NETDATA_VFS_FSYNC, NETDATA_FILESYSTEM_FAMILY, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC], 1); + + if (em->mode < MODE_ENTRY) { + write_err_chart(NETDATA_VFS_FSYNC_ERR, NETDATA_FILESYSTEM_FAMILY, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC], 1); + } + + write_count_chart(NETDATA_VFS_OPEN, NETDATA_FILESYSTEM_FAMILY, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN], 1); + + if (em->mode < MODE_ENTRY) { + write_err_chart(NETDATA_VFS_OPEN_ERR, NETDATA_FILESYSTEM_FAMILY, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN], 1); + } + + write_count_chart(NETDATA_VFS_CREATE, NETDATA_FILESYSTEM_FAMILY, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE], 1); + + if (em->mode < MODE_ENTRY) { + write_err_chart( + NETDATA_VFS_CREATE_ERR, + NETDATA_FILESYSTEM_FAMILY, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE], + 1); + } +} + +/** + * Read the hash table and store data to allocated vectors. 
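+ *
+ * Aggregation sketch (index names abbreviated): every slot is summed across
+ * CPUs, then the vector variants fold into their scalar counterparts, e.g.
+ *
+ *     READ.ncall  = res[CALLS_VFS_READ]  + res[CALLS_VFS_READV];
+ *     WRITE.ncall = res[CALLS_VFS_WRITE] + res[CALLS_VFS_WRITEV];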
+ */
+static void read_global_table()
+{
+    uint64_t idx;
+    netdata_idx_t res[NETDATA_VFS_COUNTER];
+
+    netdata_idx_t *val = vfs_hash_values;
+    int fd = vfs_maps[NETDATA_VFS_ALL].map_fd;
+    for (idx = 0; idx < NETDATA_VFS_COUNTER; idx++) {
+        uint64_t total = 0;
+        if (!bpf_map_lookup_elem(fd, &idx, val)) {
+            int i;
+            int end = ebpf_nprocs;
+            for (i = 0; i < end; i++)
+                total += val[i];
+        }
+        res[idx] = total;
+    }
+
+    vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].ncall = res[NETDATA_KEY_CALLS_VFS_UNLINK];
+    vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].ncall = res[NETDATA_KEY_CALLS_VFS_READ] +
+                                                                 res[NETDATA_KEY_CALLS_VFS_READV];
+    vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].ncall = res[NETDATA_KEY_CALLS_VFS_WRITE] +
+                                                                  res[NETDATA_KEY_CALLS_VFS_WRITEV];
+    vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].ncall = res[NETDATA_KEY_CALLS_VFS_FSYNC];
+    vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].ncall = res[NETDATA_KEY_CALLS_VFS_OPEN];
+    vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].ncall = res[NETDATA_KEY_CALLS_VFS_CREATE];
+
+    vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].nerr = res[NETDATA_KEY_ERROR_VFS_UNLINK];
+    vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].nerr = res[NETDATA_KEY_ERROR_VFS_READ] +
+                                                                res[NETDATA_KEY_ERROR_VFS_READV];
+    vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].nerr = res[NETDATA_KEY_ERROR_VFS_WRITE] +
+                                                                 res[NETDATA_KEY_ERROR_VFS_WRITEV];
+    vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].nerr = res[NETDATA_KEY_ERROR_VFS_FSYNC];
+    vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].nerr = res[NETDATA_KEY_ERROR_VFS_OPEN];
+    vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].nerr = res[NETDATA_KEY_ERROR_VFS_CREATE];
+
+    vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_WRITE].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITE] +
+                                                               (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITEV];
+    vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_READ].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_READ] +
+                                                              (uint64_t)res[NETDATA_KEY_BYTES_VFS_READV];
+}
+
+/**
+ * Sum PIDs
+ *
+ * Sum the values for all PIDs of a target.
+ *
+ * @param vfs  the output structure.
+ * @param root the linked list with the structures to be summed.
+ */
+static void ebpf_vfs_sum_pids(netdata_publish_vfs_t *vfs, struct pid_on_target *root)
+{
+    netdata_publish_vfs_t accumulator;
+    memset(&accumulator, 0, sizeof(accumulator));
+
+    while (root) {
+        int32_t pid = root->pid;
+        netdata_publish_vfs_t *w = vfs_pid[pid];
+        if (w) {
+            accumulator.write_call += w->write_call;
+            accumulator.writev_call += w->writev_call;
+            accumulator.read_call += w->read_call;
+            accumulator.readv_call += w->readv_call;
+            accumulator.unlink_call += w->unlink_call;
+            accumulator.fsync_call += w->fsync_call;
+            accumulator.open_call += w->open_call;
+            accumulator.create_call += w->create_call;
+
+            accumulator.write_bytes += w->write_bytes;
+            accumulator.writev_bytes += w->writev_bytes;
+            accumulator.read_bytes += w->read_bytes;
+            accumulator.readv_bytes += w->readv_bytes;
+
+            accumulator.write_err += w->write_err;
+            accumulator.writev_err += w->writev_err;
+            accumulator.read_err += w->read_err;
+            accumulator.readv_err += w->readv_err;
+            accumulator.unlink_err += w->unlink_err;
+            accumulator.fsync_err += w->fsync_err;
+            accumulator.open_err += w->open_err;
+            accumulator.create_err += w->create_err;
+        }
+        root = root->next;
+    }
+
+    // These conditions were added because we are using the incremental algorithm:
+    // the published value must never move backwards.
+    vfs->write_call = (accumulator.write_call >= vfs->write_call) ?
accumulator.write_call : vfs->write_call; + vfs->writev_call = (accumulator.writev_call >= vfs->writev_call) ? accumulator.writev_call : vfs->writev_call; + vfs->read_call = (accumulator.read_call >= vfs->read_call) ? accumulator.read_call : vfs->read_call; + vfs->readv_call = (accumulator.readv_call >= vfs->readv_call) ? accumulator.readv_call : vfs->readv_call; + vfs->unlink_call = (accumulator.unlink_call >= vfs->unlink_call) ? accumulator.unlink_call : vfs->unlink_call; + vfs->fsync_call = (accumulator.fsync_call >= vfs->fsync_call) ? accumulator.fsync_call : vfs->fsync_call; + vfs->open_call = (accumulator.open_call >= vfs->open_call) ? accumulator.open_call : vfs->open_call; + vfs->create_call = (accumulator.create_call >= vfs->create_call) ? accumulator.create_call : vfs->create_call; + + vfs->write_bytes = (accumulator.write_bytes >= vfs->write_bytes) ? accumulator.write_bytes : vfs->write_bytes; + vfs->writev_bytes = (accumulator.writev_bytes >= vfs->writev_bytes) ? accumulator.writev_bytes : vfs->writev_bytes; + vfs->read_bytes = (accumulator.read_bytes >= vfs->read_bytes) ? accumulator.read_bytes : vfs->read_bytes; + vfs->readv_bytes = (accumulator.readv_bytes >= vfs->readv_bytes) ? accumulator.readv_bytes : vfs->readv_bytes; + + vfs->write_err = (accumulator.write_err >= vfs->write_err) ? accumulator.write_err : vfs->write_err; + vfs->writev_err = (accumulator.writev_err >= vfs->writev_err) ? accumulator.writev_err : vfs->writev_err; + vfs->read_err = (accumulator.read_err >= vfs->read_err) ? accumulator.read_err : vfs->read_err; + vfs->readv_err = (accumulator.readv_err >= vfs->readv_err) ? accumulator.readv_err : vfs->readv_err; + vfs->unlink_err = (accumulator.unlink_err >= vfs->unlink_err) ? accumulator.unlink_err : vfs->unlink_err; + vfs->fsync_err = (accumulator.fsync_err >= vfs->fsync_err) ? accumulator.fsync_err : vfs->fsync_err; + vfs->open_err = (accumulator.open_err >= vfs->open_err) ? accumulator.open_err : vfs->open_err; + vfs->create_err = (accumulator.create_err >= vfs->create_err) ? accumulator.create_err : vfs->create_err; +} + +/** + * Send data to Netdata calling auxiliary functions. + * + * @param em the structure with thread information + * @param root the target list. 
+ */ +void ebpf_vfs_send_apps_data(ebpf_module_t *em, struct target *root) +{ + struct target *w; + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + ebpf_vfs_sum_pids(&w->vfs, w->root_pid); + } + } + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->vfs.unlink_call); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->vfs.write_call + w->vfs.writev_call); + } + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->vfs.write_err + w->vfs.writev_err); + } + } + write_end_chart(); + } + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->vfs.read_call + w->vfs.readv_call); + } + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->vfs.read_err + w->vfs.readv_err); + } + } + write_end_chart(); + } + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->vfs.write_bytes + w->vfs.writev_bytes); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->vfs.read_bytes + w->vfs.readv_bytes); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->vfs.fsync_call); + } + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->vfs.fsync_err); + } + } + write_end_chart(); + } + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->vfs.open_call); + } + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->vfs.open_err); + } + } + write_end_chart(); + } + + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE); + for (w = root; w; w = w->next) { + if (unlikely(w->exposed && w->processes)) { + write_chart_dimension(w->name, w->vfs.create_call); + } + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR); + for 
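/*
 * The write_begin_chart()/write_chart_dimension()/write_end_chart() trios above
 * emit Netdata's external-plugin text protocol on stdout. Roughly (a sketch,
 * assuming NETDATA_APPS_FAMILY expands to "apps" and one exposed target named
 * "dev"), each update interval produces:
 *
 *     BEGIN apps.vfs_create_error
 *     SET dev = 3
 *     END
 *
 * The agent parses these lines and computes per-second rates for the
 * incremental dimensions.
 */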
(w = root; w; w = w->next) {
+            if (unlikely(w->exposed && w->processes)) {
+                write_chart_dimension(w->name, w->vfs.create_err);
+            }
+        }
+        write_end_chart();
+    }
+}
+
+/**
+ * Apps Accumulator
+ *
+ * Sum all values read from the kernel and store them in the first vector address.
+ *
+ * @param out the vector with read values.
+ */
+static void vfs_apps_accumulator(netdata_publish_vfs_t *out)
+{
+    int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+    netdata_publish_vfs_t *total = &out[0];
+    for (i = 1; i < end; i++) {
+        netdata_publish_vfs_t *w = &out[i];
+
+        total->write_call += w->write_call;
+        total->writev_call += w->writev_call;
+        total->read_call += w->read_call;
+        total->readv_call += w->readv_call;
+        total->unlink_call += w->unlink_call;
+
+        total->write_bytes += w->write_bytes;
+        total->writev_bytes += w->writev_bytes;
+        total->read_bytes += w->read_bytes;
+        total->readv_bytes += w->readv_bytes;
+
+        total->write_err += w->write_err;
+        total->writev_err += w->writev_err;
+        total->read_err += w->read_err;
+        total->readv_err += w->readv_err;
+        total->unlink_err += w->unlink_err;
+    }
+}
+
+/**
+ * Fill PID
+ *
+ * Fill PID structures
+ *
+ * @param current_pid the PID we are collecting data for
+ * @param publish     values read from the hash tables
+ */
+static void vfs_fill_pid(uint32_t current_pid, netdata_publish_vfs_t *publish)
+{
+    netdata_publish_vfs_t *curr = vfs_pid[current_pid];
+    if (!curr) {
+        curr = callocz(1, sizeof(netdata_publish_vfs_t));
+        vfs_pid[current_pid] = curr;
+    }
+
+    memcpy(curr, &publish[0], sizeof(netdata_publish_vfs_t));
+}
+
+/**
+ * Read the hash table and store data to allocated vectors.
+ */
+static void ebpf_vfs_read_apps()
+{
+    struct pid_stat *pids = root_of_pids;
+    netdata_publish_vfs_t *vv = vfs_vector;
+    int fd = vfs_maps[NETDATA_VFS_PID].map_fd;
+    size_t length = sizeof(netdata_publish_vfs_t) * ebpf_nprocs;
+    while (pids) {
+        uint32_t key = pids->pid;
+
+        if (bpf_map_lookup_elem(fd, &key, vv)) {
+            pids = pids->next;
+            continue;
+        }
+
+        vfs_apps_accumulator(vv);
+
+        vfs_fill_pid(key, vv);
+
+        // We clean the vector to avoid passing data read from one process to another.
+        memset(vv, 0, length);
+
+        pids = pids->next;
+    }
+}
+
+/**
+ * Update cgroup
+ *
+ * Update cgroup data based on the values collected per PID.
+ */
+static void read_update_vfs_cgroup()
+{
+    ebpf_cgroup_target_t *ect;
+    netdata_publish_vfs_t *vv = vfs_vector;
+    int fd = vfs_maps[NETDATA_VFS_PID].map_fd;
+    size_t length = sizeof(netdata_publish_vfs_t) * ebpf_nprocs;
+
+    pthread_mutex_lock(&mutex_cgroup_shm);
+    for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+        struct pid_on_target2 *pids;
+        for (pids = ect->pids; pids; pids = pids->next) {
+            int pid = pids->pid;
+            netdata_publish_vfs_t *out = &pids->vfs;
+            if (likely(vfs_pid) && vfs_pid[pid]) {
+                netdata_publish_vfs_t *in = vfs_pid[pid];
+
+                memcpy(out, in, sizeof(netdata_publish_vfs_t));
+            } else {
+                memset(vv, 0, length);
+                if (!bpf_map_lookup_elem(fd, &pid, vv)) {
+                    vfs_apps_accumulator(vv);
+
+                    memcpy(out, vv, sizeof(netdata_publish_vfs_t));
+                }
+            }
+        }
+    }
+    pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+ * VFS read hash
+ *
+ * This is the thread callback.
+ * This thread is necessary because we cannot freeze the whole plugin to read the data.
+ *
+ * @param ptr a pointer to the module structure (ebpf_module_t).
+ *
+ * @return It always returns NULL.
+ */ +void *ebpf_vfs_read_hash(void *ptr) +{ + read_thread_closed = 0; + + heartbeat_t hb; + heartbeat_init(&hb); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + + usec_t step = NETDATA_LATENCY_VFS_SLEEP_MS * em->update_every; + while (!close_ebpf_plugin) { + usec_t dt = heartbeat_next(&hb, step); + (void)dt; + + read_global_table(); + } + + read_thread_closed = 1; + + return NULL; +} + +/** + * Sum PIDs + * + * Sum values for all targets. + * + * @param vfs structure used to store data + * @param pids input data + */ +static void ebpf_vfs_sum_cgroup_pids(netdata_publish_vfs_t *vfs, struct pid_on_target2 *pids) + { + netdata_publish_vfs_t accumulator; + memset(&accumulator, 0, sizeof(accumulator)); + + while (pids) { + netdata_publish_vfs_t *w = &pids->vfs; + + accumulator.write_call += w->write_call; + accumulator.writev_call += w->writev_call; + accumulator.read_call += w->read_call; + accumulator.readv_call += w->readv_call; + accumulator.unlink_call += w->unlink_call; + accumulator.fsync_call += w->fsync_call; + accumulator.open_call += w->open_call; + accumulator.create_call += w->create_call; + + accumulator.write_bytes += w->write_bytes; + accumulator.writev_bytes += w->writev_bytes; + accumulator.read_bytes += w->read_bytes; + accumulator.readv_bytes += w->readv_bytes; + + accumulator.write_err += w->write_err; + accumulator.writev_err += w->writev_err; + accumulator.read_err += w->read_err; + accumulator.readv_err += w->readv_err; + accumulator.unlink_err += w->unlink_err; + accumulator.fsync_err += w->fsync_err; + accumulator.open_err += w->open_err; + accumulator.create_err += w->create_err; + + pids = pids->next; + } + + // These conditions were added, because we are using incremental algorithm + vfs->write_call = (accumulator.write_call >= vfs->write_call) ? accumulator.write_call : vfs->write_call; + vfs->writev_call = (accumulator.writev_call >= vfs->writev_call) ? accumulator.writev_call : vfs->writev_call; + vfs->read_call = (accumulator.read_call >= vfs->read_call) ? accumulator.read_call : vfs->read_call; + vfs->readv_call = (accumulator.readv_call >= vfs->readv_call) ? accumulator.readv_call : vfs->readv_call; + vfs->unlink_call = (accumulator.unlink_call >= vfs->unlink_call) ? accumulator.unlink_call : vfs->unlink_call; + vfs->fsync_call = (accumulator.fsync_call >= vfs->fsync_call) ? accumulator.fsync_call : vfs->fsync_call; + vfs->open_call = (accumulator.open_call >= vfs->open_call) ? accumulator.open_call : vfs->open_call; + vfs->create_call = (accumulator.create_call >= vfs->create_call) ? accumulator.create_call : vfs->create_call; + + vfs->write_bytes = (accumulator.write_bytes >= vfs->write_bytes) ? accumulator.write_bytes : vfs->write_bytes; + vfs->writev_bytes = (accumulator.writev_bytes >= vfs->writev_bytes) ? accumulator.writev_bytes : vfs->writev_bytes; + vfs->read_bytes = (accumulator.read_bytes >= vfs->read_bytes) ? accumulator.read_bytes : vfs->read_bytes; + vfs->readv_bytes = (accumulator.readv_bytes >= vfs->readv_bytes) ? accumulator.readv_bytes : vfs->readv_bytes; + + vfs->write_err = (accumulator.write_err >= vfs->write_err) ? accumulator.write_err : vfs->write_err; + vfs->writev_err = (accumulator.writev_err >= vfs->writev_err) ? accumulator.writev_err : vfs->writev_err; + vfs->read_err = (accumulator.read_err >= vfs->read_err) ? accumulator.read_err : vfs->read_err; + vfs->readv_err = (accumulator.readv_err >= vfs->readv_err) ? accumulator.readv_err : vfs->readv_err; + vfs->unlink_err = (accumulator.unlink_err >= vfs->unlink_err) ? 
accumulator.unlink_err : vfs->unlink_err; + vfs->fsync_err = (accumulator.fsync_err >= vfs->fsync_err) ? accumulator.fsync_err : vfs->fsync_err; + vfs->open_err = (accumulator.open_err >= vfs->open_err) ? accumulator.open_err : vfs->open_err; + vfs->create_err = (accumulator.create_err >= vfs->create_err) ? accumulator.create_err : vfs->create_err; +} + +/** + * Create specific VFS charts + * + * Create charts for cgroup/application. + * + * @param type the chart type. + * @param em the main thread structure. + */ +static void ebpf_create_specific_vfs_charts(char *type, ebpf_module_t *em) +{ + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_DELETED,"Files deleted", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_UNLINK_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5500, + ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "Write to disk", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5501, + ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + if (em->mode < MODE_ENTRY) { + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "Fails to write", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_ERROR_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5502, + ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + } + + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "Read from disk", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5503, + ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + if (em->mode < MODE_ENTRY) { + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "Fails to read", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_ERROR_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5504, + ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + } + + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "Bytes written on disk", + EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_BYTES_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5505, + ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "Bytes read from disk", + EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_BYTES_CONTEXT, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5506, + ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls for vfs_fsync", + 
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NULL, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5507, + ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + if (em->mode < MODE_ENTRY) { + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "Sync error", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NULL, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5508, + ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + } + + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls for vfs_open", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NULL, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5509, + ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + if (em->mode < MODE_ENTRY) { + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "Open error", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NULL, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5510, + ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + } + + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls for vfs_create", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NULL, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5511, + ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + + if (em->mode < MODE_ENTRY) { + ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "Create error", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NULL, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5512, + ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP); + } +} + +/** + * Obsolete specific VFS charts + * + * Obsolete charts for cgroup/application. + * + * @param type the chart type. + * @param em the main thread structure. 
+ */ +static void ebpf_obsolete_specific_vfs_charts(char *type, ebpf_module_t *em) +{ + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_DELETED, "Files deleted", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_UNLINK_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5500, em->update_every); + + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "Write to disk", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_WRITE_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5501, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "Fails to write", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_WRITE_ERROR_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5502, em->update_every); + } + + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "Read from disk", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_READ_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5503, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "Fails to read", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_READ_ERROR_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5504, em->update_every); + } + + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "Bytes written on disk", + EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_WRITE_BYTES_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5505, em->update_every); + + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "Bytes read from disk", + EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_READ_BYTES_CONTEXT, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5506, em->update_every); + + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls for vfs_fsync", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NULL, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5507, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "Sync error", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NULL, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5508, em->update_every); + } + + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls for vfs_open", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NULL, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5509, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "Open error", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NULL, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5510, em->update_every); + } + + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls for vfs_create", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_LINE, NULL, + NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5511, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "Create error", + 
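/*
 * How obsolescence works here, in short: ebpf_write_chart_obsolete() re-emits the
 * chart definition flagged as obsolete, so the agent stops expecting updates and
 * eventually frees the chart. A rough sketch of the emitted line (assuming the
 * helper follows the standard external-plugin CHART syntax, with the "obsolete"
 * keyword in the options field):
 *
 *     CHART <cgroup>.vfs_unlink '' 'Files deleted' 'calls/s' 'vfs' 'cgroup.vfs_unlink' line <prio> <every> 'obsolete'
 *
 * The caller in ebpf_vfs_send_cgroup_data() then clears
 * NETDATA_EBPF_CGROUP_HAS_VFS_CHART, so the charts are recreated if the cgroup
 * reappears.
 */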
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
+                                  NETDATA_EBPF_CHART_TYPE_LINE, NULL,
+                                  NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5512, em->update_every);
+    }
+}
+
+/**
+ * Send specific VFS data
+ *
+ * Send data for specific cgroup/apps.
+ *
+ * @param type   chart type
+ * @param values structure with values that will be sent to netdata
+ * @param em     the main collector structure
+ */
+static void ebpf_send_specific_vfs_data(char *type, netdata_publish_vfs_t *values, ebpf_module_t *em)
+{
+    write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_DELETED);
+    write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].name, (long long)values->unlink_call);
+    write_end_chart();
+
+    write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS);
+    write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].name,
+                          (long long)values->write_call + (long long)values->writev_call);
+    write_end_chart();
+
+    if (em->mode < MODE_ENTRY) {
+        write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR);
+        write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].name,
+                              (long long)values->write_err + (long long)values->writev_err);
+        write_end_chart();
+    }
+
+    write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS);
+    write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].name,
+                          (long long)values->read_call + (long long)values->readv_call);
+    write_end_chart();
+
+    if (em->mode < MODE_ENTRY) {
+        write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR);
+        write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].name,
+                              (long long)values->read_err + (long long)values->readv_err);
+        write_end_chart();
+    }
+
+    write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES);
+    write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].name,
+                          (long long)values->write_bytes + (long long)values->writev_bytes);
+    write_end_chart();
+
+    write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES);
+    write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].name,
+                          (long long)values->read_bytes + (long long)values->readv_bytes);
+    write_end_chart();
+
+    write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC);
+    write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].name,
+                          (long long)values->fsync_call);
+    write_end_chart();
+
+    if (em->mode < MODE_ENTRY) {
+        write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR);
+        write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].name,
+                              (long long)values->fsync_err);
+        write_end_chart();
+    }
+
+    write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN);
+    write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].name,
+                          (long long)values->open_call);
+    write_end_chart();
+
+    if (em->mode < MODE_ENTRY) {
+        write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR);
+        write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].name,
+                              (long long)values->open_err);
+        write_end_chart();
+    }
+
+    write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE);
+    write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].name,
+                          (long long)values->create_call);
+    write_end_chart();
+
+    if (em->mode < MODE_ENTRY) {
+        write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR);
+        write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].name,
+                              (long long)values->create_err);
+        write_end_chart();
+    }
+}
+
+/**
+ * Create Systemd VFS Charts
+ *
+ * Create charts
when systemd is enabled + * + * @param em the main collector structure + **/ +static void ebpf_create_systemd_vfs_charts(ebpf_module_t *em) +{ + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_DELETED, "Files deleted", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20065, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_UNLINK_CONTEXT, + NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); + + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "Write to disk", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20066, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_WRITE_CONTEXT, + NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "Fails to write", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20067, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + NETDATA_SYSTEMD_VFS_WRITE_ERROR_CONTEXT, + NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); + } + + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "Read from disk", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20068, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_READ_CONTEXT, + NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "Fails to read", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20069, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + NETDATA_SYSTEMD_VFS_READ_ERROR_CONTEXT, + NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); + } + + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "Bytes written on disk", + EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20070, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_WRITE_BYTES_CONTEXT, + NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); + + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "Bytes read from disk", + EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20071, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_READ_BYTES_CONTEXT, + NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); + + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls to vfs_fsync", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20072, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL, + NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "Sync error", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20073, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL, + NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); + } + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls to vfs_open", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20074, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL, + NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "Open error", + 
EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20075, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL, + NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); + } + + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls to vfs_create", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20076, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL, + NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "Create error", + EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, 20077, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL, + NETDATA_EBPF_MODULE_NAME_VFS, em->update_every); + } +} + +/** + * Send Systemd charts + * + * Send collected data to Netdata. + * + * @param em the main collector structure + * + * @return It returns the status for chart creation, if it is necessary to remove a specific dimension, zero is returned + * otherwise function returns 1 to avoid chart recreation + */ +static int ebpf_send_systemd_vfs_charts(ebpf_module_t *em) +{ + int ret = 1; + ebpf_cgroup_target_t *ect; + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_vfs.unlink_call); + } else + ret = 0; + } + write_end_chart(); + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_call + + ect->publish_systemd_vfs.writev_call); + } + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_err + + ect->publish_systemd_vfs.writev_err); + } + } + write_end_chart(); + } + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_call + + ect->publish_systemd_vfs.readv_call); + } + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_err + + ect->publish_systemd_vfs.readv_err); + } + } + write_end_chart(); + } + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_bytes + + ect->publish_systemd_vfs.writev_bytes); + } + } + write_end_chart(); + + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + 
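/*
 * Why the return value matters (a sketch of the caller-side handshake, taken from
 * ebpf_vfs_send_cgroup_data() below): the first chart loop above sets ret = 0
 * when it finds a cgroup that is not an updated systemd service, and the caller
 * stores the result back into its static flag:
 *
 *     systemd_charts = ebpf_send_systemd_vfs_charts(em);
 *
 * A zero forces ebpf_create_systemd_vfs_charts() to run again on the next
 * iteration, so dimensions for vanished services are redefined instead of being
 * updated blindly.
 */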
write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_bytes + + ect->publish_systemd_vfs.readv_bytes); + } + } + write_end_chart(); + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_vfs.fsync_call); + } + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_vfs.fsync_err); + } + } + write_end_chart(); + } + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_vfs.open_call); + } + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_vfs.open_err); + } + } + write_end_chart(); + } + + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_vfs.create_call); + } + } + write_end_chart(); + + if (em->mode < MODE_ENTRY) { + write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR); + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (unlikely(ect->systemd) && unlikely(ect->updated)) { + write_chart_dimension(ect->name, ect->publish_systemd_vfs.create_err); + } + } + write_end_chart(); + } + + return ret; +} + +/** + * Send data to Netdata calling auxiliary functions. + * + * @param em the main collector structure +*/ +static void ebpf_vfs_send_cgroup_data(ebpf_module_t *em) +{ + if (!ebpf_cgroup_pids) + return; + + pthread_mutex_lock(&mutex_cgroup_shm); + ebpf_cgroup_target_t *ect; + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + ebpf_vfs_sum_cgroup_pids(&ect->publish_systemd_vfs, ect->pids); + } + + int has_systemd = shm_ebpf_cgroup.header->systemd_enabled; + if (has_systemd) { + static int systemd_charts = 0; + if (!systemd_charts) { + ebpf_create_systemd_vfs_charts(em); + systemd_charts = 1; + } + + systemd_charts = ebpf_send_systemd_vfs_charts(em); + } + + for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) { + if (ect->systemd) + continue; + + if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_VFS_CHART) && ect->updated) { + ebpf_create_specific_vfs_charts(ect->name, em); + ect->flags |= NETDATA_EBPF_CGROUP_HAS_VFS_CHART; + } + + if (ect->flags & NETDATA_EBPF_CGROUP_HAS_VFS_CHART) { + if (ect->updated) { + ebpf_send_specific_vfs_data(ect->name, &ect->publish_systemd_vfs, em); + } else { + ebpf_obsolete_specific_vfs_charts(ect->name, em); + ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_VFS_CHART; + } + } + } + + pthread_mutex_unlock(&mutex_cgroup_shm); +} + +/** + * Main loop for this collector. 
+ * + * @param step the number of microseconds used with heart beat + * @param em the structure with thread information + */ +static void vfs_collector(ebpf_module_t *em) +{ + vfs_threads.thread = mallocz(sizeof(netdata_thread_t)); + vfs_threads.start_routine = ebpf_vfs_read_hash; + + netdata_thread_create(vfs_threads.thread, vfs_threads.name, NETDATA_THREAD_OPTION_JOINABLE, + ebpf_vfs_read_hash, em); + + int apps = em->apps_charts; + int cgroups = em->cgroup_charts; + int update_every = em->update_every; + int counter = update_every - 1; + while (!close_ebpf_plugin) { + pthread_mutex_lock(&collect_data_mutex); + pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex); + + if (++counter == update_every) { + counter = 0; + if (apps) + ebpf_vfs_read_apps(); + + if (cgroups) + read_update_vfs_cgroup(); + + pthread_mutex_lock(&lock); + + ebpf_vfs_send_data(em); + fflush(stdout); + + if (apps) + ebpf_vfs_send_apps_data(em, apps_groups_root_target); + + if (cgroups) + ebpf_vfs_send_cgroup_data(em); + + pthread_mutex_unlock(&lock); + } + pthread_mutex_unlock(&collect_data_mutex); + } +} + +/***************************************************************** + * + * FUNCTIONS TO CREATE CHARTS + * + *****************************************************************/ + +/** + * Create IO chart + * + * @param family the chart family + * @param name the chart name + * @param axis the axis label + * @param web the group name used to attach the chart on dashboard + * @param order the order number of the specified chart + * @param algorithm the algorithm used to make the charts. + * @param update_every value to overwrite the update frequency set by the server. + */ +static void ebpf_create_io_chart(char *family, char *name, char *axis, char *web, + int order, int algorithm, int update_every) +{ + printf("CHART %s.%s '' 'Bytes written and read' '%s' '%s' '' line %d %d '' 'ebpf.plugin' 'filesystem'\n", + family, + name, + axis, + web, + order, + update_every); + + printf("DIMENSION %s %s %s 1 1\n", + vfs_id_names[NETDATA_KEY_PUBLISH_VFS_READ], + vfs_dimension_names[NETDATA_KEY_PUBLISH_VFS_READ], + ebpf_algorithms[algorithm]); + printf("DIMENSION %s %s %s -1 1\n", + vfs_id_names[NETDATA_KEY_PUBLISH_VFS_WRITE], + vfs_dimension_names[NETDATA_KEY_PUBLISH_VFS_WRITE], + ebpf_algorithms[algorithm]); +} + +/** + * Create global charts + * + * Call ebpf_create_chart to create the charts for the collector. + * + * @param em a pointer to the structure with the default values. 
+ */ +static void ebpf_create_global_charts(ebpf_module_t *em) +{ + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, + NETDATA_VFS_FILE_CLEAN_COUNT, + "Remove files", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NULL, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_FILESYSTEM_VFS_CLEAN, + ebpf_create_global_dimension, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, + NETDATA_VFS_FILE_IO_COUNT, + "Calls to IO", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NULL, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_COUNT, + ebpf_create_global_dimension, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ], + 2, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + + ebpf_create_io_chart(NETDATA_FILESYSTEM_FAMILY, + NETDATA_VFS_IO_FILE_BYTES, EBPF_COMMON_DIMENSION_BYTES, + NETDATA_VFS_GROUP, + NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_BYTES, + NETDATA_EBPF_INCREMENTAL_IDX, em->update_every); + + if (em->mode < MODE_ENTRY) { + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, + NETDATA_VFS_FILE_ERR_COUNT, + "Fails to write or read", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NULL, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EBYTES, + ebpf_create_global_dimension, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ], + 2, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + } + + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, + NETDATA_VFS_FSYNC, + "Calls for vfs_fsync", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NULL, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_FSYNC, + ebpf_create_global_dimension, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + + if (em->mode < MODE_ENTRY) { + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, + NETDATA_VFS_FSYNC_ERR, + "Fails to synchronize", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NULL, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EFSYNC, + ebpf_create_global_dimension, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + } + + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, + NETDATA_VFS_OPEN, + "Calls for vfs_open", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NULL, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_OPEN, + ebpf_create_global_dimension, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + + if (em->mode < MODE_ENTRY) { + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, + NETDATA_VFS_OPEN_ERR, + "Fails to open a file", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NULL, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EOPEN, + ebpf_create_global_dimension, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + } + + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, + NETDATA_VFS_CREATE, + "Calls for vfs_create", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NULL, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_CREATE, + ebpf_create_global_dimension, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + + if (em->mode < MODE_ENTRY) { + ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, + NETDATA_VFS_CREATE_ERR, + "Fails to create a 
file.", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NULL, + NETDATA_EBPF_CHART_TYPE_LINE, + NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_ECREATE, + ebpf_create_global_dimension, + &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE], + 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + } +} + +/** + * Create process apps charts + * + * Call ebpf_create_chart to create the charts on apps submenu. + * + * @param em a pointer to the structure with the default values. + * @param ptr a pointer for the targets. + **/ +void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr) +{ + struct target *root = ptr; + + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_DELETED, + "Files deleted", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20065, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, + "Write to disk", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20066, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + + if (em->mode < MODE_ENTRY) { + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, + "Fails to write", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20067, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + } + + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS, + "Read from disk", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20068, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + + if (em->mode < MODE_ENTRY) { + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, + "Fails to read", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20069, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + } + + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, + "Bytes written on disk", EBPF_COMMON_DIMENSION_BYTES, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20070, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_BYTES, + "Bytes read from disk", EBPF_COMMON_DIMENSION_BYTES, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20071, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_FSYNC, + "Calls for vfs_fsync", EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20072, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + + if (em->mode < MODE_ENTRY) { + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, + "Sync error", + EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20073, + ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], + root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS); + } + + ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_OPEN, + "Calls for vfs_open", EBPF_COMMON_DIMENSION_CALL, + NETDATA_VFS_GROUP, + NETDATA_EBPF_CHART_TYPE_STACKED, + 20074, + 
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+                               root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+    if (em->mode < MODE_ENTRY) {
+        ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR,
+                                   "Open error",
+                                   EBPF_COMMON_DIMENSION_CALL,
+                                   NETDATA_VFS_GROUP,
+                                   NETDATA_EBPF_CHART_TYPE_STACKED,
+                                   20075,
+                                   ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+                                   root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+    }
+
+    ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_CREATE,
+                               "Calls for vfs_create", EBPF_COMMON_DIMENSION_CALL,
+                               NETDATA_VFS_GROUP,
+                               NETDATA_EBPF_CHART_TYPE_STACKED,
+                               20076,
+                               ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+                               root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+    if (em->mode < MODE_ENTRY) {
+        ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR,
+                                   "Create error",
+                                   EBPF_COMMON_DIMENSION_CALL,
+                                   NETDATA_VFS_GROUP,
+                                   NETDATA_EBPF_CHART_TYPE_STACKED,
+                                   20077,
+                                   ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+                                   root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+    }
+}
+
+/*****************************************************************
+ *
+ *  FUNCTIONS TO START THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Allocate vectors used with this thread.
+ * We are not testing the return, because callocz does this and shuts down the
+ * software if the allocation fails.
+ *
+ * @param apps is apps enabled?
+ */
+static void ebpf_vfs_allocate_global_vectors(int apps)
+{
+    memset(vfs_aggregated_data, 0, sizeof(vfs_aggregated_data));
+    memset(vfs_publish_aggregated, 0, sizeof(vfs_publish_aggregated));
+
+    vfs_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
+    vfs_vector = callocz(ebpf_nprocs, sizeof(netdata_publish_vfs_t));
+
+    if (apps)
+        vfs_pid = callocz((size_t)pid_max, sizeof(netdata_publish_vfs_t *));
+}
+
+/*****************************************************************
+ *
+ *  EBPF VFS THREAD
+ *
+ *****************************************************************/
+
+/**
+ * VFS thread
+ *
+ * Thread used to generate VFS charts.
+ * + * @param ptr a pointer to `struct ebpf_module` + * + * @return It always return NULL + */ +void *ebpf_vfs_thread(void *ptr) +{ + netdata_thread_cleanup_push(ebpf_vfs_cleanup, ptr); + + ebpf_module_t *em = (ebpf_module_t *)ptr; + em->maps = vfs_maps; + + ebpf_update_pid_table(&vfs_maps[NETDATA_VFS_PID], em); + + ebpf_vfs_allocate_global_vectors(em->apps_charts); + + if (!em->enabled) + goto endvfs; + + probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects); + if (!probe_links) { + goto endvfs; + } + + int algorithms[NETDATA_KEY_PUBLISH_VFS_END] = { + NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX,NETDATA_EBPF_INCREMENTAL_IDX, + NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX,NETDATA_EBPF_INCREMENTAL_IDX + }; + + ebpf_global_labels(vfs_aggregated_data, vfs_publish_aggregated, vfs_dimension_names, + vfs_id_names, algorithms, NETDATA_KEY_PUBLISH_VFS_END); + + pthread_mutex_lock(&lock); + ebpf_create_global_charts(em); + pthread_mutex_unlock(&lock); + + vfs_collector(em); + +endvfs: + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/collectors/ebpf.plugin/ebpf_vfs.h b/collectors/ebpf.plugin/ebpf_vfs.h new file mode 100644 index 000000000..0a972c983 --- /dev/null +++ b/collectors/ebpf.plugin/ebpf_vfs.h @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_EBPF_VFS_H +#define NETDATA_EBPF_VFS_H 1 + +// Module name +#define NETDATA_EBPF_MODULE_NAME_VFS "vfs" + +#define NETDATA_DIRECTORY_VFS_CONFIG_FILE "vfs.conf" + +#define NETDATA_LATENCY_VFS_SLEEP_MS 750000ULL + +// Global chart name +#define NETDATA_VFS_FILE_CLEAN_COUNT "vfs_deleted_objects" +#define NETDATA_VFS_FILE_IO_COUNT "vfs_io" +#define NETDATA_VFS_FILE_ERR_COUNT "vfs_io_error" +#define NETDATA_VFS_IO_FILE_BYTES "vfs_io_bytes" +#define NETDATA_VFS_FSYNC "vfs_fsync" +#define NETDATA_VFS_FSYNC_ERR "vfs_fsync_error" +#define NETDATA_VFS_OPEN "vfs_open" +#define NETDATA_VFS_OPEN_ERR "vfs_open_error" +#define NETDATA_VFS_CREATE "vfs_create" +#define NETDATA_VFS_CREATE_ERR "vfs_create_error" + +// Charts created on Apps submenu +#define NETDATA_SYSCALL_APPS_FILE_DELETED "file_deleted" +#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS "vfs_write_call" +#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS "vfs_read_call" +#define NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES "vfs_write_bytes" +#define NETDATA_SYSCALL_APPS_VFS_READ_BYTES "vfs_read_bytes" +#define NETDATA_SYSCALL_APPS_VFS_FSYNC "vfs_fsync" +#define NETDATA_SYSCALL_APPS_VFS_OPEN "vfs_open" +#define NETDATA_SYSCALL_APPS_VFS_CREATE "vfs_create" + +#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR "vfs_write_error" +#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR "vfs_read_error" +#define NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR "vfs_fsync_error" +#define NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR "vfs_open_error" +#define NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR "vfs_create_error" + +// Group used on Dashboard +#define NETDATA_VFS_GROUP "vfs" +#define NETDATA_VFS_CGROUP_GROUP "vfs (eBPF)" + +// Contexts +#define NETDATA_CGROUP_VFS_UNLINK_CONTEXT "cgroup.vfs_unlink" +#define NETDATA_CGROUP_VFS_WRITE_CONTEXT "cgroup.vfs_write" +#define NETDATA_CGROUP_VFS_WRITE_ERROR_CONTEXT "cgroup.vfs_write_error" +#define NETDATA_CGROUP_VFS_READ_CONTEXT "cgroup.vfs_read" +#define NETDATA_CGROUP_VFS_READ_ERROR_CONTEXT "cgroup.vfs_read_error" +#define NETDATA_CGROUP_VFS_WRITE_BYTES_CONTEXT "cgroup.vfs_write_bytes" +#define NETDATA_CGROUP_VFS_READ_BYTES_CONTEXT "cgroup.vfs_read_bytes" + +#define 
NETDATA_SYSTEMD_VFS_UNLINK_CONTEXT "services.vfs_unlink" +#define NETDATA_SYSTEMD_VFS_WRITE_CONTEXT "services.vfs_write" +#define NETDATA_SYSTEMD_VFS_WRITE_ERROR_CONTEXT "services.vfs_write_error" +#define NETDATA_SYSTEMD_VFS_READ_CONTEXT "services.vfs_read" +#define NETDATA_SYSTEMD_VFS_READ_ERROR_CONTEXT "services.vfs_read_error" +#define NETDATA_SYSTEMD_VFS_WRITE_BYTES_CONTEXT "services.vfs_write_bytes" +#define NETDATA_SYSTEMD_VFS_READ_BYTES_CONTEXT "services.vfs_read_bytes" + +typedef struct netdata_publish_vfs { + uint64_t pid_tgid; + uint32_t pid; + uint32_t pad; + + //Counter + uint32_t write_call; + uint32_t writev_call; + uint32_t read_call; + uint32_t readv_call; + uint32_t unlink_call; + uint32_t fsync_call; + uint32_t open_call; + uint32_t create_call; + + //Accumulator + uint64_t write_bytes; + uint64_t writev_bytes; + uint64_t readv_bytes; + uint64_t read_bytes; + + //Counter + uint32_t write_err; + uint32_t writev_err; + uint32_t read_err; + uint32_t readv_err; + uint32_t unlink_err; + uint32_t fsync_err; + uint32_t open_err; + uint32_t create_err; +} netdata_publish_vfs_t; + +enum netdata_publish_vfs_list { + NETDATA_KEY_PUBLISH_VFS_UNLINK, + NETDATA_KEY_PUBLISH_VFS_READ, + NETDATA_KEY_PUBLISH_VFS_WRITE, + NETDATA_KEY_PUBLISH_VFS_FSYNC, + NETDATA_KEY_PUBLISH_VFS_OPEN, + NETDATA_KEY_PUBLISH_VFS_CREATE, + + NETDATA_KEY_PUBLISH_VFS_END +}; + +enum vfs_counters { + NETDATA_KEY_CALLS_VFS_WRITE, + NETDATA_KEY_ERROR_VFS_WRITE, + NETDATA_KEY_BYTES_VFS_WRITE, + + NETDATA_KEY_CALLS_VFS_WRITEV, + NETDATA_KEY_ERROR_VFS_WRITEV, + NETDATA_KEY_BYTES_VFS_WRITEV, + + NETDATA_KEY_CALLS_VFS_READ, + NETDATA_KEY_ERROR_VFS_READ, + NETDATA_KEY_BYTES_VFS_READ, + + NETDATA_KEY_CALLS_VFS_READV, + NETDATA_KEY_ERROR_VFS_READV, + NETDATA_KEY_BYTES_VFS_READV, + + NETDATA_KEY_CALLS_VFS_UNLINK, + NETDATA_KEY_ERROR_VFS_UNLINK, + + NETDATA_KEY_CALLS_VFS_FSYNC, + NETDATA_KEY_ERROR_VFS_FSYNC, + + NETDATA_KEY_CALLS_VFS_OPEN, + NETDATA_KEY_ERROR_VFS_OPEN, + + NETDATA_KEY_CALLS_VFS_CREATE, + NETDATA_KEY_ERROR_VFS_CREATE, + + // Keep this as last and don't skip numbers as it is used as element counter + NETDATA_VFS_COUNTER +}; + +enum netdata_vfs_tables { + NETDATA_VFS_PID, + NETDATA_VFS_ALL +}; + +extern netdata_publish_vfs_t **vfs_pid; + +extern void *ebpf_vfs_thread(void *ptr); +extern void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr); +extern void clean_vfs_pid_structures(); + +extern struct config vfs_config; + +#endif /* NETDATA_EBPF_VFS_H */ diff --git a/collectors/ebpf.plugin/reset_netdata_trace.sh.in b/collectors/ebpf.plugin/reset_netdata_trace.sh.in deleted file mode 100644 index 51d981ee3..000000000 --- a/collectors/ebpf.plugin/reset_netdata_trace.sh.in +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -KPROBE_FILE="/sys/kernel/debug/tracing/kprobe_events" - -DATA="$(grep _netdata_ $KPROBE_FILE| cut -d' ' -f1 | cut -d: -f2)" - -for I in $DATA; do - echo "-:$I" > $KPROBE_FILE 2>/dev/null; -done diff --git a/collectors/freebsd.plugin/freebsd_getifaddrs.c b/collectors/freebsd.plugin/freebsd_getifaddrs.c index 1a84902d6..0c0c1e7ab 100644 --- a/collectors/freebsd.plugin/freebsd_getifaddrs.c +++ b/collectors/freebsd.plugin/freebsd_getifaddrs.c @@ -144,7 +144,7 @@ int do_getifaddrs(int update_every, usec_t dt) { (void)dt; #define DEFAULT_EXCLUDED_INTERFACES "lo*" -#define DEFAULT_PHYSICAL_INTERFACES "igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re*" +#define DEFAULT_PHYSICAL_INTERFACES "igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc*" #define CONFIG_SECTION_GETIFADDRS 
"plugin:freebsd:getifaddrs" static int enable_new_interfaces = -1; diff --git a/collectors/freebsd.plugin/freebsd_kstat_zfs.c b/collectors/freebsd.plugin/freebsd_kstat_zfs.c index 7d609eafc..8b5cc5799 100644 --- a/collectors/freebsd.plugin/freebsd_kstat_zfs.c +++ b/collectors/freebsd.plugin/freebsd_kstat_zfs.c @@ -213,8 +213,8 @@ int do_kstat_zfs_misc_arcstats(int update_every, usec_t dt) { // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_need_free", mibs.arc_need_free, arcstats.arc_need_free); // missing mib: GETSYSCTL_SIMPLE("kstat.zfs.misc.arcstats.arc_sys_free", mibs.arc_sys_free, arcstats.arc_sys_free); - generate_charts_arcstats("freebsd", "zfs", show_zero_charts, update_every); - generate_charts_arc_summary("freebsd", "zfs", show_zero_charts, update_every); + generate_charts_arcstats("freebsd.plugin", "zfs", show_zero_charts, update_every); + generate_charts_arc_summary("freebsd.plugin", "zfs", show_zero_charts, update_every); return 0; } @@ -252,7 +252,7 @@ int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt) { NULL, "Successfully TRIMmed bytes", "bytes", - "freebsd", + "freebsd.plugin", "zfs", 2320, update_every, @@ -280,7 +280,7 @@ int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt) { NULL, "TRIM requests", "requests", - "freebsd", + "freebsd.plugin", "zfs", 2321, update_every, @@ -301,4 +301,4 @@ int do_kstat_zfs_misc_zio_trim(int update_every, usec_t dt) { } return 0; -} \ No newline at end of file +} diff --git a/collectors/freebsd.plugin/freebsd_sysctl.c b/collectors/freebsd.plugin/freebsd_sysctl.c index 7d48e76dc..3dc1fbfb1 100644 --- a/collectors/freebsd.plugin/freebsd_sysctl.c +++ b/collectors/freebsd.plugin/freebsd_sysctl.c @@ -641,52 +641,58 @@ int do_hw_intcnt(int update_every, usec_t dt) { static int mib_hw_intrnames[2] = {0, 0}; static char *intrnames = NULL; - size = nintr * (MAXCOMLEN + 1); - if (unlikely(nintr != old_nintr)) - intrnames = reallocz(intrnames, size); - if (unlikely(GETSYSCTL_WSIZE("hw.intrnames", mib_hw_intrnames, intrnames, size))) { + if (unlikely(GETSYSCTL_SIZE("hw.intrnames", mib_hw_intrnames, size))) { error("DISABLED: system.intr chart"); error("DISABLED: system.interrupts chart"); error("DISABLED: hw.intrcnt module"); return 1; } else { + if (unlikely(nintr != old_nintr)) + intrnames = reallocz(intrnames, size); + if (unlikely(GETSYSCTL_WSIZE("hw.intrnames", mib_hw_intrnames, intrnames, size))) { + error("DISABLED: system.intr chart"); + error("DISABLED: system.interrupts chart"); + error("DISABLED: hw.intrcnt module"); + return 1; + } else { - // -------------------------------------------------------------------- - - static RRDSET *st_interrupts = NULL; - - if (unlikely(!st_interrupts)) - st_interrupts = rrdset_create_localhost( - "system", - "interrupts", - NULL, - "interrupts", - NULL, - "System interrupts", - "interrupts/s", - "freebsd.plugin", - "hw.intrcnt", - NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS, - update_every, - RRDSET_TYPE_STACKED - ); - else - rrdset_next(st_interrupts); - - for (i = 0; i < nintr; i++) { - void *p; - - p = intrnames + i * (MAXCOMLEN + 1); - if (unlikely((intrcnt[i] != 0) && (*(char *) p != 0))) { - RRDDIM *rd_interrupts = rrddim_find_active(st_interrupts, p); - - if (unlikely(!rd_interrupts)) - rd_interrupts = rrddim_add(st_interrupts, p, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - - rrddim_set_by_pointer(st_interrupts, rd_interrupts, intrcnt[i]); + // -------------------------------------------------------------------- + + static RRDSET *st_interrupts = NULL; + + if 
(unlikely(!st_interrupts)) + st_interrupts = rrdset_create_localhost( + "system", + "interrupts", + NULL, + "interrupts", + NULL, + "System interrupts", + "interrupts/s", + "freebsd.plugin", + "hw.intrcnt", + NETDATA_CHART_PRIO_SYSTEM_INTERRUPTS, + update_every, + RRDSET_TYPE_STACKED + ); + else + rrdset_next(st_interrupts); + + for (i = 0; i < nintr; i++) { + void *p; + + p = intrnames + i * (strlen(intrnames) + 1); + if (unlikely((intrcnt[i] != 0) && (*(char *) p != 0))) { + RRDDIM *rd_interrupts = rrddim_find_active(st_interrupts, p); + + if (unlikely(!rd_interrupts)) + rd_interrupts = rrddim_add(st_interrupts, p, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); + + rrddim_set_by_pointer(st_interrupts, rd_interrupts, intrcnt[i]); + } } + rrdset_done(st_interrupts); } - rrdset_done(st_interrupts); } } diff --git a/collectors/freebsd.plugin/plugin_freebsd.c b/collectors/freebsd.plugin/plugin_freebsd.c index 17fec4128..97ca1d9ae 100644 --- a/collectors/freebsd.plugin/plugin_freebsd.c +++ b/collectors/freebsd.plugin/plugin_freebsd.c @@ -146,7 +146,7 @@ void *freebsd_main(void *ptr) NULL, "Netdata FreeBSD plugin CPU usage", "milliseconds/s", - "freebsd", + "freebsd.plugin", "stats", 132000, localhost->rrd_update_every, @@ -178,7 +178,7 @@ void *freebsd_main(void *ptr) NULL, "Netdata FreeBSD plugin modules durations", "milliseconds/run", - "freebsd", + "freebsd.plugin", "stats", 132001, localhost->rrd_update_every, diff --git a/collectors/freeipmi.plugin/README.md b/collectors/freeipmi.plugin/README.md index 02a61dd2f..3ff6f4099 100644 --- a/collectors/freeipmi.plugin/README.md +++ b/collectors/freeipmi.plugin/README.md @@ -15,6 +15,11 @@ Netdata has a [freeipmi](https://www.gnu.org/software/freeipmi/) plugin. 2. re-install Netdata from source. The installer will detect that the required libraries are now available and will also build `freeipmi.plugin`. +> ❗ In some distributions `libipmimonitoring.pc` is located in an unregistered directory. +> In that case you should find the file and link it to the standard pkg-config directory. Usually, running +> `sudo ln -s /usr/lib/x86_64-linux-gnu/pkgconfig/libipmimonitoring.pc/libipmimonitoring.pc /usr/lib/pkgconfig/libipmimonitoring.pc` +> resolves the issue. + Keep in mind IPMI requires root access, so the plugin is setuid to root. If you just installed the required IPMI tools, please run at least once the command `ipmimonitoring` and verify it returns sensors information. This command initialises IPMI configuration, so that the Netdata plugin will be able to work. 
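The `hw.intrcnt` rework above replaces a size guess based on `MAXCOMLEN` with a two-step read: first ask the kernel how large `hw.intrnames` currently is, then fetch it into a buffer of exactly that size. Below is a minimal standalone sketch of the same pattern, assuming a FreeBSD host and using plain `sysctlbyname(3)` instead of netdata's `GETSYSCTL_*` wrappers; `fetch_sysctl_blob` is an illustrative name, and the walk treats the result as packed NUL-terminated names, as the patched loop does.

```c
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Two-step sysctl read: probe the size with a NULL buffer, then fetch
 * into a buffer of exactly that size, so growth in the kernel's table
 * (e.g. new interrupt sources) can never overflow a guessed length. */
static char *fetch_sysctl_blob(const char *name, size_t *len) {
    *len = 0;
    if (sysctlbyname(name, NULL, len, NULL, 0) != 0)   /* size probe */
        return NULL;

    char *buf = malloc(*len);
    if (!buf)
        return NULL;

    if (sysctlbyname(name, buf, len, NULL, 0) != 0) {  /* real read */
        free(buf);
        return NULL;
    }
    return buf;
}

int main(void) {
    size_t len;
    char *names = fetch_sysctl_blob("hw.intrnames", &len);
    if (!names) {
        perror("hw.intrnames");
        return 1;
    }

    /* Walk the blob as a sequence of NUL-terminated names. */
    for (size_t off = 0; off < len; off += strlen(names + off) + 1)
        if (names[off])
            printf("%s\n", names + off);

    free(names);
    return 0;
}
```

If the table grows between the two calls, the second `sysctlbyname()` can fail with `ENOMEM`, which is why the plugin re-probes the size on every collection pass before reallocating rather than caching it once.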
diff --git a/collectors/freeipmi.plugin/freeipmi_plugin.c b/collectors/freeipmi.plugin/freeipmi_plugin.c index e9702e785..0141a6a78 100644 --- a/collectors/freeipmi.plugin/freeipmi_plugin.c +++ b/collectors/freeipmi.plugin/freeipmi_plugin.c @@ -15,7 +15,7 @@ * UCRL-CODE-222073 */ -#include "../../libnetdata/libnetdata.h" +#include "libnetdata/libnetdata.h" #include #include diff --git a/collectors/idlejitter.plugin/plugin_idlejitter.h b/collectors/idlejitter.plugin/plugin_idlejitter.h index 62fabea16..6da78a085 100644 --- a/collectors/idlejitter.plugin/plugin_idlejitter.h +++ b/collectors/idlejitter.plugin/plugin_idlejitter.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGIN_IDLEJITTER_H #define NETDATA_PLUGIN_IDLEJITTER_H 1 -#include "../../daemon/common.h" +#include "daemon/common.h" #define NETDATA_PLUGIN_HOOK_IDLEJITTER \ { \ diff --git a/collectors/macos.plugin/macos_fw.c b/collectors/macos.plugin/macos_fw.c index d0b3e0fd2..1fa2d39c8 100644 --- a/collectors/macos.plugin/macos_fw.c +++ b/collectors/macos.plugin/macos_fw.c @@ -155,7 +155,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , "disk.io" , "Disk I/O Bandwidth" , "KiB/s" - , "macos" + , "macos.plugin" , "iokit" , 2000 , update_every @@ -193,7 +193,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , "disk.ops" , "Disk Completed I/O Operations" , "operations/s" - , "macos" + , "macos.plugin" , "iokit" , 2001 , update_every @@ -232,7 +232,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , "disk.util" , "Disk Utilization Time" , "% of time working" - , "macos" + , "macos.plugin" , "iokit" , 2004 , update_every @@ -270,7 +270,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , "disk.iotime" , "Disk Total I/O Time" , "milliseconds/s" - , "macos" + , "macos.plugin" , "iokit" , 2022 , update_every @@ -307,7 +307,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , "disk.await" , "Average Completed I/O Operation Time" , "milliseconds/operation" - , "macos" + , "macos.plugin" , "iokit" , 2005 , update_every @@ -338,7 +338,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , "disk.avgsz" , "Average Completed I/O Operation Bandwidth" , "KiB/operation" - , "macos" + , "macos.plugin" , "iokit" , 2006 , update_every @@ -369,7 +369,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , "disk.svctm" , "Average Service Time" , "milliseconds/operation" - , "macos" + , "macos.plugin" , "iokit" , 2007 , update_every @@ -411,7 +411,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , NULL , "Disk I/O" , "KiB/s" - , "macos" + , "macos.plugin" , "iokit" , 150 , update_every @@ -464,7 +464,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , "disk.space" , title , "GiB" - , "macos" + , "macos.plugin" , "iokit" , 2023 , update_every @@ -497,7 +497,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , "disk.inodes" , title , "inodes" - , "macos" + , "macos.plugin" , "iokit" , 2024 , update_every @@ -543,7 +543,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , "net.net" , "Bandwidth" , "kilobits/s" - , "macos" + , "macos.plugin" , "iokit" , 7000 , update_every @@ -571,7 +571,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , "net.packets" , "Packets" , "packets/s" - , "macos" + , "macos.plugin" , "iokit" , 7001 , update_every @@ -604,7 +604,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , "net.errors" , "Interface Errors" , "errors/s" - , "macos" + , "macos.plugin" , "iokit" , 7002 , update_every @@ -633,7 +633,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , "net.drops" , "Interface Drops" , 
"drops/s" - , "macos" + , "macos.plugin" , "iokit" , 7003 , update_every @@ -660,7 +660,7 @@ int do_macos_iokit(int update_every, usec_t dt) { , "net.events" , "Network Interface Events" , "events/s" - , "macos" + , "macos.plugin" , "iokit" , 7006 , update_every diff --git a/collectors/macos.plugin/macos_mach_smi.c b/collectors/macos.plugin/macos_mach_smi.c index 973b90a20..f2c4623c9 100644 --- a/collectors/macos.plugin/macos_mach_smi.c +++ b/collectors/macos.plugin/macos_mach_smi.c @@ -65,7 +65,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) { , "system.cpu" , "Total CPU utilization" , "percentage" - , "macos" + , "macos.plugin" , "mach_smi" , 100 , update_every @@ -119,7 +119,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) { , NULL , "System RAM" , "MiB" - , "macos" + , "macos.plugin" , "mach_smi" , 200 , update_every @@ -166,7 +166,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) { , NULL , "Swap I/O" , "KiB/s" - , "macos" + , "macos.plugin" , "mach_smi" , 250 , update_every @@ -197,7 +197,7 @@ int do_macos_mach_smi(int update_every, usec_t dt) { , NULL , "Memory Page Faults" , "faults/s" - , "macos" + , "macos.plugin" , "mach_smi" , NETDATA_CHART_PRIO_MEM_SYSTEM_PGFAULTS , update_every diff --git a/collectors/macos.plugin/macos_sysctl.c b/collectors/macos.plugin/macos_sysctl.c index 84f754185..b744ebbcc 100644 --- a/collectors/macos.plugin/macos_sysctl.c +++ b/collectors/macos.plugin/macos_sysctl.c @@ -240,7 +240,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "System Load Average" , "load" - , "macos" + , "macos.plugin" , "sysctl" , 100 , (update_every < MIN_LOADAVG_UPDATE_EVERY) ? MIN_LOADAVG_UPDATE_EVERY : update_every @@ -280,7 +280,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "System Swap" , "MiB" - , "macos" + , "macos.plugin" , "sysctl" , 201 , update_every @@ -342,7 +342,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv4 Bandwidth" , "kilobits/s" - , "macos" + , "macos.plugin" , "sysctl" , 500 , update_every @@ -392,7 +392,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv4 TCP Packets" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 2600 , update_every @@ -422,7 +422,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv4 TCP Errors" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 2700 , update_every @@ -455,7 +455,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv4 TCP Handshake Issues" , "events/s" - , "macos" + , "macos.plugin" , "sysctl" , 2900 , update_every @@ -496,7 +496,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "TCP Connection Aborts" , "connections/s" - , "macos" + , "macos.plugin" , "sysctl" , 3010 , update_every @@ -533,7 +533,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "TCP Out-Of-Order Queue" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 3050 , update_every @@ -567,7 +567,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "TCP SYN Cookies" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 3100 , update_every @@ -605,7 +605,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv4 ECN Statistics" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 8700 , update_every @@ -648,7 +648,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv4 UDP Packets" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 2601 , update_every @@ -678,7 +678,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , 
"IPv4 UDP Errors" , "events/s" - , "macos" + , "macos.plugin" , "sysctl" , 2701 , update_every @@ -739,7 +739,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv4 ICMP Packets" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 2602 , update_every @@ -768,7 +768,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv4 ICMP Errors" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 2603 , update_every @@ -801,7 +801,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv4 ICMP Messages" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 2604 , update_every @@ -850,7 +850,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv4 Packets" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 3000 , update_every @@ -884,7 +884,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv4 Fragments Sent" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 3010 , update_every @@ -917,7 +917,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv4 Fragments Reassembly" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 3011 , update_every @@ -950,7 +950,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv4 Errors" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 3002 , update_every @@ -1010,7 +1010,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv6 Packets" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 3000 , update_every @@ -1049,7 +1049,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv6 Fragments Sent" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 3010 , update_every @@ -1088,7 +1088,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv6 Fragments Reassembly" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 3011 , update_every @@ -1134,7 +1134,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv6 Errors" , "packets/s" - , "macos" + , "macos.plugin" , "sysctl" , 3002 , update_every @@ -1196,7 +1196,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv6 ICMP Messages" , "messages/s" - , "macos" + , "macos.plugin" , "sysctl" , 10000 , update_every @@ -1230,7 +1230,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv6 ICMP Redirects" , "redirects/s" - , "macos" + , "macos.plugin" , "sysctl" , 10050 , update_every @@ -1273,7 +1273,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv6 ICMP Errors" , "errors/s" - , "macos" + , "macos.plugin" , "sysctl" , 10100 , update_every @@ -1326,7 +1326,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv6 ICMP Echo" , "messages/s" - , "macos" + , "macos.plugin" , "sysctl" , 10200 , update_every @@ -1366,7 +1366,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv6 Router Messages" , "messages/s" - , "macos" + , "macos.plugin" , "sysctl" , 10400 , update_every @@ -1406,7 +1406,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv6 Neighbor Messages" , "messages/s" - , "macos" + , "macos.plugin" , "sysctl" , 10500 , update_every @@ -1452,7 +1452,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "IPv6 ICMP Types" , "messages/s" - , "macos" + , "macos.plugin" , "sysctl" , 10700 , update_every @@ -1506,7 +1506,7 @@ int do_macos_sysctl(int update_every, usec_t dt) { , NULL , "System Uptime" , "seconds" - , "macos" + , "macos.plugin" , "sysctl" , 1000 , update_every diff --git 
a/collectors/macos.plugin/plugin_macos.c b/collectors/macos.plugin/plugin_macos.c index 1a64ed81c..4566c09ee 100644 --- a/collectors/macos.plugin/plugin_macos.c +++ b/collectors/macos.plugin/plugin_macos.c @@ -92,7 +92,7 @@ void *macos_main(void *ptr) NULL, "Netdata macOS plugin CPU usage", "milliseconds/s", - "macos", + "macos.plugin", "stats", 132000, localhost->rrd_update_every, @@ -124,7 +124,7 @@ void *macos_main(void *ptr) NULL, "Netdata macOS plugin modules durations", "milliseconds/run", - "macos", + "macos.plugin", "stats", 132001, localhost->rrd_update_every, diff --git a/collectors/macos.plugin/plugin_macos.h b/collectors/macos.plugin/plugin_macos.h index 0815c59c3..a66ec0852 100644 --- a/collectors/macos.plugin/plugin_macos.h +++ b/collectors/macos.plugin/plugin_macos.h @@ -4,7 +4,7 @@ #ifndef NETDATA_PLUGIN_MACOS_H #define NETDATA_PLUGIN_MACOS_H 1 -#include "../../daemon/common.h" +#include "daemon/common.h" #if (TARGET_OS == OS_MACOS) diff --git a/collectors/nfacct.plugin/plugin_nfacct.c b/collectors/nfacct.plugin/plugin_nfacct.c index acdd0586d..7876c231f 100644 --- a/collectors/nfacct.plugin/plugin_nfacct.c +++ b/collectors/nfacct.plugin/plugin_nfacct.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../../libnetdata/libnetdata.h" +#include "libnetdata/libnetdata.h" #include #include #include diff --git a/collectors/node.d.plugin/named/named.node.js b/collectors/node.d.plugin/named/named.node.js index 04cded8bd..668a044c7 100644 --- a/collectors/node.d.plugin/named/named.node.js +++ b/collectors/node.d.plugin/named/named.node.js @@ -233,7 +233,7 @@ var named = { x = keys[len]; // we maintain an index of the values found - // mapping them to objects splitted + // mapping them to objects split look = named.lookups.nsstats[x]; if(typeof look === 'undefined') { @@ -418,7 +418,7 @@ var named = { var y = ykeys[ylen]; // we maintain an index of the values found - // mapping them to objects splitted + // mapping them to objects split look = named.lookups.resolver_stats[y]; if(typeof look === 'undefined') { diff --git a/collectors/perf.plugin/perf_plugin.c b/collectors/perf.plugin/perf_plugin.c index 135e77984..151ba9078 100644 --- a/collectors/perf.plugin/perf_plugin.c +++ b/collectors/perf.plugin/perf_plugin.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../../libnetdata/libnetdata.h" +#include "libnetdata/libnetdata.h" #include @@ -9,10 +9,11 @@ // Hardware counters #define NETDATA_CHART_PRIO_PERF_CPU_CYCLES 8800 #define NETDATA_CHART_PRIO_PERF_INSTRUCTIONS 8801 -#define NETDATA_CHART_PRIO_PERF_BRANCH_INSTRUCTIONS 8802 -#define NETDATA_CHART_PRIO_PERF_CACHE 8803 -#define NETDATA_CHART_PRIO_PERF_BUS_CYCLES 8804 -#define NETDATA_CHART_PRIO_PERF_FRONT_BACK_CYCLES 8805 +#define NETDATA_CHART_PRIO_PERF_IPC 8802 +#define NETDATA_CHART_PRIO_PERF_BRANCH_INSTRUCTIONS 8803 +#define NETDATA_CHART_PRIO_PERF_CACHE 8804 +#define NETDATA_CHART_PRIO_PERF_BUS_CYCLES 8805 +#define NETDATA_CHART_PRIO_PERF_FRONT_BACK_CYCLES 8806 // Software counters #define NETDATA_CHART_PRIO_PERF_MIGRATIONS 8810 @@ -436,6 +437,7 @@ static void perf_send_metrics() { static int // Hardware counters cpu_cycles_chart_generated = 0, instructions_chart_generated = 0, + ipc_chart_generated = 0, branch_chart_generated = 0, cache_chart_generated = 0, bus_cycles_chart_generated = 0, @@ -461,7 +463,7 @@ static void perf_send_metrics() { if(unlikely(!cpu_cycles_chart_generated)) { cpu_cycles_chart_generated = 1; - printf("CHART %s.%s '' 'CPU cycles' 'cycles/s' %s '' line %d %d 
%s\n" + printf("CHART %s.%s '' 'CPU cycles' 'cycles/s' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "cpu_cycles" , RRD_FAMILY_HW @@ -501,7 +503,7 @@ static void perf_send_metrics() { if(unlikely(!instructions_chart_generated)) { instructions_chart_generated = 1; - printf("CHART %s.%s '' 'Instructions' 'instructions/s' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'Instructions' 'instructions/s' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "instructions" , RRD_FAMILY_HW @@ -527,11 +529,43 @@ static void perf_send_metrics() { // ------------------------------------------------------------------------ + if(likely(perf_events[EV_ID_INSTRUCTIONS].updated) && likely(perf_events[EV_ID_CPU_CYCLES].updated)) { + if(unlikely(!ipc_chart_generated)) { + ipc_chart_generated = 1; + + printf("CHART %s.%s '' '%s' 'instructions/cycle' %s '' line %d %d '' %s\n" + , RRD_TYPE_PERF + , "instructions_per_cycle" + , "Instructions per Cycle(IPC)" + , RRD_FAMILY_HW + , NETDATA_CHART_PRIO_PERF_IPC + , update_every + , PLUGIN_PERF_NAME + ); + printf("DIMENSION %s '' absolute 1 100\n", "ipc"); + } + + printf("BEGIN %s.%s\n" + , RRD_TYPE_PERF + , "instructions_per_cycle" + ); + + calculated_number result = ((calculated_number)perf_events[EV_ID_INSTRUCTIONS].value / + (calculated_number)perf_events[EV_ID_CPU_CYCLES].value) * 100.0; + printf("SET %s = %lld\n" + , "ipc" + , (collected_number) result + ); + printf("END\n"); + } + + // ------------------------------------------------------------------------ + if(likely(perf_events[EV_ID_BRANCH_INSTRUCTIONS].updated || perf_events[EV_ID_BRANCH_MISSES].updated)) { if(unlikely(!branch_chart_generated)) { branch_chart_generated = 1; - printf("CHART %s.%s '' 'Branch instructions' 'instructions/s' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'Branch instructions' 'instructions/s' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "branch_instructions" , RRD_FAMILY_HW @@ -571,7 +605,7 @@ static void perf_send_metrics() { if(unlikely(!cache_chart_generated)) { cache_chart_generated = 1; - printf("CHART %s.%s '' 'Cache operations' 'operations/s' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'Cache operations' 'operations/s' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "cache" , RRD_FAMILY_HW @@ -611,7 +645,7 @@ static void perf_send_metrics() { if(unlikely(!bus_cycles_chart_generated)) { bus_cycles_chart_generated = 1; - printf("CHART %s.%s '' 'Bus cycles' 'cycles/s' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'Bus cycles' 'cycles/s' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "bus_cycles" , RRD_FAMILY_HW @@ -641,7 +675,7 @@ static void perf_send_metrics() { if(unlikely(!stalled_cycles_chart_generated)) { stalled_cycles_chart_generated = 1; - printf("CHART %s.%s '' 'Stalled frontend and backend cycles' 'cycles/s' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'Stalled frontend and backend cycles' 'cycles/s' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "stalled_cycles" , RRD_FAMILY_HW @@ -681,7 +715,7 @@ static void perf_send_metrics() { if(unlikely(!migrations_chart_generated)) { migrations_chart_generated = 1; - printf("CHART %s.%s '' 'CPU migrations' 'migrations' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'CPU migrations' 'migrations' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "migrations" , RRD_FAMILY_SW @@ -711,7 +745,7 @@ static void perf_send_metrics() { if(unlikely(!alignment_chart_generated)) { alignment_chart_generated = 1; - printf("CHART %s.%s '' 'Alignment faults' 'faults' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'Alignment faults' 'faults' %s '' line %d 
%d '' %s\n" , RRD_TYPE_PERF , "alignment_faults" , RRD_FAMILY_SW @@ -741,7 +775,7 @@ static void perf_send_metrics() { if(unlikely(!emulation_chart_generated)) { emulation_chart_generated = 1; - printf("CHART %s.%s '' 'Emulation faults' 'faults' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'Emulation faults' 'faults' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "emulation_faults" , RRD_FAMILY_SW @@ -772,7 +806,7 @@ static void perf_send_metrics() { if(unlikely(!L1D_chart_generated)) { L1D_chart_generated = 1; - printf("CHART %s.%s '' 'L1D cache operations' 'events/s' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'L1D cache operations' 'events/s' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "l1d_cache" , RRD_FAMILY_CACHE @@ -828,7 +862,7 @@ static void perf_send_metrics() { if(unlikely(!L1D_prefetch_chart_generated)) { L1D_prefetch_chart_generated = 1; - printf("CHART %s.%s '' 'L1D prefetch cache operations' 'prefetches/s' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'L1D prefetch cache operations' 'prefetches/s' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "l1d_cache_prefetch" , RRD_FAMILY_CACHE @@ -858,7 +892,7 @@ static void perf_send_metrics() { if(unlikely(!L1I_chart_generated)) { L1I_chart_generated = 1; - printf("CHART %s.%s '' 'L1I cache operations' 'events/s' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'L1I cache operations' 'events/s' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "l1i_cache" , RRD_FAMILY_CACHE @@ -899,7 +933,7 @@ static void perf_send_metrics() { if(unlikely(!LL_chart_generated)) { LL_chart_generated = 1; - printf("CHART %s.%s '' 'LL cache operations' 'events/s' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'LL cache operations' 'events/s' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "ll_cache" , RRD_FAMILY_CACHE @@ -956,7 +990,7 @@ static void perf_send_metrics() { if(unlikely(!DTLB_chart_generated)) { DTLB_chart_generated = 1; - printf("CHART %s.%s '' 'DTLB cache operations' 'events/s' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'DTLB cache operations' 'events/s' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "dtlb_cache" , RRD_FAMILY_CACHE @@ -1012,7 +1046,7 @@ static void perf_send_metrics() { if(unlikely(!ITLB_chart_generated)) { ITLB_chart_generated = 1; - printf("CHART %s.%s '' 'ITLB cache operations' 'events/s' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'ITLB cache operations' 'events/s' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "itlb_cache" , RRD_FAMILY_CACHE @@ -1052,7 +1086,7 @@ static void perf_send_metrics() { if(unlikely(!PBU_chart_generated)) { PBU_chart_generated = 1; - printf("CHART %s.%s '' 'PBU cache operations' 'events/s' %s '' line %d %d %s\n" + printf("CHART %s.%s '' 'PBU cache operations' 'events/s' %s '' line %d %d '' %s\n" , RRD_TYPE_PERF , "pbu_cache" , RRD_FAMILY_CACHE diff --git a/collectors/plugins.d/plugins_d.h b/collectors/plugins.d/plugins_d.h index fd99b3584..b9e30e12b 100644 --- a/collectors/plugins.d/plugins_d.h +++ b/collectors/plugins.d/plugins_d.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGINS_D_H #define NETDATA_PLUGINS_D_H 1 -#include "../../daemon/common.h" +#include "daemon/common.h" #define NETDATA_PLUGIN_HOOK_PLUGINSD \ { \ diff --git a/collectors/plugins.d/pluginsd_parser.c b/collectors/plugins.d/pluginsd_parser.c index 2d0788d80..c0dcedb67 100644 --- a/collectors/plugins.d/pluginsd_parser.c +++ b/collectors/plugins.d/pluginsd_parser.c @@ -152,6 +152,24 @@ PARSER_RC pluginsd_label_action(void *user, char *key, char *value, LABEL_SOURCE return PARSER_RC_OK; } +PARSER_RC pluginsd_clabel_action(void *user, char 
*key, char *value, LABEL_SOURCE source) +{ + ((PARSER_USER_OBJECT *) user)->chart_labels = add_label_to_list(((PARSER_USER_OBJECT *) user)->chart_labels, key, value, source); + + return PARSER_RC_OK; +} + +PARSER_RC pluginsd_clabel_commit_action(void *user, RRDHOST *host, struct label *new_labels) +{ + RRDSET *st = ((PARSER_USER_OBJECT *)user)->st; + if (unlikely(!st)) { + error("requested CLABEL_COMMIT on host '%s', without a BEGIN, ignoring it.", host->hostname); + return PARSER_RC_OK; + } + + rrdset_update_labels(st, new_labels); + return PARSER_RC_OK; +} PARSER_RC pluginsd_overwrite_action(void *user, RRDHOST *host, struct label *new_labels) { @@ -560,6 +578,38 @@ PARSER_RC pluginsd_label(char **words, void *user, PLUGINSD_ACTION *plugins_act return PARSER_RC_OK; } +PARSER_RC pluginsd_clabel(char **words, void *user, PLUGINSD_ACTION *plugins_action) +{ + if (!words[1] || !words[2] || !words[3]) { + error("Ignoring malformed or empty CHART LABEL command."); + return PARSER_RC_OK; + } + + if (plugins_action->clabel_action) { + PARSER_RC rc = plugins_action->clabel_action(user, words[1], words[2], strtol(words[3], NULL, 10)); + return rc; + } + + return PARSER_RC_OK; +} + +PARSER_RC pluginsd_clabel_commit(char **words, void *user, PLUGINSD_ACTION *plugins_action) +{ + UNUSED(words); + + RRDHOST *host = ((PARSER_USER_OBJECT *) user)->host; + debug(D_PLUGINSD, "requested to commit chart labels"); + + struct label *chart_labels = ((PARSER_USER_OBJECT *)user)->chart_labels; + ((PARSER_USER_OBJECT *)user)->chart_labels = NULL; + + if (plugins_action->clabel_commit_action) { + return plugins_action->clabel_commit_action(user, host, chart_labels); + } + + return PARSER_RC_OK; +} + PARSER_RC pluginsd_overwrite(char **words, void *user, PLUGINSD_ACTION *plugins_action) { UNUSED(words); diff --git a/collectors/plugins.d/pluginsd_parser.h b/collectors/plugins.d/pluginsd_parser.h index 61e9c9bab..fb4a45b7a 100644 --- a/collectors/plugins.d/pluginsd_parser.h +++ b/collectors/plugins.d/pluginsd_parser.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGINSD_PARSER_H #define NETDATA_PLUGINSD_PARSER_H -#include "../../parser/parser.h" +#include "parser/parser.h" typedef struct parser_user_object { @@ -14,6 +14,7 @@ typedef struct parser_user_object { struct plugind *cd; int trust_durations; struct label *new_labels; + struct label *chart_labels; size_t count; int enabled; uint8_t st_exists; @@ -35,6 +36,8 @@ extern PARSER_RC pluginsd_dimension_action(void *user, RRDSET *st, char *id, cha long multiplier, long divisor, char *options, RRD_ALGORITHM algorithm_type); extern PARSER_RC pluginsd_label_action(void *user, char *key, char *value, LABEL_SOURCE source); extern PARSER_RC pluginsd_overwrite_action(void *user, RRDHOST *host, struct label *new_labels); +extern PARSER_RC pluginsd_clabel_commit_action(void *user, RRDHOST *host, struct label *new_labels); +extern PARSER_RC pluginsd_clabel_action(void *user, char *key, char *value, LABEL_SOURCE source); #endif //NETDATA_PLUGINSD_PARSER_H diff --git a/collectors/proc.plugin/README.md b/collectors/proc.plugin/README.md index 7fff1ec0a..a9ce2dfa5 100644 --- a/collectors/proc.plugin/README.md +++ b/collectors/proc.plugin/README.md @@ -553,7 +553,7 @@ Each port will have its counters metrics monitored, grouped in the following cha - **Errors Statistics** Many errors counters are provided, presenting statistics for: - - Packets: malformated, sent/received discarded by card/switch, missing ressource + - Packets: malformed, sent/received discarded by card/switch, missing 
resource - Link: downed, recovered, integrity error, minor error - Other events: Tick Wait to send, buffer overrun diff --git a/collectors/proc.plugin/plugin_proc.h b/collectors/proc.plugin/plugin_proc.h index b0d60cd86..18714b548 100644 --- a/collectors/proc.plugin/plugin_proc.h +++ b/collectors/proc.plugin/plugin_proc.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGIN_PROC_H #define NETDATA_PLUGIN_PROC_H 1 -#include "../../daemon/common.h" +#include "daemon/common.h" #if (TARGET_OS == OS_LINUX) diff --git a/collectors/proc.plugin/proc_interrupts.c b/collectors/proc.plugin/proc_interrupts.c index 73b117179..2db980a03 100644 --- a/collectors/proc.plugin/proc_interrupts.c +++ b/collectors/proc.plugin/proc_interrupts.c @@ -65,7 +65,7 @@ int do_proc_interrupts(int update_every, usec_t dt) { if(unlikely(!ff)) { char filename[FILENAME_MAX + 1]; snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/interrupts"); - ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_INTERRUPTS, "filename to monitor", filename), " \t", PROCFILE_FLAG_DEFAULT); + ff = procfile_open(config_get(CONFIG_SECTION_PLUGIN_PROC_INTERRUPTS, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT); } if(unlikely(!ff)) return 1; diff --git a/collectors/proc.plugin/proc_mdstat.c b/collectors/proc.plugin/proc_mdstat.c index 46f0134e6..bdc298d6b 100644 --- a/collectors/proc.plugin/proc_mdstat.c +++ b/collectors/proc.plugin/proc_mdstat.c @@ -560,8 +560,8 @@ int do_proc_mdstat(int update_every, usec_t dt) id, NULL, family, - "md.rate", - "Approximate Time Unit Finish", + "md.expected_time_until_operation_finish", + "Approximate Time Until Finish", "seconds", PLUGIN_PROC_NAME, PLUGIN_PROC_MODULE_MDSTAT_NAME, @@ -591,7 +591,7 @@ int do_proc_mdstat(int update_every, usec_t dt) id, NULL, family, - "md.rate", + "md.operation_speed", "Operation Speed", "KiB/s", PLUGIN_PROC_NAME, diff --git a/collectors/proc.plugin/proc_net_dev.c b/collectors/proc.plugin/proc_net_dev.c index bbf8a590a..e06da69aa 100644 --- a/collectors/proc.plugin/proc_net_dev.c +++ b/collectors/proc.plugin/proc_net_dev.c @@ -979,7 +979,7 @@ int do_proc_net_dev(int update_every, usec_t dt) { , NULL , d->chart_family , "net.carrier" - , "Inteface Physical Link State" + , "Interface Physical Link State" , "state" , PLUGIN_PROC_NAME , PLUGIN_PROC_MODULE_NETDEV_NAME diff --git a/collectors/proc.plugin/proc_net_rpc_nfsd.c b/collectors/proc.plugin/proc_net_rpc_nfsd.c index 29ef7a394..48f218e44 100644 --- a/collectors/proc.plugin/proc_net_rpc_nfsd.c +++ b/collectors/proc.plugin/proc_net_rpc_nfsd.c @@ -226,7 +226,7 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { (void)dt; static procfile *ff = NULL; static int do_rc = -1, do_fh = -1, do_io = -1, do_th = -1, do_ra = -1, do_net = -1, do_rpc = -1, do_proc2 = -1, do_proc3 = -1, do_proc4 = -1, do_proc4ops = -1; - static int ra_warning = 0, th_warning = 0, proc2_warning = 0, proc3_warning = 0, proc4_warning = 0, proc4ops_warning = 0; + static int ra_warning = 0, proc2_warning = 0, proc3_warning = 0, proc4_warning = 0, proc4ops_warning = 0; if(unlikely(!ff)) { char filename[FILENAME_MAX + 1]; @@ -270,9 +270,9 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { char *type; unsigned long long rc_hits = 0, rc_misses = 0, rc_nocache = 0; - unsigned long long fh_stale = 0, fh_total_lookups = 0, fh_anonymous_lookups = 0, fh_dir_not_in_dcache = 0, fh_non_dir_not_in_dcache = 0; + unsigned long long fh_stale = 0; unsigned long long io_read = 0, io_write = 0; - unsigned long long th_threads = 0, 
th_fullcnt = 0, th_hist10 = 0, th_hist20 = 0, th_hist30 = 0, th_hist40 = 0, th_hist50 = 0, th_hist60 = 0, th_hist70 = 0, th_hist80 = 0, th_hist90 = 0, th_hist100 = 0; + unsigned long long th_threads = 0; unsigned long long ra_size = 0, ra_hist10 = 0, ra_hist20 = 0, ra_hist30 = 0, ra_hist40 = 0, ra_hist50 = 0, ra_hist60 = 0, ra_hist70 = 0, ra_hist80 = 0, ra_hist90 = 0, ra_hist100 = 0, ra_none = 0; unsigned long long net_count = 0, net_udp_count = 0, net_tcp_count = 0, net_tcp_connections = 0; unsigned long long rpc_calls = 0, rpc_bad_format = 0, rpc_bad_auth = 0, rpc_bad_client = 0; @@ -304,13 +304,10 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { } fh_stale = str2ull(procfile_lineword(ff, l, 1)); - fh_total_lookups = str2ull(procfile_lineword(ff, l, 2)); - fh_anonymous_lookups = str2ull(procfile_lineword(ff, l, 3)); - fh_dir_not_in_dcache = str2ull(procfile_lineword(ff, l, 4)); - fh_non_dir_not_in_dcache = str2ull(procfile_lineword(ff, l, 5)); + + // other file handler metrics were never used and are always zero - unsigned long long sum = fh_stale + fh_total_lookups + fh_anonymous_lookups + fh_dir_not_in_dcache + fh_non_dir_not_in_dcache; - if(sum == 0ULL) do_fh = -1; + if(fh_stale == 0ULL) do_fh = -1; else do_fh = 2; } else if(do_io == 1 && strcmp(type, "io") == 0) { @@ -333,29 +330,11 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { } th_threads = str2ull(procfile_lineword(ff, l, 1)); - th_fullcnt = str2ull(procfile_lineword(ff, l, 2)); - th_hist10 = (unsigned long long)(atof(procfile_lineword(ff, l, 3)) * 1000.0); - th_hist20 = (unsigned long long)(atof(procfile_lineword(ff, l, 4)) * 1000.0); - th_hist30 = (unsigned long long)(atof(procfile_lineword(ff, l, 5)) * 1000.0); - th_hist40 = (unsigned long long)(atof(procfile_lineword(ff, l, 6)) * 1000.0); - th_hist50 = (unsigned long long)(atof(procfile_lineword(ff, l, 7)) * 1000.0); - th_hist60 = (unsigned long long)(atof(procfile_lineword(ff, l, 8)) * 1000.0); - th_hist70 = (unsigned long long)(atof(procfile_lineword(ff, l, 9)) * 1000.0); - th_hist80 = (unsigned long long)(atof(procfile_lineword(ff, l, 10)) * 1000.0); - th_hist90 = (unsigned long long)(atof(procfile_lineword(ff, l, 11)) * 1000.0); - th_hist100 = (unsigned long long)(atof(procfile_lineword(ff, l, 12)) * 1000.0); - - // threads histogram has been disabled on recent kernels - // http://permalink.gmane.org/gmane.linux.nfs/24528 - unsigned long long sum = th_hist10 + th_hist20 + th_hist30 + th_hist40 + th_hist50 + th_hist60 + th_hist70 + th_hist80 + th_hist90 + th_hist100; - if(sum == 0ULL) { - if(!th_warning) { - info("Disabling /proc/net/rpc/nfsd threads histogram. It seems unused on this machine. 
It will be enabled automatically when found with data in it."); - th_warning = 1; - } - do_th = -1; - } - else do_th = 2; + + // thread histogram has been disabled since 2009 (kernel 2.6.30) + // https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=8bbfa9f3889b643fc7de82c0c761ef17097f8faf + + do_th = 2; } else if(do_ra == 1 && strcmp(type, "ra") == 0) { if(unlikely(words < 13)) { @@ -363,6 +342,9 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { continue; } + // readahead cache has been disabled since 2019 (kernel 5.4) + // https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/fs/nfsd/vfs.c?id=501cb1849f865960501d19d54e6a5af306f9b6fd + ra_size = str2ull(procfile_lineword(ff, l, 1)); ra_hist10 = str2ull(procfile_lineword(ff, l, 2)); ra_hist20 = str2ull(procfile_lineword(ff, l, 3)); @@ -408,9 +390,9 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { } rpc_calls = str2ull(procfile_lineword(ff, l, 1)); - rpc_bad_format = str2ull(procfile_lineword(ff, l, 2)); - rpc_bad_auth = str2ull(procfile_lineword(ff, l, 3)); - rpc_bad_client = str2ull(procfile_lineword(ff, l, 4)); + rpc_bad_format = str2ull(procfile_lineword(ff, l, 3)); + rpc_bad_auth = str2ull(procfile_lineword(ff, l, 4)); + rpc_bad_client = str2ull(procfile_lineword(ff, l, 5)); unsigned long long sum = rpc_calls + rpc_bad_format + rpc_bad_auth + rpc_bad_client; if(sum == 0ULL) do_rpc = -1; @@ -542,11 +524,7 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { if(do_fh == 2) { static RRDSET *st = NULL; - static RRDDIM *rd_stale = NULL, - *rd_total_lookups = NULL, - *rd_anonymous_lookups = NULL, - *rd_dir_not_in_dcache = NULL, - *rd_non_dir_not_in_dcache = NULL; + static RRDDIM *rd_stale = NULL; if(unlikely(!st)) { st = rrdset_create_localhost( @@ -566,18 +544,10 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { rrdset_flag_set(st, RRDSET_FLAG_DETAIL); rd_stale = rrddim_add(st, "stale", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - rd_total_lookups = rrddim_add(st, "total_lookups", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_anonymous_lookups = rrddim_add(st, "anonymous_lookups", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_dir_not_in_dcache = rrddim_add(st, "dir_not_in_dcache", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); - rd_non_dir_not_in_dcache = rrddim_add(st, "non_dir_not_in_dcache", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL); } else rrdset_next(st); rrddim_set_by_pointer(st, rd_stale, fh_stale); - rrddim_set_by_pointer(st, rd_total_lookups, fh_total_lookups); - rrddim_set_by_pointer(st, rd_anonymous_lookups, fh_anonymous_lookups); - rrddim_set_by_pointer(st, rd_dir_not_in_dcache, fh_dir_not_in_dcache); - rrddim_set_by_pointer(st, rd_non_dir_not_in_dcache, fh_non_dir_not_in_dcache); rrdset_done(st); } @@ -617,116 +587,32 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { // -------------------------------------------------------------------- if(do_th == 2) { - { - static RRDSET *st = NULL; - static RRDDIM *rd_threads = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "threads" - , NULL - , "threads" - , NULL - , "NFS Server Threads" - , "threads" - , PLUGIN_PROC_NAME - , PLUGIN_PROC_MODULE_NFSD_NAME - , NETDATA_CHART_PRIO_NFSD_THREADS - , update_every - , RRDSET_TYPE_LINE - ); - - rd_threads = rrddim_add(st, "threads", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_threads, th_threads); - rrdset_done(st); - } + static RRDSET *st = NULL; + static RRDDIM *rd_threads = NULL; - { - static 
RRDSET *st = NULL; - static RRDDIM *rd_full_count = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "threads_fullcnt" - , NULL - , "threads" - , NULL - , "NFS Server Threads Full Count" - , "events" - , PLUGIN_PROC_NAME - , PLUGIN_PROC_MODULE_NFSD_NAME - , NETDATA_CHART_PRIO_NFSD_THREADS_FULLCNT - , update_every - , RRDSET_TYPE_LINE - ); - - rd_full_count = rrddim_add(st, "full_count", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL); - } - else rrdset_next(st); + if(unlikely(!st)) { + st = rrdset_create_localhost( + "nfsd" + , "threads" + , NULL + , "threads" + , NULL + , "NFS Server Threads" + , "threads" + , PLUGIN_PROC_NAME + , PLUGIN_PROC_MODULE_NFSD_NAME + , NETDATA_CHART_PRIO_NFSD_THREADS + , update_every + , RRDSET_TYPE_LINE + ); - rrddim_set_by_pointer(st, rd_full_count, th_fullcnt); - rrdset_done(st); + rd_threads = rrddim_add(st, "threads", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); } + else rrdset_next(st); + + rrddim_set_by_pointer(st, rd_threads, th_threads); + rrdset_done(st); - { - static RRDSET *st = NULL; - static RRDDIM *rd_th_hist10 = NULL, - *rd_th_hist20 = NULL, - *rd_th_hist30 = NULL, - *rd_th_hist40 = NULL, - *rd_th_hist50 = NULL, - *rd_th_hist60 = NULL, - *rd_th_hist70 = NULL, - *rd_th_hist80 = NULL, - *rd_th_hist90 = NULL, - *rd_th_hist100 = NULL; - - if(unlikely(!st)) { - st = rrdset_create_localhost( - "nfsd" - , "threads_histogram" - , NULL - , "threads" - , NULL - , "NFS Server Threads Usage Histogram" - , "percentage" - , PLUGIN_PROC_NAME - , PLUGIN_PROC_MODULE_NFSD_NAME - , NETDATA_CHART_PRIO_NFSD_THREADS_HISTOGRAM - , update_every - , RRDSET_TYPE_LINE - ); - - rd_th_hist10 = rrddim_add(st, "0%-10%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist20 = rrddim_add(st, "10%-20%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist30 = rrddim_add(st, "20%-30%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist40 = rrddim_add(st, "30%-40%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist50 = rrddim_add(st, "40%-50%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist60 = rrddim_add(st, "50%-60%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist70 = rrddim_add(st, "60%-70%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist80 = rrddim_add(st, "70%-80%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist90 = rrddim_add(st, "80%-90%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - rd_th_hist100 = rrddim_add(st, "90%-100%", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set_by_pointer(st, rd_th_hist10, th_hist10); - rrddim_set_by_pointer(st, rd_th_hist20, th_hist20); - rrddim_set_by_pointer(st, rd_th_hist30, th_hist30); - rrddim_set_by_pointer(st, rd_th_hist40, th_hist40); - rrddim_set_by_pointer(st, rd_th_hist50, th_hist50); - rrddim_set_by_pointer(st, rd_th_hist60, th_hist60); - rrddim_set_by_pointer(st, rd_th_hist70, th_hist70); - rrddim_set_by_pointer(st, rd_th_hist80, th_hist80); - rrddim_set_by_pointer(st, rd_th_hist90, th_hist90); - rrddim_set_by_pointer(st, rd_th_hist100, th_hist100); - rrdset_done(st); - } } // -------------------------------------------------------------------- @@ -978,7 +864,7 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) { "nfsd" , "proc4ops" , NULL - , "nfsv2ops" + , "nfsv4ops" , NULL , "NFS v4 Server Operations" , "operations/s" diff --git a/collectors/proc.plugin/proc_net_stat_synproxy.c b/collectors/proc.plugin/proc_net_stat_synproxy.c index f5030f99c..c74c5374d 100644 --- a/collectors/proc.plugin/proc_net_stat_synproxy.c +++ 
b/collectors/proc.plugin/proc_net_stat_synproxy.c @@ -10,11 +10,10 @@ int do_proc_net_stat_synproxy(int update_every, usec_t dt) { (void)dt; - static int do_entries = -1, do_cookies = -1, do_syns = -1, do_reopened = -1; + static int do_cookies = -1, do_syns = -1, do_reopened = -1; static procfile *ff = NULL; - if(unlikely(do_entries == -1)) { - do_entries = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY entries", CONFIG_BOOLEAN_AUTO); + if(unlikely(do_cookies == -1)) { do_cookies = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY cookies", CONFIG_BOOLEAN_AUTO); do_syns = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY SYN received", CONFIG_BOOLEAN_AUTO); do_reopened = config_get_boolean_ondemand("plugin:proc:/proc/net/stat/synproxy", "SYNPROXY connections reopened", CONFIG_BOOLEAN_AUTO); @@ -39,7 +38,7 @@ int do_proc_net_stat_synproxy(int update_every, usec_t dt) { return 1; } - unsigned long long entries = 0, syn_received = 0, cookie_invalid = 0, cookie_valid = 0, cookie_retrans = 0, conn_reopened = 0; + unsigned long long syn_received = 0, cookie_invalid = 0, cookie_valid = 0, cookie_retrans = 0, conn_reopened = 0; // synproxy gives its values per CPU for(l = 1; l < lines ;l++) { @@ -47,7 +46,6 @@ int do_proc_net_stat_synproxy(int update_every, usec_t dt) { if(unlikely(words < 6)) continue; - entries += strtoull(procfile_lineword(ff, l, 0), NULL, 16); syn_received += strtoull(procfile_lineword(ff, l, 1), NULL, 16); cookie_invalid += strtoull(procfile_lineword(ff, l, 2), NULL, 16); cookie_valid += strtoull(procfile_lineword(ff, l, 3), NULL, 16); @@ -55,38 +53,7 @@ int do_proc_net_stat_synproxy(int update_every, usec_t dt) { conn_reopened += strtoull(procfile_lineword(ff, l, 5), NULL, 16); } - unsigned long long events = entries + syn_received + cookie_invalid + cookie_valid + cookie_retrans + conn_reopened; - - // -------------------------------------------------------------------- - - if(do_entries == CONFIG_BOOLEAN_YES || (do_entries == CONFIG_BOOLEAN_AUTO && - (events || netdata_zero_metrics_enabled == CONFIG_BOOLEAN_YES))) { - do_entries = CONFIG_BOOLEAN_YES; - - static RRDSET *st = NULL; - if(unlikely(!st)) { - st = rrdset_create_localhost( - RRD_TYPE_NET_STAT_NETFILTER - , RRD_TYPE_NET_STAT_SYNPROXY "_entries" - , NULL - , RRD_TYPE_NET_STAT_SYNPROXY - , NULL - , "SYNPROXY Entries Used" - , "entries" - , PLUGIN_PROC_NAME - , PLUGIN_PROC_MODULE_SYNPROXY_NAME - , NETDATA_CHART_PRIO_SYNPROXY_ENTRIES - , update_every - , RRDSET_TYPE_LINE - ); - - rrddim_add(st, "entries", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE); - } - else rrdset_next(st); - - rrddim_set(st, "entries", entries); - rrdset_done(st); - } + unsigned long long events = syn_received + cookie_invalid + cookie_valid + cookie_retrans + conn_reopened; // -------------------------------------------------------------------- diff --git a/collectors/proc.plugin/proc_pagetypeinfo.c b/collectors/proc.plugin/proc_pagetypeinfo.c index 3ce292227..e1026cf51 100644 --- a/collectors/proc.plugin/proc_pagetypeinfo.c +++ b/collectors/proc.plugin/proc_pagetypeinfo.c @@ -139,7 +139,7 @@ int do_proc_pagetypeinfo(int update_every, usec_t dt) { return 1; } - // 4th line is the "Free pages count per migrate type at order". Just substract these 8 words. + // 4th line is the "Free pages count per migrate type at order". Just subtract these 8 words. 
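/*
 * A sketch of the layout that comment refers to (format as observed on
 * recent kernels; the number of order columns is an assumption and
 * varies with MAX_ORDER):
 *
 *   Free pages count per migrate type at order    0    1   ...   10
 *   \____________ 8 header words ____________/    \_ one per order _/
 *
 * A 19-word line therefore describes 19 - 8 = 11 page orders, and a
 * line shorter than 9 words carries no order columns at all, which is
 * what the pageorders_cnt < 9 guard below rejects.
 */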
pageorders_cnt = procfile_linewords(ff, 3); if (pageorders_cnt < 9) { error("PLUGIN: PROC_PAGETYPEINFO: Unable to parse Line 4 of %s", ff_path); diff --git a/collectors/proc.plugin/proc_spl_kstat_zfs.c b/collectors/proc.plugin/proc_spl_kstat_zfs.c index ce95c2d35..fedc03436 100644 --- a/collectors/proc.plugin/proc_spl_kstat_zfs.c +++ b/collectors/proc.plugin/proc_spl_kstat_zfs.c @@ -6,7 +6,7 @@ #define ZFS_PROC_ARCSTATS "/proc/spl/kstat/zfs/arcstats" #define ZFS_PROC_POOLS "/proc/spl/kstat/zfs" -#define STATE_SIZE 8 +#define STATE_SIZE 9 #define MAX_CHART_ID 256 extern struct arcstats arcstats; diff --git a/collectors/proc.plugin/sys_class_infiniband.c b/collectors/proc.plugin/sys_class_infiniband.c index 69e27f81e..1a75ce13f 100644 --- a/collectors/proc.plugin/sys_class_infiniband.c +++ b/collectors/proc.plugin/sys_class_infiniband.c @@ -37,7 +37,7 @@ GEN(port_rcv_constraint_errors, errors, "Pkts rcvd discarded ", 1, __VA_ARGS__) \ GEN(port_xmit_discards, errors, "Pkts sent discarded", 1, __VA_ARGS__) \ GEN(port_xmit_wait, errors, "Tick Wait to send", 1, __VA_ARGS__) \ - GEN(VL15_dropped, errors, "Pkts missed ressource", 1, __VA_ARGS__) \ + GEN(VL15_dropped, errors, "Pkts missed resource", 1, __VA_ARGS__) \ GEN(excessive_buffer_overrun_errors, errors, "Buffer overrun", 1, __VA_ARGS__) \ GEN(link_downed, errors, "Link Downed", 1, __VA_ARGS__) \ GEN(link_error_recovery, errors, "Link recovered", 1, __VA_ARGS__) \ diff --git a/collectors/proc.plugin/zfs_common.h b/collectors/proc.plugin/zfs_common.h index 148f9e474..9d61de2f3 100644 --- a/collectors/proc.plugin/zfs_common.h +++ b/collectors/proc.plugin/zfs_common.h @@ -3,7 +3,7 @@ #ifndef NETDATA_ZFS_COMMON_H #define NETDATA_ZFS_COMMON_H 1 -#include "../../daemon/common.h" +#include "daemon/common.h" #define ZFS_FAMILY_SIZE "size" #define ZFS_FAMILY_EFFICIENCY "efficiency" diff --git a/collectors/python.d.plugin/anomalies/README.md b/collectors/python.d.plugin/anomalies/README.md index 9d24e8685..c58c858bf 100644 --- a/collectors/python.d.plugin/anomalies/README.md +++ b/collectors/python.d.plugin/anomalies/README.md @@ -82,8 +82,8 @@ The default configuration should look something like this. Here you can see each # JOBS (data collection sources) # Pull data from local Netdata node. -local: - name: 'local' +anomalies: + name: 'Anomalies' # Host to pull data from. host: '127.0.0.1:19999' diff --git a/collectors/python.d.plugin/anomalies/anomalies.chart.py b/collectors/python.d.plugin/anomalies/anomalies.chart.py index 61b51d9c0..8ca3df682 100644 --- a/collectors/python.d.plugin/anomalies/anomalies.chart.py +++ b/collectors/python.d.plugin/anomalies/anomalies.chart.py @@ -188,7 +188,7 @@ class Service(SimpleService): self.custom_model_scalers[model] = MinMaxScaler() def reinitialize(self): - """Reinitialize charts, models and data to a begining state. + """Reinitialize charts, models and data to a beginning state. 
""" self.charts_init() self.custom_models_init() @@ -385,7 +385,7 @@ class Service(SimpleService): def get_data(self): - # initialize to whats available right now + # initialize to what's available right now if self.reinitialize_at_every_step or len(self.host_charts_dict[self.host]) == 0: self.charts_init() self.custom_models_init() diff --git a/collectors/python.d.plugin/anomalies/anomalies.conf b/collectors/python.d.plugin/anomalies/anomalies.conf index 0dc40ef2c..ef867709a 100644 --- a/collectors/python.d.plugin/anomalies/anomalies.conf +++ b/collectors/python.d.plugin/anomalies/anomalies.conf @@ -31,8 +31,8 @@ # JOBS (data collection sources) # Pull data from local Netdata node. -local: - name: 'local' +anomalies: + name: 'Anomalies' # Host to pull data from. host: '127.0.0.1:19999' diff --git a/collectors/python.d.plugin/changefinder/README.md b/collectors/python.d.plugin/changefinder/README.md index e1c1d4ba4..051639d1e 100644 --- a/collectors/python.d.plugin/changefinder/README.md +++ b/collectors/python.d.plugin/changefinder/README.md @@ -12,8 +12,8 @@ on your Netdata charts and/or dimensions. Instead of this collector just _collecting_ data, it also does some computation on the data it collects to return a changepoint score for each chart or dimension you configure it to work on. This is -an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithim so there is no batch step -to train the model, instead it evolves over time as more data arrives. That makes this particualr algorithim quite cheap +an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm so there is no batch step +to train the model, instead it evolves over time as more data arrives. That makes this particular algorithm quite cheap to compute at each step of data collection (see the notes section below for more details) and it should scale fairly well to work on lots of charts or hosts (if running on a parent node for example). @@ -28,7 +28,7 @@ Two charts are available: This chart shows the percentile of the score that is output from the ChangeFinder library (it is turned off by default but available with `show_scores: true`). -A high observed score is more likley to be a valid changepoint worth exploring, even more so when multiple charts or +A high observed score is more likely to be a valid changepoint worth exploring, even more so when multiple charts or dimensions have high changepoint scores at the same time or very close together. ### ChangeFinder Flags (`changefinder.flags`) @@ -36,11 +36,11 @@ dimensions have high changepoint scores at the same time or very close together. This chart shows `1` or `0` if the latest score has a percentile value that exceeds the `cf_threshold` threshold. By default, any scores that are in the 99th or above percentile will raise a flag on this chart. -The raw changefinder score itself can be a little noisey and so limiting ourselves to just periods where it surpasses +The raw changefinder score itself can be a little noisy and so limiting ourselves to just periods where it surpasses the 99th percentile can help manage the "[signal to noise ratio](https://en.wikipedia.org/wiki/Signal-to-noise_ratio)" better. -The `cf_threshold` paramater might be one you want to play around with to tune things specifically for the workloads on +The `cf_threshold` parameter might be one you want to play around with to tune things specifically for the workloads on your node and the specific charts you want to monitor. 
For example, maybe the 95th percentile might work better for you than the 99th percentile. @@ -164,7 +164,7 @@ sudo su -s /bin/bash netdata - It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into it's typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly this is because it can take a while to build up a proper distribution of previous scores in over to convert the raw - score returned by the ChangeFinder algorithim into a percentile based on the most recent `n_score_samples` that have + score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then should 'settle down' once it has built up enough history. This is a typical characteristic of online machine learning approaches which need some initial window of time before they can be useful. diff --git a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py index dddf50b4c..93614b08c 100644 --- a/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py +++ b/collectors/python.d.plugin/elasticsearch/elasticsearch.chart.py @@ -506,7 +506,9 @@ CHARTS = { def convert_index_store_size_to_bytes(size): - # can be b, kb, mb, gb + # can be b, kb, mb, gb or None + if size is None: + return -1 if size.endswith('kb'): return round(float(size[:-2]) * 1024) elif size.endswith('mb'): @@ -520,6 +522,12 @@ def convert_index_store_size_to_bytes(size): return -1 +def convert_index_null_value(value): + if value is None: + return -1 + return value + + def convert_index_health(health): if health == 'green': return 0 @@ -634,6 +642,30 @@ class Service(UrlService): # "docs.count": "10", # "docs.deleted": "3", # "store.size": "650b" + # }, + # { + # "status":"open", + # "index":".kibana_3", + # "health":"red", + # "uuid":"umAdNrq6QaOXrmZjAowTNw", + # "store.size":null, + # "pri.store.size":null, + # "docs.count":null, + # "rep":"0", + # "pri":"1", + # "docs.deleted":null + # }, + # { + # "health" : "green", + # "status" : "close", + # "index" : "siem-events-2021.09.12", + # "uuid" : "mTQ-Yl5TS7S3lGoRORE-Pg", + # "pri" : "4", + # "rep" : "0", + # "docs.count" : null, + # "docs.deleted" : null, + # "store.size" : null, + # "pri.store.size" : null # } # ] raw_data = self._get_raw_data(url) @@ -654,10 +686,12 @@ class Service(UrlService): continue v = { - '{0}_index_docs_count'.format(name): idx['docs.count'], '{0}_index_replica'.format(name): idx['rep'], '{0}_index_health'.format(name): convert_index_health(idx['health']), } + docs_count = convert_index_null_value(idx['docs.count']) + if docs_count != -1: + v['{0}_index_docs_count'.format(name)] = idx['docs.count'] size = convert_index_store_size_to_bytes(idx['store.size']) if size != -1: v['{0}_index_store_size'.format(name)] = size diff --git a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py index f9bbdc164..dca010817 100644 --- a/collectors/python.d.plugin/go_expvar/go_expvar.chart.py +++ b/collectors/python.d.plugin/go_expvar/go_expvar.chart.py @@ -237,7 +237,7 @@ class Service(UrlService): gc_pauses = memstats['PauseNs'] try: gc_pause_avg = sum(gc_pauses) / len([x for x in gc_pauses if x > 0]) - # no GC cycles have occured yet + # no GC cycles have occurred yet except 
ZeroDivisionError: gc_pause_avg = 0 diff --git a/collectors/python.d.plugin/httpcheck/README.md b/collectors/python.d.plugin/httpcheck/README.md index 55aad52f0..59c60f544 100644 --- a/collectors/python.d.plugin/httpcheck/README.md +++ b/collectors/python.d.plugin/httpcheck/README.md @@ -25,7 +25,7 @@ Following charts are drawn per job: ## Configuration -Edit the `python.d/httpcheck.conf` configuration file using `edit-config` from the Netdata [config +Edit the [`python.d/httpcheck.conf`](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/httpcheck/httpcheck.conf) configuration file using `edit-config` from the Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`. ```bash diff --git a/collectors/python.d.plugin/mongodb/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py index 2e6fb220a..bec94d3ef 100644 --- a/collectors/python.d.plugin/mongodb/mongodb.chart.py +++ b/collectors/python.d.plugin/mongodb/mongodb.chart.py @@ -250,10 +250,10 @@ CHARTS = { ] }, 'cursors': { - 'options': [None, 'Currently openned cursors, cursors with timeout disabled and timed out cursors', + 'options': [None, 'Currently opened cursors, cursors with timeout disabled and timed out cursors', 'cursors', 'database performance', 'mongodb.cursors', 'stacked'], 'lines': [ - ['cursor_total', 'openned', 'absolute', 1, 1], + ['cursor_total', 'opened', 'absolute', 1, 1], ['noTimeout', None, 'absolute', 1, 1], ['timedOut', None, 'incremental', 1, 1] ] diff --git a/collectors/python.d.plugin/mysql/README.md b/collectors/python.d.plugin/mysql/README.md index d8d3c1d0b..63d2c1e53 100644 --- a/collectors/python.d.plugin/mysql/README.md +++ b/collectors/python.d.plugin/mysql/README.md @@ -17,7 +17,7 @@ To create the `netdata` user, execute the following in the MySQL shell: ```sh create user 'netdata'@'localhost'; -grant usage on *.* to 'netdata'@'localhost'; +grant usage, replication client on *.* to 'netdata'@'localhost'; flush privileges; ``` The `netdata` user will have the ability to connect to the MySQL server on `localhost` without a password. diff --git a/collectors/python.d.plugin/mysql/mysql.chart.py b/collectors/python.d.plugin/mysql/mysql.chart.py index 1737e16b4..e8c03cb00 100644 --- a/collectors/python.d.plugin/mysql/mysql.chart.py +++ b/collectors/python.d.plugin/mysql/mysql.chart.py @@ -398,7 +398,7 @@ CHARTS = { ] }, 'innodb_os_log_fsync_writes': { - 'options': [None, 'InnoDB OS Log Operations', 'operations/s', 'innodb', 'mysql.innodb_os_log', 'line'], + 'options': [None, 'InnoDB OS Log Operations', 'operations/s', 'innodb', 'mysql.innodb_os_log_fsyncs', 'line'], 'lines': [ ['Innodb_os_log_fsyncs', 'fsyncs', 'incremental'], ] @@ -445,7 +445,7 @@ CHARTS = { }, 'innodb_buffer_pool_flush_pages_requests': { 'options': [None, 'InnoDB Buffer Pool Flush Pages Requests', 'requests/s', 'innodb', - 'mysql.innodb_buffer_pool_pages', 'line'], + 'mysql.innodb_buffer_pool_pages_flushed', 'line'], 'lines': [ ['Innodb_buffer_pool_pages_flushed', 'flush pages', 'incremental'], ] diff --git a/collectors/python.d.plugin/postgres/README.md b/collectors/python.d.plugin/postgres/README.md index dc9b18467..0515ec57c 100644 --- a/collectors/python.d.plugin/postgres/README.md +++ b/collectors/python.d.plugin/postgres/README.md @@ -12,6 +12,8 @@ Collects database health and performance metrics. - `python-psycopg2` package. 
You have to install it manually and make sure that it is available to the `netdata` user, either using `pip`, the package manager of your Linux distribution, or any other method you prefer. +- PostgreSQL v9.4+ + Following charts are drawn: 1. **Database size** MB @@ -68,6 +70,23 @@ Following charts are drawn: - locks +12. **Standby delta** KB + + - sent delta + - write delta + - flush delta + - replay delta + +13. **Standby lag** seconds + + - write lag + - flush lag + - replay lag + +14. **Average number of blocking transactions in db** processes + + - blocking + ## Configuration Edit the `python.d/postgres.conf` configuration file using `edit-config` from the Netdata [config diff --git a/collectors/python.d.plugin/postgres/postgres.chart.py b/collectors/python.d.plugin/postgres/postgres.chart.py index bd28dd9b7..29026a6a3 100644 --- a/collectors/python.d.plugin/postgres/postgres.chart.py +++ b/collectors/python.d.plugin/postgres/postgres.chart.py @@ -45,14 +45,18 @@ QUERY_NAME_INDEX_STATS = 'INDEX_STATS' QUERY_NAME_DATABASE = 'DATABASE' QUERY_NAME_BGWRITER = 'BGWRITER' QUERY_NAME_LOCKS = 'LOCKS' +QUERY_NAME_BLOCKERS = 'BLOCKERS' QUERY_NAME_DATABASES = 'DATABASES' QUERY_NAME_STANDBY = 'STANDBY' QUERY_NAME_REPLICATION_SLOT = 'REPLICATION_SLOT' QUERY_NAME_STANDBY_DELTA = 'STANDBY_DELTA' +QUERY_NAME_STANDBY_LAG = 'STANDBY_LAG' QUERY_NAME_REPSLOT_FILES = 'REPSLOT_FILES' QUERY_NAME_IF_SUPERUSER = 'IF_SUPERUSER' QUERY_NAME_SERVER_VERSION = 'SERVER_VERSION' QUERY_NAME_AUTOVACUUM = 'AUTOVACUUM' +QUERY_NAME_FORCED_AUTOVACUUM = 'FORCED_AUTOVACUUM' +QUERY_NAME_TX_WRAPAROUND = 'TX_WRAPAROUND' QUERY_NAME_DIFF_LSN = 'DIFF_LSN' QUERY_NAME_WAL_WRITES = 'WAL_WRITES' @@ -123,6 +127,9 @@ METRICS = { 'ShareLock', 'RowExclusiveLock' ], + QUERY_NAME_BLOCKERS: [ + 'blocking_pids_avg' + ], QUERY_NAME_AUTOVACUUM: [ 'analyze', 'vacuum_analyze', @@ -130,12 +137,24 @@ METRICS = { 'vacuum_freeze', 'brin_summarize' ], + QUERY_NAME_FORCED_AUTOVACUUM: [ + 'percent_towards_forced_vacuum' + ], + QUERY_NAME_TX_WRAPAROUND: [ + 'oldest_current_xid', + 'percent_towards_wraparound' + ], QUERY_NAME_STANDBY_DELTA: [ 'sent_delta', 'write_delta', 'flush_delta', 'replay_delta' ], + QUERY_NAME_STANDBY_LAG: [ + 'write_lag', + 'flush_lag', + 'replay_lag' + ], QUERY_NAME_REPSLOT_FILES: [ 'replslot_wal_keep', 'replslot_files' @@ -177,7 +196,7 @@ FROM FROM pg_catalog.pg_ls_dir('pg_wal') AS wal(name) WHERE name ~ '^[0-9A-F]{24}$' ORDER BY - (pg_stat_file('pg_wal/'||name)).modification, + (pg_stat_file('pg_wal/'||name, true)).modification, wal.name DESC) sub; """, V96: """ @@ -204,7 +223,7 @@ FROM FROM pg_catalog.pg_ls_dir('pg_xlog') AS wal(name) WHERE name ~ '^[0-9A-F]{24}$' ORDER BY - (pg_stat_file('pg_xlog/'||name)).modification, + (pg_stat_file('pg_xlog/'||name, true)).modification, wal.name DESC) sub; """, } @@ -263,7 +282,7 @@ FROM ( FROM pg_catalog.pg_stat_activity WHERE backend_type IN ('client backend', 'background worker') UNION ALL - SELECT 'r', COUNT(1) + SELECT 'r', COUNT(1) FROM pg_catalog.pg_stat_replication ) as s; """, @@ -277,7 +296,7 @@ FROM ( FROM pg_catalog.pg_stat_activity WHERE query NOT LIKE 'autovacuum: %%' UNION ALL - SELECT 'r', COUNT(1) + SELECT 'r', COUNT(1) FROM pg_catalog.pg_stat_replication ) as s; """, @@ -291,7 +310,7 @@ FROM ( FROM pg_catalog.pg_stat_activity WHERE current_query NOT LIKE 'autovacuum: %%' UNION ALL - SELECT 'r', COUNT(1) + SELECT 'r', COUNT(1) FROM pg_catalog.pg_stat_replication ) as s; """, @@ -386,6 +405,48 @@ ORDER BY datname, mode; """, } +QUERY_BLOCKERS = { + DEFAULT: """ +WITH B AS ( 
+SELECT DISTINCT + pg_database.datname as database_name, + pg_locks.pid, + cardinality(pg_blocking_pids(pg_locks.pid)) AS blocking_pids +FROM pg_locks +INNER JOIN pg_database ON pg_database.oid = pg_locks.database +WHERE NOT pg_locks.granted) +SELECT database_name, AVG(blocking_pids) AS blocking_pids_avg +FROM B +GROUP BY database_name +""", + V96: """ +WITH B AS ( +SELECT DISTINCT + pg_database.datname as database_name, + blocked_locks.pid AS blocked_pid, + COUNT(blocking_locks.pid) AS blocking_pids +FROM pg_catalog.pg_locks blocked_locks +INNER JOIN pg_database ON pg_database.oid = blocked_locks.database +JOIN pg_catalog.pg_locks blocking_locks + ON blocking_locks.locktype = blocked_locks.locktype + AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database + AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation + AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page + AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple + AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid + AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid + AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid + AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid + AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid + AND blocking_locks.pid != blocked_locks.pid +WHERE NOT blocked_locks.GRANTED +GROUP BY database_name, blocked_pid) +SELECT database_name, AVG(blocking_pids) AS blocking_pids_avg +FROM B +GROUP BY database_name +""" +} + QUERY_DATABASES = { DEFAULT: """ SELECT @@ -394,17 +455,18 @@ FROM pg_stat_database WHERE has_database_privilege( (SELECT current_user), datname, 'connect') - AND NOT datname ~* '^template\d'; + AND NOT datname ~* '^template\d' +ORDER BY datname; """, } QUERY_STANDBY = { DEFAULT: """ SELECT - application_name -FROM pg_stat_replication -WHERE application_name IS NOT NULL -GROUP BY application_name; + COALESCE(prs.slot_name, psr.application_name) application_name +FROM pg_stat_replication psr +LEFT OUTER JOIN pg_replication_slots prs on psr.pid = prs.active_pid +WHERE application_name IS NOT NULL; """, } @@ -418,7 +480,7 @@ FROM pg_replication_slots; QUERY_STANDBY_DELTA = { DEFAULT: """ SELECT - application_name, + COALESCE(prs.slot_name, psr.application_name) application_name, pg_wal_lsn_diff( CASE pg_is_in_recovery() WHEN true THEN pg_last_wal_receive_lsn() @@ -443,12 +505,13 @@ SELECT ELSE pg_current_wal_lsn() END, replay_lsn) AS replay_delta -FROM pg_stat_replication +FROM pg_stat_replication psr +LEFT OUTER JOIN pg_replication_slots prs on psr.pid = prs.active_pid WHERE application_name IS NOT NULL; """, V96: """ SELECT - application_name, + COALESCE(prs.slot_name, psr.application_name) application_name, pg_xlog_location_diff( CASE pg_is_in_recovery() WHEN true THEN pg_last_xlog_receive_location() @@ -473,11 +536,25 @@ SELECT ELSE pg_current_xlog_location() END, replay_location) AS replay_delta -FROM pg_stat_replication +FROM pg_stat_replication psr +LEFT OUTER JOIN pg_replication_slots prs on psr.pid = prs.active_pid WHERE application_name IS NOT NULL; """, } +QUERY_STANDBY_LAG = { + DEFAULT: """ +SELECT + COALESCE(prs.slot_name, psr.application_name) application_name, + COALESCE(EXTRACT(EPOCH FROM write_lag)::bigint, 0) AS write_lag, + COALESCE(EXTRACT(EPOCH FROM flush_lag)::bigint, 0) AS flush_lag, + COALESCE(EXTRACT(EPOCH FROM replay_lag)::bigint, 0) AS replay_lag +FROM pg_stat_replication psr +LEFT OUTER JOIN 
pg_replication_slots prs on psr.pid = prs.active_pid +WHERE application_name IS NOT NULL; +""" +} + QUERY_REPSLOT_FILES = { DEFAULT: """ WITH wal_size AS ( @@ -500,8 +577,20 @@ FROM slot_type, COALESCE ( floor( - (pg_wal_lsn_diff(pg_current_wal_lsn (),slot.restart_lsn) - - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val) + CASE WHEN pg_is_in_recovery() + THEN ( + pg_wal_lsn_diff(pg_last_wal_receive_lsn(), slot.restart_lsn) + -- this is needed to account for whole WAL retention and + -- not only size retention + + (pg_wal_lsn_diff(restart_lsn, '0/0') %% s.val) + ) / s.val + ELSE ( + pg_wal_lsn_diff(pg_current_wal_lsn(), slot.restart_lsn) + -- this is needed to account for whole WAL retention and + -- not only size retention + + (pg_walfile_name_offset(restart_lsn)).file_offset + ) / s.val + END ),0) AS replslot_wal_keep FROM pg_replication_slots slot LEFT JOIN ( @@ -539,8 +628,20 @@ FROM slot_type, COALESCE ( floor( - (pg_wal_lsn_diff(pg_current_wal_lsn (),slot.restart_lsn) - - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val) + CASE WHEN pg_is_in_recovery() + THEN ( + pg_wal_lsn_diff(pg_last_wal_receive_lsn(), slot.restart_lsn) + -- this is needed to account for whole WAL retention and + -- not only size retention + + (pg_wal_lsn_diff(restart_lsn, '0/0') %% s.val) + ) / s.val + ELSE ( + pg_wal_lsn_diff(pg_current_wal_lsn(), slot.restart_lsn) + -- this is needed to account for whole WAL retention and + -- not only size retention + + (pg_walfile_name_offset(restart_lsn)).file_offset + ) / s.val + END ),0) AS replslot_wal_keep FROM pg_replication_slots slot LEFT JOIN ( @@ -586,6 +687,43 @@ WHERE query NOT LIKE '%%pg_stat_activity%%'; """, } +QUERY_FORCED_AUTOVACUUM = { + DEFAULT: """ +WITH max_age AS ( + SELECT setting AS autovacuum_freeze_max_age + FROM pg_catalog.pg_settings + WHERE name = 'autovacuum_freeze_max_age' ) +, per_database_stats AS ( + SELECT datname + , m.autovacuum_freeze_max_age::int + , age(d.datfrozenxid) AS oldest_current_xid + FROM pg_catalog.pg_database d + JOIN max_age m ON (true) + WHERE d.datallowconn ) +SELECT max(ROUND(100*(oldest_current_xid/autovacuum_freeze_max_age::float))) AS percent_towards_forced_autovacuum +FROM per_database_stats; +""", +} + +QUERY_TX_WRAPAROUND = { + DEFAULT: """ +WITH max_age AS ( + SELECT 2000000000 as max_old_xid + FROM pg_catalog.pg_settings + WHERE name = 'autovacuum_freeze_max_age' ) +, per_database_stats AS ( + SELECT datname + , m.max_old_xid::int + , age(d.datfrozenxid) AS oldest_current_xid + FROM pg_catalog.pg_database d + JOIN max_age m ON (true) + WHERE d.datallowconn ) +SELECT max(oldest_current_xid) AS oldest_current_xid + , max(ROUND(100*(oldest_current_xid/max_old_xid::float))) AS percent_towards_wraparound +FROM per_database_stats; +""", +} + QUERY_DIFF_LSN = { DEFAULT: """ SELECT @@ -632,6 +770,10 @@ def query_factory(name, version=NO_VERSION): return QUERY_BGWRITER[DEFAULT] elif name == QUERY_NAME_LOCKS: return QUERY_LOCKS[DEFAULT] + elif name == QUERY_NAME_BLOCKERS: + if version < 90600: + return QUERY_BLOCKERS[V96] + return QUERY_BLOCKERS[DEFAULT] elif name == QUERY_NAME_DATABASES: return QUERY_DATABASES[DEFAULT] elif name == QUERY_NAME_STANDBY: @@ -644,6 +786,10 @@ def query_factory(name, version=NO_VERSION): return QUERY_SHOW_VERSION[DEFAULT] elif name == QUERY_NAME_AUTOVACUUM: return QUERY_AUTOVACUUM[DEFAULT] + elif name == QUERY_NAME_FORCED_AUTOVACUUM: + return QUERY_FORCED_AUTOVACUUM[DEFAULT] + elif name == QUERY_NAME_TX_WRAPAROUND: + return QUERY_TX_WRAPAROUND[DEFAULT] elif name == 
QUERY_NAME_WAL: if version < 100000: return QUERY_WAL[V96] @@ -656,6 +802,8 @@ def query_factory(name, version=NO_VERSION): if version < 100000: return QUERY_STANDBY_DELTA[V96] return QUERY_STANDBY_DELTA[DEFAULT] + elif name == QUERY_NAME_STANDBY_LAG: + return QUERY_STANDBY_LAG[DEFAULT] elif name == QUERY_NAME_REPSLOT_FILES: if version < 110000: return QUERY_REPSLOT_FILES[V10] @@ -676,6 +824,7 @@ ORDER = [ 'db_stat_tuple_write', 'db_stat_transactions', 'db_stat_connections', + 'db_stat_blocking_pids_avg', 'database_size', 'backend_process', 'backend_usage', @@ -695,7 +844,11 @@ ORDER = [ 'stat_bgwriter_maxwritten', 'replication_slot', 'standby_delta', - 'autovacuum' + 'standby_lag', + 'autovacuum', + 'forced_autovacuum', + 'tx_wraparound_oldest_current_xid', + 'tx_wraparound_percent_towards_wraparound' ] CHARTS = { @@ -752,6 +905,13 @@ CHARTS = { ['temp_files', 'files', 'incremental'] ] }, + 'db_stat_blocking_pids_avg': { + 'options': [None, 'Average number of blocking transactions in db', 'processes', 'db statistics', + 'postgres.db_stat_blocking_pids_avg', 'line'], + 'lines': [ + ['blocking_pids_avg', 'blocking', 'absolute'] + ] + }, 'database_size': { 'options': [None, 'Database size', 'MiB', 'database size', 'postgres.db_size', 'stacked'], 'lines': [ @@ -875,6 +1035,24 @@ CHARTS = { ['brin_summarize', 'brin summarize', 'absolute'] ] }, + 'forced_autovacuum': { + 'options': [None, 'Percent towards forced autovacuum', 'percent', 'autovacuum', 'postgres.forced_autovacuum', 'line'], + 'lines': [ + ['percent_towards_forced_autovacuum', 'percent', 'absolute'] + ] + }, + 'tx_wraparound_oldest_current_xid': { + 'options': [None, 'Oldest current XID', 'xid', 'tx_wraparound', 'postgres.tx_wraparound_oldest_current_xid', 'line'], + 'lines': [ + ['oldest_current_xid', 'xid', 'absolute'] + ] + }, + 'tx_wraparound_percent_towards_wraparound': { + 'options': [None, 'Percent towards wraparound', 'percent', 'tx_wraparound', 'postgres.percent_towards_wraparound', 'line'], + 'lines': [ + ['percent_towards_wraparound', 'percent', 'absolute'] + ] + }, 'standby_delta': { 'options': [None, 'Standby delta', 'KiB', 'replication delta', 'postgres.standby_delta', 'line'], 'lines': [ @@ -884,6 +1062,14 @@ CHARTS = { ['replay_delta', 'replay delta', 'absolute', 1, 1024] ] }, + 'standby_lag': { + 'options': [None, 'Standby lag', 'seconds', 'replication lag', 'postgres.standby_lag', 'line'], + 'lines': [ + ['write_lag', 'write lag', 'absolute'], + ['flush_lag', 'flush lag', 'absolute'], + ['replay_lag', 'replay lag', 'absolute'] + ] + }, 'replication_slot': { 'options': [None, 'Replication slot files', 'files', 'replication slot', 'postgres.replication_slot', 'line'], 'lines': [ @@ -1073,6 +1259,7 @@ class Service(SimpleService): self.queries[query_factory(QUERY_NAME_BGWRITER)] = METRICS[QUERY_NAME_BGWRITER] self.queries[query_factory(QUERY_NAME_DIFF_LSN, self.server_version)] = METRICS[QUERY_NAME_WAL_WRITES] self.queries[query_factory(QUERY_NAME_STANDBY_DELTA, self.server_version)] = METRICS[QUERY_NAME_STANDBY_DELTA] + self.queries[query_factory(QUERY_NAME_BLOCKERS, self.server_version)] = METRICS[QUERY_NAME_BLOCKERS] if self.do_index_stats: self.queries[query_factory(QUERY_NAME_INDEX_STATS)] = METRICS[QUERY_NAME_INDEX_STATS] @@ -1092,6 +1279,12 @@ class Service(SimpleService): if self.server_version >= 90400: self.queries[query_factory(QUERY_NAME_AUTOVACUUM)] = METRICS[QUERY_NAME_AUTOVACUUM] + self.queries[query_factory(QUERY_NAME_FORCED_AUTOVACUUM)] = METRICS[QUERY_NAME_FORCED_AUTOVACUUM] + 
self.queries[query_factory(QUERY_NAME_TX_WRAPAROUND)] = METRICS[QUERY_NAME_TX_WRAPAROUND] + + if self.server_version >= 100000: + self.queries[query_factory(QUERY_NAME_STANDBY_LAG)] = METRICS[QUERY_NAME_STANDBY_LAG] + def create_dynamic_charts(self): for database_name in self.databases[::-1]: dim = [ @@ -1116,11 +1309,19 @@ class Service(SimpleService): ) for application_name in self.secondaries[::-1]: - add_replication_delta_chart( + add_replication_standby_chart( order=self.order, definitions=self.definitions, name='standby_delta', application_name=application_name, + chart_family='replication delta', + ) + add_replication_standby_chart( + order=self.order, + definitions=self.definitions, + name='standby_lag', + application_name=application_name, + chart_family='replication lag', ) for slot_name in self.replication_slots[::-1]: @@ -1199,7 +1400,7 @@ def add_database_stat_chart(order, definitions, name, database_name): 'lines': create_lines(database_name, chart_template['lines'])} -def add_replication_delta_chart(order, definitions, name, application_name): +def add_replication_standby_chart(order, definitions, name, application_name, chart_family): def create_lines(standby, lines): result = list() for line in lines: @@ -1213,7 +1414,7 @@ def add_replication_delta_chart(order, definitions, name, application_name): order.insert(position, chart_name) name, title, units, _, context, chart_type = chart_template['options'] definitions[chart_name] = { - 'options': [name, title + ': ' + application_name, units, 'replication delta', context, chart_type], + 'options': [name, title + ': ' + application_name, units, chart_family, context, chart_type], 'lines': create_lines(application_name, chart_template['lines'])} diff --git a/collectors/python.d.plugin/postgres/postgres.conf b/collectors/python.d.plugin/postgres/postgres.conf index 1970a7a27..7e354d99b 100644 --- a/collectors/python.d.plugin/postgres/postgres.conf +++ b/collectors/python.d.plugin/postgres/postgres.conf @@ -97,14 +97,7 @@ # the client (Netdata) is not considered local, unless it runs from inside # the same container. 
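
The version dispatch above follows one pattern throughout the module: each query name maps to a dict of SQL variants, and `query_factory` picks a variant by comparing PostgreSQL's integer `server_version_num`. A minimal sketch of that pattern (the query bodies are elided placeholders, not the real SQL from the hunks above):

```python
DEFAULT, V96 = 'DEFAULT', 'V96'

# Placeholder bodies only; the real SQL is in the hunks above.
QUERY_BLOCKERS = {
    DEFAULT: 'SELECT ... pg_blocking_pids(...) ...',
    V96: 'SELECT ... manual pg_locks self-join ...',
}
QUERY_STANDBY_LAG = {
    DEFAULT: 'SELECT ... write_lag, flush_lag, replay_lag ...',
}

def query_factory(name, version):
    # version is server_version_num, e.g. 90600 for 9.6.0
    if name == 'BLOCKERS':
        return QUERY_BLOCKERS[V96] if version < 90600 else QUERY_BLOCKERS[DEFAULT]
    if name == 'STANDBY_LAG':
        # registered only on v10+, where pg_stat_replication
        # grew the write_lag/flush_lag/replay_lag columns
        return QUERY_STANDBY_LAG[DEFAULT]
    raise ValueError(name)
```
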
# -# Postgres supported versions are : -# - 9.3 (without autovacuum) -# - 9.4 -# - 9.5 -# - 9.6 -# - 10 -# -# Superuser access is needed for theses charts: +# Superuser access is needed for these charts: # Write-Ahead Logs # Archive Write-Ahead Logs # diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in index 9d575d86f..b263f229e 100644 --- a/collectors/python.d.plugin/python.d.plugin.in +++ b/collectors/python.d.plugin/python.d.plugin.in @@ -500,27 +500,31 @@ class Plugin: self.saver = None self.runs = 0 - def load_config(self): - paths = [ - DIRS.plugin_user_config, - DIRS.plugin_stock_config, - ] - self.log.debug("looking for '{0}' in {1}".format(self.config_name, paths)) - abs_path = multi_path_find(self.config_name, *paths) - if not abs_path: - self.log.warning("'{0}' was not found, using defaults".format(self.config_name)) - return True - - self.log.debug("loading '{0}'".format(abs_path)) + def load_config_file(self, filepath, expected): + self.log.debug("looking for '{0}'".format(filepath)) + if not os.path.isfile(filepath): + log = self.log.info if not expected else self.log.error + log("'{0}' was not found".format(filepath)) + return dict() try: - config = load_config(abs_path) + config = load_config(filepath) except Exception as error: - self.log.error("error on loading '{0}' : {1}".format(abs_path, repr(error))) - return False + self.log.error("error on loading '{0}' : {1}".format(filepath, repr(error))) + return dict() + self.log.debug("'{0}' is loaded".format(filepath)) + return config - self.log.debug("'{0}' is loaded".format(abs_path)) - self.config.update(config) - return True + def load_config(self): + user_config = self.load_config_file( + filepath=os.path.join(DIRS.plugin_user_config, self.config_name), + expected=False, + ) + stock_config = self.load_config_file( + filepath=os.path.join(DIRS.plugin_stock_config, self.config_name), + expected=True, + ) + self.config.update(stock_config) + self.config.update(user_config) def load_job_statuses(self): self.log.debug("looking for '{0}' in {1}".format(self.jobs_status_dump_name, DIRS.var_lib)) @@ -593,8 +597,7 @@ class Plugin: return jobs def setup(self): - if not self.load_config(): - return False + self.load_config() if not self.config['enabled']: self.log.info('disabled in the configuration file') diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py index 402035f14..75b8c8c40 100644 --- a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py +++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py @@ -154,7 +154,7 @@ CHARTS = { 'algo': INCREMENTAL, }, 'write_total_err_corrected': { - 'options': [None, 'Write Error Corrected', 'errors', 'errors', 'smartd_log.read_total_err_corrected', 'line'], + 'options': [None, 'Write Error Corrected', 'errors', 'errors', 'smartd_log.write_total_err_corrected', 'line'], 'lines': [], 'attrs': [ATTR_WRITE_ERR_COR], 'algo': INCREMENTAL, diff --git a/collectors/python.d.plugin/varnish/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py index 534d70926..506ad026a 100644 --- a/collectors/python.d.plugin/varnish/varnish.chart.py +++ b/collectors/python.d.plugin/varnish/varnish.chart.py @@ -197,7 +197,7 @@ class VarnishVersion: class Parser: _backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)') - _backend_old = re.compile(r'VBE\.[\d\w-]+\.([\w\d_]+).(beresp[\w_]+)\s+(\d+)') + _backend_old = 
re.compile(r'VBE\.[\d\w-]+\.([\w\d_-]+).(beresp[\w_]+)\s+(\d+)') _default = re.compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)') def __init__(self): diff --git a/collectors/python.d.plugin/zscores/README.md b/collectors/python.d.plugin/zscores/README.md index 0b4472374..7fb189f6a 100644 --- a/collectors/python.d.plugin/zscores/README.md +++ b/collectors/python.d.plugin/zscores/README.md @@ -43,7 +43,7 @@ looking at first (for more background information on why 3 stddev see [here](https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule#:~:text=In%20the%20empirical%20sciences%20the,99.7%25%20probability%20as%20near%20certainty.)) . -In the example below we basically took a sledge hammer to our system so its not suprising that lots of charts light up +In the example below we basically took a sledge hammer to our system so its not surprising that lots of charts light up after we run the stress command. In a more realistic setting you might just see a handful of charts with strange zscores and that could be a good indication of where to look first. @@ -101,9 +101,9 @@ information about each one and what it does. host: '127.0.0.1:19999' # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc. charts_regex: 'system\..*' -# length of time to base calulcations off for mean and stddev +# length of time to base calculations off for mean and stddev train_secs: 14400 # use last 4 hours to work out the mean and stddev for the zscore -# offset preceeding latest data to ignore when calculating mean and stddev +# offset preceding latest data to ignore when calculating mean and stddev offset_secs: 300 # ignore last 5 minutes of data when calculating the mean and stddev # recalculate the mean and stddev every n steps of the collector train_every_n: 900 # recalculate mean and stddev every 15 minutes @@ -114,11 +114,11 @@ z_clip: 10 # cap each zscore at 10 so as to avoid really large individual zscore # set z_abs: 'true' to make all zscores be absolute values only. z_abs: 'true' # burn in period in which to initially calculate mean and stddev on every step -burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or inital calculations fail to return +burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or initial calculations fail to return # mode can be to get a zscore 'per_dim' or 'per_chart' mode: 'per_chart' # 'per_chart' means individual dimension level smoothed zscores will be aggregated to one zscore per chart per time step # per_chart_agg is how you aggregate from dimension to chart when mode='per_chart' -per_chart_agg: 'mean' # 'absmax' will take the max absolute value accross all dimensions but will maintain the sign. 'mean' will just average. +per_chart_agg: 'mean' # 'absmax' will take the max absolute value across all dimensions but will maintain the sign. 'mean' will just average. ``` ## Notes @@ -128,7 +128,7 @@ per_chart_agg: 'mean' # 'absmax' will take the max absolute value accross all di calls to the netdata rest api to get the required data for each chart when calculating the mean and stddev. - It may take a few hours or so for the collector to 'settle' into it's typical behaviour in terms of the scores you will see in the normal running of your system. 
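
The knobs described in the configuration above compose in a straightforward way. As a rough illustration (a simplified sketch, not the collector's actual code), a per-dimension zscore is clipped, optionally made absolute, and then aggregated to one value per chart:

```python
def zscore(x, mean, std, z_clip=10, z_abs=True):
    # clipped (and optionally absolute) zscore for one dimension,
    # mirroring the z_clip / z_abs options
    z = 0.0 if std == 0 else (x - mean) / std
    z = max(-z_clip, min(z, z_clip))
    return abs(z) if z_abs else z

def per_chart(dim_zscores, per_chart_agg='mean'):
    # 'absmax' keeps the largest magnitude but preserves its sign
    # (only meaningful when z_abs is off); 'mean' just averages
    if per_chart_agg == 'absmax':
        return max(dim_zscores, key=abs)
    return sum(dim_zscores) / len(dim_zscores)
```
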
-- The zscore you see for each chart when using `mode: 'per_chart'` as actually an aggregated zscore accross all the +- The zscore you see for each chart when using `mode: 'per_chart'` as actually an aggregated zscore across all the dimensions on the underlying chart. - If you set `mode: 'per_dim'` then you will see a zscore for each dimension on each chart as opposed to one per chart. - As this collector does some calculations itself in python you may want to try it out first on a test or development diff --git a/collectors/python.d.plugin/zscores/zscores.chart.py b/collectors/python.d.plugin/zscores/zscores.chart.py index 48397d8dd..1099b9376 100644 --- a/collectors/python.d.plugin/zscores/zscores.chart.py +++ b/collectors/python.d.plugin/zscores/zscores.chart.py @@ -24,11 +24,11 @@ ORDER = [ CHARTS = { 'z': { - 'options': ['z', 'Z Score', 'z', 'Z Score', 'z', 'line'], + 'options': ['z', 'Z Score', 'z', 'Z Score', 'zscores.z', 'line'], 'lines': [] }, '3stddev': { - 'options': ['3stddev', 'Z Score >3', 'count', '3 Stddev', '3stddev', 'stacked'], + 'options': ['3stddev', 'Z Score >3', 'count', '3 Stddev', 'zscores.3stddev', 'stacked'], 'lines': [] }, } diff --git a/collectors/python.d.plugin/zscores/zscores.conf b/collectors/python.d.plugin/zscores/zscores.conf index fab18c787..07d62ebe6 100644 --- a/collectors/python.d.plugin/zscores/zscores.conf +++ b/collectors/python.d.plugin/zscores/zscores.conf @@ -83,7 +83,7 @@ local: # length of time to base calculations off for mean and stddev train_secs: 14400 # use last 4 hours to work out the mean and stddev for the zscore - # offset preceeding latest data to ignore when calculating mean and stddev + # offset preceding latest data to ignore when calculating mean and stddev offset_secs: 300 # ignore last 5 minutes of data when calculating the mean and stddev # recalculate the mean and stddev every n steps of the collector @@ -99,10 +99,10 @@ local: z_abs: 'true' # burn in period in which to initially calculate mean and stddev on every step - burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or inital calculations fail to return + burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or initial calculations fail to return # mode can be to get a zscore 'per_dim' or 'per_chart' mode: 'per_chart' # 'per_chart' means individual dimension level smoothed zscores will be aggregated to one zscore per chart per time step # per_chart_agg is how you aggregate from dimension to chart when mode='per_chart' - per_chart_agg: 'mean' # 'absmax' will take the max absolute value accross all dimensions but will maintain the sign. 'mean' will just average. + per_chart_agg: 'mean' # 'absmax' will take the max absolute value across all dimensions but will maintain the sign. 'mean' will just average. 
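
The training parameters above define a sliding window: use the last `train_secs` of data, skip the most recent `offset_secs`, and recalculate the mean and stddev every `train_every_n` steps. A small sketch of that windowing, assuming one sample per second:

```python
def train_stats(samples, train_secs=14400, offset_secs=300):
    # samples: newest-last list, one value per second (assumption)
    end = len(samples) - offset_secs           # ignore the freshest data
    start = max(0, end - train_secs)           # keep the last 4 hours
    window = samples[start:end]
    if not window:
        raise ValueError('not enough history to train yet')
    mean = sum(window) / len(window)
    std = (sum((v - mean) ** 2 for v in window) / len(window)) ** 0.5
    return mean, std
```
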
diff --git a/collectors/slabinfo.plugin/slabinfo.c b/collectors/slabinfo.plugin/slabinfo.c index 863f440e4..58d9c4fff 100644 --- a/collectors/slabinfo.plugin/slabinfo.c +++ b/collectors/slabinfo.plugin/slabinfo.c @@ -51,6 +51,8 @@ char *netdata_configured_host_prefix = ""; int running = 1; int debug = 0; +size_t lines_discovered = 0; +int redraw_chart = 0; // ---------------------------------------------------------------------------- @@ -187,6 +189,10 @@ struct slabinfo *read_file_slabinfo() { // Iterate on all lines to populate / update the slabinfo struct size_t lines = procfile_lines(ff), l; + if (unlikely(lines != lines_discovered)) { + lines_discovered = lines; + redraw_chart = 1; + } slabdebug(" Read %lu lines from procfile", (unsigned long)lines); for(l = 2; l < lines; l++) { @@ -254,7 +260,8 @@ unsigned int do_slab_stats(int update_every) { sactive = read_file_slabinfo(); // Init Charts - if (unlikely(loops == 0)) { + if (unlikely(redraw_chart)) { + redraw_chart = 0; // Memory Usage printf("CHART %s.%s '' 'Memory Usage' 'B' '%s' '' line %d %d %s\n" , CHART_TYPE diff --git a/collectors/statsd.plugin/README.md b/collectors/statsd.plugin/README.md index f3050cebb..ba4ada517 100644 --- a/collectors/statsd.plugin/README.md +++ b/collectors/statsd.plugin/README.md @@ -21,7 +21,7 @@ Netdata statsd is fast. It can collect more than **1.200.000 metrics per second* # Available StatsD collectors -Netdata ships with collectors implemented using the StatsD collector. They are configuration files (as you will read bellow), but they function as a collector, in the sense that configuration file organize the metrics of a data source into pre-defined charts. +Netdata ships with collectors implemented using the StatsD collector. They are configuration files (as you will read below), but they function as a collector, in the sense that configuration file organize the metrics of a data source into pre-defined charts. On these charts, we can have alarms as with any metric and chart. @@ -64,7 +64,7 @@ Netdata fully supports the StatsD protocol. All StatsD client libraries can be u - Timers use `|ms` - Histograms use `|h` - The only difference between the two, is the `units` of the charts, as timers report *miliseconds*. + The only difference between the two, is the `units` of the charts, as timers report *milliseconds*. [Sampling rate](#sampling-rates) is supported. @@ -102,7 +102,7 @@ When sending multiple packets over UDP, it is important not to exceed the networ Netdata will accept UDP packets up to 9000 bytes, but the underlying network will not exceed MTU. -> You can read more about the network maxium transmission unit(MTU) in this cloudflare [article](https://www.cloudflare.com/en-gb/learning/network-layer/what-is-mtu/). +> You can read more about the network maximum transmission unit(MTU) in this cloudflare [article](https://www.cloudflare.com/en-gb/learning/network-layer/what-is-mtu/). 
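
To stay under the MTU in practice, a client batches several metric lines into one datagram and flushes before the packet would grow too large. A hypothetical sender along those lines (the helper name and the 1400-byte budget are illustrative, not part of Netdata):

```python
import socket

def send_statsd(metrics, host='127.0.0.1', port=8125, mtu=1400):
    # metrics: iterable of already-formatted StatsD lines,
    # e.g. 'app.requests:1|c' or 'app.latency:7|ms'
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    packet = b''
    for m in metrics:
        line = m.encode()
        # flush before a newline-joined line would exceed the budget
        if packet and len(packet) + 1 + len(line) > mtu:
            sock.sendto(packet, (host, port))
            packet = b''
        packet = packet + b'\n' + line if packet else line
    if packet:
        sock.sendto(packet, (host, port))
```
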
## Configuration diff --git a/collectors/statsd.plugin/statsd.c b/collectors/statsd.plugin/statsd.c index e30cc6e2b..9e152b09e 100644 --- a/collectors/statsd.plugin/statsd.c +++ b/collectors/statsd.plugin/statsd.c @@ -196,13 +196,13 @@ typedef struct statsd_app_chart_dimension { } STATSD_APP_CHART_DIM; typedef struct statsd_app_chart { - const char *source; const char *id; const char *name; const char *title; const char *family; const char *context; const char *units; + const char *module; long priority; RRDSET_TYPE chart_type; STATSD_APP_CHART_DIM *dimensions; @@ -1214,10 +1214,15 @@ static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHA chart->next = app->charts; app->charts = chart; - { - char lineandfile[FILENAME_MAX + 1]; - snprintfz(lineandfile, FILENAME_MAX, "%zu@%s", line, filename); - chart->source = strdupz(lineandfile); + if (!strncmp( + filename, + netdata_configured_stock_config_dir, + strlen(netdata_configured_stock_config_dir))) { + char tmpfilename[FILENAME_MAX + 1]; + strncpyz(tmpfilename, filename, FILENAME_MAX); + chart->module = strdupz(basename(tmpfilename)); + } else { + chart->module = strdupz("synthetic_chart"); } } } @@ -1996,7 +2001,7 @@ static inline void statsd_update_app_chart(STATSD_APP *app, STATSD_APP_CHART *ch , chart->title // title , chart->units // units , PLUGIN_STATSD_NAME // plugin - , chart->source // module + , chart->module // module , chart->priority // priority , statsd.update_every // update every , chart->chart_type // chart type @@ -2175,8 +2180,8 @@ void *statsd_main(void *ptr) { statsd.histogram_percentile = 95.0; } { - char buffer[100 + 1]; - snprintf(buffer, 100, "%0.1f%%", statsd.histogram_percentile); + char buffer[314 + 1]; + snprintfz(buffer, 314, "%0.1f%%", statsd.histogram_percentile); statsd.histogram_percentile_str = strdupz(buffer); } @@ -2436,7 +2441,7 @@ void *statsd_main(void *ptr) { char title[100 + 1]; snprintfz(id, 100, "plugin_statsd_collector%d_cpu", i + 1); - snprintfz(title, 100, "NetData statsd collector thread No %d CPU usage", i + 1); + snprintfz(title, 100, "Netdata statsd collector thread No %d CPU usage", i + 1); statsd.collection_threads_status[i].st_cpu = rrdset_create_localhost( "netdata" diff --git a/collectors/statsd.plugin/statsd.h b/collectors/statsd.plugin/statsd.h index b741be76d..37d6a08b3 100644 --- a/collectors/statsd.plugin/statsd.h +++ b/collectors/statsd.plugin/statsd.h @@ -3,7 +3,7 @@ #ifndef NETDATA_STATSD_H #define NETDATA_STATSD_H 1 -#include "../../daemon/common.h" +#include "daemon/common.h" #define STATSD_LISTEN_PORT 8125 #define STATSD_LISTEN_BACKLOG 4096 diff --git a/collectors/tc.plugin/plugin_tc.h b/collectors/tc.plugin/plugin_tc.h index c64658415..d51fcf674 100644 --- a/collectors/tc.plugin/plugin_tc.h +++ b/collectors/tc.plugin/plugin_tc.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGIN_TC_H #define NETDATA_PLUGIN_TC_H 1 -#include "../../daemon/common.h" +#include "daemon/common.h" #if (TARGET_OS == OS_LINUX) diff --git a/collectors/timex.plugin/plugin_timex.h b/collectors/timex.plugin/plugin_timex.h index 6025641a3..f83786163 100644 --- a/collectors/timex.plugin/plugin_timex.h +++ b/collectors/timex.plugin/plugin_timex.h @@ -3,7 +3,7 @@ #ifndef NETDATA_PLUGIN_TIMEX_H #define NETDATA_PLUGIN_TIMEX_H -#include "../../daemon/common.h" +#include "daemon/common.h" #if (TARGET_OS == OS_LINUX) diff --git a/collectors/xenstat.plugin/xenstat_plugin.c b/collectors/xenstat.plugin/xenstat_plugin.c index a322dd1c1..abcb5a1c3 100644 --- a/collectors/xenstat.plugin/xenstat_plugin.c +++ 
b/collectors/xenstat.plugin/xenstat_plugin.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../../libnetdata/libnetdata.h" +#include "libnetdata/libnetdata.h" #include #include diff --git a/configure.ac b/configure.ac index 2f3cac5d4..b5b6893e5 100644 --- a/configure.ac +++ b/configure.ac @@ -144,9 +144,9 @@ AC_ARG_ENABLE( ) AC_ARG_ENABLE( [lto], - [AS_HELP_STRING([--disable-lto], [Link Time Optimizations @<:@default autodetect@:>@])], + [AS_HELP_STRING([--enable-lto], [Link Time Optimizations @<:@default disabled@:>@])], , - [enable_lto="detect"] + [enable_lto="no"] ) AC_ARG_ENABLE( [https], @@ -174,17 +174,32 @@ AC_ARG_ENABLE( ) AC_ARG_WITH( [bundled-lws], - [AS_HELP_STRING([--with-bundled-lws=DIR], [Use a specific Libwebsockets static library @<:@default use system library@:>@])], - [ - with_bundled_lws="yes" - bundled_lws_dir="${withval}" - ], - [with_bundled_lws="no"] + [AS_HELP_STRING([--with-bundled-lws], [Use the bundled version of libwebsockets library @<:@default use system library@:>@])], + [with_bundled_lws="yes"], [with_bundled_lws="no"] +) +AC_ARG_WITH( + [bundled-protobuf], + [AS_HELP_STRING([--with-bundled-protobuf], + [Uses the bundled version of Google Protocol Buffers @<:@default bundled if present@:>@])], + [with_bundled_protobuf="$withval"], + [with_bundled_protobuf="detect"] +) +AC_ARG_ENABLE( + [ml], + [AS_HELP_STRING([--enable-ml], [Enable anomaly detection @<:@default autodetect@:>@])], + , + [enable_ml="detect"] +) +AC_ARG_ENABLE( + [ml_tests], + [AS_HELP_STRING([--enable-ml-tests], [Enable anomaly detection tests @<:@no@:>@])], + [enable_ml_tests="yes"], + [enable_ml_tests="no"] ) # ----------------------------------------------------------------------------- # Enforce building with C99, bail early if we can't. -test "${ac_cv_prog_cc_c99}" = "no" && AC_MSG_ERROR([Netdata rquires a compiler that supports C99 to build]) +test "${ac_cv_prog_cc_c99}" = "no" && AC_MSG_ERROR([Netdata requires a compiler that supports C99 to build]) # ----------------------------------------------------------------------------- # Check if cloud is enabled and if the functionality is available @@ -197,12 +212,28 @@ AC_ARG_ENABLE( [ enable_cloud="detect" ] ) +AC_ARG_WITH( + [aclk-legacy], + [AS_HELP_STRING([--with-aclk-legacy], + [Requires Legacy ACLK to be used even in case ACLK-NG can run on this system])], + [aclk_legacy="$withval"], + [aclk_legacy="detect"] +) + AC_ARG_WITH( [aclk-ng], [AS_HELP_STRING([--with-aclk-ng], [Requires ACLK-NG to be used even in case ACLK Legacy can run on this system])], [aclk_ng="$withval"], - [aclk_ng="fallback"] + [aclk_ng="detect"] +) + +AC_ARG_WITH( + [new-cloud-protocol], + [AS_HELP_STRING([--with-new-cloud-protocol], + [Requires New Cloud Protocol support to be built])], + [new_cloud_protocol="$withval"], + [new_cloud_protocol="detect"] ) if test "${enable_cloud}" = "no"; then @@ -432,15 +463,14 @@ test "${enable_dbengine}" = "yes" -a -z "${LZ4_LIBS}" && \ AC_MSG_ERROR([liblz4 required but not found. 
Try installing 'liblz4-dev' or 'lz4-devel'.]) -AC_ARG_WITH([libJudy], - [AS_HELP_STRING([--with-libJudy=PREFIX],[Use a specific Judy library (default is system-library)])], +AC_ARG_WITH([bundled-libJudy], + [AS_HELP_STRING([--with-bundled-libJudy],[Use the bundled version of Judy library (default is system-library)])], [ - libJudy_dir="$withval" AC_MSG_CHECKING(for libJudy in $withval) - if test -f "${libJudy_dir}/libJudy.a" -a -f "${libJudy_dir}/Judy.h"; then + if test -f "externaldeps/libJudy/libJudy.a" -a -f "externaldeps/libJudy/Judy.h"; then LIBS_BACKUP="${LIBS}" - LIBS="${libJudy_dir}/libJudy.a" - AC_LINK_IFELSE([AC_LANG_SOURCE([[#include "${libJudy_dir}/Judy.h" + LIBS="externaldeps/libJudy/libJudy.a" + AC_LINK_IFELSE([AC_LANG_SOURCE([[#include "externaldeps/libJudy/Judy.h" int main (int argc, char **argv) { Pvoid_t PJLArray = (Pvoid_t) NULL; Word_t * PValue; @@ -450,8 +480,8 @@ AC_ARG_WITH([libJudy], [HAVE_libJudy_a="yes"], [HAVE_libJudy_a="no"]) LIBS="${LIBS_BACKUP}" - JUDY_LIBS="${libJudy_dir}/libJudy.a" - JUDY_CFLAGS="-I${libJudy_dir}" + JUDY_LIBS="\$(abs_top_srcdir)/externaldeps/libJudy/libJudy.a" + JUDY_CFLAGS="-I \$(abs_top_srcdir)/externaldeps/libJudy" AC_MSG_RESULT([$HAVE_libJudy_a]) else libjudy_dir="" @@ -531,7 +561,7 @@ if test "${enable_jsonc}" != "no" -a -z "${JSONC_LIBS}"; then if test "${HAVE_libjson_c_a}" = "yes"; then AC_DEFINE([LINK_STATIC_JSONC], [1], [static json-c should be used]) JSONC_LIBS="static" - OPTIONAL_JSONC_STATIC_CFLAGS="-I externaldeps/jsonc" + OPTIONAL_JSONC_STATIC_CFLAGS="-I \$(abs_top_srcdir)/externaldeps/jsonc" fi AC_MSG_RESULT([${HAVE_libjson_c_a}]) fi @@ -639,11 +669,175 @@ AM_CONDITIONAL([ENABLE_CAPABILITY], [test "${with_libcap}" = "yes"]) # ----------------------------------------------------------------------------- # ACLK -AC_MSG_CHECKING([if cloud functionality should be enabled]) +bundled_proto_avail="no" +if test "${with_bundled_protobuf}" != "no"; then + AC_MSG_CHECKING([is bundled protobuf available]) + if test -f "externaldeps/protobuf/src/protoc"; then + bundled_proto_avail="yes" + fi + AC_MSG_RESULT([${bundled_proto_avail}]) + if test "${with_bundled_protobuf}" == "yes" -a "${bundled_proto_avail}" != "yes"; then + AC_MSG_ERROR([Bundled protobuf requested using --with-bundled-protobuf but it cannot be used/found]) + fi + if test "${with_bundled_protobuf}" == "detect" -a "${bundled_proto_avail}" == "yes"; then + with_bundled_protobuf="yes" + fi +fi + +if test "${with_bundled_protobuf}" != "yes"; then +PKG_CHECK_MODULES( + [PROTOBUF], + [protobuf >= 3], + [have_libprotobuf=yes], + [have_libprotobuf=no] +) + +AC_PATH_PROG([PROTOC], [protoc], [no]) +AS_IF( + [test x"${PROTOC}" == x"no"], + [have_protoc=no], + [have_protoc=yes] +) +else + AC_MSG_NOTICE([using bundled protobuf]) + AC_DEFINE([BUNDLED_PROTOBUF], [1], [Using a bundled copy of protobuf]) + PROTOC="\$(abs_top_srcdir)/externaldeps/protobuf/src/protoc" + PROTOBUF_CFLAGS="-I \$(abs_top_srcdir)/externaldeps/protobuf/src" + PROTOBUF_LIBS="\$(abs_top_srcdir)/externaldeps/protobuf/src/.libs/libprotobuf.a" + have_libprotobuf="yes" + have_protoc="yes" +fi + +AC_PATH_PROG([CXX_BINARY], [${CXX}], [no]) +AS_IF( + [test x"${CXX_BINARY}" == x"no"], + [have_CXX_compiler=no], + [have_CXX_compiler=yes] +) + +if test "${have_libprotobuf}" == "yes" && test "${have_CXX_compiler}" == "yes"; then + AC_DEFINE([HAVE_PROTOBUF], [1], [Protobuf is available]) +fi + +AC_MSG_CHECKING([if Cloud functionality should be enabled]) AC_MSG_RESULT([${enable_cloud}]) -if test "$enable_cloud" != "no" -a "$aclk_ng" 
!= "yes"; then - # just to have all messages that can fail ACLK build in one place - # so it is easier to see why it can't be built +if test "$aclk_ng" = "no"; then + AC_DEFINE([ACLK_NG_DISABLED], [1], [ACLK NG was disabled by user request]) +fi +if test "$aclk_legacy" = "no"; then + AC_DEFINE([ACLK_LEGACY_DISABLED], [1], [ACLK Legacy was disabled by user request]) +fi + +if test "$enable_cloud" = "no" -a "$aclk_legacy" = "yes"; then + AC_MSG_ERROR([--disable-cloud && --with-aclk-legacy not allowed together (such configuration is self contradicting)]) +fi + +if test "$enable_cloud" = "no" -a "$aclk_ng" = "yes"; then + AC_MSG_ERROR([--disable-cloud && --with-aclk-ng not allowed together (such configuration is self contradicting)]) +fi + +if test "$enable_cloud" != "no" -a "$aclk_ng" != "no"; then + AC_MSG_NOTICE([Checking if ACLK Next Generation can be built]) + can_enable_ng="yes" + AC_MSG_CHECKING([if git submodules present for ACLK Next Generation]) + if test -f "mqtt_websockets/src/mqtt_wss_client.c"; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + can_enable_ng="no" + fi + AC_MSG_CHECKING([if SSL available for ACLK Next Generation]) + if test -n "${SSL_LIBS}"; then + AC_MSG_RESULT([yes]) + OPTIONAL_SSL_CFLAGS="${SSL_CFLAGS}" + OPTIONAL_SSL_LIBS="${SSL_LIBS}" + else + AC_MSG_RESULT([no]) + fi + AC_MSG_CHECKING([if JSON-C available for ACLK Next Generation]) + if test "$enable_jsonc" != "yes"; then + AC_MSG_RESULT([no]) + can_enable_ng="no" + else + AC_MSG_RESULT([yes]) + fi + + AC_MSG_CHECKING([ACLK Next Generation can be built]) + AC_MSG_RESULT([${can_enable_ng}]) + if test "$can_enable_ng" = "no" -a "$aclk_ng" = "yes"; then + AC_MSG_ERROR([You have requested --with-aclk-ng but it can't be built. See reasons in lines above]) + fi + if test "$can_enable_ng" = "yes"; then + aclk_ng="yes" + enable_aclk="yes" + AC_DEFINE([ACLK_NG], [1], [ACLK Next Generation Should be used]) + AC_DEFINE([ENABLE_ACLK], [1], [netdata ACLK]) + OPTIONAL_ACLK_NG_CFLAGS="-I \$(abs_top_srcdir)/mqtt_websockets/src/include -I \$(abs_top_srcdir)/mqtt_websockets/c-rbuf/include -I \$(abs_top_srcdir)/mqtt_websockets/MQTT-C/include" + fi + + if test "$aclk_ng" = "yes" -a "$new_cloud_protocol" != "no"; then + can_build_new_cloud_protocol="yes" + AC_MSG_CHECKING([if protobuf available for New Cloud Protocol]) + if test "${have_libprotobuf}" != "yes"; then + AC_MSG_RESULT([no]) + can_build_new_cloud_protocol="no" + else + AC_MSG_RESULT([yes]) + fi + AC_MSG_CHECKING([if protoc available for New Cloud Protocol]) + if test "${have_protoc}" != "yes"; then + AC_MSG_RESULT([no]) + can_build_new_cloud_protocol="no" + else + AC_MSG_RESULT([yes]) + fi + AC_MSG_CHECKING([if C++ compiler available for New Cloud Protocol]) + if test "${have_CXX_compiler}" != "yes"; then + AC_MSG_RESULT([no]) + can_build_new_cloud_protocol="no" + else + AC_MSG_RESULT([yes]) + fi + + if test "${with_bundled_protobuf}" = "yes"; then + AC_LANG_PUSH([C++]) + CXXFLAGS="${CXXFLAGS} -std=c++11" + + # On some platforms, std::atomic needs a helper library + AC_MSG_CHECKING(whether -latomic is needed for static protobuf) + AC_LINK_IFELSE([AC_LANG_SOURCE([[ + #include + #include + std::atomic v; + int main() { + return v; + } + ]])], STD_ATOMIC_NEED_LIBATOMIC=no, STD_ATOMIC_NEED_LIBATOMIC=yes) + AC_MSG_RESULT($STD_ATOMIC_NEED_LIBATOMIC) + if test "x$STD_ATOMIC_NEED_LIBATOMIC" = xyes; then + OPTIONAL_ATOMIC_LIBS="-latomic" + fi + AC_SUBST([OPTIONAL_ATOMIC_LIBS]) + AC_LANG_POP([C++]) + fi + AC_MSG_CHECKING([ACLK Next Generation can support New Cloud 
protocol]) + AC_MSG_RESULT([${can_build_new_cloud_protocol}]) + if test "$new_cloud_protocol" = "yes" -a "$can_build_new_cloud_protocol" != "yes"; then + AC_MSG_ERROR([Requested new cloud protocol support but it can't be build]) + fi + if test "$can_build_new_cloud_protocol" = "yes"; then + new_cloud_protocol="yes" + AC_DEFINE([ENABLE_NEW_CLOUD_PROTOCOL], [1], [New protobuf based Netdata Cloud Protocol Support]) + OPTIONAL_ACLK_NG_CFLAGS="${OPTIONAL_ACLK_NG_CFLAGS} -I \$(abs_top_srcdir)/aclk/aclk-schemas" + OPTIONAL_PROTOBUF_CFLAGS="${PROTOBUF_CFLAGS}" + CXX11FLAG="-std=c++11" + OPTIONAL_PROTOBUF_LIBS="${PROTOBUF_LIBS}" + fi + fi +fi + +if test "$enable_cloud" != "no" -a "$aclk_legacy" != "no"; then + AC_MSG_NOTICE([Checking if ACLK Legacy can be built]) if test -n "${SSL_LIBS}"; then OPTIONAL_SSL_CFLAGS="${SSL_CFLAGS}" OPTIONAL_SSL_LIBS="${SSL_LIBS}" @@ -670,10 +864,10 @@ if test "$enable_cloud" != "no" -a "$aclk_ng" != "yes"; then AC_MSG_RESULT([${HAVE_libmosquitto_a}]) if test "${with_bundled_lws}" = "yes"; then - AC_MSG_CHECKING([if libwebsockets static lib is present]) - if test -f "${bundled_lws_dir}/libwebsockets.a"; then - LWS_CFLAGS="-I ${bundled_lws_dir}/include" - OPTIONAL_LWS_LIBS="${bundled_lws_dir}/libwebsockets.a" + AC_MSG_CHECKING([if libwebsockets static lib is present for ACLK Legacy]) + if test -f "externaldeps/libwebsockets/libwebsockets.a"; then + LWS_CFLAGS="-I \$(abs_top_srcdir)/externaldeps/libwebsockets/include" + OPTIONAL_LWS_LIBS="\$(abs_top_srcdir)/externaldeps/libwebsockets/libwebsockets.a" AC_MSG_RESULT([yes]) AC_DEFINE([BUNDLED_LWS], [1], [using statically linked libwebsockets]) else @@ -682,7 +876,7 @@ if test "$enable_cloud" != "no" -a "$aclk_ng" != "yes"; then # as currently this is default we prefer building netdata without ACLK # instead of error fail AC_MSG_RESULT([no]) - AC_MSG_WARN([You required static libwebsockets to be used but we can't use it. Disabling ACLK]) + AC_MSG_WARN([You required static libwebsockets to be used but we can't use it. Disabling ACLK Legacy]) fi else AC_CHECK_LIB([websockets], @@ -691,7 +885,7 @@ if test "$enable_cloud" != "no" -a "$aclk_ng" != "yes"; then [AC_DEFINE([ACLK_NO_LWS], [1], [usable system libwebsockets was not found during build.])]) fi - if test "${build_target}" = "linux" -a "${enable_cloud}" != "no"; then + if test "${build_target}" = "linux"; then if test "${have_libcap}" = "yes" -a "${with_libcap}" = "no"; then AC_MSG_ERROR([agent-cloud-link can't be built without libcap. Disable it by --disable-cloud or enable libcap]) fi @@ -701,86 +895,39 @@ if test "$enable_cloud" != "no" -a "$aclk_ng" != "yes"; then fi # next 2 lines are just to have info for ACLK dependencies in common place - AC_MSG_CHECKING([if json-c available for ACLK]) + AC_MSG_CHECKING([if json-c available for ACLK Legacy]) AC_MSG_RESULT([${enable_jsonc}]) - test "${enable_cloud}" = "yes" -a "${enable_jsonc}" = "no" && \ - AC_MSG_ERROR([You have asked for ACLK to be built but no json-c available. 
ACLK requires json-c]) - - AC_MSG_CHECKING([if netdata agent-cloud-link can be enabled]) + AC_MSG_CHECKING([if netdata ACLK Legacy can be built]) if test "${HAVE_libmosquitto_a}" = "yes" -a -n "${OPTIONAL_LWS_LIBS}" -a -n "${SSL_LIBS}" -a "${enable_jsonc}" = "yes"; then - can_enable_aclk="yes" + can_build_legacy="yes" else - can_enable_aclk="no" + can_build_legacy="no" fi - AC_MSG_RESULT([${can_enable_aclk}]) + AC_MSG_RESULT([${can_build_legacy}]) -# TODO fix this (you need to try fallback) - test "${enable_cloud}" = "yes" -a "${can_enable_aclk}" = "no" && \ - AC_MSG_ERROR([User required agent-cloud-link but it can't be built!]) - - AC_MSG_CHECKING([if netdata agent-cloud-link should/will be enabled]) - if test "${enable_cloud}" = "detect"; then - enable_aclk=$can_enable_aclk - else - enable_aclk=$enable_cloud + if test "$can_build_legacy" = "no" -a "$aclk_legacy" = "yes"; then + AC_MSG_ERROR([You have requested --with-aclk-legacy but it can't be built. See reasons in lines above]) fi - if test "${enable_aclk}" = "yes"; then + if test "$can_build_legacy" = "yes"; then + AC_DEFINE([ACLK_LEGACY], [1], [ACLK Legacy Should be used]) AC_DEFINE([ENABLE_ACLK], [1], [netdata ACLK]) + aclk_legacy="yes" + enable_aclk="yes" fi - - AC_MSG_RESULT([${enable_aclk}]) fi -if test "$enable_cloud" = "no" -a "$aclk_ng" = "yes"; then - AC_MSG_ERROR([--disable-cloud && --aclk-ng not allowed together (such configuration is self contradicting)]) +if test "$enable_cloud" = "yes" -a "$enable_aclk" != "yes"; then + AC_MSG_ERROR([Neither ACLK-NG nor ACLK-Legacy can be built but --enable-cloud was requested]) fi -if test "$enable_cloud" != "no" -a "$aclk_ng" != "no"; then - can_enable_ng="yes" - AC_MSG_CHECKING([if git submodules present for ACLK Next Generation]) - if test -f "mqtt_websockets/src/mqtt_wss_client.c"; then - AC_MSG_RESULT([yes]) - else - AC_MSG_RESULT([no]) - can_enable_ng="no" - fi - AC_MSG_CHECKING([if SSL available for ACLK Next Generation]) - if test -n "${SSL_LIBS}"; then - AC_MSG_RESULT([yes]) - OPTIONAL_SSL_CFLAGS="${SSL_CFLAGS}" - OPTIONAL_SSL_LIBS="${SSL_LIBS}" - else - AC_MSG_RESULT([no]) - fi - AC_MSG_CHECKING([if JSON-C available for ACLK Next Generation]) - if test "$enable_jsonc" != "yes"; then - AC_MSG_RESULT([no]) - can_enable_ng="no" - else - AC_MSG_RESULT([yes]) - fi - AC_MSG_CHECKING([ACLK Next Generation can be built]) - AC_MSG_RESULT([${can_enable_ng}]) - if test "$aclk_ng" = "yes" -a "$can_enable_ng" != "yes"; then - AC_MSG_ERROR([ACLK-NG requested but can't be built]) - fi - if test "$aclk_ng" != "yes" -a "$enable_aclk" == "no" -a "$can_enable_ng" = "yes"; then #default "fallback" - AC_MSG_NOTICE([ACLK Legacy could not be built. 
Trying ACLK-NG as fallback.]) - aclk_ng="yes" - fi - if test "$aclk_ng" = "yes"; then - AC_DEFINE([ACLK_NG], [1], [ACLK Next Generation Should be used]) - AC_DEFINE([ENABLE_ACLK], [1], [netdata ACLK]) - enable_aclk="yes" - OPTIONAL_ACLK_NG_CFLAGS="-Imqtt_websockets/src/include -Imqtt_websockets/c-rbuf/include -Imqtt_websockets/MQTT-C/include" - fi -fi AC_SUBST([enable_cloud]) AC_SUBST([enable_aclk]) AM_CONDITIONAL([ACLK_NG], [test "${aclk_ng}" = "yes"]) +AM_CONDITIONAL([ACLK_LEGACY], [test "${aclk_legacy}" = "yes"]) AM_CONDITIONAL([ENABLE_ACLK], [test "${enable_aclk}" = "yes"]) +AM_CONDITIONAL([ENABLE_NEW_CLOUD_PROTOCOL], [test "${can_build_new_cloud_protocol}" = "yes"]) # ----------------------------------------------------------------------------- # apps.plugin @@ -1059,6 +1206,90 @@ fi AC_MSG_RESULT([${enable_plugin_perf}]) AM_CONDITIONAL([ENABLE_PLUGIN_PERF], [test "${enable_plugin_perf}" = "yes"]) +# ----------------------------------------------------------------------------- +# gtest/gmock + +AC_MSG_CHECKING([if gtest and gmock can be found]) + +PKG_CHECK_MODULES([GTEST], [gtest], [have_gtest=yes], [have_gtest=no]) +PKG_CHECK_MODULES([GMOCK], [gmock], [have_gmock=yes], [have_gmock=no]) + +if test "${have_gtest}" = "yes" -a "${have_gmock}" = "yes"; then + OPTIONAL_GTEST_CFLAGS="${GTEST_CFLAGS} ${GMOCK_CFLAGS}" + OPTIONAL_GTEST_LIBS="${GTEST_LIBS} ${GMOCK_LIBS}" + have_gtest="yes" +else + have_gtest="no" +fi + +# ----------------------------------------------------------------------------- +# ml - anomaly detection + +# Check if uuid is available. Fail if ML was explicitly requested. +if test "${enable_ml}" = "yes" -a "${have_uuid}" != "yes"; then + AC_MSG_ERROR([You have explicitly requested --enable-ml functionality but libuuid can not be found."]) +fi + +# Check if submodules have not been fetched. Fail if ML was explicitly requested. +AC_MSG_CHECKING([if git submodules are present for machine learning functionality]) +if test -f "ml/kmeans/dlib/dlib/all/source.cpp" -a -f "ml/json/single_include/nlohmann/json.hpp"; then + AC_MSG_RESULT([yes]) + have_ml_submodules="yes" +else + AC_MSG_RESULT([no]) + have_ml_submodules="no" +fi + +if test "${enable_ml}" = "yes" -a "${have_ml_submodules}" = "no"; then + AC_MSG_ERROR([You have explicitly requested --enable-ml functionality but it cannot be built because the required git submodules are missing.]) +fi + +# Check if C++ toolchain does not support C++11. Fail if ML was explicitly requested. +AC_LANG_PUSH([C++]) +AX_CHECK_COMPILE_FLAG([-std=c++11], [have_cxx11=yes], [have_cxx11=no]) +AC_LANG_POP([C++]) + +# PPC64LE needs -std=gnu++11 in order to build dlib. However, the rest of +# the agent's components use and have been tested only with -std=c++11. +# Skip ML compilation on that CPU until we reorganize and test the C++ flags. 
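
The ML checks above repeat the same tri-state convention used for cloud, ACLK, and the new cloud protocol: "yes" must be satisfiable or configure aborts, "no" disables outright, and "detect" falls back to whatever was found. Expressed as a small sketch (in Python for brevity; the real logic is of course m4/shell):

```python
def resolve_feature(requested, available, flag):
    # requested: 'yes' | 'no' | 'detect' (the configure tri-state)
    if requested == 'no':
        return False
    if requested == 'yes' and not available:
        raise SystemExit(f'{flag} was requested but its prerequisites are missing')
    return available

# stand-ins for the individual checks performed above
have_submodules = have_cxx11 = have_uuid = True

# e.g. ML needs the git submodules, a C++11 toolchain and libuuid
build_ml = resolve_feature('detect',
                           have_submodules and have_cxx11 and have_uuid,
                           '--enable-ml')
```
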
+if test "${host_cpu}" = "powerpc64le"; then + have_cxx11="no" +fi + +if test "${enable_ml}" = "yes" -a "${have_cxx11}" = "no"; then + AC_MSG_ERROR([You have explicitly requested --enable-ml functionality but it cannot be built without a C++11 toolchain.]) +else + CXX11FLAG="$CXX11FLAG -std=c++11" +fi + +# Decide if we should build ML +if test "${enable_ml}" != "no" -a "${have_ml_submodules}" = "yes" -a "${have_cxx11}" = "yes" -a "${have_uuid}" = "yes"; then + build_ml="yes" +else + build_ml="no" +fi + +AM_CONDITIONAL([ENABLE_ML], [test "${build_ml}" = "yes"]) +if test "${build_ml}" = "yes"; then + AC_DEFINE([ENABLE_ML], [1], [anomaly detection usability]) + OPTIONAL_ML_CFLAGS="-DDLIB_NO_GUI_SUPPORT -I \$(abs_top_srcdir)/ml/kmeans/dlib" + OPTIONAL_ML_LIBS="" +fi + +# Decide if we should build ML tests. +if test "${build_ml}" = "yes" -a "${enable_ml_tests}" = "yes" -a "${have_gtest}" = "yes"; then + build_ml_tests="yes" +else + build_ml_tests="no" +fi + +AM_CONDITIONAL([ENABLE_ML_TESTS], [test "${build_ml_tests}" = "yes"]) +if test "${build_ml_tests}" = "yes"; then + AC_DEFINE([ENABLE_ML_TESTS], [1], [anomaly detection tests]) + OPTIONAL_ML_TESTS_CFLAGS="${OPTIONAL_GTEST_CFLAGS}" + OPTIONAL_ML_TESTS_LIBS="${OPTIONAL_GTEST_LIBS}" +fi + # ----------------------------------------------------------------------------- # ebpf.plugin @@ -1087,8 +1318,8 @@ if test "${build_target}" = "linux" -a "${enable_ebpf}" != "no"; then if test "${have_libelf}" = "yes" -a \ "${have_bpf}" = "yes" -a \ "${have_libbpf}" = "yes"; then - OPTIONAL_BPF_CFLAGS="${LIBELF_CFLAGS} -I externaldeps/libbpf/include" - OPTIONAL_BPF_LIBS="externaldeps/libbpf/libbpf.a ${LIBELF_LIBS}" + OPTIONAL_BPF_CFLAGS="${LIBELF_CFLAGS} -I \$(abs_top_srcdir)/externaldeps/libbpf/include" + OPTIONAL_BPF_LIBS="\$(abs_top_srcdir)/externaldeps/libbpf/libbpf.a ${LIBELF_LIBS}" AC_DEFINE([HAVE_LIBBPF], [1], [libbpf usability]) enable_ebpf="yes" else @@ -1287,13 +1518,6 @@ AM_CONDITIONAL([ENABLE_EXPORTING_PUBSUB], [test "${enable_exporting_pubsub}" = " # ----------------------------------------------------------------------------- # Prometheus remote write backend - libprotobuf, libsnappy, protoc -PKG_CHECK_MODULES( - [PROTOBUF], - [protobuf >= 3], - [have_libprotobuf=yes], - [have_libprotobuf=no] -) - AC_MSG_CHECKING([for snappy::RawCompress in -lsnappy]) AC_LANG_SAVE @@ -1329,20 +1553,6 @@ AC_MSG_CHECKING([for snappy::RawCompress in -lsnappy]) AC_MSG_RESULT([${have_libsnappy}]) -AC_PATH_PROG([PROTOC], [protoc], [no]) -AS_IF( - [test x"${PROTOC}" == x"no"], - [have_protoc=no], - [have_protoc=yes] -) - -AC_PATH_PROG([CXX_BINARY], [${CXX}], [no]) -AS_IF( - [test x"${CXX_BINARY}" == x"no"], - [have_CXX_compiler=no], - [have_CXX_compiler=yes] -) - test "${enable_backend_prometheus_remote_write}" = "yes" -a "${have_libprotobuf}" != "yes" && \ AC_MSG_ERROR([libprotobuf required but not found. 
try installing protobuf]) @@ -1360,9 +1570,11 @@ if test "${enable_backend_prometheus_remote_write}" != "no" -a "${have_libprotob -a "${have_protoc}" = "yes" -a "${have_CXX_compiler}" = "yes"; then enable_backend_prometheus_remote_write="yes" AC_DEFINE([ENABLE_PROMETHEUS_REMOTE_WRITE], [1], [Prometheus remote write API usability]) - OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS="${PROTOBUF_CFLAGS} ${SNAPPY_CFLAGS} -Iexporting/prometheus/remote_write" + OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS="${SNAPPY_CFLAGS} -I \$(abs_top_srcdir)/exporting/prometheus/remote_write" CXX11FLAG="-std=c++11" - OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS="${PROTOBUF_LIBS} ${SNAPPY_LIBS}" + OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS="${SNAPPY_LIBS}" + OPTIONAL_PROTOBUF_CFLAGS="${PROTOBUF_CFLAGS}" + OPTIONAL_PROTOBUF_LIBS="${PROTOBUF_LIBS}" else enable_backend_prometheus_remote_write="no" fi @@ -1445,7 +1657,9 @@ AC_MSG_RESULT([${enable_lto}]) AM_CONDITIONAL([ENABLE_CXX_LINKER], [test "${enable_backend_kinesis}" = "yes" \ -o "${enable_exporting_pubsub}" = "yes" \ - -o "${enable_backend_prometheus_remote_write}" = "yes"]) + -o "${enable_backend_prometheus_remote_write}" = "yes" \ + -o "${new_cloud_protocol}" = "yes" \ + -o "${build_ml}" = "yes"]) AC_DEFINE_UNQUOTED([NETDATA_USER], ["${with_user}"], [use this user to drop privileged]) @@ -1473,11 +1687,11 @@ AC_SUBST([logdir]) AC_SUBST([pluginsdir]) AC_SUBST([webdir]) -CFLAGS="${CFLAGS} ${OPTIONAL_MATH_CFLAGS} ${OPTIONAL_NFACCT_CFLAGS} ${OPTIONAL_ZLIB_CFLAGS} ${OPTIONAL_UUID_CFLAGS} \ +CFLAGS="${CFLAGS} ${OPTIONAL_PROTOBUF_CFLAGS} ${OPTIONAL_MATH_CFLAGS} ${OPTIONAL_NFACCT_CFLAGS} ${OPTIONAL_ZLIB_CFLAGS} ${OPTIONAL_UUID_CFLAGS} \ ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS} ${OPTIONAL_CUPS_CFLAGS} ${OPTIONAL_XENSTAT_FLAGS} \ ${OPTIONAL_KINESIS_CFLAGS} ${OPTIONAL_PUBSUB_CFLAGS} ${OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS} \ ${OPTIONAL_MONGOC_CFLAGS} ${LWS_CFLAGS} ${OPTIONAL_JSONC_STATIC_CFLAGS} ${OPTIONAL_BPF_CFLAGS} ${OPTIONAL_JUDY_CFLAGS} \ - ${OPTIONAL_ACLK_NG_CFLAGS}" + ${OPTIONAL_ACLK_NG_CFLAGS} ${OPTIONAL_ML_CFLAGS} ${OPTIONAL_ML_TESTS_CFLAGS}" CXXFLAGS="${CFLAGS} ${CXX11FLAG}" @@ -1528,6 +1742,14 @@ AC_SUBST([OPTIONAL_MONGOC_CFLAGS]) AC_SUBST([OPTIONAL_MONGOC_LIBS]) AC_SUBST([OPTIONAL_LWS_LIBS]) AC_SUBST([OPTIONAL_ACLK_NG_CFLAGS]) +AC_SUBST([OPTIONAL_PROTOBUF_CFLAGS]) +AC_SUBST([OPTIONAL_PROTOBUF_LIBS]) +AC_SUBST([OPTIONAL_GTEST_CFLAGS]) +AC_SUBST([OPTIONAL_GTEST_LIBS]) +AC_SUBST([OPTIONAL_ML_CFLAGS]) +AC_SUBST([OPTIONAL_ML_LIBS]) +AC_SUBST([OPTIONAL_ML_TESTS_CFLAGS]) +AC_SUBST([OPTIONAL_ML_TESTS_LIBS]) # ----------------------------------------------------------------------------- # Check if cmocka is available - needed for unit testing @@ -1617,6 +1839,8 @@ AC_CONFIG_FILES([ exporting/tests/Makefile health/Makefile health/notifications/Makefile + ml/Makefile + ml/kmeans/Makefile libnetdata/Makefile libnetdata/tests/Makefile libnetdata/adaptive_resortable_list/Makefile diff --git a/contrib/debian/control b/contrib/debian/control index 2659c389d..622e730f2 100644 --- a/contrib/debian/control +++ b/contrib/debian/control @@ -1,7 +1,6 @@ Source: netdata -Build-Depends: debhelper (>= 9), +Build-Depends: debhelper (>= 9.20160709), dh-autoreconf, - dh-systemd (>= 1.5), dpkg-dev (>= 1.13.19), zlib1g-dev, uuid-dev, diff --git a/contrib/debian/control.xenial b/contrib/debian/control.xenial new file mode 100644 index 000000000..2659c389d --- /dev/null +++ b/contrib/debian/control.xenial @@ -0,0 +1,62 @@ +Source: netdata +Build-Depends: debhelper (>= 9), + dh-autoreconf, + 
dh-systemd (>= 1.5), + dpkg-dev (>= 1.13.19), + zlib1g-dev, + uuid-dev, + libelf-dev, + libuv1-dev, + liblz4-dev, + libjudy-dev, + libssl-dev, + libmnl-dev, + libjson-c-dev, + libcups2-dev, + libipmimonitoring-dev, + libnetfilter-acct-dev, + libsnappy-dev, + libprotobuf-dev, + libprotoc-dev, + cmake, + autogen, + autoconf, + automake, + pkg-config, + curl, + protobuf-compiler +Section: net +Priority: optional +Maintainer: Netdata Builder +Standards-Version: 3.9.6 +Homepage: https://netdata.cloud + +Package: netdata +Architecture: any +Depends: adduser, + libcap2-bin (>= 1:2.0), + lsb-base (>= 3.1-23.2), + openssl, + ${misc:Depends}, + ${shlibs:Depends} +Pre-Depends: dpkg (>= 1.17.14) +Description: real-time charts for system monitoring + Netdata is a daemon that collects data in realtime (per second) + and presents a web site to view and analyze them. The presentation + is also real-time and full of interactive charts that precisely + render all collected values. + +Package: netdata-plugin-cups +Architecture: any +Depends: cups, + netdata (>= ${source:Version}) +Description: The Common Unix Printing System plugin for metrics collection from cupsd + +Package: netdata-plugin-freeipmi +Architecture: any +Depends: freeipmi, + netdata (= ${source:Version}) +Description: FreeIPMI - The Intelligent Platform Management System. + The IPMI specification defines a set of interfaces for platform management. + It is implemented by a number of vendors for system management. The features of IPMI that most users will be interested in + are sensor monitoring, system event monitoring, power control, and serial-over-LAN (SOL). diff --git a/contrib/debian/netdata.postinst b/contrib/debian/netdata.postinst index 17182c7e9..f26c94f93 100644 --- a/contrib/debian/netdata.postinst +++ b/contrib/debian/netdata.postinst @@ -55,7 +55,7 @@ case "$1" in chown -R root:netdata /var/lib/netdata/www setcap cap_dac_read_search,cap_sys_ptrace+ep /usr/libexec/netdata/plugins.d/apps.plugin setcap cap_dac_read_search+ep /usr/libexec/netdata/plugins.d/slabinfo.plugin - setcap cap_perfmon+ep /usr/libexec/netdata/plugins.d/perf.plugin || setcap cap_sys_admin+ep /usr/libexec/netdata/plugins.d/perf.plugin + capsh --supports=cap_perfmon 2>/dev/null && setcap cap_perfmon+ep /usr/libexec/netdata/plugins.d/perf.plugin || setcap cap_sys_admin+ep /usr/libexec/netdata/plugins.d/perf.plugin chmod 4750 /usr/libexec/netdata/plugins.d/cgroup-network chmod 4750 /usr/libexec/netdata/plugins.d/nfacct.plugin diff --git a/contrib/debian/rules b/contrib/debian/rules index eb50fffb4..0d54b9107 100755 --- a/contrib/debian/rules +++ b/contrib/debian/rules @@ -15,6 +15,13 @@ else SYSTEMD_UNIT = system/netdata.service endif +ifeq ($(shell test `uname -m` != "x86_64" && echo "1"), 1) +HAVE_EBPF = 0 +EBPF_CONFIG = --disable-ebpf +else +HAVE_EBPF = 1 +endif + %: # For jessie and beyond # @@ -36,11 +43,13 @@ override_dh_installinit: override_dh_auto_configure: packaging/bundle-mosquitto.sh . packaging/bundle-lws.sh . - packaging/bundle-libbpf.sh . + if [ $(HAVE_EBPF) -eq 1 ]; then \ + packaging/bundle-libbpf.sh . 
${TOP}/usr/libexec/netdata/plugins.d; \ + fi autoreconf -ivf dh_auto_configure -- --prefix=/usr --sysconfdir=/etc --localstatedir=/var --libdir=/usr/lib \ --libexecdir=/usr/libexec --with-user=netdata --with-math --with-zlib --with-webdir=/var/lib/netdata/www \ - --with-bundled-lws=externaldeps/libwebsockets + --with-bundled-lws $(EBPF_CONFIG) override_dh_install: cp -v $(BASE_CONFIG) debian/netdata.conf @@ -77,7 +86,9 @@ override_dh_install: ln -s "/usr/share/netdata/www/$$D" "$(TOP)/var/lib/netdata/www/$$D"; \ done - packaging/bundle-ebpf.sh . ${TOP}/usr/libexec/netdata/plugins.d + if [ $(HAVE_EBPF) -eq 1 ]; then \ + packaging/bundle-ebpf.sh . ${TOP}/usr/libexec/netdata/plugins.d; \ + fi # Install go # diff --git a/daemon/README.md b/daemon/README.md index 359b3ea39..1ea865f89 100644 --- a/daemon/README.md +++ b/daemon/README.md @@ -184,7 +184,7 @@ The command line options of the Netdata 1.10.0 version are the following: Check if string matches pattern and exit. -W "claim -token=TOKEN -rooms=ROOM1,ROOM2 url=https://app.netdata.cloud" - Claim the agent to the workspace rooms pointed to by TOKEN and ROOM*. + Connect the agent to the workspace rooms pointed to by TOKEN and ROOM*. Signals netdata handles: diff --git a/daemon/analytics.c b/daemon/analytics.c index 08923a3cb..bb878f708 100644 --- a/daemon/analytics.c +++ b/daemon/analytics.c @@ -4,6 +4,7 @@ struct analytics_data analytics_data; extern void analytics_exporting_connectors (BUFFER *b); +extern void analytics_exporting_connectors_ssl (BUFFER *b); extern void analytics_build_info (BUFFER *b); extern int aclk_connected; @@ -54,6 +55,12 @@ void analytics_log_data(void) debug(D_ANALYTICS, "NETDATA_HOST_ACLK_IMPLEMENTATION : [%s]", analytics_data.netdata_host_aclk_implementation); debug(D_ANALYTICS, "NETDATA_HOST_AGENT_CLAIMED : [%s]", analytics_data.netdata_host_agent_claimed); debug(D_ANALYTICS, "NETDATA_HOST_CLOUD_ENABLED : [%s]", analytics_data.netdata_host_cloud_enabled); + debug(D_ANALYTICS, "NETDATA_CONFIG_HTTPS_AVAILABLE : [%s]", analytics_data.netdata_config_https_available); + debug(D_ANALYTICS, "NETDATA_INSTALL_TYPE : [%s]", analytics_data.netdata_install_type); + debug(D_ANALYTICS, "NETDATA_PREBUILT_DISTRO : [%s]", analytics_data.netdata_prebuilt_distro); + debug(D_ANALYTICS, "NETDATA_CONFIG_IS_PRIVATE_REGISTRY : [%s]", analytics_data.netdata_config_is_private_registry); + debug(D_ANALYTICS, "NETDATA_CONFIG_USE_PRIVATE_REGISTRY: [%s]", analytics_data.netdata_config_use_private_registry); + debug(D_ANALYTICS, "NETDATA_CONFIG_OOM_SCORE : [%s]", analytics_data.netdata_config_oom_score); } /* @@ -93,6 +100,12 @@ void analytics_free_data(void) freez(analytics_data.netdata_host_aclk_implementation); freez(analytics_data.netdata_host_agent_claimed); freez(analytics_data.netdata_host_cloud_enabled); + freez(analytics_data.netdata_config_https_available); + freez(analytics_data.netdata_install_type); + freez(analytics_data.netdata_config_is_private_registry); + freez(analytics_data.netdata_config_use_private_registry); + freez(analytics_data.netdata_config_oom_score); + freez(analytics_data.netdata_prebuilt_distro); } /* @@ -137,7 +150,7 @@ void analytics_get_data(char *name, BUFFER *wb) */ void analytics_log_prometheus(void) { - if (likely(analytics_data.prometheus_hits < ANALYTICS_MAX_PROMETHEUS_HITS)) { + if (netdata_anonymous_statistics_enabled == 1 && likely(analytics_data.prometheus_hits < ANALYTICS_MAX_PROMETHEUS_HITS)) { analytics_data.prometheus_hits++; char b[7]; snprintfz(b, 6, "%d", analytics_data.prometheus_hits); @@ 
-150,7 +163,7 @@ void analytics_log_prometheus(void) */ void analytics_log_shell(void) { - if (likely(analytics_data.shell_hits < ANALYTICS_MAX_SHELL_HITS)) { + if (netdata_anonymous_statistics_enabled == 1 && likely(analytics_data.shell_hits < ANALYTICS_MAX_SHELL_HITS)) { analytics_data.shell_hits++; char b[7]; snprintfz(b, 6, "%d", analytics_data.shell_hits); @@ -163,7 +176,7 @@ void analytics_log_shell(void) */ void analytics_log_json(void) { - if (likely(analytics_data.json_hits < ANALYTICS_MAX_JSON_HITS)) { + if (netdata_anonymous_statistics_enabled == 1 && likely(analytics_data.json_hits < ANALYTICS_MAX_JSON_HITS)) { analytics_data.json_hits++; char b[7]; snprintfz(b, 6, "%d", analytics_data.json_hits); @@ -176,7 +189,7 @@ void analytics_log_json(void) */ void analytics_log_dashboard(void) { - if (likely(analytics_data.dashboard_hits < ANALYTICS_MAX_DASHBOARD_HITS)) { + if (netdata_anonymous_statistics_enabled == 1 && likely(analytics_data.dashboard_hits < ANALYTICS_MAX_DASHBOARD_HITS)) { analytics_data.dashboard_hits++; char b[7]; snprintfz(b, 6, "%d", analytics_data.dashboard_hits); @@ -184,6 +197,15 @@ void analytics_log_dashboard(void) } } +/* + * Called when setting the oom score + */ +void analytics_report_oom_score(long long int score){ + char b[7]; + snprintfz(b, 6, "%d", (int)score); + analytics_set_data(&analytics_data.netdata_config_oom_score, b); +} + void analytics_mirrored_hosts(void) { RRDHOST *host; @@ -217,7 +239,7 @@ void analytics_mirrored_hosts(void) void analytics_exporters(void) { //when no exporters are available, an empty string will be sent - //decide if something else is more suitable (but propably not null) + //decide if something else is more suitable (but probably not null) BUFFER *bi = buffer_create(1000); analytics_exporting_connectors(bi); analytics_set_data_str(&analytics_data.netdata_exporting_connectors, (char *)buffer_tostring(bi)); @@ -335,6 +357,67 @@ void analytics_alarms_notifications(void) buffer_free(b); } +char *get_value_from_key(char *buffer, char *key) +{ + char *s = NULL, *t = NULL; + s = t = buffer + strlen(key) + 2; + if (s) { + while (*s == '\'') + s++; + while (*++t != '\0'); + while (--t > s && *t == '\'') + *t = '\0'; + } + return s; +} + +/* + * Checks for the existence of .install_type file and reads it + */ +void analytics_get_install_type(void) +{ + char *install_type_filename; + analytics_set_data_str(&analytics_data.netdata_install_type, ""); + analytics_set_data_str(&analytics_data.netdata_prebuilt_distro, ""); + + int install_type_filename_len = (strlen(netdata_configured_user_config_dir) + strlen(".install-type") + 3); + install_type_filename = mallocz(sizeof(char) * install_type_filename_len); + snprintfz(install_type_filename, install_type_filename_len - 1, "%s/%s", netdata_configured_user_config_dir, ".install-type"); + + FILE *fp = fopen(install_type_filename, "r"); + if (fp) { + char *s, buf[256 + 1]; + size_t len = 0; + + while ((s = fgets_trim_len(buf, 256, fp, &len))) { + if (!strncmp(buf, "INSTALL_TYPE='", 14)) + analytics_set_data_str(&analytics_data.netdata_install_type, (char *)get_value_from_key(buf, "INSTALL_TYPE")); + else if (!strncmp(buf, "PREBUILT_DISTRO='", 17)) + analytics_set_data_str(&analytics_data.netdata_prebuilt_distro, (char *)get_value_from_key(buf, "PREBUILT_DISTRO")); + } + fclose(fp); + } + freez(install_type_filename); +} + +/* + * Pick up if https is actually used + */ +void analytics_https(void) +{ + BUFFER *b = buffer_create(30); +#ifdef ENABLE_HTTPS + 
analytics_exporting_connectors_ssl(b); + buffer_strcat(b, netdata_client_ctx && localhost->ssl.flags == NETDATA_SSL_HANDSHAKE_COMPLETE && localhost->rrdpush_sender_connected == 1 ? "streaming|" : "|"); + buffer_strcat(b, netdata_srv_ctx ? "web" : ""); +#else + buffer_strcat(b, "||"); +#endif + + analytics_set_data_str(&analytics_data.netdata_config_https_available, (char *)buffer_tostring(b)); + buffer_free(b); +} + void analytics_charts(void) { RRDSET *st; @@ -411,13 +494,13 @@ void analytics_misc(void) { #ifdef ENABLE_ACLK analytics_set_data(&analytics_data.netdata_host_cloud_available, "true"); -#ifdef ACLK_NG - analytics_set_data_str(&analytics_data.netdata_host_aclk_implementation, "Next Generation"); -#else - analytics_set_data_str(&analytics_data.netdata_host_aclk_implementation, "legacy"); -#endif + if (aclk_ng) + analytics_set_data_str(&analytics_data.netdata_host_aclk_implementation, "Next Generation"); + else + analytics_set_data_str(&analytics_data.netdata_host_aclk_implementation, "legacy"); #else analytics_set_data(&analytics_data.netdata_host_cloud_available, "false"); + analytics_set_data_str(&analytics_data.netdata_host_aclk_implementation, ""); #endif #ifdef ENABLE_ACLK @@ -426,17 +509,33 @@ void analytics_misc(void) else #endif analytics_set_data(&analytics_data.netdata_host_aclk_available, "false"); + + analytics_set_data(&analytics_data.netdata_config_exporting_enabled, appconfig_get_boolean(&exporting_config, CONFIG_SECTION_EXPORTING, "enabled", CONFIG_BOOLEAN_NO) ? "true" : "false"); + + analytics_set_data(&analytics_data.netdata_config_is_private_registry, "false"); + analytics_set_data(&analytics_data.netdata_config_use_private_registry, "false"); + + if (strcmp( + config_get(CONFIG_SECTION_REGISTRY, "registry to announce", "https://registry.my-netdata.io"), + "https://registry.my-netdata.io")) + analytics_set_data(&analytics_data.netdata_config_use_private_registry, "true"); + + //do we need both registry to announce and enabled to indicate that this is a private registry ? + if (config_get_boolean(CONFIG_SECTION_REGISTRY, "enabled", CONFIG_BOOLEAN_NO) && + web_server_mode != WEB_SERVER_MODE_NONE) + analytics_set_data(&analytics_data.netdata_config_is_private_registry, "true"); } /* * Get the meta data, called from the thread once after the original delay - * These are values that won't change between agent restarts, and therefore + * These are values that won't change during agent runtime, and therefore * don't try to read them on each META event send */ void analytics_gather_immutable_meta_data(void) { analytics_misc(); analytics_exporters(); + analytics_https(); } /* @@ -490,6 +589,7 @@ void analytics_main_cleanup(void *ptr) static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; debug(D_ANALYTICS, "Cleaning up..."); + analytics_free_data(); static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; } @@ -521,7 +621,7 @@ void *analytics_main(void *ptr) analytics_gather_immutable_meta_data(); analytics_gather_mutable_meta_data(); - send_statistics("META", "-", "-"); + send_statistics("META_START", "-", "-"); analytics_log_data(); sec = 0; @@ -567,7 +667,6 @@ void set_late_global_environment() { analytics_set_data(&analytics_data.netdata_config_stream_enabled, default_rrdpush_enabled ? 
"true" : "false"); analytics_set_data_str(&analytics_data.netdata_config_memory_mode, (char *)rrd_memory_mode_name(default_rrd_memory_mode)); - analytics_set_data(&analytics_data.netdata_config_exporting_enabled, appconfig_get_boolean(&exporting_config, CONFIG_SECTION_EXPORTING, "enabled", CONFIG_BOOLEAN_NO) ? "true" : "false"); #ifdef DISABLE_CLOUD analytics_set_data(&analytics_data.netdata_host_cloud_enabled, "false"); @@ -607,9 +706,11 @@ void set_late_global_environment() analytics_set_data_str(&analytics_data.netdata_buildinfo, (char *)buffer_tostring(bi)); buffer_free(bi); } + + analytics_get_install_type(); } -static void get_system_timezone(void) +void get_system_timezone(void) { // avoid flood calls to stat(/etc/localtime) // http://stackoverflow.com/questions/4554271/how-to-avoid-excessive-stat-etc-localtime-calls-in-strftime-on-linux @@ -630,7 +731,7 @@ static void get_system_timezone(void) // use the contents of /etc/timezone if (!timezone && !read_file("/etc/timezone", buffer, FILENAME_MAX)) { timezone = buffer; - info("TIMEZONE: using the contents of /etc/timezone: '%s'", timezone); + info("TIMEZONE: using the contents of /etc/timezone"); } // read the link /etc/localtime @@ -696,6 +797,46 @@ static void get_system_timezone(void) timezone = "unknown"; netdata_configured_timezone = config_get(CONFIG_SECTION_GLOBAL, "timezone", timezone); + + //get the utc offset, and the timezone as returned by strftime + //will be sent to the cloud + //Note: This will need an agent restart to get new offset on time change (dst, etc). + { + time_t t; + struct tm *tmp, tmbuf; + char zone[FILENAME_MAX + 1]; + char sign[2], hh[3], mm[3]; + + t = now_realtime_sec(); + tmp = localtime_r(&t, &tmbuf); + + if (tmp != NULL) { + if (strftime(zone, FILENAME_MAX, "%Z", tmp) == 0) { + netdata_configured_abbrev_timezone = strdupz("UTC"); + } else + netdata_configured_abbrev_timezone = strdupz(zone); + + if (strftime(zone, FILENAME_MAX, "%z", tmp) == 0) { + netdata_configured_utc_offset = 0; + } else { + sign[0] = zone[0] == '-' || zone[0] == '+' ? zone[0] : '0'; + sign[1] = '\0'; + hh[0] = isdigit(zone[1]) ? zone[1] : '0'; + hh[1] = isdigit(zone[2]) ? zone[2] : '0'; + hh[2] = '\0'; + mm[0] = isdigit(zone[3]) ? zone[3] : '0'; + mm[1] = isdigit(zone[4]) ? zone[4] : '0'; + mm[2] = '\0'; + + netdata_configured_utc_offset = (str2i(hh) * 3600) + (str2i(mm) * 60); + netdata_configured_utc_offset = + sign[0] == '-' ? 
-netdata_configured_utc_offset : netdata_configured_utc_offset; + } + } else { + netdata_configured_abbrev_timezone = strdupz("UTC"); + netdata_configured_utc_offset = 0; + } + } } void set_global_environment() @@ -753,6 +894,12 @@ void set_global_environment() analytics_set_data(&analytics_data.netdata_host_aclk_available, "null"); analytics_set_data(&analytics_data.netdata_host_agent_claimed, "null"); analytics_set_data(&analytics_data.netdata_host_cloud_enabled, "null"); + analytics_set_data(&analytics_data.netdata_config_https_available, "null"); + analytics_set_data(&analytics_data.netdata_install_type, "null"); + analytics_set_data(&analytics_data.netdata_config_is_private_registry, "null"); + analytics_set_data(&analytics_data.netdata_config_use_private_registry, "null"); + analytics_set_data(&analytics_data.netdata_config_oom_score, "null"); + analytics_set_data(&analytics_data.netdata_prebuilt_distro, "null"); analytics_data.prometheus_hits = 0; analytics_data.shell_hits = 0; @@ -770,8 +917,6 @@ void set_global_environment() if (clean) freez(default_port); - get_system_timezone(); - // set the path we need char path[1024 + 1], *p = getenv("PATH"); if (!p) @@ -834,7 +979,7 @@ void send_statistics(const char *action, const char *action_result, const char * sprintf( command_to_run, - "%s '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' ", + "%s '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' '%s' ", as_script, action, action_result, @@ -870,16 +1015,27 @@ void send_statistics(const char *action, const char *action_result, const char * analytics_data.netdata_host_aclk_available, analytics_data.netdata_host_aclk_implementation, analytics_data.netdata_host_agent_claimed, - analytics_data.netdata_host_cloud_enabled); + analytics_data.netdata_host_cloud_enabled, + analytics_data.netdata_config_https_available, + analytics_data.netdata_install_type, + analytics_data.netdata_config_is_private_registry, + analytics_data.netdata_config_use_private_registry, + analytics_data.netdata_config_oom_score, + analytics_data.netdata_prebuilt_distro); info("%s '%s' '%s' '%s'", as_script, action, action_result, action_data); FILE *fp = mypopen(command_to_run, &command_pid); if (fp) { - char buffer[100 + 1]; - while (fgets(buffer, 100, fp) != NULL) - ; - mypclose(fp, command_pid); + char buffer[4 + 1]; + char *s = fgets(buffer, 4, fp); + int exit_code = mypclose(fp, command_pid); + if (exit_code) + error("Execution of anonymous statistics script returned %d.", exit_code); + if (s && strncmp(buffer, "200", 3)) + error("Execution of anonymous statistics script returned http code %s.", buffer); + } else { + error("Failed to run anonymous statistics script %s.", as_script); } freez(command_to_run); } diff --git a/daemon/analytics.h b/daemon/analytics.h index e888297df..78ced981e 100644 --- a/daemon/analytics.h +++ b/daemon/analytics.h @@ -3,7 +3,7 @@ #ifndef NETDATA_ANALYTICS_H #define NETDATA_ANALYTICS_H 1 -#include "../daemon/common.h" +#include "daemon/common.h" /* Max number of seconds before the first META analytics is sent */ #define ANALYTICS_INIT_SLEEP_SEC 120 @@ -29,7 +29,7 @@ }, /* Needed to calculate the space needed for parameters */ -#define ANALYTICS_NO_OF_ITEMS 32 +#define ANALYTICS_NO_OF_ITEMS 38 struct analytics_data { 
char *netdata_config_stream_enabled; @@ -64,6 +64,12 @@ struct analytics_data { char *netdata_host_aclk_implementation; char *netdata_host_agent_claimed; char *netdata_host_cloud_enabled; + char *netdata_config_https_available; + char *netdata_install_type; + char *netdata_config_is_private_registry; + char *netdata_config_use_private_registry; + char *netdata_config_oom_score; + char *netdata_prebuilt_distro; size_t data_length; @@ -84,6 +90,8 @@ extern void analytics_log_json(void); extern void analytics_log_prometheus(void); extern void analytics_log_dashboard(void); extern void analytics_gather_mutable_meta_data(void); +extern void analytics_report_oom_score(long long int score); +extern void get_system_timezone(void); extern struct analytics_data analytics_data; diff --git a/daemon/anonymous-statistics.sh.in b/daemon/anonymous-statistics.sh.in index bd22963d9..f49d19d09 100755 --- a/daemon/anonymous-statistics.sh.in +++ b/daemon/anonymous-statistics.sh.in @@ -22,9 +22,6 @@ if [ -f "@configdir_POST@/.opt-out-from-anonymous-statistics" ] || [ ! "${DO_NOT exit 0 fi -# Shorten version for easier reporting -NETDATA_VERSION=$(echo "${NETDATA_VERSION}" | sed 's/-.*//g' | tr -d 'v') - # ------------------------------------------------------------------------------------------------- # Get the extra variables @@ -60,6 +57,13 @@ NETDATA_HOST_ACLK_AVAILABLE="${32}" NETDATA_HOST_ACLK_IMPLEMENTATION="${33}" NETDATA_HOST_AGENT_CLAIMED="${34}" NETDATA_HOST_CLOUD_ENABLED="${35}" +NETDATA_CONFIG_HTTPS_AVAILABLE="${36}" +NETDATA_INSTALL_TYPE="${37}" +NETDATA_IS_PRIVATE_REGISTRY="${38}" +NETDATA_USE_PRIVATE_REGISTRY="${39}" +NETDATA_CONFIG_OOM_SCORE="${40}" +NETDATA_PREBUILT_DISTRO="${41}" + # define body of request to be sent REQ_BODY="$(cat << EOF @@ -80,6 +84,8 @@ REQ_BODY="$(cat << EOF "netdata_version": "${NETDATA_VERSION}", "netdata_buildinfo": ${NETDATA_BUILDINFO}, "netdata_release_channel": ${NETDATA_CONFIG_RELEASE_CHANNEL}, + "netdata_install_type": ${NETDATA_INSTALL_TYPE}, + "netdata_prebuilt_distro": ${NETDATA_PREBUILT_DISTRO}, "host_os_name": "${NETDATA_HOST_OS_NAME}", "host_os_id": "${NETDATA_HOST_OS_ID}", "host_os_id_like": "${NETDATA_HOST_OS_ID_LIKE}", @@ -100,6 +106,7 @@ REQ_BODY="$(cat << EOF "container_os_version": "${NETDATA_CONTAINER_OS_VERSION}", "container_os_version_id": "${NETDATA_CONTAINER_OS_VERSION_ID}", "container_os_detection": "${NETDATA_CONTAINER_OS_DETECTION}", + "container_is_official_image": ${NETDATA_CONTAINER_IS_OFFICIAL_IMAGE}, "system_cpu_detection": "${NETDATA_SYSTEM_CPU_DETECTION}", "system_cpu_freq": "${NETDATA_SYSTEM_CPU_FREQ}", "system_cpu_logical_cpu_count": "${NETDATA_SYSTEM_CPU_LOGICAL_CPU_COUNT}", @@ -114,10 +121,14 @@ REQ_BODY="$(cat << EOF "config_page_cache_size": ${NETDATA_CONFIG_PAGE_CACHE_SIZE}, "config_multidb_disk_quota": ${NETDATA_CONFIG_MULTIDB_DISK_QUOTA}, "config_https_enabled": ${NETDATA_CONFIG_HTTPS_ENABLED}, + "config_https_available": ${NETDATA_CONFIG_HTTPS_AVAILABLE}, "config_web_enabled": ${NETDATA_CONFIG_WEB_ENABLED}, "config_exporting_enabled": ${NETDATA_CONFIG_EXPORTING_ENABLED}, "config_is_parent": ${NETDATA_CONFIG_IS_PARENT}, + "config_is_private_registry": ${NETDATA_IS_PRIVATE_REGISTRY}, + "config_private_registry_used": ${NETDATA_USE_PRIVATE_REGISTRY}, "config_hosts_available": ${NETDATA_CONFIG_HOSTS_AVAILABLE}, + "config_oom_score": ${NETDATA_CONFIG_OOM_SCORE}, "alarms_normal": ${NETDATA_ALARMS_NORMAL}, "alarms_warning": ${NETDATA_ALARMS_WARNING}, "alarms_critical": ${NETDATA_ALARMS_CRITICAL}, @@ -148,12 +159,13 @@ EOF # send the 
anonymous statistics to the Netdata PostHog if [ -n "$(command -v curl 2> /dev/null)" ]; then - curl -X POST --max-time 2 --header "Content-Type: application/json" -d "${REQ_BODY}" https://posthog.netdata.cloud/capture/ > /dev/null 2>&1 + curl --silent -o /dev/null --write-out '%{http_code}' -X POST --max-time 2 --header "Content-Type: application/json" -d "${REQ_BODY}" https://posthog.netdata.cloud/capture/ else wget -q -O - --no-check-certificate \ + --server-response \ --method POST \ --timeout=1 \ --header 'Content-Type: application/json' \ --body-data "${REQ_BODY}" \ - 'https://posthog.netdata.cloud/capture/' > /dev/null 2>&1 + 'https://posthog.netdata.cloud/capture/' 2>&1 | awk '/^ HTTP/{print $2}' fi diff --git a/daemon/buildinfo.c b/daemon/buildinfo.c index ebeaa996d..a15250f48 100644 --- a/daemon/buildinfo.c +++ b/daemon/buildinfo.c @@ -9,13 +9,7 @@ #ifdef ENABLE_ACLK #define FEAT_CLOUD 1 #define FEAT_CLOUD_MSG "" -#ifdef ACLK_NG -#define ACLK_IMPL "Next Generation" -#else -#define ACLK_IMPL "Legacy" -#endif #else -#define ACLK_IMPL "" #ifdef DISABLE_CLOUD #define FEAT_CLOUD 0 #define FEAT_CLOUD_MSG "(by user request)" @@ -25,6 +19,24 @@ #endif #endif +#ifdef ACLK_NG +#define FEAT_ACLK_NG 1 +#else +#define FEAT_ACLK_NG 0 +#endif + +#if defined(ACLK_NG) && defined(ENABLE_NEW_CLOUD_PROTOCOL) +#define NEW_CLOUD_PROTO 1 +#else +#define NEW_CLOUD_PROTO 0 +#endif + +#ifdef ACLK_LEGACY +#define FEAT_ACLK_LEGACY 1 +#else +#define FEAT_ACLK_LEGACY 0 +#endif + #ifdef ENABLE_DBENGINE #define FEAT_DBENGINE 1 #else @@ -43,8 +55,31 @@ #define FEAT_NATIVE_HTTPS 0 #endif +#ifdef ENABLE_ML +#define FEAT_ML 1 +#else +#define FEAT_ML 0 +#endif + // Optional libraries +#ifdef HAVE_PROTOBUF +#if defined(ACLK_NG) || defined(ENABLE_PROMETHEUS_REMOTE_WRITE) +#define FEAT_PROTOBUF 1 +#ifdef BUNDLED_PROTOBUF +#define FEAT_PROTOBUF_BUNDLED " (bundled)" +#else +#define FEAT_PROTOBUF_BUNDLED " (system)" +#endif +#else +#define FEAT_PROTOBUF 0 +#define FEAT_PROTOBUF_BUNDLED "" +#endif +#else +#define FEAT_PROTOBUF 0 +#define FEAT_PROTOBUF_BUNDLED "" +#endif + #ifdef ENABLE_JSONC #define FEAT_JSONC 1 #else @@ -69,29 +104,29 @@ #define FEAT_LIBCAP 0 #endif -#ifndef ACLK_NG -#ifdef ACLK_NO_LIBMOSQ -#define FEAT_MOSQUITTO 0 -#else -#define FEAT_MOSQUITTO 1 -#endif - -#ifdef ACLK_NO_LWS -#define FEAT_LWS 0 -#define FEAT_LWS_MSG "" -#else -#ifdef ENABLE_ACLK -#include -#endif -#ifdef BUNDLED_LWS -#define FEAT_LWS 1 -#define FEAT_LWS_MSG "static" -#else -#define FEAT_LWS 1 -#define FEAT_LWS_MSG "shared-lib" -#endif -#endif -#endif /* ACLK_NG */ +#ifndef ACLK_LEGACY_DISABLED + #ifdef ACLK_NO_LIBMOSQ + #define FEAT_MOSQUITTO 0 + #else + #define FEAT_MOSQUITTO 1 + #endif + + #ifdef ACLK_NO_LWS + #define FEAT_LWS 0 + #define FEAT_LWS_MSG "" + #else + #ifdef ACLK_LEGACY + #include + #endif + #ifdef BUNDLED_LWS + #define FEAT_LWS 1 + #define FEAT_LWS_MSG "static" + #else + #define FEAT_LWS 1 + #define FEAT_LWS_MSG "shared-lib" + #endif + #endif +#endif /* ACLK_LEGACY_DISABLED */ #ifdef NETDATA_WITH_ZLIB #define FEAT_ZLIB 1 @@ -205,22 +240,24 @@ void print_build_info(void) { printf("Configure options: %s\n", CONFIGURE_COMMAND); printf("Features:\n"); - printf(" dbengine: %s\n", FEAT_YES_NO(FEAT_DBENGINE)); - printf(" Native HTTPS: %s\n", FEAT_YES_NO(FEAT_NATIVE_HTTPS)); - printf(" Netdata Cloud: %s %s\n", FEAT_YES_NO(FEAT_CLOUD), FEAT_CLOUD_MSG); -#if FEAT_CLOUD == 1 - printf(" Cloud Implementation: %s\n", ACLK_IMPL); -#endif - printf(" TLS Host Verification: %s\n", FEAT_YES_NO(FEAT_TLS_HOST_VERIFY)); + printf(" dbengine: 
%s\n", FEAT_YES_NO(FEAT_DBENGINE)); + printf(" Native HTTPS: %s\n", FEAT_YES_NO(FEAT_NATIVE_HTTPS)); + printf(" Netdata Cloud: %s %s\n", FEAT_YES_NO(FEAT_CLOUD), FEAT_CLOUD_MSG); + printf(" ACLK Next Generation: %s\n", FEAT_YES_NO(FEAT_ACLK_NG)); + printf(" ACLK-NG New Cloud Protocol: %s\n", FEAT_YES_NO(NEW_CLOUD_PROTO)); + printf(" ACLK Legacy: %s\n", FEAT_YES_NO(FEAT_ACLK_LEGACY)); + printf(" TLS Host Verification: %s\n", FEAT_YES_NO(FEAT_TLS_HOST_VERIFY)); + printf(" Machine Learning: %s\n", FEAT_YES_NO(FEAT_ML)); printf("Libraries:\n"); + printf(" protobuf: %s%s\n", FEAT_YES_NO(FEAT_PROTOBUF), FEAT_PROTOBUF_BUNDLED); printf(" jemalloc: %s\n", FEAT_YES_NO(FEAT_JEMALLOC)); printf(" JSON-C: %s\n", FEAT_YES_NO(FEAT_JSONC)); printf(" libcap: %s\n", FEAT_YES_NO(FEAT_LIBCAP)); printf(" libcrypto: %s\n", FEAT_YES_NO(FEAT_CRYPTO)); printf(" libm: %s\n", FEAT_YES_NO(FEAT_LIBM)); -#ifndef ACLK_NG -#if defined(ENABLE_ACLK) +#ifndef ACLK_LEGACY_DISABLED +#if defined(ACLK_LEGACY) printf(" LWS: %s %s v%d.%d.%d\n", FEAT_YES_NO(FEAT_LWS), FEAT_LWS_MSG, LWS_LIBRARY_VERSION_MAJOR, LWS_LIBRARY_VERSION_MINOR, LWS_LIBRARY_VERSION_PATCH); #else printf(" LWS: %s %s\n", FEAT_YES_NO(FEAT_LWS), FEAT_LWS_MSG); @@ -266,13 +303,17 @@ void print_build_info_json(void) { #else printf(" \"cloud-disabled\": false,\n"); #endif -#if FEAT_CLOUD == 1 - printf(" \"cloud-implementation\": \"%s\",\n", ACLK_IMPL); -#endif - printf(" \"tls-host-verify\": %s\n", FEAT_JSON_BOOL(FEAT_TLS_HOST_VERIFY)); + printf(" \"aclk-ng\": %s,\n", FEAT_JSON_BOOL(FEAT_ACLK_NG)); + printf(" \"aclk-ng-new-cloud-proto\": %s,\n", FEAT_JSON_BOOL(NEW_CLOUD_PROTO)); + printf(" \"aclk-legacy\": %s,\n", FEAT_JSON_BOOL(FEAT_ACLK_LEGACY)); + + printf(" \"tls-host-verify\": %s,\n", FEAT_JSON_BOOL(FEAT_TLS_HOST_VERIFY)); + printf(" \"machine-learning\": %s\n", FEAT_JSON_BOOL(FEAT_ML)); printf(" },\n"); printf(" \"libs\": {\n"); + printf(" \"protobuf\": %s,\n", FEAT_JSON_BOOL(FEAT_PROTOBUF)); + printf(" \"protobuf-source\": \"%s\",\n", FEAT_PROTOBUF_BUNDLED); printf(" \"jemalloc\": %s,\n", FEAT_JSON_BOOL(FEAT_JEMALLOC)); printf(" \"jsonc\": %s,\n", FEAT_JSON_BOOL(FEAT_JSONC)); printf(" \"libcap\": %s,\n", FEAT_JSON_BOOL(FEAT_LIBCAP)); @@ -320,16 +361,21 @@ void analytics_build_info(BUFFER *b) { if(FEAT_DBENGINE) buffer_strcat (b, "dbengine"); if(FEAT_NATIVE_HTTPS) buffer_strcat (b, "|Native HTTPS"); if(FEAT_CLOUD) buffer_strcat (b, "|Netdata Cloud"); + if(FEAT_ACLK_NG) buffer_strcat (b, "|ACLK Next Generation"); + if(NEW_CLOUD_PROTO) buffer_strcat (b, "|New Cloud Protocol Support"); + if(FEAT_ACLK_LEGACY) buffer_strcat (b, "|ACLK Legacy"); if(FEAT_TLS_HOST_VERIFY) buffer_strcat (b, "|TLS Host Verification"); + if(FEAT_ML) buffer_strcat (b, "|Machine Learning"); + if(FEAT_PROTOBUF) buffer_strcat (b, "|protobuf"); if(FEAT_JEMALLOC) buffer_strcat (b, "|jemalloc"); if(FEAT_JSONC) buffer_strcat (b, "|JSON-C"); if(FEAT_LIBCAP) buffer_strcat (b, "|libcap"); if(FEAT_CRYPTO) buffer_strcat (b, "|libcrypto"); if(FEAT_LIBM) buffer_strcat (b, "|libm"); -#ifndef ACLK_NG -#if defined(ENABLE_ACLK) +#ifndef ACLK_LEGACY_DISABLED +#if defined(ENABLE_ACLK) && defined(ACLK_LEGACY) { char buf[20]; snprintfz(buf, 19, "|LWS v%d.%d.%d", LWS_LIBRARY_VERSION_MAJOR, LWS_LIBRARY_VERSION_MINOR, LWS_LIBRARY_VERSION_PATCH); diff --git a/daemon/commands.c b/daemon/commands.c index eac392e33..18468183f 100644 --- a/daemon/commands.c +++ b/daemon/commands.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "common.h" -#include "../database/engine/rrdenginelib.h" +#include 
"database/engine/rrdenginelib.h" static uv_thread_t thread; static uv_loop_t* loop; @@ -46,6 +46,7 @@ static cmd_status_t cmd_reload_labels_execute(char *args, char **message); static cmd_status_t cmd_read_config_execute(char *args, char **message); static cmd_status_t cmd_write_config_execute(char *args, char **message); static cmd_status_t cmd_ping_execute(char *args, char **message); +static cmd_status_t cmd_aclk_state(char *args, char **message); static command_info_t command_info_array[] = { {"help", cmd_help_execute, CMD_TYPE_HIGH_PRIORITY}, // show help menu @@ -58,7 +59,8 @@ static command_info_t command_info_array[] = { {"reload-labels", cmd_reload_labels_execute, CMD_TYPE_ORTHOGONAL}, // reload the labels {"read-config", cmd_read_config_execute, CMD_TYPE_CONCURRENT}, {"write-config", cmd_write_config_execute, CMD_TYPE_ORTHOGONAL}, - {"ping", cmd_ping_execute, CMD_TYPE_ORTHOGONAL} + {"ping", cmd_ping_execute, CMD_TYPE_ORTHOGONAL}, + {"aclk-state", cmd_aclk_state, CMD_TYPE_ORTHOGONAL} }; /* Mutexes for commands of type CMD_TYPE_ORTHOGONAL */ @@ -121,7 +123,9 @@ static cmd_status_t cmd_help_execute(char *args, char **message) "reload-claiming-state\n" " Reload agent claiming state from disk.\n" "ping\n" - " Return with 'pong' if agent is alive.\n", + " Return with 'pong' if agent is alive.\n" + "aclk-state [json]\n" + " Returns current state of ACLK and Cloud connection. (optionally in json)\n", MAX_COMMAND_LENGTH - 1); return CMD_STATUS_SUCCESS; } @@ -310,6 +314,17 @@ static cmd_status_t cmd_ping_execute(char *args, char **message) return CMD_STATUS_SUCCESS; } +static cmd_status_t cmd_aclk_state(char *args, char **message) +{ + info("COMMAND: Reopening aclk/cloud state."); + if (strstr(args, "json")) + *message = aclk_state_json(); + else + *message = aclk_state(); + + return CMD_STATUS_SUCCESS; +} + static void cmd_lock_exclusive(unsigned index) { (void)index; diff --git a/daemon/commands.h b/daemon/commands.h index bd4aabfe1..1253e2dc1 100644 --- a/daemon/commands.h +++ b/daemon/commands.h @@ -24,6 +24,7 @@ typedef enum cmd { CMD_READ_CONFIG, CMD_WRITE_CONFIG, CMD_PING, + CMD_ACLK_STATE, CMD_TOTAL_COMMANDS } cmd_t; diff --git a/daemon/common.c b/daemon/common.c index 45d5fa3fd..85d638631 100644 --- a/daemon/common.c +++ b/daemon/common.c @@ -14,6 +14,8 @@ char *netdata_configured_lock_dir = NULL; char *netdata_configured_home_dir = VARLIB_DIR; char *netdata_configured_host_prefix = NULL; char *netdata_configured_timezone = NULL; +char *netdata_configured_abbrev_timezone = NULL; +int32_t netdata_configured_utc_offset = 0; int netdata_ready; int netdata_cloud_setting; diff --git a/daemon/common.h b/daemon/common.h index 4cb54010c..c892dbdb1 100644 --- a/daemon/common.h +++ b/daemon/common.h @@ -3,7 +3,7 @@ #ifndef NETDATA_COMMON_H #define NETDATA_COMMON_H 1 -#include "../libnetdata/libnetdata.h" +#include "libnetdata/libnetdata.h" // ---------------------------------------------------------------------------- // shortcuts for the default netdata configuration @@ -44,6 +44,9 @@ // health monitoring and alarm notifications #include "health/health.h" +// anomaly detection +#include "ml/ml.h" + // the netdata registry // the registry is actually an API feature #include "registry/registry.h" @@ -66,11 +69,7 @@ #include "claim/claim.h" // netdata agent cloud link -#ifndef ACLK_NG -#include "aclk/legacy/agent_cloud_link.h" -#else -#include "aclk/aclk.h" -#endif +#include "aclk/aclk_api.h" // global GUID map functions @@ -81,6 +80,7 @@ #include "daemon.h" #include "main.h" #include 
"signals.h" +#include "service.h" #include "commands.h" #include "analytics.h" @@ -97,6 +97,8 @@ extern char *netdata_configured_lock_dir; extern char *netdata_configured_home_dir; extern char *netdata_configured_host_prefix; extern char *netdata_configured_timezone; +extern char *netdata_configured_abbrev_timezone; +extern int32_t netdata_configured_utc_offset; extern int netdata_zero_metrics_enabled; extern int netdata_anonymous_statistics_enabled; diff --git a/daemon/daemon.c b/daemon/daemon.c index 83191109a..68e161a3f 100644 --- a/daemon/daemon.c +++ b/daemon/daemon.c @@ -181,8 +181,10 @@ static void oom_score_adj(void) { return; } - if(old_score != 0) + if (old_score != 0) { wanted_score = old_score; + analytics_report_oom_score(old_score); + } // check the environment char *s = getenv("OOMScoreAdjust"); @@ -234,6 +236,7 @@ static void oom_score_adj(void) { info("Adjusted my Out-Of-Memory (OOM) score from %d to %d.", (int)old_score, (int)final_score); else error("Adjusted my Out-Of-Memory (OOM) score from %d to %d, but it has been set to %d.", (int)old_score, (int)wanted_score, (int)final_score); + analytics_report_oom_score(final_score); } else error("Failed to adjust my Out-Of-Memory (OOM) score to %d. Running with %d. (systemd systems may change it via netdata.service)", (int)wanted_score, (int)old_score); diff --git a/daemon/global_statistics.c b/daemon/global_statistics.c index edd261476..a152a00ae 100644 --- a/daemon/global_statistics.c +++ b/daemon/global_statistics.c @@ -157,7 +157,7 @@ static inline void global_statistics_copy(struct global_statistics *gs, uint8_t if(options & GLOBAL_STATS_RESET_WEB_USEC_MAX) { uint64_t n = 0; - __atomic_compare_exchange(&global_statistics.web_usec_max, &gs->web_usec_max, &n, 1, __ATOMIC_SEQ_CST, + __atomic_compare_exchange(&global_statistics.web_usec_max, (uint64_t *) &gs->web_usec_max, &n, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); } #else diff --git a/daemon/main.c b/daemon/main.c index 61041f540..2ec5c33f9 100644 --- a/daemon/main.c +++ b/daemon/main.c @@ -28,7 +28,6 @@ void netdata_cleanup_and_exit(int ret) { info("EXIT: netdata prepares to exit with code %d...", ret); send_statistics("EXIT", ret?"ERROR":"OK","-"); - analytics_free_data(); char agent_crash_file[FILENAME_MAX + 1]; char agent_incomplete_shutdown_file[FILENAME_MAX + 1]; @@ -45,6 +44,9 @@ void netdata_cleanup_and_exit(int ret) { // stop everything info("EXIT: stopping static threads..."); +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + aclk_sync_exit_all(); +#endif cancel_main_threads(); // free the database @@ -104,6 +106,7 @@ struct netdata_static_thread static_threads[] = { NETDATA_PLUGIN_HOOK_PLUGINSD NETDATA_PLUGIN_HOOK_HEALTH NETDATA_PLUGIN_HOOK_ANALYTICS + NETDATA_PLUGIN_HOOK_SERVICE {NULL, NULL, NULL, 0, NULL, NULL, NULL} }; @@ -360,13 +363,16 @@ int help(int exitcode) { " -W stacksize=N Set the stacksize (in bytes).\n\n" " -W debug_flags=N Set runtime tracing to debug.log.\n\n" " -W unittest Run internal unittests and exit.\n\n" + " -W sqlite-check Check metadata database integrity and exit.\n\n" + " -W sqlite-fix Check metadata database integrity, fix if needed and exit.\n\n" + " -W sqlite-compact Reclaim metadata database unused space and exit.\n\n" #ifdef ENABLE_DBENGINE " -W createdataset=N Create a DB engine dataset of N seconds and exit.\n\n" " -W stresstest=A,B,C,D,E,F\n" " Run a DB engine stress test for A seconds,\n" " with B writers and C readers, with a ramp up\n" " time of D seconds for writers, a page cache\n" - " size of E MiB, an optional disk space limit" + " size 
of E MiB, an optional disk space limit\n" " of F MiB and exit.\n\n" #endif " -W set section option value\n" @@ -388,20 +394,6 @@ int help(int exitcode) { return exitcode; } -// TODO: Remove this function with the nix major release. -void remove_option(int opt_index, int *argc, char **argv) { - int i; - - // remove the options. - do { - *argc = *argc - 1; - for(i = opt_index; i < *argc; i++) { - argv[i] = argv[i+1]; - } - i = opt_index; - } while(argv[i][0] != '-' && opt_index >= *argc); -} - #ifdef ENABLE_HTTPS static void security_init(){ char filename[FILENAME_MAX + 1]; @@ -556,7 +548,6 @@ static void get_netdata_configured_variables() { // get default memory mode for the database default_rrd_memory_mode = rrd_memory_mode_id(config_get(CONFIG_SECTION_GLOBAL, "memory mode", rrd_memory_mode_name(default_rrd_memory_mode))); - #ifdef ENABLE_DBENGINE // ------------------------------------------------------------------------ // get default Database Engine page cache size in MiB @@ -581,7 +572,11 @@ static void get_netdata_configured_variables() { error("Invalid multidb disk space %d given. Defaulting to %d.", default_multidb_disk_quota_mb, default_rrdeng_disk_quota_mb); default_multidb_disk_quota_mb = default_rrdeng_disk_quota_mb; } - +#else + if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) { + error_report("RRD_MEMORY_MODE_DBENGINE is not supported in this platform. The agent will use memory mode ram instead."); + default_rrd_memory_mode = RRD_MEMORY_MODE_RAM; + } #endif // ------------------------------------------------------------------------ @@ -733,34 +728,6 @@ int main(int argc, char **argv) { // set the name for logging program_name = "netdata"; - // parse deprecated options - // TODO: Remove this block with the next major release. - { - i = 1; - while(i < argc) { - if(strcmp(argv[i], "-pidfile") == 0 && (i+1) < argc) { - strncpyz(pidfile, argv[i+1], FILENAME_MAX); - fprintf(stderr, "%s: deprecated option -- %s -- please use -P instead.\n", argv[0], argv[i]); - remove_option(i, &argc, argv); - } - else if(strcmp(argv[i], "-nodaemon") == 0 || strcmp(argv[i], "-nd") == 0) { - dont_fork = 1; - fprintf(stderr, "%s: deprecated option -- %s -- please use -D instead.\n ", argv[0], argv[i]); - remove_option(i, &argc, argv); - } - else if(strcmp(argv[i], "-ch") == 0 && (i+1) < argc) { - config_set(CONFIG_SECTION_GLOBAL, "host access prefix", argv[i+1]); - fprintf(stderr, "%s: deprecated option -- %s -- please use -s instead.\n", argv[0], argv[i]); - remove_option(i, &argc, argv); - } - else if(strcmp(argv[i], "-l") == 0 && (i+1) < argc) { - config_set(CONFIG_SECTION_GLOBAL, "history", argv[i+1]); - fprintf(stderr, "%s: deprecated option -- %s -- This option will be removed with V2.*.\n", argv[0], argv[i]); - remove_option(i, &argc, argv); - } - else i++; - } - } if (argc > 1 && strcmp(argv[1], SPAWN_SERVER_COMMAND_LINE_ARGUMENT) == 0) { // don't run netdata, this is the spawn server spawn_server(); @@ -840,6 +807,20 @@ int main(int argc, char **argv) { char* createdataset_string = "createdataset="; char* stresstest_string = "stresstest="; #endif + if(strcmp(optarg, "sqlite-check") == 0) { + sql_init_database(DB_CHECK_INTEGRITY); + return 0; + } + + if(strcmp(optarg, "sqlite-fix") == 0) { + sql_init_database(DB_CHECK_FIX_DB); + return 0; + } + + if(strcmp(optarg, "sqlite-compact") == 0) { + sql_init_database(DB_CHECK_RECLAIM_SPACE); + return 0; + } if(strcmp(optarg, "unittest") == 0) { if(unit_test_buffer()) return 1; @@ -861,9 +842,15 @@ int main(int argc, char **argv) { #ifdef 
ENABLE_DBENGINE if(test_dbengine()) return 1; #endif + if(test_sqlite()) return 1; fprintf(stderr, "\n\nALL TESTS PASSED\n\n"); return 0; } +#ifdef ENABLE_ML_TESTS + else if(strcmp(optarg, "mltest") == 0) { + return test_ml(argc, argv); + } +#endif #ifdef ENABLE_DBENGINE else if(strncmp(optarg, createdataset_string, strlen(createdataset_string)) == 0) { optarg += strlen(createdataset_string); @@ -1167,7 +1154,10 @@ int main(int argc, char **argv) { // get log filenames and settings log_init(); error_log_limit_unlimited(); + // initialize the log files + open_all_log_files(); + get_system_timezone(); // -------------------------------------------------------------------- // get the certificate and start security #ifdef ENABLE_HTTPS @@ -1179,6 +1169,10 @@ int main(int argc, char **argv) { set_silencers_filename(); health_initialize_global_silencers(); + // -------------------------------------------------------------------- + // Initialize ML configuration + ml_init(); + // -------------------------------------------------------------------- // setup process signals @@ -1217,9 +1211,6 @@ int main(int argc, char **argv) { api_listen_sockets_setup(); } - // initialize the log files - open_all_log_files(); - #ifdef NETDATA_INTERNAL_CHECKS if(debug_flags != 0) { struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY }; @@ -1269,6 +1260,7 @@ int main(int argc, char **argv) { netdata_anonymous_statistics_enabled=-1; struct rrdhost_system_info *system_info = calloc(1, sizeof(struct rrdhost_system_info)); get_system_info(system_info); + system_info->hops = 0; if(rrd_init(netdata_configured_hostname, system_info)) fatal("Cannot initialize localhost instance with name '%s'.", netdata_configured_hostname); @@ -1306,6 +1298,8 @@ int main(int argc, char **argv) { netdata_zero_metrics_enabled = config_get_boolean_ondemand(CONFIG_SECTION_GLOBAL, "enable zero metrics", CONFIG_BOOLEAN_NO); + set_late_global_environment(); + for (i = 0; static_threads[i].name != NULL ; i++) { struct netdata_static_thread *st = &static_threads[i]; @@ -1325,8 +1319,6 @@ int main(int argc, char **argv) { info("netdata initialization completed. Enjoy real-time performance monitoring!"); netdata_ready = 1; - set_late_global_environment(); - send_statistics("START", "-", "-"); if (crash_detected) send_statistics("CRASH", "-", "-"); diff --git a/daemon/service.c b/daemon/service.c new file mode 100644 index 000000000..9cba0694f --- /dev/null +++ b/daemon/service.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "common.h" + +/* Run service jobs every X seconds */ +#define SERVICE_HEARTBEAT 10 + +void service_main_cleanup(void *ptr) +{ + struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr; + static_thread->enabled = NETDATA_MAIN_THREAD_EXITING; + + debug(D_SYSTEM, "Cleaning up..."); + + static_thread->enabled = NETDATA_MAIN_THREAD_EXITED; +} + +/* + * The service thread. 
+ */ +void *service_main(void *ptr) +{ + netdata_thread_cleanup_push(service_main_cleanup, ptr); + heartbeat_t hb; + heartbeat_init(&hb); + usec_t step = USEC_PER_SEC * SERVICE_HEARTBEAT; + + debug(D_SYSTEM, "Service thread starts"); + + while (!netdata_exit) { + heartbeat_next(&hb, step); + + rrd_cleanup_obsolete_charts(); + } + + netdata_thread_cleanup_pop(1); + return NULL; +} diff --git a/daemon/service.h b/daemon/service.h new file mode 100644 index 000000000..cb03bec2c --- /dev/null +++ b/daemon/service.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SERVICE_H +#define NETDATA_SERVICE_H 1 + +#define NETDATA_PLUGIN_HOOK_SERVICE \ + { \ + .name = "SERVICE", \ + .config_section = NULL, \ + .config_name = NULL, \ + .enabled = 1, \ + .thread = NULL, \ + .init_routine = NULL, \ + .start_routine = service_main \ + }, + +extern void *service_main(void *ptr); + +#endif //NETDATA_SERVICE_H diff --git a/daemon/system-info.sh b/daemon/system-info.sh index 05d8667c2..13c9b90bd 100755 --- a/daemon/system-info.sh +++ b/daemon/system-info.sh @@ -12,6 +12,7 @@ ARCHITECTURE="$(uname -m)" CONTAINER="unknown" CONT_DETECTION="none" +CONTAINER_IS_OFFICIAL_IMAGE="${NETDATA_OFFICIAL_IMAGE:-false}" if [ -z "${VIRTUALIZATION}" ]; then VIRTUALIZATION="unknown" @@ -381,6 +382,7 @@ echo "NETDATA_CONTAINER_OS_ID_LIKE=${CONTAINER_ID_LIKE}" echo "NETDATA_CONTAINER_OS_VERSION=${CONTAINER_VERSION}" echo "NETDATA_CONTAINER_OS_VERSION_ID=${CONTAINER_VERSION_ID}" echo "NETDATA_CONTAINER_OS_DETECTION=${CONTAINER_OS_DETECTION}" +echo "NETDATA_CONTAINER_IS_OFFICIAL_IMAGE=${CONTAINER_IS_OFFICIAL_IMAGE}" echo "NETDATA_HOST_OS_NAME=${HOST_NAME}" echo "NETDATA_HOST_OS_ID=${HOST_ID}" echo "NETDATA_HOST_OS_ID_LIKE=${HOST_ID_LIKE}" diff --git a/daemon/unit_test.c b/daemon/unit_test.c index 81090736e..2dcc88c45 100644 --- a/daemon/unit_test.c +++ b/daemon/unit_test.c @@ -97,7 +97,7 @@ static int check_rrdcalc_comparisons(void) { int check_storage_number(calculated_number n, int debug) { char buffer[100]; - uint32_t flags = SN_EXISTS; + uint32_t flags = SN_DEFAULT_FLAGS; storage_number s = pack_storage_number(n, flags); calculated_number d = unpack_storage_number(s); @@ -150,7 +150,7 @@ calculated_number storage_number_min(calculated_number n) { do { last = n; n /= 2.0; - storage_number t = pack_storage_number(n, SN_EXISTS); + storage_number t = pack_storage_number(n, SN_DEFAULT_FLAGS); r = unpack_storage_number(t); } while(r != 0.0 && r != last); @@ -263,7 +263,7 @@ void benchmark_storage_number(int loop, int multiplier) { n *= multiplier; if(n > storage_number_positive_max) n = storage_number_positive_min; - s = pack_storage_number(n, SN_EXISTS); + s = pack_storage_number(n, SN_DEFAULT_FLAGS); d = unpack_storage_number(s); print_calculated_number(buffer, d); } @@ -289,25 +289,12 @@ void benchmark_storage_number(int loop, int multiplier) { } static int check_storage_number_exists() { - uint32_t flags; - - - for(flags = 0; flags < 7 ; flags++) { - if(get_storage_number_flags(flags << 24) != flags << 24) { - fprintf(stderr, "Flag 0x%08x is not checked correctly. It became 0x%08x\n", flags << 24, get_storage_number_flags(flags << 24)); - return 1; - } - } - - flags = SN_EXISTS; + uint32_t flags = SN_DEFAULT_FLAGS; calculated_number n = 0.0; storage_number s = pack_storage_number(n, flags); calculated_number d = unpack_storage_number(s); - if(get_storage_number_flags(s) != flags) { - fprintf(stderr, "Wrong flags. 
Given %08x, Got %08x!\n", flags, get_storage_number_flags(s)); - return 1; - } + if(n != d) { fprintf(stderr, "Wrong number returned. Expected " CALCULATED_NUMBER_FORMAT ", returned " CALCULATED_NUMBER_FORMAT "!\n", n, d); return 1; @@ -1192,7 +1179,7 @@ int run_test(struct test *test) unsigned long max = (st->counter < test->result_entries)?st->counter:test->result_entries; for(c = 0 ; c < max ; c++) { calculated_number v = unpack_storage_number(rd->values[c]); - calculated_number n = unpack_storage_number(pack_storage_number(test->results[c], SN_EXISTS)); + calculated_number n = unpack_storage_number(pack_storage_number(test->results[c], SN_DEFAULT_FLAGS)); int same = (calculated_number_round(v * 10000000.0) == calculated_number_round(n * 10000000.0))?1:0; fprintf(stderr, " %s/%s: checking position %lu (at %lu secs), expecting value " CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", %s\n", test->name, rd->name, c+1, @@ -1475,6 +1462,38 @@ int unit_test(long delay, long shift) return ret; } +int test_sqlite(void) { + sqlite3 *db_meta; + fprintf(stderr, "Testing SQLite\n"); + + int rc = sqlite3_open(":memory:", &db_meta); + if (rc != SQLITE_OK) { + fprintf(stderr,"Failed to test SQLite: DB init failed\n"); + return 1; + } + + rc = sqlite3_exec(db_meta, "CREATE TABLE IF NOT EXISTS mine (id1, id2);", 0, 0, NULL); + if (rc != SQLITE_OK) { + fprintf(stderr,"Failed to test SQLite: Create table failed\n"); + return 1; + } + + rc = sqlite3_exec(db_meta, "DELETE FROM MINE LIMIT 1;", 0, 0, NULL); + if (rc != SQLITE_OK) { + fprintf(stderr,"Failed to test SQLite: Delete with LIMIT failed\n"); + return 1; + } + + rc = sqlite3_exec(db_meta, "UPDATE MINE SET id1=1 LIMIT 1;", 0, 0, NULL); + if (rc != SQLITE_OK) { + fprintf(stderr,"Failed to test SQLite: Update with LIMIT failed\n"); + return 1; + } + fprintf(stderr,"SQLite is OK\n"); + return 0; +} + + #ifdef ENABLE_DBENGINE static inline void rrddim_set_by_pointer_fake_time(RRDDIM *rd, collected_number value, time_t now) { @@ -1500,6 +1519,8 @@ static RRDHOST *dbengine_rrdhost_find_or_create(char *name) , name , os_type , netdata_configured_timezone + , netdata_configured_abbrev_timezone + , netdata_configured_utc_offset , config_get(CONFIG_SECTION_BACKEND, "host tags", "") , program_name , program_version @@ -1583,7 +1604,7 @@ static time_t test_dbengine_create_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS collected_number next; update_every = REGION_UPDATE_EVERY[current_region]; - time_now = time_start + update_every; + time_now = time_start; // feed it with the test data for (i = 0 ; i < CHARTS ; ++i) { for (j = 0 ; j < DIMS ; ++j) { @@ -1594,7 +1615,7 @@ static time_t test_dbengine_create_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS } } for (c = 0; c < REGION_POINTS[current_region] ; ++c) { - time_now += update_every; // time_now = start + (c + 2) * update_every + time_now += update_every; // time_now = start + (c + 1) * update_every for (i = 0 ; i < CHARTS ; ++i) { st[i]->usec_since_last_update = USEC_PER_SEC * update_every; @@ -1626,14 +1647,14 @@ static int test_dbengine_check_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DI // check the result for (c = 0; c < REGION_POINTS[current_region] ; c += QUERY_BATCH) { - time_now = time_start + (c + 2) * update_every; + time_now = time_start + (c + 1) * update_every; for (i = 0 ; i < CHARTS ; ++i) { for (j = 0; j < DIMS; ++j) { rd[i][j]->state->query_ops.init(rd[i][j], &handle, time_now, time_now + QUERY_BATCH * update_every); for (k = 0; k < QUERY_BATCH; ++k) { last = 
((collected_number)i * DIMS) * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c + k; - expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_EXISTS)); + expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_DEFAULT_FLAGS)); n = rd[i][j]->state->query_ops.next_metric(&handle, &time_retrieved); value = unpack_storage_number(n); @@ -1671,7 +1692,7 @@ static int test_dbengine_check_rrdr(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS] errors = 0; update_every = REGION_UPDATE_EVERY[current_region]; - long points = (time_end - time_start) / update_every - 1; + long points = (time_end - time_start) / update_every; for (i = 0 ; i < CHARTS ; ++i) { RRDR *r = rrd2rrdr(st[i], points, time_start + update_every, time_end, RRDR_GROUPING_AVERAGE, 0, 0, NULL, NULL); if (!r) { @@ -1690,8 +1711,8 @@ static int test_dbengine_check_rrdr(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS] value = cn[j]; assert(rd[i][j] == d); - last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c; - expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_EXISTS)); + last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c + 1; + expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_DEFAULT_FLAGS)); same = (calculated_number_round(value) == calculated_number_round(expected)) ? 1 : 0; if(!same) { @@ -1789,7 +1810,7 @@ int test_dbengine(void) current_region = 1; update_every = REGION_UPDATE_EVERY[current_region]; // use the maximum update_every = 3 errors = 0; - long points = (time_end[REGIONS - 1] - time_start[0]) / update_every - 1; // cover all time regions with RRDR + long points = (time_end[REGIONS - 1] - time_start[0]) / update_every; // cover all time regions with RRDR long point_offset = (time_start[current_region] - time_start[0]) / update_every; for (i = 0 ; i < CHARTS ; ++i) { RRDR *r = rrd2rrdr(st[i], points, time_start[0] + update_every, time_end[REGIONS - 1], RRDR_GROUPING_AVERAGE, 0, 0, NULL, NULL); @@ -1812,8 +1833,8 @@ int test_dbengine(void) calculated_number value = cn[j]; assert(rd[i][j] == d); - collected_number last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c - point_offset; - calculated_number expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_EXISTS)); + collected_number last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c - point_offset + 1; + calculated_number expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_DEFAULT_FLAGS)); uint8_t same = (calculated_number_round(value) == calculated_number_round(expected)) ? 
1 : 0; if(!same) { @@ -2042,7 +2063,7 @@ static void query_dbengine_chart(void *arg) ++thread_info->queries_nr; for (time_now = time_after ; time_now <= time_before ; time_now += update_every) { generatedv = generate_dbengine_chart_value(i, j, time_now); - expected = unpack_storage_number(pack_storage_number((calculated_number) generatedv, SN_EXISTS)); + expected = unpack_storage_number(pack_storage_number((calculated_number) generatedv, SN_DEFAULT_FLAGS)); if (unlikely(rd->state->query_ops.is_finished(&handle))) { if (!thread_info->delete_old_data) { /* data validation only when we don't delete */ diff --git a/daemon/unit_test.h b/daemon/unit_test.h index 79d415be0..3a3b64902 100644 --- a/daemon/unit_test.h +++ b/daemon/unit_test.h @@ -8,6 +8,7 @@ extern int unit_test(long delay, long shift); extern int run_all_mockup_tests(void); extern int unit_test_str2ld(void); extern int unit_test_buffer(void); +extern int test_sqlite(void); #ifdef ENABLE_DBENGINE extern int test_dbengine(void); extern void generate_dbengine_dataset(unsigned history_seconds); diff --git a/database/engine/datafile.c b/database/engine/datafile.c index 7a052f963..d42311079 100644 --- a/database/engine/datafile.c +++ b/database/engine/datafile.c @@ -51,7 +51,7 @@ static void datafile_init(struct rrdengine_datafile *datafile, struct rrdengine_ void generate_datafilepath(struct rrdengine_datafile *datafile, char *str, size_t maxlen) { - (void) snprintf(str, maxlen, "%s/" DATAFILE_PREFIX RRDENG_FILE_NUMBER_PRINT_TMPL DATAFILE_EXTENSION, + (void) snprintfz(str, maxlen, "%s/" DATAFILE_PREFIX RRDENG_FILE_NUMBER_PRINT_TMPL DATAFILE_EXTENSION, datafile->ctx->dbfiles_path, datafile->tier, datafile->fileno); } @@ -457,4 +457,4 @@ void finalize_data_files(struct rrdengine_instance *ctx) freez(datafile); } -} \ No newline at end of file +} diff --git a/database/engine/journalfile.c b/database/engine/journalfile.c index 9fecc48ff..640656161 100644 --- a/database/engine/journalfile.c +++ b/database/engine/journalfile.c @@ -94,7 +94,7 @@ void * wal_get_transaction_buffer(struct rrdengine_worker_config* wc, unsigned s void generate_journalfilepath(struct rrdengine_datafile *datafile, char *str, size_t maxlen) { - (void) snprintf(str, maxlen, "%s/" WALFILE_PREFIX RRDENG_FILE_NUMBER_PRINT_TMPL WALFILE_EXTENSION, + (void) snprintfz(str, maxlen, "%s/" WALFILE_PREFIX RRDENG_FILE_NUMBER_PRINT_TMPL WALFILE_EXTENSION, datafile->ctx->dbfiles_path, datafile->tier, datafile->fileno); } @@ -428,8 +428,9 @@ static uint64_t iterate_transactions(struct rrdengine_instance *ctx, struct rrde iov = uv_buf_init(buf, size_bytes); ret = uv_fs_read(NULL, &req, file, &iov, 1, pos, NULL); if (ret < 0) { - fatal("uv_fs_read: %s", uv_strerror(ret)); - /*uv_fs_req_cleanup(&req);*/ + error("uv_fs_read: pos=%lu, %s", pos, uv_strerror(ret)); + uv_fs_req_cleanup(&req); + goto skip_file; } fatal_assert(req.result >= 0); uv_fs_req_cleanup(&req); @@ -451,7 +452,7 @@ static uint64_t iterate_transactions(struct rrdengine_instance *ctx, struct rrde max_id = MAX(max_id, id); } } - +skip_file: free(buf); return max_id; } @@ -512,4 +513,4 @@ void init_commit_log(struct rrdengine_instance *ctx) ctx->commit_log.buf = NULL; ctx->commit_log.buf_pos = 0; ctx->commit_log.transaction_id = 1; -} \ No newline at end of file +} diff --git a/database/engine/metadata_log/logfile.c b/database/engine/metadata_log/logfile.c index b7c5c0618..f5bd9b2d2 100644 --- a/database/engine/metadata_log/logfile.c +++ b/database/engine/metadata_log/logfile.c @@ -6,7 +6,7 @@ void 
generate_metadata_logfile_path(struct metadata_logfile *metalogfile, char *str, size_t maxlen) { - (void) snprintf(str, maxlen, "%s/" METALOG_PREFIX METALOG_FILE_NUMBER_PRINT_TMPL METALOG_EXTENSION, + (void) snprintfz(str, maxlen, "%s/" METALOG_PREFIX METALOG_FILE_NUMBER_PRINT_TMPL METALOG_EXTENSION, metalogfile->ctx->rrdeng_ctx->dbfiles_path, metalogfile->starting_fileno, metalogfile->fileno); } diff --git a/database/engine/metadata_log/metalogpluginsd.h b/database/engine/metadata_log/metalogpluginsd.h index 96808aaa2..4fd8c3900 100644 --- a/database/engine/metadata_log/metalogpluginsd.h +++ b/database/engine/metadata_log/metalogpluginsd.h @@ -3,9 +3,9 @@ #ifndef NETDATA_METALOGPLUGINSD_H #define NETDATA_METALOGPLUGINSD_H -#include "../../../collectors/plugins.d/pluginsd_parser.h" -#include "../../../collectors/plugins.d/plugins_d.h" -#include "../../../parser/parser.h" +#include "collectors/plugins.d/pluginsd_parser.h" +#include "collectors/plugins.d/plugins_d.h" +#include "parser/parser.h" struct metalog_pluginsd_state { struct metalog_instance *ctx; diff --git a/database/engine/pagecache.c b/database/engine/pagecache.c index f17afc22b..90423176c 100644 --- a/database/engine/pagecache.c +++ b/database/engine/pagecache.c @@ -299,10 +299,15 @@ static void pg_cache_reserve_pages(struct rrdengine_instance *ctx, unsigned numb destroy_completion(&compl); if (unlikely(failures > 1)) { - unsigned long slots; + unsigned long slots, usecs_to_sleep; /* exponential backoff */ slots = random() % (2LU << MIN(failures, FAILURES_CEILING)); - (void)sleep_usec(slots * exp_backoff_slot_usec); + usecs_to_sleep = slots * exp_backoff_slot_usec; + + if (usecs_to_sleep >= USEC_PER_SEC) + error("Page cache is full. Sleeping for %llu second(s).", usecs_to_sleep / USEC_PER_SEC); + + (void)sleep_usec(usecs_to_sleep); } uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock); } @@ -1243,4 +1248,4 @@ void free_page_cache(struct rrdengine_instance *ctx) bytes_freed += ret_Judy; info("Freed %lu bytes of memory from page cache.", bytes_freed); -} \ No newline at end of file +} diff --git a/database/engine/rrdengine.c b/database/engine/rrdengine.c index 0c4a401cb..54a9cdf8d 100644 --- a/database/engine/rrdengine.c +++ b/database/engine/rrdengine.c @@ -336,7 +336,7 @@ after_crc_check: /* care, we don't hold the descriptor mutex */ if (have_read_error) { /* Applications should make sure NULL values match 0 as does SN_EMPTY_SLOT */ - memset(page, 0, descr->page_length); + memset(page, SN_EMPTY_SLOT, descr->page_length); } else if (RRD_NO_COMPRESSION == header->compression_algorithm) { (void) memcpy(page, xt_io_descr->buf + payload_offset + page_offset, descr->page_length); } else { @@ -861,6 +861,7 @@ static void after_delete_old_data(struct rrdengine_worker_config* wc) wc->now_deleting_files = NULL; wc->cleanup_thread_deleting_files = 0; + aclk_data_rotated(); /* interrupt event loop */ uv_stop(wc->loop); diff --git a/database/engine/rrdengine.h b/database/engine/rrdengine.h index 07cc1479d..b0c8e4d02 100644 --- a/database/engine/rrdengine.h +++ b/database/engine/rrdengine.h @@ -11,7 +11,7 @@ #include #include #include -#include "../../daemon/common.h" +#include "daemon/common.h" #include "../rrd.h" #include "rrddiskprotocol.h" #include "rrdenginelib.h" diff --git a/database/engine/rrdengineapi.c b/database/engine/rrdengineapi.c index d847969e8..d81b95805 100755 --- a/database/engine/rrdengineapi.c +++ b/database/engine/rrdengineapi.c @@ -49,7 +49,7 @@ void rrdeng_convert_legacy_uuid_to_multihost(char machine_guid[GUID_LEN + 1], uu 
memcpy(ret_uuid, hash_value, sizeof(uuid_t)); } -void rrdeng_metric_init(RRDDIM *rd, uuid_t *dim_uuid) +void rrdeng_metric_init(RRDDIM *rd) { struct page_cache *pg_cache; struct rrdengine_instance *ctx; @@ -68,7 +68,6 @@ void rrdeng_metric_init(RRDDIM *rd, uuid_t *dim_uuid) pg_cache = &ctx->pg_cache; rrdeng_generate_legacy_uuid(rd->id, rd->rrdset->id, &legacy_uuid); - rd->state->metric_uuid = dim_uuid; if (host != localhost && host->rrdeng_ctx == &multidb_ctx) is_multihost_child = 1; @@ -82,20 +81,17 @@ void rrdeng_metric_init(RRDDIM *rd, uuid_t *dim_uuid) /* First time we see the legacy UUID or metric belongs to child host in multi-host DB. * Drop legacy support, normal path */ - if (unlikely(!rd->state->metric_uuid)) - rd->state->metric_uuid = create_dimension_uuid(rd->rrdset, rd); - uv_rwlock_rdlock(&pg_cache->metrics_index.lock); - PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, rd->state->metric_uuid, sizeof(uuid_t)); + PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, &rd->state->metric_uuid, sizeof(uuid_t)); if (likely(NULL != PValue)) { page_index = *PValue; } uv_rwlock_rdunlock(&pg_cache->metrics_index.lock); if (NULL == PValue) { uv_rwlock_wrlock(&pg_cache->metrics_index.lock); - PValue = JudyHSIns(&pg_cache->metrics_index.JudyHS_array, rd->state->metric_uuid, sizeof(uuid_t), PJE0); + PValue = JudyHSIns(&pg_cache->metrics_index.JudyHS_array, &rd->state->metric_uuid, sizeof(uuid_t), PJE0); fatal_assert(NULL == *PValue); /* TODO: figure out concurrency model */ - *PValue = page_index = create_page_index(rd->state->metric_uuid); + *PValue = page_index = create_page_index(&rd->state->metric_uuid); page_index->prev = pg_cache->metrics_index.last_page_index; pg_cache->metrics_index.last_page_index = page_index; uv_rwlock_wrunlock(&pg_cache->metrics_index.lock); @@ -106,15 +102,12 @@ void rrdeng_metric_init(RRDDIM *rd, uuid_t *dim_uuid) rrdeng_convert_legacy_uuid_to_multihost(rd->rrdset->rrdhost->machine_guid, &legacy_uuid, &multihost_legacy_uuid); - if (unlikely(!rd->state->metric_uuid)) - rd->state->metric_uuid = mallocz(sizeof(uuid_t)); - - int need_to_store = (dim_uuid == NULL || uuid_compare(*rd->state->metric_uuid, multihost_legacy_uuid)); + int need_to_store = uuid_compare(rd->state->metric_uuid, multihost_legacy_uuid); - uuid_copy(*rd->state->metric_uuid, multihost_legacy_uuid); + uuid_copy(rd->state->metric_uuid, multihost_legacy_uuid); if (unlikely(need_to_store)) - (void)sql_store_dimension(rd->state->metric_uuid, rd->rrdset->chart_uuid, rd->id, rd->name, rd->multiplier, rd->divisor, + (void)sql_store_dimension(&rd->state->metric_uuid, rd->rrdset->chart_uuid, rd->id, rd->name, rd->multiplier, rd->divisor, rd->algorithm); } diff --git a/database/engine/rrdengineapi.h b/database/engine/rrdengineapi.h index 00e55e662..d263259b6 100644 --- a/database/engine/rrdengineapi.h +++ b/database/engine/rrdengineapi.h @@ -36,7 +36,7 @@ extern void rrdeng_convert_legacy_uuid_to_multihost(char machine_guid[GUID_LEN + uuid_t *ret_uuid); -extern void rrdeng_metric_init(RRDDIM *rd, uuid_t *dim_uuid); +extern void rrdeng_metric_init(RRDDIM *rd); extern void rrdeng_store_metric_init(RRDDIM *rd); extern void rrdeng_store_metric_flush_current_page(RRDDIM *rd); extern void rrdeng_store_metric_next(RRDDIM *rd, usec_t point_in_time, storage_number number); diff --git a/database/engine/rrdenginelib.h b/database/engine/rrdenginelib.h index ebab93c8f..8b6751f00 100644 --- a/database/engine/rrdenginelib.h +++ b/database/engine/rrdenginelib.h @@ -3,6 +3,8 @@ #ifndef NETDATA_RRDENGINELIB_H 
#define NETDATA_RRDENGINELIB_H +#include "libnetdata/libnetdata.h" + /* Forward declarations */ struct rrdeng_page_descr; struct rrdengine_instance; @@ -12,10 +14,6 @@ struct rrdengine_instance; #define BITS_PER_ULONG (sizeof(unsigned long) * 8) -#ifndef UUID_STR_LEN -#define UUID_STR_LEN (37) -#endif - /* Taken from linux kernel */ #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) @@ -141,4 +139,4 @@ extern char *get_rrdeng_statistics(struct rrdengine_instance *ctx, char *str, si extern int compute_multidb_diskspace(); extern int is_legacy_child(const char *machine_guid); -#endif /* NETDATA_RRDENGINELIB_H */ \ No newline at end of file +#endif /* NETDATA_RRDENGINELIB_H */ diff --git a/database/rrd.h b/database/rrd.h index 380ccb161..7f8b91f7d 100644 --- a/database/rrd.h +++ b/database/rrd.h @@ -15,6 +15,9 @@ typedef struct rrdcalctemplate RRDCALCTEMPLATE; typedef struct alarm_entry ALARM_ENTRY; typedef struct context_param CONTEXT_PARAM; +typedef void *ml_host_t; +typedef void *ml_dimension_t; + // forward declarations struct rrddim_volatile; struct rrdset_volatile; @@ -26,20 +29,16 @@ struct rrdengine_instance; struct pg_cache_page_index; #endif -#include "../daemon/common.h" +#include "daemon/common.h" #include "web/api/queries/query.h" #include "rrdvar.h" #include "rrdsetvar.h" #include "rrddimvar.h" #include "rrdcalc.h" #include "rrdcalctemplate.h" -#include "../streaming/rrdpush.h" - -#ifndef ACLK_NG -#include "../aclk/legacy/aclk_rrdhost_state.h" -#else -#include "aclk/aclk.h" -#endif +#include "streaming/rrdpush.h" +#include "aclk/aclk_rrdhost_state.h" +#include "sqlite/sqlite_health.h" enum { CONTEXT_FLAGS_ARCHIVE = 0x01, @@ -54,6 +53,7 @@ struct context_param { uint8_t flags; }; +#define RRDSET_MINIMUM_LIVE_COUNT 3 #define META_CHART_UPDATED 1 #define META_PLUGIN_UPDATED 2 #define META_MODULE_UPDATED 4 @@ -167,7 +167,8 @@ typedef enum rrddim_flags { RRDDIM_FLAG_OBSOLETE = (1 << 2), // this is marked by the collector/module as obsolete // No new values have been collected for this dimension since agent start or it was marked RRDDIM_FLAG_OBSOLETE at // least rrdset_free_obsolete_time seconds ago. 
- RRDDIM_FLAG_ARCHIVED = (1 << 3) + RRDDIM_FLAG_ARCHIVED = (1 << 3), + RRDDIM_FLAG_ACLK = (1 << 4) } RRDDIM_FLAGS; #ifdef HAVE_C___ATOMIC @@ -384,7 +385,10 @@ struct rrddim_volatile { uuid_t *rrdeng_uuid; // database engine metric UUID struct pg_cache_page_index *page_index; #endif - uuid_t *metric_uuid; // global UUID for this metric (unique_across hosts) +#ifdef ENABLE_ACLK + int aclk_live_status; +#endif + uuid_t metric_uuid; // global UUID for this metric (unique_across hosts) union rrddim_collect_handle handle; // ------------------------------------------------------------------------ // function pointers that handle data collection @@ -420,6 +424,8 @@ struct rrddim_volatile { // get the timestamp of the first entry of this metric time_t (*oldest_time)(RRDDIM *rd); } query_ops; + + ml_dimension_t ml_dimension; }; // ---------------------------------------------------------------------------- @@ -427,6 +433,7 @@ struct rrddim_volatile { struct rrdset_volatile { char *old_title; char *old_context; + uuid_t hash_id; struct label *new_labels; struct label_index labels; }; @@ -655,6 +662,7 @@ struct alarm_entry { uint32_t unique_id; uint32_t alarm_id; uint32_t alarm_event_id; + uuid_t config_hash_id; time_t when; time_t duration; @@ -745,6 +753,7 @@ struct rrdhost_system_info { char *container; char *container_detection; char *is_k8s_node; + uint16_t hops; }; struct rrdhost { @@ -764,10 +773,14 @@ struct rrdhost { const char *os; // the O/S type of the host const char *tags; // tags for this host const char *timezone; // the timezone of the host + #ifdef ENABLE_ACLK - long obsolete_count; + long deleted_charts_count; #endif + const char *abbrev_timezone; // the abbriviated timezone of the host + int32_t utc_offset; // the offset in seconds from utc + RRDHOST_FLAGS flags; // flags about this RRDHOST RRDHOST_FLAGS *exporting_flags; // array of flags for exporting connector instances @@ -795,6 +808,7 @@ struct rrdhost { struct sender_state *sender; volatile unsigned int rrdpush_sender_spawn:1; // 1 when the sender thread has been spawn netdata_thread_t rrdpush_sender_thread; // the sender thread + void *dbsync_worker; volatile unsigned int rrdpush_sender_connected:1; // 1 when the sender is ready to push metrics int rrdpush_sender_socket; // the fd of the socket to the remote host, or -1 @@ -859,12 +873,18 @@ struct rrdhost { RRDSET *rrdset_root; // the host charts + unsigned int obsolete_charts_count; + // ------------------------------------------------------------------------ // locks netdata_rwlock_t rrdhost_rwlock; // lock for this RRDHOST (protects rrdset_root linked list) + // ------------------------------------------------------------------------ + // ML handle + ml_host_t ml_host; + // ------------------------------------------------------------------------ // Support for host-level labels struct label_index labels; @@ -938,6 +958,8 @@ extern RRDHOST *rrdhost_find_or_create( , const char *guid , const char *os , const char *timezone + , const char *abbrev_timezone + , int32_t utc_offset , const char *tags , const char *program_name , const char *program_version @@ -958,6 +980,8 @@ extern void rrdhost_update(RRDHOST *host , const char *guid , const char *os , const char *timezone + , const char *abbrev_timezone + , int32_t utc_offset , const char *tags , const char *program_name , const char *program_version @@ -1034,6 +1058,7 @@ extern void rrdhost_system_info_free(struct rrdhost_system_info *system_info); extern void rrdhost_free(RRDHOST *host); extern void 
rrdhost_save_charts(RRDHOST *host); extern void rrdhost_delete_charts(RRDHOST *host); +extern void rrd_cleanup_obsolete_charts(); extern int rrdhost_should_be_removed(RRDHOST *host, RRDHOST *protected_host, time_t now); @@ -1125,7 +1150,10 @@ static inline time_t rrdset_first_entry_t_nolock(RRDSET *st) time_t first_entry_t = LONG_MAX; rrddim_foreach_read(rd, st) { - first_entry_t = MIN(first_entry_t, rd->state->query_ops.oldest_time(rd)); + first_entry_t = + MIN(first_entry_t, + rd->state->query_ops.oldest_time(rd) > st->update_every ? + rd->state->query_ops.oldest_time(rd) - st->update_every : 0); } if (unlikely(LONG_MAX == first_entry_t)) return 0; @@ -1322,20 +1350,19 @@ extern void rrdset_save(RRDSET *st); extern void rrdset_delete_custom(RRDSET *st, int db_rotated); extern void rrdset_delete_obsolete_dimensions(RRDSET *st); -extern void rrdhost_cleanup_obsolete_charts(RRDHOST *host); extern RRDHOST *rrdhost_create( const char *hostname, const char *registry_hostname, const char *guid, const char *os, const char *timezone, - const char *tags, const char *program_name, const char *program_version, int update_every, long entries, - RRD_MEMORY_MODE memory_mode, unsigned int health_enabled, unsigned int rrdpush_enabled, char *rrdpush_destination, - char *rrdpush_api_key, char *rrdpush_send_charts_matching, struct rrdhost_system_info *system_info, + const char *abbrev_timezone, int32_t utc_offset,const char *tags, const char *program_name, const char *program_version, + int update_every, long entries, RRD_MEMORY_MODE memory_mode, unsigned int health_enabled, unsigned int rrdpush_enabled, + char *rrdpush_destination, char *rrdpush_api_key, char *rrdpush_send_charts_matching, struct rrdhost_system_info *system_info, int is_localhost); //TODO: Remove , int is_archived); #endif /* NETDATA_RRD_INTERNALS */ extern void set_host_properties( RRDHOST *host, int update_every, RRD_MEMORY_MODE memory_mode, const char *hostname, const char *registry_hostname, - const char *guid, const char *os, const char *tags, const char *tzone, const char *program_name, - const char *program_version); + const char *guid, const char *os, const char *tags, const char *tzone, const char *abbrev_tzone, int32_t utc_offset, + const char *program_name, const char *program_version); // ---------------------------------------------------------------------------- // RRD DB engine declarations @@ -1344,5 +1371,9 @@ extern void set_host_properties( #include "database/engine/rrdengineapi.h" #endif #include "sqlite/sqlite_functions.h" - +#include "sqlite/sqlite_aclk.h" +#include "sqlite/sqlite_aclk_chart.h" +#include "sqlite/sqlite_aclk_alert.h" +#include "sqlite/sqlite_aclk_node.h" +#include "sqlite/sqlite_health.h" #endif /* NETDATA_RRD_H */ diff --git a/database/rrdcalc.c b/database/rrdcalc.c index 85b9efb75..1b1a14960 100644 --- a/database/rrdcalc.c +++ b/database/rrdcalc.c @@ -87,6 +87,7 @@ static void rrdsetcalc_link(RRDSET *st, RRDCALC *rc) { host, rc->id, rc->next_event_id++, + rc->config_hash_id, now, rc->name, rc->rrdset->id, @@ -164,6 +165,7 @@ inline void rrdsetcalc_unlink(RRDCALC *rc) { host, rc->id, rc->next_event_id++, + rc->config_hash_id, now, rc->name, rc->rrdset->id, @@ -398,6 +400,7 @@ inline RRDCALC *rrdcalc_create_from_template(RRDHOST *host, RRDCALCTEMPLATE *rt, rc->hash = simple_hash(rc->name); rc->chart = strdupz(chart); rc->hash_chart = simple_hash(rc->chart); + uuid_copy(rc->config_hash_id, rt->config_hash_id); rc->id = rrdcalc_get_unique_id(host, rc->chart, rc->name, &rc->next_event_id); @@ -513,6 
+516,7 @@ inline RRDCALC *rrdcalc_create_from_rrdcalc(RRDCALC *rc, RRDHOST *host, const ch newrc->hash = simple_hash(newrc->name); newrc->chart = strdupz(rc->chart); newrc->hash_chart = simple_hash(rc->chart); + uuid_copy(newrc->config_hash_id, *((uuid_t *) &rc->config_hash_id)); newrc->dimensions = strdupz(dimension); newrc->foreachdim = NULL; diff --git a/database/rrdcalc.h b/database/rrdcalc.h index b4122c605..d7446f63a 100644 --- a/database/rrdcalc.h +++ b/database/rrdcalc.h @@ -38,6 +38,7 @@ struct rrdcalc { char *name; // the name of this alarm uint32_t hash; // the hash of the alarm name + uuid_t config_hash_id; // a predictable hash_id based on specific alert configuration char *exec; // the command to execute when this alarm switches state char *recipient; // the recipient of the alarm (the first parameter to exec) @@ -149,6 +150,43 @@ struct rrdcalc { struct rrdcalc *next; }; +struct alert_config { + char *alarm; + char *template_key; + char *os; + char *host; + char *on; + char *families; + char *plugin; + char *module; + char *charts; + char *lookup; + char *calc; + char *warn; + char *crit; + char *every; + char *green; + char *red; + char *exec; + char *to; + char *units; + char *info; + char *classification; + char *component; + char *type; + char *delay; + char *options; + char *repeat; + char *host_labels; + + char *p_db_lookup_dimensions; + char *p_db_lookup_method; + uint32_t p_db_lookup_options; + int32_t p_db_lookup_after; + int32_t p_db_lookup_before; + int32_t p_update_every; +}; + extern int alarm_isrepeating(RRDHOST *host, uint32_t alarm_id); extern int alarm_entry_isrepeating(RRDHOST *host, ALARM_ENTRY *ae); extern RRDCALC *alarm_max_last_repeat(RRDHOST *host, char *alarm_name, uint32_t hash); diff --git a/database/rrdcalctemplate.c b/database/rrdcalctemplate.c index 5060313ec..67288e9db 100644 --- a/database/rrdcalctemplate.c +++ b/database/rrdcalctemplate.c @@ -45,9 +45,7 @@ static int rrdcalctemplate_is_there_label_restriction(RRDCALCTEMPLATE *rt, RRDH } static inline int rrdcalctemplate_test_additional_restriction(RRDCALCTEMPLATE *rt, RRDSET *st) { - if (rt->charts_pattern && - !(simple_pattern_matches(rt->charts_pattern, st->id) || - simple_pattern_matches(rt->charts_pattern, st->name))) + if (rt->charts_pattern && !simple_pattern_matches(rt->charts_pattern, st->name)) return 0; if (rt->family_pattern && !simple_pattern_matches(rt->family_pattern, st->family)) diff --git a/database/rrdcalctemplate.h b/database/rrdcalctemplate.h index 65114da6a..0f12bba05 100644 --- a/database/rrdcalctemplate.h +++ b/database/rrdcalctemplate.h @@ -11,6 +11,7 @@ struct rrdcalctemplate { char *name; uint32_t hash_name; + uuid_t config_hash_id; char *exec; char *recipient; diff --git a/database/rrddim.c b/database/rrddim.c index 510538d4b..78885df3d 100644 --- a/database/rrddim.c +++ b/database/rrddim.c @@ -119,7 +119,7 @@ inline int rrddim_set_divisor(RRDSET *st, RRDDIM *rd, collected_number divisor) // RRDDIM legacy data collection functions static void rrddim_collect_init(RRDDIM *rd) { - rd->values[rd->rrdset->current_entry] = SN_EMPTY_SLOT; // pack_storage_number(0, SN_NOT_EXISTS); + rd->values[rd->rrdset->current_entry] = SN_EMPTY_SLOT; } static void rrddim_collect_store_metric(RRDDIM *rd, usec_t point_in_time, storage_number number) { (void)point_in_time; @@ -210,7 +210,7 @@ void rrdcalc_link_to_rrddim(RRDDIM *rd, RRDSET *st, RRDHOST *host) { } } #ifdef ENABLE_ACLK - rrdset_flag_set(st, RRDSET_FLAG_ACLK); + rrdset_flag_clear(st, RRDSET_FLAG_ACLK); #endif } @@ -232,7 +232,7 
@@ RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collecte rc += rrddim_set_multiplier(st, rd, multiplier); rc += rrddim_set_divisor(st, rd, divisor); if (rrddim_flag_check(rd, RRDDIM_FLAG_ARCHIVED)) { - store_active_dimension(rd->state->metric_uuid); + store_active_dimension(&rd->state->metric_uuid); rd->state->collect_ops.init(rd); rrddim_flag_clear(rd, RRDDIM_FLAG_ARCHIVED); rrddimvar_create(rd, RRDVAR_TYPE_CALCULATED, NULL, NULL, &rd->last_stored_value, RRDVAR_OPTION_DEFAULT); @@ -242,7 +242,7 @@ RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collecte } if (unlikely(rc)) { debug(D_METADATALOG, "DIMENSION [%s] metadata updated", rd->id); - (void)sql_store_dimension(rd->state->metric_uuid, rd->rrdset->chart_uuid, rd->id, rd->name, rd->multiplier, rd->divisor, + (void)sql_store_dimension(&rd->state->metric_uuid, rd->rrdset->chart_uuid, rd->id, rd->name, rd->multiplier, rd->divisor, rd->algorithm); } rrdset_unlock(st); @@ -386,11 +386,14 @@ RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collecte rd->last_collected_time.tv_sec = 0; rd->last_collected_time.tv_usec = 0; rd->rrdset = st; - rd->state = mallocz(sizeof(*rd->state)); + rd->state = callocz(1, sizeof(*rd->state)); +#ifdef ENABLE_ACLK + rd->state->aclk_live_status = -1; +#endif + (void) find_dimension_uuid(st, rd, &(rd->state->metric_uuid)); if(memory_mode == RRD_MEMORY_MODE_DBENGINE) { #ifdef ENABLE_DBENGINE - uuid_t *dim_uuid = find_dimension_uuid(st, rd); - rrdeng_metric_init(rd, dim_uuid); + rrdeng_metric_init(rd); rd->state->collect_ops.init = rrdeng_store_metric_init; rd->state->collect_ops.store_metric = rrdeng_store_metric_next; rd->state->collect_ops.finalize = rrdeng_store_metric_finalize; @@ -402,9 +405,6 @@ RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collecte rd->state->query_ops.oldest_time = rrdeng_metric_oldest_time; #endif } else { - rd->state->metric_uuid = find_dimension_uuid(st, rd); - if (unlikely(!rd->state->metric_uuid)) - rd->state->metric_uuid = create_dimension_uuid(rd->rrdset, rd); rd->state->collect_ops.init = rrddim_collect_init; rd->state->collect_ops.store_metric = rrddim_collect_store_metric; rd->state->collect_ops.finalize = rrddim_collect_finalize; @@ -415,7 +415,7 @@ RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collecte rd->state->query_ops.latest_time = rrddim_query_latest_time; rd->state->query_ops.oldest_time = rrddim_query_oldest_time; } - store_active_dimension(rd->state->metric_uuid); + store_active_dimension(&rd->state->metric_uuid); rd->state->collect_ops.init(rd); // append this dimension if(!st->dimensions) @@ -454,9 +454,11 @@ RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collecte calc_link_to_rrddim(rd); + ml_new_dimension(rd); + rrdset_unlock(st); #ifdef ENABLE_ACLK - rrdset_flag_set(st, RRDSET_FLAG_ACLK); + rrdset_flag_clear(st, RRDSET_FLAG_ACLK); #endif return(rd); } @@ -466,6 +468,8 @@ RRDDIM *rrddim_add_custom(RRDSET *st, const char *id, const char *name, collecte void rrddim_free_custom(RRDSET *st, RRDDIM *rd, int db_rotated) { + ml_delete_dimension(rd); + #ifndef ENABLE_ACLK UNUSED(db_rotated); #endif @@ -475,7 +479,7 @@ void rrddim_free_custom(RRDSET *st, RRDDIM *rd, int db_rotated) uint8_t can_delete_metric = rd->state->collect_ops.finalize(rd); if (can_delete_metric && rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) { /* This metric has no data and no references */ - delete_dimension_uuid(rd->state->metric_uuid); + 
delete_dimension_uuid(&rd->state->metric_uuid); } } @@ -499,7 +503,6 @@ void rrddim_free_custom(RRDSET *st, RRDDIM *rd, int db_rotated) error("RRDDIM: INTERNAL ERROR: attempt to remove from index dimension '%s' on chart '%s', removed a different dimension.", rd->id, st->id); // free(rd->annotations); - freez(rd->state->metric_uuid); RRD_MEMORY_MODE rrd_memory_mode = rd->rrd_memory_mode; switch(rrd_memory_mode) { @@ -525,7 +528,7 @@ void rrddim_free_custom(RRDSET *st, RRDDIM *rd, int db_rotated) } #ifdef ENABLE_ACLK if (db_rotated || RRD_MEMORY_MODE_DBENGINE != rrd_memory_mode) - rrdset_flag_set(st, RRDSET_FLAG_ACLK); + rrdset_flag_clear(st, RRDSET_FLAG_ACLK); #endif } @@ -546,7 +549,7 @@ int rrddim_hide(RRDSET *st, const char *id) { rrddim_flag_set(rd, RRDDIM_FLAG_HIDDEN); #ifdef ENABLE_ACLK - rrdset_flag_set(st, RRDSET_FLAG_ACLK); + rrdset_flag_clear(st, RRDSET_FLAG_ACLK); #endif return 0; } @@ -563,7 +566,7 @@ int rrddim_unhide(RRDSET *st, const char *id) { rrddim_flag_clear(rd, RRDDIM_FLAG_HIDDEN); #ifdef ENABLE_ACLK - rrdset_flag_set(st, RRDSET_FLAG_ACLK); + rrdset_flag_clear(st, RRDSET_FLAG_ACLK); #endif return 0; } @@ -578,7 +581,7 @@ inline void rrddim_is_obsolete(RRDSET *st, RRDDIM *rd) { rrddim_flag_set(rd, RRDDIM_FLAG_OBSOLETE); rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE_DIMENSIONS); #ifdef ENABLE_ACLK - rrdset_flag_set(st, RRDSET_FLAG_ACLK); + rrdset_flag_clear(st, RRDSET_FLAG_ACLK); #endif } @@ -587,7 +590,7 @@ inline void rrddim_isnot_obsolete(RRDSET *st __maybe_unused, RRDDIM *rd) { rrddim_flag_clear(rd, RRDDIM_FLAG_OBSOLETE); #ifdef ENABLE_ACLK - rrdset_flag_set(st, RRDSET_FLAG_ACLK); + rrdset_flag_clear(st, RRDSET_FLAG_ACLK); #endif } diff --git a/database/rrdhost.c b/database/rrdhost.c index 5ce5366d2..d9608b740 100644 --- a/database/rrdhost.c +++ b/database/rrdhost.c @@ -88,13 +88,20 @@ static inline void rrdhost_init_os(RRDHOST *host, const char *os) { freez(old); } -static inline void rrdhost_init_timezone(RRDHOST *host, const char *timezone) { - if(host->timezone && timezone && !strcmp(host->timezone, timezone)) +static inline void rrdhost_init_timezone(RRDHOST *host, const char *timezone, const char *abbrev_timezone, int32_t utc_offset) { + if (host->timezone && timezone && !strcmp(host->timezone, timezone) && host->abbrev_timezone && abbrev_timezone && + !strcmp(host->abbrev_timezone, abbrev_timezone) && host->utc_offset == utc_offset) return; void *old = (void *)host->timezone; host->timezone = strdupz((timezone && *timezone)?timezone:"unknown"); freez(old); + + old = (void *)host->abbrev_timezone; + host->abbrev_timezone = strdupz((abbrev_timezone && *abbrev_timezone) ? 
abbrev_timezone : "UTC"); + freez(old); + + host->utc_offset = utc_offset; } static inline void rrdhost_init_machine_guid(RRDHOST *host, const char *machine_guid) { @@ -105,7 +112,8 @@ static inline void rrdhost_init_machine_guid(RRDHOST *host, const char *machine_ void set_host_properties(RRDHOST *host, int update_every, RRD_MEMORY_MODE memory_mode, const char *hostname, const char *registry_hostname, const char *guid, const char *os, const char *tags, - const char *tzone, const char *program_name, const char *program_version) + const char *tzone, const char *abbrev_tzone, int32_t utc_offset, const char *program_name, + const char *program_version) { host->rrd_update_every = update_every; @@ -116,7 +124,7 @@ void set_host_properties(RRDHOST *host, int update_every, RRD_MEMORY_MODE memory rrdhost_init_machine_guid(host, guid); rrdhost_init_os(host, os); - rrdhost_init_timezone(host, tzone); + rrdhost_init_timezone(host, tzone, abbrev_tzone, utc_offset); rrdhost_init_tags(host, tags); host->program_name = strdupz((program_name && *program_name) ? program_name : "unknown"); @@ -133,6 +141,8 @@ RRDHOST *rrdhost_create(const char *hostname, const char *guid, const char *os, const char *timezone, + const char *abbrev_timezone, + int32_t utc_offset, const char *tags, const char *program_name, const char *program_version, @@ -160,7 +170,7 @@ RRDHOST *rrdhost_create(const char *hostname, RRDHOST *host = callocz(1, sizeof(RRDHOST)); set_host_properties(host, (update_every > 0)?update_every:1, memory_mode, hostname, registry_hostname, guid, os, - tags, timezone, program_name, program_version); + tags, timezone, abbrev_timezone, utc_offset, program_name, program_version); host->rrd_history_entries = align_entries_to_pagesize(memory_mode, entries); host->health_enabled = ((memory_mode == RRD_MEMORY_MODE_NONE)) ? 
0 : health_enabled; @@ -285,9 +295,6 @@ RRDHOST *rrdhost_create(const char *hostname, rrdhost_wrlock(host); health_readdir(host, health_user_config_dir(), health_stock_config_dir(), NULL); rrdhost_unlock(host); - - health_alarm_log_load(host); - health_alarm_log_open(host); } RRDHOST *t = rrdhost_index_add(host); @@ -303,6 +310,23 @@ RRDHOST *rrdhost_create(const char *hostname, if (unlikely(rc)) error_report("Failed to store machine GUID to the database"); sql_load_node_id(host); + if (host->health_enabled) { + if (!file_is_migrated(host->health_log_filename)) { + int rc = sql_create_health_log_table(host); + if (unlikely(rc)) { + error_report("Failed to create health log table in the database"); + health_alarm_log_load(host); + health_alarm_log_open(host); + } + else { + health_alarm_log_load(host); + add_migrated_file(host->health_log_filename, 0); + } + } else { + sql_create_health_log_table(host); + sql_health_alarm_log_load(host); + } + } } else error_report("Host machine GUID %s is not valid", host->machine_guid); @@ -358,6 +382,8 @@ RRDHOST *rrdhost_create(const char *hostname, else localhost = host; } + ml_new_host(host); + info("Host '%s' (at registry as '%s') with guid '%s' initialized" ", os '%s'" ", timezone '%s'" @@ -408,6 +434,8 @@ void rrdhost_update(RRDHOST *host , const char *guid , const char *os , const char *timezone + , const char *abbrev_timezone + , int32_t utc_offset , const char *tags , const char *program_name , const char *program_version @@ -435,7 +463,7 @@ void rrdhost_update(RRDHOST *host host->system_info = system_info; rrdhost_init_os(host, os); - rrdhost_init_timezone(host, timezone); + rrdhost_init_timezone(host, timezone, abbrev_timezone, utc_offset); freez(host->registry_hostname); host->registry_hostname = strdupz((registry_hostname && *registry_hostname)?registry_hostname:hostname); @@ -494,8 +522,21 @@ void rrdhost_update(RRDHOST *host health_readdir(host, health_user_config_dir(), health_stock_config_dir(), NULL); rrdhost_unlock(host); - health_alarm_log_load(host); - health_alarm_log_open(host); + if (!file_is_migrated(host->health_log_filename)) { + int rc = sql_create_health_log_table(host); + if (unlikely(rc)) { + error_report("Failed to create health log table in the database"); + + health_alarm_log_load(host); + health_alarm_log_open(host); + } else { + health_alarm_log_load(host); + add_migrated_file(host->health_log_filename, 0); + } + } else { + sql_create_health_log_table(host); + sql_health_alarm_log_load(host); + } } rrd_hosts_available++; info("Host %s is not in archived mode anymore", host->hostname); @@ -510,6 +551,8 @@ RRDHOST *rrdhost_find_or_create( , const char *guid , const char *os , const char *timezone + , const char *abbrev_timezone + , int32_t utc_offset , const char *tags , const char *program_name , const char *program_version @@ -541,6 +584,8 @@ RRDHOST *rrdhost_find_or_create( , guid , os , timezone + , abbrev_timezone + , utc_offset , tags , program_name , program_version @@ -563,6 +608,8 @@ RRDHOST *rrdhost_find_or_create( , guid , os , timezone + , abbrev_timezone + , utc_offset , tags , program_name , program_version @@ -632,13 +679,20 @@ restart_after_removal: int rrd_init(char *hostname, struct rrdhost_system_info *system_info) { rrdset_free_obsolete_time = config_get_number(CONFIG_SECTION_GLOBAL, "cleanup obsolete charts after seconds", rrdset_free_obsolete_time); + // Current chart locking and invalidation scheme doesn't prevent Netdata from segmentation faults if a short + // cleanup delay is set. 
Extensive stress tests showed that 10 seconds is quite a safe delay. Look at + // https://github.com/netdata/netdata/pull/11222#issuecomment-868367920 for more information. + if (rrdset_free_obsolete_time < 10) { + rrdset_free_obsolete_time = 10; + info("The \"cleanup obsolete charts after seconds\" option was set to 10 seconds. A lower delay can potentially cause a segmentation fault."); + } gap_when_lost_iterations_above = (int)config_get_number(CONFIG_SECTION_GLOBAL, "gap when lost iterations above", gap_when_lost_iterations_above); if (gap_when_lost_iterations_above < 1) gap_when_lost_iterations_above = 1; - if (unlikely(sql_init_database())) { + if (unlikely(sql_init_database(DB_CHECK_NONE))) { if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) - return 1; + fatal("Failed to initialize SQLite"); info("Skipping SQLITE metadata initialization since memory mode is not db engine"); } @@ -654,6 +708,8 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info) { , registry_get_this_machine_guid() , os_type , netdata_configured_timezone + , netdata_configured_abbrev_timezone + , netdata_configured_utc_offset , config_get(CONFIG_SECTION_BACKEND, "host tags", "") , program_name , program_version @@ -689,9 +745,10 @@ int rrd_init(char *hostname, struct rrdhost_system_info *system_info) { rrdhost_free(localhost); localhost = NULL; rrd_unlock(); - return 1; + fatal("Failed to initialize dbengine"); } #endif + sql_aclk_sync_init(); rrd_unlock(); web_client_api_v1_management_init(); @@ -849,6 +906,8 @@ void rrdhost_free(RRDHOST *host) { rrdeng_exit(host->rrdeng_ctx); #endif + ml_delete_host(host); + // ------------------------------------------------------------------------ // remove it from the indexes @@ -883,6 +942,7 @@ void rrdhost_free(RRDHOST *host) { free_label_list(host->labels.head); freez((void *)host->os); freez((void *)host->timezone); + freez((void *)host->abbrev_timezone); freez(host->program_version); freez(host->program_name); rrdhost_system_info_free(host->system_info); @@ -1371,6 +1431,7 @@ restart_after_removal: && st->last_updated.tv_sec + rrdset_free_obsolete_time < now && st->last_collected_time.tv_sec + rrdset_free_obsolete_time < now )) { + st->rrdhost->obsolete_charts_count--; #ifdef ENABLE_DBENGINE if(st->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) { RRDDIM *rd, *last; @@ -1386,6 +1447,11 @@ restart_after_removal: continue; } + if (rrddim_flag_check(rd, RRDDIM_FLAG_ACLK)) { + last = rd; + rd = rd->next; + continue; + } rrddim_flag_set(rd, RRDDIM_FLAG_ARCHIVED); while (rd->variables) rrddimvar_free(rd->variables); @@ -1396,7 +1462,7 @@ restart_after_removal: uint8_t can_delete_metric = rd->state->collect_ops.finalize(rd); if (can_delete_metric) { /* This metric has no data and no references */ - delete_dimension_uuid(rd->state->metric_uuid); + delete_dimension_uuid(&rd->state->metric_uuid); rrddim_free(st, rd); if (unlikely(!last)) { rd = st->dimensions; @@ -1416,6 +1482,7 @@ restart_after_removal: rrdvar_free_remaining_variables(host, &st->rrdvar_root_index); rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE); + if (st->dimensions) { /* If the chart still has dimensions don't delete it from the metadata log */ continue; @@ -1437,6 +1504,30 @@ restart_after_removal: } } +void rrd_cleanup_obsolete_charts() +{ + rrd_rdlock(); + + RRDHOST *host; + rrdhost_foreach_read(host) + { + if (host->obsolete_charts_count) { + rrdhost_wrlock(host); +#ifdef ENABLE_ACLK + host->deleted_charts_count = 0; +#endif + rrdhost_cleanup_obsolete_charts(host); +#ifdef ENABLE_ACLK + if 
(host->deleted_charts_count) + aclk_update_chart(host, "dummy-chart", 0); +#endif + rrdhost_unlock(host); + } + } + + rrd_unlock(); +} + // ---------------------------------------------------------------------------- // RRDHOST - set system info from environment variables // system_info fields must be heap allocated or NULL @@ -1466,8 +1557,8 @@ int rrdhost_set_system_info_variable(struct rrdhost_system_info *system_info, ch system_info->container_os_version_id = strdupz(value); } else if(!strcmp(name, "NETDATA_CONTAINER_OS_DETECTION")){ - freez(system_info->host_os_detection); - system_info->host_os_detection = strdupz(value); + freez(system_info->container_os_detection); + system_info->container_os_detection = strdupz(value); } else if(!strcmp(name, "NETDATA_HOST_OS_NAME")){ freez(system_info->host_os_name); @@ -1551,6 +1642,8 @@ int rrdhost_set_system_info_variable(struct rrdhost_system_info *system_info, ch return res; else if (!strcmp(name, "NETDATA_SYSTEM_DISK_DETECTION")) return res; + else if (!strcmp(name, "NETDATA_CONTAINER_IS_OFFICIAL_IMAGE")) + return res; else { res = 1; } diff --git a/database/rrdset.c b/database/rrdset.c index fd6605dff..2d3ee9609 100644 --- a/database/rrdset.c +++ b/database/rrdset.c @@ -194,6 +194,8 @@ inline void rrdset_is_obsolete(RRDSET *st) { if(unlikely(!(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))) { rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE); + st->rrdhost->obsolete_charts_count++; + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); // the chart will not get more updates (data collection) @@ -205,6 +207,8 @@ inline void rrdset_is_obsolete(RRDSET *st) { inline void rrdset_isnot_obsolete(RRDSET *st) { if(unlikely((rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))) { rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE); + st->rrdhost->obsolete_charts_count--; + rrdset_flag_clear(st, RRDSET_FLAG_UPSTREAM_EXPOSED); // the chart will be pushed upstream automatically @@ -452,7 +456,7 @@ void rrdset_delete_custom(RRDSET *st, int db_rotated) { #ifdef ENABLE_ACLK if ((netdata_cloud_setting) && (db_rotated || RRD_MEMORY_MODE_DBENGINE != st->rrd_memory_mode)) { aclk_del_collector(st->rrdhost, st->plugin_name, st->module_name); - st->rrdhost->obsolete_count++; + st->rrdhost->deleted_charts_count++; } #endif @@ -645,7 +649,7 @@ RRDSET *rrdset_create_custom( aclk_add_collector(host, st->plugin_name, st->module_name); } } - rrdset_flag_set(st, RRDSET_FLAG_ACLK); + rrdset_flag_clear(st, RRDSET_FLAG_ACLK); } #endif freez(old_plugin); @@ -695,11 +699,11 @@ RRDSET *rrdset_create_custom( // ------------------------------------------------------------------------ // compose the config_section for this chart - char config_section[RRD_ID_LENGTH_MAX + 1]; + char config_section[RRD_ID_LENGTH_MAX + GUID_LEN + 2]; if(host == localhost) strcpy(config_section, fullid); else - snprintfz(config_section, RRD_ID_LENGTH_MAX, "%s/%s", host->machine_guid, fullid); + snprintfz(config_section, RRD_ID_LENGTH_MAX + GUID_LEN + 1, "%s/%s", host->machine_guid, fullid); // ------------------------------------------------------------------------ // get the options from the config, we need to create it @@ -931,21 +935,13 @@ RRDSET *rrdset_create_custom( update_chart_metadata(st->chart_uuid, st, id, name); store_active_chart(st->chart_uuid); - -#ifdef ENABLE_ACLK - host->obsolete_count = 0; -#endif - rrdhost_cleanup_obsolete_charts(host); -#ifdef ENABLE_ACLK - if (host->obsolete_count) - aclk_update_chart(st->rrdhost, "dummy-chart", ACLK_CMD_CHARTDEL); -#endif + compute_chart_hash(st); 
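[Editor's aside: the config_section sizing in the hunk above packs "<machine_guid>/<fullid>" into one buffer: GUID_LEN bytes of GUID, one '/', up to RRD_ID_LENGTH_MAX bytes of chart id, and a terminating NUL, i.e. RRD_ID_LENGTH_MAX + GUID_LEN + 2 bytes in total, with the snprintfz() limit one lower because it appears to exclude the terminator. A minimal standalone sketch of the same arithmetic, using an illustrative stand-in value for the Netdata RRD_ID_LENGTH_MAX constant:]

#include <stdio.h>

#define GUID_LEN 36             /* canonical textual UUID length */
#define RRD_ID_LENGTH_MAX 200   /* illustrative stand-in, not the Netdata value */

int main(void) {
    /* GUID + '/' + id + NUL can never overflow this buffer */
    char config_section[RRD_ID_LENGTH_MAX + GUID_LEN + 2];
    snprintf(config_section, sizeof(config_section), "%s/%s",
             "0e3b4dfa-1234-4e1a-8c4d-9a1b2c3d4e5f", "system.cpu");
    printf("%s\n", config_section);
    return 0;
}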
rrdhost_unlock(host); #ifdef ENABLE_ACLK if (netdata_cloud_setting) aclk_add_collector(host, plugin, module); - rrdset_flag_set(st, RRDSET_FLAG_ACLK); + rrdset_flag_clear(st, RRDSET_FLAG_ACLK); #endif return(st); } @@ -1124,7 +1120,7 @@ static inline size_t rrdset_done_interpolate( , usec_t last_collect_ut , usec_t now_collect_ut , char store_this_entry - , uint32_t storage_flags + , uint32_t has_reset_value ) { RRDDIM *rd; @@ -1139,6 +1135,11 @@ static inline size_t rrdset_done_interpolate( size_t counter = st->counter; long current_entry = st->current_entry; + uint32_t storage_flags = SN_DEFAULT_FLAGS; + + if (has_reset_value) + storage_flags |= SN_EXISTS_RESET; + for( ; next_store_ut <= now_collect_ut ; last_collect_ut = next_store_ut, next_store_ut += update_every_ut, iterations-- ) { #ifdef NETDATA_INTERNAL_CHECKS @@ -1236,13 +1237,22 @@ static inline size_t rrdset_done_interpolate( } if(unlikely(!store_this_entry)) { - rd->state->collect_ops.store_metric(rd, next_store_ut, SN_EMPTY_SLOT); //pack_storage_number(0, SN_NOT_EXISTS) -// rd->values[current_entry] = SN_EMPTY_SLOT; //pack_storage_number(0, SN_NOT_EXISTS); + (void) ml_is_anomalous(rd, 0, false); + + rd->state->collect_ops.store_metric(rd, next_store_ut, SN_EMPTY_SLOT); +// rd->values[current_entry] = SN_EMPTY_SLOT; continue; } if(likely(rd->updated && rd->collections_counter > 1 && iterations < st->gap_when_lost_iterations_above)) { - rd->state->collect_ops.store_metric(rd, next_store_ut, pack_storage_number(new_value, storage_flags)); + uint32_t dim_storage_flags = storage_flags; + + if (ml_is_anomalous(rd, new_value, true)) { + // clear anomaly bit: 0 -> is anomalous, 1 -> not anomalous + dim_storage_flags &= ~ ((uint32_t) SN_ANOMALY_BIT); + } + + rd->state->collect_ops.store_metric(rd, next_store_ut, pack_storage_number(new_value, dim_storage_flags)); // rd->values[current_entry] = pack_storage_number(new_value, storage_flags ); rd->last_stored_value = new_value; @@ -1254,9 +1264,9 @@ static inline size_t rrdset_done_interpolate( , unpack_storage_number(rd->values[current_entry]), new_value ); #endif - } else { + (void) ml_is_anomalous(rd, 0, false); #ifdef NETDATA_INTERNAL_CHECKS rrdset_debug(st, "%s: STORE[%ld] = NON EXISTING " @@ -1265,8 +1275,8 @@ static inline size_t rrdset_done_interpolate( ); #endif -// rd->values[current_entry] = SN_EMPTY_SLOT; // pack_storage_number(0, SN_NOT_EXISTS); - rd->state->collect_ops.store_metric(rd, next_store_ut, SN_EMPTY_SLOT); //pack_storage_number(0, SN_NOT_EXISTS) +// rd->values[current_entry] = SN_EMPTY_SLOT; + rd->state->collect_ops.store_metric(rd, next_store_ut, SN_EMPTY_SLOT); rd->last_stored_value = NAN; } @@ -1278,11 +1288,10 @@ static inline size_t rrdset_done_interpolate( calculated_number t2 = unpack_storage_number(rd->values[current_entry]); calculated_number accuracy = accuracy_loss(t1, t2); - debug(D_RRD_STATS, "%s/%s: UNPACK[%ld] = " CALCULATED_NUMBER_FORMAT " FLAGS=0x%08x (original = " CALCULATED_NUMBER_FORMAT ", accuracy loss = " CALCULATED_NUMBER_FORMAT "%%%s)" + debug(D_RRD_STATS, "%s/%s: UNPACK[%ld] = " CALCULATED_NUMBER_FORMAT " (original = " CALCULATED_NUMBER_FORMAT ", accuracy loss = " CALCULATED_NUMBER_FORMAT "%%%s)" , st->id, rd->name , current_entry , t2 - , get_storage_number_flags(rd->values[current_entry]) , t1 , accuracy , (accuracy > ACCURACY_LOSS_ACCEPTED_PERCENT) ? 
" **TOO BIG** " : "" @@ -1304,7 +1313,7 @@ static inline size_t rrdset_done_interpolate( #endif } // reset the storage flags for the next point, if any; - storage_flags = SN_EXISTS; + storage_flags = SN_DEFAULT_FLAGS; st->counter = ++counter; st->current_entry = current_entry = ((current_entry + 1) >= st->entries) ? 0 : current_entry + 1; @@ -1353,6 +1362,7 @@ static inline void rrdset_done_fill_the_gap(RRDSET *st) { st->last_updated.tv_sec += c * st->update_every; st->current_entry += c; + st->counter += c; if(st->current_entry >= st->entries) st->current_entry -= st->entries; } @@ -1382,9 +1392,11 @@ void rrdset_done(RRDSET *st) { rrdset_rdlock(st); #ifdef ENABLE_ACLK - if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_ACLK))) { - rrdset_flag_clear(st, RRDSET_FLAG_ACLK); - aclk_update_chart(st->rrdhost, st->id, ACLK_CMD_CHART); + if (unlikely(!rrdset_flag_check(st, RRDSET_FLAG_ACLK))) { + if (st->counter_done >= RRDSET_MINIMUM_LIVE_COUNT && st->dimensions) { + if (likely(!queue_chart_to_aclk(st))) + rrdset_flag_set(st, RRDSET_FLAG_ACLK); + } } #endif @@ -1535,7 +1547,7 @@ after_first_database_work: st->collected_total += rd->collected_value; } - uint32_t storage_flags = SN_EXISTS; + uint32_t has_reset_value = 0; // process all dimensions to calculate their values // based on the collected figures only @@ -1632,7 +1644,7 @@ after_first_database_work: , rd->collected_value); if(!(rrddim_flag_check(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS))) - storage_flags = SN_EXISTS_RESET; + has_reset_value = 1; uint64_t last = (uint64_t)rd->last_collected_value; uint64_t new = (uint64_t)rd->collected_value; @@ -1703,7 +1715,7 @@ after_first_database_work: ); if(!(rrddim_flag_check(rd, RRDDIM_FLAG_DONT_DETECT_RESETS_OR_OVERFLOWS))) - storage_flags = SN_EXISTS_RESET; + has_reset_value = 1; rd->last_collected_value = rd->collected_value; } @@ -1782,15 +1794,32 @@ after_first_database_work: , last_collect_ut , now_collect_ut , store_this_entry - , storage_flags + , has_reset_value ); after_second_database_work: st->last_collected_total = st->collected_total; +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + time_t mark = now_realtime_sec(); +#endif rrddim_foreach_read(rd, st) { if (rrddim_flag_check(rd, RRDDIM_FLAG_ARCHIVED)) continue; + +#if defined(ENABLE_ACLK) && defined(ENABLE_NEW_CLOUD_PROTOCOL) + if (!rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)) { + int live = ((mark - rd->last_collected_time.tv_sec) < (RRDSET_MINIMUM_LIVE_COUNT * rd->update_every)); + if (unlikely(live != rd->state->aclk_live_status)) { + if (likely(rrdset_flag_check(st, RRDSET_FLAG_ACLK))) { + if (likely(!queue_dimension_to_aclk(rd))) { + rd->state->aclk_live_status = live; + rrddim_flag_set(rd, RRDDIM_FLAG_ACLK); + } + } + } + } +#endif if(unlikely(!rd->updated)) continue; @@ -1866,7 +1895,8 @@ after_second_database_work: rrdset_wrlock(st); for( rd = st->dimensions, last = NULL ; likely(rd) ; ) { - if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE) && (rd->last_collected_time.tv_sec + rrdset_free_obsolete_time < now))) { + if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE) && !rrddim_flag_check(rd, RRDDIM_FLAG_ACLK) + && (rd->last_collected_time.tv_sec + rrdset_free_obsolete_time < now))) { info("Removing obsolete dimension '%s' (%s) of '%s' (%s).", rd->name, rd->id, st->name, st->id); if(likely(rd->rrd_memory_mode == RRD_MEMORY_MODE_SAVE || rd->rrd_memory_mode == RRD_MEMORY_MODE_MAP)) { @@ -1886,7 +1916,7 @@ after_second_database_work: uint8_t can_delete_metric = rd->state->collect_ops.finalize(rd); if (can_delete_metric) { /* This 
metric has no data and no references */ - delete_dimension_uuid(rd->state->metric_uuid); + delete_dimension_uuid(&rd->state->metric_uuid); } else { /* Do not delete this dimension */ last = rd; diff --git a/database/sqlite/sqlite3.c b/database/sqlite/sqlite3.c index 5a804fba0..057ce52d8 100644 --- a/database/sqlite/sqlite3.c +++ b/database/sqlite/sqlite3.c @@ -1,6 +1,6 @@ /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.33.0. By combining all the individual C code files into this +** version 3.36.0. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -23,6 +23,7 @@ #ifndef SQLITE_PRIVATE # define SQLITE_PRIVATE static #endif +#define SQLITE_UDL_CAPABLE_PARSER 1 /************** Begin file ctime.c *******************************************/ /* ** 2010 February 23 @@ -86,8 +87,10 @@ static const char * const sqlite3azCompileOpt[] = { #if SQLITE_64BIT_STATS "64BIT_STATS", #endif -#if SQLITE_ALLOW_COVERING_INDEX_SCAN - "ALLOW_COVERING_INDEX_SCAN", +#ifdef SQLITE_ALLOW_COVERING_INDEX_SCAN +# if SQLITE_ALLOW_COVERING_INDEX_SCAN != 1 + "ALLOW_COVERING_INDEX_SCAN=" CTIMEOPT_VAL(SQLITE_ALLOW_COVERING_INDEX_SCAN), +# endif #endif #if SQLITE_ALLOW_URI_AUTHORITY "ALLOW_URI_AUTHORITY", @@ -149,8 +152,10 @@ static const char * const sqlite3azCompileOpt[] = { #ifdef SQLITE_DEFAULT_LOOKASIDE "DEFAULT_LOOKASIDE=" CTIMEOPT_VAL2(SQLITE_DEFAULT_LOOKASIDE), #endif -#if SQLITE_DEFAULT_MEMSTATUS - "DEFAULT_MEMSTATUS", +#ifdef SQLITE_DEFAULT_MEMSTATUS +# if SQLITE_DEFAULT_MEMSTATUS != 1 + "DEFAULT_MEMSTATUS=" CTIMEOPT_VAL(SQLITE_DEFAULT_MEMSTATUS), +# endif #endif #ifdef SQLITE_DEFAULT_MMAP_SIZE "DEFAULT_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_MMAP_SIZE), @@ -224,7 +229,7 @@ static const char * const sqlite3azCompileOpt[] = { #if SQLITE_ENABLE_BYTECODE_VTAB "ENABLE_BYTECODE_VTAB", #endif -#if SQLITE_ENABLE_CEROD +#ifdef SQLITE_ENABLE_CEROD "ENABLE_CEROD=" CTIMEOPT_VAL(SQLITE_ENABLE_CEROD), #endif #if SQLITE_ENABLE_COLUMN_METADATA @@ -239,17 +244,17 @@ static const char * const sqlite3azCompileOpt[] = { #if SQLITE_ENABLE_CURSOR_HINTS "ENABLE_CURSOR_HINTS", #endif +#if SQLITE_ENABLE_DBPAGE_VTAB + "ENABLE_DBPAGE_VTAB", +#endif #if SQLITE_ENABLE_DBSTAT_VTAB "ENABLE_DBSTAT_VTAB", #endif #if SQLITE_ENABLE_EXPENSIVE_ASSERT "ENABLE_EXPENSIVE_ASSERT", #endif -#if SQLITE_ENABLE_FTS1 - "ENABLE_FTS1", -#endif -#if SQLITE_ENABLE_FTS2 - "ENABLE_FTS2", +#if SQLITE_ENABLE_EXPLAIN_COMMENTS + "ENABLE_EXPLAIN_COMMENTS", #endif #if SQLITE_ENABLE_FTS3 "ENABLE_FTS3", @@ -287,6 +292,9 @@ static const char * const sqlite3azCompileOpt[] = { #ifdef SQLITE_ENABLE_LOCKING_STYLE "ENABLE_LOCKING_STYLE=" CTIMEOPT_VAL(SQLITE_ENABLE_LOCKING_STYLE), #endif +#if SQLITE_ENABLE_MATH_FUNCTIONS + "ENABLE_MATH_FUNCTIONS", +#endif #if SQLITE_ENABLE_MEMORY_MANAGEMENT "ENABLE_MEMORY_MANAGEMENT", #endif @@ -305,6 +313,9 @@ static const char * const sqlite3azCompileOpt[] = { #if SQLITE_ENABLE_NULL_TRIM "ENABLE_NULL_TRIM", #endif +#if SQLITE_ENABLE_OFFSET_SQL_FUNC + "ENABLE_OFFSET_SQL_FUNC", +#endif #if SQLITE_ENABLE_OVERSIZE_CELL_CHECK "ENABLE_OVERSIZE_CELL_CHECK", #endif @@ -335,7 +346,7 @@ static const char * const sqlite3azCompileOpt[] = { #if SQLITE_ENABLE_SQLLOG "ENABLE_SQLLOG", #endif -#if 
defined(SQLITE_ENABLE_STAT4) +#if SQLITE_ENABLE_STAT4 "ENABLE_STAT4", #endif #if SQLITE_ENABLE_STMTVTAB @@ -389,8 +400,10 @@ static const char * const sqlite3azCompileOpt[] = { #if HAVE_ISNAN || SQLITE_HAVE_ISNAN "HAVE_ISNAN", #endif -#if SQLITE_HOMEGROWN_RECURSIVE_MUTEX - "HOMEGROWN_RECURSIVE_MUTEX", +#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX +# if SQLITE_HOMEGROWN_RECURSIVE_MUTEX != 1 + "HOMEGROWN_RECURSIVE_MUTEX=" CTIMEOPT_VAL(SQLITE_HOMEGROWN_RECURSIVE_MUTEX), +# endif #endif #if SQLITE_IGNORE_AFP_LOCK_ERRORS "IGNORE_AFP_LOCK_ERRORS", @@ -488,9 +501,6 @@ static const char * const sqlite3azCompileOpt[] = { #if SQLITE_MUTEX_NOOP "MUTEX_NOOP", #endif -#if SQLITE_MUTEX_NREF - "MUTEX_NREF", -#endif #if SQLITE_MUTEX_OMIT "MUTEX_OMIT", #endif @@ -560,7 +570,7 @@ static const char * const sqlite3azCompileOpt[] = { #if SQLITE_OMIT_CTE "OMIT_CTE", #endif -#if SQLITE_OMIT_DATETIME_FUNCS +#if defined(SQLITE_OMIT_DATETIME_FUNCS) || defined(SQLITE_OMIT_FLOATING_POINT) "OMIT_DATETIME_FUNCS", #endif #if SQLITE_OMIT_DECLTYPE @@ -569,6 +579,9 @@ static const char * const sqlite3azCompileOpt[] = { #if SQLITE_OMIT_DEPRECATED "OMIT_DEPRECATED", #endif +#if SQLITE_OMIT_DESERIALIZE + "OMIT_DESERIALIZE", +#endif #if SQLITE_OMIT_DISKIO "OMIT_DISKIO", #endif @@ -596,6 +609,9 @@ static const char * const sqlite3azCompileOpt[] = { #if SQLITE_OMIT_INTEGRITY_CHECK "OMIT_INTEGRITY_CHECK", #endif +#if SQLITE_OMIT_INTROSPECTION_PRAGMAS + "OMIT_INTROSPECTION_PRAGMAS", +#endif #if SQLITE_OMIT_LIKE_OPTIMIZATION "OMIT_LIKE_OPTIMIZATION", #endif @@ -659,8 +675,10 @@ static const char * const sqlite3azCompileOpt[] = { #if SQLITE_OMIT_TEST_CONTROL "OMIT_TEST_CONTROL", #endif -#if SQLITE_OMIT_TRACE - "OMIT_TRACE", +#ifdef SQLITE_OMIT_TRACE +# if SQLITE_OMIT_TRACE != 1 + "OMIT_TRACE=" CTIMEOPT_VAL(SQLITE_OMIT_TRACE), +# endif #endif #if SQLITE_OMIT_TRIGGER "OMIT_TRIGGER", @@ -695,8 +713,10 @@ static const char * const sqlite3azCompileOpt[] = { #if SQLITE_PERFORMANCE_TRACE "PERFORMANCE_TRACE", #endif -#if SQLITE_POWERSAFE_OVERWRITE - "POWERSAFE_OVERWRITE", +#ifdef SQLITE_POWERSAFE_OVERWRITE +# if SQLITE_POWERSAFE_OVERWRITE != 1 + "POWERSAFE_OVERWRITE=" CTIMEOPT_VAL(SQLITE_POWERSAFE_OVERWRITE), +# endif #endif #if SQLITE_PREFER_PROXY_LOCKING "PREFER_PROXY_LOCKING", @@ -731,7 +751,10 @@ static const char * const sqlite3azCompileOpt[] = { #if SQLITE_SUBSTR_COMPATIBILITY "SUBSTR_COMPATIBILITY", #endif -#if SQLITE_SYSTEM_MALLOC +#if (!defined(SQLITE_WIN32_MALLOC) \ + && !defined(SQLITE_ZERO_MALLOC) \ + && !defined(SQLITE_MEMDEBUG) \ + ) || defined(SQLITE_SYSTEM_MALLOC) "SYSTEM_MALLOC", #endif #if SQLITE_TCL @@ -993,6 +1016,18 @@ SQLITE_PRIVATE const char **sqlite3CompileOptions(int *pnOpt){ # define MSVC_VERSION 0 #endif +/* +** Some C99 functions in "math.h" are only present for MSVC when its version +** is associated with Visual Studio 2013 or higher. +*/ +#ifndef SQLITE_HAVE_C99_MATH_FUNCS +# if MSVC_VERSION==0 || MSVC_VERSION>=1800 +# define SQLITE_HAVE_C99_MATH_FUNCS (1) +# else +# define SQLITE_HAVE_C99_MATH_FUNCS (0) +# endif +#endif + /* Needed for various definitions... */ #if defined(__GNUC__) && !defined(_GNU_SOURCE) # define _GNU_SOURCE @@ -1174,9 +1209,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. 
*/ -#define SQLITE_VERSION "3.33.0" -#define SQLITE_VERSION_NUMBER 3033000 -#define SQLITE_SOURCE_ID "2020-08-14 13:23:32 fca8dc8b578f215a969cd899336378966156154710873e68b3d9ac5881b0alt1" +#define SQLITE_VERSION "3.36.0" +#define SQLITE_VERSION_NUMBER 3036000 +#define SQLITE_SOURCE_ID "2021-06-18 18:36:39 5c9a6c06871cb9fe42814af9c039eb6da5427a6ec28f187af7ebfb62eafa66e5" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -1555,6 +1590,7 @@ SQLITE_API int sqlite3_exec( #define SQLITE_IOERR_COMMIT_ATOMIC (SQLITE_IOERR | (30<<8)) #define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8)) #define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8)) +#define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8)) #define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) #define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8)) #define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) @@ -2178,6 +2214,23 @@ struct sqlite3_io_methods { ** file to the database file, but before the *-shm file is updated to ** record the fact that the pages have been checkpointed. ** +** +**
+** <li>[[SQLITE_FCNTL_EXTERNAL_READER]]
+** The EXPERIMENTAL [SQLITE_FCNTL_EXTERNAL_READER] opcode is used to detect
+** whether or not there is a database client in another process with a
+** wal-mode transaction open on the database. It is only available on unix. The
+** (void*) argument passed with this file-control should be a pointer to a
+** value of type (int). The integer value is set to 1 if the database is a wal
+** mode database and there exists at least one client in another process that
+** currently has an SQL transaction open on the database. It is set to 0 if
+** the database is not a wal-mode db, or if there is no such connection in any
+** other process. This opcode cannot be used to detect transactions opened
+** by clients within the current process, only within other processes.
+**
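[Editor's aside: a minimal sketch, not part of the patch, of how a client might query this opcode through sqlite3_file_control(), the documented entry point for file-control opcodes; it assumes an open sqlite3 *db whose "main" database is in WAL mode:]

#include <sqlite3.h>

/* Returns 1 if another process holds a wal-mode transaction on "main",
   0 if not (or if the database is not in wal mode), and -1 if the VFS
   does not support the opcode (sqlite3_file_control() != SQLITE_OK). */
static int has_external_reader(sqlite3 *db) {
    int external = 0;
    int rc = sqlite3_file_control(db, "main", SQLITE_FCNTL_EXTERNAL_READER, &external);
    return (rc == SQLITE_OK) ? external : -1;
}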
+** <li>[[SQLITE_FCNTL_CKSM_FILE]]
+** Used by the cksmvfs VFS module only.
+**
*/
#define SQLITE_FCNTL_LOCKSTATE 1
#define SQLITE_FCNTL_GET_LOCKPROXYFILE 2
@@ -2217,6 +2270,8 @@ struct sqlite3_io_methods {
#define SQLITE_FCNTL_CKPT_DONE 37
#define SQLITE_FCNTL_RESERVE_BYTES 38
#define SQLITE_FCNTL_CKPT_START 39
+#define SQLITE_FCNTL_EXTERNAL_READER 40
+#define SQLITE_FCNTL_CKSM_FILE 41
/* deprecated names */
#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
@@ -3165,7 +3220,13 @@ struct sqlite3_mem_methods {
** The second parameter is a pointer to an integer into which
** is written 0 or 1 to indicate whether triggers are disabled or enabled
** following this call. The second parameter may be a NULL pointer, in
-** which case the trigger setting is not reported back.
+** which case the trigger setting is not reported back.
+**
+** <p> Originally this option disabled all triggers. ^(However, since
+** SQLite version 3.35.0, TEMP triggers are still allowed even if
+** this option is off. So, in other words, this option now only disables
+** triggers in the main database schema or in the schemas of ATTACH-ed
+** databases.)^
**
** [[SQLITE_DBCONFIG_ENABLE_VIEW]]
** <dt>SQLITE_DBCONFIG_ENABLE_VIEW</dt>
@@ -3176,7 +3237,13 @@ struct sqlite3_mem_methods {
** The second parameter is a pointer to an integer into which
** is written 0 or 1 to indicate whether views are disabled or enabled
** following this call. The second parameter may be a NULL pointer, in
-** which case the view setting is not reported back.
+** which case the view setting is not reported back.
+**
+** <p> Originally this option disabled all views. ^(However, since
+** SQLite version 3.35.0, TEMP views are still allowed even if
+** this option is off. So, in other words, this option now only disables
+** views in the main database schema or in the schemas of ATTACH-ed
+** databases.)^
**
** [[SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER]]
** <dt>SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER</dt>
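[Editor's aside: the two hunks above only reword documentation; the read-back behaviour they describe can be exercised as below. A minimal sketch, not part of the patch; per the SQLite docs for these verbs, a negative first value leaves the setting unchanged while the current state is written through the pointer:]

#include <sqlite3.h>

/* Query whether triggers are currently enabled on this connection
   without changing the setting. Returns 1 = enabled, 0 = disabled. */
static int triggers_enabled(sqlite3 *db) {
    int enabled = 0;
    sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_TRIGGER, -1, &enabled);
    return enabled;
}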
    @@ -4549,6 +4616,7 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** that uses dot-files in place of posix advisory locking. ** file:data.db?mode=readonly ** An error. "readonly" is not a valid option for the "mode" parameter. +** Use "ro" instead: "file:data.db?mode=ro". ** ** ** ^URI hexadecimal escape sequences (%HH) are supported within the path and @@ -4747,7 +4815,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*); ** If the Y parameter to sqlite3_free_filename(Y) is anything other ** than a NULL pointer or a pointer previously acquired from ** sqlite3_create_filename(), then bad things such as heap -** corruption or segfaults may occur. The value Y should be +** corruption or segfaults may occur. The value Y should not be ** used again after sqlite3_free_filename(Y) has been called. This means ** that if the [sqlite3_vfs.xOpen()] method of a VFS has been called using Y, ** then the corresponding [sqlite3_module.xClose() method should also be @@ -5216,6 +5284,15 @@ SQLITE_API const char *sqlite3_normalized_sql(sqlite3_stmt *pStmt); ** [BEGIN] merely sets internal flags, but the [BEGIN|BEGIN IMMEDIATE] and ** [BEGIN|BEGIN EXCLUSIVE] commands do touch the database and so ** sqlite3_stmt_readonly() returns false for those commands. +** +** ^This routine returns false if there is any possibility that the +** statement might change the database file. ^A false return does +** not guarantee that the statement will change the database file. +** ^For example, an UPDATE statement might have a WHERE clause that +** makes it a no-op, but the sqlite3_stmt_readonly() result would still +** be false. ^Similarly, a CREATE TABLE IF NOT EXISTS statement is a +** read-only no-op if the table already exists, but +** sqlite3_stmt_readonly() still returns false for such a statement. */ SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); @@ -5385,18 +5462,22 @@ typedef struct sqlite3_context sqlite3_context; ** contain embedded NULs. The result of expressions involving strings ** with embedded NULs is undefined. ** -** ^The fifth argument to the BLOB and string binding interfaces -** is a destructor used to dispose of the BLOB or -** string after SQLite has finished with it. ^The destructor is called -** to dispose of the BLOB or string even if the call to the bind API fails, -** except the destructor is not called if the third parameter is a NULL -** pointer or the fourth parameter is negative. -** ^If the fifth argument is -** the special value [SQLITE_STATIC], then SQLite assumes that the -** information is in static, unmanaged space and does not need to be freed. -** ^If the fifth argument has the value [SQLITE_TRANSIENT], then -** SQLite makes its own private copy of the data immediately, before -** the sqlite3_bind_*() routine returns. +** ^The fifth argument to the BLOB and string binding interfaces controls +** or indicates the lifetime of the object referenced by the third parameter. +** These three options exist: +** ^ (1) A destructor to dispose of the BLOB or string after SQLite has finished +** with it may be passed. ^It is called to dispose of the BLOB or string even +** if the call to the bind API fails, except the destructor is not called if +** the third parameter is a NULL pointer or the fourth parameter is negative. +** ^ (2) The special constant, [SQLITE_STATIC], may be passsed to indicate that +** the application remains responsible for disposing of the object. 
^In this +** case, the object and the provided pointer to it must remain valid until +** either the prepared statement is finalized or the same SQL parameter is +** bound to something else, whichever occurs sooner. +** ^ (3) The constant, [SQLITE_TRANSIENT], may be passed to indicate that the +** object is to be copied prior to the return from sqlite3_bind_*(). ^The +** object and pointer to it must remain valid until then. ^SQLite will then +** manage the lifetime of its private copy. ** ** ^The sixth argument to sqlite3_bind_text64() must be one of ** [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE] @@ -6138,7 +6219,6 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); ** within VIEWs, TRIGGERs, CHECK constraints, generated column expressions, ** index expressions, or the WHERE clause of partial indexes. ** -** ** For best security, the [SQLITE_DIRECTONLY] flag is recommended for ** all application-defined SQL functions that do not need to be ** used inside of triggers, view, CHECK constraints, or other elements of @@ -6148,7 +6228,6 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); ** a database file to include invocations of the function with parameters ** chosen by the attacker, which the application will then execute when ** the database file is opened and read. -** ** ** ^(The fifth parameter is an arbitrary pointer. The implementation of the ** function can gain access to this pointer using [sqlite3_user_data()].)^ @@ -7237,6 +7316,57 @@ SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName); */ SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName); +/* +** CAPI3REF: Determine the transaction state of a database +** METHOD: sqlite3 +** +** ^The sqlite3_txn_state(D,S) interface returns the current +** [transaction state] of schema S in database connection D. ^If S is NULL, +** then the highest transaction state of any schema on database connection D +** is returned. Transaction states are (in order of lowest to highest): +**
      +**
    1. SQLITE_TXN_NONE +**
    2. SQLITE_TXN_READ +**
    3. SQLITE_TXN_WRITE +**
    +** ^If the S argument to sqlite3_txn_state(D,S) is not the name of +** a valid schema, then -1 is returned. +*/ +SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); + +/* +** CAPI3REF: Allowed return values from [sqlite3_txn_state()] +** KEYWORDS: {transaction state} +** +** These constants define the current transaction state of a database file. +** ^The [sqlite3_txn_state(D,S)] interface returns one of these +** constants in order to describe the transaction state of schema S +** in [database connection] D. +** +**
    +** [[SQLITE_TXN_NONE]]
    SQLITE_TXN_NONE
    +**
    The SQLITE_TXN_NONE state means that no transaction is currently +** pending.
    +** +** [[SQLITE_TXN_READ]]
    SQLITE_TXN_READ
    +**
The SQLITE_TXN_READ state means that the database is currently
+** in a read transaction.  Content has been read from the database file
+** but nothing in the database file has changed.  The transaction state
+** will advance to SQLITE_TXN_WRITE if any changes occur and there are
+** no other conflicting concurrent write transactions.  The transaction
+** state will revert to SQLITE_TXN_NONE following a [ROLLBACK] or
+** [COMMIT].
    +** +** [[SQLITE_TXN_WRITE]]
    SQLITE_TXN_WRITE
    +**
The SQLITE_TXN_WRITE state means that the database is currently
+** in a write transaction.  Content has been written to the database file
+** but has not yet been committed.  The transaction state will change
+** to SQLITE_TXN_NONE at the next [ROLLBACK] or [COMMIT].
+*/
+#define SQLITE_TXN_NONE  0
+#define SQLITE_TXN_READ  1
+#define SQLITE_TXN_WRITE 2
+
 /*
 ** CAPI3REF: Find the next prepared statement
 ** METHOD: sqlite3
@@ -8763,7 +8893,10 @@ SQLITE_API int sqlite3_test_control(int op, ...);
 #define SQLITE_TESTCTRL_RESULT_INTREAL          27
 #define SQLITE_TESTCTRL_PRNG_SEED               28
 #define SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS     29
-#define SQLITE_TESTCTRL_LAST                    29  /* Largest TESTCTRL */
+#define SQLITE_TESTCTRL_SEEK_COUNT              30
+#define SQLITE_TESTCTRL_TRACEFLAGS              31
+#define SQLITE_TESTCTRL_TUNE                    32
+#define SQLITE_TESTCTRL_LAST                    32  /* Largest TESTCTRL */
 
 /*
 ** CAPI3REF: SQL Keyword Checking
@@ -10243,10 +10376,11 @@ SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *);
 ** CAPI3REF: Determine If Virtual Table Column Access Is For UPDATE
 **
 ** If the sqlite3_vtab_nochange(X) routine is called within the [xColumn]
-** method of a [virtual table], then it returns true if and only if the
+** method of a [virtual table], then it might return true if the
 ** column is being fetched as part of an UPDATE operation during which the
-** column value will not change.  Applications might use this to substitute
-** a return value that is less expensive to compute and that the corresponding
+** column value will not change.  The virtual table implementation can use
+** this hint as permission to substitute a return value that is less
+** expensive to compute and that the corresponding
 ** [xUpdate] method understands as a "no-change" value.
 **
 ** If the [xColumn] method calls sqlite3_vtab_nochange() and finds that
@@ -10255,6 +10389,12 @@ SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *);
 ** any of the [sqlite3_result_int|sqlite3_result_xxxxx() interfaces].
 ** In that case, [sqlite3_value_nochange(X)] will return true for the
 ** same column in the [xUpdate] method.
+**
+** The sqlite3_vtab_nochange() routine is an optimization.  Virtual table
+** implementations should continue to give a correct answer even if the
+** sqlite3_vtab_nochange() interface were to always return false.  In the
+** current implementation, the sqlite3_vtab_nochange() interface always
+** returns false for the enhanced [UPDATE FROM] statement.
 */
 SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*);
@@ -10396,6 +10536,7 @@ SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
 /*
 ** CAPI3REF: Flush caches to disk mid-transaction
+** METHOD: sqlite3
 **
 ** ^If a write-transaction is open on [database connection] D when the
 ** [sqlite3_db_cacheflush(D)] interface is invoked, any dirty
@@ -10428,6 +10569,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
 /*
 ** CAPI3REF: The pre-update hook.
+** METHOD: sqlite3
 **
 ** ^These interfaces are only available if SQLite is compiled using the
 ** [SQLITE_ENABLE_PREUPDATE_HOOK] compile-time option.
@@ -10468,7 +10610,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
 ** seventh parameter is the final rowid value of the row being inserted
 ** or updated.  The value of the seventh parameter passed to the callback
 ** function is not defined for operations on WITHOUT ROWID tables, or for
-** INSERT operations on rowid tables.
+** DELETE operations on rowid tables.
 **
 ** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()],
 ** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces
@@ -10506,6 +10648,15 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
 ** triggers; or 2 for changes resulting from triggers called by top-level
 ** triggers; and so forth.
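The sqlite3_txn_state() interface and the three SQLITE_TXN_* constants added above are easy to exercise from application code. A minimal sketch, assuming an open sqlite3 *db; the helper name is illustrative:

    #include <stdio.h>
    #include "sqlite3.h"

    /* Sketch: report the transaction state of the "main" schema. */
    static void show_txn_state(sqlite3 *db){
      switch( sqlite3_txn_state(db, "main") ){
        case SQLITE_TXN_NONE:  printf("no transaction pending\n");  break;
        case SQLITE_TXN_READ:  printf("read transaction open\n");   break;
        case SQLITE_TXN_WRITE: printf("write transaction open\n");  break;
        default:               printf("not a valid schema name\n"); break; /* -1 */
      }
    }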
 **
+** When the [sqlite3_blob_write()] API is used to update a blob column,
+** the pre-update hook is invoked with SQLITE_DELETE.  This is because
+** in this case the new values are not available.  In this case, when a
+** callback made with op==SQLITE_DELETE is actually a write using the
+** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns
+** the index of the column being written.  In other cases, where the
+** pre-update hook is being invoked for some other reason, including a
+** regular DELETE, sqlite3_preupdate_blobwrite() returns -1.
+**
 ** See also:  [sqlite3_update_hook()]
 */
 #if defined(SQLITE_ENABLE_PREUPDATE_HOOK)
@@ -10526,10 +10677,12 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **);
 SQLITE_API int sqlite3_preupdate_count(sqlite3 *);
 SQLITE_API int sqlite3_preupdate_depth(sqlite3 *);
 SQLITE_API int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **);
+SQLITE_API int sqlite3_preupdate_blobwrite(sqlite3 *);
 #endif
 
 /*
 ** CAPI3REF: Low-level system error code
+** METHOD: sqlite3
 **
 ** ^Attempt to return the underlying operating system error code or error
 ** number that caused the most recent I/O error or failure to open a file.
@@ -10763,8 +10916,8 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c
 ** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory
 ** allocation error occurs.
 **
-** This interface is only available if SQLite is compiled with the
-** [SQLITE_ENABLE_DESERIALIZE] option.
+** This interface is omitted if SQLite is compiled with the
+** [SQLITE_OMIT_DESERIALIZE] option.
 */
 SQLITE_API unsigned char *sqlite3_serialize(
   sqlite3 *db,           /* The database connection */
@@ -10815,8 +10968,8 @@ SQLITE_API unsigned char *sqlite3_serialize(
 ** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then
 ** [sqlite3_free()] is invoked on argument P prior to returning.
 **
-** This interface is only available if SQLite is compiled with the
-** [SQLITE_ENABLE_DESERIALIZE] option.
+** This interface is omitted if SQLite is compiled with the
+** [SQLITE_OMIT_DESERIALIZE] option.
 */
 SQLITE_API int sqlite3_deserialize(
   sqlite3 *db,            /* The database connection */
@@ -11065,6 +11218,38 @@ SQLITE_API int sqlite3session_create(
 */
 SQLITE_API void sqlite3session_delete(sqlite3_session *pSession);
 
+/*
+** CAPI3REF: Configure a Session Object
+** METHOD: sqlite3_session
+**
+** This method is used to configure a session object after it has been
+** created.  At present the only valid value for the second parameter is
+** [SQLITE_SESSION_OBJCONFIG_SIZE].
+**
+** Arguments for sqlite3session_object_config()
+**
+** The following values may be passed as the 4th parameter to
+** sqlite3session_object_config().
+**
+**
    SQLITE_SESSION_OBJCONFIG_SIZE
    +** This option is used to set, clear or query the flag that enables +** the [sqlite3session_changeset_size()] API. Because it imposes some +** computational overhead, this API is disabled by default. Argument +** pArg must point to a value of type (int). If the value is initially +** 0, then the sqlite3session_changeset_size() API is disabled. If it +** is greater than 0, then the same API is enabled. Or, if the initial +** value is less than zero, no change is made. In all cases the (int) +** variable is set to 1 if the sqlite3session_changeset_size() API is +** enabled following the current call, or 0 otherwise. +** +** It is an error (SQLITE_MISUSE) to attempt to modify this setting after +** the first table has been attached to the session object. +*/ +SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); + +/* +*/ +#define SQLITE_SESSION_OBJCONFIG_SIZE 1 /* ** CAPI3REF: Enable Or Disable A Session Object @@ -11309,6 +11494,22 @@ SQLITE_API int sqlite3session_changeset( void **ppChangeset /* OUT: Buffer containing changeset */ ); +/* +** CAPI3REF: Return An Upper-limit For The Size Of The Changeset +** METHOD: sqlite3_session +** +** By default, this function always returns 0. For it to return +** a useful result, the sqlite3_session object must have been configured +** to enable this API using sqlite3session_object_config() with the +** SQLITE_SESSION_OBJCONFIG_SIZE verb. +** +** When enabled, this function returns an upper limit, in bytes, for the size +** of the changeset that might be produced if sqlite3session_changeset() were +** called. The final changeset size might be equal to or smaller than the +** size in bytes returned by this function. +*/ +SQLITE_API sqlite3_int64 sqlite3session_changeset_size(sqlite3_session *pSession); + /* ** CAPI3REF: Load The Difference Between Tables Into A Session ** METHOD: sqlite3_session @@ -11426,6 +11627,14 @@ SQLITE_API int sqlite3session_patchset( */ SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession); +/* +** CAPI3REF: Query for the amount of heap memory used by a session object. +** +** This API returns the total amount of heap memory in bytes currently +** used by the session object passed as the only argument. +*/ +SQLITE_API sqlite3_int64 sqlite3session_memory_used(sqlite3_session *pSession); + /* ** CAPI3REF: Create An Iterator To Traverse A Changeset ** CONSTRUCTOR: sqlite3_changeset_iter @@ -11528,18 +11737,23 @@ SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter); ** call to [sqlite3changeset_next()] must have returned [SQLITE_ROW]. If this ** is not the case, this function returns [SQLITE_MISUSE]. ** -** If argument pzTab is not NULL, then *pzTab is set to point to a -** nul-terminated utf-8 encoded string containing the name of the table -** affected by the current change. The buffer remains valid until either -** sqlite3changeset_next() is called on the iterator or until the -** conflict-handler function returns. If pnCol is not NULL, then *pnCol is -** set to the number of columns in the table affected by the change. If -** pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change +** Arguments pOp, pnCol and pzTab may not be NULL. 
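Returning to the object-configuration verb documented just above: it must be applied before any table is attached to the session. A minimal sketch of enabling size tracking and then querying the bound, assuming an open sqlite3 *db and a table named t1 (both illustrative); error checking omitted:

    #include "sqlite3.h"

    /* Sketch: enable changeset-size tracking, attach a table, and obtain
    ** an upper bound for the eventual changeset. */
    static sqlite3_int64 changeset_bound(sqlite3 *db){
      sqlite3_session *pSession = 0;
      int enable = 1;   /* in: >0 enables; out: 1 if enabled after the call */
      sqlite3session_create(db, "main", &pSession);
      sqlite3session_object_config(pSession,
                                   SQLITE_SESSION_OBJCONFIG_SIZE, &enable);
      sqlite3session_attach(pSession, "t1");
      /* ... perform INSERT/UPDATE/DELETE statements on t1 here ... */
      sqlite3_int64 nBound = sqlite3session_changeset_size(pSession);
      sqlite3session_delete(pSession);
      return nBound;    /* the final changeset may be equal or smaller */
    }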
Upon return, three +** outputs are set through these pointers: +** +** *pOp is set to one of [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], +** depending on the type of change that the iterator currently points to; +** +** *pnCol is set to the number of columns in the table affected by the change; and +** +** *pzTab is set to point to a nul-terminated utf-8 encoded string containing +** the name of the table affected by the current change. The buffer remains +** valid until either sqlite3changeset_next() is called on the iterator +** or until the conflict-handler function returns. +** +** If pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change ** is an indirect change, or false (0) otherwise. See the documentation for ** [sqlite3session_indirect()] for a description of direct and indirect -** changes. Finally, if pOp is not NULL, then *pOp is set to one of -** [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], depending on the -** type of change that the iterator currently points to. +** changes. ** ** If no error occurs, SQLITE_OK is returned. If an error does occur, an ** SQLite error code is returned. The values of the output variables may not @@ -13300,11 +13514,7 @@ struct fts5_api { ** The maximum depth of an expression tree. This is limited to ** some extent by SQLITE_MAX_SQL_LENGTH. But sometime you might ** want to place more severe limits on the complexity of an -** expression. -** -** A value of 0 used to mean that the limit was not enforced. -** But that is no longer true. The limit is now strictly enforced -** at all times. +** expression. A value of 0 means that there is no limit. */ #ifndef SQLITE_MAX_EXPR_DEPTH # define SQLITE_MAX_EXPR_DEPTH 1000 @@ -13472,7 +13682,8 @@ struct fts5_api { #ifndef __has_extension # define __has_extension(x) 0 /* compatibility with non-clang compilers */ #endif -#if GCC_VERSION>=4007000 || __has_extension(c_atomic) +#if GCC_VERSION>=4007000 || \ + (__has_extension(c_atomic) && __has_extension(c_atomic_store_n)) # define AtomicLoad(PTR) __atomic_load_n((PTR),__ATOMIC_RELAXED) # define AtomicStore(PTR,VAL) __atomic_store_n((PTR),(VAL),__ATOMIC_RELAXED) #else @@ -14039,90 +14250,93 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #define TK_TIES 94 #define TK_GENERATED 95 #define TK_ALWAYS 96 -#define TK_REINDEX 97 -#define TK_RENAME 98 -#define TK_CTIME_KW 99 -#define TK_ANY 100 -#define TK_BITAND 101 -#define TK_BITOR 102 -#define TK_LSHIFT 103 -#define TK_RSHIFT 104 -#define TK_PLUS 105 -#define TK_MINUS 106 -#define TK_STAR 107 -#define TK_SLASH 108 -#define TK_REM 109 -#define TK_CONCAT 110 -#define TK_COLLATE 111 -#define TK_BITNOT 112 -#define TK_ON 113 -#define TK_INDEXED 114 -#define TK_STRING 115 -#define TK_JOIN_KW 116 -#define TK_CONSTRAINT 117 -#define TK_DEFAULT 118 -#define TK_NULL 119 -#define TK_PRIMARY 120 -#define TK_UNIQUE 121 -#define TK_CHECK 122 -#define TK_REFERENCES 123 -#define TK_AUTOINCR 124 -#define TK_INSERT 125 -#define TK_DELETE 126 -#define TK_UPDATE 127 -#define TK_SET 128 -#define TK_DEFERRABLE 129 -#define TK_FOREIGN 130 -#define TK_DROP 131 -#define TK_UNION 132 -#define TK_ALL 133 -#define TK_EXCEPT 134 -#define TK_INTERSECT 135 -#define TK_SELECT 136 -#define TK_VALUES 137 -#define TK_DISTINCT 138 -#define TK_DOT 139 -#define TK_FROM 140 -#define TK_JOIN 141 -#define TK_USING 142 -#define TK_ORDER 143 -#define TK_GROUP 144 -#define TK_HAVING 145 -#define TK_LIMIT 146 -#define TK_WHERE 147 -#define TK_INTO 148 -#define TK_NOTHING 149 -#define TK_FLOAT 150 -#define TK_BLOB 151 
-#define TK_INTEGER 152 -#define TK_VARIABLE 153 -#define TK_CASE 154 -#define TK_WHEN 155 -#define TK_THEN 156 -#define TK_ELSE 157 -#define TK_INDEX 158 -#define TK_ALTER 159 -#define TK_ADD 160 -#define TK_WINDOW 161 -#define TK_OVER 162 -#define TK_FILTER 163 -#define TK_COLUMN 164 -#define TK_AGG_FUNCTION 165 -#define TK_AGG_COLUMN 166 -#define TK_TRUEFALSE 167 -#define TK_ISNOT 168 -#define TK_FUNCTION 169 -#define TK_UMINUS 170 -#define TK_UPLUS 171 -#define TK_TRUTH 172 -#define TK_REGISTER 173 -#define TK_VECTOR 174 -#define TK_SELECT_COLUMN 175 -#define TK_IF_NULL_ROW 176 -#define TK_ASTERISK 177 -#define TK_SPAN 178 -#define TK_SPACE 179 -#define TK_ILLEGAL 180 +#define TK_MATERIALIZED 97 +#define TK_REINDEX 98 +#define TK_RENAME 99 +#define TK_CTIME_KW 100 +#define TK_ANY 101 +#define TK_BITAND 102 +#define TK_BITOR 103 +#define TK_LSHIFT 104 +#define TK_RSHIFT 105 +#define TK_PLUS 106 +#define TK_MINUS 107 +#define TK_STAR 108 +#define TK_SLASH 109 +#define TK_REM 110 +#define TK_CONCAT 111 +#define TK_COLLATE 112 +#define TK_BITNOT 113 +#define TK_ON 114 +#define TK_INDEXED 115 +#define TK_STRING 116 +#define TK_JOIN_KW 117 +#define TK_CONSTRAINT 118 +#define TK_DEFAULT 119 +#define TK_NULL 120 +#define TK_PRIMARY 121 +#define TK_UNIQUE 122 +#define TK_CHECK 123 +#define TK_REFERENCES 124 +#define TK_AUTOINCR 125 +#define TK_INSERT 126 +#define TK_DELETE 127 +#define TK_UPDATE 128 +#define TK_SET 129 +#define TK_DEFERRABLE 130 +#define TK_FOREIGN 131 +#define TK_DROP 132 +#define TK_UNION 133 +#define TK_ALL 134 +#define TK_EXCEPT 135 +#define TK_INTERSECT 136 +#define TK_SELECT 137 +#define TK_VALUES 138 +#define TK_DISTINCT 139 +#define TK_DOT 140 +#define TK_FROM 141 +#define TK_JOIN 142 +#define TK_USING 143 +#define TK_ORDER 144 +#define TK_GROUP 145 +#define TK_HAVING 146 +#define TK_LIMIT 147 +#define TK_WHERE 148 +#define TK_RETURNING 149 +#define TK_INTO 150 +#define TK_NOTHING 151 +#define TK_FLOAT 152 +#define TK_BLOB 153 +#define TK_INTEGER 154 +#define TK_VARIABLE 155 +#define TK_CASE 156 +#define TK_WHEN 157 +#define TK_THEN 158 +#define TK_ELSE 159 +#define TK_INDEX 160 +#define TK_ALTER 161 +#define TK_ADD 162 +#define TK_WINDOW 163 +#define TK_OVER 164 +#define TK_FILTER 165 +#define TK_COLUMN 166 +#define TK_AGG_FUNCTION 167 +#define TK_AGG_COLUMN 168 +#define TK_TRUEFALSE 169 +#define TK_ISNOT 170 +#define TK_FUNCTION 171 +#define TK_UMINUS 172 +#define TK_UPLUS 173 +#define TK_TRUTH 174 +#define TK_REGISTER 175 +#define TK_VECTOR 176 +#define TK_SELECT_COLUMN 177 +#define TK_IF_NULL_ROW 178 +#define TK_ASTERISK 179 +#define TK_SPAN 180 +#define TK_ERROR 181 +#define TK_SPACE 182 +#define TK_ILLEGAL 183 /************** End of parse.h ***********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ @@ -14538,15 +14752,14 @@ typedef INT16_TYPE LogEst; ** SELECTTRACE_ENABLED will be either 1 or 0 depending on whether or not ** the Select query generator tracing logic is turned on. 
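Among the renumbered tokens, TK_MATERIALIZED is new; it backs the MATERIALIZED/NOT MATERIALIZED hints on common table expressions. A SQL-level sketch, with a made-up table name:

    /* Sketch: the MATERIALIZED hint asks for the CTE to be computed once
    ** into a transient table rather than inlined.  Table t1 is assumed. */
    const char *zSql =
      "WITH c(x) AS MATERIALIZED (SELECT a FROM t1) "
      "SELECT x FROM c WHERE x > 10;";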
*/ -#if defined(SQLITE_ENABLE_SELECTTRACE) -# define SELECTTRACE_ENABLED 1 -#else -# define SELECTTRACE_ENABLED 0 +#if !defined(SQLITE_AMALGAMATION) +SQLITE_PRIVATE u32 sqlite3SelectTrace; #endif -#if defined(SQLITE_ENABLE_SELECTTRACE) +#if defined(SQLITE_DEBUG) \ + && (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_SELECTTRACE)) # define SELECTTRACE_ENABLED 1 # define SELECTTRACE(K,P,S,X) \ - if(sqlite3_unsupported_selecttrace&(K)) \ + if(sqlite3SelectTrace&(K)) \ sqlite3DebugPrintf("%u/%d/%p: ",(S)->selId,(P)->addrExplain,(S)),\ sqlite3DebugPrintf X #else @@ -14554,6 +14767,19 @@ typedef INT16_TYPE LogEst; # define SELECTTRACE_ENABLED 0 #endif +/* +** Macros for "wheretrace" +*/ +SQLITE_PRIVATE u32 sqlite3WhereTrace; +#if defined(SQLITE_DEBUG) \ + && (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_WHERETRACE)) +# define WHERETRACE(K,X) if(sqlite3WhereTrace&(K)) sqlite3DebugPrintf X +# define WHERETRACE_ENABLED 1 +#else +# define WHERETRACE(K,X) +#endif + + /* ** An instance of the following structure is used to store the busy-handler ** callback for a given sqlite handle. @@ -14665,7 +14891,10 @@ typedef struct AutoincInfo AutoincInfo; typedef struct Bitvec Bitvec; typedef struct CollSeq CollSeq; typedef struct Column Column; +typedef struct Cte Cte; +typedef struct CteUse CteUse; typedef struct Db Db; +typedef struct DbFixer DbFixer; typedef struct Schema Schema; typedef struct Expr Expr; typedef struct ExprList ExprList; @@ -14683,14 +14912,17 @@ typedef struct LookasideSlot LookasideSlot; typedef struct Module Module; typedef struct NameContext NameContext; typedef struct Parse Parse; +typedef struct ParseCleanup ParseCleanup; typedef struct PreUpdate PreUpdate; typedef struct PrintfArguments PrintfArguments; typedef struct RenameToken RenameToken; +typedef struct Returning Returning; typedef struct RowSet RowSet; typedef struct Savepoint Savepoint; typedef struct Select Select; typedef struct SQLiteThread SQLiteThread; typedef struct SelectDest SelectDest; +typedef struct SrcItem SrcItem; typedef struct SrcList SrcList; typedef struct sqlite3_str StrAccum; /* Internal alias for sqlite3_str */ typedef struct Table Table; @@ -15085,16 +15317,24 @@ SQLITE_PRIVATE int sqlite3BtreeCommit(Btree*); SQLITE_PRIVATE int sqlite3BtreeRollback(Btree*,int,int); SQLITE_PRIVATE int sqlite3BtreeBeginStmt(Btree*,int); SQLITE_PRIVATE int sqlite3BtreeCreateTable(Btree*, Pgno*, int flags); -SQLITE_PRIVATE int sqlite3BtreeIsInTrans(Btree*); -SQLITE_PRIVATE int sqlite3BtreeIsInReadTrans(Btree*); +SQLITE_PRIVATE int sqlite3BtreeTxnState(Btree*); SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree*); + SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *, int, void(*)(void *)); SQLITE_PRIVATE int sqlite3BtreeSchemaLocked(Btree *pBtree); #ifndef SQLITE_OMIT_SHARED_CACHE SQLITE_PRIVATE int sqlite3BtreeLockTable(Btree *pBtree, int iTab, u8 isWriteLock); #endif + +/* Savepoints are named, nestable SQL transactions mostly implemented */ +/* in vdbe.c and pager.c See https://sqlite.org/lang_savepoint.html */ SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *, int, int); +/* "Checkpoint" only refers to WAL. 
See https://sqlite.org/wal.html#ckpt */
+#ifndef SQLITE_OMIT_WAL
+SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree*, int, int *, int *);
+#endif
+
 SQLITE_PRIVATE const char *sqlite3BtreeGetFilename(Btree *);
 SQLITE_PRIVATE const char *sqlite3BtreeGetJournalname(Btree *);
 SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *, Btree *);
@@ -15254,6 +15494,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor*, u8 flags);
 #define BTREE_SAVEPOSITION 0x02  /* Leave cursor pointing at NEXT or PREV */
 #define BTREE_AUXDELETE    0x04  /* not the primary delete operation */
 #define BTREE_APPEND       0x08  /* Insert is likely an append */
+#define BTREE_PREFORMAT    0x80  /* Inserted data is a preformatted cell */
 
 /* An instance of the BtreePayload object describes the content of a single
 ** entry in either an index or table btree.
@@ -15331,6 +15572,12 @@ SQLITE_PRIVATE int sqlite3BtreeCursorHasHint(BtCursor*, unsigned int mask);
 SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *pBt);
 SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void);
 
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE sqlite3_uint64 sqlite3BtreeSeekCount(Btree*);
+#else
+# define sqlite3BtreeSeekCount(X) 0
+#endif
+
 #ifndef NDEBUG
 SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor*);
 #endif
@@ -15347,6 +15594,8 @@ SQLITE_PRIVATE void sqlite3BtreeCursorList(Btree*);
 SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree*, int, int *, int *);
 #endif
 
+SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor*, BtCursor*, i64);
+
 /*
 ** If we are not using shared cache, then there is no need to
 ** use mutexes to access the BtShared structures.  So make the
@@ -15624,7 +15873,7 @@ typedef struct VdbeOpList VdbeOpList;
 #define OP_Le             55 /* jump, same as TK_LE, synopsis: IF r[P3]<=r[P1] */
 #define OP_Lt             56 /* jump, same as TK_LT, synopsis: IF r[P3]<r[P1] */
 #define OP_Ge             57 /* jump, same as TK_GE, synopsis: IF r[P3]>=r[P1] */
-#define OP_ElseNotEq      58 /* jump, same as TK_ESCAPE */
+#define OP_ElseEq         58 /* jump, same as TK_ESCAPE */
 #define OP_DecrJumpZero   59 /* jump, synopsis: if (--r[P1])==0 goto P2 */
 #define OP_IncrVacuum     60 /* jump */
 #define OP_VNext          61 /* jump */
@@ -15646,102 +15895,106 @@ typedef struct VdbeOpList VdbeOpList;
 #define OP_Copy           77 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */
 #define OP_SCopy          78 /* synopsis: r[P2]=r[P1] */
 #define OP_IntCopy        79 /* synopsis: r[P2]=r[P1] */
-#define OP_ResultRow      80 /* synopsis: output=r[P1@P2] */
-#define OP_CollSeq        81
-#define OP_AddImm         82 /* synopsis: r[P1]=r[P1]+P2 */
-#define OP_RealAffinity   83
-#define OP_Cast           84 /* synopsis: affinity(r[P1]) */
-#define OP_Permutation    85
-#define OP_Compare        86 /* synopsis: r[P1@P3] <-> r[P2@P3] */
-#define OP_IsTrue         87 /* synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 */
-#define OP_Offset         88 /* synopsis: r[P3] = sqlite_offset(P1) */
-#define OP_Column         89 /* synopsis: r[P3]=PX */
-#define OP_Affinity       90 /* synopsis: affinity(r[P1@P2]) */
-#define OP_MakeRecord     91 /* synopsis: r[P3]=mkrec(r[P1@P2]) */
-#define OP_Count          92 /* synopsis: r[P2]=count() */
-#define OP_ReadCookie     93
-#define OP_SetCookie      94
-#define OP_ReopenIdx      95 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenRead       96 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenWrite      97 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenDup        98
-#define OP_OpenAutoindex  99 /* synopsis: nColumn=P2 */
-#define OP_OpenEphemeral 100 /* synopsis: nColumn=P2 */
-#define OP_BitAnd        101 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */
-#define OP_BitOr         102 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */
-#define OP_ShiftLeft     103 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */
-#define OP_ShiftRight    104 /* same as TK_RSHIFT, synopsis: r[P3]=r[P2]>>r[P1] */
-#define OP_Add           105 /* same as TK_PLUS, synopsis:
r[P3]=r[P1]+r[P2] */ -#define OP_Subtract 106 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */ -#define OP_Multiply 107 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */ -#define OP_Divide 108 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */ -#define OP_Remainder 109 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */ -#define OP_Concat 110 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */ -#define OP_SorterOpen 111 -#define OP_BitNot 112 /* same as TK_BITNOT, synopsis: r[P2]= ~r[P1] */ -#define OP_SequenceTest 113 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */ -#define OP_OpenPseudo 114 /* synopsis: P3 columns in r[P2] */ -#define OP_String8 115 /* same as TK_STRING, synopsis: r[P2]='P4' */ -#define OP_Close 116 -#define OP_ColumnsUsed 117 -#define OP_SeekHit 118 /* synopsis: seekHit=P2 */ -#define OP_Sequence 119 /* synopsis: r[P2]=cursor[P1].ctr++ */ -#define OP_NewRowid 120 /* synopsis: r[P2]=rowid */ -#define OP_Insert 121 /* synopsis: intkey=r[P3] data=r[P2] */ -#define OP_Delete 122 -#define OP_ResetCount 123 -#define OP_SorterCompare 124 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */ -#define OP_SorterData 125 /* synopsis: r[P2]=data */ -#define OP_RowData 126 /* synopsis: r[P2]=data */ -#define OP_Rowid 127 /* synopsis: r[P2]=rowid */ -#define OP_NullRow 128 -#define OP_SeekEnd 129 -#define OP_IdxInsert 130 /* synopsis: key=r[P2] */ -#define OP_SorterInsert 131 /* synopsis: key=r[P2] */ -#define OP_IdxDelete 132 /* synopsis: key=r[P2@P3] */ -#define OP_DeferredSeek 133 /* synopsis: Move P3 to P1.rowid if needed */ -#define OP_IdxRowid 134 /* synopsis: r[P2]=rowid */ -#define OP_FinishSeek 135 -#define OP_Destroy 136 -#define OP_Clear 137 -#define OP_ResetSorter 138 -#define OP_CreateBtree 139 /* synopsis: r[P2]=root iDb=P1 flags=P3 */ -#define OP_SqlExec 140 -#define OP_ParseSchema 141 -#define OP_LoadAnalysis 142 -#define OP_DropTable 143 -#define OP_DropIndex 144 -#define OP_DropTrigger 145 -#define OP_IntegrityCk 146 -#define OP_RowSetAdd 147 /* synopsis: rowset(P1)=r[P2] */ -#define OP_Param 148 -#define OP_FkCounter 149 /* synopsis: fkctr[P1]+=P2 */ -#define OP_Real 150 /* same as TK_FLOAT, synopsis: r[P2]=P4 */ -#define OP_MemMax 151 /* synopsis: r[P1]=max(r[P1],r[P2]) */ -#define OP_OffsetLimit 152 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */ -#define OP_AggInverse 153 /* synopsis: accum=r[P3] inverse(r[P2@P5]) */ -#define OP_AggStep 154 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_AggStep1 155 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_AggValue 156 /* synopsis: r[P3]=value N=P2 */ -#define OP_AggFinal 157 /* synopsis: accum=r[P1] N=P2 */ -#define OP_Expire 158 -#define OP_CursorLock 159 -#define OP_CursorUnlock 160 -#define OP_TableLock 161 /* synopsis: iDb=P1 root=P2 write=P3 */ -#define OP_VBegin 162 -#define OP_VCreate 163 -#define OP_VDestroy 164 -#define OP_VOpen 165 -#define OP_VColumn 166 /* synopsis: r[P3]=vcolumn(P2) */ -#define OP_VRename 167 -#define OP_Pagecount 168 -#define OP_MaxPgcnt 169 -#define OP_Trace 170 -#define OP_CursorHint 171 -#define OP_ReleaseReg 172 /* synopsis: release r[P1@P2] mask P3 */ -#define OP_Noop 173 -#define OP_Explain 174 -#define OP_Abortable 175 +#define OP_ChngCntRow 80 /* synopsis: output=r[P1] */ +#define OP_ResultRow 81 /* synopsis: output=r[P1@P2] */ +#define OP_CollSeq 82 +#define OP_AddImm 83 /* synopsis: r[P1]=r[P1]+P2 */ +#define OP_RealAffinity 84 +#define OP_Cast 85 /* synopsis: affinity(r[P1]) */ +#define OP_Permutation 86 +#define OP_Compare 87 /* synopsis: 
r[P1@P3] <-> r[P2@P3] */
+#define OP_IsTrue         88 /* synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 */
+#define OP_ZeroOrNull     89 /* synopsis: r[P2] = 0 OR NULL */
+#define OP_Offset         90 /* synopsis: r[P3] = sqlite_offset(P1) */
+#define OP_Column         91 /* synopsis: r[P3]=PX */
+#define OP_Affinity       92 /* synopsis: affinity(r[P1@P2]) */
+#define OP_MakeRecord     93 /* synopsis: r[P3]=mkrec(r[P1@P2]) */
+#define OP_Count          94 /* synopsis: r[P2]=count() */
+#define OP_ReadCookie     95
+#define OP_SetCookie      96
+#define OP_ReopenIdx      97 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenRead       98 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenWrite      99 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenDup       100
+#define OP_OpenAutoindex 101 /* synopsis: nColumn=P2 */
+#define OP_BitAnd        102 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */
+#define OP_BitOr         103 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */
+#define OP_ShiftLeft     104 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */
+#define OP_ShiftRight    105 /* same as TK_RSHIFT, synopsis: r[P3]=r[P2]>>r[P1] */
+#define OP_Add           106 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */
+#define OP_Subtract      107 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */
+#define OP_Multiply      108 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */
+#define OP_Divide        109 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */
+#define OP_Remainder     110 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */
+#define OP_Concat        111 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */
+#define OP_OpenEphemeral 112 /* synopsis: nColumn=P2 */
+#define OP_BitNot        113 /* same as TK_BITNOT, synopsis: r[P2]= ~r[P1] */
+#define OP_SorterOpen    114
+#define OP_SequenceTest  115 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */
+#define OP_String8       116 /* same as TK_STRING, synopsis: r[P2]='P4' */
+#define OP_OpenPseudo    117 /* synopsis: P3 columns in r[P2] */
+#define OP_Close         118
+#define OP_ColumnsUsed   119
+#define OP_SeekScan      120 /* synopsis: Scan-ahead up to P1 rows */
+#define OP_SeekHit       121 /* synopsis: set P2<=seekHit<=P3 */
+#define OP_Sequence      122 /* synopsis: r[P2]=cursor[P1].ctr++ */
+#define OP_NewRowid      123 /* synopsis: r[P2]=rowid */
+#define OP_Insert        124 /* synopsis: intkey=r[P3] data=r[P2] */
+#define OP_RowCell       125
+#define OP_Delete        126
+#define OP_ResetCount    127
+#define OP_SorterCompare 128 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */
+#define OP_SorterData    129 /* synopsis: r[P2]=data */
+#define OP_RowData       130 /* synopsis: r[P2]=data */
+#define OP_Rowid         131 /* synopsis: r[P2]=rowid */
+#define OP_NullRow       132
+#define OP_SeekEnd       133
+#define OP_IdxInsert     134 /* synopsis: key=r[P2] */
+#define OP_SorterInsert  135 /* synopsis: key=r[P2] */
+#define OP_IdxDelete     136 /* synopsis: key=r[P2@P3] */
+#define OP_DeferredSeek  137 /* synopsis: Move P3 to P1.rowid if needed */
+#define OP_IdxRowid      138 /* synopsis: r[P2]=rowid */
+#define OP_FinishSeek    139
+#define OP_Destroy       140
+#define OP_Clear         141
+#define OP_ResetSorter   142
+#define OP_CreateBtree   143 /* synopsis: r[P2]=root iDb=P1 flags=P3 */
+#define OP_SqlExec       144
+#define OP_ParseSchema   145
+#define OP_LoadAnalysis  146
+#define OP_DropTable     147
+#define OP_DropIndex     148
+#define OP_DropTrigger   149
+#define OP_IntegrityCk   150
+#define OP_RowSetAdd     151 /* synopsis: rowset(P1)=r[P2] */
+#define OP_Real          152 /* same as TK_FLOAT, synopsis: r[P2]=P4 */
+#define OP_Param         153
+#define OP_FkCounter     154 /* synopsis: fkctr[P1]+=P2 */
+#define OP_MemMax        155 /* synopsis: r[P1]=max(r[P1],r[P2]) */
+#define OP_OffsetLimit   156 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */
+#define OP_AggInverse    157 /*
synopsis: accum=r[P3] inverse(r[P2@P5]) */ +#define OP_AggStep 158 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggStep1 159 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggValue 160 /* synopsis: r[P3]=value N=P2 */ +#define OP_AggFinal 161 /* synopsis: accum=r[P1] N=P2 */ +#define OP_Expire 162 +#define OP_CursorLock 163 +#define OP_CursorUnlock 164 +#define OP_TableLock 165 /* synopsis: iDb=P1 root=P2 write=P3 */ +#define OP_VBegin 166 +#define OP_VCreate 167 +#define OP_VDestroy 168 +#define OP_VOpen 169 +#define OP_VColumn 170 /* synopsis: r[P3]=vcolumn(P2) */ +#define OP_VRename 171 +#define OP_Pagecount 172 +#define OP_MaxPgcnt 173 +#define OP_Trace 174 +#define OP_CursorHint 175 +#define OP_ReleaseReg 176 /* synopsis: release r[P1@P2] mask P3 */ +#define OP_Noop 177 +#define OP_Explain 178 +#define OP_Abortable 179 /* Properties such as "out2" or "jump" that are specified in ** comments following the "case" for each opcode in the vdbe.c @@ -15764,21 +16017,21 @@ typedef struct VdbeOpList VdbeOpList; /* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x01, 0x01, 0x01, 0x00,\ /* 64 */ 0x00, 0x02, 0x02, 0x08, 0x00, 0x10, 0x10, 0x10,\ /* 72 */ 0x10, 0x00, 0x10, 0x10, 0x00, 0x00, 0x10, 0x10,\ -/* 80 */ 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00, 0x12,\ -/* 88 */ 0x20, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\ -/* 96 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x26, 0x26,\ -/* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x00,\ -/* 112 */ 0x12, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x10,\ -/* 120 */ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,\ -/* 128 */ 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x10, 0x00,\ -/* 136 */ 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,\ -/* 144 */ 0x00, 0x00, 0x00, 0x06, 0x10, 0x00, 0x10, 0x04,\ -/* 152 */ 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 80 */ 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00,\ +/* 88 */ 0x12, 0x1e, 0x20, 0x00, 0x00, 0x00, 0x10, 0x10,\ +/* 96 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x26,\ +/* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,\ +/* 112 */ 0x00, 0x12, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,\ +/* 120 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00,\ +/* 128 */ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x04, 0x04,\ +/* 136 */ 0x00, 0x00, 0x10, 0x00, 0x10, 0x00, 0x00, 0x10,\ +/* 144 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,\ +/* 152 */ 0x10, 0x10, 0x00, 0x04, 0x1a, 0x00, 0x00, 0x00,\ /* 160 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 168 */ 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -} +/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\ +/* 176 */ 0x00, 0x00, 0x00, 0x00,} -/* The sqlite3P2Values() routine is able to run faster if it knows +/* The resolve3P2Values() routine is able to run faster if it knows ** the value of the largest JUMP opcode. 
The smaller the maximum ** JUMP opcode the better, so the mkopcodeh.tcl script that ** generated this include file strives to group all JUMP opcodes @@ -15844,7 +16097,7 @@ SQLITE_PRIVATE void sqlite3ExplainBreakpoint(const char*,const char*); #else # define sqlite3ExplainBreakpoint(A,B) /*no-op*/ #endif -SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe*,int,char*); +SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe*, int, char*, u16); SQLITE_PRIVATE void sqlite3VdbeChangeOpcode(Vdbe*, int addr, u8); SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe*, int addr, int P1); SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, int addr, int P2); @@ -16311,6 +16564,12 @@ SQLITE_PRIVATE int sqlite3PCacheIsDirty(PCache *pCache); # define SET_FULLSYNC(x,y) #endif +/* Maximum pathname length. Note: FILENAME_MAX defined by stdio.h +*/ +#ifndef SQLITE_MAX_PATHLEN +# define SQLITE_MAX_PATHLEN FILENAME_MAX +#endif + /* ** The default size of a disk sector */ @@ -16816,6 +17075,11 @@ SQLITE_PRIVATE void sqlite3CryptFunc(sqlite3_context*,int,sqlite3_value**); #endif /* SQLITE_OMIT_DEPRECATED */ #define SQLITE_TRACE_NONLEGACY_MASK 0x0f /* Normal flags */ +/* +** Maximum number of sqlite3.aDb[] entries. This is the number of attached +** databases plus 2 for "main" and "temp". +*/ +#define SQLITE_MAX_DB (SQLITE_MAX_ATTACHED+2) /* ** Each database connection is an instance of the following structure. @@ -16836,7 +17100,7 @@ struct sqlite3 { int errCode; /* Most recent error code (SQLITE_*) */ int errMask; /* & result codes with this before returning */ int iSysErrno; /* Errno value from last system error */ - u16 dbOptFlags; /* Flags to enable/disable optimizations */ + u32 dbOptFlags; /* Flags to enable/disable optimizations */ u8 enc; /* Text encoding */ u8 autoCommit; /* The auto-commit flag. */ u8 temp_store; /* 1: file 2: memory 0: default */ @@ -17043,24 +17307,26 @@ struct sqlite3 { ** sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS,...) interface to ** selectively disable various optimizations. */ -#define SQLITE_QueryFlattener 0x0001 /* Query flattening */ -#define SQLITE_WindowFunc 0x0002 /* Use xInverse for window functions */ -#define SQLITE_GroupByOrder 0x0004 /* GROUPBY cover of ORDERBY */ -#define SQLITE_FactorOutConst 0x0008 /* Constant factoring */ -#define SQLITE_DistinctOpt 0x0010 /* DISTINCT using indexes */ -#define SQLITE_CoverIdxScan 0x0020 /* Covering index scans */ -#define SQLITE_OrderByIdxJoin 0x0040 /* ORDER BY of joins via index */ -#define SQLITE_Transitive 0x0080 /* Transitive constraints */ -#define SQLITE_OmitNoopJoin 0x0100 /* Omit unused tables in joins */ -#define SQLITE_CountOfView 0x0200 /* The count-of-view optimization */ -#define SQLITE_CursorHints 0x0400 /* Add OP_CursorHint opcodes */ -#define SQLITE_Stat4 0x0800 /* Use STAT4 data */ - /* TH3 expects the Stat4 ^^^^^^ value to be 0x0800. 
Don't change it */ -#define SQLITE_PushDown 0x1000 /* The push-down optimization */ -#define SQLITE_SimplifyJoin 0x2000 /* Convert LEFT JOIN to JOIN */ -#define SQLITE_SkipScan 0x4000 /* Skip-scans */ -#define SQLITE_PropagateConst 0x8000 /* The constant propagation opt */ -#define SQLITE_AllOpts 0xffff /* All optimizations */ +#define SQLITE_QueryFlattener 0x00000001 /* Query flattening */ +#define SQLITE_WindowFunc 0x00000002 /* Use xInverse for window functions */ +#define SQLITE_GroupByOrder 0x00000004 /* GROUPBY cover of ORDERBY */ +#define SQLITE_FactorOutConst 0x00000008 /* Constant factoring */ +#define SQLITE_DistinctOpt 0x00000010 /* DISTINCT using indexes */ +#define SQLITE_CoverIdxScan 0x00000020 /* Covering index scans */ +#define SQLITE_OrderByIdxJoin 0x00000040 /* ORDER BY of joins via index */ +#define SQLITE_Transitive 0x00000080 /* Transitive constraints */ +#define SQLITE_OmitNoopJoin 0x00000100 /* Omit unused tables in joins */ +#define SQLITE_CountOfView 0x00000200 /* The count-of-view optimization */ +#define SQLITE_CursorHints 0x00000400 /* Add OP_CursorHint opcodes */ +#define SQLITE_Stat4 0x00000800 /* Use STAT4 data */ + /* TH3 expects this value ^^^^^^^^^^ to be 0x0000800. Don't change it */ +#define SQLITE_PushDown 0x00001000 /* The push-down optimization */ +#define SQLITE_SimplifyJoin 0x00002000 /* Convert LEFT JOIN to JOIN */ +#define SQLITE_SkipScan 0x00004000 /* Skip-scans */ +#define SQLITE_PropagateConst 0x00008000 /* The constant propagation opt */ +#define SQLITE_MinMaxOpt 0x00010000 /* The min/max optimization */ +#define SQLITE_SeekScan 0x00020000 /* The OP_SeekScan optimization */ +#define SQLITE_AllOpts 0xffffffff /* All optimizations */ /* ** Macros for testing whether or not optimizations are enabled or disabled. @@ -17216,6 +17482,9 @@ struct FuncDestructor { ** a single query. The iArg is ignored. The user-data is always set ** to a NULL pointer. The bNC parameter is not used. ** +** MFUNCTION(zName, nArg, xPtr, xFunc) +** For math-library functions. xPtr is an arbitrary pointer. +** ** PURE_DATE(zName, nArg, iArg, bNC, xFunc) ** Used for "pure" date/time functions, this macro is like DFUNCTION ** except that it does set the SQLITE_FUNC_CONSTANT flags. iArg is @@ -17251,6 +17520,9 @@ struct FuncDestructor { #define SFUNCTION(zName, nArg, iArg, bNC, xFunc) \ {nArg, SQLITE_UTF8|SQLITE_DIRECTONLY|SQLITE_FUNC_UNSAFE, \ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} } +#define MFUNCTION(zName, nArg, xPtr, xFunc) \ + {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ + xPtr, 0, xFunc, 0, 0, 0, #zName, {0} } #define INLINE_FUNC(zName, nArg, iArg, mFlags) \ {nArg, SQLITE_UTF8|SQLITE_FUNC_INLINE|SQLITE_FUNC_CONSTANT|(mFlags), \ SQLITE_INT_TO_PTR(iArg), 0, noopFunc, 0, 0, 0, #zName, {0} } @@ -17345,7 +17617,12 @@ struct Column { u16 colFlags; /* Boolean properties. See COLFLAG_ defines below */ }; -/* Allowed values for Column.colFlags: +/* Allowed values for Column.colFlags. +** +** Constraints: +** TF_HasVirtual == COLFLAG_VIRTUAL +** TF_HasStored == COLFLAG_STORED +** TF_HasHidden == COLFLAG_HIDDEN */ #define COLFLAG_PRIMKEY 0x0001 /* Column is part of the primary key */ #define COLFLAG_HIDDEN 0x0002 /* A hidden column in a virtual table */ @@ -17421,9 +17698,7 @@ struct CollSeq { ** operator is NULL. It is added to certain comparison operators to ** prove that the operands are always NOT NULL. 
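The widening of dbOptFlags from u16 to u32 above makes room for the new optimization bits; individual bits can be disabled at run time through the OPTIMIZATIONS test-control. A sketch, assuming an open sqlite3 *db; the 0x00001000 mask mirrors SQLITE_PushDown above:

    #include "sqlite3.h"

    /* Sketch: disable the push-down optimization on one connection, then
    ** restore the defaults by clearing the disable-mask. */
    static void toggle_pushdown(sqlite3 *db){
      sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS, db, 0x00001000);
      /* ... run test queries with push-down disabled ... */
      sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS, db, 0x0);
    }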
 */
-#define SQLITE_KEEPNULL     0x08  /* Used by vector == or <> */
 #define SQLITE_JUMPIFNULL   0x10  /* jumps if either operand is NULL */
-#define SQLITE_STOREP2      0x20  /* Store result in reg[P2] rather than jump */
 #define SQLITE_NULLEQ       0x80  /* NULL=NULL */
 #define SQLITE_NOTNULL      0x90  /* Assert that operands are never NULL */
@@ -17521,7 +17796,6 @@ struct Table {
 #endif
   Trigger *pTrigger;   /* List of triggers stored in pSchema */
   Schema *pSchema;     /* Schema that contains this table */
-  Table *pNextZombie;  /* Next on the Parse.pZombieTab list */
 };
 
 /*
@@ -17535,11 +17809,12 @@ struct Table {
 **
 ** Constraints:
 **
-**         TF_HasVirtual == COLFLAG_Virtual
-**         TF_HasStored  == COLFLAG_Stored
+**         TF_HasVirtual == COLFLAG_VIRTUAL
+**         TF_HasStored  == COLFLAG_STORED
+**         TF_HasHidden  == COLFLAG_HIDDEN
 */
 #define TF_Readonly        0x0001    /* Read-only system table */
-#define TF_Ephemeral       0x0002    /* An ephemeral table */
+#define TF_HasHidden       0x0002    /* Has one or more hidden columns */
 #define TF_HasPrimaryKey   0x0004    /* Table has a primary key */
 #define TF_Autoincrement   0x0008    /* Integer primary key is autoincrement */
 #define TF_HasStat1        0x0010    /* nRowLogEst set from sqlite_stat1 */
@@ -17553,6 +17828,9 @@ struct Table {
 #define TF_OOOHidden       0x0400    /* Out-of-Order hidden columns */
 #define TF_HasNotNull      0x0800    /* Contains NOT NULL constraints */
 #define TF_Shadow          0x1000    /* True for a shadow table */
+#define TF_HasStat4        0x2000    /* STAT4 info available for this table */
+#define TF_Ephemeral       0x4000    /* An ephemeral table */
+#define TF_Eponymous       0x8000    /* An eponymous virtual table */
 
 /*
 ** Test to see whether or not a table is a virtual table.  This is
@@ -17649,16 +17927,22 @@ struct FKey {
 ** is returned.  REPLACE means that preexisting database rows that caused
 ** a UNIQUE constraint violation are removed so that the new insert or
 ** update can proceed.  Processing continues and no error is reported.
+** UPDATE applies to insert operations only and means that the insert
+** is omitted and the DO UPDATE clause of an upsert is run instead.
 **
-** RESTRICT, SETNULL, and CASCADE actions apply only to foreign keys.
+** RESTRICT, SETNULL, SETDFLT, and CASCADE actions apply only to foreign keys.
 ** RESTRICT is the same as ABORT for IMMEDIATE foreign keys and the
 ** same as ROLLBACK for DEFERRED keys.  SETNULL means that the foreign
-** key is set to NULL.  CASCADE means that a DELETE or UPDATE of the
+** key is set to NULL.  SETDFLT means that the foreign key is set
+** to its default value.  CASCADE means that a DELETE or UPDATE of the
 ** referenced table row is propagated into the row that holds the
 ** foreign key.
 **
+** The OE_Default value is a placeholder that means to use whatever
+** conflict resolution algorithm is required from context.
+**
 ** The following symbolic values are used to record which type
-** of action to take.
+** of conflict resolution action to take.
 */
 #define OE_None     0   /* There is no constraint to check */
 #define OE_Rollback 1   /* Fail the operation and rollback the transaction */
@@ -17912,10 +18196,10 @@ struct AggInfo {
     FuncDef *pFunc;        /* The aggregate function implementation */
     int iMem;              /* Memory location that acts as accumulator */
     int iDistinct;         /* Ephemeral table used to enforce DISTINCT */
+    int iDistAddr;         /* Address of OP_OpenEphemeral */
   } *aFunc;
   int nFunc;               /* Number of entries in aFunc[] */
   u32 selId;               /* Select to which this AggInfo belongs */
-  AggInfo *pNext;          /* Next in list of them all */
 };
 
 /*
@@ -18044,7 +18328,7 @@ struct Expr {
   ** TK_VARIABLE: variable number (always >= 1).
** TK_SELECT_COLUMN: column of the result vector */ i16 iAgg; /* Which entry in pAggInfo->aCol[] or ->aFunc[] */ - i16 iRightJoinTable; /* If EP_FromJoin, the right table of the join */ + int iRightJoinTable; /* If EP_FromJoin, the right table of the join */ AggInfo *pAggInfo; /* Used by TK_AGG_COLUMN and TK_AGG_FUNCTION */ union { Table *pTab; /* TK_COLUMN: Table containing column. Can be NULL @@ -18081,12 +18365,12 @@ struct Expr { #define EP_TokenOnly 0x004000 /* Expr struct EXPR_TOKENONLYSIZE bytes only */ #define EP_Win 0x008000 /* Contains window functions */ #define EP_MemToken 0x010000 /* Need to sqlite3DbFree() Expr.zToken */ - /* 0x020000 // available for reuse */ +#define EP_IfNullRow 0x020000 /* The TK_IF_NULL_ROW opcode */ #define EP_Unlikely 0x040000 /* unlikely() or likelihood() function */ #define EP_ConstFunc 0x080000 /* A SQLITE_FUNC_CONSTANT or _SLOCHNG function */ #define EP_CanBeNull 0x100000 /* Can be null despite NOT NULL constraint */ #define EP_Subquery 0x200000 /* Tree contains a TK_SELECT operator */ -#define EP_Alias 0x400000 /* Is an alias for a result set column */ + /* 0x400000 // Available */ #define EP_Leaf 0x800000 /* Expr.pLeft, .pRight, .u.pSelect all NULL */ #define EP_WinFunc 0x1000000 /* TK_FUNCTION with Expr.y.pWin set */ #define EP_Subrtn 0x2000000 /* Uses Expr.y.sub. TK_IN, _SELECT, or _EXISTS */ @@ -18185,6 +18469,7 @@ struct Expr { */ struct ExprList { int nExpr; /* Number of expressions on the list */ + int nAlloc; /* Number of a[] slots allocated */ struct ExprList_item { /* For each expression in the list */ Expr *pExpr; /* The parse tree for this expression */ char *zEName; /* Token associated with this expression */ @@ -18234,6 +18519,46 @@ struct IdList { int nId; /* Number of identifiers on the list */ }; +/* +** The SrcItem object represents a single term in the FROM clause of a query. +** The SrcList object is mostly an array of SrcItems. +*/ +struct SrcItem { + Schema *pSchema; /* Schema to which this item is fixed */ + char *zDatabase; /* Name of database holding this table */ + char *zName; /* Name of the table */ + char *zAlias; /* The "B" part of a "A AS B" phrase. 
zName is the "A" */ + Table *pTab; /* An SQL table corresponding to zName */ + Select *pSelect; /* A SELECT statement used in place of a table name */ + int addrFillSub; /* Address of subroutine to manifest a subquery */ + int regReturn; /* Register holding return address of addrFillSub */ + int regResult; /* Registers holding results of a co-routine */ + struct { + u8 jointype; /* Type of join between this table and the previous */ + unsigned notIndexed :1; /* True if there is a NOT INDEXED clause */ + unsigned isIndexedBy :1; /* True if there is an INDEXED BY clause */ + unsigned isTabFunc :1; /* True if table-valued-function syntax */ + unsigned isCorrelated :1; /* True if sub-query is correlated */ + unsigned viaCoroutine :1; /* Implemented as a co-routine */ + unsigned isRecursive :1; /* True for recursive reference in WITH */ + unsigned fromDDL :1; /* Comes from sqlite_schema */ + unsigned isCte :1; /* This is a CTE */ + unsigned notCte :1; /* This item may not match a CTE */ + } fg; + int iCursor; /* The VDBE cursor number used to access this table */ + Expr *pOn; /* The ON clause of a join */ + IdList *pUsing; /* The USING clause of a join */ + Bitmask colUsed; /* Bit N (1<" clause */ + ExprList *pFuncArg; /* Arguments to table-valued-function */ + } u1; + union { + Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */ + CteUse *pCteUse; /* CTE Usage info info fg.isCte is true */ + } u2; +}; + /* ** The following structure describes the FROM clause of a SELECT statement. ** Each table or subquery in the FROM clause is a separate element of @@ -18256,36 +18581,7 @@ struct IdList { struct SrcList { int nSrc; /* Number of tables or subqueries in the FROM clause */ u32 nAlloc; /* Number of entries allocated in a[] below */ - struct SrcList_item { - Schema *pSchema; /* Schema to which this item is fixed */ - char *zDatabase; /* Name of database holding this table */ - char *zName; /* Name of the table */ - char *zAlias; /* The "B" part of a "A AS B" phrase. 
zName is the "A" */ - Table *pTab; /* An SQL table corresponding to zName */ - Select *pSelect; /* A SELECT statement used in place of a table name */ - int addrFillSub; /* Address of subroutine to manifest a subquery */ - int regReturn; /* Register holding return address of addrFillSub */ - int regResult; /* Registers holding results of a co-routine */ - struct { - u8 jointype; /* Type of join between this table and the previous */ - unsigned notIndexed :1; /* True if there is a NOT INDEXED clause */ - unsigned isIndexedBy :1; /* True if there is an INDEXED BY clause */ - unsigned isTabFunc :1; /* True if table-valued-function syntax */ - unsigned isCorrelated :1; /* True if sub-query is correlated */ - unsigned viaCoroutine :1; /* Implemented as a co-routine */ - unsigned isRecursive :1; /* True for recursive reference in WITH */ - unsigned fromDDL :1; /* Comes from sqlite_schema */ - } fg; - int iCursor; /* The VDBE cursor number used to access this table */ - Expr *pOn; /* The ON clause of a join */ - IdList *pUsing; /* The USING clause of a join */ - Bitmask colUsed; /* Bit N (1<" clause */ - ExprList *pFuncArg; /* Arguments to table-valued-function */ - } u1; - Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */ - } a[1]; /* One entry for each identifier on the list */ + SrcItem a[1]; /* One entry for each identifier on the list */ }; /* @@ -18319,9 +18615,9 @@ struct SrcList { #define WHERE_DISTINCTBY 0x0080 /* pOrderby is really a DISTINCT clause */ #define WHERE_WANT_DISTINCT 0x0100 /* All output needs to be distinct */ #define WHERE_SORTBYGROUP 0x0200 /* Support sqlite3WhereIsSorted() */ -#define WHERE_SEEK_TABLE 0x0400 /* Do not defer seeks on main table */ +#define WHERE_AGG_DISTINCT 0x0400 /* Query is "SELECT agg(DISTINCT ...)" */ #define WHERE_ORDERBY_LIMIT 0x0800 /* ORDERBY+LIMIT on the inner loop */ -#define WHERE_SEEK_UNIQ_TABLE 0x1000 /* Do not defer seeks if unique */ + /* 0x1000 not currently used */ /* 0x2000 not currently used */ #define WHERE_USE_LIMIT 0x4000 /* Use the LIMIT in cost estimates */ /* 0x8000 not currently used */ @@ -18361,10 +18657,11 @@ struct NameContext { ExprList *pEList; /* Optional list of result-set columns */ AggInfo *pAggInfo; /* Information about aggregates at this level */ Upsert *pUpsert; /* ON CONFLICT clause information from an upsert */ + int iBaseReg; /* For TK_REGISTER when parsing RETURNING */ } uNC; NameContext *pNext; /* Next outer name context. NULL for outermost */ int nRef; /* Number of names resolved by this context */ - int nErr; /* Number of errors encountered while resolving names */ + int nNcErr; /* Number of errors encountered while resolving names */ int ncFlags; /* Zero or more NC_* flags defined below */ Select *pWinSelect; /* SELECT statement for any window functions */ }; @@ -18389,6 +18686,7 @@ struct NameContext { #define NC_UEList 0x00080 /* True if uNC.pEList is used */ #define NC_UAggInfo 0x00100 /* True if uNC.pAggInfo is used */ #define NC_UUpsert 0x00200 /* True if uNC.pUpsert is used */ +#define NC_UBaseReg 0x00400 /* True if uNC.iBaseReg is used */ #define NC_MinMaxAgg 0x01000 /* min/max aggregates seen. 
See note above */
 #define NC_Complex   0x02000  /* True if a function or subquery seen */
 #define NC_AllowWin  0x04000  /* Window functions are allowed here */
@@ -18396,6 +18694,7 @@ struct NameContext {
 #define NC_IsDDL     0x10000  /* Resolving names in a CREATE statement */
 #define NC_InAggFunc 0x20000  /* True if analyzing arguments to an agg func */
 #define NC_FromDDL   0x40000  /* SQL text comes from sqlite_schema */
+#define NC_NoSelect  0x80000  /* Do not descend into sub-selects */
 
 /*
 ** An instance of the following object describes a single ON CONFLICT
@@ -18412,15 +18711,21 @@ struct NameContext {
 ** WHERE clause is omitted.
 */
 struct Upsert {
-  ExprList *pUpsertTarget;  /* Optional description of conflicting index */
+  ExprList *pUpsertTarget;  /* Optional description of conflict target */
   Expr *pUpsertTargetWhere; /* WHERE clause for partial index targets */
   ExprList *pUpsertSet;     /* The SET clause from an ON CONFLICT UPDATE */
   Expr *pUpsertWhere;       /* WHERE clause for the ON CONFLICT UPDATE */
-  /* The fields above comprise the parse tree for the upsert clause.
-  ** The fields below are used to transfer information from the INSERT
-  ** processing down into the UPDATE processing while generating code.
-  ** Upsert owns the memory allocated above, but not the memory below. */
-  Index *pUpsertIdx;        /* Constraint that pUpsertTarget identifies */
+  Upsert *pNextUpsert;      /* Next ON CONFLICT clause in the list */
+  u8 isDoUpdate;            /* True for DO UPDATE.  False for DO NOTHING */
+  /* Above this point is the parse tree for the ON CONFLICT clauses.
+  ** The next group of fields stores intermediate data. */
+  void *pToFree;            /* Free memory when deleting the Upsert object */
+  /* All fields above are owned by the Upsert object and must be freed
+  ** when the Upsert is destroyed.  The fields below are used to transfer
+  ** information from the INSERT processing down into the UPDATE processing
+  ** while generating code.  The fields below are owned by the INSERT
+  ** statement and will be freed by INSERT processing. */
+  Index *pUpsertIdx;        /* UNIQUE constraint specified by pUpsertTarget */
   SrcList *pUpsertSrc;      /* Table to be updated */
   int regData;              /* First register holding array of VALUES */
   int iDataCur;             /* Index of the data cursor */
@@ -18500,6 +18805,9 @@ struct Select {
 #define SF_View          0x0200000 /* SELECT statement is a view */
 #define SF_NoopOrderBy   0x0400000 /* ORDER BY is ignored for this query */
 #define SF_UpdateFrom    0x0800000 /* Statement is an UPDATE...FROM */
+#define SF_PushDown      0x1000000 /* SELECT has been modified by push-down opt */
+#define SF_MultiPart     0x2000000 /* Has multiple incompatible PARTITIONs */
+#define SF_CopyCte       0x4000000 /* SELECT statement is a copy of a CTE */
 
 /*
 ** The results of a SELECT can be distributed in several ways, as defined
@@ -18518,9 +18826,6 @@ struct Select {
 **     statements within triggers whose only purpose is
 **     the side-effects of functions.
 **
-** All of the above are free to ignore their ORDER BY clause. Those that
-** follow must honor the ORDER BY clause.
-**
 **     SRT_Output      Generate a row of output (using the OP_ResultRow
 **                     opcode) for each row in the result set.
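The pNextUpsert field added above represents a chain of ON CONFLICT clauses on a single INSERT; only the last clause in the chain may omit a conflict target. A SQL-level sketch, with a made-up schema t1(id PRIMARY KEY, v):

    /* Sketch: two chained ON CONFLICT clauses on one INSERT. */
    const char *zUpsert =
      "INSERT INTO t1(id, v) VALUES(1, 'x') "
      "ON CONFLICT(id) DO UPDATE SET v = excluded.v "
      "ON CONFLICT DO NOTHING;";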
** @@ -18577,13 +18882,18 @@ struct Select { #define SRT_Except 2 /* Remove result from a UNION index */ #define SRT_Exists 3 /* Store 1 if the result is not empty */ #define SRT_Discard 4 /* Do not save the results anywhere */ -#define SRT_Fifo 5 /* Store result as data with an automatic rowid */ -#define SRT_DistFifo 6 /* Like SRT_Fifo, but unique results only */ +#define SRT_DistFifo 5 /* Like SRT_Fifo, but unique results only */ +#define SRT_DistQueue 6 /* Like SRT_Queue, but unique results only */ + +/* The DISTINCT clause is ignored for all of the above. Not that +** IgnorableDistinct() implies IgnorableOrderby() */ +#define IgnorableDistinct(X) ((X->eDest)<=SRT_DistQueue) + #define SRT_Queue 7 /* Store result in an queue */ -#define SRT_DistQueue 8 /* Like SRT_Queue, but unique results only */ +#define SRT_Fifo 8 /* Store result as data with an automatic rowid */ /* The ORDER BY clause is ignored for all of the above */ -#define IgnorableOrderby(X) ((X->eDest)<=SRT_DistQueue) +#define IgnorableOrderby(X) ((X->eDest)<=SRT_Fifo) #define SRT_Output 9 /* Output each row of result */ #define SRT_Mem 10 /* Store result in a memory cell */ @@ -18668,6 +18978,17 @@ struct TriggerPrg { # define DbMaskNonZero(M) (M)!=0 #endif +/* +** An instance of the ParseCleanup object specifies an operation that +** should be performed after parsing to deallocation resources obtained +** during the parse and which are no longer needed. +*/ +struct ParseCleanup { + ParseCleanup *pNext; /* Next cleanup task */ + void *pPtr; /* Pointer to object to deallocate */ + void (*xCleanup)(sqlite3*,void*); /* Deallocation routine */ +}; + /* ** An SQL parser context. A copy of this structure is passed through ** the parser and down into all the parser action routine in order to @@ -18699,6 +19020,9 @@ struct Parse { u8 okConstFactor; /* OK to factor out constants */ u8 disableLookaside; /* Number of times lookaside has been disabled */ u8 disableVtab; /* Disable all virtual tables for this parse */ +#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) + u8 earlyCleanup; /* OOM inside sqlite3ParserAddCleanup() */ +#endif int nRangeReg; /* Size of the temporary register block */ int iRangeReg; /* First register in temporary register block */ int nErr; /* Number of errors seen */ @@ -18726,12 +19050,15 @@ struct Parse { Parse *pToplevel; /* Parse structure for main program (or NULL) */ Table *pTriggerTab; /* Table triggers are being coded for */ Parse *pParentParse; /* Parent parser if this parser is nested */ - AggInfo *pAggList; /* List of all AggInfo objects */ - int addrCrTab; /* Address of OP_CreateBtree opcode on CREATE TABLE */ + union { + int addrCrTab; /* Address of OP_CreateBtree on CREATE TABLE */ + Returning *pReturning; /* The RETURNING clause */ + } u1; u32 nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */ u32 oldmask; /* Mask of old.* columns referenced */ u32 newmask; /* Mask of new.* columns referenced */ u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */ + u8 bReturning; /* Coding a RETURNING trigger */ u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */ u8 disableTriggers; /* True to disable triggers */ @@ -18777,10 +19104,9 @@ struct Parse { Token sArg; /* Complete text of a module argument */ Table **apVtabLock; /* Pointer to virtual tables needing locking */ #endif - Table *pZombieTab; /* List of Table objects to delete after code gen */ TriggerPrg *pTriggerPrg; /* Linked list of coded triggers */ With *pWith; /* Current WITH clause, or NULL */ - With 
*pWithToFree; /* Free this WITH object at the end of the parse */ + ParseCleanup *pCleanup; /* List of cleanup operations to run after parse */ #ifndef SQLITE_OMIT_ALTERTABLE RenameToken *pRename; /* Tokens subject to renaming by ALTER TABLE */ #endif @@ -18860,6 +19186,7 @@ struct AuthContext { #define OPFLAG_SAVEPOSITION 0x02 /* OP_Delete/Insert: save cursor pos */ #define OPFLAG_AUXDELETE 0x04 /* OP_Delete: index in a DELETE op */ #define OPFLAG_NOCHNG_MAGIC 0x6d /* OP_MakeRecord: serialtype 10 is ok */ +#define OPFLAG_PREFORMAT 0x80 /* OP_Insert uses preformatted cell */ /* * Each trigger present in the database schema is stored as an instance of @@ -18881,6 +19208,7 @@ struct Trigger { char *table; /* The table or view to which the trigger applies */ u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT */ u8 tr_tm; /* One of TRIGGER_BEFORE, TRIGGER_AFTER */ + u8 bReturning; /* This trigger implements a RETURNING clause */ Expr *pWhen; /* The WHEN clause of the expression (may be NULL) */ IdList *pColumns; /* If this is an UPDATE OF trigger, the is stored here */ @@ -18939,14 +19267,15 @@ struct Trigger { * */ struct TriggerStep { - u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT, TK_SELECT */ + u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT, TK_SELECT, + ** or TK_RETURNING */ u8 orconf; /* OE_Rollback etc. */ Trigger *pTrig; /* The trigger that this step is a part of */ Select *pSelect; /* SELECT statement or RHS of INSERT INTO SELECT ... */ char *zTarget; /* Target table for DELETE, UPDATE, INSERT */ SrcList *pFrom; /* FROM clause for UPDATE statement (if any) */ Expr *pWhere; /* The WHERE clause for DELETE or UPDATE steps */ - ExprList *pExprList; /* SET clause for UPDATE */ + ExprList *pExprList; /* SET clause for UPDATE, or RETURNING clause */ IdList *pIdList; /* Column names for INSERT */ Upsert *pUpsert; /* Upsert clauses on an INSERT */ char *zSpan; /* Original SQL text of this command */ @@ -18955,18 +19284,16 @@ struct TriggerStep { }; /* -** The following structure contains information used by the sqliteFix... -** routines as they walk the parse tree to make database references -** explicit. +** Information about a RETURNING clause */ -typedef struct DbFixer DbFixer; -struct DbFixer { - Parse *pParse; /* The parsing context. Error messages written here */ - Schema *pSchema; /* Fix items to this schema */ - u8 bTemp; /* True for TEMP schema entries */ - const char *zDb; /* Make sure all objects are contained in this database */ - const char *zType; /* Type of the container - used for error messages */ - const Token *pName; /* Name of the container - used for error messages */ +struct Returning { + Parse *pParse; /* The parse that includes the RETURNING clause */ + ExprList *pReturnEL; /* List of expressions to return */ + Trigger retTrig; /* The transient trigger that implements RETURNING */ + TriggerStep retTStep; /* The trigger step */ + int iRetCur; /* Transient table holding RETURNING results */ + int nRetCol; /* Number of in pReturnEL after expansion */ + int iRetReg; /* Register array for holding a row of RETURNING */ }; /* @@ -19006,7 +19333,24 @@ typedef struct { /* ** Allowed values for mInitFlags */ -#define INITFLAG_AlterTable 0x0001 /* This is a reparse after ALTER TABLE */ +#define INITFLAG_AlterRename 0x0001 /* Reparse after a RENAME */ +#define INITFLAG_AlterDrop 0x0002 /* Reparse after a DROP COLUMN */ + +/* Tuning parameters are set using SQLITE_TESTCTRL_TUNE and are controlled +** on debug-builds of the CLI using ".testctrl tune ID VALUE". 
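The Returning object just above implements RETURNING as a transient trigger: retTrig and retTStep stand in for a real schema trigger (note the new TK_RETURNING trigger-step opcode), and rows are staged in the ephemeral iRetCur table. From the application side the clause simply makes a DML statement yield rows. A minimal sketch over the public prepare/step API, assuming an open handle db and a table t(a):

    #include <stdio.h>
    #include "sqlite3.h"

    static void demoReturning(sqlite3 *db){
      sqlite3_stmt *pStmt = 0;
      int rc = sqlite3_prepare_v2(db,
          "INSERT INTO t(a) VALUES(?1) RETURNING rowid, a;", -1, &pStmt, 0);
      if( rc==SQLITE_OK ){
        sqlite3_bind_int(pStmt, 1, 42);
        /* One SQLITE_ROW per inserted row, exactly as for a SELECT */
        while( sqlite3_step(pStmt)==SQLITE_ROW ){
          printf("rowid=%lld a=%d\n",
                 (long long)sqlite3_column_int64(pStmt, 0),
                 sqlite3_column_int(pStmt, 1));
        }
      }
      sqlite3_finalize(pStmt);
    }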
Tuning +** parameters are for temporary use during development, to help find +** optimal values for parameters in the query planner. They should not +** be used on trunk check-ins. They are a temporary mechanism available +** for transient development builds only. +** +** Tuning parameters are numbered starting with 1. +*/ +#define SQLITE_NTUNE 6 /* Should be zero for all trunk check-ins */ +#ifdef SQLITE_DEBUG +# define Tuning(X) (sqlite3Config.aTune[(X)-1]) +#else +# define Tuning(X) 0 +#endif /* ** Structure containing global configuration data for the SQLite library. @@ -19062,7 +19406,7 @@ struct Sqlite3Config { void (*xVdbeBranch)(void*,unsigned iSrcLine,u8 eThis,u8 eMx); /* Callback */ void *pVdbeBranchArg; /* 1st argument */ #endif -#ifdef SQLITE_ENABLE_DESERIALIZE +#ifndef SQLITE_OMIT_DESERIALIZE sqlite3_int64 mxMemdbSize; /* Default max memdb size */ #endif #ifndef SQLITE_UNTESTABLE @@ -19072,6 +19416,10 @@ struct Sqlite3Config { int iOnceResetThreshold; /* When to reset OP_Once counters */ u32 szSorterRef; /* Min size in bytes to use sorter-refs */ unsigned int iPrngSeed; /* Alternative fixed seed for the PRNG */ + /* vvvv--- must be last ---vvv */ +#ifdef SQLITE_DEBUG + sqlite3_int64 aTune[SQLITE_NTUNE]; /* Tuning parameters */ +#endif }; /* @@ -19118,10 +19466,26 @@ struct Walker { struct WhereConst *pConst; /* WHERE clause constants */ struct RenameCtx *pRename; /* RENAME COLUMN context */ struct Table *pTab; /* Table of generated column */ - struct SrcList_item *pSrcItem; /* A single FROM clause item */ + SrcItem *pSrcItem; /* A single FROM clause item */ + DbFixer *pFix; } u; }; +/* +** The following structure contains information used by the sqliteFix... +** routines as they walk the parse tree to make database references +** explicit. +*/ +struct DbFixer { + Parse *pParse; /* The parsing context. Error messages written here */ + Walker w; /* Walker object */ + Schema *pSchema; /* Fix items to this schema */ + u8 bTemp; /* True for TEMP schema entries */ + const char *zDb; /* Make sure all objects are contained in this database */ + const char *zType; /* Type of the container - used for error messages */ + const Token *pName; /* Name of the container - used for error messages */ +}; + /* Forward declarations */ SQLITE_PRIVATE int sqlite3WalkExpr(Walker*, Expr*); SQLITE_PRIVATE int sqlite3WalkExprList(Walker*, ExprList*); @@ -19133,11 +19497,18 @@ SQLITE_PRIVATE int sqlite3SelectWalkNoop(Walker*, Select*); SQLITE_PRIVATE int sqlite3SelectWalkFail(Walker*, Select*); SQLITE_PRIVATE int sqlite3WalkerDepthIncrease(Walker*,Select*); SQLITE_PRIVATE void sqlite3WalkerDepthDecrease(Walker*,Select*); +SQLITE_PRIVATE void sqlite3WalkWinDefnDummyCallback(Walker*,Select*); #ifdef SQLITE_DEBUG SQLITE_PRIVATE void sqlite3SelectWalkAssert2(Walker*, Select*); #endif +#ifndef SQLITE_OMIT_CTE +SQLITE_PRIVATE void sqlite3SelectPopWith(Walker*, Select*); +#else +# define sqlite3SelectPopWith 0 +#endif + /* ** Return code from the parse-tree walking primitives and their ** callbacks. @@ -19147,20 +19518,56 @@ SQLITE_PRIVATE void sqlite3SelectWalkAssert2(Walker*, Select*); #define WRC_Abort 2 /* Abandon the tree walk */ /* -** An instance of this structure represents a set of one or more CTEs -** (common table expressions) created by a single WITH clause.
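With this change DbFixer embeds its own Walker (the w member) and the Walker's context union gains a pFix back-pointer, so the sqliteFix... callbacks can recover their container without extra plumbing. A generic sketch of that pattern in isolation; none of these names are SQLite's:

    typedef struct MyWalker MyWalker;
    typedef struct MyFixer MyFixer;

    struct MyWalker {
      int (*xCallback)(MyWalker*, int nodeOp);  /* called for every node */
      union { MyFixer *pFix; } u;               /* context, like Walker.u.pFix */
    };
    struct MyFixer {
      const char *zDb;      /* all references must resolve to this database */
      int nErr;             /* number of problems found so far */
      MyWalker w;           /* embedded walker, like DbFixer.w */
    };

    static int fixCallback(MyWalker *pWalker, int nodeOp){
      MyFixer *pFix = pWalker->u.pFix;   /* recover the enclosing fixer */
      if( nodeOp<0 ) pFix->nErr++;       /* stand-in for a real check */
      return 0;                          /* 0 means: continue the walk */
    }

    static void fixerInit(MyFixer *pFix, const char *zDb){
      pFix->zDb = zDb;
      pFix->nErr = 0;
      pFix->w.xCallback = fixCallback;
      pFix->w.u.pFix = pFix;             /* walker points back at its owner */
    }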
+** A single common table expression +*/ +struct Cte { + char *zName; /* Name of this CTE */ + ExprList *pCols; /* List of explicit column names, or NULL */ + Select *pSelect; /* The definition of this CTE */ + const char *zCteErr; /* Error message for circular references */ + CteUse *pUse; /* Usage information for this CTE */ + u8 eM10d; /* The MATERIALIZED flag */ +}; + +/* +** Allowed values for the materialized flag (eM10d): +*/ +#define M10d_Yes 0 /* AS MATERIALIZED */ +#define M10d_Any 1 /* Not specified. Query planner's choice */ +#define M10d_No 2 /* AS NOT MATERIALIZED */ + +/* +** An instance of the With object represents a WITH clause containing +** one or more CTEs (common table expressions). */ struct With { - int nCte; /* Number of CTEs in the WITH clause */ - With *pOuter; /* Containing WITH clause, or NULL */ - struct Cte { /* For each CTE in the WITH clause.... */ - char *zName; /* Name of this CTE */ - ExprList *pCols; /* List of explicit column names, or NULL */ - Select *pSelect; /* The definition of this CTE */ - const char *zCteErr; /* Error message for circular references */ - } a[1]; + int nCte; /* Number of CTEs in the WITH clause */ + int bView; /* Belongs to the outermost Select of a view */ + With *pOuter; /* Containing WITH clause, or NULL */ + Cte a[1]; /* For each CTE in the WITH clause.... */ +}; + +/* +** The Cte object is not guaranteed to persist for the entire duration +** of code generation. (The query flattener or other parser tree +** edits might delete it.) The following object records information +** about each Common Table Expression that must be preserved for the +** duration of the parse. +** +** The CteUse objects are freed using sqlite3ParserAddCleanup() rather +** than sqlite3SelectDelete(), which is what enables them to persist +** until the end of code generation. 
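The new eM10d field records the MATERIALIZED keyword, one value per Cte. A short sketch of the three spellings it encodes; the table t is hypothetical:

    static const char *azCteHints[] = {
      /* eM10d==M10d_Yes: compute c once and share the materialization */
      "WITH c AS MATERIALIZED (SELECT * FROM t) SELECT * FROM c, c AS c2;",
      /* eM10d==M10d_No: ask the planner to treat c like an inline view */
      "WITH c AS NOT MATERIALIZED (SELECT * FROM t) SELECT * FROM c;",
      /* eM10d==M10d_Any: no keyword, so the query planner chooses */
      "WITH c AS (SELECT * FROM t) SELECT * FROM c;"
    };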
+*/ +struct CteUse { + int nUse; /* Number of users of this CTE */ + int addrM9e; /* Start of subroutine to compute materialization */ + int regRtn; /* Return address register for addrM9e subroutine */ + int iCur; /* Ephemeral table holding the materialization */ + LogEst nRowEst; /* Estimated number of rows in the table */ + u8 eM10d; /* The MATERIALIZED flag */ }; + #ifdef SQLITE_DEBUG /* ** An instance of the TreeView object is used for printing the content of @@ -19238,7 +19645,6 @@ SQLITE_PRIVATE int sqlite3WindowCompare(Parse*, Window*, Window*, int); SQLITE_PRIVATE void sqlite3WindowCodeInit(Parse*, Select*); SQLITE_PRIVATE void sqlite3WindowCodeStep(Parse*, Select*, WhereInfo*, int, int); SQLITE_PRIVATE int sqlite3WindowRewrite(Parse*, Select*); -SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse*, struct SrcList_item*); SQLITE_PRIVATE void sqlite3WindowUpdate(Parse*, Window*, Window*, FuncDef*); SQLITE_PRIVATE Window *sqlite3WindowDup(sqlite3 *db, Expr *pOwner, Window *p); SQLITE_PRIVATE Window *sqlite3WindowListDup(sqlite3 *db, Window *p); @@ -19507,6 +19913,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*, int); SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,Expr*,FuncDef*); SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32); SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*); +SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse*, Expr*); SQLITE_PRIVATE void sqlite3ExprUnmapAndDelete(Parse*, Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector(Parse*,ExprList*,IdList*,Expr*); @@ -19528,6 +19935,7 @@ SQLITE_PRIVATE void sqlite3ResetOneSchema(sqlite3*,int); SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3*); SQLITE_PRIVATE void sqlite3CommitInternalChanges(sqlite3*); SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3*,Table*); +SQLITE_PRIVATE void sqlite3GenerateColumnNames(Parse *pParse, Select *pSelect); SQLITE_PRIVATE int sqlite3ColumnsFromExprList(Parse*,ExprList*,i16*,Column**); SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation(Parse*,Table*,Select*,char); SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse*,Select*,char); @@ -19550,11 +19958,12 @@ SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table*, Column*); SQLITE_PRIVATE void sqlite3AddColumn(Parse*,Token*,Token*); SQLITE_PRIVATE void sqlite3AddNotNull(Parse*, int); SQLITE_PRIVATE void sqlite3AddPrimaryKey(Parse*, ExprList*, int, int, int); -SQLITE_PRIVATE void sqlite3AddCheckConstraint(Parse*, Expr*); +SQLITE_PRIVATE void sqlite3AddCheckConstraint(Parse*, Expr*, const char*, const char*); SQLITE_PRIVATE void sqlite3AddDefaultValue(Parse*,Expr*,const char*,const char*); SQLITE_PRIVATE void sqlite3AddCollateType(Parse*, Token*); SQLITE_PRIVATE void sqlite3AddGenerated(Parse*,Expr*,Token*); SQLITE_PRIVATE void sqlite3EndTable(Parse*,Token*,Token*,u8,Select*); +SQLITE_PRIVATE void sqlite3AddReturning(Parse*,ExprList*); SQLITE_PRIVATE int sqlite3ParseUri(const char*,const char*,unsigned int*, sqlite3_vfs**,char**,char **); #define sqlite3CodecQueryParameters(A,B,C) 0 @@ -19620,7 +20029,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm(Parse*, SrcList*, Token*, T Token*, Select*, Expr*, IdList*); SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *, SrcList *, Token *); SQLITE_PRIVATE void sqlite3SrcListFuncArgs(Parse*, SrcList*, ExprList*); -SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *, struct SrcList_item *); +SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *, 
SrcItem *); SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList*); SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse*, SrcList*); SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3*, IdList*); @@ -19648,6 +20057,7 @@ SQLITE_PRIVATE LogEst sqlite3WhereOutputRowCount(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereOrderByLimitOptLabel(WhereInfo*); +SQLITE_PRIVATE void sqlite3WhereMinMaxOptEarlyOut(Vdbe*,WhereInfo*); SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo*); @@ -19681,7 +20091,7 @@ SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3*,const char*, const char*); #define LOCATE_VIEW 0x01 #define LOCATE_NOERR 0x02 SQLITE_PRIVATE Table *sqlite3LocateTable(Parse*,u32 flags,const char*, const char*); -SQLITE_PRIVATE Table *sqlite3LocateTableItem(Parse*,u32 flags,struct SrcList_item *); +SQLITE_PRIVATE Table *sqlite3LocateTableItem(Parse*,u32 flags,SrcItem *); SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3*,const char*, const char*); SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3*,int,const char*); SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3*,int,const char*); @@ -19761,6 +20171,7 @@ SQLITE_PRIVATE void sqlite3RegisterPerConnectionBuiltinFunctions(sqlite3*); SQLITE_PRIVATE int sqlite3SafetyCheckOk(sqlite3*); SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3*); SQLITE_PRIVATE void sqlite3ChangeCookie(Parse*, int); +SQLITE_PRIVATE With *sqlite3WithDup(sqlite3 *db, With *p); #if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) SQLITE_PRIVATE void sqlite3MaterializeView(Parse*, Table*, Expr*, ExprList*,Expr*,int); @@ -19809,6 +20220,7 @@ SQLITE_PRIVATE SrcList *sqlite3TriggerStepSrc(Parse*, TriggerStep*); #endif SQLITE_PRIVATE int sqlite3JoinType(Parse*, Token*, Token*, Token*); +SQLITE_PRIVATE int sqlite3ColumnIndex(Table *pTab, const char *zCol); SQLITE_PRIVATE void sqlite3SetJoinExpr(Expr*,int); SQLITE_PRIVATE void sqlite3CreateForeignKey(Parse*, ExprList*, Token*, ExprList*, int); SQLITE_PRIVATE void sqlite3DeferForeignKey(Parse*, int); @@ -19831,7 +20243,6 @@ SQLITE_PRIVATE void sqlite3FixInit(DbFixer*, Parse*, int, const char*, const Tok SQLITE_PRIVATE int sqlite3FixSrcList(DbFixer*, SrcList*); SQLITE_PRIVATE int sqlite3FixSelect(DbFixer*, Select*); SQLITE_PRIVATE int sqlite3FixExpr(DbFixer*, Expr*); -SQLITE_PRIVATE int sqlite3FixExprList(DbFixer*, ExprList*); SQLITE_PRIVATE int sqlite3FixTriggerStep(DbFixer*, TriggerStep*); SQLITE_PRIVATE int sqlite3RealSameAsInt(double,sqlite3_int64); SQLITE_PRIVATE void sqlite3Int64ToText(i64,char*); @@ -19894,6 +20305,7 @@ SQLITE_PRIVATE int sqlite3Atoi64(const char*, i64*, int, u8); SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char*, i64*); SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3*, int, const char*,...); SQLITE_PRIVATE void sqlite3Error(sqlite3*,int); +SQLITE_PRIVATE void sqlite3ErrorClear(sqlite3*); SQLITE_PRIVATE void sqlite3SystemError(sqlite3*,int); SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3*, const char *z, int n); SQLITE_PRIVATE u8 sqlite3HexToInt(int h); @@ -19903,7 +20315,7 @@ SQLITE_PRIVATE int sqlite3TwoPartName(Parse *, Token *, Token *, Token **); SQLITE_PRIVATE const char *sqlite3ErrName(int); #endif -#ifdef SQLITE_ENABLE_DESERIALIZE +#ifndef SQLITE_OMIT_DESERIALIZE SQLITE_PRIVATE int sqlite3MemdbInit(void); #endif @@ -19954,10 +20366,12 @@ SQLITE_PRIVATE void 
sqlite3ValueApplyAffinity(sqlite3_value *, u8, u8); SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[]; SQLITE_PRIVATE const char sqlite3StrBINARY[]; SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[]; +SQLITE_PRIVATE const unsigned char *sqlite3aLTb; +SQLITE_PRIVATE const unsigned char *sqlite3aEQb; +SQLITE_PRIVATE const unsigned char *sqlite3aGTb; SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[]; SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config; SQLITE_PRIVATE FuncDefHash sqlite3BuiltinFunctions; -SQLITE_API extern u32 sqlite3_unsupported_selecttrace; #ifndef SQLITE_OMIT_WSD SQLITE_PRIVATE int sqlite3PendingByte; #endif @@ -19976,6 +20390,7 @@ SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3*, int); SQLITE_PRIVATE void sqlite3CodeRhsOfIN(Parse*, Expr*, int); SQLITE_PRIVATE int sqlite3CodeSubselect(Parse*, Expr*); SQLITE_PRIVATE void sqlite3SelectPrep(Parse*, Select*, NameContext*); +SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse*, SrcItem*); SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p); SQLITE_PRIVATE int sqlite3MatchEName( const struct ExprList_item*, @@ -19993,6 +20408,7 @@ SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy(Parse*, Select*, ExprList*, const SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *, Table *, int, int); SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *, Token *); SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *, SrcList *); +SQLITE_PRIVATE void sqlite3AlterDropColumn(Parse*, SrcList*, Token*); SQLITE_PRIVATE void *sqlite3RenameTokenMap(Parse*, void*, Token*); SQLITE_PRIVATE void sqlite3RenameTokenRemap(Parse*, void *pTo, void *pFrom); SQLITE_PRIVATE void sqlite3RenameExprUnmap(Parse*, Expr*); @@ -20016,6 +20432,7 @@ SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo*); SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoRef(KeyInfo*); SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse*, Index*); SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoFromExprList(Parse*, ExprList*, int, int); +SQLITE_PRIVATE const char *sqlite3SelectOpName(int); SQLITE_PRIVATE int sqlite3HasExplicitNulls(Parse*, ExprList*); #ifdef SQLITE_DEBUG @@ -20146,6 +20563,7 @@ SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context*); SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe*, const char*, int); SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *, sqlite3_stmt *); SQLITE_PRIVATE void sqlite3ParserReset(Parse*); +SQLITE_PRIVATE void *sqlite3ParserAddCleanup(Parse*,void(*)(sqlite3*,void*),void*); #ifdef SQLITE_ENABLE_NORMALIZE SQLITE_PRIVATE char *sqlite3Normalize(Vdbe*, const char*); #endif @@ -20160,23 +20578,32 @@ SQLITE_PRIVATE int sqlite3Checkpoint(sqlite3*, int, int, int*, int*); SQLITE_PRIVATE int sqlite3WalDefaultHook(void*,sqlite3*,const char*,int); #endif #ifndef SQLITE_OMIT_CTE -SQLITE_PRIVATE With *sqlite3WithAdd(Parse*,With*,Token*,ExprList*,Select*); +SQLITE_PRIVATE Cte *sqlite3CteNew(Parse*,Token*,ExprList*,Select*,u8); +SQLITE_PRIVATE void sqlite3CteDelete(sqlite3*,Cte*); +SQLITE_PRIVATE With *sqlite3WithAdd(Parse*,With*,Cte*); SQLITE_PRIVATE void sqlite3WithDelete(sqlite3*,With*); -SQLITE_PRIVATE void sqlite3WithPush(Parse*, With*, u8); +SQLITE_PRIVATE With *sqlite3WithPush(Parse*, With*, u8); #else -#define sqlite3WithPush(x,y,z) -#define sqlite3WithDelete(x,y) +# define sqlite3CteNew(P,T,E,S) ((void*)0) +# define sqlite3CteDelete(D,C) +# define sqlite3CteWithAdd(P,W,C) ((void*)0) +# define sqlite3WithDelete(x,y) +# define sqlite3WithPush(x,y,z) #endif #ifndef SQLITE_OMIT_UPSERT -SQLITE_PRIVATE Upsert 
*sqlite3UpsertNew(sqlite3*,ExprList*,Expr*,ExprList*,Expr*); +SQLITE_PRIVATE Upsert *sqlite3UpsertNew(sqlite3*,ExprList*,Expr*,ExprList*,Expr*,Upsert*); SQLITE_PRIVATE void sqlite3UpsertDelete(sqlite3*,Upsert*); SQLITE_PRIVATE Upsert *sqlite3UpsertDup(sqlite3*,Upsert*); SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget(Parse*,SrcList*,Upsert*); SQLITE_PRIVATE void sqlite3UpsertDoUpdate(Parse*,Upsert*,Table*,Index*,int); +SQLITE_PRIVATE Upsert *sqlite3UpsertOfIndex(Upsert*,Index*); +SQLITE_PRIVATE int sqlite3UpsertNextIsIPK(Upsert*); #else -#define sqlite3UpsertNew(v,w,x,y,z) ((Upsert*)0) +#define sqlite3UpsertNew(u,v,w,x,y,z) ((Upsert*)0) #define sqlite3UpsertDelete(x,y) -#define sqlite3UpsertDup(x,y) ((Upsert*)0) +#define sqlite3UpsertDup(x,y) ((Upsert*)0) +#define sqlite3UpsertOfIndex(x,y) ((Upsert*)0) +#define sqlite3UpsertNextIsIPK(x) 0 #endif @@ -20408,7 +20835,7 @@ SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[] = { 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, - 252,253,254,255 + 252,253,254,255, #endif #ifdef SQLITE_EBCDIC 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* 0x */ @@ -20428,7 +20855,35 @@ SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[] = { 224,225,162,163,164,165,166,167,168,169,234,235,236,237,238,239, /* Ex */ 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255, /* Fx */ #endif +/* All of the upper-to-lower conversion data is above. The following +** 18 integers are completely unrelated. They are appended to the +** sqlite3UpperToLower[] array to avoid UBSAN warnings. Here's what is +** going on: +** +** The SQL comparison operators (<>, =, >, <=, <, and >=) are implemented +** by invoking sqlite3MemCompare(A,B) which compares values A and B and +** returns negative, zero, or positive if A is less then, equal to, or +** greater than B, respectively. Then the true false results is found by +** consulting sqlite3aLTb[opcode], sqlite3aEQb[opcode], or +** sqlite3aGTb[opcode] depending on whether the result of compare(A,B) +** is negative, zero, or positive, where opcode is the specific opcode. +** The only works because the comparison opcodes are consecutive and in +** this order: NE EQ GT LE LT GE. Various assert()s throughout the code +** ensure that is the case. +** +** These elements must be appended to another array. Otherwise the +** index (here shown as [256-OP_Ne]) would be out-of-bounds and thus +** be undefined behavior. That's goofy, but the C-standards people thought +** it was a good idea, so here we are. 
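The three rows appended just below implement a branch-free mapping from the sign of sqlite3MemCompare() to a boolean, one entry per comparison opcode in the fixed order NE, EQ, GT, LE, LT, GE. A self-contained illustration of the same scheme, using local arrays instead of the tail of sqlite3UpperToLower[]; the opcode values here are made up, only their relative order matters:

    #include <stdio.h>

    enum { OP_Ne, OP_Eq, OP_Gt, OP_Le, OP_Lt, OP_Ge };

    /*                                    NE EQ GT LE LT GE */
    static const unsigned char aLTb[] = {  1, 0, 0, 1, 1, 0 }; /* compare(A,B)<0 */
    static const unsigned char aEQb[] = {  0, 1, 0, 1, 0, 1 }; /* compare(A,B)==0 */
    static const unsigned char aGTb[] = {  1, 0, 1, 0, 0, 1 }; /* compare(A,B)>0 */

    /* Turn a comparison result into the truth value of "A op B" */
    static int opResult(int op, int cmp){
      if( cmp<0 ) return aLTb[op];
      if( cmp==0 ) return aEQb[op];
      return aGTb[op];
    }

    int main(void){
      printf("%d\n", opResult(OP_Le, -1));  /* 5<=7: prints 1 */
      printf("%d\n", opResult(OP_Gt, 0));   /* 5>5:  prints 0 */
      return 0;
    }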
+*/ +/* NE EQ GT LE LT GE */ + 1, 0, 0, 1, 1, 0, /* aLTb[]: Use when compare(A,B) less than zero */ + 0, 1, 0, 1, 0, 1, /* aEQb[]: Use when compare(A,B) equals zero */ + 1, 0, 1, 0, 0, 1 /* aGTb[]: Use when compare(A,B) greater than zero*/ }; +SQLITE_PRIVATE const unsigned char *sqlite3aLTb = &sqlite3UpperToLower[256-OP_Ne]; +SQLITE_PRIVATE const unsigned char *sqlite3aEQb = &sqlite3UpperToLower[256+6-OP_Ne]; +SQLITE_PRIVATE const unsigned char *sqlite3aGTb = &sqlite3UpperToLower[256+12-OP_Ne]; /* ** The following 256 byte lookup table is used to support SQLites built-in @@ -20622,7 +21077,7 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = { 0, /* xVdbeBranch */ 0, /* pVbeBranchArg */ #endif -#ifdef SQLITE_ENABLE_DESERIALIZE +#ifndef SQLITE_OMIT_DESERIALIZE SQLITE_MEMDB_DEFAULT_MAXSIZE, /* mxMemdbSize */ #endif #ifndef SQLITE_UNTESTABLE @@ -20672,9 +21127,10 @@ SQLITE_PRIVATE int sqlite3PendingByte = 0x40000000; #endif /* -** Flags for select tracing and the ".selecttrace" macro of the CLI +** Tracing flags set by SQLITE_TESTCTRL_TRACEFLAGS. */ -SQLITE_API u32 sqlite3_unsupported_selecttrace = 0; +SQLITE_PRIVATE u32 sqlite3SelectTrace = 0; +SQLITE_PRIVATE u32 sqlite3WhereTrace = 0; /* #include "opcodes.h" */ /* @@ -20798,7 +21254,8 @@ struct VdbeCursor { Bool isEphemeral:1; /* True for an ephemeral table */ Bool useRandomRowid:1; /* Generate new record numbers semi-randomly */ Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */ - Bool seekHit:1; /* See the OP_SeekHit and OP_IfNoHope opcodes */ + Bool hasBeenDuped:1; /* This cursor was source or target of OP_OpenDup */ + u16 seekHit; /* See the OP_SeekHit and OP_IfNoHope opcodes */ Btree *pBtx; /* Separate file holding temporary table */ i64 seqCount; /* Sequence counter */ u32 *aAltMap; /* Mapping from table to index column numbers */ @@ -21093,7 +21550,7 @@ struct Vdbe { Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */ Parse *pParse; /* Parsing context used to create this Vdbe */ ynVar nVar; /* Number of entries in aVar[] */ - u32 magic; /* Magic number for sanity checking */ + u32 iVdbeMagic; /* Magic number defining state of the SQL statement */ int nMem; /* Number of memory locations currently allocated */ int nCursor; /* Number of slots in apCsr[] */ u32 cacheCtr; /* VdbeCursor row cache generation counter */ @@ -21183,6 +21640,7 @@ struct PreUpdate { UnpackedRecord *pUnpacked; /* Unpacked version of aRecord[] */ UnpackedRecord *pNewUnpacked; /* Unpacked version of new.* record */ int iNewReg; /* Register for new.* values */ + int iBlobWrite; /* Value returned by preupdate_blobwrite() */ i64 iKey1; /* First key value passed to hook */ i64 iKey2; /* Second key value passed to hook */ Mem *aNew; /* Array of new.* values */ @@ -21226,7 +21684,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemCopy(Mem*, const Mem*); SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem*, const Mem*, int); SQLITE_PRIVATE void sqlite3VdbeMemMove(Mem*, Mem*); SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem*); -SQLITE_PRIVATE int sqlite3VdbeMemSetStr(Mem*, const char*, int, u8, void(*)(void*)); +SQLITE_PRIVATE int sqlite3VdbeMemSetStr(Mem*, const char*, i64, u8, void(*)(void*)); SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem*, i64); #ifdef SQLITE_OMIT_FLOATING_POINT # define sqlite3VdbeMemSetDouble sqlite3VdbeMemSetInt64 @@ -21271,7 +21729,8 @@ SQLITE_PRIVATE void sqlite3VdbeFrameMemDel(void*); /* Destructor on Mem */ SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame*); /* Actually deletes the Frame */ SQLITE_PRIVATE int 
sqlite3VdbeFrameRestore(VdbeFrame *); #ifdef SQLITE_ENABLE_PREUPDATE_HOOK -SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(Vdbe*,VdbeCursor*,int,const char*,Table*,i64,int); +SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( + Vdbe*,VdbeCursor*,int,const char*,Table*,i64,int,int); #endif SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p); @@ -22598,6 +23057,7 @@ static int isDate( int eType; memset(p, 0, sizeof(*p)); if( argc==0 ){ + if( !sqlite3NotPureFunc(context) ) return 1; return setDateTimeToCurrent(context, p); } if( (eType = sqlite3_value_type(argv[0]))==SQLITE_FLOAT @@ -23098,6 +23558,8 @@ SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){ #ifdef SQLITE_TEST if( op!=SQLITE_FCNTL_COMMIT_PHASETWO && op!=SQLITE_FCNTL_LOCK_TIMEOUT + && op!=SQLITE_FCNTL_CKPT_DONE + && op!=SQLITE_FCNTL_CKPT_START ){ /* Faults are not injected into COMMIT_PHASETWO because, assuming SQLite ** is using a regular VFS, it is called after the corresponding @@ -23108,7 +23570,12 @@ SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){ ** The core must call OsFileControl() though, not OsFileControlHint(), ** as if a custom VFS (e.g. zipvfs) returns an error here, it probably ** means the commit really has failed and an error should be returned - ** to the user. */ + ** to the user. + ** + ** The CKPT_DONE and CKPT_START file-controls are write-only signals + ** to the cksumvfs. Their return code is meaningless and is ignored + ** by the SQLite core, so there is no point in simulating OOMs for them. + */ DO_OS_MALLOC_TEST(id); } #endif @@ -23191,7 +23658,7 @@ SQLITE_PRIVATE int sqlite3OsOpen( SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ DO_OS_MALLOC_TEST(0); assert( dirSync==0 || dirSync==1 ); - return pVfs->xDelete(pVfs, zPath, dirSync); + return pVfs->xDelete!=0 ? 
pVfs->xDelete(pVfs, zPath, dirSync) : SQLITE_OK; } SQLITE_PRIVATE int sqlite3OsAccess( sqlite3_vfs *pVfs, @@ -23214,6 +23681,8 @@ SQLITE_PRIVATE int sqlite3OsFullPathname( } #ifndef SQLITE_OMIT_LOAD_EXTENSION SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *pVfs, const char *zPath){ + assert( zPath!=0 ); + assert( strlen(zPath)<=SQLITE_MAX_PATHLEN ); /* tag-20210611-1 */ return pVfs->xDlOpen(pVfs, zPath); } SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ @@ -27481,7 +27950,6 @@ SQLITE_PRIVATE int sqlite3MallocInit(void){ if( sqlite3GlobalConfig.m.xMalloc==0 ){ sqlite3MemSetDefault(); } - memset(&mem0, 0, sizeof(mem0)); mem0.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); if( sqlite3GlobalConfig.pPage==0 || sqlite3GlobalConfig.szPage<512 || sqlite3GlobalConfig.nPage<=0 ){ @@ -27794,12 +28262,17 @@ SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){ if( nOld==nNew ){ pNew = pOld; }else if( sqlite3GlobalConfig.bMemstat ){ + sqlite3_int64 nUsed; sqlite3_mutex_enter(mem0.mutex); sqlite3StatusHighwater(SQLITE_STATUS_MALLOC_SIZE, (int)nBytes); nDiff = nNew - nOld; - if( nDiff>0 && sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED) >= + if( nDiff>0 && (nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED)) >= mem0.alarmThreshold-nDiff ){ sqlite3MallocAlarm(nDiff); + if( mem0.hardLimit>0 && nUsed >= mem0.hardLimit - nDiff ){ + sqlite3_mutex_leave(mem0.mutex); + return 0; + } } pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew); #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT @@ -28106,12 +28579,15 @@ SQLITE_PRIVATE void sqlite3OomClear(sqlite3 *db){ } /* -** Take actions at the end of an API call to indicate an OOM error +** Take actions at the end of an API call to deal with error codes. */ -static SQLITE_NOINLINE int apiOomError(sqlite3 *db){ - sqlite3OomClear(db); - sqlite3Error(db, SQLITE_NOMEM); - return SQLITE_NOMEM_BKPT; +static SQLITE_NOINLINE int apiHandleError(sqlite3 *db, int rc){ + if( db->mallocFailed || rc==SQLITE_IOERR_NOMEM ){ + sqlite3OomClear(db); + sqlite3Error(db, SQLITE_NOMEM); + return SQLITE_NOMEM_BKPT; + } + return rc & db->errMask; } /* @@ -28133,8 +28609,8 @@ SQLITE_PRIVATE int sqlite3ApiExit(sqlite3* db, int rc){ */ assert( db!=0 ); assert( sqlite3_mutex_held(db->mutex) ); - if( db->mallocFailed || rc==SQLITE_IOERR_NOMEM ){ - return apiOomError(db); + if( db->mallocFailed || rc ){ + return apiHandleError(db, rc); } return rc & db->errMask; } @@ -28172,7 +28648,7 @@ SQLITE_PRIVATE int sqlite3ApiExit(sqlite3* db, int rc){ #define etSQLESCAPE2 10 /* Strings with '\'' doubled and enclosed in '', NULL pointers replaced by SQL NULL. %Q */ #define etTOKEN 11 /* a pointer to a Token structure */ -#define etSRCLIST 12 /* a pointer to a SrcList */ +#define etSRCITEM 12 /* a pointer to a SrcItem */ #define etPOINTER 13 /* The %p conversion */ #define etSQLESCAPE3 14 /* %w -> Strings with '\"' doubled */ #define etORDINAL 15 /* %r -> 1st, 2nd, 3rd, 4th, etc. English only */ @@ -28238,10 +28714,16 @@ static const et_info fmtinfo[] = { /* All the rest are undocumented and are for internal use only */ { 'T', 0, 0, etTOKEN, 0, 0 }, - { 'S', 0, 0, etSRCLIST, 0, 0 }, + { 'S', 0, 0, etSRCITEM, 0, 0 }, { 'r', 10, 1, etORDINAL, 0, 0 }, }; +/* Notes: +** +** %S Takes a pointer to SrcItem. 
Shows name or database.name +** %!S Like %S but prefer the zName over the zAlias +*/ + /* Floating point constants used for rounding */ static const double arRound[] = { 5.0e-01, 5.0e-02, 5.0e-03, 5.0e-04, 5.0e-05, @@ -28570,11 +29052,10 @@ SQLITE_API void sqlite3_str_vappendf( v = va_arg(ap,int); } if( v<0 ){ - if( v==SMALLEST_INT64 ){ - longvalue = ((u64)1)<<63; - }else{ - longvalue = -v; - } + testcase( v==SMALLEST_INT64 ); + testcase( v==(-1) ); + longvalue = ~v; + longvalue++; prefix = '-'; }else{ longvalue = v; @@ -28997,21 +29478,24 @@ SQLITE_API void sqlite3_str_vappendf( length = width = 0; break; } - case etSRCLIST: { - SrcList *pSrc; - int k; - struct SrcList_item *pItem; + case etSRCITEM: { + SrcItem *pItem; if( (pAccum->printfFlags & SQLITE_PRINTF_INTERNAL)==0 ) return; - pSrc = va_arg(ap, SrcList*); - k = va_arg(ap, int); - pItem = &pSrc->a[k]; + pItem = va_arg(ap, SrcItem*); assert( bArgList==0 ); - assert( k>=0 && knSrc ); - if( pItem->zDatabase ){ - sqlite3_str_appendall(pAccum, pItem->zDatabase); - sqlite3_str_append(pAccum, ".", 1); + if( pItem->zAlias && !flag_altform2 ){ + sqlite3_str_appendall(pAccum, pItem->zAlias); + }else if( pItem->zName ){ + if( pItem->zDatabase ){ + sqlite3_str_appendall(pAccum, pItem->zDatabase); + sqlite3_str_append(pAccum, ".", 1); + } + sqlite3_str_appendall(pAccum, pItem->zName); + }else if( pItem->zAlias ){ + sqlite3_str_appendall(pAccum, pItem->zAlias); + }else if( ALWAYS(pItem->pSelect) ){ + sqlite3_str_appendf(pAccum, "SUBQUERY %u", pItem->pSelect->selId); } - sqlite3_str_appendall(pAccum, pItem->zName); length = width = 0; break; } @@ -29065,7 +29549,7 @@ static int sqlite3StrAccumEnlarge(StrAccum *p, int N){ }else{ char *zOld = isMalloced(p) ? p->zText : 0; i64 szNew = p->nChar; - szNew += N + 1; + szNew += (sqlite3_int64)N + 1; if( szNew+p->nChar<=p->mxAlloc ){ /* Force exponential buffer size growth as long as it does not overflow, ** to avoid having to call this routine too often */ @@ -29568,7 +30052,10 @@ SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView *pView, const With *pWith, u8 m } sqlite3_str_appendf(&x, ")"); } - sqlite3_str_appendf(&x, " AS"); + if( pCte->pUse ){ + sqlite3_str_appendf(&x, " (pUse=0x%p, nUse=%d)", pCte->pUse, + pCte->pUse->nUse); + } sqlite3StrAccumFinish(&x); sqlite3TreeViewItem(pView, zLine, inCte-1); sqlite3TreeViewSelect(pView, pCte->pSelect, 0); @@ -29584,29 +30071,25 @@ SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView *pView, const With *pWith, u8 m SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc){ int i; for(i=0; inSrc; i++){ - const struct SrcList_item *pItem = &pSrc->a[i]; + const SrcItem *pItem = &pSrc->a[i]; StrAccum x; char zLine[100]; sqlite3StrAccumInit(&x, 0, zLine, sizeof(zLine), 0); - sqlite3_str_appendf(&x, "{%d:*}", pItem->iCursor); - if( pItem->zDatabase ){ - sqlite3_str_appendf(&x, " %s.%s", pItem->zDatabase, pItem->zName); - }else if( pItem->zName ){ - sqlite3_str_appendf(&x, " %s", pItem->zName); - } + x.printfFlags |= SQLITE_PRINTF_INTERNAL; + sqlite3_str_appendf(&x, "{%d:*} %!S", pItem->iCursor, pItem); if( pItem->pTab ){ sqlite3_str_appendf(&x, " tab=%Q nCol=%d ptr=%p used=%llx", pItem->pTab->zName, pItem->pTab->nCol, pItem->pTab, pItem->colUsed); } - if( pItem->zAlias ){ - sqlite3_str_appendf(&x, " (AS %s)", pItem->zAlias); - } if( pItem->fg.jointype & JT_LEFT ){ sqlite3_str_appendf(&x, " LEFT-JOIN"); } if( pItem->fg.fromDDL ){ sqlite3_str_appendf(&x, " DDL"); } + if( pItem->fg.isCte ){ + sqlite3_str_appendf(&x, " CteUse=0x%p", pItem->u2.pCteUse); 
+ } sqlite3StrAccumFinish(&x); sqlite3TreeViewItem(pView, zLine, inSrc-1); if( pItem->pSelect ){ @@ -30164,6 +30647,14 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m sqlite3TreeViewExpr(pView, pExpr->pLeft, 0); break; } + case TK_ERROR: { + Expr tmp; + sqlite3TreeViewLine(pView, "ERROR"); + tmp = *pExpr; + tmp.op = pExpr->op2; + sqlite3TreeViewExpr(pView, &tmp, 0); + break; + } default: { sqlite3TreeViewLine(pView, "op=%d", pExpr->op); break; @@ -30313,11 +30804,16 @@ SQLITE_API void sqlite3_randomness(int N, void *pBuf){ ** number generator) not as an encryption device. */ if( !wsdPrng.isInit ){ + sqlite3_vfs *pVfs = sqlite3_vfs_find(0); int i; char k[256]; wsdPrng.j = 0; wsdPrng.i = 0; - sqlite3OsRandomness(sqlite3_vfs_find(0), 256, k); + if( NEVER(pVfs==0) ){ + memset(k, 0, sizeof(k)); + }else{ + sqlite3OsRandomness(pVfs, 256, k); + } for(i=0; i<256; i++){ wsdPrng.s[i] = (u8)i; } @@ -31303,6 +31799,16 @@ SQLITE_PRIVATE void sqlite3Error(sqlite3 *db, int err_code){ if( err_code || db->pErr ) sqlite3ErrorFinish(db, err_code); } +/* +** The equivalent of sqlite3Error(db, SQLITE_OK). Clear the error state +** and error message. +*/ +SQLITE_PRIVATE void sqlite3ErrorClear(sqlite3 *db){ + assert( db!=0 ); + db->errCode = SQLITE_OK; + if( db->pErr ) sqlite3ValueSetNull(db->pErr); +} + /* ** Load the sqlite3.iSysErrno field if that is an appropriate thing ** to do based on the SQLite error code in rc. @@ -31870,6 +32376,7 @@ SQLITE_PRIVATE int sqlite3Atoi64(const char *zNum, i64 *pNum, int length, u8 enc incr = 1; }else{ incr = 2; + length &= ~1; assert( SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 ); for(i=3-enc; i=r[P1]"), - /* 58 */ "ElseNotEq" OpHelp(""), + /* 58 */ "ElseEq" OpHelp(""), /* 59 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"), /* 60 */ "IncrVacuum" OpHelp(""), /* 61 */ "VNext" OpHelp(""), @@ -33248,102 +33755,106 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 77 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"), /* 78 */ "SCopy" OpHelp("r[P2]=r[P1]"), /* 79 */ "IntCopy" OpHelp("r[P2]=r[P1]"), - /* 80 */ "ResultRow" OpHelp("output=r[P1@P2]"), - /* 81 */ "CollSeq" OpHelp(""), - /* 82 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"), - /* 83 */ "RealAffinity" OpHelp(""), - /* 84 */ "Cast" OpHelp("affinity(r[P1])"), - /* 85 */ "Permutation" OpHelp(""), - /* 86 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"), - /* 87 */ "IsTrue" OpHelp("r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4"), - /* 88 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"), - /* 89 */ "Column" OpHelp("r[P3]=PX"), - /* 90 */ "Affinity" OpHelp("affinity(r[P1@P2])"), - /* 91 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), - /* 92 */ "Count" OpHelp("r[P2]=count()"), - /* 93 */ "ReadCookie" OpHelp(""), - /* 94 */ "SetCookie" OpHelp(""), - /* 95 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), - /* 96 */ "OpenRead" OpHelp("root=P2 iDb=P3"), - /* 97 */ "OpenWrite" OpHelp("root=P2 iDb=P3"), - /* 98 */ "OpenDup" OpHelp(""), - /* 99 */ "OpenAutoindex" OpHelp("nColumn=P2"), - /* 100 */ "OpenEphemeral" OpHelp("nColumn=P2"), - /* 101 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"), - /* 102 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"), - /* 103 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<>r[P1]"), - /* 105 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"), - /* 106 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"), - /* 107 */ "Multiply" OpHelp("r[P3]=r[P1]*r[P2]"), - /* 108 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"), - /* 109 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"), - /* 110 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"), - /* 111 */ "SorterOpen" OpHelp(""), - /* 112 
*/ "BitNot" OpHelp("r[P2]= ~r[P1]"), - /* 113 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"), - /* 114 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"), - /* 115 */ "String8" OpHelp("r[P2]='P4'"), - /* 116 */ "Close" OpHelp(""), - /* 117 */ "ColumnsUsed" OpHelp(""), - /* 118 */ "SeekHit" OpHelp("seekHit=P2"), - /* 119 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"), - /* 120 */ "NewRowid" OpHelp("r[P2]=rowid"), - /* 121 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"), - /* 122 */ "Delete" OpHelp(""), - /* 123 */ "ResetCount" OpHelp(""), - /* 124 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"), - /* 125 */ "SorterData" OpHelp("r[P2]=data"), - /* 126 */ "RowData" OpHelp("r[P2]=data"), - /* 127 */ "Rowid" OpHelp("r[P2]=rowid"), - /* 128 */ "NullRow" OpHelp(""), - /* 129 */ "SeekEnd" OpHelp(""), - /* 130 */ "IdxInsert" OpHelp("key=r[P2]"), - /* 131 */ "SorterInsert" OpHelp("key=r[P2]"), - /* 132 */ "IdxDelete" OpHelp("key=r[P2@P3]"), - /* 133 */ "DeferredSeek" OpHelp("Move P3 to P1.rowid if needed"), - /* 134 */ "IdxRowid" OpHelp("r[P2]=rowid"), - /* 135 */ "FinishSeek" OpHelp(""), - /* 136 */ "Destroy" OpHelp(""), - /* 137 */ "Clear" OpHelp(""), - /* 138 */ "ResetSorter" OpHelp(""), - /* 139 */ "CreateBtree" OpHelp("r[P2]=root iDb=P1 flags=P3"), - /* 140 */ "SqlExec" OpHelp(""), - /* 141 */ "ParseSchema" OpHelp(""), - /* 142 */ "LoadAnalysis" OpHelp(""), - /* 143 */ "DropTable" OpHelp(""), - /* 144 */ "DropIndex" OpHelp(""), - /* 145 */ "DropTrigger" OpHelp(""), - /* 146 */ "IntegrityCk" OpHelp(""), - /* 147 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), - /* 148 */ "Param" OpHelp(""), - /* 149 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), - /* 150 */ "Real" OpHelp("r[P2]=P4"), - /* 151 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"), - /* 152 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), - /* 153 */ "AggInverse" OpHelp("accum=r[P3] inverse(r[P2@P5])"), - /* 154 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 155 */ "AggStep1" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 156 */ "AggValue" OpHelp("r[P3]=value N=P2"), - /* 157 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), - /* 158 */ "Expire" OpHelp(""), - /* 159 */ "CursorLock" OpHelp(""), - /* 160 */ "CursorUnlock" OpHelp(""), - /* 161 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), - /* 162 */ "VBegin" OpHelp(""), - /* 163 */ "VCreate" OpHelp(""), - /* 164 */ "VDestroy" OpHelp(""), - /* 165 */ "VOpen" OpHelp(""), - /* 166 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), - /* 167 */ "VRename" OpHelp(""), - /* 168 */ "Pagecount" OpHelp(""), - /* 169 */ "MaxPgcnt" OpHelp(""), - /* 170 */ "Trace" OpHelp(""), - /* 171 */ "CursorHint" OpHelp(""), - /* 172 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"), - /* 173 */ "Noop" OpHelp(""), - /* 174 */ "Explain" OpHelp(""), - /* 175 */ "Abortable" OpHelp(""), + /* 80 */ "ChngCntRow" OpHelp("output=r[P1]"), + /* 81 */ "ResultRow" OpHelp("output=r[P1@P2]"), + /* 82 */ "CollSeq" OpHelp(""), + /* 83 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"), + /* 84 */ "RealAffinity" OpHelp(""), + /* 85 */ "Cast" OpHelp("affinity(r[P1])"), + /* 86 */ "Permutation" OpHelp(""), + /* 87 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"), + /* 88 */ "IsTrue" OpHelp("r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4"), + /* 89 */ "ZeroOrNull" OpHelp("r[P2] = 0 OR NULL"), + /* 90 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"), + /* 91 */ "Column" OpHelp("r[P3]=PX"), + /* 92 */ "Affinity" OpHelp("affinity(r[P1@P2])"), + /* 93 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), + /* 94 */ "Count" 
OpHelp("r[P2]=count()"), + /* 95 */ "ReadCookie" OpHelp(""), + /* 96 */ "SetCookie" OpHelp(""), + /* 97 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), + /* 98 */ "OpenRead" OpHelp("root=P2 iDb=P3"), + /* 99 */ "OpenWrite" OpHelp("root=P2 iDb=P3"), + /* 100 */ "OpenDup" OpHelp(""), + /* 101 */ "OpenAutoindex" OpHelp("nColumn=P2"), + /* 102 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"), + /* 103 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"), + /* 104 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<>r[P1]"), + /* 106 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"), + /* 107 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"), + /* 108 */ "Multiply" OpHelp("r[P3]=r[P1]*r[P2]"), + /* 109 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"), + /* 110 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"), + /* 111 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"), + /* 112 */ "OpenEphemeral" OpHelp("nColumn=P2"), + /* 113 */ "BitNot" OpHelp("r[P2]= ~r[P1]"), + /* 114 */ "SorterOpen" OpHelp(""), + /* 115 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"), + /* 116 */ "String8" OpHelp("r[P2]='P4'"), + /* 117 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"), + /* 118 */ "Close" OpHelp(""), + /* 119 */ "ColumnsUsed" OpHelp(""), + /* 120 */ "SeekScan" OpHelp("Scan-ahead up to P1 rows"), + /* 121 */ "SeekHit" OpHelp("set P2<=seekHit<=P3"), + /* 122 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"), + /* 123 */ "NewRowid" OpHelp("r[P2]=rowid"), + /* 124 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"), + /* 125 */ "RowCell" OpHelp(""), + /* 126 */ "Delete" OpHelp(""), + /* 127 */ "ResetCount" OpHelp(""), + /* 128 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"), + /* 129 */ "SorterData" OpHelp("r[P2]=data"), + /* 130 */ "RowData" OpHelp("r[P2]=data"), + /* 131 */ "Rowid" OpHelp("r[P2]=rowid"), + /* 132 */ "NullRow" OpHelp(""), + /* 133 */ "SeekEnd" OpHelp(""), + /* 134 */ "IdxInsert" OpHelp("key=r[P2]"), + /* 135 */ "SorterInsert" OpHelp("key=r[P2]"), + /* 136 */ "IdxDelete" OpHelp("key=r[P2@P3]"), + /* 137 */ "DeferredSeek" OpHelp("Move P3 to P1.rowid if needed"), + /* 138 */ "IdxRowid" OpHelp("r[P2]=rowid"), + /* 139 */ "FinishSeek" OpHelp(""), + /* 140 */ "Destroy" OpHelp(""), + /* 141 */ "Clear" OpHelp(""), + /* 142 */ "ResetSorter" OpHelp(""), + /* 143 */ "CreateBtree" OpHelp("r[P2]=root iDb=P1 flags=P3"), + /* 144 */ "SqlExec" OpHelp(""), + /* 145 */ "ParseSchema" OpHelp(""), + /* 146 */ "LoadAnalysis" OpHelp(""), + /* 147 */ "DropTable" OpHelp(""), + /* 148 */ "DropIndex" OpHelp(""), + /* 149 */ "DropTrigger" OpHelp(""), + /* 150 */ "IntegrityCk" OpHelp(""), + /* 151 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), + /* 152 */ "Real" OpHelp("r[P2]=P4"), + /* 153 */ "Param" OpHelp(""), + /* 154 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), + /* 155 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"), + /* 156 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), + /* 157 */ "AggInverse" OpHelp("accum=r[P3] inverse(r[P2@P5])"), + /* 158 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 159 */ "AggStep1" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 160 */ "AggValue" OpHelp("r[P3]=value N=P2"), + /* 161 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), + /* 162 */ "Expire" OpHelp(""), + /* 163 */ "CursorLock" OpHelp(""), + /* 164 */ "CursorUnlock" OpHelp(""), + /* 165 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), + /* 166 */ "VBegin" OpHelp(""), + /* 167 */ "VCreate" OpHelp(""), + /* 168 */ "VDestroy" OpHelp(""), + /* 169 */ "VOpen" OpHelp(""), + /* 170 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), + /* 171 */ "VRename" OpHelp(""), + /* 172 */ "Pagecount" 
OpHelp(""), + /* 173 */ "MaxPgcnt" OpHelp(""), + /* 174 */ "Trace" OpHelp(""), + /* 175 */ "CursorHint" OpHelp(""), + /* 176 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"), + /* 177 */ "Noop" OpHelp(""), + /* 178 */ "Explain" OpHelp(""), + /* 179 */ "Abortable" OpHelp(""), }; return azName[i]; } @@ -33475,7 +33986,8 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ # if defined(__APPLE__) && ((__MAC_OS_X_VERSION_MIN_REQUIRED > 1050) || \ (__IPHONE_OS_VERSION_MIN_REQUIRED > 2000)) # if (!defined(TARGET_OS_EMBEDDED) || (TARGET_OS_EMBEDDED==0)) \ - && (!defined(TARGET_IPHONE_SIMULATOR) || (TARGET_IPHONE_SIMULATOR==0)) + && (!defined(TARGET_IPHONE_SIMULATOR) || (TARGET_IPHONE_SIMULATOR==0))\ + && (!defined(TARGET_OS_MACCATALYST) || (TARGET_OS_MACCATALYST==0)) # undef HAVE_GETHOSTUUID # define HAVE_GETHOSTUUID 1 # else @@ -35095,6 +35607,9 @@ static int unixCheckReservedLock(sqlite3_file *id, int *pResOut){ return rc; } +/* Forward declaration*/ +static int unixSleep(sqlite3_vfs*,int); + /* ** Set a posix-advisory-lock. ** @@ -35124,7 +35639,7 @@ static int osSetPosixAdvisoryLock( ** generic posix, however, there is no such API. So we simply try the ** lock once every millisecond until either the timeout expires, or until ** the lock is obtained. */ - usleep(1000); + unixSleep(0,1000); rc = osFcntl(h,F_SETLK,pLock); tm--; } @@ -35695,6 +36210,7 @@ static int unixClose(sqlite3_file *id){ } sqlite3_mutex_leave(pInode->pLockMutex); releaseInodeInfo(pFile); + assert( pFile->pShm==0 ); rc = closeUnixFile(id); unixLeaveMutex(); return rc; @@ -36921,7 +37437,24 @@ static int unixRead( if( got==amt ){ return SQLITE_OK; }else if( got<0 ){ - /* lastErrno set by seekAndRead */ + /* pFile->lastErrno has been set by seekAndRead(). + ** Usually we return SQLITE_IOERR_READ here, though for some + ** kinds of errors we return SQLITE_IOERR_CORRUPTFS. The + ** SQLITE_IOERR_CORRUPTFS will be converted into SQLITE_CORRUPT + ** prior to returning to the application by the sqlite3ApiExit() + ** routine. + */ + switch( pFile->lastErrno ){ + case ERANGE: + case EIO: +#ifdef ENXIO + case ENXIO: +#endif +#ifdef EDEVERR + case EDEVERR: +#endif + return SQLITE_IOERR_CORRUPTFS; + } return SQLITE_IOERR_READ; }else{ storeLastErrno(pFile, 0); /* not a system error */ @@ -37480,6 +38013,7 @@ static void unixModeBit(unixFile *pFile, unsigned char mask, int *pArg){ /* Forward declaration */ static int unixGetTempname(int nBuf, char *zBuf); +static int unixFcntlExternalReader(unixFile*, int*); /* ** Information and control of an open file handle. 
@@ -37596,6 +38130,10 @@ static int unixFileControl(sqlite3_file *id, int op, void *pArg){ return proxyFileControl(id,op,pArg); } #endif /* SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) */ + + case SQLITE_FCNTL_EXTERNAL_READER: { + return unixFcntlExternalReader((unixFile*)id, (int*)pArg); + } } return SQLITE_NOTFOUND; } @@ -37805,6 +38343,7 @@ struct unixShmNode { char **apRegion; /* Array of mapped shared-memory regions */ int nRef; /* Number of unixShm objects pointing to this */ unixShm *pFirst; /* All unixShm objects pointing to this */ + int aLock[SQLITE_SHM_NLOCK]; /* # shared locks on slot, -1==excl lock */ #ifdef SQLITE_DEBUG u8 exclMask; /* Mask of exclusive locks held */ u8 sharedMask; /* Mask of shared locks held */ @@ -37840,6 +38379,40 @@ struct unixShm { #define UNIX_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */ #define UNIX_SHM_DMS (UNIX_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */ +/* +** Use F_GETLK to check whether or not there are any readers with open +** wal-mode transactions in other processes on database file pFile. If +** no error occurs, return SQLITE_OK and set (*piOut) to 1 if there are +** such transactions, or 0 otherwise. If an error occurs, return an +** SQLite error code. The final value of *piOut is undefined in this +** case. +*/ +static int unixFcntlExternalReader(unixFile *pFile, int *piOut){ + int rc = SQLITE_OK; + *piOut = 0; + if( pFile->pShm){ + unixShmNode *pShmNode = pFile->pShm->pShmNode; + struct flock f; + + memset(&f, 0, sizeof(f)); + f.l_type = F_WRLCK; + f.l_whence = SEEK_SET; + f.l_start = UNIX_SHM_BASE + 3; + f.l_len = SQLITE_SHM_NLOCK - 3; + + sqlite3_mutex_enter(pShmNode->pShmMutex); + if( osFcntl(pShmNode->hShm, F_GETLK, &f)<0 ){ + rc = SQLITE_IOERR_LOCK; + }else{ + *piOut = (f.l_type!=F_UNLCK); + } + sqlite3_mutex_leave(pShmNode->pShmMutex); + } + + return rc; +} + + /* ** Apply posix advisory locks for all bytes from ofst through ofst+n-1. ** @@ -38345,6 +38918,38 @@ shmpage_out: return rc; } +/* +** Check that the pShmNode->aLock[] array comports with the locking bitmasks +** held by each client. Return true if it does, or false otherwise. This +** is to be used in an assert(). e.g. +** +** assert( assertLockingArrayOk(pShmNode) ); +*/ +#ifdef SQLITE_DEBUG +static int assertLockingArrayOk(unixShmNode *pShmNode){ + unixShm *pX; + int aLock[SQLITE_SHM_NLOCK]; + assert( sqlite3_mutex_held(pShmNode->pShmMutex) ); + + memset(aLock, 0, sizeof(aLock)); + for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ + int i; + for(i=0; iexclMask & (1<sharedMask & (1<=0 ); + aLock[i]++; + } + } + } + + assert( 0==memcmp(pShmNode->aLock, aLock, sizeof(aLock)) ); + return (memcmp(pShmNode->aLock, aLock, sizeof(aLock))==0); +} +#endif + /* ** Change the lock state for a shared-memory segment. 
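unixFcntlExternalReader answers the new SQLITE_FCNTL_EXTERNAL_READER file-control by probing the READ-mark lock bytes (UNIX_SHM_BASE+3 and up) with F_GETLK. Applications reach it through sqlite3_file_control(); a minimal sketch, assuming an open WAL-mode handle db and a library new enough to define the opcode:

    #include <stdio.h>
    #include "sqlite3.h"

    static void checkExternalReaders(sqlite3 *db){
      int bReaders = 0;
      int rc = sqlite3_file_control(db, "main",
                                    SQLITE_FCNTL_EXTERNAL_READER, &bReaders);
      if( rc==SQLITE_OK && bReaders ){
        /* Some other process holds a read transaction on this database,
        ** so, for example, it is not yet safe to truncate the WAL. */
        printf("external readers present\n");
      }
    }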
** @@ -38361,10 +38966,10 @@ static int unixShmLock( ){ unixFile *pDbFd = (unixFile*)fd; /* Connection holding shared memory */ unixShm *p = pDbFd->pShm; /* The shared memory being locked */ - unixShm *pX; /* For looping over all siblings */ unixShmNode *pShmNode = p->pShmNode; /* The underlying file iNode */ int rc = SQLITE_OK; /* Result code */ u16 mask; /* Mask of locks to take or release */ + int *aLock = pShmNode->aLock; assert( pShmNode==pDbFd->pInode->pShmNode ); assert( pShmNode->pInode==pDbFd->pInode ); @@ -38403,78 +39008,76 @@ static int unixShmLock( mask = (1<<(ofst+n)) - (1<1 || mask==(1<pShmMutex); + assert( assertLockingArrayOk(pShmNode) ); if( flags & SQLITE_SHM_UNLOCK ){ - u16 allMask = 0; /* Mask of locks held by siblings */ + if( (p->exclMask|p->sharedMask) & mask ){ + int ii; + int bUnlock = 1; - /* See if any siblings hold this same lock */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( pX==p ) continue; - assert( (pX->exclMask & (p->exclMask|p->sharedMask))==0 ); - allMask |= pX->sharedMask; - } + for(ii=ofst; ii((p->sharedMask & (1<sharedMask & (1<1 ); + aLock[ofst]--; + } - /* Undo the local locks */ - if( rc==SQLITE_OK ){ - p->exclMask &= ~mask; - p->sharedMask &= ~mask; + /* Undo the local locks */ + if( rc==SQLITE_OK ){ + p->exclMask &= ~mask; + p->sharedMask &= ~mask; + } } }else if( flags & SQLITE_SHM_SHARED ){ - u16 allShared = 0; /* Union of locks held by connections other than "p" */ - - /* Find out which shared locks are already held by sibling connections. - ** If any sibling already holds an exclusive lock, go ahead and return - ** SQLITE_BUSY. - */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( (pX->exclMask & mask)!=0 ){ + assert( n==1 ); + assert( (p->exclMask & (1<sharedMask & mask)==0 ){ + if( aLock[ofst]<0 ){ rc = SQLITE_BUSY; - break; - } - allShared |= pX->sharedMask; - } - - /* Get shared locks at the system level, if necessary */ - if( rc==SQLITE_OK ){ - if( (allShared & mask)==0 ){ + }else if( aLock[ofst]==0 ){ rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n); - }else{ - rc = SQLITE_OK; } - } - /* Get the local shared locks */ - if( rc==SQLITE_OK ){ - p->sharedMask |= mask; + /* Get the local shared locks */ + if( rc==SQLITE_OK ){ + p->sharedMask |= mask; + aLock[ofst]++; + } } }else{ /* Make sure no sibling connections hold locks that will block this - ** lock. If any do, return SQLITE_BUSY right away. - */ - for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ - if( (pX->exclMask & mask)!=0 || (pX->sharedMask & mask)!=0 ){ + ** lock. If any do, return SQLITE_BUSY right away. */ + int ii; + for(ii=ofst; iisharedMask & mask)==0 ); + if( ALWAYS((p->exclMask & (1<sharedMask & mask)==0 ); p->exclMask |= mask; + for(ii=ofst; iipShmMutex); OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x\n", p->id, osGetpid(0), p->sharedMask, p->exclMask)); @@ -39851,7 +40454,27 @@ static int unixAccess( } /* +** If the last component of the pathname in z[0]..z[j-1] is something +** other than ".." then back it out and return true. If the last +** component is empty or if it is ".." then return false. +*/ +static int unixBackupDir(const char *z, int *pJ){ + int j = *pJ; + int i; + if( j<=0 ) return 0; + for(i=j-1; i>0 && z[i-1]!='/'; i--){} + if( i==0 ) return 0; + if( z[i]=='.' && i==j-2 && z[i+1]=='.' ) return 0; + *pJ = i-1; + return 1; +} + +/* +** Convert a relative pathname into a full pathname. 
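The rewritten unixShmLock below leans on the new unixShmNode.aLock[] array: one counter per lock slot, where a positive value counts shared holders across all connections on this inode and -1 marks an exclusive holder, so the OS-level fcntl call is only needed on the zero/non-zero transitions. A tiny sketch of that accounting in isolation, with no OS calls and hypothetical helper names:

    #define NSLOT 8                 /* stands in for SQLITE_SHM_NLOCK */
    static int aLock[NSLOT];        /* >0: n shared holders; -1: exclusive */

    static int takeShared(int i){
      if( aLock[i]<0 ) return 0;    /* exclusive holder -> SQLITE_BUSY */
      aLock[i]++;                   /* real code takes F_RDLCK on 0 -> 1 */
      return 1;
    }
    static int takeExclusive(int i){
      if( aLock[i]!=0 ) return 0;   /* shared or exclusive holders present */
      aLock[i] = -1;                /* real code takes F_WRLCK here */
      return 1;
    }
    static void releaseSlot(int i){
      if( aLock[i]<0 ){
        aLock[i] = 0;               /* real code drops the OS lock too */
      }else if( aLock[i]>0 && --aLock[i]==0 ){
        /* last shared holder gone: the OS-level F_RDLCK can be dropped */
      }
    }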
Also +** simplify the pathname as follows: ** +** Remove all instances of /./ +** Remove all isntances of /X/../ for any X */ static int mkFullPathname( const char *zPath, /* Input path */ @@ -39860,6 +40483,7 @@ static int mkFullPathname( ){ int nPath = sqlite3Strlen30(zPath); int iOff = 0; + int i, j; if( zPath[0]!='/' ){ if( osGetcwd(zOut, nOut-2)==0 ){ return unixLogError(SQLITE_CANTOPEN_BKPT, "getcwd", zPath); @@ -39874,6 +40498,41 @@ static int mkFullPathname( return SQLITE_CANTOPEN_BKPT; } sqlite3_snprintf(nOut-iOff, &zOut[iOff], "%s", zPath); + + /* Remove duplicate '/' characters. Except, two // at the beginning + ** of a pathname is allowed since this is important on windows. */ + for(i=j=1; zOut[i]; i++){ + zOut[j++] = zOut[i]; + while( zOut[i]=='/' && zOut[i+1]=='/' ) i++; + } + zOut[j] = 0; + + assert( zOut[0]=='/' ); + for(i=j=0; zOut[i]; i++){ + if( zOut[i]=='/' ){ + /* Skip over internal "/." directory components */ + if( zOut[i+1]=='.' && zOut[i+2]=='/' ){ + i += 1; + continue; + } + + /* If this is a "/.." directory component then back out the + ** previous term of the directory if it is something other than "..". + */ + if( zOut[i+1]=='.' + && zOut[i+2]=='.' + && zOut[i+3]=='/' + && unixBackupDir(zOut, &j) + ){ + i += 2; + continue; + } + } + if( ALWAYS(j>=0) ) zOut[j] = zOut[i]; + j++; + } + if( NEVER(j==0) ) zOut[j++] = '/'; + zOut[j] = 0; return SQLITE_OK; } @@ -40094,7 +40753,8 @@ static int unixSleep(sqlite3_vfs *NotUsed, int microseconds){ UNUSED_PARAMETER(NotUsed); return microseconds; #elif defined(HAVE_USLEEP) && HAVE_USLEEP - usleep(microseconds); + if( microseconds>=1000000 ) sleep(microseconds/1000000); + if( microseconds%1000000 ) usleep(microseconds%1000000); UNUSED_PARAMETER(NotUsed); return microseconds; #else @@ -40667,7 +41327,7 @@ static int proxyConchLock(unixFile *pFile, uuid_t myHostID, int lockType){ if( nTries==1 ){ conchModTime = buf.st_mtimespec; - usleep(500000); /* wait 0.5 sec and try the lock again*/ + unixSleep(0,500000); /* wait 0.5 sec and try the lock again*/ continue; } @@ -40693,7 +41353,7 @@ static int proxyConchLock(unixFile *pFile, uuid_t myHostID, int lockType){ /* don't break the lock on short read or a version mismatch */ return SQLITE_BUSY; } - usleep(10000000); /* wait 10 sec and try the lock again */ + unixSleep(0,10000000); /* wait 10 sec and try the lock again */ continue; } @@ -41469,6 +42129,25 @@ SQLITE_API int sqlite3_os_init(void){ sqlite3_vfs_register(&aVfs[i], i==0); } unixBigLock = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1); + +#ifndef SQLITE_OMIT_WAL + /* Validate lock assumptions */ + assert( SQLITE_SHM_NLOCK==8 ); /* Number of available locks */ + assert( UNIX_SHM_BASE==120 ); /* Start of locking area */ + /* Locks: + ** WRITE UNIX_SHM_BASE 120 + ** CKPT UNIX_SHM_BASE+1 121 + ** RECOVER UNIX_SHM_BASE+2 122 + ** READ-0 UNIX_SHM_BASE+3 123 + ** READ-1 UNIX_SHM_BASE+4 124 + ** READ-2 UNIX_SHM_BASE+5 125 + ** READ-3 UNIX_SHM_BASE+6 126 + ** READ-4 UNIX_SHM_BASE+7 127 + ** DMS UNIX_SHM_BASE+8 128 + */ + assert( UNIX_SHM_DMS==128 ); /* Byte offset of the deadman-switch */ +#endif + return SQLITE_OK; } @@ -46814,7 +47493,11 @@ static int winOpen( dwCreationDisposition = OPEN_EXISTING; } - dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE; + if( 0==sqlite3_uri_boolean(zName, "exclusive", 0) ){ + dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE; + }else{ + dwShareMode = 0; + } if( isDelete ){ #if SQLITE_OS_WINCE @@ -47858,31 +48541,88 @@ SQLITE_API int sqlite3_os_end(void){ ** sqlite3_deserialize(). 
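The new canonicalization in mkFullPathname (with its unixBackupDir helper) rewrites the joined path in place: duplicate slashes collapse (a leading "//" survives), "/./" components vanish, and "/X/../" backs out X unless X is itself "..". The same rewrite, lifted into a standalone harness for easy experimentation; canonicalize is a hypothetical wrapper and, like the original, it only treats components that are followed by another '/':

    #include <stdio.h>

    /* Back out the component ending at z[*pJ-1]; refuse on ".." or empty. */
    static int backupDir(const char *z, int *pJ){
      int j = *pJ, i;
      if( j<=0 ) return 0;
      for(i=j-1; i>0 && z[i-1]!='/'; i--){}
      if( i==0 ) return 0;
      if( z[i]=='.' && i==j-2 && z[i+1]=='.' ) return 0;
      *pJ = i-1;
      return 1;
    }

    static void canonicalize(char *z){   /* z must be absolute: z[0]=='/' */
      int i, j;
      for(i=j=1; z[i]; i++){             /* collapse "//" runs */
        z[j++] = z[i];
        while( z[i]=='/' && z[i+1]=='/' ) i++;
      }
      z[j] = 0;
      for(i=j=0; z[i]; i++){
        if( z[i]=='/' ){
          if( z[i+1]=='.' && z[i+2]=='/' ){ i += 1; continue; }
          if( z[i+1]=='.' && z[i+2]=='.' && z[i+3]=='/'
           && backupDir(z, &j) ){ i += 2; continue; }
        }
        z[j++] = z[i];
      }
      if( j==0 ) z[j++] = '/';
      z[j] = 0;
    }

    int main(void){
      char zPath[] = "/a//b/./c/../d";
      canonicalize(zPath);
      printf("%s\n", zPath);             /* prints "/a/b/d" */
      return 0;
    }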
*/ /* #include "sqliteInt.h" */ -#ifdef SQLITE_ENABLE_DESERIALIZE +#ifndef SQLITE_OMIT_DESERIALIZE /* ** Forward declaration of objects used by this utility */ typedef struct sqlite3_vfs MemVfs; typedef struct MemFile MemFile; +typedef struct MemStore MemStore; /* Access to a lower-level VFS that (might) implement dynamic loading, ** access to randomness, etc. */ #define ORIGVFS(p) ((sqlite3_vfs*)((p)->pAppData)) -/* An open file */ -struct MemFile { - sqlite3_file base; /* IO methods */ +/* Storage for a memdb file. +** +** An memdb object can be shared or separate. Shared memdb objects can be +** used by more than one database connection. Mutexes are used by shared +** memdb objects to coordinate access. Separate memdb objects are only +** connected to a single database connection and do not require additional +** mutexes. +** +** Shared memdb objects have .zFName!=0 and .pMutex!=0. They are created +** using "file:/name?vfs=memdb". The first character of the name must be +** "/" or else the object will be a separate memdb object. All shared +** memdb objects are stored in memdb_g.apMemStore[] in an arbitrary order. +** +** Separate memdb objects are created using a name that does not begin +** with "/" or using sqlite3_deserialize(). +** +** Access rules for shared MemStore objects: +** +** * .zFName is initialized when the object is created and afterwards +** is unchanged until the object is destroyed. So it can be accessed +** at any time as long as we know the object is not being destroyed, +** which means while either the SQLITE_MUTEX_STATIC_VFS1 or +** .pMutex is held or the object is not part of memdb_g.apMemStore[]. +** +** * Can .pMutex can only be changed while holding the +** SQLITE_MUTEX_STATIC_VFS1 mutex or while the object is not part +** of memdb_g.apMemStore[]. +** +** * Other fields can only be changed while holding the .pMutex mutex +** or when the .nRef is less than zero and the object is not part of +** memdb_g.apMemStore[]. +** +** * The .aData pointer has the added requirement that it can can only +** be changed (for resizing) when nMmap is zero. +** +*/ +struct MemStore { sqlite3_int64 sz; /* Size of the file */ sqlite3_int64 szAlloc; /* Space allocated to aData */ sqlite3_int64 szMax; /* Maximum allowed size of the file */ unsigned char *aData; /* content of the file */ + sqlite3_mutex *pMutex; /* Used by shared stores only */ int nMmap; /* Number of memory mapped pages */ unsigned mFlags; /* Flags */ + int nRdLock; /* Number of readers */ + int nWrLock; /* Number of writers. (Always 0 or 1) */ + int nRef; /* Number of users of this MemStore */ + char *zFName; /* The filename for shared stores */ +}; + +/* An open file */ +struct MemFile { + sqlite3_file base; /* IO methods */ + MemStore *pStore; /* The storage */ int eLock; /* Most recent lock against this file */ }; +/* +** File-scope variables for holding the memdb files that are accessible +** to multiple database connections in separate threads. +** +** Must hold SQLITE_MUTEX_STATIC_VFS1 to access any part of this object. 
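The access rules above introduce shared in-memory databases: a MemStore whose name begins with "/" is registered in memdb_g.apMemStore[] and may be attached to by several connections at once. A usage sketch against the public API, assuming the "file:/name?vfs=memdb" convention documented above (error handling abbreviated):

    #include <sqlite3.h>

    /* Open two connections on one shared MemStore; the leading "/" in
    ** the URI path is what makes the store shared. A name without the
    ** leading "/" (or sqlite3_deserialize()) yields a separate,
    ** single-connection store that needs no mutex. */
    static int openSharedPair(sqlite3 **pDb1, sqlite3 **pDb2){
      const int flags = SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_URI;
      int rc = sqlite3_open_v2("file:/shared_db?vfs=memdb", pDb1, flags, 0);
      if( rc==SQLITE_OK ){
        rc = sqlite3_open_v2("file:/shared_db?vfs=memdb", pDb2, flags, 0);
      }
      return rc;  /* on success both handles see the same MemStore */
    }
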
+*/ +static struct MemFS { + int nMemStore; /* Number of shared MemStore objects */ + MemStore **apMemStore; /* Array of all shared MemStore objects */ +} memdb_g; + /* ** Methods for MemFile */ @@ -47936,7 +48676,10 @@ static sqlite3_vfs memdb_vfs = { memdbSleep, /* xSleep */ 0, /* memdbCurrentTime, */ /* xCurrentTime */ memdbGetLastError, /* xGetLastError */ - memdbCurrentTimeInt64 /* xCurrentTimeInt64 */ + memdbCurrentTimeInt64, /* xCurrentTimeInt64 */ + 0, /* xSetSystemCall */ + 0, /* xGetSystemCall */ + 0, /* xNextSystemCall */ }; static const sqlite3_io_methods memdb_io_methods = { @@ -47961,17 +48704,68 @@ static const sqlite3_io_methods memdb_io_methods = { memdbUnfetch /* xUnfetch */ }; +/* +** Enter/leave the mutex on a MemStore +*/ +#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE==0 +static void memdbEnter(MemStore *p){ + UNUSED_PARAMETER(p); +} +static void memdbLeave(MemStore *p){ + UNUSED_PARAMETER(p); +} +#else +static void memdbEnter(MemStore *p){ + sqlite3_mutex_enter(p->pMutex); +} +static void memdbLeave(MemStore *p){ + sqlite3_mutex_leave(p->pMutex); +} +#endif + /* ** Close an memdb-file. -** -** The pData pointer is owned by the application, so there is nothing -** to free. +** Free the underlying MemStore object when its refcount drops to zero +** or less. */ static int memdbClose(sqlite3_file *pFile){ - MemFile *p = (MemFile *)pFile; - if( p->mFlags & SQLITE_DESERIALIZE_FREEONCLOSE ) sqlite3_free(p->aData); + MemStore *p = ((MemFile*)pFile)->pStore; + if( p->zFName ){ + int i; +#ifndef SQLITE_MUTEX_OMIT + sqlite3_mutex *pVfsMutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1); +#endif + sqlite3_mutex_enter(pVfsMutex); + for(i=0; ALWAYS(inRef==1 ){ + memdb_g.apMemStore[i] = memdb_g.apMemStore[--memdb_g.nMemStore]; + if( memdb_g.nMemStore==0 ){ + sqlite3_free(memdb_g.apMemStore); + memdb_g.apMemStore = 0; + } + } + break; + } + } + sqlite3_mutex_leave(pVfsMutex); + }else{ + memdbEnter(p); + } + p->nRef--; + if( p->nRef<=0 ){ + if( p->mFlags & SQLITE_DESERIALIZE_FREEONCLOSE ){ + sqlite3_free(p->aData); + } + memdbLeave(p); + sqlite3_mutex_free(p->pMutex); + sqlite3_free(p); + }else{ + memdbLeave(p); + } return SQLITE_OK; } @@ -47984,20 +48778,23 @@ static int memdbRead( int iAmt, sqlite_int64 iOfst ){ - MemFile *p = (MemFile *)pFile; + MemStore *p = ((MemFile*)pFile)->pStore; + memdbEnter(p); if( iOfst+iAmt>p->sz ){ memset(zBuf, 0, iAmt); if( iOfstsz ) memcpy(zBuf, p->aData+iOfst, p->sz - iOfst); + memdbLeave(p); return SQLITE_IOERR_SHORT_READ; } memcpy(zBuf, p->aData+iOfst, iAmt); + memdbLeave(p); return SQLITE_OK; } /* ** Try to enlarge the memory allocation to hold at least sz bytes */ -static int memdbEnlarge(MemFile *p, sqlite3_int64 newSz){ +static int memdbEnlarge(MemStore *p, sqlite3_int64 newSz){ unsigned char *pNew; if( (p->mFlags & SQLITE_DESERIALIZE_RESIZEABLE)==0 || p->nMmap>0 ){ return SQLITE_FULL; @@ -48008,7 +48805,7 @@ static int memdbEnlarge(MemFile *p, sqlite3_int64 newSz){ newSz *= 2; if( newSz>p->szMax ) newSz = p->szMax; pNew = sqlite3Realloc(p->aData, newSz); - if( pNew==0 ) return SQLITE_NOMEM; + if( pNew==0 ) return SQLITE_IOERR_NOMEM; p->aData = pNew; p->szAlloc = newSz; return SQLITE_OK; @@ -48023,19 +48820,27 @@ static int memdbWrite( int iAmt, sqlite_int64 iOfst ){ - MemFile *p = (MemFile *)pFile; - if( NEVER(p->mFlags & SQLITE_DESERIALIZE_READONLY) ) return SQLITE_READONLY; + MemStore *p = ((MemFile*)pFile)->pStore; + memdbEnter(p); + if( NEVER(p->mFlags & SQLITE_DESERIALIZE_READONLY) ){ + /* Can't happen: memdbLock() will return 
SQLITE_READONLY before + ** reaching this point */ + memdbLeave(p); + return SQLITE_IOERR_WRITE; + } if( iOfst+iAmt>p->sz ){ int rc; if( iOfst+iAmt>p->szAlloc && (rc = memdbEnlarge(p, iOfst+iAmt))!=SQLITE_OK ){ + memdbLeave(p); return rc; } if( iOfst>p->sz ) memset(p->aData+p->sz, 0, iOfst-p->sz); p->sz = iOfst+iAmt; } memcpy(p->aData+iOfst, z, iAmt); + memdbLeave(p); return SQLITE_OK; } @@ -48047,16 +48852,24 @@ static int memdbWrite( ** the size of a file, never to increase the size. */ static int memdbTruncate(sqlite3_file *pFile, sqlite_int64 size){ - MemFile *p = (MemFile *)pFile; - if( NEVER(size>p->sz) ) return SQLITE_FULL; - p->sz = size; - return SQLITE_OK; + MemStore *p = ((MemFile*)pFile)->pStore; + int rc = SQLITE_OK; + memdbEnter(p); + if( NEVER(size>p->sz) ){ + rc = SQLITE_FULL; + }else{ + p->sz = size; + } + memdbLeave(p); + return rc; } /* ** Sync an memdb-file. */ static int memdbSync(sqlite3_file *pFile, int flags){ + UNUSED_PARAMETER(pFile); + UNUSED_PARAMETER(flags); return SQLITE_OK; } @@ -48064,8 +48877,10 @@ static int memdbSync(sqlite3_file *pFile, int flags){ ** Return the current file-size of an memdb-file. */ static int memdbFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ - MemFile *p = (MemFile *)pFile; + MemStore *p = ((MemFile*)pFile)->pStore; + memdbEnter(p); *pSize = p->sz; + memdbLeave(p); return SQLITE_OK; } @@ -48073,19 +48888,48 @@ static int memdbFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){ ** Lock an memdb-file. */ static int memdbLock(sqlite3_file *pFile, int eLock){ - MemFile *p = (MemFile *)pFile; - if( eLock>SQLITE_LOCK_SHARED - && (p->mFlags & SQLITE_DESERIALIZE_READONLY)!=0 - ){ - return SQLITE_READONLY; + MemFile *pThis = (MemFile*)pFile; + MemStore *p = pThis->pStore; + int rc = SQLITE_OK; + if( eLock==pThis->eLock ) return SQLITE_OK; + memdbEnter(p); + if( eLock>SQLITE_LOCK_SHARED ){ + if( p->mFlags & SQLITE_DESERIALIZE_READONLY ){ + rc = SQLITE_READONLY; + }else if( pThis->eLock<=SQLITE_LOCK_SHARED ){ + if( p->nWrLock ){ + rc = SQLITE_BUSY; + }else{ + p->nWrLock = 1; + } + } + }else if( eLock==SQLITE_LOCK_SHARED ){ + if( pThis->eLock > SQLITE_LOCK_SHARED ){ + assert( p->nWrLock==1 ); + p->nWrLock = 0; + }else if( p->nWrLock ){ + rc = SQLITE_BUSY; + }else{ + p->nRdLock++; + } + }else{ + assert( eLock==SQLITE_LOCK_NONE ); + if( pThis->eLock>SQLITE_LOCK_SHARED ){ + assert( p->nWrLock==1 ); + p->nWrLock = 0; + } + assert( p->nRdLock>0 ); + p->nRdLock--; } - p->eLock = eLock; - return SQLITE_OK; + if( rc==SQLITE_OK ) pThis->eLock = eLock; + memdbLeave(p); + return rc; } -#if 0 /* Never used because memdbAccess() always returns false */ +#if 0 /* -** Check if another file-handle holds a RESERVED lock on an memdb-file. +** This interface is only used for crash recovery, which does not +** occur on an in-memory database. */ static int memdbCheckReservedLock(sqlite3_file *pFile, int *pResOut){ *pResOut = 0; @@ -48093,12 +48937,14 @@ static int memdbCheckReservedLock(sqlite3_file *pFile, int *pResOut){ } #endif + /* ** File control method. For custom operations on an memdb-file. 
*/ static int memdbFileControl(sqlite3_file *pFile, int op, void *pArg){ - MemFile *p = (MemFile *)pFile; + MemStore *p = ((MemFile*)pFile)->pStore; int rc = SQLITE_NOTFOUND; + memdbEnter(p); if( op==SQLITE_FCNTL_VFSNAME ){ *(char**)pArg = sqlite3_mprintf("memdb(%p,%lld)", p->aData, p->sz); rc = SQLITE_OK; @@ -48116,6 +48962,7 @@ static int memdbFileControl(sqlite3_file *pFile, int op, void *pArg){ *(sqlite3_int64*)pArg = iLimit; rc = SQLITE_OK; } + memdbLeave(p); return rc; } @@ -48132,6 +48979,7 @@ static int memdbSectorSize(sqlite3_file *pFile){ ** Return the device characteristic flags supported by an memdb-file. */ static int memdbDeviceCharacteristics(sqlite3_file *pFile){ + UNUSED_PARAMETER(pFile); return SQLITE_IOCAP_ATOMIC | SQLITE_IOCAP_POWERSAFE_OVERWRITE | SQLITE_IOCAP_SAFE_APPEND | @@ -48145,20 +48993,26 @@ static int memdbFetch( int iAmt, void **pp ){ - MemFile *p = (MemFile *)pFile; + MemStore *p = ((MemFile*)pFile)->pStore; + memdbEnter(p); if( iOfst+iAmt>p->sz ){ *pp = 0; }else{ p->nMmap++; *pp = (void*)(p->aData + iOfst); } + memdbLeave(p); return SQLITE_OK; } /* Release a memory-mapped page */ static int memdbUnfetch(sqlite3_file *pFile, sqlite3_int64 iOfst, void *pPage){ - MemFile *p = (MemFile *)pFile; + MemStore *p = ((MemFile*)pFile)->pStore; + UNUSED_PARAMETER(iOfst); + UNUSED_PARAMETER(pPage); + memdbEnter(p); p->nMmap--; + memdbLeave(p); return SQLITE_OK; } @@ -48168,20 +49022,79 @@ static int memdbUnfetch(sqlite3_file *pFile, sqlite3_int64 iOfst, void *pPage){ static int memdbOpen( sqlite3_vfs *pVfs, const char *zName, - sqlite3_file *pFile, + sqlite3_file *pFd, int flags, int *pOutFlags ){ - MemFile *p = (MemFile*)pFile; + MemFile *pFile = (MemFile*)pFd; + MemStore *p = 0; + int szName; if( (flags & SQLITE_OPEN_MAIN_DB)==0 ){ - return ORIGVFS(pVfs)->xOpen(ORIGVFS(pVfs), zName, pFile, flags, pOutFlags); + return ORIGVFS(pVfs)->xOpen(ORIGVFS(pVfs), zName, pFd, flags, pOutFlags); } - memset(p, 0, sizeof(*p)); - p->mFlags = SQLITE_DESERIALIZE_RESIZEABLE | SQLITE_DESERIALIZE_FREEONCLOSE; + memset(pFile, 0, sizeof(*p)); + szName = sqlite3Strlen30(zName); + if( szName>1 && zName[0]=='/' ){ + int i; +#ifndef SQLITE_MUTEX_OMIT + sqlite3_mutex *pVfsMutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1); +#endif + sqlite3_mutex_enter(pVfsMutex); + for(i=0; izFName,zName)==0 ){ + p = memdb_g.apMemStore[i]; + break; + } + } + if( p==0 ){ + MemStore **apNew; + p = sqlite3Malloc( sizeof(*p) + szName + 3 ); + if( p==0 ){ + sqlite3_mutex_leave(pVfsMutex); + return SQLITE_NOMEM; + } + apNew = sqlite3Realloc(memdb_g.apMemStore, + sizeof(apNew[0])*(memdb_g.nMemStore+1) ); + if( apNew==0 ){ + sqlite3_free(p); + sqlite3_mutex_leave(pVfsMutex); + return SQLITE_NOMEM; + } + apNew[memdb_g.nMemStore++] = p; + memdb_g.apMemStore = apNew; + memset(p, 0, sizeof(*p)); + p->mFlags = SQLITE_DESERIALIZE_RESIZEABLE|SQLITE_DESERIALIZE_FREEONCLOSE; + p->szMax = sqlite3GlobalConfig.mxMemdbSize; + p->zFName = (char*)&p[1]; + memcpy(p->zFName, zName, szName+1); + p->pMutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + if( p->pMutex==0 ){ + memdb_g.nMemStore--; + sqlite3_free(p); + sqlite3_mutex_leave(pVfsMutex); + return SQLITE_NOMEM; + } + p->nRef = 1; + memdbEnter(p); + }else{ + memdbEnter(p); + p->nRef++; + } + sqlite3_mutex_leave(pVfsMutex); + }else{ + p = sqlite3Malloc( sizeof(*p) ); + if( p==0 ){ + return SQLITE_NOMEM; + } + memset(p, 0, sizeof(*p)); + p->mFlags = SQLITE_DESERIALIZE_RESIZEABLE | SQLITE_DESERIALIZE_FREEONCLOSE; + p->szMax = sqlite3GlobalConfig.mxMemdbSize; + } + pFile->pStore = p; 
assert( pOutFlags!=0 ); /* True because flags==SQLITE_OPEN_MAIN_DB */ *pOutFlags = flags | SQLITE_OPEN_MEMORY; - pFile->pMethods = &memdb_io_methods; - p->szMax = sqlite3GlobalConfig.mxMemdbSize; + pFd->pMethods = &memdb_io_methods; + memdbLeave(p); return SQLITE_OK; } @@ -48209,6 +49122,9 @@ static int memdbAccess( int flags, int *pResOut ){ + UNUSED_PARAMETER(pVfs); + UNUSED_PARAMETER(zPath); + UNUSED_PARAMETER(flags); *pResOut = 0; return SQLITE_OK; } @@ -48224,6 +49140,7 @@ static int memdbFullPathname( int nOut, char *zOut ){ + UNUSED_PARAMETER(pVfs); sqlite3_snprintf(nOut, zOut, "%s", zPath); return SQLITE_OK; } @@ -48296,9 +49213,14 @@ static int memdbCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *p){ */ static MemFile *memdbFromDbSchema(sqlite3 *db, const char *zSchema){ MemFile *p = 0; + MemStore *pStore; int rc = sqlite3_file_control(db, zSchema, SQLITE_FCNTL_FILE_POINTER, &p); if( rc ) return 0; if( p->base.pMethods!=&memdb_io_methods ) return 0; + pStore = p->pStore; + memdbEnter(pStore); + if( pStore->zFName!=0 ) p = 0; + memdbLeave(pStore); return p; } @@ -48334,12 +49256,14 @@ SQLITE_API unsigned char *sqlite3_serialize( if( piSize ) *piSize = -1; if( iDb<0 ) return 0; if( p ){ - if( piSize ) *piSize = p->sz; + MemStore *pStore = p->pStore; + assert( pStore->pMutex==0 ); + if( piSize ) *piSize = pStore->sz; if( mFlags & SQLITE_SERIALIZE_NOCOPY ){ - pOut = p->aData; + pOut = pStore->aData; }else{ - pOut = sqlite3_malloc64( p->sz ); - if( pOut ) memcpy(pOut, p->aData, p->sz); + pOut = sqlite3_malloc64( pStore->sz ); + if( pOut ) memcpy(pOut, pStore->aData, pStore->sz); } return pOut; } @@ -48414,8 +49338,12 @@ SQLITE_API int sqlite3_deserialize( goto end_deserialize; } zSql = sqlite3_mprintf("ATTACH x AS %Q", zSchema); - rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); - sqlite3_free(zSql); + if( zSql==0 ){ + rc = SQLITE_NOMEM; + }else{ + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + sqlite3_free(zSql); + } if( rc ) goto end_deserialize; db->init.iDb = (u8)iDb; db->init.reopenMemdb = 1; @@ -48429,19 +49357,24 @@ SQLITE_API int sqlite3_deserialize( if( p==0 ){ rc = SQLITE_ERROR; }else{ - p->aData = pData; - p->sz = szDb; - p->szAlloc = szBuf; - p->szMax = szBuf; - if( p->szMaxszMax = sqlite3GlobalConfig.mxMemdbSize; + MemStore *pStore = p->pStore; + pStore->aData = pData; + pData = 0; + pStore->sz = szDb; + pStore->szAlloc = szBuf; + pStore->szMax = szBuf; + if( pStore->szMaxszMax = sqlite3GlobalConfig.mxMemdbSize; } - p->mFlags = mFlags; + pStore->mFlags = mFlags; rc = SQLITE_OK; } end_deserialize: sqlite3_finalize(pStmt); + if( pData && (mFlags & SQLITE_DESERIALIZE_FREEONCLOSE)!=0 ){ + sqlite3_free(pData); + } sqlite3_mutex_leave(db->mutex); return rc; } @@ -48452,7 +49385,9 @@ end_deserialize: */ SQLITE_PRIVATE int sqlite3MemdbInit(void){ sqlite3_vfs *pLower = sqlite3_vfs_find(0); - int sz = pLower->szOsFile; + unsigned int sz; + if( NEVER(pLower==0) ) return SQLITE_ERROR; + sz = pLower->szOsFile; memdb_vfs.pAppData = pLower; /* The following conditional can only be true when compiled for ** Windows x86 and SQLITE_MAX_MMAP_SIZE=0. 
We always leave @@ -48462,7 +49397,7 @@ SQLITE_PRIVATE int sqlite3MemdbInit(void){ memdb_vfs.szOsFile = sz; return sqlite3_vfs_register(&memdb_vfs, 0); } -#endif /* SQLITE_ENABLE_DESERIALIZE */ +#endif /* SQLITE_OMIT_DESERIALIZE */ /************** End of memdb.c ***********************************************/ /************** Begin file bitvec.c ******************************************/ @@ -50228,6 +51163,7 @@ static PgHdr1 *pcache1AllocPage(PCache1 *pCache, int benignMalloc){ p->page.pExtra = &p[1]; p->isBulkLocal = 0; p->isAnchor = 0; + p->pLruPrev = 0; /* Initializing this saves a valgrind error */ } (*pCache->pnPurgeable)++; return p; @@ -52146,6 +53082,7 @@ struct PagerSavepoint { Bitvec *pInSavepoint; /* Set of pages in this savepoint */ Pgno nOrig; /* Original number of pages in file */ Pgno iSubRec; /* Index of first record in sub-journal */ + int bTruncateOnRelease; /* If stmt journal may be truncated on RELEASE */ #ifndef SQLITE_OMIT_WAL u32 aWalData[WAL_SAVEPOINT_NDATA]; /* WAL savepoint context */ #endif @@ -52781,6 +53718,9 @@ static int subjRequiresPage(PgHdr *pPg){ for(i=0; inSavepoint; i++){ p = &pPager->aSavepoint[i]; if( p->nOrig>=pgno && 0==sqlite3BitvecTestNotNull(p->pInSavepoint, pgno) ){ + for(i=i+1; inSavepoint; i++){ + pPager->aSavepoint[i].bTruncateOnRelease = 0; + } return 1; } } @@ -54197,6 +55137,7 @@ static int pager_delsuper(Pager *pPager, const char *zSuper){ i64 nSuperJournal; /* Size of super-journal file */ char *zJournal; /* Pointer to one journal within MJ file */ char *zSuperPtr; /* Space to hold super-journal filename */ + char *zFree = 0; /* Free this buffer */ int nSuperPtr; /* Amount of space allocated to zSuperPtr[] */ /* Allocate space for both the pJournal and pSuper file descriptors. @@ -54221,11 +55162,13 @@ static int pager_delsuper(Pager *pPager, const char *zSuper){ rc = sqlite3OsFileSize(pSuper, &nSuperJournal); if( rc!=SQLITE_OK ) goto delsuper_out; nSuperPtr = pVfs->mxPathname+1; - zSuperJournal = sqlite3Malloc(nSuperJournal + nSuperPtr + 2); - if( !zSuperJournal ){ + zFree = sqlite3Malloc(4 + nSuperJournal + nSuperPtr + 2); + if( !zFree ){ rc = SQLITE_NOMEM_BKPT; goto delsuper_out; } + zFree[0] = zFree[1] = zFree[2] = zFree[3] = 0; + zSuperJournal = &zFree[4]; zSuperPtr = &zSuperJournal[nSuperJournal+2]; rc = sqlite3OsRead(pSuper, zSuperJournal, (int)nSuperJournal, 0); if( rc!=SQLITE_OK ) goto delsuper_out; @@ -54273,7 +55216,7 @@ static int pager_delsuper(Pager *pPager, const char *zSuper){ rc = sqlite3OsDelete(pVfs, zSuper, 0); delsuper_out: - sqlite3_free(zSuperJournal); + sqlite3_free(zFree); if( pSuper ){ sqlite3OsClose(pSuper); assert( !isOpen(pJournal) ); @@ -54611,7 +55554,11 @@ end_playback: pPager->changeCountDone = pPager->tempFile; if( rc==SQLITE_OK ){ - zSuper = pPager->pTmpSpace; + /* Leave 4 bytes of space before the super-journal filename in memory. + ** This is because it may end up being passed to sqlite3OsOpen(), in + ** which case it requires 4 0x00 bytes in memory immediately before + ** the filename. */ + zSuper = &pPager->pTmpSpace[4]; rc = readSuperJournal(pPager->jfd, zSuper, pPager->pVfs->mxPathname+1); testcase( rc!=SQLITE_OK ); } @@ -54628,6 +55575,8 @@ end_playback: /* If there was a super-journal and this routine will return success, ** see if it is possible to delete the super-journal. 
*/ + assert( zSuper==&pPager->pTmpSpace[4] ); + memset(&zSuper[-4], 0, 4); rc = pager_delsuper(pPager, zSuper); testcase( rc!=SQLITE_OK ); } @@ -55634,7 +56583,8 @@ static void assertTruncateConstraint(Pager *pPager){ ** then continue writing to the database. */ SQLITE_PRIVATE void sqlite3PagerTruncateImage(Pager *pPager, Pgno nPage){ - assert( pPager->dbSize>=nPage ); + assert( pPager->dbSize>=nPage || CORRUPT_DB ); + testcase( pPager->dbSizeeState>=PAGER_WRITER_CACHEMOD ); pPager->dbSize = nPage; @@ -56362,7 +57312,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen( int rc = SQLITE_OK; /* Return code */ int tempFile = 0; /* True for temp files (incl. in-memory files) */ int memDb = 0; /* True if this is an in-memory file */ -#ifdef SQLITE_ENABLE_DESERIALIZE +#ifndef SQLITE_OMIT_DESERIALIZE int memJM = 0; /* Memory journal mode */ #else # define memJM 0 @@ -56566,7 +57516,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen( int fout = 0; /* VFS flags returned by xOpen() */ rc = sqlite3OsOpen(pVfs, pPager->zFilename, pPager->fd, vfsFlags, &fout); assert( !memDb ); -#ifdef SQLITE_ENABLE_DESERIALIZE +#ifndef SQLITE_OMIT_DESERIALIZE memJM = (fout&SQLITE_OPEN_MEMORY)!=0; #endif readOnly = (fout&SQLITE_OPEN_READONLY)!=0; @@ -57534,7 +58484,7 @@ SQLITE_PRIVATE int sqlite3PagerBegin(Pager *pPager, int exFlag, int subjInMemory assert( pPager->eState>=PAGER_READER && pPager->eStatesubjInMemory = (u8)subjInMemory; - if( ALWAYS(pPager->eState==PAGER_READER) ){ + if( pPager->eState==PAGER_READER ){ assert( pPager->pInJournal==0 ); if( pagerUseWal(pPager) ){ @@ -58550,6 +59500,7 @@ static SQLITE_NOINLINE int pagerOpenSavepoint(Pager *pPager, int nSavepoint){ } aNew[ii].iSubRec = pPager->nSubRec; aNew[ii].pInSavepoint = sqlite3BitvecCreate(pPager->dbSize); + aNew[ii].bTruncateOnRelease = 1; if( !aNew[ii].pInSavepoint ){ return SQLITE_NOMEM_BKPT; } @@ -58631,13 +59582,15 @@ SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){ /* If this is a release of the outermost savepoint, truncate ** the sub-journal to zero bytes in size. */ if( op==SAVEPOINT_RELEASE ){ - if( nNew==0 && isOpen(pPager->sjfd) ){ + PagerSavepoint *pRel = &pPager->aSavepoint[nNew]; + if( pRel->bTruncateOnRelease && isOpen(pPager->sjfd) ){ /* Only truncate if it is an in-memory sub-journal. */ if( sqlite3JournalIsInMemory(pPager->sjfd) ){ - rc = sqlite3OsTruncate(pPager->sjfd, 0); + i64 sz = (pPager->pageSize+4)*pRel->iSubRec; + rc = sqlite3OsTruncate(pPager->sjfd, sz); assert( rc==SQLITE_OK ); } - pPager->nSubRec = 0; + pPager->nSubRec = pRel->iSubRec; } } /* Else this is a rollback operation, playback the specified savepoint. @@ -60405,7 +61358,6 @@ static void walCleanupHash(Wal *pWal){ int iLimit = 0; /* Zero values greater than this */ int nByte; /* Number of bytes to zero in aPgno[] */ int i; /* Used to iterate through aHash[] */ - int rc; /* Return code form walHashGet() */ assert( pWal->writeLock ); testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE-1 ); @@ -60420,8 +61372,8 @@ static void walCleanupHash(Wal *pWal){ */ assert( pWal->nWiData>walFramePage(pWal->hdr.mxFrame) ); assert( pWal->apWiData[walFramePage(pWal->hdr.mxFrame)] ); - rc = walHashGet(pWal, walFramePage(pWal->hdr.mxFrame), &sLoc); - if( NEVER(rc) ) return; /* Defense-in-depth, in case (1) above is wrong */ + i = walHashGet(pWal, walFramePage(pWal->hdr.mxFrame), &sLoc); + if( NEVER(i) ) return; /* Defense-in-depth, in case (1) above is wrong */ /* Zero all hash-table entries that correspond to frame numbers greater ** than pWal->hdr.mxFrame. 
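The savepoint hunks above (the new bTruncateOnRelease field and the RELEASE path in sqlite3PagerSavepoint()) stop truncating the sub-journal to zero when the outermost savepoint is released; instead it is cut back to the released savepoint's first record, and the new flag suppresses truncation when later records may still be needed. Each sub-journal record is a 4-byte page number followed by one page image, which is where the (pageSize+4)*iSubRec arithmetic comes from. A worked sketch of that computation:

    /* Byte size of an in-memory sub-journal after RELEASE of a savepoint
    ** whose first record index is iSubRec; each record is a 4-byte page
    ** number plus one full page image. */
    static long long subJournalSizeOnRelease(int pageSize, int iSubRec){
      return (long long)(pageSize + 4) * iSubRec;
    }
    /* e.g. pageSize=4096, iSubRec=3 keeps 3*4100 = 12300 bytes, dropping
    ** only the records written after that savepoint was opened. */
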
@@ -63828,9 +64780,12 @@ struct Btree { u8 hasIncrblobCur; /* True if there are one or more Incrblob cursors */ int wantToLock; /* Number of nested calls to sqlite3BtreeEnter() */ int nBackup; /* Number of backup operations reading this btree */ - u32 iDataVersion; /* Combines with pBt->pPager->iDataVersion */ + u32 iBDataVersion; /* Combines with pBt->pPager->iDataVersion */ Btree *pNext; /* List of other sharable Btrees from the same db */ Btree *pPrev; /* Back pointer of the same list */ +#ifdef SQLITE_DEBUG + u64 nSeek; /* Calls to sqlite3BtreeMovetoUnpacked() */ +#endif #ifndef SQLITE_OMIT_SHARED_CACHE BtLock lock; /* Object used to lock page 1 */ #endif @@ -63842,11 +64797,25 @@ struct Btree { ** If the shared-data extension is enabled, there may be multiple users ** of the Btree structure. At most one of these may open a write transaction, ** but any number may have active read transactions. +** +** These values must match SQLITE_TXN_NONE, SQLITE_TXN_READ, and +** SQLITE_TXN_WRITE */ #define TRANS_NONE 0 #define TRANS_READ 1 #define TRANS_WRITE 2 +#if TRANS_NONE!=SQLITE_TXN_NONE +# error wrong numeric code for no-transaction +#endif +#if TRANS_READ!=SQLITE_TXN_READ +# error wrong numeric code for read-transaction +#endif +#if TRANS_WRITE!=SQLITE_TXN_WRITE +# error wrong numeric code for write-transaction +#endif + + /* ** An instance of this object represents a single database file. ** @@ -63916,6 +64885,7 @@ struct BtShared { Btree *pWriter; /* Btree with currently open write transaction */ #endif u8 *pTmpSpace; /* Temp space sufficient to hold a single cell */ + int nPreformatSize; /* Size of last cell written by TransferRow() */ }; /* @@ -64598,6 +65568,17 @@ SQLITE_API int sqlite3_enable_shared_cache(int enable){ #define hasReadConflicts(a, b) 0 #endif +#ifdef SQLITE_DEBUG +/* +** Return and reset the seek counter for a Btree object. +*/ +SQLITE_PRIVATE sqlite3_uint64 sqlite3BtreeSeekCount(Btree *pBt){ + u64 n = pBt->nSeek; + pBt->nSeek = 0; + return n; +} +#endif + /* ** Implementation of the SQLITE_CORRUPT_PAGE() macro. Takes a single ** (MemPage*) as an argument. The (MemPage*) must not be NULL. @@ -65022,7 +66003,7 @@ static void invalidateIncrblobCursors( int isClearTable /* True if all rows are being deleted */ ){ BtCursor *p; - if( pBtree->hasIncrblobCur==0 ) return; + assert( pBtree->hasIncrblobCur ); assert( sqlite3BtreeHoldsMutex(pBtree) ); pBtree->hasIncrblobCur = 0; for(p=pBtree->pBt->pCursor; p; p=p->pNext){ @@ -65618,6 +66599,24 @@ static SQLITE_NOINLINE void btreeParseCellAdjustSizeForOverflow( pInfo->nSize = (u16)(&pInfo->pPayload[pInfo->nLocal] - pCell) + 4; } +/* +** Given a record with nPayload bytes of payload stored within btree +** page pPage, return the number of bytes of payload stored locally. +*/ +static int btreePayloadToLocal(MemPage *pPage, i64 nPayload){ + int maxLocal; /* Maximum amount of payload held locally */ + maxLocal = pPage->maxLocal; + if( nPayload<=maxLocal ){ + return nPayload; + }else{ + int minLocal; /* Minimum amount of payload held locally */ + int surplus; /* Overflow payload available for local storage */ + minLocal = pPage->minLocal; + surplus = minLocal + (nPayload - minLocal)%(pPage->pBt->usableSize-4); + return ( surplus <= maxLocal ) ? surplus : minLocal; + } +} + /* ** The following routines are implementations of the MemPage.xParseCell() ** method. 
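The new btreePayloadToLocal() above factors out the rule that decides how many payload bytes stay on the b-tree page when a cell spills to overflow pages; sqlite3BtreeTransferRow(), later in this diff, uses it to preformat cells for the destination page. A worked sketch of the same arithmetic, assuming the usual table-leaf values for a 4096-byte usable page (maxLocal 4061, minLocal 489):

    /* Mirror of btreePayloadToLocal(): bytes of payload kept on-page. */
    static int payloadToLocal(long long nPayload, int maxLocal, int minLocal,
                              int usableSize){
      if( nPayload<=maxLocal ) return (int)nPayload;
      {
        int surplus = minLocal + (int)((nPayload - minLocal)%(usableSize - 4));
        return surplus<=maxLocal ? surplus : minLocal;
      }
    }
    /* e.g. nPayload=5000: surplus = 489 + (5000-489)%4092 = 908, which is
    ** <= 4061, so 908 bytes stay local and the rest goes to overflow. */
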
@@ -65905,6 +66904,7 @@ static int defragmentPage(MemPage *pPage, int nMaxFrag){ unsigned char *src; /* Source of content */ int iCellFirst; /* First allowable cell index */ int iCellLast; /* Last possible cell index */ + int iCellStart; /* First cell offset in input */ assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( pPage->pBt!=0 ); @@ -65946,7 +66946,7 @@ static int defragmentPage(MemPage *pPage, int nMaxFrag){ if( iFree2+sz2 > usableSize ) return SQLITE_CORRUPT_PAGE(pPage); memmove(&data[iFree+sz+sz2], &data[iFree+sz], iFree2-(iFree+sz)); sz += sz2; - }else if( NEVER(iFree+sz>usableSize) ){ + }else if( iFree+sz>usableSize ){ return SQLITE_CORRUPT_PAGE(pPage); } @@ -65965,6 +66965,7 @@ static int defragmentPage(MemPage *pPage, int nMaxFrag){ cbrk = usableSize; iCellLast = usableSize - 4; + iCellStart = get2byte(&data[hdr+5]); for(i=0; iiCellLast ){ + if( pciCellLast ){ return SQLITE_CORRUPT_PAGE(pPage); } - assert( pc>=iCellFirst && pc<=iCellLast ); + assert( pc>=iCellStart && pc<=iCellLast ); size = pPage->xCellSize(pPage, &src[pc]); cbrk -= size; - if( cbrkusableSize ){ + if( cbrkusableSize ){ return SQLITE_CORRUPT_PAGE(pPage); } - assert( cbrk+size<=usableSize && cbrk>=iCellFirst ); + assert( cbrk+size<=usableSize && cbrk>=iCellStart ); testcase( cbrk+size==usableSize ); testcase( pc+size==usableSize ); put2byte(pAddr, cbrk); if( temp==0 ){ - int x; if( cbrk==pc ) continue; temp = sqlite3PagerTempSpace(pPage->pBt->pPager); - x = get2byte(&data[hdr+5]); - memcpy(&temp[x], &data[x], (cbrk+size) - x); + memcpy(&temp[iCellStart], &data[iCellStart], usableSize - iCellStart); src = temp; } memcpy(&data[cbrk], &src[pc], size); @@ -67091,7 +68090,7 @@ btree_open_out: ** do not change the pager-cache size. */ if( sqlite3BtreeSchema(p, 0, 0)==0 ){ - sqlite3PagerSetCachesize(p->pBt->pPager, SQLITE_DEFAULT_CACHE_SIZE); + sqlite3BtreeSetCacheSize(p, SQLITE_DEFAULT_CACHE_SIZE); } pFile = sqlite3PagerFile(pBt->pPager); @@ -67194,19 +68193,23 @@ static void freeTempSpace(BtShared *pBt){ */ SQLITE_PRIVATE int sqlite3BtreeClose(Btree *p){ BtShared *pBt = p->pBt; - BtCursor *pCur; /* Close all cursors opened via this handle. */ assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3BtreeEnter(p); - pCur = pBt->pCursor; - while( pCur ){ - BtCursor *pTmp = pCur; - pCur = pCur->pNext; - if( pTmp->pBtree==p ){ - sqlite3BtreeCloseCursor(pTmp); + + /* Verify that no other cursors have this Btree open */ +#ifdef SQLITE_DEBUG + { + BtCursor *pCur = pBt->pCursor; + while( pCur ){ + BtCursor *pTmp = pCur; + pCur = pCur->pNext; + assert( pTmp->pBtree!=p ); + } } +#endif /* Rollback any active transaction and free the handle structure. 
** The call to sqlite3BtreeRollback() drops any table-locks held by @@ -67358,6 +68361,7 @@ SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve, ((pageSize-1)&pageSize)==0 ){ assert( (pageSize & 7)==0 ); assert( !pBt->pCursor ); + if( nReserve>32 && pageSize==512 ) pageSize = 1024; pBt->pageSize = (u32)pageSize; freeTempSpace(pBt); } @@ -68587,7 +69591,7 @@ SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree *p, int bCleanup){ sqlite3BtreeLeave(p); return rc; } - p->iDataVersion--; /* Compensate for pPager->iDataVersion++; */ + p->iBDataVersion--; /* Compensate for pPager->iDataVersion++; */ pBt->inTransaction = TRANS_READ; btreeClearHasContent(pBt); } @@ -68997,7 +70001,14 @@ SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor *pCur){ unlockBtreeIfUnused(pBt); sqlite3_free(pCur->aOverflow); sqlite3_free(pCur->pKey); - sqlite3BtreeLeave(pBtree); + if( (pBt->openFlags & BTREE_SINGLE) && pBt->pCursor==0 ){ + /* Since the BtShared is not sharable, there is no need to + ** worry about the missing sqlite3BtreeLeave() call here. */ + assert( pBtree->sharable==0 ); + sqlite3BtreeClose(pBtree); + }else{ + sqlite3BtreeLeave(pBtree); + } pCur->pBtree = 0; } return SQLITE_OK; @@ -69839,7 +70850,9 @@ SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){ for(ii=0; iiiPage; ii++){ assert( pCur->aiIdx[ii]==pCur->apPage[ii]->nCell ); } - assert( pCur->ix==pCur->pPage->nCell-1 ); + assert( pCur->ix==pCur->pPage->nCell-1 || CORRUPT_DB ); + testcase( pCur->ix!=pCur->pPage->nCell-1 ); + /* ^-- dbsqlfuzz b92b72e4de80b5140c30ab71372ca719b8feb618 */ assert( pCur->pPage->leaf ); #endif *pRes = 0; @@ -69945,6 +70958,10 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( } } +#ifdef SQLITE_DEBUG + pCur->pBtree->nSeek++; /* Performance measurement during testing */ +#endif + if( pIdxKey ){ xRecordCompare = sqlite3VdbeFindCompare(pIdxKey); pIdxKey->errCode = 0; @@ -70221,7 +71238,7 @@ static SQLITE_NOINLINE int btreeNext(BtCursor *pCur){ pPage = pCur->pPage; idx = ++pCur->ix; - if( !pPage->isInit ){ + if( !pPage->isInit || sqlite3FaultSim(412) ){ /* The only known way for this to happen is for there to be a ** recursive SQL function that does a DELETE operation as part of a ** SELECT which deletes content out from under an active cursor @@ -70602,7 +71619,7 @@ static int allocateBtreePage( iPage = get4byte(&aData[8+closest*4]); testcase( iPage==mxPage ); - if( iPage>mxPage ){ + if( iPage>mxPage || iPage<2 ){ rc = SQLITE_CORRUPT_PGNO(iTrunk); goto end_allocate_page; } @@ -70858,10 +71875,9 @@ static void freePage(MemPage *pPage, int *pRC){ } /* -** Free any overflow pages associated with the given Cell. Store -** size information about the cell in pInfo. +** Free the overflow pages associated with the given Cell. */ -static int clearCell( +static SQLITE_NOINLINE int clearCellOverflow( MemPage *pPage, /* The page that contains the Cell */ unsigned char *pCell, /* First byte of the Cell */ CellInfo *pInfo /* Size information about the cell */ @@ -70873,10 +71889,7 @@ static int clearCell( u32 ovflPageSize; assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - pPage->xParseCell(pPage, pCell, pInfo); - if( pInfo->nLocal==pInfo->nPayload ){ - return SQLITE_OK; /* No overflow pages. 
Return without doing anything */ - } + assert( pInfo->nLocal!=pInfo->nPayload ); testcase( pCell + pInfo->nSize == pPage->aDataEnd ); testcase( pCell + (pInfo->nSize-1) == pPage->aDataEnd ); if( pCell + pInfo->nSize > pPage->aDataEnd ){ @@ -70932,6 +71945,21 @@ static int clearCell( return SQLITE_OK; } +/* Call xParseCell to compute the size of a cell. If the cell contains +** overflow, then invoke cellClearOverflow to clear out that overflow. +** STore the result code (SQLITE_OK or some error code) in rc. +** +** Implemented as macro to force inlining for performance. +*/ +#define BTREE_CLEAR_CELL(rc, pPage, pCell, sInfo) \ + pPage->xParseCell(pPage, pCell, &sInfo); \ + if( sInfo.nLocal!=sInfo.nPayload ){ \ + rc = clearCellOverflow(pPage, pCell, &sInfo); \ + }else{ \ + rc = SQLITE_OK; \ + } + + /* ** Create the byte sequence used to represent a cell on page pPage ** and write that byte sequence into pCell[]. Overflow pages are @@ -71454,7 +72482,7 @@ static int rebuildPage( u8 *pCell = pCArray->apCell[i]; u16 sz = pCArray->szCell[i]; assert( sz>0 ); - if( SQLITE_WITHIN(pCell,aData,pEnd) ){ + if( SQLITE_WITHIN(pCell,aData+j,pEnd) ){ if( ((uptr)(pCell+sz))>(uptr)pEnd ) return SQLITE_CORRUPT_BKPT; pCell = &pTmp[pCell - aData]; }else if( (uptr)(pCell+sz)>(uptr)pSrcEnd @@ -71467,9 +72495,8 @@ static int rebuildPage( put2byte(pCellptr, (pData - aData)); pCellptr += 2; if( pData < pCellptr ) return SQLITE_CORRUPT_BKPT; - memcpy(pData, pCell, sz); + memmove(pData, pCell, sz); assert( sz==pPg->xCellSize(pPg, pCell) || CORRUPT_DB ); - testcase( sz!=pPg->xCellSize(pPg,pCell) ) i++; if( i>=iEnd ) break; if( pCArray->ixNx[k]<=i ){ @@ -71608,7 +72635,9 @@ static int pageFreeArray( } pFree = pCell; szFree = sz; - if( pFree+sz>pEnd ) return 0; + if( pFree+sz>pEnd ){ + return 0; + } }else{ pFree = pCell; szFree += sz; @@ -72089,7 +73118,9 @@ static int balance_nonroot( } pgno = get4byte(pRight); while( 1 ){ - rc = getAndInitPage(pBt, pgno, &apOld[i], 0, 0); + if( rc==SQLITE_OK ){ + rc = getAndInitPage(pBt, pgno, &apOld[i], 0, 0); + } if( rc ){ memset(apOld, 0, (i+1)*sizeof(MemPage*)); goto balance_cleanup; @@ -72128,12 +73159,10 @@ static int balance_nonroot( if( pBt->btsFlags & BTS_FAST_SECURE ){ int iOff; + /* If the following if() condition is not true, the db is corrupted. + ** The call to dropCell() below will detect this. 
*/ iOff = SQLITE_PTR_TO_INT(apDiv[i]) - SQLITE_PTR_TO_INT(pParent->aData); - if( (iOff+szNew[i])>(int)pBt->usableSize ){ - rc = SQLITE_CORRUPT_BKPT; - memset(apOld, 0, (i+1)*sizeof(MemPage*)); - goto balance_cleanup; - }else{ + if( (iOff+szNew[i])<=(int)pBt->usableSize ){ memcpy(&aOvflSpace[iOff], apDiv[i], szNew[i]); apDiv[i] = &aOvflSpace[apDiv[i]-pParent->aData]; } @@ -72261,7 +73290,7 @@ static int balance_nonroot( b.szCell[b.nCell] = b.szCell[b.nCell] - leafCorrection; if( !pOld->leaf ){ assert( leafCorrection==0 ); - assert( pOld->hdrOffset==0 ); + assert( pOld->hdrOffset==0 || CORRUPT_DB ); /* The right pointer of the child page pOld becomes the left ** pointer of the divider cell */ memcpy(b.apCell[b.nCell], &pOld->aData[8], 4); @@ -72427,6 +73456,9 @@ static int balance_nonroot( apOld[i] = 0; rc = sqlite3PagerWrite(pNew->pDbPage); nNew++; + if( sqlite3PagerPageRefcount(pNew->pDbPage)!=1+(i==(iParentIdx-nxDiv)) ){ + rc = SQLITE_CORRUPT_BKPT; + } if( rc ) goto balance_cleanup; }else{ assert( i>0 ); @@ -72463,7 +73495,7 @@ static int balance_nonroot( aPgOrder[i] = aPgno[i] = apNew[i]->pgno; aPgFlags[i] = apNew[i]->pDbPage->flags; for(j=0; jmaxLocal+23 ); assert( iOvflSpace <= (int)pBt->pageSize ); + for(k=0; b.ixNx[k]<=i && ALWAYS(kpgno, &rc); if( rc!=SQLITE_OK ) goto balance_cleanup; assert( sqlite3PagerIswriteable(pParent->pDbPage) ); @@ -73131,7 +74170,8 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( unsigned char *oldCell; unsigned char *newCell = 0; - assert( (flags & (BTREE_SAVEPOSITION|BTREE_APPEND))==flags ); + assert( (flags & (BTREE_SAVEPOSITION|BTREE_APPEND|BTREE_PREFORMAT))==flags ); + assert( (flags & BTREE_PREFORMAT)==0 || seekResult || pCur->pKeyInfo==0 ); if( pCur->eState==CURSOR_FAULT ){ assert( pCur->skipNext!=SQLITE_OK ); @@ -73149,7 +74189,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( ** keys with no associated data. If the cursor was opened expecting an ** intkey table, the caller should be inserting integer keys with a ** blob of associated data. */ - assert( (pX->pKey==0)==(pCur->pKeyInfo==0) ); + assert( (flags & BTREE_PREFORMAT) || (pX->pKey==0)==(pCur->pKeyInfo==0) ); /* Save the positions of any other cursors open on this table. ** @@ -73165,13 +74205,23 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( if( pCur->curFlags & BTCF_Multiple ){ rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur); if( rc ) return rc; + if( loc && pCur->iPage<0 ){ + /* This can only happen if the schema is corrupt such that there is more + ** than one table or index with the same root page as used by the cursor. + ** Which can only happen if the SQLITE_NoSchemaError flag was set when + ** the schema was loaded. This cannot be asserted though, as a user might + ** set the flag, load the schema, and then unset the flag. */ + return SQLITE_CORRUPT_BKPT; + } } if( pCur->pKeyInfo==0 ){ assert( pX->pKey==0 ); /* If this is an insert into a table b-tree, invalidate any incrblob ** cursors open on the row being replaced */ - invalidateIncrblobCursors(p, pCur->pgnoRoot, pX->nKey, 0); + if( p->hasIncrblobCur ){ + invalidateIncrblobCursors(p, pCur->pgnoRoot, pX->nKey, 0); + } /* If BTREE_SAVEPOSITION is set, the cursor must already be pointing ** to a row with the same key as the new entry being inserted. 
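The sqlite3BtreeInsert() hunks above add the BTREE_PREFORMAT path: the finished cell image already sits in BtShared.pTmpSpace with its size recorded in BtShared.nPreformatSize, so fillInCell() is skipped and, under auto-vacuum, only the first overflow page's ptrmap entry is patched. A sketch of the intended two-step row copy using the internal entry points named in this diff (not public API; error handling abbreviated):

    /* Copy the current row of pSrc into pDest under rowid iKey:
    ** step 1 preformats the cell into pDest's BtShared.pTmpSpace,
    ** step 2 inserts it without re-encoding the payload. */
    static int copyRowPreformatted(BtCursor *pDest, BtCursor *pSrc,
                                   i64 iKey, int seekResult){
      int rc = sqlite3BtreeTransferRow(pDest, pSrc, iKey);
      if( rc==SQLITE_OK ){
        BtreePayload x;
        memset(&x, 0, sizeof(x));
        x.nKey = iKey;
        rc = sqlite3BtreeInsert(pDest, &x, BTREE_PREFORMAT, seekResult);
      }
      return rc;
    }
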
@@ -73252,17 +74302,16 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( return btreeOverwriteCell(pCur, &x2); } } - } assert( pCur->eState==CURSOR_VALID || (pCur->eState==CURSOR_INVALID && loc) || CORRUPT_DB ); pPage = pCur->pPage; - assert( pPage->intKey || pX->nKey>=0 ); + assert( pPage->intKey || pX->nKey>=0 || (flags & BTREE_PREFORMAT) ); assert( pPage->leaf || !pPage->intKey ); if( pPage->nFree<0 ){ - if( pCur->eState>CURSOR_INVALID ){ + if( NEVER(pCur->eState>CURSOR_INVALID) ){ rc = SQLITE_CORRUPT_BKPT; }else{ rc = btreeComputeFreeSpace(pPage); @@ -73276,7 +74325,21 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( assert( pPage->isInit ); newCell = pBt->pTmpSpace; assert( newCell!=0 ); - rc = fillInCell(pPage, newCell, pX, &szNew); + if( flags & BTREE_PREFORMAT ){ + rc = SQLITE_OK; + szNew = pBt->nPreformatSize; + if( szNew<4 ) szNew = 4; + if( ISAUTOVACUUM && szNew>pPage->maxLocal ){ + CellInfo info; + pPage->xParseCell(pPage, newCell, &info); + if( info.nPayload!=info.nLocal ){ + Pgno ovfl = get4byte(&newCell[szNew-4]); + ptrmapPut(pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, &rc); + } + } + }else{ + rc = fillInCell(pPage, newCell, pX, &szNew); + } if( rc ) goto end_insert; assert( szNew==pPage->xCellSize(pPage, newCell) ); assert( szNew <= MX_CELL_SIZE(pBt) ); @@ -73292,7 +74355,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( if( !pPage->leaf ){ memcpy(newCell, oldCell, 4); } - rc = clearCell(pPage, oldCell, &info); + BTREE_CLEAR_CELL(rc, pPage, oldCell, info); testcase( pCur->curFlags & BTCF_ValidOvfl ); invalidateOverflowCache(pCur); if( info.nSize==szNew && info.nLocal==info.nPayload @@ -73383,6 +74446,114 @@ end_insert: return rc; } +/* +** This function is used as part of copying the current row from cursor +** pSrc into cursor pDest. If the cursors are open on intkey tables, then +** parameter iKey is used as the rowid value when the record is copied +** into pDest. Otherwise, the record is copied verbatim. +** +** This function does not actually write the new value to cursor pDest. +** Instead, it creates and populates any required overflow pages and +** writes the data for the new cell into the BtShared.pTmpSpace buffer +** for the destination database. The size of the cell, in bytes, is left +** in BtShared.nPreformatSize. The caller completes the insertion by +** calling sqlite3BtreeInsert() with the BTREE_PREFORMAT flag specified. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. 
+*/ +SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 iKey){ + int rc = SQLITE_OK; + BtShared *pBt = pDest->pBt; + u8 *aOut = pBt->pTmpSpace; /* Pointer to next output buffer */ + const u8 *aIn; /* Pointer to next input buffer */ + u32 nIn; /* Size of input buffer aIn[] */ + u32 nRem; /* Bytes of data still to copy */ + + getCellInfo(pSrc); + aOut += putVarint32(aOut, pSrc->info.nPayload); + if( pDest->pKeyInfo==0 ) aOut += putVarint(aOut, iKey); + nIn = pSrc->info.nLocal; + aIn = pSrc->info.pPayload; + if( aIn+nIn>pSrc->pPage->aDataEnd ){ + return SQLITE_CORRUPT_BKPT; + } + nRem = pSrc->info.nPayload; + if( nIn==nRem && nInpPage->maxLocal ){ + memcpy(aOut, aIn, nIn); + pBt->nPreformatSize = nIn + (aOut - pBt->pTmpSpace); + }else{ + Pager *pSrcPager = pSrc->pBt->pPager; + u8 *pPgnoOut = 0; + Pgno ovflIn = 0; + DbPage *pPageIn = 0; + MemPage *pPageOut = 0; + u32 nOut; /* Size of output buffer aOut[] */ + + nOut = btreePayloadToLocal(pDest->pPage, pSrc->info.nPayload); + pBt->nPreformatSize = nOut + (aOut - pBt->pTmpSpace); + if( nOutinfo.nPayload ){ + pPgnoOut = &aOut[nOut]; + pBt->nPreformatSize += 4; + } + + if( nRem>nIn ){ + if( aIn+nIn+4>pSrc->pPage->aDataEnd ){ + return SQLITE_CORRUPT_BKPT; + } + ovflIn = get4byte(&pSrc->info.pPayload[nIn]); + } + + do { + nRem -= nOut; + do{ + assert( nOut>0 ); + if( nIn>0 ){ + int nCopy = MIN(nOut, nIn); + memcpy(aOut, aIn, nCopy); + nOut -= nCopy; + nIn -= nCopy; + aOut += nCopy; + aIn += nCopy; + } + if( nOut>0 ){ + sqlite3PagerUnref(pPageIn); + pPageIn = 0; + rc = sqlite3PagerGet(pSrcPager, ovflIn, &pPageIn, PAGER_GET_READONLY); + if( rc==SQLITE_OK ){ + aIn = (const u8*)sqlite3PagerGetData(pPageIn); + ovflIn = get4byte(aIn); + aIn += 4; + nIn = pSrc->pBt->usableSize - 4; + } + } + }while( rc==SQLITE_OK && nOut>0 ); + + if( rc==SQLITE_OK && nRem>0 ){ + Pgno pgnoNew; + MemPage *pNew = 0; + rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0); + put4byte(pPgnoOut, pgnoNew); + if( ISAUTOVACUUM && pPageOut ){ + ptrmapPut(pBt, pgnoNew, PTRMAP_OVERFLOW2, pPageOut->pgno, &rc); + } + releasePage(pPageOut); + pPageOut = pNew; + if( pPageOut ){ + pPgnoOut = pPageOut->aData; + put4byte(pPgnoOut, 0); + aOut = &pPgnoOut[4]; + nOut = MIN(pBt->usableSize - 4, nRem); + } + } + }while( nRem>0 && rc==SQLITE_OK ); + + releasePage(pPageOut); + sqlite3PagerUnref(pPageIn); + } + + return rc; +} + /* ** Delete the entry that the cursor is pointing to. ** @@ -73421,9 +74592,10 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ assert( (flags & ~(BTREE_SAVEPOSITION | BTREE_AUXDELETE))==0 ); if( pCur->eState==CURSOR_REQUIRESEEK ){ rc = btreeRestoreCursorPosition(pCur); - if( rc ) return rc; + assert( rc!=SQLITE_OK || CORRUPT_DB || pCur->eState==CURSOR_VALID ); + if( rc || pCur->eState!=CURSOR_VALID ) return rc; } - assert( pCur->eState==CURSOR_VALID ); + assert( CORRUPT_DB || pCur->eState==CURSOR_VALID ); iCellDepth = pCur->iPage; iCellIdx = pCur->ix; @@ -73476,7 +74648,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ /* If this is a delete operation to remove a row from a table b-tree, ** invalidate any incrblob cursors open on the row being deleted. */ - if( pCur->pKeyInfo==0 ){ + if( pCur->pKeyInfo==0 && p->hasIncrblobCur ){ invalidateIncrblobCursors(p, pCur->pgnoRoot, pCur->info.nKey, 0); } @@ -73485,7 +74657,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ ** itself from within the page. 
*/ rc = sqlite3PagerWrite(pPage->pDbPage); if( rc ) return rc; - rc = clearCell(pPage, pCell, &info); + BTREE_CLEAR_CELL(rc, pPage, pCell, info); dropCell(pPage, iCellIdx, info.nSize, &rc); if( rc ) return rc; @@ -73772,14 +74944,14 @@ static int clearDatabasePage( rc = clearDatabasePage(pBt, get4byte(pCell), 1, pnChange); if( rc ) goto cleardatabasepage_out; } - rc = clearCell(pPage, pCell, &info); + BTREE_CLEAR_CELL(rc, pPage, pCell, info); if( rc ) goto cleardatabasepage_out; } if( !pPage->leaf ){ rc = clearDatabasePage(pBt, get4byte(&pPage->aData[hdr+8]), 1, pnChange); if( rc ) goto cleardatabasepage_out; - }else if( pnChange ){ - assert( pPage->intKey || CORRUPT_DB ); + } + if( pnChange ){ testcase( !pPage->intKey ); *pnChange += pPage->nCell; } @@ -73804,9 +74976,8 @@ cleardatabasepage_out: ** read cursors on the table. Open write cursors are moved to the ** root of the table. ** -** If pnChange is not NULL, then table iTable must be an intkey table. The -** integer value pointed to by pnChange is incremented by the number of -** entries in the table. +** If pnChange is not NULL, then the integer value pointed to by pnChange +** is incremented by the number of entries in the table. */ SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree *p, int iTable, int *pnChange){ int rc; @@ -73820,7 +74991,9 @@ SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree *p, int iTable, int *pnChange){ /* Invalidate all incrblob cursors open on table iTable (assuming iTable ** is the root of a table b-tree - if it is not, the following call is ** a no-op). */ - invalidateIncrblobCursors(p, (Pgno)iTable, 0, 1); + if( p->hasIncrblobCur ){ + invalidateIncrblobCursors(p, (Pgno)iTable, 0, 1); + } rc = clearDatabasePage(pBt, (Pgno)iTable, 0, pnChange); } sqlite3BtreeLeave(p); @@ -73980,7 +75153,7 @@ SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *p, int idx, u32 *pMeta){ assert( idx>=0 && idx<=15 ); if( idx==BTREE_DATA_VERSION ){ - *pMeta = sqlite3PagerDataVersion(pBt->pPager) + p->iDataVersion; + *pMeta = sqlite3PagerDataVersion(pBt->pPager) + p->iBDataVersion; }else{ *pMeta = get4byte(&pBt->pPage1->aData[36 + idx*4]); } @@ -74796,11 +75969,12 @@ SQLITE_PRIVATE const char *sqlite3BtreeGetJournalname(Btree *p){ } /* -** Return non-zero if a transaction is active. +** Return one of SQLITE_TXN_NONE, SQLITE_TXN_READ, or SQLITE_TXN_WRITE +** to describe the current transaction state of Btree p. */ -SQLITE_PRIVATE int sqlite3BtreeIsInTrans(Btree *p){ +SQLITE_PRIVATE int sqlite3BtreeTxnState(Btree *p){ assert( p==0 || sqlite3_mutex_held(p->db->mutex) ); - return (p && (p->inTrans==TRANS_WRITE)); + return p ? p->inTrans : 0; } #ifndef SQLITE_OMIT_WAL @@ -74829,14 +76003,8 @@ SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree *p, int eMode, int *pnLog, int * #endif /* -** Return non-zero if a read (or write) transaction is active. +** Return true if there is currently a backup running on Btree p. */ -SQLITE_PRIVATE int sqlite3BtreeIsInReadTrans(Btree *p){ - assert( p ); - assert( sqlite3_mutex_held(p->db->mutex) ); - return p->inTrans!=TRANS_NONE; -} - SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree *p){ assert( p ); assert( sqlite3_mutex_held(p->db->mutex) ); @@ -75182,7 +76350,7 @@ static int setDestPgsz(sqlite3_backup *p){ ** message in database handle db. 
*/ static int checkReadTransaction(sqlite3 *db, Btree *p){ - if( sqlite3BtreeIsInReadTrans(p) ){ + if( sqlite3BtreeTxnState(p)!=SQLITE_TXN_NONE ){ sqlite3ErrorWithMsg(db, SQLITE_ERROR, "destination database is in use"); return SQLITE_ERROR; } @@ -75413,7 +76581,7 @@ SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage){ ** one now. If a transaction is opened here, then it will be closed ** before this function exits. */ - if( rc==SQLITE_OK && 0==sqlite3BtreeIsInReadTrans(p->pSrc) ){ + if( rc==SQLITE_OK && SQLITE_TXN_NONE==sqlite3BtreeTxnState(p->pSrc) ){ rc = sqlite3BtreeBeginTrans(p->pSrc, 0, 0); bCloseTrans = 1; } @@ -75785,7 +76953,7 @@ SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *pTo, Btree *pFrom){ sqlite3BtreeEnter(pTo); sqlite3BtreeEnter(pFrom); - assert( sqlite3BtreeIsInTrans(pTo) ); + assert( sqlite3BtreeTxnState(pTo)==SQLITE_TXN_WRITE ); pFd = sqlite3PagerFile(sqlite3BtreePager(pTo)); if( pFd->pMethods ){ i64 nByte = sqlite3BtreeGetPageSize(pFrom)*(i64)sqlite3BtreeLastPage(pFrom); @@ -75821,7 +76989,7 @@ SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *pTo, Btree *pFrom){ sqlite3PagerClearCache(sqlite3BtreePager(b.pDest)); } - assert( sqlite3BtreeIsInTrans(pTo)==0 ); + assert( sqlite3BtreeTxnState(pTo)!=SQLITE_TXN_WRITE ); copy_finished: sqlite3BtreeLeave(pFrom); sqlite3BtreeLeave(pTo); @@ -75908,7 +77076,9 @@ SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem *p){ /* The szMalloc field holds the correct memory allocation size */ assert( p->szMalloc==0 - || p->szMalloc==sqlite3DbMallocSize(p->db,p->zMalloc) ); + || (p->flags==MEM_Undefined + && p->szMalloc<=sqlite3DbMallocSize(p->db,p->zMalloc)) + || p->szMalloc==sqlite3DbMallocSize(p->db,p->zMalloc)); /* If p holds a string or blob, the Mem.z must point to exactly ** one of the following: @@ -76072,7 +77242,9 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3VdbeMemGrow(Mem *pMem, int n, int bPre testcase( bPreserve && pMem->z==0 ); assert( pMem->szMalloc==0 - || pMem->szMalloc==sqlite3DbMallocSize(pMem->db, pMem->zMalloc) ); + || (pMem->flags==MEM_Undefined + && pMem->szMalloc<=sqlite3DbMallocSize(pMem->db,pMem->zMalloc)) + || pMem->szMalloc==sqlite3DbMallocSize(pMem->db,pMem->zMalloc)); if( pMem->szMalloc>0 && bPreserve && pMem->z==pMem->zMalloc ){ if( pMem->db ){ pMem->z = pMem->zMalloc = sqlite3DbReallocOrFree(pMem->db, pMem->z, n); @@ -76901,11 +78073,11 @@ SQLITE_PRIVATE void sqlite3VdbeMemMove(Mem *pTo, Mem *pFrom){ SQLITE_PRIVATE int sqlite3VdbeMemSetStr( Mem *pMem, /* Memory cell to set to string value */ const char *z, /* String pointer */ - int n, /* Bytes in string, or negative */ + i64 n, /* Bytes in string, or negative */ u8 enc, /* Encoding of z. 0 for BLOBs */ void (*xDel)(void*) /* Destructor function */ ){ - int nByte = n; /* New value for pMem->n */ + i64 nByte = n; /* New value for pMem->n */ int iLimit; /* Maximum allowed string or blob size */ u16 flags = 0; /* New value for pMem->flags */ @@ -76927,7 +78099,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr( if( nByte<0 ){ assert( enc!=0 ); if( enc==SQLITE_UTF8 ){ - nByte = 0x7fffffff & (int)strlen(z); + nByte = strlen(z); }else{ for(nByte=0; nByte<=iLimit && (z[nByte] | z[nByte+1]); nByte+=2){} } @@ -76939,7 +78111,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr( ** management (one of MEM_Dyn or MEM_Static). 
*/ if( xDel==SQLITE_TRANSIENT ){ - u32 nAlloc = nByte; + i64 nAlloc = nByte; if( flags&MEM_Term ){ nAlloc += (enc==SQLITE_UTF8?1:2); } @@ -76965,7 +78137,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr( } } - pMem->n = nByte; + pMem->n = (int)(nByte & 0x7fffffff); pMem->flags = flags; if( enc ){ pMem->enc = enc; @@ -76985,7 +78157,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr( #endif if( nByte>iLimit ){ - return SQLITE_TOOBIG; + return sqlite3ErrorToParser(pMem->db, SQLITE_TOOBIG); } return SQLITE_OK; @@ -77776,7 +78948,7 @@ SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse *pParse){ p->pNext = db->pVdbe; p->pPrev = 0; db->pVdbe = p; - p->magic = VDBE_MAGIC_INIT; + p->iVdbeMagic = VDBE_MAGIC_INIT; p->pParse = pParse; pParse->pVdbe = p; assert( pParse->aLabel==0 ); @@ -77977,7 +79149,7 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){ VdbeOp *pOp; i = p->nOp; - assert( p->magic==VDBE_MAGIC_INIT ); + assert( p->iVdbeMagic==VDBE_MAGIC_INIT ); assert( op>=0 && op<0xff ); if( p->nOpAlloc<=i ){ return growOp3(p, op, p1, p2, p3); @@ -78212,10 +79384,12 @@ SQLITE_PRIVATE void sqlite3VdbeExplainPop(Parse *pParse){ ** The zWhere string must have been obtained from sqlite3_malloc(). ** This routine will take ownership of the allocated memory. */ -SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe *p, int iDb, char *zWhere){ +SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe *p, int iDb, char *zWhere, u16 p5){ int j; sqlite3VdbeAddOp4(p, OP_ParseSchema, iDb, 0, 0, zWhere, P4_DYNAMIC); + sqlite3VdbeChangeP5(p, p5); for(j=0; jdb->nDb; j++) sqlite3VdbeUsesBtree(p, j); + sqlite3MayAbort(p->pParse); } /* @@ -78305,7 +79479,7 @@ static SQLITE_NOINLINE void resizeResolveLabel(Parse *p, Vdbe *v, int j){ SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe *v, int x){ Parse *p = v->pParse; int j = ADDR(x); - assert( v->magic==VDBE_MAGIC_INIT ); + assert( v->iVdbeMagic==VDBE_MAGIC_INIT ); assert( j<-p->nLabel ); assert( j>=0 ); #ifdef SQLITE_DEBUG @@ -78444,7 +79618,7 @@ SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *v, int mayAbort){ if( opcode==OP_Destroy || opcode==OP_VUpdate || opcode==OP_VRename || opcode==OP_VDestroy || opcode==OP_VCreate - || (opcode==OP_ParseSchema && pOp->p4.z==0) + || opcode==OP_ParseSchema || ((opcode==OP_Halt || opcode==OP_HaltIfNull) && ((pOp->p1)!=SQLITE_OK && pOp->p2==OE_Abort)) ){ @@ -78630,7 +79804,7 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ ** Return the address of the next instruction to be inserted. */ SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe *p){ - assert( p->magic==VDBE_MAGIC_INIT ); + assert( p->iVdbeMagic==VDBE_MAGIC_INIT ); return p->nOp; } @@ -78715,7 +79889,7 @@ SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList( int i; VdbeOp *pOut, *pFirst; assert( nOp>0 ); - assert( p->magic==VDBE_MAGIC_INIT ); + assert( p->iVdbeMagic==VDBE_MAGIC_INIT ); if( p->nOp + nOp > p->nOpAlloc && growOpArray(p, nOp) ){ return 0; } @@ -79039,7 +80213,7 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe *p, int addr, const char *zP4, int sqlite3 *db; assert( p!=0 ); db = p->db; - assert( p->magic==VDBE_MAGIC_INIT ); + assert( p->iVdbeMagic==VDBE_MAGIC_INIT ); assert( p->aOp!=0 || db->mallocFailed ); if( db->mallocFailed ){ if( n!=P4_VTAB ) freeP4(db, n, (void*)*(char**)&zP4); @@ -79168,7 +80342,7 @@ SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){ /* C89 specifies that the constant "dummy" will be initialized to all ** zeros, which is correct. MSVC generates a warning, nevertheless. 
*/ static VdbeOp dummy; /* Ignore the MSVC warning about no initializer */ - assert( p->magic==VDBE_MAGIC_INIT ); + assert( p->iVdbeMagic==VDBE_MAGIC_INIT ); if( addr<0 ){ addr = p->nOp - 1; } @@ -79226,11 +80400,7 @@ SQLITE_PRIVATE char *sqlite3VdbeDisplayComment( char c; zSynopsis = zOpName += nOpName + 1; if( strncmp(zSynopsis,"IF ",3)==0 ){ - if( pOp->p5 & SQLITE_STOREP2 ){ - sqlite3_snprintf(sizeof(zAlt), zAlt, "r[P2] = (%s)", zSynopsis+3); - }else{ - sqlite3_snprintf(sizeof(zAlt), zAlt, "if %s goto P2", zSynopsis+3); - } + sqlite3_snprintf(sizeof(zAlt), zAlt, "if %s goto P2", zSynopsis+3); zSynopsis = zAlt; } for(ii=0; (c = zSynopsis[ii])!=0; ii++){ @@ -79262,7 +80432,7 @@ SQLITE_PRIVATE char *sqlite3VdbeDisplayComment( sqlite3_str_appendf(&x, "%d", v1); }else if( pCtx->argc>1 ){ sqlite3_str_appendf(&x, "%d..%d", v1, v1+pCtx->argc-1); - }else{ + }else if( x.accError==0 ){ assert( x.nChar>2 ); x.nChar -= 2; ii++; @@ -79853,7 +81023,7 @@ SQLITE_PRIVATE int sqlite3VdbeList( Op *pOp; /* Current opcode */ assert( p->explain ); - assert( p->magic==VDBE_MAGIC_RUN ); + assert( p->iVdbeMagic==VDBE_MAGIC_RUN ); assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY || p->rc==SQLITE_NOMEM ); /* Even though this opcode does not use dynamic strings for @@ -80033,14 +81203,14 @@ SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){ int i; #endif assert( p!=0 ); - assert( p->magic==VDBE_MAGIC_INIT || p->magic==VDBE_MAGIC_RESET ); + assert( p->iVdbeMagic==VDBE_MAGIC_INIT || p->iVdbeMagic==VDBE_MAGIC_RESET ); /* There should be at least one opcode. */ assert( p->nOp>0 ); /* Set the magic to VDBE_MAGIC_RUN sooner rather than later. */ - p->magic = VDBE_MAGIC_RUN; + p->iVdbeMagic = VDBE_MAGIC_RUN; #ifdef SQLITE_DEBUG for(i=0; inMem; i++){ @@ -80096,8 +81266,10 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( assert( p!=0 ); assert( p->nOp>0 ); assert( pParse!=0 ); - assert( p->magic==VDBE_MAGIC_INIT ); + assert( p->iVdbeMagic==VDBE_MAGIC_INIT ); assert( pParse==p->pParse ); + p->pVList = pParse->pVList; + pParse->pVList = 0; db = p->db; assert( db->mallocFailed==0 ); nVar = pParse->nVar; @@ -80182,8 +81354,6 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( } } - p->pVList = pParse->pVList; - pParse->pVList = 0; if( db->mallocFailed ){ p->nVar = 0; p->nCursor = 0; @@ -80211,20 +81381,15 @@ SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){ return; } assert( pCx->pBtx==0 || pCx->eCurType==CURTYPE_BTREE ); + assert( pCx->pBtx==0 || pCx->isEphemeral ); switch( pCx->eCurType ){ case CURTYPE_SORTER: { sqlite3VdbeSorterClose(p->db, pCx); break; } case CURTYPE_BTREE: { - if( pCx->isEphemeral ){ - if( pCx->pBtx ) sqlite3BtreeClose(pCx->pBtx); - /* The pCx->pCursor will be close automatically, if it exists, by - ** the call above. */ - }else{ - assert( pCx->uc.pCursor!=0 ); - sqlite3BtreeCloseCursor(pCx->uc.pCursor); - } + assert( pCx->uc.pCursor!=0 ); + sqlite3BtreeCloseCursor(pCx->uc.pCursor); break; } #ifndef SQLITE_OMIT_VIRTUALTABLE @@ -80404,7 +81569,7 @@ static int vdbeCommit(sqlite3 *db, Vdbe *p){ */ for(i=0; rc==SQLITE_OK && inDb; i++){ Btree *pBt = db->aDb[i].pBt; - if( sqlite3BtreeIsInTrans(pBt) ){ + if( sqlite3BtreeTxnState(pBt)==SQLITE_TXN_WRITE ){ /* Whether or not a database might need a super-journal depends upon ** its journal mode (among other things). 
This matrix determines which ** journal modes use a super-journal and which do not */ @@ -80539,7 +81704,7 @@ static int vdbeCommit(sqlite3 *db, Vdbe *p){ */ for(i=0; inDb; i++){ Btree *pBt = db->aDb[i].pBt; - if( sqlite3BtreeIsInTrans(pBt) ){ + if( sqlite3BtreeTxnState(pBt)==SQLITE_TXN_WRITE ){ char const *zFile = sqlite3BtreeGetJournalname(pBt); if( zFile==0 ){ continue; /* Ignore TEMP and :memory: databases */ @@ -80781,7 +81946,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ ** one, or the complete transaction if there is no statement transaction. */ - if( p->magic!=VDBE_MAGIC_RUN ){ + if( p->iVdbeMagic!=VDBE_MAGIC_RUN ){ return SQLITE_OK; } if( db->mallocFailed ){ @@ -80939,7 +82104,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ assert( db->nVdbeRead>=db->nVdbeWrite ); assert( db->nVdbeWrite>=0 ); } - p->magic = VDBE_MAGIC_HALT; + p->iVdbeMagic = VDBE_MAGIC_HALT; checkActiveVdbeCnt(db); if( db->mallocFailed ){ p->rc = SQLITE_NOMEM_BKPT; @@ -81112,7 +82277,7 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){ } } #endif - p->magic = VDBE_MAGIC_RESET; + p->iVdbeMagic = VDBE_MAGIC_RESET; return p->rc & db->errMask; } @@ -81122,7 +82287,7 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){ */ SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe *p){ int rc = SQLITE_OK; - if( p->magic==VDBE_MAGIC_RUN || p->magic==VDBE_MAGIC_HALT ){ + if( p->iVdbeMagic==VDBE_MAGIC_RUN || p->iVdbeMagic==VDBE_MAGIC_HALT ){ rc = sqlite3VdbeReset(p); assert( (rc & p->db->errMask)==rc ); } @@ -81183,7 +82348,7 @@ SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){ vdbeFreeOpArray(db, pSub->aOp, pSub->nOp); sqlite3DbFree(db, pSub); } - if( p->magic!=VDBE_MAGIC_INIT ){ + if( p->iVdbeMagic!=VDBE_MAGIC_INIT ){ releaseMemArray(p->aVar, p->nVar); sqlite3DbFree(db, p->pVList); sqlite3DbFree(db, p->pFree); @@ -81231,7 +82396,7 @@ SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe *p){ if( p->pNext ){ p->pNext->pPrev = p->pPrev; } - p->magic = VDBE_MAGIC_DEAD; + p->iVdbeMagic = VDBE_MAGIC_DEAD; p->db = 0; sqlite3DbFreeNN(db, p); } @@ -81308,6 +82473,7 @@ SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor **pp, u32 *piCol){ assert( p->eCurType==CURTYPE_BTREE || p->eCurType==CURTYPE_PSEUDO ); if( p->deferredMoveto ){ u32 iMap; + assert( !p->isEphemeral ); if( p->aAltMap && (iMap = p->aAltMap[1+*piCol])>0 && !p->nullRow ){ *pp = p->pAltCursor; *piCol = iMap - 1; @@ -82035,9 +83201,12 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem static int sqlite3IntFloatCompare(i64 i, double r){ if( sizeof(LONGDOUBLE_TYPE)>8 ){ LONGDOUBLE_TYPE x = (LONGDOUBLE_TYPE)i; + testcase( xr ); + testcase( x==r ); if( xr ) return +1; - return 0; + if( x>r ) return +1; /*NO_TEST*/ /* work around bugs in gcov */ + return 0; /*NO_TEST*/ /* work around bugs in gcov */ }else{ i64 y; double s; @@ -82933,7 +84102,8 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( const char *zDb, /* Database name */ Table *pTab, /* Modified table */ i64 iKey1, /* Initial key value */ - int iReg /* Register for new.* record */ + int iReg, /* Register for new.* record */ + int iBlobWrite ){ sqlite3 *db = v->db; i64 iKey2; @@ -82969,6 +84139,7 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( preupdate.iKey1 = iKey1; preupdate.iKey2 = iKey2; preupdate.pTab = pTab; + preupdate.iBlobWrite = iBlobWrite; db->pPreUpdate = &preupdate; db->xPreUpdateCallback(db->pPreUpdateArg, db, op, zDb, zTbl, iKey1, iKey2); @@ -83382,7 +84553,7 @@ static int invokeValueDestructor( }else{ xDel((void*)p); } - if( pCtx ) sqlite3_result_error_toobig(pCtx); + 
sqlite3_result_error_toobig(pCtx); return SQLITE_TOOBIG; } SQLITE_API void sqlite3_result_blob( @@ -83607,7 +84778,7 @@ static int sqlite3Step(Vdbe *p){ int rc; assert(p); - if( p->magic!=VDBE_MAGIC_RUN ){ + if( p->iVdbeMagic!=VDBE_MAGIC_RUN ){ /* We used to require that sqlite3_reset() be called before retrying ** sqlite3_step() after any error or after SQLITE_DONE. But beginning ** with version 3.7.0, we changed this so that sqlite3_reset() would @@ -84323,7 +85494,7 @@ static int vdbeUnbind(Vdbe *p, int i){ return SQLITE_MISUSE_BKPT; } sqlite3_mutex_enter(p->db->mutex); - if( p->magic!=VDBE_MAGIC_RUN || p->pc>=0 ){ + if( p->iVdbeMagic!=VDBE_MAGIC_RUN || p->pc>=0 ){ sqlite3Error(p->db, SQLITE_MISUSE); sqlite3_mutex_leave(p->db->mutex); sqlite3_log(SQLITE_MISUSE, @@ -84364,7 +85535,7 @@ static int bindText( sqlite3_stmt *pStmt, /* The statement to bind against */ int i, /* Index of the parameter to bind */ const void *zData, /* Pointer to the data to be bound */ - int nData, /* Number of bytes of data to be bound */ + i64 nData, /* Number of bytes of data to be bound */ void (*xDel)(void*), /* Destructor for the data */ u8 encoding /* Encoding for the data */ ){ @@ -84416,11 +85587,7 @@ SQLITE_API int sqlite3_bind_blob64( void (*xDel)(void*) ){ assert( xDel!=SQLITE_DYNAMIC ); - if( nData>0x7fffffff ){ - return invokeValueDestructor(zData, xDel, 0); - }else{ - return bindText(pStmt, i, zData, (int)nData, xDel, 0); - } + return bindText(pStmt, i, zData, nData, xDel, 0); } SQLITE_API int sqlite3_bind_double(sqlite3_stmt *pStmt, int i, double rValue){ int rc; @@ -84490,12 +85657,8 @@ SQLITE_API int sqlite3_bind_text64( unsigned char enc ){ assert( xDel!=SQLITE_DYNAMIC ); - if( nData>0x7fffffff ){ - return invokeValueDestructor(zData, xDel, 0); - }else{ - if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; - return bindText(pStmt, i, zData, (int)nData, xDel, enc); - } + if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; + return bindText(pStmt, i, zData, nData, xDel, enc); } #ifndef SQLITE_OMIT_UTF16 SQLITE_API int sqlite3_bind_text16( @@ -84677,7 +85840,7 @@ SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt){ */ SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt *pStmt){ Vdbe *v = (Vdbe*)pStmt; - return v!=0 && v->magic==VDBE_MAGIC_RUN && v->pc>=0; + return v!=0 && v->iVdbeMagic==VDBE_MAGIC_RUN && v->pc>=0; } /* @@ -84895,6 +86058,17 @@ SQLITE_API int sqlite3_preupdate_depth(sqlite3 *db){ } #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +/* +** This function is designed to be called from within a pre-update callback +** only. +*/ +SQLITE_API int sqlite3_preupdate_blobwrite(sqlite3 *db){ + PreUpdate *p = db->pPreUpdate; + return (p ? p->iBlobWrite : -1); +} +#endif + #ifdef SQLITE_ENABLE_PREUPDATE_HOOK /* ** This function is called from within a pre-update callback to retrieve @@ -85169,7 +86343,7 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql( assert( idx>0 ); } zRawSql += nToken; - nextIndex = idx + 1; + nextIndex = MAX(idx + 1, nextIndex); assert( idx>0 && idx<=p->nVar ); pVar = &p->aVar[idx-1]; if( pVar->flags & MEM_Null ){ @@ -85513,26 +86687,39 @@ static VdbeCursor *allocateCursor( assert( iCur>=0 && iCurnCursor ); if( p->apCsr[iCur] ){ /*OPTIMIZATION-IF-FALSE*/ - /* Before calling sqlite3VdbeFreeCursor(), ensure the isEphemeral flag - ** is clear. Otherwise, if this is an ephemeral cursor created by - ** OP_OpenDup, the cursor will not be closed and will still be part - ** of a BtShared.pCursor list. 
*/ - if( p->apCsr[iCur]->pBtx==0 ) p->apCsr[iCur]->isEphemeral = 0; sqlite3VdbeFreeCursor(p, p->apCsr[iCur]); p->apCsr[iCur] = 0; } - if( SQLITE_OK==sqlite3VdbeMemClearAndResize(pMem, nByte) ){ - p->apCsr[iCur] = pCx = (VdbeCursor*)pMem->z; - memset(pCx, 0, offsetof(VdbeCursor,pAltCursor)); - pCx->eCurType = eCurType; - pCx->iDb = iDb; - pCx->nField = nField; - pCx->aOffset = &pCx->aType[nField]; - if( eCurType==CURTYPE_BTREE ){ - pCx->uc.pCursor = (BtCursor*) - &pMem->z[ROUND8(sizeof(VdbeCursor))+2*sizeof(u32)*nField]; - sqlite3BtreeCursorZero(pCx->uc.pCursor); + + /* There used to be a call to sqlite3VdbeMemClearAndResize() to make sure + ** the pMem used to hold space for the cursor has enough storage available + ** in pMem->zMalloc. But for the special case of the aMem[] entries used + ** to hold cursors, it is faster to in-line the logic. */ + assert( pMem->flags==MEM_Undefined ); + assert( (pMem->flags & MEM_Dyn)==0 ); + assert( pMem->szMalloc==0 || pMem->z==pMem->zMalloc ); + if( pMem->szMallocszMalloc>0 ){ + sqlite3DbFreeNN(pMem->db, pMem->zMalloc); + } + pMem->z = pMem->zMalloc = sqlite3DbMallocRaw(pMem->db, nByte); + if( pMem->zMalloc==0 ){ + pMem->szMalloc = 0; + return 0; } + pMem->szMalloc = nByte; + } + + p->apCsr[iCur] = pCx = (VdbeCursor*)pMem->zMalloc; + memset(pCx, 0, offsetof(VdbeCursor,pAltCursor)); + pCx->eCurType = eCurType; + pCx->iDb = iDb; + pCx->nField = nField; + pCx->aOffset = &pCx->aType[nField]; + if( eCurType==CURTYPE_BTREE ){ + pCx->uc.pCursor = (BtCursor*) + &pMem->z[ROUND8(sizeof(VdbeCursor))+2*sizeof(u32)*nField]; + sqlite3BtreeCursorZero(pCx->uc.pCursor); } return pCx; } @@ -85679,7 +86866,10 @@ static u16 SQLITE_NOINLINE computeNumericType(Mem *pMem){ sqlite3_int64 ix; assert( (pMem->flags & (MEM_Int|MEM_Real|MEM_IntReal))==0 ); assert( (pMem->flags & (MEM_Str|MEM_Blob))!=0 ); - ExpandBlob(pMem); + if( ExpandBlob(pMem) ){ + pMem->u.i = 0; + return MEM_Int; + } rc = sqlite3AtoF(pMem->z, &pMem->u.r, pMem->n, pMem->enc); if( rc<=0 ){ if( rc==0 && sqlite3Atoi64(pMem->z, &ix, pMem->n, pMem->enc)<=1 ){ @@ -85817,6 +87007,11 @@ static void registerTrace(int iReg, Mem *p){ printf("\n"); sqlite3VdbeCheckMemInvariants(p); } +/**/ void sqlite3PrintMem(Mem *pMem){ + memTracePrint(pMem); + printf("\n"); + fflush(stdout); +} #endif #ifdef SQLITE_DEBUG @@ -86015,7 +87210,7 @@ SQLITE_PRIVATE int sqlite3VdbeExec( #endif /*** INSERT STACK UNION HERE ***/ - assert( p->magic==VDBE_MAGIC_RUN ); /* sqlite3_step() verifies this */ + assert( p->iVdbeMagic==VDBE_MAGIC_RUN ); /* sqlite3_step() verifies this */ sqlite3VdbeEnter(p); #ifndef SQLITE_OMIT_PROGRESS_CALLBACK if( db->xProgress ){ @@ -86775,6 +87970,26 @@ case OP_IntCopy: { /* out2 */ break; } +/* Opcode: ChngCntRow P1 P2 * * * +** Synopsis: output=r[P1] +** +** Output value in register P1 as the chance count for a DML statement, +** due to the "PRAGMA count_changes=ON" setting. Or, if there was a +** foreign key error in the statement, trigger the error now. +** +** This opcode is a variant of OP_ResultRow that checks the foreign key +** immediate constraint count and throws an error if the count is +** non-zero. The P2 opcode must be 1. 
+*/ +case OP_ChngCntRow: { + assert( pOp->p2==1 ); + if( (rc = sqlite3VdbeCheckFk(p,0))!=SQLITE_OK ){ + goto abort_due_to_error; + } + /* Fall through to the next case, OP_ResultRow */ + /* no break */ deliberate_fall_through +} + /* Opcode: ResultRow P1 P2 * * * ** Synopsis: output=r[P1@P2] ** @@ -86788,37 +88003,9 @@ case OP_ResultRow: { Mem *pMem; int i; assert( p->nResColumn==pOp->p2 ); - assert( pOp->p1>0 ); + assert( pOp->p1>0 || CORRUPT_DB ); assert( pOp->p1+pOp->p2<=(p->nMem+1 - p->nCursor)+1 ); - /* If this statement has violated immediate foreign key constraints, do - ** not return the number of rows modified. And do not RELEASE the statement - ** transaction. It needs to be rolled back. */ - if( SQLITE_OK!=(rc = sqlite3VdbeCheckFk(p, 0)) ){ - assert( db->flags&SQLITE_CountRows ); - assert( p->usesStmtJournal ); - goto abort_due_to_error; - } - - /* If the SQLITE_CountRows flag is set in sqlite3.flags mask, then - ** DML statements invoke this opcode to return the number of rows - ** modified to the user. This is the only way that a VM that - ** opens a statement transaction may invoke this opcode. - ** - ** In case this is such a statement, close any statement transaction - ** opened by this VM before returning control to the user. This is to - ** ensure that statement-transactions are always nested, not overlapping. - ** If the open statement-transaction is not closed here, then the user - ** may step another VM that opens its own statement transaction. This - ** may lead to overlapping statement transactions. - ** - ** The statement transaction is never a top-level transaction. Hence - ** the RELEASE call below can never fail. - */ - assert( p->iStatement==0 || db->flags&SQLITE_CountRows ); - rc = sqlite3VdbeCloseStatement(p, SAVEPOINT_RELEASE); - assert( rc==SQLITE_OK ); - /* Invalidate all ephemeral cursor row caches */ p->cacheCtr = (p->cacheCtr + 2)|1; @@ -87258,8 +88445,7 @@ case OP_Cast: { /* in1 */ ** Synopsis: IF r[P3]==r[P1] ** ** Compare the values in register P1 and P3. If reg(P3)==reg(P1) then -** jump to address P2. Or if the SQLITE_STOREP2 flag is set in P5, then -** store the result of comparison in register P2. +** jump to address P2. ** ** The SQLITE_AFF_MASK portion of P5 must be an affinity character - ** SQLITE_AFF_TEXT, SQLITE_AFF_INTEGER, and so forth. An attempt is made @@ -87285,9 +88471,8 @@ case OP_Cast: { /* in1 */ ** If neither operand is NULL the result is the same as it would be if ** the SQLITE_NULLEQ flag were omitted from P5. ** -** If both SQLITE_STOREP2 and SQLITE_KEEPNULL flags are set then the -** content of r[P2] is only changed if the new value is NULL or 0 (false). -** In other words, a prior r[P2] value will not be overwritten by 1 (true). +** This opcode saves the result of comparison for use by the new +** OP_Jump opcode. */ /* Opcode: Ne P1 P2 P3 P4 P5 ** Synopsis: IF r[P3]!=r[P1] @@ -87295,17 +88480,12 @@ case OP_Cast: { /* in1 */ ** This works just like the Eq opcode except that the jump is taken if ** the operands in registers P1 and P3 are not equal. See the Eq opcode for ** additional information. -** -** If both SQLITE_STOREP2 and SQLITE_KEEPNULL flags are set then the -** content of r[P2] is only changed if the new value is NULL or 1 (true). -** In other words, a prior r[P2] value will not be overwritten by 0 (false). 
*/ /* Opcode: Lt P1 P2 P3 P4 P5 ** Synopsis: IF r[P3]p3]; flags1 = pIn1->flags; flags3 = pIn3->flags; + if( (flags1 & flags3 & MEM_Int)!=0 ){ + assert( (pOp->p5 & SQLITE_AFF_MASK)!=SQLITE_AFF_TEXT || CORRUPT_DB ); + /* Common case of comparison of two integers */ + if( pIn3->u.i > pIn1->u.i ){ + iCompare = +1; + if( sqlite3aGTb[pOp->opcode] ){ + VdbeBranchTaken(1, (pOp->p5 & SQLITE_NULLEQ)?2:3); + goto jump_to_p2; + } + }else if( pIn3->u.i < pIn1->u.i ){ + iCompare = -1; + if( sqlite3aLTb[pOp->opcode] ){ + VdbeBranchTaken(1, (pOp->p5 & SQLITE_NULLEQ)?2:3); + goto jump_to_p2; + } + }else{ + iCompare = 0; + if( sqlite3aEQb[pOp->opcode] ){ + VdbeBranchTaken(1, (pOp->p5 & SQLITE_NULLEQ)?2:3); + goto jump_to_p2; + } + } + VdbeBranchTaken(0, (pOp->p5 & SQLITE_NULLEQ)?2:3); + break; + } if( (flags1 | flags3)&MEM_Null ){ /* One or both operands are NULL */ if( pOp->p5 & SQLITE_NULLEQ ){ @@ -87387,22 +88595,16 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */ ** then the result is always NULL. ** The jump is taken if the SQLITE_JUMPIFNULL bit is set. */ - if( pOp->p5 & SQLITE_STOREP2 ){ - pOut = &aMem[pOp->p2]; - iCompare = 1; /* Operands are not equal */ - memAboutToChange(p, pOut); - MemSetTypeFlag(pOut, MEM_Null); - REGISTER_TRACE(pOp->p2, pOut); - }else{ - VdbeBranchTaken(2,3); - if( pOp->p5 & SQLITE_JUMPIFNULL ){ - goto jump_to_p2; - } + iCompare = 1; /* Operands are not equal */ + VdbeBranchTaken(2,3); + if( pOp->p5 & SQLITE_JUMPIFNULL ){ + goto jump_to_p2; } break; } }else{ - /* Neither operand is NULL. Do a comparison. */ + /* Neither operand is NULL and we couldn't do the special high-speed + ** integer comparison case. So do a general-case comparison. */ affinity = pOp->p5 & SQLITE_AFF_MASK; if( affinity>=SQLITE_AFF_NUMERIC ){ if( (flags1 | flags3)&MEM_Str ){ @@ -87415,14 +88617,6 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */ applyNumericAffinity(pIn3,0); } } - /* Handle the common case of integer comparison here, as an - ** optimization, to avoid a call to sqlite3MemCompare() */ - if( (pIn1->flags & pIn3->flags & MEM_Int)!=0 ){ - if( pIn3->u.i > pIn1->u.i ){ res = +1; goto compare_op; } - if( pIn3->u.i < pIn1->u.i ){ res = -1; goto compare_op; } - res = 0; - goto compare_op; - } }else if( affinity==SQLITE_AFF_TEXT ){ if( (flags1 & MEM_Str)==0 && (flags1&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){ testcase( pIn1->flags & MEM_Int ); @@ -87445,7 +88639,7 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */ assert( pOp->p4type==P4_COLLSEQ || pOp->p4.pColl==0 ); res = sqlite3MemCompare(pIn3, pIn1, pOp->p4.pColl); } -compare_op: + /* At this point, res is negative, zero, or positive if reg[P1] is ** less than, equal to, or greater than reg[P3], respectively. 
Compute ** the answer to this operator in res2, depending on what the comparison @@ -87454,16 +88648,14 @@ compare_op: ** order: NE, EQ, GT, LE, LT, GE */ assert( OP_Eq==OP_Ne+1 ); assert( OP_Gt==OP_Ne+2 ); assert( OP_Le==OP_Ne+3 ); assert( OP_Lt==OP_Ne+4 ); assert( OP_Ge==OP_Ne+5 ); - if( res<0 ){ /* ne, eq, gt, le, lt, ge */ - static const unsigned char aLTb[] = { 1, 0, 0, 1, 1, 0 }; - res2 = aLTb[pOp->opcode - OP_Ne]; + if( res<0 ){ + res2 = sqlite3aLTb[pOp->opcode]; }else if( res==0 ){ - static const unsigned char aEQb[] = { 0, 1, 0, 1, 0, 1 }; - res2 = aEQb[pOp->opcode - OP_Ne]; + res2 = sqlite3aEQb[pOp->opcode]; }else{ - static const unsigned char aGTb[] = { 1, 0, 1, 0, 0, 1 }; - res2 = aGTb[pOp->opcode - OP_Ne]; + res2 = sqlite3aGTb[pOp->opcode]; } + iCompare = res; /* Undo any changes made by applyAffinity() to the input registers. */ assert( (pIn3->flags & MEM_Dyn) == (flags3 & MEM_Dyn) ); @@ -87471,67 +88663,39 @@ compare_op: assert( (pIn1->flags & MEM_Dyn) == (flags1 & MEM_Dyn) ); pIn1->flags = flags1; - if( pOp->p5 & SQLITE_STOREP2 ){ - pOut = &aMem[pOp->p2]; - iCompare = res; - if( (pOp->p5 & SQLITE_KEEPNULL)!=0 ){ - /* The KEEPNULL flag prevents OP_Eq from overwriting a NULL with 1 - ** and prevents OP_Ne from overwriting NULL with 0. This flag - ** is only used in contexts where either: - ** (1) op==OP_Eq && (r[P2]==NULL || r[P2]==0) - ** (2) op==OP_Ne && (r[P2]==NULL || r[P2]==1) - ** Therefore it is not necessary to check the content of r[P2] for - ** NULL. */ - assert( pOp->opcode==OP_Ne || pOp->opcode==OP_Eq ); - assert( res2==0 || res2==1 ); - testcase( res2==0 && pOp->opcode==OP_Eq ); - testcase( res2==1 && pOp->opcode==OP_Eq ); - testcase( res2==0 && pOp->opcode==OP_Ne ); - testcase( res2==1 && pOp->opcode==OP_Ne ); - if( (pOp->opcode==OP_Eq)==res2 ) break; - } - memAboutToChange(p, pOut); - MemSetTypeFlag(pOut, MEM_Int); - pOut->u.i = res2; - REGISTER_TRACE(pOp->p2, pOut); - }else{ - VdbeBranchTaken(res2!=0, (pOp->p5 & SQLITE_NULLEQ)?2:3); - if( res2 ){ - goto jump_to_p2; - } + VdbeBranchTaken(res2!=0, (pOp->p5 & SQLITE_NULLEQ)?2:3); + if( res2 ){ + goto jump_to_p2; } break; } -/* Opcode: ElseNotEq * P2 * * * +/* Opcode: ElseEq * P2 * * * ** ** This opcode must follow an OP_Lt or OP_Gt comparison operator. There ** can be zero or more OP_ReleaseReg opcodes intervening, but no other ** opcodes are allowed to occur between this instruction and the previous -** OP_Lt or OP_Gt. Furthermore, the prior OP_Lt or OP_Gt must have the -** SQLITE_STOREP2 bit set in the P5 field. +** OP_Lt or OP_Gt. ** ** If result of an OP_Eq comparison on the same two operands as the -** prior OP_Lt or OP_Gt would have been NULL or false (0), then then -** jump to P2. If the result of an OP_Eq comparison on the two previous -** operands would have been true (1), then fall through. +** prior OP_Lt or OP_Gt would have been true, then jump to P2. +** If the result of an OP_Eq comparison on the two previous +** operands would have been false or NULL, then fall through. 
*/ -case OP_ElseNotEq: { /* same as TK_ESCAPE, jump */ +case OP_ElseEq: { /* same as TK_ESCAPE, jump */ #ifdef SQLITE_DEBUG /* Verify the preconditions of this opcode - that it follows an OP_Lt or - ** OP_Gt with the SQLITE_STOREP2 flag set, with zero or more intervening - ** OP_ReleaseReg opcodes */ + ** OP_Gt with zero or more intervening OP_ReleaseReg opcodes */ int iAddr; for(iAddr = (int)(pOp - aOp) - 1; ALWAYS(iAddr>=0); iAddr--){ if( aOp[iAddr].opcode==OP_ReleaseReg ) continue; assert( aOp[iAddr].opcode==OP_Lt || aOp[iAddr].opcode==OP_Gt ); - assert( aOp[iAddr].p5 & SQLITE_STOREP2 ); break; } #endif /* SQLITE_DEBUG */ - VdbeBranchTaken(iCompare!=0, 2); - if( iCompare!=0 ) goto jump_to_p2; + VdbeBranchTaken(iCompare==0, 2); + if( iCompare==0 ) goto jump_to_p2; break; } @@ -87842,6 +89006,24 @@ case OP_IsNull: { /* same as TK_ISNULL, jump, in1 */ break; } +/* Opcode: ZeroOrNull P1 P2 P3 * * +** Synopsis: r[P2] = 0 OR NULL +** +** If all both registers P1 and P3 are NOT NULL, then store a zero in +** register P2. If either registers P1 or P3 are NULL then put +** a NULL in register P2. +*/ +case OP_ZeroOrNull: { /* in1, in2, out2, in3 */ + if( (aMem[pOp->p1].flags & MEM_Null)!=0 + || (aMem[pOp->p3].flags & MEM_Null)!=0 + ){ + sqlite3VdbeMemSetNull(aMem + pOp->p2); + }else{ + sqlite3VdbeMemSetInt64(aMem + pOp->p2, 0); + } + break; +} + /* Opcode: NotNull P1 P2 * * * ** Synopsis: if r[P1]!=NULL goto P2 ** @@ -88817,7 +89999,8 @@ case OP_AutoCommit: { ** active. ** If P2 is non-zero, then a write-transaction is started, or if a ** read-transaction is already active, it is upgraded to a write-transaction. -** If P2 is zero, then a read-transaction is started. +** If P2 is zero, then a read-transaction is started. If P2 is 2 or more +** then an exclusive transaction is started. ** ** P1 is the index of the database file on which the transaction is ** started. Index 0 is the main database file and index 1 is the @@ -88851,6 +90034,7 @@ case OP_Transaction: { assert( p->bIsReader ); assert( p->readOnly==0 || pOp->p2==0 ); + assert( pOp->p2>=0 && pOp->p2<=2 ); assert( pOp->p1>=0 && pOp->p1nDb ); assert( DbMaskTest(p->btreeMask, pOp->p1) ); if( pOp->p2 && (db->flags & SQLITE_QueryOnly)!=0 ){ @@ -88876,7 +90060,7 @@ case OP_Transaction: { && pOp->p2 && (db->autoCommit==0 || db->nVdbeRead>1) ){ - assert( sqlite3BtreeIsInTrans(pBt) ); + assert( sqlite3BtreeTxnState(pBt)==SQLITE_TXN_WRITE ); if( p->iStatement==0 ){ assert( db->nStatement>=0 && db->nSavepoint>=0 ); db->nStatement++; @@ -89209,7 +90393,7 @@ case OP_OpenDup: { pOrig = p->apCsr[pOp->p2]; assert( pOrig ); - assert( pOrig->pBtx!=0 ); /* Only ephemeral cursors can be duplicated */ + assert( pOrig->isEphemeral ); /* Only ephemeral cursors can be duplicated */ pCx = allocateCursor(p, pOp->p1, pOrig->nField, -1, CURTYPE_BTREE); if( pCx==0 ) goto no_mem; @@ -89219,7 +90403,10 @@ case OP_OpenDup: { pCx->isTable = pOrig->isTable; pCx->pgnoRoot = pOrig->pgnoRoot; pCx->isOrdered = pOrig->isOrdered; - rc = sqlite3BtreeCursor(pOrig->pBtx, pCx->pgnoRoot, BTREE_WRCSR, + pCx->pBtx = pOrig->pBtx; + pCx->hasBeenDuped = 1; + pOrig->hasBeenDuped = 1; + rc = sqlite3BtreeCursor(pCx->pBtx, pCx->pgnoRoot, BTREE_WRCSR, pCx->pKeyInfo, pCx->uc.pCursor); /* The sqlite3BtreeCursor() routine can only fail for the first cursor ** opened for a database. 
Since there is already an open cursor when this @@ -89229,7 +90416,7 @@ case OP_OpenDup: { } -/* Opcode: OpenEphemeral P1 P2 * P4 P5 +/* Opcode: OpenEphemeral P1 P2 P3 P4 P5 ** Synopsis: nColumn=P2 ** ** Open a new cursor P1 to a transient table. @@ -89249,6 +90436,10 @@ case OP_OpenDup: { ** in btree.h. These flags control aspects of the operation of ** the btree. The BTREE_OMIT_JOURNAL and BTREE_SINGLE flags are ** added automatically. +** +** If P3 is positive, then reg[P3] is modified slightly so that it +** can be used as zero-length data for OP_Insert. This is an optimization +** that avoids an extra OP_Blob opcode to initialize that register. */ /* Opcode: OpenAutoindex P1 P2 * P4 * ** Synopsis: nColumn=P2 @@ -89271,10 +90462,20 @@ case OP_OpenEphemeral: { SQLITE_OPEN_TRANSIENT_DB; assert( pOp->p1>=0 ); assert( pOp->p2>=0 ); + if( pOp->p3>0 ){ + /* Make register reg[P3] into a value that can be used as the data + ** form sqlite3BtreeInsert() where the length of the data is zero. */ + assert( pOp->p2==0 ); /* Only used when number of columns is zero */ + assert( pOp->opcode==OP_OpenEphemeral ); + assert( aMem[pOp->p3].flags & MEM_Null ); + aMem[pOp->p3].n = 0; + aMem[pOp->p3].z = ""; + } pCx = p->apCsr[pOp->p1]; - if( pCx && pCx->pBtx ){ - /* If the ephermeral table is already open, erase all existing content - ** so that the table is empty again, rather than creating a new table. */ + if( pCx && !pCx->hasBeenDuped ){ + /* If the ephermeral table is already open and has no duplicates from + ** OP_OpenDup, then erase all existing content so that the table is + ** empty again, rather than creating a new table. */ assert( pCx->isEphemeral ); pCx->seqCount = 0; pCx->cacheStatus = CACHE_STALE; @@ -89288,33 +90489,36 @@ case OP_OpenEphemeral: { vfsFlags); if( rc==SQLITE_OK ){ rc = sqlite3BtreeBeginTrans(pCx->pBtx, 1, 0); - } - if( rc==SQLITE_OK ){ - /* If a transient index is required, create it by calling - ** sqlite3BtreeCreateTable() with the BTREE_BLOBKEY flag before - ** opening it. If a transient table is required, just use the - ** automatically created table with root-page 1 (an BLOB_INTKEY table). - */ - if( (pCx->pKeyInfo = pKeyInfo = pOp->p4.pKeyInfo)!=0 ){ - assert( pOp->p4type==P4_KEYINFO ); - rc = sqlite3BtreeCreateTable(pCx->pBtx, &pCx->pgnoRoot, - BTREE_BLOBKEY | pOp->p5); - if( rc==SQLITE_OK ){ - assert( pCx->pgnoRoot==SCHEMA_ROOT+1 ); - assert( pKeyInfo->db==db ); - assert( pKeyInfo->enc==ENC(db) ); - rc = sqlite3BtreeCursor(pCx->pBtx, pCx->pgnoRoot, BTREE_WRCSR, - pKeyInfo, pCx->uc.pCursor); + if( rc==SQLITE_OK ){ + /* If a transient index is required, create it by calling + ** sqlite3BtreeCreateTable() with the BTREE_BLOBKEY flag before + ** opening it. If a transient table is required, just use the + ** automatically created table with root-page 1 (an BLOB_INTKEY table). 
+ */ + if( (pCx->pKeyInfo = pKeyInfo = pOp->p4.pKeyInfo)!=0 ){ + assert( pOp->p4type==P4_KEYINFO ); + rc = sqlite3BtreeCreateTable(pCx->pBtx, &pCx->pgnoRoot, + BTREE_BLOBKEY | pOp->p5); + if( rc==SQLITE_OK ){ + assert( pCx->pgnoRoot==SCHEMA_ROOT+1 ); + assert( pKeyInfo->db==db ); + assert( pKeyInfo->enc==ENC(db) ); + rc = sqlite3BtreeCursor(pCx->pBtx, pCx->pgnoRoot, BTREE_WRCSR, + pKeyInfo, pCx->uc.pCursor); + } + pCx->isTable = 0; + }else{ + pCx->pgnoRoot = SCHEMA_ROOT; + rc = sqlite3BtreeCursor(pCx->pBtx, SCHEMA_ROOT, BTREE_WRCSR, + 0, pCx->uc.pCursor); + pCx->isTable = 1; } - pCx->isTable = 0; - }else{ - pCx->pgnoRoot = SCHEMA_ROOT; - rc = sqlite3BtreeCursor(pCx->pBtx, SCHEMA_ROOT, BTREE_WRCSR, - 0, pCx->uc.pCursor); - pCx->isTable = 1; + } + pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED); + if( rc ){ + sqlite3BtreeClose(pCx->pBtx); } } - pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED); } if( rc ) goto abort_due_to_error; pCx->nullRow = 1; @@ -89713,22 +90917,183 @@ seek_not_found: break; } -/* Opcode: SeekHit P1 P2 * * * -** Synopsis: seekHit=P2 + +/* Opcode: SeekScan P1 P2 * * * +** Synopsis: Scan-ahead up to P1 rows +** +** This opcode is a prefix opcode to OP_SeekGE. In other words, this +** opcode must be immediately followed by OP_SeekGE. This constraint is +** checked by assert() statements. +** +** This opcode uses the P1 through P4 operands of the subsequent +** OP_SeekGE. In the text that follows, the operands of the subsequent +** OP_SeekGE opcode are denoted as SeekOP.P1 through SeekOP.P4. Only +** the P1 and P2 operands of this opcode are also used, and are called +** This.P1 and This.P2. +** +** This opcode helps to optimize IN operators on a multi-column index +** where the IN operator is on the later terms of the index by avoiding +** unnecessary seeks on the btree, substituting steps to the next row +** of the b-tree instead. A correct answer is obtained if this opcode +** is omitted or is a no-op. +** +** The SeekGE.P3 and SeekGE.P4 operands identify an unpacked key which +** is the desired entry that we want the cursor SeekGE.P1 to be pointing +** to. Call this SeekGE.P4/P5 row the "target". +** +** If the SeekGE.P1 cursor is not currently pointing to a valid row, +** then this opcode is a no-op and control passes through into the OP_SeekGE. +** +** If the SeekGE.P1 cursor is pointing to a valid row, then that row +** might be the target row, or it might be near and slightly before the +** target row. This opcode attempts to position the cursor on the target +** row by, perhaps by invoking sqlite3BtreeStep() on the cursor +** between 0 and This.P1 times. +** +** There are three possible outcomes from this opcode:
      ** -** Set the seekHit flag on cursor P1 to the value in P2. -** The seekHit flag is used by the IfNoHope opcode. +**
    1. If after This.P1 steps, the cursor is still pointing to a place that +** is earlier in the btree than the target row, then fall through +** into the subsquence OP_SeekGE opcode. ** -** P1 must be a valid b-tree cursor. P2 must be a boolean value, -** either 0 or 1. +**
    2. If the cursor is successfully moved to the target row by 0 or more +** sqlite3BtreeNext() calls, then jump to This.P2, which will land just +** past the OP_IdxGT or OP_IdxGE opcode that follows the OP_SeekGE. +** +**
    3. If the cursor ends up past the target row (indicating the the target +** row does not exist in the btree) then jump to SeekOP.P2. +**
    +*/ +case OP_SeekScan: { + VdbeCursor *pC; + int res; + int nStep; + UnpackedRecord r; + + assert( pOp[1].opcode==OP_SeekGE ); + + /* pOp->p2 points to the first instruction past the OP_IdxGT that + ** follows the OP_SeekGE. */ + assert( pOp->p2>=(int)(pOp-aOp)+2 ); + assert( aOp[pOp->p2-1].opcode==OP_IdxGT || aOp[pOp->p2-1].opcode==OP_IdxGE ); + testcase( aOp[pOp->p2-1].opcode==OP_IdxGE ); + assert( pOp[1].p1==aOp[pOp->p2-1].p1 ); + assert( pOp[1].p2==aOp[pOp->p2-1].p2 ); + assert( pOp[1].p3==aOp[pOp->p2-1].p3 ); + + assert( pOp->p1>0 ); + pC = p->apCsr[pOp[1].p1]; + assert( pC!=0 ); + assert( pC->eCurType==CURTYPE_BTREE ); + assert( !pC->isTable ); + if( !sqlite3BtreeCursorIsValidNN(pC->uc.pCursor) ){ +#ifdef SQLITE_DEBUG + if( db->flags&SQLITE_VdbeTrace ){ + printf("... cursor not valid - fall through\n"); + } +#endif + break; + } + nStep = pOp->p1; + assert( nStep>=1 ); + r.pKeyInfo = pC->pKeyInfo; + r.nField = (u16)pOp[1].p4.i; + r.default_rc = 0; + r.aMem = &aMem[pOp[1].p3]; +#ifdef SQLITE_DEBUG + { + int i; + for(i=0; i0 ){ + seekscan_search_fail: +#ifdef SQLITE_DEBUG + if( db->flags&SQLITE_VdbeTrace ){ + printf("... %d steps and then skip\n", pOp->p1 - nStep); + } +#endif + VdbeBranchTaken(1,3); + pOp++; + goto jump_to_p2; + } + if( res==0 ){ +#ifdef SQLITE_DEBUG + if( db->flags&SQLITE_VdbeTrace ){ + printf("... %d steps and then success\n", pOp->p1 - nStep); + } +#endif + VdbeBranchTaken(2,3); + goto jump_to_p2; + break; + } + if( nStep<=0 ){ +#ifdef SQLITE_DEBUG + if( db->flags&SQLITE_VdbeTrace ){ + printf("... fall through after %d steps\n", pOp->p1); + } +#endif + VdbeBranchTaken(0,3); + break; + } + nStep--; + rc = sqlite3BtreeNext(pC->uc.pCursor, 0); + if( rc ){ + if( rc==SQLITE_DONE ){ + rc = SQLITE_OK; + goto seekscan_search_fail; + }else{ + goto abort_due_to_error; + } + } + } + + break; +} + + +/* Opcode: SeekHit P1 P2 P3 * * +** Synopsis: set P2<=seekHit<=P3 +** +** Increase or decrease the seekHit value for cursor P1, if necessary, +** so that it is no less than P2 and no greater than P3. +** +** The seekHit integer represents the maximum of terms in an index for which +** there is known to be at least one match. If the seekHit value is smaller +** than the total number of equality terms in an index lookup, then the +** OP_IfNoHope opcode might run to see if the IN loop can be abandoned +** early, thus saving work. This is part of the IN-early-out optimization. +** +** P1 must be a valid b-tree cursor. */ case OP_SeekHit: { VdbeCursor *pC; assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); - assert( pOp->p2==0 || pOp->p2==1 ); - pC->seekHit = pOp->p2 & 1; + assert( pOp->p3>=pOp->p2 ); + if( pC->seekHitp2 ){ +#ifdef SQLITE_DEBUG + if( db->flags&SQLITE_VdbeTrace ){ + printf("seekHit changes from %d to %d\n", pC->seekHit, pOp->p2); + } +#endif + pC->seekHit = pOp->p2; + }else if( pC->seekHit>pOp->p3 ){ +#ifdef SQLITE_DEBUG + if( db->flags&SQLITE_VdbeTrace ){ + printf("seekHit changes from %d to %d\n", pC->seekHit, pOp->p3); + } +#endif + pC->seekHit = pOp->p3; + } break; } @@ -89786,16 +91151,20 @@ case OP_IfNotOpen: { /* jump */ ** Synopsis: key=r[P3@P4] ** ** Register P3 is the first of P4 registers that form an unpacked -** record. +** record. Cursor P1 is an index btree. P2 is a jump destination. +** In other words, the operands to this opcode are the same as the +** operands to OP_NotFound and OP_IdxGT. ** -** Cursor P1 is on an index btree. If the seekHit flag is set on P1, then -** this opcode is a no-op. 
But if the seekHit flag of P1 is clear, then -** check to see if there is any entry in P1 that matches the -** prefix identified by P3 and P4. If no entry matches the prefix, -** jump to P2. Otherwise fall through. +** This opcode is an optimization attempt only. If this opcode always +** falls through, the correct answer is still obtained, but extra works +** is performed. ** -** This opcode behaves like OP_NotFound if the seekHit -** flag is clear and it behaves like OP_Noop if the seekHit flag is set. +** A value of N in the seekHit flag of cursor P1 means that there exists +** a key P3:N that will match some record in the index. We want to know +** if it is possible for a record P3:P4 to match some record in the +** index. If it is not possible, we can skips some work. So if seekHit +** is less than P4, attempt to find out if a match is possible by running +** OP_NotFound. ** ** This opcode is used in IN clause processing for a multi-column key. ** If an IN clause is attached to an element of the key other than the @@ -89837,7 +91206,12 @@ case OP_IfNoHope: { /* jump, in3 */ assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); - if( pC->seekHit ) break; +#ifdef SQLITE_DEBUG + if( db->flags&SQLITE_VdbeTrace ){ + printf("seekHit is %d\n", pC->seekHit); + } +#endif + if( pC->seekHit>=pOp->p4.i ) break; /* Fall through into OP_NotFound */ /* no break */ deliberate_fall_through } @@ -89919,6 +91293,7 @@ case OP_Found: { /* jump, in3 */ }else{ VdbeBranchTaken(takeJump||alreadyExists==0,2); if( takeJump || !alreadyExists ) goto jump_to_p2; + if( pOp->opcode==OP_IfNoHope ) pC->seekHit = pOp->p4.i; } break; } @@ -90069,8 +91444,10 @@ case OP_NewRowid: { /* out2 */ VdbeCursor *pC; /* Cursor of table to get the new rowid */ int res; /* Result of an sqlite3BtreeLast() */ int cnt; /* Counter to limit the number of searches */ +#ifndef SQLITE_OMIT_AUTOINCREMENT Mem *pMem; /* Register holding largest rowid for AUTOINCREMENT */ VdbeFrame *pFrame; /* Root frame of VDBE */ +#endif v = 0; res = 0; @@ -90263,7 +91640,7 @@ case OP_Insert: { /* Invoke the pre-update hook, if any */ if( pTab ){ if( db->xPreUpdateCallback && !(pOp->p5 & OPFLAG_ISUPDATE) ){ - sqlite3VdbePreUpdateHook(p, pC, SQLITE_INSERT, zDb, pTab, x.nKey,pOp->p2); + sqlite3VdbePreUpdateHook(p,pC,SQLITE_INSERT,zDb,pTab,x.nKey,pOp->p2,-1); } if( db->xUpdateCallback==0 || pTab->aCol==0 ){ /* Prevent post-update hook from running in cases when it should not */ @@ -90275,7 +91652,7 @@ case OP_Insert: { if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++; if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = x.nKey; - assert( pData->flags & (MEM_Blob|MEM_Str) ); + assert( (pData->flags & (MEM_Blob|MEM_Str))!=0 || pData->n==0 ); x.pData = pData->z; x.nData = pData->n; seekResult = ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0); @@ -90286,7 +91663,8 @@ case OP_Insert: { } x.pKey = 0; rc = sqlite3BtreeInsert(pC->uc.pCursor, &x, - (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION)), seekResult + (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION|OPFLAG_PREFORMAT)), + seekResult ); pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; @@ -90303,6 +91681,33 @@ case OP_Insert: { break; } +/* Opcode: RowCell P1 P2 P3 * * +** +** P1 and P2 are both open cursors. Both must be opened on the same type +** of table - intkey or index. This opcode is used as part of copying +** the current row from P2 into P1. If the cursors are opened on intkey +** tables, register P3 contains the rowid to use with the new record in +** P1. 
If they are opened on index tables, P3 is not used. +** +** This opcode must be followed by either an Insert or InsertIdx opcode +** with the OPFLAG_PREFORMAT flag set to complete the insert operation. +*/ +case OP_RowCell: { + VdbeCursor *pDest; /* Cursor to write to */ + VdbeCursor *pSrc; /* Cursor to read from */ + i64 iKey; /* Rowid value to insert with */ + assert( pOp[1].opcode==OP_Insert || pOp[1].opcode==OP_IdxInsert ); + assert( pOp[1].opcode==OP_Insert || pOp->p3==0 ); + assert( pOp[1].opcode==OP_IdxInsert || pOp->p3>0 ); + assert( pOp[1].p5 & OPFLAG_PREFORMAT ); + pDest = p->apCsr[pOp->p1]; + pSrc = p->apCsr[pOp->p2]; + iKey = pOp->p3 ? aMem[pOp->p3].u.i : 0; + rc = sqlite3BtreeTransferRow(pDest->uc.pCursor, pSrc->uc.pCursor, iKey); + if( rc!=SQLITE_OK ) goto abort_due_to_error; + break; +}; + /* Opcode: Delete P1 P2 P3 P4 P5 ** ** Delete the record at which the P1 cursor is currently pointing. @@ -90395,7 +91800,7 @@ case OP_Delete: { sqlite3VdbePreUpdateHook(p, pC, (opflags & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_DELETE, zDb, pTab, pC->movetoTarget, - pOp->p3 + pOp->p3, -1 ); } if( opflags & OPFLAG_ISNOOP ) break; @@ -90958,7 +92363,7 @@ case OP_IdxInsert: { /* in2 */ assert( pC!=0 ); assert( !isSorter(pC) ); pIn2 = &aMem[pOp->p2]; - assert( pIn2->flags & MEM_Blob ); + assert( (pIn2->flags & MEM_Blob) || (pOp->p5 & OPFLAG_PREFORMAT) ); if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++; assert( pC->eCurType==CURTYPE_BTREE ); assert( pC->isTable==0 ); @@ -90969,7 +92374,7 @@ case OP_IdxInsert: { /* in2 */ x.aMem = aMem + pOp->p3; x.nMem = (u16)pOp->p4.i; rc = sqlite3BtreeInsert(pC->uc.pCursor, &x, - (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION)), + (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION|OPFLAG_PREFORMAT)), ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0) ); assert( pC->deferredMoveto==0 ); @@ -91042,7 +92447,7 @@ case OP_IdxDelete: { rc = sqlite3BtreeDelete(pCrsr, BTREE_AUXDELETE); if( rc ) goto abort_due_to_error; }else if( pOp->p5 ){ - rc = SQLITE_CORRUPT_INDEX; + rc = sqlite3ReportError(SQLITE_CORRUPT_INDEX, __LINE__, "index corruption"); goto abort_due_to_error; } assert( pC->deferredMoveto==0 ); @@ -91121,6 +92526,8 @@ case OP_IdxRowid: { /* out2 */ pTabCur->deferredMoveto = 1; assert( pOp->p4type==P4_INTARRAY || pOp->p4.ai==0 ); pTabCur->aAltMap = pOp->p4.ai; + assert( !pC->isEphemeral ); + assert( !pTabCur->isEphemeral ); pTabCur->pAltCursor = pC; }else{ pOut = out2Prerelease(p, pOp); @@ -91151,7 +92558,7 @@ case OP_FinishSeek: { break; } -/* Opcode: IdxGE P1 P2 P3 P4 P5 +/* Opcode: IdxGE P1 P2 P3 P4 * ** Synopsis: key=r[P3@P4] ** ** The P4 register values beginning with P3 form an unpacked index @@ -91162,7 +92569,7 @@ case OP_FinishSeek: { ** If the P1 index entry is greater than or equal to the key value ** then jump to P2. Otherwise fall through to the next instruction. */ -/* Opcode: IdxGT P1 P2 P3 P4 P5 +/* Opcode: IdxGT P1 P2 P3 P4 * ** Synopsis: key=r[P3@P4] ** ** The P4 register values beginning with P3 form an unpacked index @@ -91173,7 +92580,7 @@ case OP_FinishSeek: { ** If the P1 index entry is greater than the key value ** then jump to P2. Otherwise fall through to the next instruction. */ -/* Opcode: IdxLT P1 P2 P3 P4 P5 +/* Opcode: IdxLT P1 P2 P3 P4 * ** Synopsis: key=r[P3@P4] ** ** The P4 register values beginning with P3 form an unpacked index @@ -91184,7 +92591,7 @@ case OP_FinishSeek: { ** If the P1 index entry is less than the key value then jump to P2. ** Otherwise fall through to the next instruction. 
*/ -/* Opcode: IdxLE P1 P2 P3 P4 P5 +/* Opcode: IdxLE P1 P2 P3 P4 * ** Synopsis: key=r[P3@P4] ** ** The P4 register values beginning with P3 form an unpacked index @@ -91210,7 +92617,6 @@ case OP_IdxGE: { /* jump */ assert( pC->eCurType==CURTYPE_BTREE ); assert( pC->uc.pCursor!=0); assert( pC->deferredMoveto==0 ); - assert( pOp->p5==0 || pOp->p5==1 ); assert( pOp->p4type==P4_INT32 ); r.pKeyInfo = pC->pKeyInfo; r.nField = (u16)pOp->p4.i; @@ -91231,8 +92637,31 @@ case OP_IdxGE: { /* jump */ } } #endif - res = 0; /* Not needed. Only used to silence a warning. */ - rc = sqlite3VdbeIdxKeyCompare(db, pC, &r, &res); + + /* Inlined version of sqlite3VdbeIdxKeyCompare() */ + { + i64 nCellKey = 0; + BtCursor *pCur; + Mem m; + + assert( pC->eCurType==CURTYPE_BTREE ); + pCur = pC->uc.pCursor; + assert( sqlite3BtreeCursorIsValid(pCur) ); + nCellKey = sqlite3BtreePayloadSize(pCur); + /* nCellKey will always be between 0 and 0xffffffff because of the way + ** that btreeParseCellPtr() and sqlite3GetVarint32() are implemented */ + if( nCellKey<=0 || nCellKey>0x7fffffff ){ + rc = SQLITE_CORRUPT_BKPT; + goto abort_due_to_error; + } + sqlite3VdbeMemInit(&m, db, 0); + rc = sqlite3VdbeMemFromBtreeZeroOffset(pCur, (u32)nCellKey, &m); + if( rc ) goto abort_due_to_error; + res = sqlite3VdbeRecordCompareWithSkip(m.n, m.z, &r, 0); + sqlite3VdbeMemRelease(&m); + } + /* End of inlined sqlite3VdbeIdxKeyCompare() */ + assert( (OP_IdxLE&1)==(OP_IdxLT&1) && (OP_IdxGE&1)==(OP_IdxGT&1) ); if( (pOp->opcode&1)==(OP_IdxLT&1) ){ assert( pOp->opcode==OP_IdxLE || pOp->opcode==OP_IdxLT ); @@ -91242,7 +92671,7 @@ case OP_IdxGE: { /* jump */ res++; } VdbeBranchTaken(res>0,2); - if( rc ) goto abort_due_to_error; + assert( rc==SQLITE_OK ); if( res>0 ) goto jump_to_p2; break; } @@ -91317,11 +92746,10 @@ case OP_Destroy: { /* out2 */ ** P2==1 then the table to be clear is in the auxiliary database file ** that is used to store tables create using CREATE TEMPORARY TABLE. ** -** If the P3 value is non-zero, then the table referred to must be an -** intkey table (an SQL table, not an index). In this case the row change -** count is incremented by the number of rows in the table being cleared. -** If P3 is greater than zero, then the value stored in register P3 is -** also incremented by the number of rows in the table being cleared. +** If the P3 value is non-zero, then the row change count is incremented +** by the number of rows in the table being cleared. If P3 is greater +** than zero, then the value stored in register P3 is also incremented +** by the number of rows in the table being cleared. ** ** See also: Destroy */ @@ -91332,9 +92760,7 @@ case OP_Clear: { nChange = 0; assert( p->readOnly==0 ); assert( DbMaskTest(p->btreeMask, pOp->p2) ); - rc = sqlite3BtreeClearTable( - db->aDb[pOp->p2].pBt, (u32)pOp->p1, (pOp->p3 ? 
&nChange : 0) - ); + rc = sqlite3BtreeClearTable(db->aDb[pOp->p2].pBt, (u32)pOp->p1, &nChange); if( pOp->p3 ){ p->nChange += nChange; if( pOp->p3>0 ){ @@ -91440,13 +92866,15 @@ case OP_ParseSchema: { iDb = pOp->p1; assert( iDb>=0 && iDbnDb ); - assert( DbHasProperty(db, iDb, DB_SchemaLoaded) ); + assert( DbHasProperty(db, iDb, DB_SchemaLoaded) + || db->mallocFailed + || (CORRUPT_DB && (db->flags & SQLITE_NoSchemaError)!=0) ); #ifndef SQLITE_OMIT_ALTERTABLE if( pOp->p4.z==0 ){ sqlite3SchemaClear(db->aDb[iDb].pSchema); db->mDbFlags &= ~DBFLAG_SchemaKnownOk; - rc = sqlite3InitOne(db, iDb, &p->zErrMsg, INITFLAG_AlterTable); + rc = sqlite3InitOne(db, iDb, &p->zErrMsg, pOp->p5); db->mDbFlags |= DBFLAG_SchemaChange; p->expired = 0; }else @@ -92322,6 +93750,7 @@ case OP_JournalMode: { /* out2 */ pPager = sqlite3BtreePager(pBt); eOld = sqlite3PagerGetJournalMode(pPager); if( eNew==PAGER_JOURNALMODE_QUERY ) eNew = eOld; + assert( sqlite3BtreeHoldsMutex(pBt) ); if( !sqlite3PagerOkToChangeJournalMode(pPager) ) eNew = eOld; #ifndef SQLITE_OMIT_WAL @@ -92368,7 +93797,7 @@ case OP_JournalMode: { /* out2 */ /* Open a transaction on the database file. Regardless of the journal ** mode, this transaction always uses a rollback journal. */ - assert( sqlite3BtreeIsInTrans(pBt)==0 ); + assert( sqlite3BtreeTxnState(pBt)!=SQLITE_TXN_WRITE ); if( rc==SQLITE_OK ){ rc = sqlite3BtreeSetVersion(pBt, (eNew==PAGER_JOURNALMODE_WAL ? 2 : 1)); } @@ -93308,7 +94737,11 @@ default: { /* This is really OP_Noop, OP_Explain */ ** an error of some kind. */ abort_due_to_error: - if( db->mallocFailed ) rc = SQLITE_NOMEM_BKPT; + if( db->mallocFailed ){ + rc = SQLITE_NOMEM_BKPT; + }else if( rc==SQLITE_IOERR_CORRUPTFS ){ + rc = SQLITE_CORRUPT_BKPT; + } assert( rc ); if( p->zErrMsg==0 && rc!=SQLITE_IOERR_NOMEM ){ sqlite3VdbeError(p, "%s", sqlite3ErrStr(rc)); @@ -93796,7 +95229,7 @@ static int blobReadWrite( sqlite3_int64 iKey; iKey = sqlite3BtreeIntegerKey(p->pCsr); sqlite3VdbePreUpdateHook( - v, v->apCsr[0], SQLITE_DELETE, p->zDb, p->pTab, iKey, -1 + v, v->apCsr[0], SQLITE_DELETE, p->zDb, p->pTab, iKey, -1, p->iCol ); } #endif @@ -93867,6 +95300,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){ rc = SQLITE_ABORT; }else{ char *zErr; + ((Vdbe*)p->pStmt)->rc = SQLITE_OK; rc = blobSeekToRow(p, iRow, &zErr); if( rc!=SQLITE_OK ){ sqlite3ErrorWithMsg(db, rc, (zErr ? 
"%s" : 0), zErr); @@ -94857,13 +96291,16 @@ SQLITE_PRIVATE int sqlite3VdbeSorterInit( if( pSorter==0 ){ rc = SQLITE_NOMEM_BKPT; }else{ + Btree *pBt = db->aDb[0].pBt; pSorter->pKeyInfo = pKeyInfo = (KeyInfo*)((u8*)pSorter + sz); memcpy(pKeyInfo, pCsr->pKeyInfo, szKeyInfo); pKeyInfo->db = 0; if( nField && nWorker==0 ){ pKeyInfo->nKeyField = nField; } - pSorter->pgsz = pgsz = sqlite3BtreeGetPageSize(db->aDb[0].pBt); + sqlite3BtreeEnter(pBt); + pSorter->pgsz = pgsz = sqlite3BtreeGetPageSize(pBt); + sqlite3BtreeLeave(pBt); pSorter->nTask = nWorker + 1; pSorter->iPrev = (u8)(nWorker - 1); pSorter->bUseThreads = (pSorter->nTask>1); @@ -94957,8 +96394,9 @@ static void vdbeSorterWorkDebug(SortSubtask *pTask, const char *zEvent){ fprintf(stderr, "%lld:%d %s\n", t, iTask, zEvent); } static void vdbeSorterRewindDebug(const char *zEvent){ - i64 t; - sqlite3OsCurrentTimeInt64(sqlite3_vfs_find(0), &t); + i64 t = 0; + sqlite3_vfs *pVfs = sqlite3_vfs_find(0); + if( ALWAYS(pVfs) ) sqlite3OsCurrentTimeInt64(pVfs, &t); fprintf(stderr, "%lld:X %s\n", t, zEvent); } static void vdbeSorterPopulateDebug( @@ -97147,7 +98585,6 @@ struct MemJournal { int nChunkSize; /* In-memory chunk-size */ int nSpill; /* Bytes of data before flushing */ - int nSize; /* Bytes of data currently in memory */ FileChunk *pFirst; /* Head of in-memory chunk-list */ FilePoint endpoint; /* Pointer to the end of the file */ FilePoint readpoint; /* Pointer to the end of the last xRead() */ @@ -97208,14 +98645,13 @@ static int memjrnlRead( /* ** Free the list of FileChunk structures headed at MemJournal.pFirst. */ -static void memjrnlFreeChunks(MemJournal *p){ +static void memjrnlFreeChunks(FileChunk *pFirst){ FileChunk *pIter; FileChunk *pNext; - for(pIter=p->pFirst; pIter; pIter=pNext){ + for(pIter=pFirst; pIter; pIter=pNext){ pNext = pIter->pNext; sqlite3_free(pIter); } - p->pFirst = 0; } /* @@ -97242,7 +98678,7 @@ static int memjrnlCreateFile(MemJournal *p){ } if( rc==SQLITE_OK ){ /* No error has occurred. Free the in-memory buffers. */ - memjrnlFreeChunks(©); + memjrnlFreeChunks(copy.pFirst); } } if( rc!=SQLITE_OK ){ @@ -97325,7 +98761,6 @@ static int memjrnlWrite( nWrite -= iSpace; p->endpoint.iOffset += iSpace; } - p->nSize = iAmt + iOfst; } } @@ -97333,19 +98768,29 @@ static int memjrnlWrite( } /* -** Truncate the file. -** -** If the journal file is already on disk, truncate it there. Or, if it -** is still in main memory but is being truncated to zero bytes in size, -** ignore +** Truncate the in-memory file. 
*/ static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ MemJournal *p = (MemJournal *)pJfd; - if( ALWAYS(size==0) ){ - memjrnlFreeChunks(p); - p->nSize = 0; - p->endpoint.pChunk = 0; - p->endpoint.iOffset = 0; + assert( p->endpoint.pChunk==0 || p->endpoint.pChunk->pNext==0 ); + if( sizeendpoint.iOffset ){ + FileChunk *pIter = 0; + if( size==0 ){ + memjrnlFreeChunks(p->pFirst); + p->pFirst = 0; + }else{ + i64 iOff = p->nChunkSize; + for(pIter=p->pFirst; ALWAYS(pIter) && iOff<=size; pIter=pIter->pNext){ + iOff += p->nChunkSize; + } + if( ALWAYS(pIter) ){ + memjrnlFreeChunks(pIter->pNext); + pIter->pNext = 0; + } + } + + p->endpoint.pChunk = pIter; + p->endpoint.iOffset = size; p->readpoint.pChunk = 0; p->readpoint.iOffset = 0; } @@ -97357,7 +98802,7 @@ static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ */ static int memjrnlClose(sqlite3_file *pJfd){ MemJournal *p = (MemJournal *)pJfd; - memjrnlFreeChunks(p); + memjrnlFreeChunks(p->pFirst); return SQLITE_OK; } @@ -97531,7 +98976,7 @@ SQLITE_PRIVATE int sqlite3JournalSize(sqlite3_vfs *pVfs){ ** Walk all expressions linked into the list of Window objects passed ** as the second argument. */ -static int walkWindowList(Walker *pWalker, Window *pList){ +static int walkWindowList(Walker *pWalker, Window *pList, int bOneOnly){ Window *pWin; for(pWin=pList; pWin; pWin=pWin->pNextWin){ int rc; @@ -97541,15 +98986,11 @@ static int walkWindowList(Walker *pWalker, Window *pList){ if( rc ) return WRC_Abort; rc = sqlite3WalkExpr(pWalker, pWin->pFilter); if( rc ) return WRC_Abort; - - /* The next two are purely for calls to sqlite3RenameExprUnmap() - ** within sqlite3WindowOffsetExpr(). Because of constraints imposed - ** by sqlite3WindowOffsetExpr(), they can never fail. The results do - ** not matter anyhow. */ rc = sqlite3WalkExpr(pWalker, pWin->pStart); - if( NEVER(rc) ) return WRC_Abort; + if( rc ) return WRC_Abort; rc = sqlite3WalkExpr(pWalker, pWin->pEnd); - if( NEVER(rc) ) return WRC_Abort; + if( rc ) return WRC_Abort; + if( bOneOnly ) break; } return WRC_Continue; } @@ -97597,7 +99038,7 @@ static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){ } #ifndef SQLITE_OMIT_WINDOWFUNC if( ExprHasProperty(pExpr, EP_WinFunc) ){ - if( walkWindowList(pWalker, pExpr->y.pWin) ) return WRC_Abort; + if( walkWindowList(pWalker, pExpr->y.pWin, 1) ) return WRC_Abort; } #endif } @@ -97625,6 +99066,16 @@ SQLITE_PRIVATE int sqlite3WalkExprList(Walker *pWalker, ExprList *p){ return WRC_Continue; } +/* +** This is a no-op callback for Walker->xSelectCallback2. If this +** callback is set, then the Select->pWinDefn list is traversed. +*/ +SQLITE_PRIVATE void sqlite3WalkWinDefnDummyCallback(Walker *pWalker, Select *p){ + UNUSED_PARAMETER(pWalker); + UNUSED_PARAMETER(p); + /* No-op */ +} + /* ** Walk all expressions associated with SELECT statement p. 
Do ** not invoke the SELECT callback on p, but do (of course) invoke @@ -97638,13 +99089,18 @@ SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker *pWalker, Select *p){ if( sqlite3WalkExpr(pWalker, p->pHaving) ) return WRC_Abort; if( sqlite3WalkExprList(pWalker, p->pOrderBy) ) return WRC_Abort; if( sqlite3WalkExpr(pWalker, p->pLimit) ) return WRC_Abort; -#if !defined(SQLITE_OMIT_WINDOWFUNC) && !defined(SQLITE_OMIT_ALTERTABLE) - { - Parse *pParse = pWalker->pParse; - if( pParse && IN_RENAME_OBJECT ){ +#if !defined(SQLITE_OMIT_WINDOWFUNC) + if( p->pWinDefn ){ + Parse *pParse; + if( pWalker->xSelectCallback2==sqlite3WalkWinDefnDummyCallback + || ((pParse = pWalker->pParse)!=0 && IN_RENAME_OBJECT) +#ifndef SQLITE_OMIT_CTE + || pWalker->xSelectCallback2==sqlite3SelectPopWith +#endif + ){ /* The following may return WRC_Abort if there are unresolvable ** symbols (e.g. a table that does not exist) in a window definition. */ - int rc = walkWindowList(pWalker, p->pWinDefn); + int rc = walkWindowList(pWalker, p->pWinDefn, 0); return rc; } } @@ -97662,10 +99118,10 @@ SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker *pWalker, Select *p){ SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker *pWalker, Select *p){ SrcList *pSrc; int i; - struct SrcList_item *pItem; + SrcItem *pItem; pSrc = p->pSrc; - if( pSrc ){ + if( ALWAYS(pSrc) ){ for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ if( pItem->pSelect && sqlite3WalkSelect(pWalker, pItem->pSelect) ){ return WRC_Abort; @@ -97828,7 +99284,6 @@ static void resolveAlias( ExprList *pEList, /* A result set */ int iCol, /* A column in the result set. 0..pEList->nExpr-1 */ Expr *pExpr, /* Transform this into an alias to the result set */ - const char *zType, /* "GROUP" or "ORDER" or "" */ int nSubquery /* Number of subqueries that the label is moving */ ){ Expr *pOrig; /* The iCol-th column of the result set */ @@ -97840,8 +99295,11 @@ static void resolveAlias( assert( pOrig!=0 ); db = pParse->db; pDup = sqlite3ExprDup(db, pOrig, 0); - if( pDup!=0 ){ - if( zType[0]!='G' ) incrAggFunctionDepth(pDup, nSubquery); + if( db->mallocFailed ){ + sqlite3ExprDelete(db, pDup); + pDup = 0; + }else{ + incrAggFunctionDepth(pDup, nSubquery); if( pExpr->op==TK_COLLATE ){ pDup = sqlite3ExprAddCollateString(pParse, pDup, pExpr->u.zToken); } @@ -97862,15 +99320,12 @@ static void resolveAlias( pExpr->flags |= EP_MemToken; } if( ExprHasProperty(pExpr, EP_WinFunc) ){ - if( pExpr->y.pWin!=0 ){ + if( ALWAYS(pExpr->y.pWin!=0) ){ pExpr->y.pWin->pOwner = pExpr; - }else{ - assert( db->mallocFailed ); } } sqlite3DbFree(db, pDup); } - ExprSetProperty(pExpr, EP_Alias); } @@ -98005,8 +99460,8 @@ static int lookupName( int cntTab = 0; /* Number of matching table names */ int nSubquery = 0; /* How many levels of subquery */ sqlite3 *db = pParse->db; /* The database connection */ - struct SrcList_item *pItem; /* Use for looping over pSrcList items */ - struct SrcList_item *pMatch = 0; /* The matching pSrcList item */ + SrcItem *pItem; /* Use for looping over pSrcList items */ + SrcItem *pMatch = 0; /* The matching pSrcList item */ NameContext *pTopNC = pNC; /* First namecontext in the list */ Schema *pSchema = 0; /* Schema of the expression */ int eNewExprOp = TK_COLUMN; /* New value for pExpr->op on success */ @@ -98127,25 +99582,33 @@ static int lookupName( #if !defined(SQLITE_OMIT_TRIGGER) || !defined(SQLITE_OMIT_UPSERT) /* If we have not already resolved the name, then maybe ** it is a new.* or old.* trigger argument reference. Or - ** maybe it is an excluded.* from an upsert. 
+ ** maybe it is an excluded.* from an upsert. Or maybe it is + ** a reference in the RETURNING clause to a table being modified. */ - if( zDb==0 && zTab!=0 && cntTab==0 ){ + if( cnt==0 && zDb==0 ){ pTab = 0; #ifndef SQLITE_OMIT_TRIGGER if( pParse->pTriggerTab!=0 ){ int op = pParse->eTriggerOp; assert( op==TK_DELETE || op==TK_UPDATE || op==TK_INSERT ); - if( op!=TK_DELETE && sqlite3StrICmp("new",zTab) == 0 ){ + if( pParse->bReturning ){ + if( (pNC->ncFlags & NC_UBaseReg)!=0 + && (zTab==0 || sqlite3StrICmp(zTab,pParse->pTriggerTab->zName)==0) + ){ + pExpr->iTable = op!=TK_DELETE; + pTab = pParse->pTriggerTab; + } + }else if( op!=TK_DELETE && zTab && sqlite3StrICmp("new",zTab) == 0 ){ pExpr->iTable = 1; pTab = pParse->pTriggerTab; - }else if( op!=TK_INSERT && sqlite3StrICmp("old",zTab)==0 ){ + }else if( op!=TK_INSERT && zTab && sqlite3StrICmp("old",zTab)==0 ){ pExpr->iTable = 0; pTab = pParse->pTriggerTab; } } #endif /* SQLITE_OMIT_TRIGGER */ #ifndef SQLITE_OMIT_UPSERT - if( (pNC->ncFlags & NC_UUpsert)!=0 ){ + if( (pNC->ncFlags & NC_UUpsert)!=0 && zTab!=0 ){ Upsert *pUpsert = pNC->uNC.pUpsert; if( pUpsert && sqlite3StrICmp("excluded",zTab)==0 ){ pTab = pUpsert->pUpsertSrc->a[0].pTab; @@ -98173,6 +99636,7 @@ static int lookupName( } if( iColnCol ){ cnt++; + pMatch = 0; #ifndef SQLITE_OMIT_UPSERT if( pExpr->iTable==EXCLUDED_TABLE_NUMBER ){ testcase( iCol==(-1) ); @@ -98184,27 +99648,32 @@ static int lookupName( pExpr->iTable = pNC->uNC.pUpsert->regData + sqlite3TableColumnToStorage(pTab, iCol); eNewExprOp = TK_REGISTER; - ExprSetProperty(pExpr, EP_Alias); } }else #endif /* SQLITE_OMIT_UPSERT */ { -#ifndef SQLITE_OMIT_TRIGGER - if( iCol<0 ){ - pExpr->affExpr = SQLITE_AFF_INTEGER; - }else if( pExpr->iTable==0 ){ - testcase( iCol==31 ); - testcase( iCol==32 ); - pParse->oldmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<newmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<y.pTab = pTab; - pExpr->iColumn = (i16)iCol; - eNewExprOp = TK_TRIGGER; + if( pParse->bReturning ){ + eNewExprOp = TK_REGISTER; + pExpr->iTable = pNC->uNC.iBaseReg + (pTab->nCol+1)*pExpr->iTable + + sqlite3TableColumnToStorage(pTab, iCol) + 1; + }else{ + pExpr->iColumn = (i16)iCol; + eNewExprOp = TK_TRIGGER; +#ifndef SQLITE_OMIT_TRIGGER + if( iCol<0 ){ + pExpr->affExpr = SQLITE_AFF_INTEGER; + }else if( pExpr->iTable==0 ){ + testcase( iCol==31 ); + testcase( iCol==32 ); + pParse->oldmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<newmask |= (iCol>=32 ? 
0xffffffff : (((u32)1)<ncFlags & NC_UEList)!=0 - && cnt==0 + if( cnt==0 + && (pNC->ncFlags & NC_UEList)!=0 && zTab==0 ){ pEList = pNC->uNC.pEList; @@ -98274,7 +99743,7 @@ static int lookupName( sqlite3ErrorMsg(pParse, "row value misused"); return WRC_Abort; } - resolveAlias(pParse, pEList, j, pExpr, "", nSubquery); + resolveAlias(pParse, pEList, j, pExpr, nSubquery); cnt = 1; pMatch = 0; assert( zTab==0 && zDb==0 ); @@ -98353,7 +99822,7 @@ static int lookupName( sqlite3ErrorMsg(pParse, "%s: %s", zErr, zCol); } pParse->checkSchema = 1; - pTopNC->nErr++; + pTopNC->nNcErr++; } /* If a column from a table in pSrcList is referenced, then record @@ -98376,18 +99845,24 @@ static int lookupName( /* Clean up and return */ - sqlite3ExprDelete(db, pExpr->pLeft); - pExpr->pLeft = 0; - sqlite3ExprDelete(db, pExpr->pRight); - pExpr->pRight = 0; + if( !ExprHasProperty(pExpr,(EP_TokenOnly|EP_Leaf)) ){ + sqlite3ExprDelete(db, pExpr->pLeft); + pExpr->pLeft = 0; + sqlite3ExprDelete(db, pExpr->pRight); + pExpr->pRight = 0; + } pExpr->op = eNewExprOp; ExprSetProperty(pExpr, EP_Leaf); lookupname_end: if( cnt==1 ){ assert( pNC!=0 ); - if( !ExprHasProperty(pExpr, EP_Alias) ){ +#ifndef SQLITE_OMIT_AUTHORIZATION + if( pParse->db->xAuth + && (pExpr->op==TK_COLUMN || pExpr->op==TK_TRIGGER) + ){ sqlite3AuthRead(pParse, pExpr, pSchema, pNC->pSrcList); } +#endif /* Increment the nRef value on all name contexts from TopNC up to ** the point where the name matched. */ for(;;){ @@ -98409,7 +99884,7 @@ lookupname_end: SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *db, SrcList *pSrc, int iSrc, int iCol){ Expr *p = sqlite3ExprAlloc(db, TK_COLUMN, 0, 0); if( p ){ - struct SrcList_item *pItem = &pSrc->a[iSrc]; + SrcItem *pItem = &pSrc->a[iSrc]; Table *pTab = p->y.pTab = pItem->pTab; p->iTable = pItem->iCursor; if( p->y.pTab->iPKey==iCol ){ @@ -98521,7 +99996,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ */ case TK_ROW: { SrcList *pSrcList = pNC->pSrcList; - struct SrcList_item *pItem; + SrcItem *pItem; assert( pSrcList && pSrcList->nSrc>=1 ); pItem = pSrcList->a; pExpr->op = TK_COLUMN; @@ -98532,6 +100007,47 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ break; } + /* An optimization: Attempt to convert + ** + ** "expr IS NOT NULL" --> "TRUE" + ** "expr IS NULL" --> "FALSE" + ** + ** if we can prove that "expr" is never NULL. Call this the + ** "NOT NULL strength reduction optimization". + ** + ** If this optimization occurs, also restore the NameContext ref-counts + ** to the state they where in before the "column" LHS expression was + ** resolved. This prevents "column" from being counted as having been + ** referenced, which might prevent a SELECT from being erroneously + ** marked as correlated. 
+  /* An optimization:  Attempt to convert
+  **
+  **      "expr IS NOT NULL"  -->  "TRUE"
+  **      "expr IS NULL"      -->  "FALSE"
+  **
+  ** if we can prove that "expr" is never NULL.  Call this the
+  ** "NOT NULL strength reduction optimization".
+  **
+  ** If this optimization occurs, also restore the NameContext ref-counts
+  ** to the state they were in before the "column" LHS expression was
+  ** resolved.  This prevents "column" from being counted as having been
+  ** referenced, which might prevent a SELECT from being erroneously
+  ** marked as correlated.
+  */
+  case TK_NOTNULL:
+  case TK_ISNULL: {
+    int anRef[8];
+    NameContext *p;
+    int i;
+    for(i=0, p=pNC; p && i<ArraySize(anRef); p=p->pNext, i++){
+      anRef[i] = p->nRef;
+    }
+    sqlite3WalkExpr(pWalker, pExpr->pLeft);
+    if( 0==sqlite3ExprCanBeNull(pExpr->pLeft) && !IN_RENAME_OBJECT ){
+      if( pExpr->op==TK_NOTNULL ){
+        pExpr->u.zToken = "true";
+        ExprSetProperty(pExpr, EP_IsTrue);
+      }else{
+        pExpr->u.zToken = "false";
+        ExprSetProperty(pExpr, EP_IsFalse);
+      }
+      pExpr->op = TK_TRUEFALSE;
+      for(i=0, p=pNC; p && i<ArraySize(anRef); p=p->pNext, i++){
+        p->nRef = anRef[i];
+      }
+      sqlite3ExprDelete(pParse->db, pExpr->pLeft);
+      pExpr->pLeft = 0;
+    }
+    return WRC_Prune;
+  }

  /* A column name:                    ID
  ** Or table name and column name:    ID.ID
  ** Or a database, table and column:  ID.ID.ID
@@ -98613,7 +100129,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
          sqlite3ErrorMsg(pParse,
            "second argument to likelihood() must be a "
            "constant between 0.0 and 1.0");
-          pNC->nErr++;
+          pNC->nNcErr++;
        }
      }else{
        /* EVIDENCE-OF: R-61304-29449 The unlikely(X) function is
@@ -98635,7 +100151,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
        if( auth==SQLITE_DENY ){
          sqlite3ErrorMsg(pParse, "not authorized to use function: %s",
                          pDef->zName);
-          pNC->nErr++;
+          pNC->nNcErr++;
        }
        pExpr->op = TK_NULL;
        return WRC_Prune;
@@ -98691,7 +100207,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
        sqlite3ErrorMsg(pParse,
          "%.*s() may not be used as a window function", nId, zId
        );
-        pNC->nErr++;
+        pNC->nNcErr++;
      }else if(
          (is_agg && (pNC->ncFlags & NC_AllowAgg)==0)
       || (is_agg && (pDef->funcFlags&SQLITE_FUNC_WINDOW) && !pWin)
@@ -98704,13 +100220,13 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
          zType = "aggregate";
        }
        sqlite3ErrorMsg(pParse, "misuse of %s function %.*s()",zType,nId,zId);
-        pNC->nErr++;
+        pNC->nNcErr++;
        is_agg = 0;
      }
 #else
      if( (is_agg && (pNC->ncFlags & NC_AllowAgg)==0) ){
        sqlite3ErrorMsg(pParse,"misuse of aggregate function %.*s()",nId,zId);
-        pNC->nErr++;
+        pNC->nNcErr++;
        is_agg = 0;
      }
 #endif
@@ -98720,11 +100236,11 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
 #endif
      ){
        sqlite3ErrorMsg(pParse, "no such function: %.*s", nId, zId);
-        pNC->nErr++;
+        pNC->nNcErr++;
      }else if( wrong_num_args ){
        sqlite3ErrorMsg(pParse,"wrong number of arguments to function %.*s()",
             nId, zId);
-        pNC->nErr++;
+        pNC->nNcErr++;
      }
 #ifndef SQLITE_OMIT_WINDOWFUNC
      else if( is_agg==0 && ExprHasProperty(pExpr, EP_WinFunc) ){
@@ -98732,7 +100248,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
          "FILTER may not be used with non-aggregate %.*s()",
          nId, zId
        );
-        pNC->nErr++;
+        pNC->nNcErr++;
      }
 #endif
      if( is_agg ){
@@ -98759,6 +100275,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
          assert( pWin==pExpr->y.pWin );
          if( IN_RENAME_OBJECT==0 ){
            sqlite3WindowUpdate(pParse, pSel ? pSel->pWinDefn : 0, pWin, pDef);
+            if( pParse->db->mallocFailed ) break;
          }
          sqlite3WalkExprList(pWalker, pWin->pPartition);
          sqlite3WalkExprList(pWalker, pWin->pOrderBy);
@@ -98833,7 +100350,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
      assert( !ExprHasProperty(pExpr, EP_Reduced) );
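The TK_NOTNULL/TK_ISNULL case added above resolves its left operand speculatively: it snapshots the nRef counters of up to eight enclosing NameContexts, walks the operand, and restores the counters if the rewrite to TRUE/FALSE fires, so the probe leaves no stray reference behind. A minimal standalone sketch of that snapshot/walk/restore shape, with invented names (Ctx, walk_count_refs, try_strength_reduction) standing in for SQLite's NameContext and expression walker:

#include <stdio.h>

#define MAX_SAVED 8

typedef struct Ctx Ctx;
struct Ctx {
  int nRef;          /* reference count mutated by a speculative walk */
  Ctx *pNext;        /* enclosing context */
};

/* Stand-in for resolving an expression: may bump nRef on a context. */
static void walk_count_refs(Ctx *p){ if(p) p->nRef++; }

/* Try the rewrite; on success, roll the counters back so the
** speculative walk leaves no trace (mirrors the anRef[] loops in the
** resolveExprStep() hunk above). */
static int try_strength_reduction(Ctx *pNC, int exprCanBeNull){
  int anRef[MAX_SAVED], i;
  Ctx *p;
  for(i=0, p=pNC; p && i<MAX_SAVED; p=p->pNext, i++) anRef[i] = p->nRef;
  walk_count_refs(pNC);               /* resolve the LHS */
  if( !exprCanBeNull ){
    for(i=0, p=pNC; p && i<MAX_SAVED; p=p->pNext, i++) p->nRef = anRef[i];
    return 1;                         /* rewritten to TRUE/FALSE */
  }
  return 0;                           /* keep the original IS [NOT] NULL */
}

int main(void){
  Ctx outer = {0, 0}, inner = {0, &outer};
  printf("reduced=%d inner.nRef=%d\n",
         try_strength_reduction(&inner, 0), inner.nRef);
  return 0;
}

      /* Handle special cases of "x IS TRUE", "x IS FALSE", "x IS NOT TRUE",
      ** and "x IS NOT FALSE".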
*/ - if( pRight && pRight->op==TK_ID ){ + if( ALWAYS(pRight) && (pRight->op==TK_ID || pRight->op==TK_TRUEFALSE) ){ int rc = resolveExprStep(pWalker, pRight); if( rc==WRC_Abort ) return WRC_Abort; if( pRight->op==TK_TRUEFALSE ){ @@ -98955,11 +100472,11 @@ static int resolveOrderByTermToExprList( nc.pParse = pParse; nc.pSrcList = pSelect->pSrc; nc.uNC.pEList = pEList; - nc.ncFlags = NC_AllowAgg|NC_UEList; - nc.nErr = 0; + nc.ncFlags = NC_AllowAgg|NC_UEList|NC_NoSelect; + nc.nNcErr = 0; db = pParse->db; savedSuppErr = db->suppressErr; - if( IN_RENAME_OBJECT==0 ) db->suppressErr = 1; + db->suppressErr = 1; rc = sqlite3ResolveExprNames(&nc, pE); db->suppressErr = savedSuppErr; if( rc ) return 0; @@ -99042,6 +100559,7 @@ static int resolveCompoundOrderBy( Expr *pE, *pDup; if( pItem->done ) continue; pE = sqlite3ExprSkipCollateAndLikely(pItem->pExpr); + if( NEVER(pE==0) ) continue; if( sqlite3ExprIsInteger(pE, &iCol) ){ if( iCol<=0 || iCol>pEList->nExpr ){ resolveOutOfRangeError(pParse, "ORDER", i+1, pEList->nExpr); @@ -99057,29 +100575,24 @@ static int resolveCompoundOrderBy( ** Once the comparisons are finished, the duplicate expression ** is deleted. ** - ** Or, if this is running as part of an ALTER TABLE operation, - ** resolve the symbols in the actual expression, not a duplicate. - ** And, if one of the comparisons is successful, leave the expression - ** as is instead of transforming it to an integer as in the usual - ** case. This allows the code in alter.c to modify column - ** refererences within the ORDER BY expression as required. */ - if( IN_RENAME_OBJECT ){ - pDup = pE; - }else{ - pDup = sqlite3ExprDup(db, pE, 0); - } + ** If this is running as part of an ALTER TABLE operation and + ** the symbols resolve successfully, also resolve the symbols in the + ** actual expression. This allows the code in alter.c to modify + ** column references within the ORDER BY expression as required. */ + pDup = sqlite3ExprDup(db, pE, 0); if( !db->mallocFailed ){ assert(pDup); iCol = resolveOrderByTermToExprList(pParse, pSelect, pDup); + if( IN_RENAME_OBJECT && iCol>0 ){ + resolveOrderByTermToExprList(pParse, pSelect, pE); + } } - if( !IN_RENAME_OBJECT ){ - sqlite3ExprDelete(db, pDup); - } + sqlite3ExprDelete(db, pDup); } } if( iCol>0 ){ /* Convert the ORDER BY term into an integer column number iCol, - ** taking care to preserve the COLLATE clause if it exists */ + ** taking care to preserve the COLLATE clause if it exists. 
+  */
    if( !IN_RENAME_OBJECT ){
      Expr *pNew = sqlite3Expr(db, TK_INTEGER, 0);
      if( pNew==0 ) return 1;
@@ -99148,8 +100661,7 @@ SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy(
      resolveOutOfRangeError(pParse, zType, i+1, pEList->nExpr);
      return 1;
    }
-    resolveAlias(pParse, pEList, pItem->u.x.iOrderByCol-1, pItem->pExpr,
-                 zType,0);
+    resolveAlias(pParse, pEList, pItem->u.x.iOrderByCol-1, pItem->pExpr,0);
    }
  }
  return 0;
@@ -99215,12 +100727,13 @@ static int resolveOrderGroupBy(
  Parse *pParse;        /* Parsing context */
  int nResult;          /* Number of terms in the result set */

-  if( pOrderBy==0 ) return 0;
+  assert( pOrderBy!=0 );
  nResult = pSelect->pEList->nExpr;
  pParse = pNC->pParse;
  for(i=0, pItem=pOrderBy->a; i<pOrderBy->nExpr; i++, pItem++){
    Expr *pE = pItem->pExpr;
    Expr *pE2 = sqlite3ExprSkipCollateAndLikely(pE);
+    if( NEVER(pE2==0) ) continue;
    if( zType[0]!='G' ){
      iCol = resolveAsName(pParse, pSelect->pEList, pE2);
      if( iCol>0 ){
@@ -99304,8 +100817,10 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
  while( p ){
    assert( (p->selFlags & SF_Expanded)!=0 );
    assert( (p->selFlags & SF_Resolved)==0 );
+    assert( db->suppressErr==0 ); /* SF_Resolved not set if errors suppressed */
    p->selFlags |= SF_Resolved;
+
    /* Resolve the expressions in the LIMIT and OFFSET clauses. These
    ** are not allowed to refer to any names, so pass an empty NameContext.
    */
@@ -99333,27 +100848,26 @@ static int resolveSelectStep(Walker *pWalker, Select *p){

    /* Recursively resolve names in all subqueries
    */
    for(i=0; i<p->pSrc->nSrc; i++){
-      struct SrcList_item *pItem = &p->pSrc->a[i];
+      SrcItem *pItem = &p->pSrc->a[i];
      if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){
-        NameContext *pNC;      /* Used to iterate name contexts */
-        int nRef = 0;          /* Refcount for pOuterNC and outer contexts */
+        int nRef = pOuterNC ? pOuterNC->nRef : 0;
        const char *zSavedContext = pParse->zAuthContext;

-        /* Count the total number of references to pOuterNC and all of its
-        ** parent contexts. After resolving references to expressions in
-        ** pItem->pSelect, check if this value has changed. If so, then
-        ** SELECT statement pItem->pSelect must be correlated. Set the
-        ** pItem->fg.isCorrelated flag if this is the case. */
-        for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef += pNC->nRef;
-
        if( pItem->zName ) pParse->zAuthContext = pItem->zName;
        sqlite3ResolveSelectNames(pParse, pItem->pSelect, pOuterNC);
        pParse->zAuthContext = zSavedContext;
        if( pParse->nErr || db->mallocFailed ) return WRC_Abort;

-        for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef -= pNC->nRef;
-        assert( pItem->fg.isCorrelated==0 && nRef<=0 );
-        pItem->fg.isCorrelated = (nRef!=0);
+        /* If the number of references to the outer context changed when
+        ** expressions in the sub-select were resolved, the sub-select
+        ** is correlated.  It is not required to check the refcount on any
+        ** but the innermost outer context object, as lookupName() increments
+        ** the refcount on all contexts between the current one and the
+        ** context containing the column when it resolves a name. */
+        if( pOuterNC ){
+          assert( pItem->fg.isCorrelated==0 && pOuterNC->nRef>=nRef );
+          pItem->fg.isCorrelated = (pOuterNC->nRef>nRef);
+        }
      }
    }
@@ -99380,13 +100894,6 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
      sNC.ncFlags &= ~NC_AllowAgg;
    }

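The correlation hunk above drops the loop that summed nRef over every outer NameContext and instead compares just the innermost outer context before and after resolving the sub-select, relying on lookupName() bumping every context between a reference and its definition. A toy model of that before/after check (NameCtx, resolve_inner and is_correlated are invented for this sketch):

#include <stdio.h>

typedef struct NameCtx NameCtx;
struct NameCtx { int nRef; NameCtx *pNext; };

/* Stand-in for resolving a sub-select: a correlated reference bumps
** nRef on every context from the innermost one outward. */
static void resolve_inner(NameCtx *pOuter, int correlated){
  for(; correlated && pOuter; pOuter=pOuter->pNext) pOuter->nRef++;
}

/* It suffices to watch the innermost outer context: if resolution
** touched any outer context at all, it touched this one too. */
static int is_correlated(NameCtx *pOuter, int correlated){
  int nRef = pOuter ? pOuter->nRef : 0;
  resolve_inner(pOuter, correlated);
  return pOuter ? (pOuter->nRef > nRef) : 0;
}

int main(void){
  NameCtx outer = {0, 0};
  printf("plain=%d correlated=%d\n",
         is_correlated(&outer, 0), is_correlated(&outer, 1));
  return 0;
}

-    /* If a HAVING clause is present, then there must be a GROUP BY clause.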
- */ - if( p->pHaving && !pGroupBy ){ - sqlite3ErrorMsg(pParse, "a GROUP BY clause is required before HAVING"); - return WRC_Abort; - } - /* Add the output column list to the name-context before parsing the ** other expressions in the SELECT statement. This is so that ** expressions in the WHERE clause (etc.) can refer to expressions by @@ -99395,15 +100902,21 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ ** Minor point: If this is the case, then the expression will be ** re-evaluated for each reference to it. */ - assert( (sNC.ncFlags & (NC_UAggInfo|NC_UUpsert))==0 ); + assert( (sNC.ncFlags & (NC_UAggInfo|NC_UUpsert|NC_UBaseReg))==0 ); sNC.uNC.pEList = p->pEList; sNC.ncFlags |= NC_UEList; - if( sqlite3ResolveExprNames(&sNC, p->pHaving) ) return WRC_Abort; + if( p->pHaving ){ + if( !pGroupBy ){ + sqlite3ErrorMsg(pParse, "a GROUP BY clause is required before HAVING"); + return WRC_Abort; + } + if( sqlite3ResolveExprNames(&sNC, p->pHaving) ) return WRC_Abort; + } if( sqlite3ResolveExprNames(&sNC, p->pWhere) ) return WRC_Abort; /* Resolve names in table-valued-function arguments */ for(i=0; ipSrc->nSrc; i++){ - struct SrcList_item *pItem = &p->pSrc->a[i]; + SrcItem *pItem = &p->pSrc->a[i]; if( pItem->fg.isTabFunc && sqlite3ResolveExprListNames(&sNC, pItem->u1.pFuncArg) ){ @@ -99411,6 +100924,19 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ } } +#ifndef SQLITE_OMIT_WINDOWFUNC + if( IN_RENAME_OBJECT ){ + Window *pWin; + for(pWin=p->pWinDefn; pWin; pWin=pWin->pNextWin){ + if( sqlite3ResolveExprListNames(&sNC, pWin->pOrderBy) + || sqlite3ResolveExprListNames(&sNC, pWin->pPartition) + ){ + return WRC_Abort; + } + } + } +#endif + /* The ORDER BY and GROUP BY clauses may not refer to terms in ** outer queries */ @@ -99438,7 +100964,8 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ ** is not detected until much later, and so we need to go ahead and ** resolve those symbols on the incorrect ORDER BY for consistency. */ - if( isCompound<=nCompound /* Defer right-most ORDER BY of a compound */ + if( p->pOrderBy!=0 + && isCompound<=nCompound /* Defer right-most ORDER BY of a compound */ && resolveOrderGroupBy(&sNC, p, p->pOrderBy, "ORDER") ){ return WRC_Abort; @@ -99466,19 +100993,6 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ } } -#ifndef SQLITE_OMIT_WINDOWFUNC - if( IN_RENAME_OBJECT ){ - Window *pWin; - for(pWin=p->pWinDefn; pWin; pWin=pWin->pNextWin){ - if( sqlite3ResolveExprListNames(&sNC, pWin->pOrderBy) - || sqlite3ResolveExprListNames(&sNC, pWin->pPartition) - ){ - return WRC_Abort; - } - } - } -#endif - /* If this is part of a compound SELECT, check that it has the right ** number of expressions in the select list. */ if( p->pNext && p->pEList->nExpr!=p->pNext->pEList->nExpr ){ @@ -99562,7 +101076,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprNames( pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin); w.pParse = pNC->pParse; w.xExprCallback = resolveExprStep; - w.xSelectCallback = resolveSelectStep; + w.xSelectCallback = (pNC->ncFlags & NC_NoSelect) ? 
0 : resolveSelectStep; w.xSelectCallback2 = 0; w.u.pNC = pNC; #if SQLITE_MAX_EXPR_DEPTH>0 @@ -99581,7 +101095,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprNames( testcase( pNC->ncFlags & NC_HasWin ); ExprSetProperty(pExpr, pNC->ncFlags & (NC_HasAgg|NC_HasWin) ); pNC->ncFlags |= savedHasAgg; - return pNC->nErr>0 || w.pParse->nErr>0; + return pNC->nNcErr>0 || w.pParse->nErr>0; } /* @@ -99626,7 +101140,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames( savedHasAgg |= pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg|NC_HasWin); pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin); } - if( pNC->nErr>0 || w.pParse->nErr>0 ) return WRC_Abort; + if( w.pParse->nErr>0 ) return WRC_Abort; } pNC->ncFlags |= savedHasAgg; return WRC_Continue; @@ -99761,12 +101275,18 @@ SQLITE_PRIVATE char sqlite3TableColumnAffinity(Table *pTab, int iCol){ */ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){ int op; - while( ExprHasProperty(pExpr, EP_Skip) ){ - assert( pExpr->op==TK_COLLATE || pExpr->op==TK_IF_NULL_ROW ); + while( ExprHasProperty(pExpr, EP_Skip|EP_IfNullRow) ){ + assert( pExpr->op==TK_COLLATE + || pExpr->op==TK_IF_NULL_ROW + || (pExpr->op==TK_REGISTER && pExpr->op2==TK_IF_NULL_ROW) ); pExpr = pExpr->pLeft; assert( pExpr!=0 ); } op = pExpr->op; + if( op==TK_REGISTER ) op = pExpr->op2; + if( (op==TK_COLUMN || op==TK_AGG_COLUMN) && pExpr->y.pTab ){ + return sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); + } if( op==TK_SELECT ){ assert( pExpr->flags&EP_xIsSelect ); assert( pExpr->x.pSelect!=0 ); @@ -99774,16 +101294,12 @@ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){ assert( pExpr->x.pSelect->pEList->a[0].pExpr!=0 ); return sqlite3ExprAffinity(pExpr->x.pSelect->pEList->a[0].pExpr); } - if( op==TK_REGISTER ) op = pExpr->op2; #ifndef SQLITE_OMIT_CAST if( op==TK_CAST ){ assert( !ExprHasProperty(pExpr, EP_IntValue) ); return sqlite3AffinityType(pExpr->u.zToken, 0); } #endif - if( (op==TK_AGG_COLUMN || op==TK_COLUMN) && pExpr->y.pTab ){ - return sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); - } if( op==TK_SELECT_COLUMN ){ assert( pExpr->pLeft->flags&EP_xIsSelect ); return sqlite3ExprAffinity( @@ -99832,7 +101348,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(Parse *pParse, Expr *pExpr, con */ SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr *pExpr){ while( pExpr && ExprHasProperty(pExpr, EP_Skip) ){ - assert( pExpr->op==TK_COLLATE || pExpr->op==TK_IF_NULL_ROW ); + assert( pExpr->op==TK_COLLATE ); pExpr = pExpr->pLeft; } return pExpr; @@ -99851,7 +101367,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprSkipCollateAndLikely(Expr *pExpr){ assert( pExpr->op==TK_FUNCTION ); pExpr = pExpr->x.pList->a[0].pExpr; }else{ - assert( pExpr->op==TK_COLLATE || pExpr->op==TK_IF_NULL_ROW ); + assert( pExpr->op==TK_COLLATE ); pExpr = pExpr->pLeft; } } @@ -100160,7 +101676,7 @@ SQLITE_PRIVATE int sqlite3ExprVectorSize(Expr *pExpr){ ** been positioned. 
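One of the sqlite3ExprAffinity() hunks above moves the TK_REGISTER-to-op2 translation ahead of the column tests, so a column value that has been copied into a register still reports its column affinity. A self-contained caricature of that lookup order (MiniExpr and the single-letter affinity codes are inventions of this sketch, not SQLite's types):

#include <stdio.h>

enum { TK_COLUMN = 1, TK_REGISTER = 2 };

typedef struct MiniExpr {
  int op;        /* current opcode */
  int op2;       /* original opcode, kept when a value moves to a register */
  char colAff;   /* affinity of the underlying table column, if any */
} MiniExpr;

/* Translate TK_REGISTER back to its original opcode *before* testing
** for a column, the way the reordered sqlite3ExprAffinity() does. */
static char expr_affinity(const MiniExpr *p){
  int op = p->op;
  if( op==TK_REGISTER ) op = p->op2;
  if( op==TK_COLUMN ) return p->colAff;
  return 'A';    /* stand-in for a default/blob affinity */
}

int main(void){
  MiniExpr reg = { TK_REGISTER, TK_COLUMN, 'D' };
  printf("affinity=%c\n", expr_affinity(&reg));  /* D, not the default A */
  return 0;
}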
*/ SQLITE_PRIVATE Expr *sqlite3VectorFieldSubexpr(Expr *pVector, int i){ - assert( iop==TK_ERROR ); if( sqlite3ExprIsVector(pVector) ){ assert( pVector->op2==0 || pVector->op==TK_REGISTER ); if( pVector->op==TK_SELECT || pVector->op2==TK_SELECT ){ @@ -100276,7 +101792,7 @@ static int exprVectorRegister( int *pRegFree /* OUT: Temp register to free */ ){ u8 op = pVector->op; - assert( op==TK_VECTOR || op==TK_REGISTER || op==TK_SELECT ); + assert( op==TK_VECTOR || op==TK_REGISTER || op==TK_SELECT || op==TK_ERROR ); if( op==TK_REGISTER ){ *ppExpr = sqlite3VectorFieldSubexpr(pVector, iField); return pVector->iTable+iField; @@ -100285,8 +101801,11 @@ static int exprVectorRegister( *ppExpr = pVector->x.pSelect->pEList->a[iField].pExpr; return regSelect+iField; } - *ppExpr = pVector->x.pList->a[iField].pExpr; - return sqlite3ExprCodeTemp(pParse, *ppExpr, pRegFree); + if( op==TK_VECTOR ){ + *ppExpr = pVector->x.pList->a[iField].pExpr; + return sqlite3ExprCodeTemp(pParse, *ppExpr, pRegFree); + } + return 0; } /* @@ -100315,6 +101834,7 @@ static void codeVectorCompare( int regLeft = 0; int regRight = 0; u8 opx = op; + int addrCmp = 0; int addrDone = sqlite3VdbeMakeLabel(pParse); int isCommuted = ExprHasProperty(pExpr,EP_Commuted); @@ -100334,21 +101854,24 @@ static void codeVectorCompare( assert( p5==0 || pExpr->op!=op ); assert( p5==SQLITE_NULLEQ || pExpr->op==op ); - p5 |= SQLITE_STOREP2; - if( opx==TK_LE ) opx = TK_LT; - if( opx==TK_GE ) opx = TK_GT; + if( op==TK_LE ) opx = TK_LT; + if( op==TK_GE ) opx = TK_GT; + if( op==TK_NE ) opx = TK_EQ; regLeft = exprCodeSubselect(pParse, pLeft); regRight = exprCodeSubselect(pParse, pRight); + sqlite3VdbeAddOp2(v, OP_Integer, 1, dest); for(i=0; 1 /*Loop exits by "break"*/; i++){ int regFree1 = 0, regFree2 = 0; - Expr *pL, *pR; + Expr *pL = 0, *pR = 0; int r1, r2; assert( i>=0 && i0 @@ -100485,6 +102014,7 @@ SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *p){ ** Expr.flags. */ SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){ + if( pParse->nErr ) return; if( p && p->x.pList && !ExprHasProperty(p, EP_xIsSelect) ){ p->flags |= EP_Propagate & sqlite3ExprListFlags(p->x.pList); } @@ -100661,8 +102191,8 @@ SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse *pParse, Expr *pLeft, Expr *pRight){ }else if( (ExprAlwaysFalse(pLeft) || ExprAlwaysFalse(pRight)) && !IN_RENAME_OBJECT ){ - sqlite3ExprDelete(db, pLeft); - sqlite3ExprDelete(db, pRight); + sqlite3ExprDeferredDelete(pParse, pLeft); + sqlite3ExprDeferredDelete(pParse, pRight); return sqlite3Expr(db, TK_INTEGER, "0"); }else{ return sqlite3PExpr(pParse, TK_AND, pLeft, pRight); @@ -100859,6 +102389,22 @@ SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){ if( p ) sqlite3ExprDeleteNN(db, p); } + +/* +** Arrange to cause pExpr to be deleted when the pParse is deleted. +** This is similar to sqlite3ExprDelete() except that the delete is +** deferred untilthe pParse is deleted. +** +** The pExpr might be deleted immediately on an OOM error. +** +** The deferred delete is (currently) implemented by adding the +** pExpr to the pParse->pConstExpr list with a register number of 0. +*/ +SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){ + pParse->pConstExpr = + sqlite3ExprListAppend(pParse, pParse->pConstExpr, pExpr); +} + /* Invoke sqlite3RenameExprUnmap() and sqlite3ExprDelete() on the ** expression. 
+*/
@@ -101001,6 +102547,7 @@ static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){
  if( pzBuffer ){
    zAlloc = *pzBuffer;
    staticFlag = EP_Static;
+    assert( zAlloc!=0 );
  }else{
    zAlloc = sqlite3DbMallocRawNN(db, dupedExprSize(p, dupFlags));
    staticFlag = 0;
@@ -101079,7 +102626,8 @@ static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){
      if( pNew->op==TK_SELECT_COLUMN ){
        pNew->pLeft = p->pLeft;
        assert( p->iColumn==0 || p->pRight==0 );
-        assert( p->pRight==0 || p->pRight==p->pLeft );
+        assert( p->pRight==0 || p->pRight==p->pLeft
+             || ExprHasProperty(p->pLeft, EP_Subquery) );
      }else{
        pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0);
      }
@@ -101096,7 +102644,7 @@ static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){
** and the db->mallocFailed flag set.
*/
 #ifndef SQLITE_OMIT_CTE
-static With *withDup(sqlite3 *db, With *p){
+SQLITE_PRIVATE With *sqlite3WithDup(sqlite3 *db, With *p){
  With *pRet = 0;
  if( p ){
    sqlite3_int64 nByte = sizeof(*p) + sizeof(p->a[0]) * (p->nCte-1);
@@ -101114,7 +102662,7 @@
  return pRet;
}
 #else
-# define withDup(x,y) 0
+# define sqlite3WithDup(x,y) 0
 #endif

 #ifndef SQLITE_OMIT_WINDOWFUNC
@@ -101181,6 +102729,7 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags)
  pNew = sqlite3DbMallocRawNN(db, sqlite3DbMallocSize(db, p));
  if( pNew==0 ) return 0;
  pNew->nExpr = p->nExpr;
+  pNew->nAlloc = p->nAlloc;
  pItem = pNew->a;
  pOldItem = p->a;
  for(i=0; i<p->nExpr; i++, pItem++, pOldItem++){
@@ -101193,7 +102742,8 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags)
    ){
      assert( pNewExpr->iColumn==0 || i>0 );
      if( pNewExpr->iColumn==0 ){
-        assert( pOldExpr->pLeft==pOldExpr->pRight );
+        assert( pOldExpr->pLeft==pOldExpr->pRight
+             || ExprHasProperty(pOldExpr->pLeft, EP_Subquery) );
        pPriorSelectCol = pNewExpr->pLeft = pNewExpr->pRight;
      }else{
        assert( i>0 );
@@ -101233,8 +102783,8 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){
  if( pNew==0 ) return 0;
  pNew->nSrc = pNew->nAlloc = p->nSrc;
  for(i=0; i<p->nSrc; i++){
-    struct SrcList_item *pNewItem = &pNew->a[i];
-    struct SrcList_item *pOldItem = &p->a[i];
+    SrcItem *pNewItem = &pNew->a[i];
+    SrcItem *pOldItem = &p->a[i];
    Table *pTab;
    pNewItem->pSchema = pOldItem->pSchema;
    pNewItem->zDatabase = sqlite3DbStrDup(db, pOldItem->zDatabase);
@@ -101247,7 +102797,10 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){
    if( pNewItem->fg.isIndexedBy ){
      pNewItem->u1.zIndexedBy = sqlite3DbStrDup(db, pOldItem->u1.zIndexedBy);
    }
-    pNewItem->pIBIndex = pOldItem->pIBIndex;
+    pNewItem->u2 = pOldItem->u2;
+    if( pNewItem->fg.isCte ){
+      pNewItem->u2.pCteUse->nUse++;
+    }
    if( pNewItem->fg.isTabFunc ){
      pNewItem->u1.pFuncArg =
          sqlite3ExprListDup(db, pOldItem->u1.pFuncArg, flags);
@@ -101313,13 +102866,21 @@ SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *pDup, int flags){
    pNew->addrOpenEphm[0] = -1;
    pNew->addrOpenEphm[1] = -1;
    pNew->nSelectRow = p->nSelectRow;
-    pNew->pWith = withDup(db, p->pWith);
+    pNew->pWith = sqlite3WithDup(db, p->pWith);
 #ifndef SQLITE_OMIT_WINDOWFUNC
    pNew->pWin = 0;
    pNew->pWinDefn = sqlite3WindowListDup(db, p->pWinDefn);
    if( p->pWin && db->mallocFailed==0 ) gatherSelectWindows(pNew);
 #endif
    pNew->selId = p->selId;
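sqlite3SelectDup() now discards the whole partially-built Select when an OOM is detected mid-copy, instead of returning an incomplete object to the code generator. The same all-or-nothing policy on a plain linked list, as a hedged sketch (Node and dup_list are invented):

#include <stdio.h>
#include <stdlib.h>

typedef struct Node Node;
struct Node { int val; Node *pNext; };

/* Duplicate a list; if any allocation fails part-way, free the partial
** copy and return NULL rather than hand back an incomplete structure. */
static Node *dup_list(const Node *p){
  Node *pHead = 0, **pp = &pHead;
  for(; p; p=p->pNext){
    Node *pNew = malloc(sizeof(*pNew));
    if( pNew==0 ){
      while( pHead ){ Node *pDel = pHead; pHead = pHead->pNext; free(pDel); }
      return 0;                      /* all-or-nothing */
    }
    pNew->val = p->val;
    pNew->pNext = 0;
    *pp = pNew;
    pp = &pNew->pNext;
  }
  return pHead;
}

int main(void){
  Node a = {1, 0}, b = {2, &a};      /* stack-allocated list: 2 -> 1 */
  Node *pCopy = dup_list(&b);
  printf("copied=%s\n", pCopy ? "yes" : "no");
  while( pCopy ){ Node *pDel = pCopy; pCopy = pCopy->pNext; free(pDel); }
  return 0;
}

+    if( db->mallocFailed ){
+      /* Any prior OOM might have left the Select object incomplete.
+      ** Delete the whole thing rather than allow an incomplete Select
+      ** to be used by the code generator.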
+      */
+      pNew->pNext = 0;
+      sqlite3SelectDelete(db, pNew);
+      break;
+    }
    *pp = pNew;
    pp = &pNew->pPrior;
    pNext = pNew;
@@ -101350,41 +102911,64 @@ SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *p, int flags){
** NULL is returned.  If non-NULL is returned, then it is guaranteed
** that the new entry was successfully appended.
*/
+static const struct ExprList_item zeroItem = {0};
+SQLITE_PRIVATE SQLITE_NOINLINE ExprList *sqlite3ExprListAppendNew(
+  sqlite3 *db,            /* Database handle.  Used for memory allocation */
+  Expr *pExpr             /* Expression to be appended. Might be NULL */
+){
+  struct ExprList_item *pItem;
+  ExprList *pList;
+
+  pList = sqlite3DbMallocRawNN(db, sizeof(ExprList)+sizeof(pList->a[0])*4 );
+  if( pList==0 ){
+    sqlite3ExprDelete(db, pExpr);
+    return 0;
+  }
+  pList->nAlloc = 4;
+  pList->nExpr = 1;
+  pItem = &pList->a[0];
+  *pItem = zeroItem;
+  pItem->pExpr = pExpr;
+  return pList;
+}
+SQLITE_PRIVATE SQLITE_NOINLINE ExprList *sqlite3ExprListAppendGrow(
+  sqlite3 *db,            /* Database handle.  Used for memory allocation */
+  ExprList *pList,        /* List to which to append. Might be NULL */
+  Expr *pExpr             /* Expression to be appended. Might be NULL */
+){
+  struct ExprList_item *pItem;
+  ExprList *pNew;
+  pList->nAlloc *= 2;
+  pNew = sqlite3DbRealloc(db, pList,
+       sizeof(*pList)+(pList->nAlloc-1)*sizeof(pList->a[0]));
+  if( pNew==0 ){
+    sqlite3ExprListDelete(db, pList);
+    sqlite3ExprDelete(db, pExpr);
+    return 0;
+  }else{
+    pList = pNew;
+  }
+  pItem = &pList->a[pList->nExpr++];
+  *pItem = zeroItem;
+  pItem->pExpr = pExpr;
+  return pList;
+}
SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(
  Parse *pParse,          /* Parsing context */
  ExprList *pList,        /* List to which to append. Might be NULL */
  Expr *pExpr             /* Expression to be appended. Might be NULL */
){
  struct ExprList_item *pItem;
-  sqlite3 *db = pParse->db;
-  assert( db!=0 );
  if( pList==0 ){
-    pList = sqlite3DbMallocRawNN(db, sizeof(ExprList) );
-    if( pList==0 ){
-      goto no_mem;
-    }
-    pList->nExpr = 0;
-  }else if( (pList->nExpr & (pList->nExpr-1))==0 ){
-    ExprList *pNew;
-    pNew = sqlite3DbRealloc(db, pList,
-         sizeof(*pList)+(2*(sqlite3_int64)pList->nExpr-1)*sizeof(pList->a[0]));
-    if( pNew==0 ){
-      goto no_mem;
-    }
-    pList = pNew;
+    return sqlite3ExprListAppendNew(pParse->db,pExpr);
+  }
+  if( pList->nAlloc<pList->nExpr+1 ){
+    return sqlite3ExprListAppendGrow(pParse->db,pList,pExpr);
  }
  pItem = &pList->a[pList->nExpr++];
-  assert( offsetof(struct ExprList_item,zEName)==sizeof(pItem->pExpr) );
-  assert( offsetof(struct ExprList_item,pExpr)==0 );
-  memset(&pItem->zEName,0,sizeof(*pItem)-offsetof(struct ExprList_item,zEName));
+  *pItem = zeroItem;
  pItem->pExpr = pExpr;
  return pList;
-
-no_mem:
-  /* Avoid leaking memory if malloc has failed. */
-  sqlite3ExprDelete(db, pExpr);
-  sqlite3ExprListDelete(db, pList);
-  return 0;
}

/*
@@ -101997,8 +103581,10 @@ SQLITE_PRIVATE int sqlite3ExprIsInteger(Expr *p, int *pValue){
*/
SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr *p){
  u8 op;
+  assert( p!=0 );
  while( p->op==TK_UPLUS || p->op==TK_UMINUS ){
    p = p->pLeft;
+    assert( p!=0 );
  }
  op = p->op;
  if( op==TK_REGISTER ) op = p->op2;
@@ -102285,7 +103871,7 @@ SQLITE_PRIVATE int sqlite3FindInIndex(
      /* Code an OP_Transaction and OP_TableLock for <table>. */
      iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
      - assert( iDb>=0 && iDb=0 && iDbtnum, 0, pTab->zName);
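The rewritten sqlite3ExprListAppend() above keeps the common in-capacity append tiny and pushes both allocation paths into separate SQLITE_NOINLINE helpers, growing by doubling through the new nAlloc field. A rough standalone analogue built on malloc/realloc (IntList and its helpers are invented; the real code also zero-fills the new slot from a static zeroItem):

#include <stdlib.h>

typedef struct IntList { int nAlloc, nUsed; int *a; } IntList;

/* Cold path: the first append allocates room for a few entries up front. */
static IntList *list_new(int x){
  IntList *p = malloc(sizeof(*p));
  if( p==0 ) return 0;
  p->a = malloc(4*sizeof(int));
  if( p->a==0 ){ free(p); return 0; }
  p->nAlloc = 4;
  p->nUsed = 1;
  p->a[0] = x;
  return p;
}

/* Cold path: double the capacity, then append. */
static IntList *list_grow(IntList *p, int x){
  int *aNew = realloc(p->a, 2u*p->nAlloc*sizeof(int));
  if( aNew==0 ){ free(p->a); free(p); return 0; }
  p->a = aNew;
  p->nAlloc *= 2;
  p->a[p->nUsed++] = x;
  return p;
}

/* Hot path stays tiny: in-capacity appends take the early return and
** all allocation work lives in the out-of-line helpers above. */
static IntList *list_append(IntList *p, int x){
  if( p==0 ) return list_new(x);
  if( p->nAlloc < p->nUsed+1 ) return list_grow(p, x);
  p->a[p->nUsed++] = x;
  return p;
}

int main(void){
  IntList *p = 0;
  int i;
  for(i=0; i<100; i++){
    p = list_append(p, i);
    if( p==0 ) return 1;             /* helpers already cleaned up */
  }
  free(p->a); free(p);
  return 0;
}

@@ -102630,19 +104216,23 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN(
    /* If the LHS and RHS of the IN operator do not match, that
    ** error will have been caught long before we reach this point.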
*/ if( ALWAYS(pEList->nExpr==nVal) ){ + Select *pCopy; SelectDest dest; int i; + int rc; sqlite3SelectDestInit(&dest, SRT_Set, iTab); dest.zAffSdst = exprINAffinity(pParse, pExpr); pSelect->iLimit = 0; testcase( pSelect->selFlags & SF_Distinct ); testcase( pKeyInfo==0 ); /* Caused by OOM in sqlite3KeyInfoAlloc() */ - if( sqlite3Select(pParse, pSelect, &dest) ){ - sqlite3DbFree(pParse->db, dest.zAffSdst); + pCopy = sqlite3SelectDup(pParse->db, pSelect, 0); + rc = pParse->db->mallocFailed ? 1 :sqlite3Select(pParse, pCopy, &dest); + sqlite3SelectDelete(pParse->db, pCopy); + sqlite3DbFree(pParse->db, dest.zAffSdst); + if( rc ){ sqlite3KeyInfoUnref(pKeyInfo); return; } - sqlite3DbFree(pParse->db, dest.zAffSdst); assert( pKeyInfo!=0 ); /* OOM will cause exit after sqlite3Select() */ assert( pEList!=0 ); assert( pEList->nExpr>0 ); @@ -102741,12 +104331,30 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ Vdbe *v = pParse->pVdbe; assert( v!=0 ); + if( pParse->nErr ) return 0; testcase( pExpr->op==TK_EXISTS ); testcase( pExpr->op==TK_SELECT ); assert( pExpr->op==TK_EXISTS || pExpr->op==TK_SELECT ); assert( ExprHasProperty(pExpr, EP_xIsSelect) ); pSel = pExpr->x.pSelect; + /* If this routine has already been coded, then invoke it as a + ** subroutine. */ + if( ExprHasProperty(pExpr, EP_Subrtn) ){ + ExplainQueryPlan((pParse, 0, "REUSE SUBQUERY %d", pSel->selId)); + sqlite3VdbeAddOp2(v, OP_Gosub, pExpr->y.sub.regReturn, + pExpr->y.sub.iAddr); + return pExpr->iTable; + } + + /* Begin coding the subroutine */ + ExprSetProperty(pExpr, EP_Subrtn); + pExpr->y.sub.regReturn = ++pParse->nMem; + pExpr->y.sub.iAddr = + sqlite3VdbeAddOp2(v, OP_Integer, 0, pExpr->y.sub.regReturn) + 1; + VdbeComment((v, "return address")); + + /* The evaluation of the EXISTS/SELECT must be repeated every time it ** is encountered if any of the following is true: ** @@ -102758,22 +104366,6 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ ** save the results, and reuse the same result on subsequent invocations. */ if( !ExprHasProperty(pExpr, EP_VarSelect) ){ - /* If this routine has already been coded, then invoke it as a - ** subroutine. 
*/ - if( ExprHasProperty(pExpr, EP_Subrtn) ){ - ExplainQueryPlan((pParse, 0, "REUSE SUBQUERY %d", pSel->selId)); - sqlite3VdbeAddOp2(v, OP_Gosub, pExpr->y.sub.regReturn, - pExpr->y.sub.iAddr); - return pExpr->iTable; - } - - /* Begin coding the subroutine */ - ExprSetProperty(pExpr, EP_Subrtn); - pExpr->y.sub.regReturn = ++pParse->nMem; - pExpr->y.sub.iAddr = - sqlite3VdbeAddOp2(v, OP_Integer, 0, pExpr->y.sub.regReturn) + 1; - VdbeComment((v, "return address")); - addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); } @@ -102822,19 +104414,22 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ } pSel->iLimit = 0; if( sqlite3Select(pParse, pSel, &dest) ){ + if( pParse->nErr ){ + pExpr->op2 = pExpr->op; + pExpr->op = TK_ERROR; + } return 0; } pExpr->iTable = rReg = dest.iSDParm; ExprSetVVAProperty(pExpr, EP_NoReduce); if( addrOnce ){ sqlite3VdbeJumpHere(v, addrOnce); - - /* Subroutine return */ - sqlite3VdbeAddOp1(v, OP_Return, pExpr->y.sub.regReturn); - sqlite3VdbeChangeP1(v, pExpr->y.sub.iAddr-1, sqlite3VdbeCurrentAddr(v)-1); - sqlite3ClearTempRegCache(pParse); } + /* Subroutine return */ + sqlite3VdbeAddOp1(v, OP_Return, pExpr->y.sub.regReturn); + sqlite3VdbeChangeP1(v, pExpr->y.sub.iAddr-1, sqlite3VdbeCurrentAddr(v)-1); + sqlite3ClearTempRegCache(pParse); return rReg; } #endif /* SQLITE_OMIT_SUBQUERY */ @@ -102848,7 +104443,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ */ SQLITE_PRIVATE int sqlite3ExprCheckIN(Parse *pParse, Expr *pIn){ int nVector = sqlite3ExprVectorSize(pIn->pLeft); - if( (pIn->flags & EP_xIsSelect) ){ + if( (pIn->flags & EP_xIsSelect)!=0 && !pParse->db->mallocFailed ){ if( nVector!=pIn->x.pSelect->pEList->nExpr ){ sqlite3SubselectError(pParse, pIn->x.pSelect->pEList->nExpr, nVector); return 1; @@ -103039,6 +104634,7 @@ static void sqlite3ExprCodeIN( if( pParse->nErr ) goto sqlite3ExprCodeIN_finished; for(i=0; ipLeft, i); + if( pParse->db->mallocFailed ) goto sqlite3ExprCodeIN_oom_error; if( sqlite3ExprCanBeNull(p) ){ sqlite3VdbeAddOp2(v, OP_IsNull, rLhs+i, destStep2); VdbeCoverage(v); @@ -103337,6 +104933,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse *pParse, int iFrom, int iTo, int n */ static void exprToRegister(Expr *pExpr, int iReg){ Expr *p = sqlite3ExprSkipCollateAndLikely(pExpr); + if( NEVER(p==0) ) return; p->op2 = p->op; p->op = TK_REGISTER; p->iTable = iReg; @@ -103663,7 +105260,7 @@ expr_code_doover: ** Expr node to be passed into this function, it will be handled ** sanely and not crash. But keep the assert() to bring the problem ** to the attention of the developers. 
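The sqlite3CodeSubselect() hunks above hoist the EP_Subrtn framing out of the !EP_VarSelect branch, so every EXISTS/SELECT body is coded exactly once behind an OP_Gosub/OP_Return pair and later references jump to it; only the caching OP_Once remains conditional. A toy of just the gosub/return shape (MiniVm is invented; a correlated subquery re-runs the body on every call, which is what this models):

#include <stdio.h>

typedef struct MiniVm { int regReturn; int result; int nRuns; } MiniVm;

static void subquery_body(MiniVm *v){   /* emitted exactly once */
  v->nRuns++;
  v->result = 42;
}

/* OP_Gosub saves a return address, the body runs, OP_Return jumps back. */
static int gosub(MiniVm *v, int returnAddr){
  v->regReturn = returnAddr;
  subquery_body(v);
  return v->regReturn;
}

int main(void){
  MiniVm vm = {0, 0, 0};
  gosub(&vm, 10);                       /* first use runs the body */
  gosub(&vm, 20);                       /* reuse: same body, new return */
  printf("result=%d runs=%d\n", vm.result, vm.nRuns);
  return 0;
}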
*/ - assert( op==TK_NULL ); + assert( op==TK_NULL || op==TK_ERROR || pParse->db->mallocFailed ); sqlite3VdbeAddOp2(v, OP_Null, 0, target); return target; } @@ -103729,8 +105326,9 @@ expr_code_doover: }else{ r1 = sqlite3ExprCodeTemp(pParse, pLeft, ®Free1); r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); - codeCompare(pParse, pLeft, pExpr->pRight, op, - r1, r2, inReg, SQLITE_STOREP2 | p5, + sqlite3VdbeAddOp2(v, OP_Integer, 1, inReg); + codeCompare(pParse, pLeft, pExpr->pRight, op, r1, r2, + sqlite3VdbeCurrentAddr(v)+2, p5, ExprHasProperty(pExpr,EP_Commuted)); assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt); assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le); @@ -103738,6 +105336,11 @@ expr_code_doover: assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge); assert(TK_EQ==OP_Eq); testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq); assert(TK_NE==OP_Ne); testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne); + if( p5==SQLITE_NULLEQ ){ + sqlite3VdbeAddOp2(v, OP_Integer, 0, inReg); + }else{ + sqlite3VdbeAddOp3(v, OP_ZeroOrNull, r1, inReg, r2); + } testcase( regFree1==0 ); testcase( regFree2==0 ); } @@ -104000,7 +105603,8 @@ expr_code_doover: if( pExpr->pLeft->iTable==0 ){ pExpr->pLeft->iTable = sqlite3CodeSubselect(pParse, pExpr->pLeft); } - assert( pExpr->iTable==0 || pExpr->pLeft->op==TK_SELECT ); + assert( pExpr->iTable==0 || pExpr->pLeft->op==TK_SELECT + || pExpr->pLeft->op==TK_ERROR ); if( pExpr->iTable!=0 && pExpr->iTable!=(n = sqlite3ExprVectorSize(pExpr->pLeft)) ){ @@ -104324,6 +105928,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse *pParse, Expr *pExpr, int *pReg){ int r2; pExpr = sqlite3ExprSkipCollateAndLikely(pExpr); if( ConstFactorOk(pParse) + && ALWAYS(pExpr!=0) && pExpr->op!=TK_REGISTER && sqlite3ExprIsConstantNotJoin(pExpr) ){ @@ -105479,8 +107084,7 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ pExpr = sqlite3ExprDup(db, pExpr, 0); if( pExpr ){ pAggInfo->aCol[iAgg].pCExpr = pExpr; - pParse->pConstExpr = - sqlite3ExprListAppend(pParse, pParse->pConstExpr, pExpr); + sqlite3ExprDeferredDelete(pParse, pExpr); } } }else{ @@ -105489,8 +107093,7 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ pExpr = sqlite3ExprDup(db, pExpr, 0); if( pExpr ){ pAggInfo->aFunc[iAgg].pFExpr = pExpr; - pParse->pConstExpr = - sqlite3ExprListAppend(pParse, pParse->pConstExpr, pExpr); + sqlite3ExprDeferredDelete(pParse, pExpr); } } } @@ -105562,7 +107165,7 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ /* Check to see if the column is in one of the tables in the FROM ** clause of the aggregate query */ if( ALWAYS(pSrcList!=0) ){ - struct SrcList_item *pItem = pSrcList->a; + SrcItem *pItem = pSrcList->a; for(i=0; inSrc; i++, pItem++){ struct AggInfo_col *pCol; assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) ); @@ -105633,6 +107236,7 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ */ struct AggInfo_func *pItem = pAggInfo->aFunc; for(i=0; inFunc; i++, pItem++){ + if( pItem->pFExpr==pExpr ) break; if( sqlite3ExprCompare(0, pItem->pFExpr, pExpr, -1)==0 ){ break; } @@ -105833,6 +107437,7 @@ SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse *pParse, int iFirst, int iLast){ static int isAlterableTable(Parse *pParse, Table *pTab){ if( 0==sqlite3StrNICmp(pTab->zName, "sqlite_", 7) #ifndef SQLITE_OMIT_VIRTUALTABLE + || (pTab->tabFlags & TF_Eponymous)!=0 || ( (pTab->tabFlags & TF_Shadow)!=0 && sqlite3ReadOnlyShadowTables(pParse->db) ) @@ -105851,15 +107456,22 @@ static int 
isAlterableTable(Parse *pParse, Table *pTab){ ** statement to ensure that the operation has not rendered any schema ** objects unusable. */ -static void renameTestSchema(Parse *pParse, const char *zDb, int bTemp){ +static void renameTestSchema( + Parse *pParse, /* Parse context */ + const char *zDb, /* Name of db to verify schema of */ + int bTemp, /* True if this is the temp db */ + const char *zWhen, /* "when" part of error message */ + int bNoDQS /* Do not allow DQS in the schema */ +){ + pParse->colNamesSet = 1; sqlite3NestedParse(pParse, "SELECT 1 " "FROM \"%w\"." DFLT_SCHEMA_TABLE " " "WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X'" " AND sql NOT LIKE 'create virtual%%'" - " AND sqlite_rename_test(%Q, sql, type, name, %d)=NULL ", + " AND sqlite_rename_test(%Q, sql, type, name, %d, %Q, %d)=NULL ", zDb, - zDb, bTemp + zDb, bTemp, zWhen, bNoDQS ); if( bTemp==0 ){ @@ -105868,8 +107480,32 @@ static void renameTestSchema(Parse *pParse, const char *zDb, int bTemp){ "FROM temp." DFLT_SCHEMA_TABLE " " "WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X'" " AND sql NOT LIKE 'create virtual%%'" - " AND sqlite_rename_test(%Q, sql, type, name, 1)=NULL ", - zDb + " AND sqlite_rename_test(%Q, sql, type, name, 1, %Q, %d)=NULL ", + zDb, zWhen, bNoDQS + ); + } +} + +/* +** Generate VM code to replace any double-quoted strings (but not double-quoted +** identifiers) within the "sql" column of the sqlite_schema table in +** database zDb with their single-quoted equivalents. If argument bTemp is +** not true, similarly update all SQL statements in the sqlite_schema table +** of the temp db. +*/ +static void renameFixQuotes(Parse *pParse, const char *zDb, int bTemp){ + sqlite3NestedParse(pParse, + "UPDATE \"%w\"." DFLT_SCHEMA_TABLE + " SET sql = sqlite_rename_quotefix(%Q, sql)" + "WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X'" + " AND sql NOT LIKE 'create virtual%%'" , zDb, zDb + ); + if( bTemp==0 ){ + sqlite3NestedParse(pParse, + "UPDATE temp." DFLT_SCHEMA_TABLE + " SET sql = sqlite_rename_quotefix('temp', sql)" + "WHERE name NOT LIKE 'sqliteX_%%' ESCAPE 'X'" + " AND sql NOT LIKE 'create virtual%%'" ); } } @@ -105878,12 +107514,12 @@ static void renameTestSchema(Parse *pParse, const char *zDb, int bTemp){ ** Generate code to reload the schema for database iDb. And, if iDb!=1, for ** the temp database as well. 
*/ -static void renameReloadSchema(Parse *pParse, int iDb){ +static void renameReloadSchema(Parse *pParse, int iDb, u16 p5){ Vdbe *v = pParse->pVdbe; if( v ){ sqlite3ChangeCookie(pParse, iDb); - sqlite3VdbeAddParseSchemaOp(pParse->pVdbe, iDb, 0); - if( iDb!=1 ) sqlite3VdbeAddParseSchemaOp(pParse->pVdbe, 1, 0); + sqlite3VdbeAddParseSchemaOp(pParse->pVdbe, iDb, 0, p5); + if( iDb!=1 ) sqlite3VdbeAddParseSchemaOp(pParse->pVdbe, 1, 0, p5); } } @@ -106032,7 +107668,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable( "sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), " "tbl_name = " "CASE WHEN tbl_name=%Q COLLATE nocase AND " - " sqlite_rename_test(%Q, sql, type, name, 1) " + " sqlite_rename_test(%Q, sql, type, name, 1, 'after rename', 0) " "THEN %Q ELSE tbl_name END " "WHERE type IN ('view', 'trigger')" , zDb, zTabName, zName, zTabName, zDb, zName); @@ -106051,8 +107687,8 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable( } #endif - renameReloadSchema(pParse, iDb); - renameTestSchema(pParse, zDb, iDb==1); + renameReloadSchema(pParse, iDb, INITFLAG_AlterRename); + renameTestSchema(pParse, zDb, iDb==1, "after rename", 0); exit_rename_table: sqlite3SrcListDelete(db, pSrc); @@ -106183,11 +107819,14 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ *zEnd-- = '\0'; } db->mDbFlags |= DBFLAG_PreferBuiltin; + /* substr() operations on characters, but addColOffset is in bytes. So we + ** have to use printf() to translate between these units: */ sqlite3NestedParse(pParse, "UPDATE \"%w\"." DFLT_SCHEMA_TABLE " SET " - "sql = substr(sql,1,%d) || ', ' || %Q || substr(sql,%d) " + "sql = printf('%%.%ds, ',sql) || %Q" + " || substr(sql,1+length(printf('%%.%ds',sql))) " "WHERE type = 'table' AND name = %Q", - zDb, pNew->addColOffset, zCol, pNew->addColOffset+1, + zDb, pNew->addColOffset, zCol, pNew->addColOffset, zTab ); sqlite3DbFree(db, zCol); @@ -106211,7 +107850,7 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ } /* Reload the table definition */ - renameReloadSchema(pParse, iDb); + renameReloadSchema(pParse, iDb, INITFLAG_AlterRename); } /* @@ -106311,7 +107950,7 @@ exit_begin_add_column: ** Or, if pTab is not a view or virtual table, zero is returned. */ #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) -static int isRealTable(Parse *pParse, Table *pTab){ +static int isRealTable(Parse *pParse, Table *pTab, int bDrop){ const char *zType = 0; #ifndef SQLITE_OMIT_VIEW if( pTab->pSelect ){ @@ -106324,15 +107963,16 @@ static int isRealTable(Parse *pParse, Table *pTab){ } #endif if( zType ){ - sqlite3ErrorMsg( - pParse, "cannot rename columns of %s \"%s\"", zType, pTab->zName + sqlite3ErrorMsg(pParse, "cannot %s %s \"%s\"", + (bDrop ? 
"drop column from" : "rename columns of"), + zType, pTab->zName ); return 1; } return 0; } #else /* !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) */ -# define isRealTable(x,y) (0) +# define isRealTable(x,y,z) (0) #endif /* @@ -106361,7 +108001,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameColumn( /* Cannot alter a system table */ if( SQLITE_OK!=isAlterableTable(pParse, pTab) ) goto exit_rename_column; - if( SQLITE_OK!=isRealTable(pParse, pTab) ) goto exit_rename_column; + if( SQLITE_OK!=isRealTable(pParse, pTab, 0) ) goto exit_rename_column; /* Which schema holds the table to be altered */ iSchema = sqlite3SchemaToIndex(db, pTab->pSchema); @@ -106387,6 +108027,10 @@ SQLITE_PRIVATE void sqlite3AlterRenameColumn( goto exit_rename_column; } + /* Ensure the schema contains no double-quoted strings */ + renameTestSchema(pParse, zDb, iSchema==1, "", 0); + renameFixQuotes(pParse, zDb, iSchema==1); + /* Do the rename operation using a recursive UPDATE statement that ** uses the sqlite_rename_column() SQL function to compute the new ** CREATE statement text for the sqlite_schema table. @@ -106415,8 +108059,8 @@ SQLITE_PRIVATE void sqlite3AlterRenameColumn( ); /* Drop and reload the database schema. */ - renameReloadSchema(pParse, iSchema); - renameTestSchema(pParse, zDb, iSchema==1); + renameReloadSchema(pParse, iSchema, INITFLAG_AlterRename); + renameTestSchema(pParse, zDb, iSchema==1, "after rename", 1); exit_rename_column: sqlite3SrcListDelete(db, pSrc); @@ -106562,15 +108206,30 @@ static int renameUnmapExprCb(Walker *pWalker, Expr *pExpr){ static void renameWalkWith(Walker *pWalker, Select *pSelect){ With *pWith = pSelect->pWith; if( pWith ){ + Parse *pParse = pWalker->pParse; int i; + With *pCopy = 0; + assert( pWith->nCte>0 ); + if( (pWith->a[0].pSelect->selFlags & SF_Expanded)==0 ){ + /* Push a copy of the With object onto the with-stack. We use a copy + ** here as the original will be expanded and resolved (flags SF_Expanded + ** and SF_Resolved) below. And the parser code that uses the with-stack + ** fails if the Select objects on it have already been expanded and + ** resolved. */ + pCopy = sqlite3WithDup(pParse->db, pWith); + pCopy = sqlite3WithPush(pParse, pCopy, 1); + } for(i=0; inCte; i++){ Select *p = pWith->a[i].pSelect; NameContext sNC; memset(&sNC, 0, sizeof(sNC)); - sNC.pParse = pWalker->pParse; - sqlite3SelectPrep(sNC.pParse, p, &sNC); + sNC.pParse = pParse; + if( pCopy ) sqlite3SelectPrep(sNC.pParse, p, &sNC); sqlite3WalkSelect(pWalker, p); - sqlite3RenameExprlistUnmap(pWalker->pParse, pWith->a[i].pCols); + sqlite3RenameExprlistUnmap(pParse, pWith->a[i].pCols); + } + if( pCopy && pParse->pWith==pCopy ){ + pParse->pWith = pCopy->pOuter; } } } @@ -106597,7 +108256,11 @@ static int renameUnmapSelectCb(Walker *pWalker, Select *p){ Parse *pParse = pWalker->pParse; int i; if( pParse->nErr ) return WRC_Abort; - if( NEVER(p->selFlags & SF_View) ) return WRC_Prune; + if( p->selFlags & (SF_View|SF_CopyCte) ){ + testcase( p->selFlags & SF_View ); + testcase( p->selFlags & SF_CopyCte ); + return WRC_Prune; + } if( ALWAYS(p->pEList) ){ ExprList *pList = p->pEList; for(i=0; inExpr; i++){ @@ -106668,23 +108331,35 @@ static void renameTokenFree(sqlite3 *db, RenameToken *pToken){ /* ** Search the Parse object passed as the first argument for a RenameToken -** object associated with parse tree element pPtr. If found, remove it -** from the Parse object and add it to the list maintained by the -** RenameCtx object passed as the second argument. 
+** object associated with parse tree element pPtr. If found, return a pointer +** to it. Otherwise, return NULL. +** +** If the second argument passed to this function is not NULL and a matching +** RenameToken object is found, remove it from the Parse object and add it to +** the list maintained by the RenameCtx object. */ -static void renameTokenFind(Parse *pParse, struct RenameCtx *pCtx, void *pPtr){ +static RenameToken *renameTokenFind( + Parse *pParse, + struct RenameCtx *pCtx, + void *pPtr +){ RenameToken **pp; - assert( pPtr!=0 ); + if( NEVER(pPtr==0) ){ + return 0; + } for(pp=&pParse->pRename; (*pp); pp=&(*pp)->pNext){ if( (*pp)->p==pPtr ){ RenameToken *pToken = *pp; - *pp = pToken->pNext; - pToken->pNext = pCtx->pList; - pCtx->pList = pToken; - pCtx->nList++; - break; + if( pCtx ){ + *pp = pToken->pNext; + pToken->pNext = pCtx->pList; + pCtx->pList = pToken; + pCtx->nList++; + } + return pToken; } } + return 0; } /* @@ -106693,7 +108368,11 @@ static void renameTokenFind(Parse *pParse, struct RenameCtx *pCtx, void *pPtr){ ** descend into sub-select statements. */ static int renameColumnSelectCb(Walker *pWalker, Select *p){ - if( p->selFlags & SF_View ) return WRC_Prune; + if( p->selFlags & (SF_View|SF_CopyCte) ){ + testcase( p->selFlags & SF_View ); + testcase( p->selFlags & SF_CopyCte ); + return WRC_Prune; + } renameWalkWith(pWalker, p); return WRC_Continue; } @@ -106755,7 +108434,7 @@ static RenameToken *renameColumnTokenNext(RenameCtx *pCtx){ */ static void renameColumnParseError( sqlite3_context *pCtx, - int bPost, + const char *zWhen, sqlite3_value *pType, sqlite3_value *pObject, Parse *pParse @@ -106764,8 +108443,8 @@ static void renameColumnParseError( const char *zN = (const char*)sqlite3_value_text(pObject); char *zErr; - zErr = sqlite3_mprintf("error in %s %s%s: %s", - zT, zN, (bPost ? " after rename" : ""), + zErr = sqlite3_mprintf("error in %s %s%s%s: %s", + zT, zN, (zWhen[0] ? " " : ""), zWhen, pParse->zErrMsg ); sqlite3_result_error(pCtx, zErr, -1); @@ -106844,7 +108523,7 @@ static int renameParseSql( p->eParseMode = PARSE_MODE_RENAME; p->db = db; p->nQueryLoop = 1; - rc = sqlite3RunParser(p, zSql, &zErr); + rc = zSql ? sqlite3RunParser(p, zSql, &zErr) : SQLITE_NOMEM; assert( p->zErrMsg==0 ); assert( rc!=SQLITE_OK || zErr==0 ); p->zErrMsg = zErr; @@ -106887,51 +108566,76 @@ static int renameEditSql( const char *zNew, /* New token text */ int bQuote /* True to always quote token */ ){ - int nNew = sqlite3Strlen30(zNew); - int nSql = sqlite3Strlen30(zSql); + i64 nNew = sqlite3Strlen30(zNew); + i64 nSql = sqlite3Strlen30(zSql); sqlite3 *db = sqlite3_context_db_handle(pCtx); int rc = SQLITE_OK; - char *zQuot; + char *zQuot = 0; char *zOut; - int nQuot; - - /* Set zQuot to point to a buffer containing a quoted copy of the - ** identifier zNew. If the corresponding identifier in the original - ** ALTER TABLE statement was quoted (bQuote==1), then set zNew to - ** point to zQuot so that all substitutions are made using the - ** quoted version of the new column name. */ - zQuot = sqlite3MPrintf(db, "\"%w\"", zNew); - if( zQuot==0 ){ - return SQLITE_NOMEM; + i64 nQuot = 0; + char *zBuf1 = 0; + char *zBuf2 = 0; + + if( zNew ){ + /* Set zQuot to point to a buffer containing a quoted copy of the + ** identifier zNew. If the corresponding identifier in the original + ** ALTER TABLE statement was quoted (bQuote==1), then set zNew to + ** point to zQuot so that all substitutions are made using the + ** quoted version of the new column name. 
*/ + zQuot = sqlite3MPrintf(db, "\"%w\" ", zNew); + if( zQuot==0 ){ + return SQLITE_NOMEM; + }else{ + nQuot = sqlite3Strlen30(zQuot)-1; + } + + assert( nQuot>=nNew ); + zOut = sqlite3DbMallocZero(db, nSql + pRename->nList*nQuot + 1); }else{ - nQuot = sqlite3Strlen30(zQuot); - } - if( bQuote ){ - zNew = zQuot; - nNew = nQuot; + zOut = (char*)sqlite3DbMallocZero(db, (nSql*2+1) * 3); + if( zOut ){ + zBuf1 = &zOut[nSql*2+1]; + zBuf2 = &zOut[nSql*4+2]; + } } /* At this point pRename->pList contains a list of RenameToken objects ** corresponding to all tokens in the input SQL that must be replaced - ** with the new column name. All that remains is to construct and - ** return the edited SQL string. */ - assert( nQuot>=nNew ); - zOut = sqlite3DbMallocZero(db, nSql + pRename->nList*nQuot + 1); + ** with the new column name, or with single-quoted versions of themselves. + ** All that remains is to construct and return the edited SQL string. */ if( zOut ){ int nOut = nSql; memcpy(zOut, zSql, nSql); while( pRename->pList ){ int iOff; /* Offset of token to replace in zOut */ - RenameToken *pBest = renameColumnTokenNext(pRename); - u32 nReplace; const char *zReplace; - if( sqlite3IsIdChar(*pBest->t.z) ){ - nReplace = nNew; - zReplace = zNew; + RenameToken *pBest = renameColumnTokenNext(pRename); + + if( zNew ){ + if( bQuote==0 && sqlite3IsIdChar(*pBest->t.z) ){ + nReplace = nNew; + zReplace = zNew; + }else{ + nReplace = nQuot; + zReplace = zQuot; + if( pBest->t.z[pBest->t.n]=='"' ) nReplace++; + } }else{ - nReplace = nQuot; - zReplace = zQuot; + /* Dequote the double-quoted token. Then requote it again, this time + ** using single quotes. If the character immediately following the + ** original token within the input SQL was a single quote ('), then + ** add another space after the new, single-quoted version of the + ** token. This is so that (SELECT "string"'alias') maps to + ** (SELECT 'string' 'alias'), and not (SELECT 'string''alias'). */ + memcpy(zBuf1, pBest->t.z, pBest->t.n); + zBuf1[pBest->t.n] = 0; + sqlite3Dequote(zBuf1); + sqlite3_snprintf(nSql*2, zBuf2, "%Q%s", zBuf1, + pBest->t.z[pBest->t.n]=='\'' ? 
" " : "" + ); + zReplace = zBuf2; + nReplace = sqlite3Strlen30(zReplace); } iOff = pBest->t.z - zSql; @@ -106997,14 +108701,22 @@ static int renameResolveTrigger(Parse *pParse){ if( pSrc ){ int i; for(i=0; inSrc && rc==SQLITE_OK; i++){ - struct SrcList_item *p = &pSrc->a[i]; - p->pTab = sqlite3LocateTableItem(pParse, 0, p); + SrcItem *p = &pSrc->a[i]; p->iCursor = pParse->nTab++; - if( p->pTab==0 ){ - rc = SQLITE_ERROR; + if( p->pSelect ){ + sqlite3SelectPrep(pParse, p->pSelect, 0); + sqlite3ExpandSubquery(pParse, p); + assert( i>0 ); + assert( pStep->pFrom->a[i-1].pSelect ); + sqlite3SelectPrep(pParse, pStep->pFrom->a[i-1].pSelect, 0); }else{ - p->pTab->nTabRef++; - rc = sqlite3ViewGetColumnNames(pParse, p->pTab); + p->pTab = sqlite3LocateTableItem(pParse, 0, p); + if( p->pTab==0 ){ + rc = SQLITE_ERROR; + }else{ + p->pTab->nTabRef++; + rc = sqlite3ViewGetColumnNames(pParse, p->pTab); + } } } sNC.pSrcList = pSrc; @@ -107015,9 +108727,8 @@ static int renameResolveTrigger(Parse *pParse){ rc = sqlite3ResolveExprListNames(&sNC, pStep->pExprList); } assert( !pStep->pUpsert || (!pStep->pWhere && !pStep->pExprList) ); - if( pStep->pUpsert ){ + if( pStep->pUpsert && rc==SQLITE_OK ){ Upsert *pUpsert = pStep->pUpsert; - assert( rc==SQLITE_OK ); pUpsert->pUpsertSrc = pSrc; sNC.uNC.pUpsert = pUpsert; sNC.ncFlags = NC_UUpsert; @@ -107066,6 +108777,12 @@ static void renameWalkTrigger(Walker *pWalker, Trigger *pTrigger){ sqlite3WalkExpr(pWalker, pUpsert->pUpsertWhere); sqlite3WalkExpr(pWalker, pUpsert->pUpsertTargetWhere); } + if( pStep->pFrom ){ + int i; + for(i=0; ipFrom->nSrc; i++){ + sqlite3WalkSelect(pWalker, pStep->pFrom->a[i].pSelect); + } + } } } @@ -107185,9 +108902,11 @@ static void renameColumnFunc( assert( sParse.pNewTable->pSelect==0 ); sCtx.pTab = sParse.pNewTable; if( bFKOnly==0 ){ - renameTokenFind( - &sParse, &sCtx, (void*)sParse.pNewTable->aCol[iCol].zName - ); + if( iColnCol ){ + renameTokenFind( + &sParse, &sCtx, (void*)sParse.pNewTable->aCol[iCol].zName + ); + } if( sCtx.iCol<0 ){ renameTokenFind(&sParse, &sCtx, (void*)&sParse.pNewTable->iPKey); } @@ -107198,12 +108917,12 @@ static void renameColumnFunc( for(pIdx=sParse.pNewIndex; pIdx; pIdx=pIdx->pNext){ sqlite3WalkExprList(&sWalker, pIdx->aColExpr); } - } #ifndef SQLITE_OMIT_GENERATED_COLUMNS - for(i=0; inCol; i++){ - sqlite3WalkExpr(&sWalker, sParse.pNewTable->aCol[i].pDflt); - } + for(i=0; inCol; i++){ + sqlite3WalkExpr(&sWalker, sParse.pNewTable->aCol[i].pDflt); + } #endif + } for(pFKey=sParse.pNewTable->pFKey; pFKey; pFKey=pFKey->pNextFrom){ for(i=0; inCol; i++){ @@ -107257,7 +108976,7 @@ static void renameColumnFunc( renameColumnFunc_done: if( rc!=SQLITE_OK ){ if( sParse.zErrMsg ){ - renameColumnParseError(context, 0, argv[1], argv[2], &sParse); + renameColumnParseError(context, "", argv[1], argv[2], &sParse); }else{ sqlite3_result_error_code(context, rc); } @@ -107289,13 +109008,17 @@ static int renameTableSelectCb(Walker *pWalker, Select *pSelect){ int i; RenameCtx *p = pWalker->u.pRename; SrcList *pSrc = pSelect->pSrc; - if( pSelect->selFlags & SF_View ) return WRC_Prune; - if( pSrc==0 ){ + if( pSelect->selFlags & (SF_View|SF_CopyCte) ){ + testcase( pSelect->selFlags & SF_View ); + testcase( pSelect->selFlags & SF_CopyCte ); + return WRC_Prune; + } + if( NEVER(pSrc==0) ){ assert( pWalker->pParse->db->mallocFailed ); return WRC_Abort; } for(i=0; inSrc; i++){ - struct SrcList_item *pItem = &pSrc->a[i]; + SrcItem *pItem = &pSrc->a[i]; if( pItem->pTab==p->pTab ){ renameTokenFind(pWalker->pParse, p, pItem->zName); } @@ -107446,7 
+109169,7 @@ static void renameTableFunc( } if( rc!=SQLITE_OK ){ if( sParse.zErrMsg ){ - renameColumnParseError(context, 0, argv[1], argv[2], &sParse); + renameColumnParseError(context, "", argv[1], argv[2], &sParse); }else{ sqlite3_result_error_code(context, rc); } @@ -107463,6 +109186,119 @@ static void renameTableFunc( return; } +static int renameQuotefixExprCb(Walker *pWalker, Expr *pExpr){ + if( pExpr->op==TK_STRING && (pExpr->flags & EP_DblQuoted) ){ + renameTokenFind(pWalker->pParse, pWalker->u.pRename, (void*)pExpr); + } + return WRC_Continue; +} + +/* +** The implementation of an SQL scalar function that rewrites DDL statements +** so that any string literals that use double-quotes are modified so that +** they use single quotes. +** +** Two arguments must be passed: +** +** 0: Database name ("main", "temp" etc.). +** 1: SQL statement to edit. +** +** The returned value is the modified SQL statement. For example, given +** the database schema: +** +** CREATE TABLE t1(a, b, c); +** +** SELECT sqlite_rename_quotefix('main', +** 'CREATE VIEW v1 AS SELECT "a", "string" FROM t1' +** ); +** +** returns the string: +** +** CREATE VIEW v1 AS SELECT "a", 'string' FROM t1 +*/ +static void renameQuotefixFunc( + sqlite3_context *context, + int NotUsed, + sqlite3_value **argv +){ + sqlite3 *db = sqlite3_context_db_handle(context); + char const *zDb = (const char*)sqlite3_value_text(argv[0]); + char const *zInput = (const char*)sqlite3_value_text(argv[1]); + +#ifndef SQLITE_OMIT_AUTHORIZATION + sqlite3_xauth xAuth = db->xAuth; + db->xAuth = 0; +#endif + + sqlite3BtreeEnterAll(db); + + UNUSED_PARAMETER(NotUsed); + if( zDb && zInput ){ + int rc; + Parse sParse; + rc = renameParseSql(&sParse, zDb, db, zInput, 0); + + if( rc==SQLITE_OK ){ + RenameCtx sCtx; + Walker sWalker; + + /* Walker to find tokens that need to be replaced. */ + memset(&sCtx, 0, sizeof(RenameCtx)); + memset(&sWalker, 0, sizeof(Walker)); + sWalker.pParse = &sParse; + sWalker.xExprCallback = renameQuotefixExprCb; + sWalker.xSelectCallback = renameColumnSelectCb; + sWalker.u.pRename = &sCtx; + + if( sParse.pNewTable ){ + Select *pSelect = sParse.pNewTable->pSelect; + if( pSelect ){ + pSelect->selFlags &= ~SF_View; + sParse.rc = SQLITE_OK; + sqlite3SelectPrep(&sParse, pSelect, 0); + rc = (db->mallocFailed ? SQLITE_NOMEM : sParse.rc); + if( rc==SQLITE_OK ){ + sqlite3WalkSelect(&sWalker, pSelect); + } + }else{ + int i; + sqlite3WalkExprList(&sWalker, sParse.pNewTable->pCheck); +#ifndef SQLITE_OMIT_GENERATED_COLUMNS + for(i=0; inCol; i++){ + sqlite3WalkExpr(&sWalker, sParse.pNewTable->aCol[i].pDflt); + } +#endif /* SQLITE_OMIT_GENERATED_COLUMNS */ + } + }else if( sParse.pNewIndex ){ + sqlite3WalkExprList(&sWalker, sParse.pNewIndex->aColExpr); + sqlite3WalkExpr(&sWalker, sParse.pNewIndex->pPartIdxWhere); + }else{ +#ifndef SQLITE_OMIT_TRIGGER + rc = renameResolveTrigger(&sParse); + if( rc==SQLITE_OK ){ + renameWalkTrigger(&sWalker, sParse.pNewTrigger); + } +#endif /* SQLITE_OMIT_TRIGGER */ + } + + if( rc==SQLITE_OK ){ + rc = renameEditSql(context, &sCtx, zInput, 0, 0); + } + renameTokenFree(db, sCtx.pList); + } + if( rc!=SQLITE_OK ){ + sqlite3_result_error_code(context, rc); + } + renameParseCleanup(&sParse); + } + +#ifndef SQLITE_OMIT_AUTHORIZATION + db->xAuth = xAuth; +#endif + + sqlite3BtreeLeaveAll(db); +} + /* ** An SQL user function that checks that there are no parse or symbol ** resolution problems in a CREATE TRIGGER|TABLE|VIEW|INDEX statement. 
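sqlite_rename_quotefix(), implemented above, rewrites double-quoted string literals in schema SQL into single-quoted form while leaving genuine identifiers alone; the real work happens on parser tokens via renameEditSql(). Purely as an illustration of the textual transformation on one isolated token (requote and its single-pass scan are inventions of this sketch, not the library's API):

#include <stdio.h>
#include <string.h>

/* Rewrite one double-quoted token as a single-quoted SQL literal,
** doubling any embedded single quotes ('' is the SQL escape).
** Assumes zIn is exactly one "..." token. */
static void requote(const char *zIn, char *zOut, size_t nOut){
  size_t i, j = 0, n = strlen(zIn);
  if( n<2 || zIn[0]!='"' || zIn[n-1]!='"' || nOut<4 ){ zOut[0] = 0; return; }
  zOut[j++] = '\'';
  for(i=1; i+1<n && j+3<nOut; i++){
    if( zIn[i]=='"' && zIn[i+1]=='"' ) i++;   /* "" was an escaped quote  */
    if( zIn[i]=='\'' ) zOut[j++] = '\'';      /* '' escapes an embedded ' */
    zOut[j++] = zIn[i];
  }
  zOut[j++] = '\'';
  zOut[j] = 0;
}

int main(void){
  char out[64];
  requote("\"it's a \"\"string\"\"\"", out, sizeof(out));
  printf("%s\n", out);   /* prints: 'it''s a "string"' */
  return 0;
}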
@@ -107475,6 +109311,8 @@ static void renameTableFunc( ** 2: Object type ("view", "table", "trigger" or "index"). ** 3: Object name. ** 4: True if object is from temp schema. +** 5: "when" part of error message. +** 6: True to disable the DQS quirk when parsing SQL. ** ** Unless it finds an error, this function normally returns NULL. However, it ** returns integer value 1 if: @@ -107492,6 +109330,8 @@ static void renameTableTest( char const *zInput = (const char*)sqlite3_value_text(argv[1]); int bTemp = sqlite3_value_int(argv[4]); int isLegacy = (db->flags & SQLITE_LegacyAlter); + char const *zWhen = (const char*)sqlite3_value_text(argv[5]); + int bNoDQS = sqlite3_value_int(argv[6]); #ifndef SQLITE_OMIT_AUTHORIZATION sqlite3_xauth xAuth = db->xAuth; @@ -107499,10 +109339,14 @@ static void renameTableTest( #endif UNUSED_PARAMETER(NotUsed); + if( zDb && zInput ){ int rc; Parse sParse; + int flags = db->flags; + if( bNoDQS ) db->flags &= ~(SQLITE_DqsDML|SQLITE_DqsDDL); rc = renameParseSql(&sParse, zDb, db, zInput, bTemp); + db->flags |= (flags & (SQLITE_DqsDML|SQLITE_DqsDDL)); if( rc==SQLITE_OK ){ if( isLegacy==0 && sParse.pNewTable && sParse.pNewTable->pSelect ){ NameContext sNC; @@ -107524,8 +109368,8 @@ static void renameTableTest( } } - if( rc!=SQLITE_OK ){ - renameColumnParseError(context, 1, argv[2], argv[3], &sParse); + if( rc!=SQLITE_OK && zWhen ){ + renameColumnParseError(context, zWhen, argv[2], argv[3],&sParse); } renameParseCleanup(&sParse); } @@ -107535,14 +109379,219 @@ static void renameTableTest( #endif } +/* +** The implementation of internal UDF sqlite_drop_column(). +** +** Arguments: +** +** argv[0]: An integer - the index of the schema containing the table +** argv[1]: CREATE TABLE statement to modify. +** argv[2]: An integer - the index of the column to remove. +** +** The value returned is a string containing the CREATE TABLE statement +** with column argv[2] removed. 
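dropColumnFunc(), described above, edits the CREATE TABLE text by keeping everything before the dropped column's name token and resuming at the next column's name token, via a "%.*s%s" splice. A hedged miniature of that splice on one fixed literal (drop_column_text is invented, and the offsets are located with strchr by eye rather than by RenameToken positions from a real parse):

#include <stdio.h>
#include <string.h>

/* Keep [0,iStart) of zSql and resume at zSql+iEnd, exactly the
** "%.*s%s" shape used by the function above.  Assumes a flat column
** list with no constraints between the two offsets. */
static void drop_column_text(const char *zSql, int iStart, int iEnd,
                             char *zOut, size_t nOut){
  snprintf(zOut, nOut, "%.*s%s", iStart, zSql, zSql+iEnd);
}

int main(void){
  const char *zSql = "CREATE TABLE t1(a INTEGER, b TEXT, c REAL)";
  char zOut[64];
  int iStart = (int)(strchr(zSql, 'b') - zSql);  /* name of dropped column */
  int iEnd   = (int)(strchr(zSql, 'c') - zSql);  /* name of next column    */
  drop_column_text(zSql, iStart, iEnd, zOut, sizeof(zOut));
  printf("%s\n", zOut);   /* CREATE TABLE t1(a INTEGER, c REAL) */
  return 0;
}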
+*/ +static void dropColumnFunc( + sqlite3_context *context, + int NotUsed, + sqlite3_value **argv +){ + sqlite3 *db = sqlite3_context_db_handle(context); + int iSchema = sqlite3_value_int(argv[0]); + const char *zSql = (const char*)sqlite3_value_text(argv[1]); + int iCol = sqlite3_value_int(argv[2]); + const char *zDb = db->aDb[iSchema].zDbSName; + int rc; + Parse sParse; + RenameToken *pCol; + Table *pTab; + const char *zEnd; + char *zNew = 0; + +#ifndef SQLITE_OMIT_AUTHORIZATION + sqlite3_xauth xAuth = db->xAuth; + db->xAuth = 0; +#endif + + UNUSED_PARAMETER(NotUsed); + rc = renameParseSql(&sParse, zDb, db, zSql, iSchema==1); + if( rc!=SQLITE_OK ) goto drop_column_done; + pTab = sParse.pNewTable; + if( pTab==0 || pTab->nCol==1 || iCol>=pTab->nCol ){ + /* This can happen if the sqlite_schema table is corrupt */ + rc = SQLITE_CORRUPT_BKPT; + goto drop_column_done; + } + + pCol = renameTokenFind(&sParse, 0, (void*)pTab->aCol[iCol].zName); + if( iCol<pTab->nCol-1 ){ + RenameToken *pEnd; + pEnd = renameTokenFind(&sParse, 0, (void*)pTab->aCol[iCol+1].zName); + zEnd = (const char*)pEnd->t.z; + }else{ + zEnd = (const char*)&zSql[pTab->addColOffset]; + while( ALWAYS(pCol->t.z[0]!=0) && pCol->t.z[0]!=',' ) pCol->t.z--; + } + + zNew = sqlite3MPrintf(db, "%.*s%s", pCol->t.z-zSql, zSql, zEnd); + sqlite3_result_text(context, zNew, -1, SQLITE_TRANSIENT); + sqlite3_free(zNew); + +drop_column_done: + renameParseCleanup(&sParse); +#ifndef SQLITE_OMIT_AUTHORIZATION + db->xAuth = xAuth; +#endif + if( rc!=SQLITE_OK ){ + sqlite3_result_error_code(context, rc); + } +} + +/* +** This function is called by the parser upon parsing an +** +** ALTER TABLE pSrc DROP COLUMN pName +** +** statement. Argument pSrc contains the possibly qualified name of the +** table being edited, and token pName the name of the column to drop. +*/ +SQLITE_PRIVATE void sqlite3AlterDropColumn(Parse *pParse, SrcList *pSrc, Token *pName){ + sqlite3 *db = pParse->db; /* Database handle */ + Table *pTab; /* Table to modify */ + int iDb; /* Index of db containing pTab in aDb[] */ + const char *zDb; /* Database containing pTab ("main" etc.) */ + char *zCol = 0; /* Name of column to drop */ + int iCol; /* Index of column zCol in pTab->aCol[] */ + + /* Look up the table being altered. */ + assert( pParse->pNewTable==0 ); + assert( sqlite3BtreeHoldsAllMutexes(db) ); + if( NEVER(db->mallocFailed) ) goto exit_drop_column; + pTab = sqlite3LocateTableItem(pParse, 0, &pSrc->a[0]); + if( !pTab ) goto exit_drop_column; + + /* Make sure this is not an attempt to ALTER a view, virtual table or + ** system table. */ + if( SQLITE_OK!=isAlterableTable(pParse, pTab) ) goto exit_drop_column; + if( SQLITE_OK!=isRealTable(pParse, pTab, 1) ) goto exit_drop_column; + + /* Find the index of the column being dropped. */ + zCol = sqlite3NameFromToken(db, pName); + if( zCol==0 ){ + assert( db->mallocFailed ); + goto exit_drop_column; + } + iCol = sqlite3ColumnIndex(pTab, zCol); + if( iCol<0 ){ + sqlite3ErrorMsg(pParse, "no such column: \"%s\"", zCol); + goto exit_drop_column; + } + + /* Do not allow the user to drop a PRIMARY KEY column or a column + ** constrained by a UNIQUE constraint. */ + if( pTab->aCol[iCol].colFlags & (COLFLAG_PRIMKEY|COLFLAG_UNIQUE) ){ + sqlite3ErrorMsg(pParse, "cannot drop %s column: \"%s\"", + (pTab->aCol[iCol].colFlags&COLFLAG_PRIMKEY) ?
"PRIMARY KEY" : "UNIQUE", + zCol + ); + goto exit_drop_column; + } + + /* Do not allow the number of columns to go to zero */ + if( pTab->nCol<=1 ){ + sqlite3ErrorMsg(pParse, "cannot drop column \"%s\": no other columns exist",zCol); + goto exit_drop_column; + } + + /* Edit the sqlite_schema table */ + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + assert( iDb>=0 ); + zDb = db->aDb[iDb].zDbSName; + renameTestSchema(pParse, zDb, iDb==1, "", 0); + renameFixQuotes(pParse, zDb, iDb==1); + sqlite3NestedParse(pParse, + "UPDATE \"%w\"." DFLT_SCHEMA_TABLE " SET " + "sql = sqlite_drop_column(%d, sql, %d) " + "WHERE (type=='table' AND tbl_name=%Q COLLATE nocase)" + , zDb, iDb, iCol, pTab->zName + ); + + /* Drop and reload the database schema. */ + renameReloadSchema(pParse, iDb, INITFLAG_AlterDrop); + renameTestSchema(pParse, zDb, iDb==1, "after drop column", 1); + + /* Edit rows of table on disk */ + if( pParse->nErr==0 && (pTab->aCol[iCol].colFlags & COLFLAG_VIRTUAL)==0 ){ + int i; + int addr; + int reg; + int regRec; + Index *pPk = 0; + int nField = 0; /* Number of non-virtual columns after drop */ + int iCur; + Vdbe *v = sqlite3GetVdbe(pParse); + iCur = pParse->nTab++; + sqlite3OpenTable(pParse, iCur, iDb, pTab, OP_OpenWrite); + addr = sqlite3VdbeAddOp1(v, OP_Rewind, iCur); VdbeCoverage(v); + reg = ++pParse->nMem; + if( HasRowid(pTab) ){ + sqlite3VdbeAddOp2(v, OP_Rowid, iCur, reg); + pParse->nMem += pTab->nCol; + }else{ + pPk = sqlite3PrimaryKeyIndex(pTab); + pParse->nMem += pPk->nColumn; + for(i=0; inKeyCol; i++){ + sqlite3VdbeAddOp3(v, OP_Column, iCur, i, reg+i+1); + } + nField = pPk->nKeyCol; + } + regRec = ++pParse->nMem; + for(i=0; inCol; i++){ + if( i!=iCol && (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ){ + int regOut; + if( pPk ){ + int iPos = sqlite3TableColumnToIndex(pPk, i); + int iColPos = sqlite3TableColumnToIndex(pPk, iCol); + if( iPosnKeyCol ) continue; + regOut = reg+1+iPos-(iPos>iColPos); + }else{ + regOut = reg+1+nField; + } + if( i==pTab->iPKey ){ + sqlite3VdbeAddOp2(v, OP_Null, 0, regOut); + }else{ + sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, i, regOut); + } + nField++; + } + } + sqlite3VdbeAddOp3(v, OP_MakeRecord, reg+1, nField, regRec); + if( pPk ){ + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iCur, regRec, reg+1, pPk->nKeyCol); + }else{ + sqlite3VdbeAddOp3(v, OP_Insert, iCur, regRec, reg); + } + sqlite3VdbeChangeP5(v, OPFLAG_SAVEPOSITION); + + sqlite3VdbeAddOp2(v, OP_Next, iCur, addr+1); VdbeCoverage(v); + sqlite3VdbeJumpHere(v, addr); + } + +exit_drop_column: + sqlite3DbFree(db, zCol); + sqlite3SrcListDelete(db, pSrc); +} + /* ** Register built-in functions used to help implement ALTER TABLE */ SQLITE_PRIVATE void sqlite3AlterFunctions(void){ static FuncDef aAlterTableFuncs[] = { - INTERNAL_FUNCTION(sqlite_rename_column, 9, renameColumnFunc), - INTERNAL_FUNCTION(sqlite_rename_table, 7, renameTableFunc), - INTERNAL_FUNCTION(sqlite_rename_test, 5, renameTableTest), + INTERNAL_FUNCTION(sqlite_rename_column, 9, renameColumnFunc), + INTERNAL_FUNCTION(sqlite_rename_table, 7, renameTableFunc), + INTERNAL_FUNCTION(sqlite_rename_test, 7, renameTableTest), + INTERNAL_FUNCTION(sqlite_drop_column, 3, dropColumnFunc), + INTERNAL_FUNCTION(sqlite_rename_quotefix,2, renameQuotefixFunc), }; sqlite3InsertBuiltinFuncs(aAlterTableFuncs, ArraySize(aAlterTableFuncs)); } @@ -109324,6 +111373,7 @@ static int loadStatTbl( } pSpace = (tRowcnt*)&pIdx->aSample[nSample]; pIdx->aAvgEq = pSpace; pSpace += nIdxCol; + pIdx->pTable->tabFlags |= TF_HasStat4; for(i=0; iaSample[i].anEq = pSpace; pSpace += 
nIdxCol; pIdx->aSample[i].anLt = pSpace; pSpace += nIdxCol; @@ -109592,7 +111642,7 @@ static void attachFunc( if( zFile==0 ) zFile = ""; if( zName==0 ) zName = ""; -#ifdef SQLITE_ENABLE_DESERIALIZE +#ifndef SQLITE_OMIT_DESERIALIZE # define REOPEN_AS_MEMDB(db) (db->init.reopenMemdb) #else # define REOPEN_AS_MEMDB(db) (0) @@ -109790,7 +111840,9 @@ static void detachFunc( sqlite3_snprintf(sizeof(zErr),zErr, "cannot detach database %s", zName); goto detach_error; } - if( sqlite3BtreeIsInReadTrans(pDb->pBt) || sqlite3BtreeIsInBackup(pDb->pBt) ){ + if( sqlite3BtreeTxnState(pDb->pBt)!=SQLITE_TXN_NONE + || sqlite3BtreeIsInBackup(pDb->pBt) + ){ sqlite3_snprintf(sizeof(zErr),zErr, "database %s is locked", zName); goto detach_error; } @@ -109928,6 +111980,65 @@ SQLITE_PRIVATE void sqlite3Attach(Parse *pParse, Expr *p, Expr *pDbname, Expr *p } #endif /* SQLITE_OMIT_ATTACH */ +/* +** Expression callback used by sqlite3FixAAAA() routines. +*/ +static int fixExprCb(Walker *p, Expr *pExpr){ + DbFixer *pFix = p->u.pFix; + if( !pFix->bTemp ) ExprSetProperty(pExpr, EP_FromDDL); + if( pExpr->op==TK_VARIABLE ){ + if( pFix->pParse->db->init.busy ){ + pExpr->op = TK_NULL; + }else{ + sqlite3ErrorMsg(pFix->pParse, "%s cannot use variables", pFix->zType); + return WRC_Abort; + } + } + return WRC_Continue; +} + +/* +** Select callback used by sqlite3FixAAAA() routines. +*/ +static int fixSelectCb(Walker *p, Select *pSelect){ + DbFixer *pFix = p->u.pFix; + int i; + SrcItem *pItem; + sqlite3 *db = pFix->pParse->db; + int iDb = sqlite3FindDbName(db, pFix->zDb); + SrcList *pList = pSelect->pSrc; + + if( NEVER(pList==0) ) return WRC_Continue; + for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){ + if( pFix->bTemp==0 ){ + if( pItem->zDatabase ){ + if( iDb!=sqlite3FindDbName(db, pItem->zDatabase) ){ + sqlite3ErrorMsg(pFix->pParse, + "%s %T cannot reference objects in database %s", + pFix->zType, pFix->pName, pItem->zDatabase); + return WRC_Abort; + } + sqlite3DbFree(db, pItem->zDatabase); + pItem->zDatabase = 0; + pItem->fg.notCte = 1; + } + pItem->pSchema = pFix->pSchema; + pItem->fg.fromDDL = 1; + } +#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) + if( sqlite3WalkExpr(&pFix->w, pList->a[i].pOn) ) return WRC_Abort; +#endif + } + if( pSelect->pWith ){ + for(i=0; i<pSelect->pWith->nCte; i++){ + if( sqlite3WalkSelect(p, pSelect->pWith->a[i].pSelect) ){ + return WRC_Abort; + } + } + } + return WRC_Continue; +} + /* ** Initialize a DbFixer structure. This routine must be called prior ** to passing the structure to one of the sqliteFixAAAA() routines below.
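**
** A hypothetical illustration of what the fixer enforces: a trigger
** created inside database "main" may not touch objects in another
** database, so the fixer rejects a statement such as
**
**     CREATE TRIGGER tr AFTER INSERT ON t1 BEGIN
**       DELETE FROM aux.t2;
**     END;
**
** with an error of the form "trigger tr cannot reference objects in
** database aux".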
@@ -109939,9 +112050,7 @@ SQLITE_PRIVATE void sqlite3FixInit( const char *zType, /* "view", "trigger", or "index" */ const Token *pName /* Name of the view, trigger, or index */ ){ - sqlite3 *db; - - db = pParse->db; + sqlite3 *db = pParse->db; assert( db->nDb>iDb ); pFix->pParse = pParse; pFix->zDb = db->aDb[iDb].zDbSName; @@ -109949,6 +112058,13 @@ SQLITE_PRIVATE void sqlite3FixInit( pFix->zType = zType; pFix->pName = pName; pFix->bTemp = (iDb==1); + pFix->w.pParse = pParse; + pFix->w.xExprCallback = fixExprCb; + pFix->w.xSelectCallback = fixSelectCb; + pFix->w.xSelectCallback2 = sqlite3WalkWinDefnDummyCallback; + pFix->w.walkerDepth = 0; + pFix->w.eCode = 0; + pFix->w.u.pFix = pFix; } /* @@ -109969,115 +112085,27 @@ SQLITE_PRIVATE int sqlite3FixSrcList( DbFixer *pFix, /* Context of the fixation */ SrcList *pList /* The Source list to check and modify */ ){ - int i; - struct SrcList_item *pItem; - sqlite3 *db = pFix->pParse->db; - int iDb = sqlite3FindDbName(db, pFix->zDb); - - if( NEVER(pList==0) ) return 0; - - for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){ - if( pFix->bTemp==0 ){ - if( pItem->zDatabase && iDb!=sqlite3FindDbName(db, pItem->zDatabase) ){ - sqlite3ErrorMsg(pFix->pParse, - "%s %T cannot reference objects in database %s", - pFix->zType, pFix->pName, pItem->zDatabase); - return 1; - } - sqlite3DbFree(db, pItem->zDatabase); - pItem->zDatabase = 0; - pItem->pSchema = pFix->pSchema; - pItem->fg.fromDDL = 1; - } -#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) - if( sqlite3FixSelect(pFix, pItem->pSelect) ) return 1; - if( sqlite3FixExpr(pFix, pItem->pOn) ) return 1; -#endif - if( pItem->fg.isTabFunc && sqlite3FixExprList(pFix, pItem->u1.pFuncArg) ){ - return 1; - } + int res = 0; + if( pList ){ + Select s; + memset(&s, 0, sizeof(s)); + s.pSrc = pList; + res = sqlite3WalkSelect(&pFix->w, &s); } - return 0; + return res; } #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) SQLITE_PRIVATE int sqlite3FixSelect( DbFixer *pFix, /* Context of the fixation */ Select *pSelect /* The SELECT statement to be fixed to one database */ ){ - while( pSelect ){ - if( sqlite3FixExprList(pFix, pSelect->pEList) ){ - return 1; - } - if( sqlite3FixSrcList(pFix, pSelect->pSrc) ){ - return 1; - } - if( sqlite3FixExpr(pFix, pSelect->pWhere) ){ - return 1; - } - if( sqlite3FixExprList(pFix, pSelect->pGroupBy) ){ - return 1; - } - if( sqlite3FixExpr(pFix, pSelect->pHaving) ){ - return 1; - } - if( sqlite3FixExprList(pFix, pSelect->pOrderBy) ){ - return 1; - } - if( sqlite3FixExpr(pFix, pSelect->pLimit) ){ - return 1; - } - if( pSelect->pWith ){ - int i; - for(i=0; i<pSelect->pWith->nCte; i++){ - if( sqlite3FixSelect(pFix, pSelect->pWith->a[i].pSelect) ){ - return 1; - } - } - } - pSelect = pSelect->pPrior; - } - return 0; + return sqlite3WalkSelect(&pFix->w, pSelect); } SQLITE_PRIVATE int sqlite3FixExpr( DbFixer *pFix, /* Context of the fixation */ Expr *pExpr /* The expression to be fixed to one database */ ){ - while( pExpr ){ - if( !pFix->bTemp ) ExprSetProperty(pExpr, EP_FromDDL); - if( pExpr->op==TK_VARIABLE ){ - if( pFix->pParse->db->init.busy ){ - pExpr->op = TK_NULL; - }else{ - sqlite3ErrorMsg(pFix->pParse, "%s cannot use variables", pFix->zType); - return 1; - } - } - if( ExprHasProperty(pExpr, EP_TokenOnly|EP_Leaf) ) break; - if( ExprHasProperty(pExpr, EP_xIsSelect) ){ - if( sqlite3FixSelect(pFix, pExpr->x.pSelect) ) return 1; - }else{ - if( sqlite3FixExprList(pFix, pExpr->x.pList) ) return 1; - } - if( sqlite3FixExpr(pFix, pExpr->pRight) ){ - return 1; - } - pExpr =
pExpr->pLeft; - } - return 0; -} -SQLITE_PRIVATE int sqlite3FixExprList( - DbFixer *pFix, /* Context of the fixation */ - ExprList *pList /* The expression to be fixed to one database */ -){ - int i; - struct ExprList_item *pItem; - if( pList==0 ) return 0; - for(i=0, pItem=pList->a; i<pList->nExpr; i++, pItem++){ - if( sqlite3FixExpr(pFix, pItem->pExpr) ){ - return 1; - } - } - return 0; + return sqlite3WalkExpr(&pFix->w, pExpr); } #endif @@ -110087,32 +112115,30 @@ SQLITE_PRIVATE int sqlite3FixTriggerStep( TriggerStep *pStep /* The trigger step to be fixed to one database */ ){ while( pStep ){ - if( sqlite3FixSelect(pFix, pStep->pSelect) ){ - return 1; - } - if( sqlite3FixExpr(pFix, pStep->pWhere) ){ - return 1; - } - if( sqlite3FixExprList(pFix, pStep->pExprList) ){ - return 1; - } - if( pStep->pFrom && sqlite3FixSrcList(pFix, pStep->pFrom) ){ + if( sqlite3WalkSelect(&pFix->w, pStep->pSelect) + || sqlite3WalkExpr(&pFix->w, pStep->pWhere) + || sqlite3WalkExprList(&pFix->w, pStep->pExprList) + || sqlite3FixSrcList(pFix, pStep->pFrom) + ){ return 1; } #ifndef SQLITE_OMIT_UPSERT - if( pStep->pUpsert ){ - Upsert *pUp = pStep->pUpsert; - if( sqlite3FixExprList(pFix, pUp->pUpsertTarget) - || sqlite3FixExpr(pFix, pUp->pUpsertTargetWhere) - || sqlite3FixExprList(pFix, pUp->pUpsertSet) - || sqlite3FixExpr(pFix, pUp->pUpsertWhere) - ){ - return 1; + { + Upsert *pUp; + for(pUp=pStep->pUpsert; pUp; pUp=pUp->pNextUpsert){ + if( sqlite3WalkExprList(&pFix->w, pUp->pUpsertTarget) + || sqlite3WalkExpr(&pFix->w, pUp->pUpsertTargetWhere) + || sqlite3WalkExprList(&pFix->w, pUp->pUpsertSet) + || sqlite3WalkExpr(&pFix->w, pUp->pUpsertWhere) + ){ + return 1; + } } } #endif pStep = pStep->pNext; } + return 0; } #endif @@ -110264,7 +112290,6 @@ SQLITE_PRIVATE void sqlite3AuthRead( Schema *pSchema, /* The schema of the expression */ SrcList *pTabList /* All tables that pExpr might refer to */ ){ - sqlite3 *db = pParse->db; Table *pTab = 0; /* The table being read */ const char *zCol; /* Name of the column of the table */ int iSrc; /* Index in pTabList->a[] of table being read */ @@ -110272,8 +112297,8 @@ SQLITE_PRIVATE void sqlite3AuthRead( int iCol; /* Index of column in table */ assert( pExpr->op==TK_COLUMN || pExpr->op==TK_TRIGGER ); - assert( !IN_RENAME_OBJECT || db->xAuth==0 ); - if( db->xAuth==0 ) return; + assert( !IN_RENAME_OBJECT ); + assert( pParse->db->xAuth!=0 ); iDb = sqlite3SchemaToIndex(pParse->db, pSchema); if( iDb<0 ){ /* An attempt to read a column out of a subquery or other @@ -110285,7 +112310,7 @@ SQLITE_PRIVATE void sqlite3AuthRead( pTab = pParse->pTriggerTab; }else{ assert( pTabList ); - for(iSrc=0; ALWAYS(iSrc<pTabList->nSrc); iSrc++){ + for(iSrc=0; iSrc<pTabList->nSrc; iSrc++){ if( pExpr->iTable==pTabList->a[iSrc].iCursor ){ pTab = pTabList->a[iSrc].pTab; break; @@ -110293,7 +112318,7 @@ } } iCol = pExpr->iColumn; - if( NEVER(pTab==0) ) return; + if( pTab==0 ) return; if( iCol>=0 ){ assert( iCol<pTab->nCol ); @@ -110304,7 +112329,7 @@ }else{ zCol = "ROWID"; } - assert( iDb>=0 && iDb<db->nDb ); + assert( iDb>=0 && iDb<pParse->db->nDb ); if( SQLITE_IGNORE==sqlite3AuthReadCol(pParse, pTab->zName, zCol, iDb) ){ pExpr->op = TK_NULL; } @@ -110330,11 +112355,7 @@ SQLITE_PRIVATE int sqlite3AuthCheck( ** or if the parser is being invoked from within sqlite3_declare_vtab.
*/ assert( !IN_RENAME_OBJECT || db->xAuth==0 ); - if( db->init.busy || IN_SPECIAL_PARSE ){ - return SQLITE_OK; - } - - if( db->xAuth==0 ){ + if( db->xAuth==0 || db->init.busy || IN_SPECIAL_PARSE ){ return SQLITE_OK; } @@ -110443,21 +112464,20 @@ struct TableLock { ** code to make the lock occur is generated by a later call to ** codeTableLocks() which occurs during sqlite3FinishCoding(). */ -SQLITE_PRIVATE void sqlite3TableLock( +static SQLITE_NOINLINE void lockTable( Parse *pParse, /* Parsing context */ int iDb, /* Index of the database containing the table to lock */ Pgno iTab, /* Root page number of the table to be locked */ u8 isWriteLock, /* True for a write lock */ const char *zName /* Name of the table to be locked */ ){ - Parse *pToplevel = sqlite3ParseToplevel(pParse); + Parse *pToplevel; int i; int nBytes; TableLock *p; assert( iDb>=0 ); - if( iDb==1 ) return; - if( !sqlite3BtreeSharable(pParse->db->aDb[iDb].pBt) ) return; + pToplevel = sqlite3ParseToplevel(pParse); for(i=0; i<pToplevel->nTableLock; i++){ p = &pToplevel->aTableLock[i]; if( p->iDb==iDb && p->iTab==iTab ){ @@ -110480,6 +112500,17 @@ SQLITE_PRIVATE void sqlite3TableLock( sqlite3OomFault(pToplevel->db); } } +SQLITE_PRIVATE void sqlite3TableLock( + Parse *pParse, /* Parsing context */ + int iDb, /* Index of the database containing the table to lock */ + Pgno iTab, /* Root page number of the table to be locked */ + u8 isWriteLock, /* True for a write lock */ + const char *zName /* Name of the table to be locked */ +){ + if( iDb==1 ) return; + if( !sqlite3BtreeSharable(pParse->db->aDb[iDb].pBt) ) return; + lockTable(pParse, iDb, iTab, isWriteLock, zName); +} /* ** Code an OP_TableLock instruction for each table locked by the @@ -110487,10 +112518,8 @@ SQLITE_PRIVATE void sqlite3TableLock( */ static void codeTableLocks(Parse *pParse){ int i; - Vdbe *pVdbe; - - pVdbe = sqlite3GetVdbe(pParse); - assert( pVdbe!=0 ); /* sqlite3GetVdbe cannot fail: VDBE already allocated */ + Vdbe *pVdbe = pParse->pVdbe; + assert( pVdbe!=0 ); for(i=0; i<pParse->nTableLock; i++){ TableLock *p = &pParse->aTableLock[i]; @@ -110541,10 +112570,36 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ /* Begin by generating some termination code at the end of the ** vdbe program */ - v = sqlite3GetVdbe(pParse); + v = pParse->pVdbe; + if( v==0 ){ + if( db->init.busy ){ + pParse->rc = SQLITE_DONE; + return; + } + v = sqlite3GetVdbe(pParse); + if( v==0 ) pParse->rc = SQLITE_ERROR; + } assert( !pParse->isMultiWrite || sqlite3VdbeAssertMayAbort(v, pParse->mayAbort)); if( v ){ + if( pParse->bReturning ){ + Returning *pReturning = pParse->u1.pReturning; + int addrRewind; + int i; + int reg; + + addrRewind = + sqlite3VdbeAddOp1(v, OP_Rewind, pReturning->iRetCur); + VdbeCoverage(v); + reg = pReturning->iRetReg; + for(i=0; i<pReturning->nRetCol; i++){ + sqlite3VdbeAddOp3(v, OP_Column, pReturning->iRetCur, i, reg+i); + } + sqlite3VdbeAddOp2(v, OP_ResultRow, reg, i); + sqlite3VdbeAddOp2(v, OP_Next, pReturning->iRetCur, addrRewind+1); + VdbeCoverage(v); + sqlite3VdbeJumpHere(v, addrRewind); + } sqlite3VdbeAddOp0(v, OP_Halt); #if SQLITE_USER_AUTHENTICATION @@ -110622,12 +112677,16 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ } } + if( pParse->bReturning ){ + Returning *pRet = pParse->u1.pReturning; + sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRet->iRetCur, pRet->nRetCol); + } + /* Finally, jump back to the beginning of the executable code.
*/ sqlite3VdbeGoto(v, 1); } } - /* Get the VDBE program ready for execution */ if( v && pParse->nErr==0 && !db->mallocFailed ){ @@ -110806,7 +112865,7 @@ SQLITE_PRIVATE Table *sqlite3LocateTable( /* If zName is not the name of a table in the schema created using ** CREATE, then check to see if it is the name of a virtual table that ** can be an eponymous virtual table. */ - if( pParse->disableVtab==0 ){ + if( pParse->disableVtab==0 && db->init.busy==0 ){ Module *pMod = (Module*)sqlite3HashFind(&db->aModule, zName); if( pMod==0 && sqlite3_strnicmp(zName, "pragma_", 7)==0 ){ pMod = sqlite3PragmaVtabRegister(db, zName); @@ -110829,6 +112888,8 @@ SQLITE_PRIVATE Table *sqlite3LocateTable( }else{ sqlite3ErrorMsg(pParse, "%s: %s", zMsg, zName); } + }else{ + assert( HasRowid(p) || p->iPKey<0 ); } return p; @@ -110846,7 +112907,7 @@ SQLITE_PRIVATE Table *sqlite3LocateTable( SQLITE_PRIVATE Table *sqlite3LocateTableItem( Parse *pParse, u32 flags, - struct SrcList_item *p + SrcItem *p ){ const char *zDb; assert( p->pSchema==0 || p->zDatabase==0 ); @@ -111245,7 +113306,7 @@ SQLITE_PRIVATE int sqlite3TwoPartName( return -1; } }else{ - assert( db->init.iDb==0 || db->init.busy || IN_RENAME_OBJECT + assert( db->init.iDb==0 || db->init.busy || IN_SPECIAL_PARSE || (db->mDbFlags & DBFLAG_Vacuum)!=0); iDb = db->init.iDb; *pUnqual = pName1; @@ -111414,6 +113475,23 @@ SQLITE_PRIVATE i16 sqlite3TableColumnToStorage(Table *pTab, i16 iCol){ } #endif +/* +** Insert a single OP_JournalMode query opcode in order to force the +** prepared statement to return false for sqlite3_stmt_readonly(). This +** is used by CREATE TABLE IF NOT EXISTS and similar if the table already +** exists, so that the prepared statement for CREATE TABLE IF NOT EXISTS +** will return false for sqlite3_stmt_readonly() even if that statement +** is a read-only no-op. +*/ +static void sqlite3ForceNotReadOnly(Parse *pParse){ + int iReg = ++pParse->nMem; + Vdbe *v = sqlite3GetVdbe(pParse); + if( v ){ + sqlite3VdbeAddOp3(v, OP_JournalMode, 0, iReg, PAGER_JOURNALMODE_QUERY); + sqlite3VdbeUsesBtree(v, 0); + } +} + /* ** Begin constructing a new table representation in memory. This is ** the first of several action routines that get called in response @@ -111513,6 +113591,7 @@ SQLITE_PRIVATE void sqlite3StartTable( }else{ assert( !db->init.busy || CORRUPT_DB ); sqlite3CodeVerifySchema(pParse, iDb); + sqlite3ForceNotReadOnly(pParse); } goto begin_table_error; } @@ -111541,17 +113620,6 @@ SQLITE_PRIVATE void sqlite3StartTable( assert( pParse->pNewTable==0 ); pParse->pNewTable = pTable; - /* If this is the magic sqlite_sequence table used by autoincrement, - ** then record a pointer to this table in the main database structure - ** so that INSERT can find the table easily. - */ -#ifndef SQLITE_OMIT_AUTOINCREMENT - if( !pParse->nested && strcmp(zName, "sqlite_sequence")==0 ){ - assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); - pTable->pSchema->pSeqTab = pTable; - } -#endif - /* Begin generating the code that will insert the table record into ** the schema table. Note in particular that we must go ahead ** and allocate the record number for the table entry now.
Before any @@ -111604,7 +113672,8 @@ SQLITE_PRIVATE void sqlite3StartTable( }else #endif { - pParse->addrCrTab = + assert( !pParse->bReturning ); + pParse->u1.addrCrTab = sqlite3VdbeAddOp3(v, OP_CreateBtree, iDb, reg2, BTREE_INTKEY); } sqlite3OpenSchemaTable(pParse, iDb); @@ -111631,12 +113700,86 @@ begin_table_error: SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table *pTab, Column *pCol){ if( sqlite3_strnicmp(pCol->zName, "__hidden__", 10)==0 ){ pCol->colFlags |= COLFLAG_HIDDEN; + if( pTab ) pTab->tabFlags |= TF_HasHidden; }else if( pTab && pCol!=pTab->aCol && (pCol[-1].colFlags & COLFLAG_HIDDEN) ){ pTab->tabFlags |= TF_OOOHidden; } } #endif +/* +** Name of the special TEMP trigger used to implement RETURNING. The +** name begins with "sqlite_" so that it is guaranteed not to collide +** with any application-generated triggers. +*/ +#define RETURNING_TRIGGER_NAME "sqlite_returning" + +/* +** Clean up the data structures associated with the RETURNING clause. +*/ +static void sqlite3DeleteReturning(sqlite3 *db, Returning *pRet){ + Hash *pHash; + pHash = &(db->aDb[1].pSchema->trigHash); + sqlite3HashInsert(pHash, RETURNING_TRIGGER_NAME, 0); + sqlite3ExprListDelete(db, pRet->pReturnEL); + sqlite3DbFree(db, pRet); +} + +/* +** Add the RETURNING clause to the parse currently underway. +** +** This routine creates a special TEMP trigger that will fire for each row +** of the DML statement. That TEMP trigger contains a single SELECT +** statement with a result set that is the argument of the RETURNING clause. +** The trigger has the Trigger.bReturning flag and an opcode of +** TK_RETURNING instead of TK_SELECT, so that the trigger code generator +** knows to handle it specially. The TEMP trigger is automatically +** removed at the end of the parse. +** +** When this routine is called, we do not yet know if the RETURNING clause +** is attached to a DELETE, INSERT, or UPDATE, so construct it as a +** RETURNING trigger instead. It will then be converted into the appropriate +** type on the first call to sqlite3TriggersExist(). +*/ +SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){ + Returning *pRet; + Hash *pHash; + sqlite3 *db = pParse->db; + if( pParse->pNewTrigger ){ + sqlite3ErrorMsg(pParse, "cannot use RETURNING in a trigger"); + }else{ + assert( pParse->bReturning==0 ); + } + pParse->bReturning = 1; + pRet = sqlite3DbMallocZero(db, sizeof(*pRet)); + if( pRet==0 ){ + sqlite3ExprListDelete(db, pList); + return; + } + pParse->u1.pReturning = pRet; + pRet->pParse = pParse; + pRet->pReturnEL = pList; + sqlite3ParserAddCleanup(pParse, + (void(*)(sqlite3*,void*))sqlite3DeleteReturning, pRet); + testcase( pParse->earlyCleanup ); + if( db->mallocFailed ) return; + pRet->retTrig.zName = RETURNING_TRIGGER_NAME; + pRet->retTrig.op = TK_RETURNING; + pRet->retTrig.tr_tm = TRIGGER_AFTER; + pRet->retTrig.bReturning = 1; + pRet->retTrig.pSchema = db->aDb[1].pSchema; + pRet->retTrig.pTabSchema = db->aDb[1].pSchema; + pRet->retTrig.step_list = &pRet->retTStep; + pRet->retTStep.op = TK_RETURNING; + pRet->retTStep.pTrig = &pRet->retTrig; + pRet->retTStep.pExprList = pList; + pHash = &(db->aDb[1].pSchema->trigHash); + assert( sqlite3HashFind(pHash, RETURNING_TRIGGER_NAME)==0 || pParse->nErr ); + if( sqlite3HashInsert(pHash, RETURNING_TRIGGER_NAME, &pRet->retTrig) + ==&pRet->retTrig ){ + sqlite3OomFault(db); + } +} /* ** Add a new column to the table currently being constructed. 
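**
** For example, parsing the (hypothetical) statement
**
**     CREATE TABLE t1(a TEXT, b INTEGER);
**
** causes this routine to run twice, once for column "a" and once for
** column "b".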
@@ -111653,6 +113796,8 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName, Token *pType){ char *zType; Column *pCol; sqlite3 *db = pParse->db; + u8 hName; + if( (p = pParse->pNewTable)==0 ) return; if( p->nCol+1>db->aLimit[SQLITE_LIMIT_COLUMN] ){ sqlite3ErrorMsg(pParse, "too many columns on %s", p->zName); @@ -111664,8 +113809,9 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName, Token *pType){ memcpy(z, pName->z, pName->n); z[pName->n] = 0; sqlite3Dequote(z); + hName = sqlite3StrIHash(z); for(i=0; i<p->nCol; i++){ - if( sqlite3_stricmp(z, p->aCol[i].zName)==0 ){ + if( p->aCol[i].hName==hName && sqlite3StrICmp(z, p->aCol[i].zName)==0 ){ sqlite3ErrorMsg(pParse, "duplicate column name: %s", z); sqlite3DbFree(db, z); return; @@ -111683,7 +113829,7 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName, Token *pType){ pCol = &p->aCol[p->nCol]; memset(pCol, 0, sizeof(p->aCol[0])); pCol->zName = z; - pCol->hName = sqlite3StrIHash(z); + pCol->hName = hName; sqlite3ColumnPropertiesFromName(p, pCol); if( pType->n==0 ){ @@ -112015,8 +114161,10 @@ primary_key_exit: ** Add a new CHECK constraint to the table currently under construction. */ SQLITE_PRIVATE void sqlite3AddCheckConstraint( - Parse *pParse, /* Parsing context */ - Expr *pCheckExpr /* The check expression */ + Parse *pParse, /* Parsing context */ + Expr *pCheckExpr, /* The check expression */ + const char *zStart, /* Opening "(" */ + const char *zEnd /* Closing ")" */ ){ #ifndef SQLITE_OMIT_CHECK Table *pTab = pParse->pNewTable; @@ -112027,6 +114175,13 @@ SQLITE_PRIVATE void sqlite3AddCheckConstraint( pTab->pCheck = sqlite3ExprListAppend(pParse, pTab->pCheck, pCheckExpr); if( pParse->constraintName.n ){ sqlite3ExprListSetName(pParse, pTab->pCheck, &pParse->constraintName, 1); + }else{ + Token t; + for(zStart++; sqlite3Isspace(zStart[0]); zStart++){} + while( sqlite3Isspace(zEnd[-1]) ){ zEnd--; } + t.z = zStart; + t.n = (int)(zEnd - t.z); + sqlite3ExprListSetName(pParse, pTab->pCheck, &t, 1); } }else #endif @@ -112045,7 +114200,7 @@ SQLITE_PRIVATE void sqlite3AddCollateType(Parse *pParse, Token *pToken){ char *zColl; /* Dequoted name of collation sequence */ sqlite3 *db; - if( (p = pParse->pNewTable)==0 ) return; + if( (p = pParse->pNewTable)==0 || IN_RENAME_OBJECT ) return; i = p->nCol-1; db = pParse->db; zColl = sqlite3NameFromToken(db, pToken); @@ -112280,12 +114435,15 @@ static int resizeIndexObject(sqlite3 *db, Index *pIdx, int N){ int nByte; if( pIdx->nColumn>=N ) return SQLITE_OK; assert( pIdx->isResized==0 ); - nByte = (sizeof(char*) + sizeof(i16) + 1)*N; + nByte = (sizeof(char*) + sizeof(LogEst) + sizeof(i16) + 1)*N; zExtra = sqlite3DbMallocZero(db, nByte); if( zExtra==0 ) return SQLITE_NOMEM_BKPT; memcpy(zExtra, pIdx->azColl, sizeof(char*)*pIdx->nColumn); pIdx->azColl = (const char**)zExtra; zExtra += sizeof(char*)*N; + memcpy(zExtra, pIdx->aiRowLogEst, sizeof(LogEst)*(pIdx->nKeyCol+1)); + pIdx->aiRowLogEst = (LogEst*)zExtra; + zExtra += sizeof(LogEst)*N; memcpy(zExtra, pIdx->aiColumn, sizeof(i16)*pIdx->nColumn); pIdx->aiColumn = (i16*)zExtra; zExtra += sizeof(i16)*N; @@ -112454,9 +114612,10 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ /* Convert the P3 operand of the OP_CreateBtree opcode from BTREE_INTKEY ** into BTREE_BLOBKEY.
*/ - if( pParse->addrCrTab ){ + assert( !pParse->bReturning ); + if( pParse->u1.addrCrTab ){ assert( v ); - sqlite3VdbeChangeP3(v, pParse->addrCrTab, BTREE_BLOBKEY); + sqlite3VdbeChangeP3(v, pParse->u1.addrCrTab, BTREE_BLOBKEY); } /* Locate the PRIMARY KEY index. Or, if this table was originally @@ -112468,7 +114627,10 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ sqlite3TokenInit(&ipkToken, pTab->aCol[pTab->iPKey].zName); pList = sqlite3ExprListAppend(pParse, 0, sqlite3ExprAlloc(db, TK_ID, &ipkToken, 0)); - if( pList==0 ) return; + if( pList==0 ){ + pTab->tabFlags &= ~TF_WithoutRowid; + return; + } if( IN_RENAME_OBJECT ){ sqlite3RenameTokenRemap(pParse, pList->a[0].pExpr, &pTab->iPKey); } @@ -112477,7 +114639,10 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ pTab->iPKey = -1; sqlite3CreateIndex(pParse, 0, 0, 0, pList, pTab->keyConf, 0, 0, 0, 0, SQLITE_IDXTYPE_PRIMARYKEY); - if( db->mallocFailed || pParse->nErr ) return; + if( db->mallocFailed || pParse->nErr ){ + pTab->tabFlags &= ~TF_WithoutRowid; + return; + } pPk = sqlite3PrimaryKeyIndex(pTab); assert( pPk->nKeyCol==1 ); }else{ @@ -112681,7 +114846,6 @@ SQLITE_PRIVATE void sqlite3EndTable( if( pEnd==0 && pSelect==0 ){ return; } - assert( !db->mallocFailed ); p = pParse->pNewTable; if( p==0 ) return; @@ -112906,7 +115070,7 @@ SQLITE_PRIVATE void sqlite3EndTable( /* Check to see if we need to create an sqlite_sequence table for ** keeping track of autoincrement keys. */ - if( (p->tabFlags & TF_Autoincrement)!=0 ){ + if( (p->tabFlags & TF_Autoincrement)!=0 && !IN_SPECIAL_PARSE ){ Db *pDb = &db->aDb[iDb]; assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( pDb->pSchema->pSeqTab==0 ){ @@ -112920,7 +115084,7 @@ SQLITE_PRIVATE void sqlite3EndTable( /* Reparse everything to update our internal data structures */ sqlite3VdbeAddParseSchemaOp(v, iDb, - sqlite3MPrintf(db, "tbl_name='%q' AND type!='trigger'", p->zName)); + sqlite3MPrintf(db, "tbl_name='%q' AND type!='trigger'", p->zName),0); } /* Add the table to the in-memory representation of the database. @@ -112929,6 +115093,7 @@ SQLITE_PRIVATE void sqlite3EndTable( Table *pOld; Schema *pSchema = p->pSchema; assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); + assert( HasRowid(p) || p->iPKey<0 ); pOld = sqlite3HashInsert(&pSchema->tblHash, p->zName, p); if( pOld ){ assert( p==pOld ); /* Malloc must have failed inside HashInsert() */ @@ -112938,19 +115103,27 @@ SQLITE_PRIVATE void sqlite3EndTable( pParse->pNewTable = 0; db->mDbFlags |= DBFLAG_SchemaChange; -#ifndef SQLITE_OMIT_ALTERTABLE - if( !p->pSelect ){ - const char *zName = (const char *)pParse->sNameToken.z; - int nName; - assert( !pSelect && pCons && pEnd ); - if( pCons->z==0 ){ - pCons = pEnd; - } - nName = (int)((const char *)pCons->z - zName); - p->addColOffset = 13 + sqlite3Utf8CharLen(zName, nName); + /* If this is the magic sqlite_sequence table used by autoincrement, + ** then record a pointer to this table in the main database structure + ** so that INSERT can find the table easily. 
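+ ** (The sqlite_sequence table itself comes into existence the first time
+ ** a table with an AUTOINCREMENT key is created, e.g. the hypothetical
+ ** statement CREATE TABLE t1(id INTEGER PRIMARY KEY AUTOINCREMENT, v).)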
*/ + assert( !pParse->nested ); +#ifndef SQLITE_OMIT_AUTOINCREMENT + if( strcmp(p->zName, "sqlite_sequence")==0 ){ + assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); + p->pSchema->pSeqTab = p; } #endif } + +#ifndef SQLITE_OMIT_ALTERTABLE + if( !pSelect && !p->pSelect ){ + assert( pCons && pEnd ); + if( pCons->z==0 ){ + pCons = pEnd; + } + p->addColOffset = 13 + (int)(pCons->z - pParse->sNameToken.z); + } +#endif } #ifndef SQLITE_OMIT_VIEW @@ -112983,6 +115156,16 @@ SQLITE_PRIVATE void sqlite3CreateView( sqlite3StartTable(pParse, pName1, pName2, isTemp, 1, 0, noErr); p = pParse->pNewTable; if( p==0 || pParse->nErr ) goto create_view_fail; + + /* Legacy versions of SQLite allowed the use of the magic "rowid" column + ** on a view, even though views do not have rowids. The following flag + ** setting fixes this problem. But the fix can be disabled by compiling + ** with -DSQLITE_ALLOW_ROWID_IN_VIEW in case there are legacy apps that + ** depend upon the old buggy behavior. */ +#ifndef SQLITE_ALLOW_ROWID_IN_VIEW + p->tabFlags |= TF_NoVisibleRowid; +#endif + sqlite3TwoPartName(pParse, pName1, pName2, &pName); iDb = sqlite3SchemaToIndex(db, p->pSchema); sqlite3FixInit(&sFix, pParse, iDb, "view", pName); @@ -113141,6 +115324,7 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ assert( pTable->aCol==0 ); pTable->nCol = pSelTab->nCol; pTable->aCol = pSelTab->aCol; + pTable->tabFlags |= (pSelTab->tabFlags & COLFLAG_NOINSERT); pSelTab->nCol = 0; pSelTab->aCol = 0; assert( sqlite3SchemaMutexHeld(db, 0, pTable->pSchema) ); @@ -113458,7 +115642,10 @@ SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView, if( noErr ) db->suppressErr--; if( pTab==0 ){ - if( noErr ) sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase); + if( noErr ){ + sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase); + sqlite3ForceNotReadOnly(pParse); + } goto exit_drop_table; } iDb = sqlite3SchemaToIndex(db, pTab->pSchema); @@ -114028,6 +116215,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex( }else{ assert( !db->init.busy ); sqlite3CodeVerifySchema(pParse, iDb); + sqlite3ForceNotReadOnly(pParse); } goto exit_create_index; } @@ -114408,7 +116596,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex( sqlite3RefillIndex(pParse, pIndex, iMem); sqlite3ChangeCookie(pParse, iDb); sqlite3VdbeAddParseSchemaOp(v, iDb, - sqlite3MPrintf(db, "name='%q' AND type='index'", pIndex->zName)); + sqlite3MPrintf(db, "name='%q' AND type='index'", pIndex->zName), 0); sqlite3VdbeAddOp2(v, OP_Expire, 0, 1); } @@ -114429,7 +116617,11 @@ SQLITE_PRIVATE void sqlite3CreateIndex( /* Clean up before exiting */ exit_create_index: if( pIndex ) sqlite3FreeIndex(db, pIndex); - if( pTab ){ /* Ensure all REPLACE indexes are at the end of the list */ + if( pTab ){ + /* Ensure all REPLACE indexes on pTab are at the end of the pIndex list. + ** The list was already ordered when this routine was entered, so at this + ** point at most a single index (the newly added index) will be out of + ** order. So we have to reorder at most one index. */ Index **ppFrom = &pTab->pIndex; Index *pThis; for(ppFrom=&pTab->pIndex; (pThis = *ppFrom)!=0; ppFrom=&pThis->pNext){ @@ -114443,6 +116635,16 @@ exit_create_index: } break; } +#ifdef SQLITE_DEBUG + /* Verify that all REPLACE indexes really are now at the end + ** of the index list. In other words, no other index type ever + ** comes after a REPLACE index on the list. 
*/ + for(pThis = pTab->pIndex; pThis; pThis=pThis->pNext){ + assert( pThis->onError!=OE_Replace + || pThis->pNext==0 + || pThis->pNext->onError==OE_Replace ); + } +#endif } sqlite3ExprDelete(db, pPIWhere); sqlite3ExprListDelete(db, pList); @@ -114494,7 +116696,7 @@ SQLITE_PRIVATE void sqlite3DefaultRowEst(Index *pIdx){ if( x<99 ){ pIdx->pTable->nRowLogEst = x = 99; } - if( pIdx->pPartIdxWhere!=0 ) x -= 10; assert( 10==sqlite3LogEst(2) ); + if( pIdx->pPartIdxWhere!=0 ){ x -= 10; assert( 10==sqlite3LogEst(2) ); } a[0] = x; /* Estimate that a[1] is 10, a[2] is 9, a[3] is 8, a[4] is 7, a[5] is @@ -114529,9 +116731,10 @@ SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists pIndex = sqlite3FindIndex(db, pName->a[0].zName, pName->a[0].zDatabase); if( pIndex==0 ){ if( !ifExists ){ - sqlite3ErrorMsg(pParse, "no such index: %S", pName, 0); + sqlite3ErrorMsg(pParse, "no such index: %S", pName->a); }else{ sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase); + sqlite3ForceNotReadOnly(pParse); } pParse->checkSchema = 1; goto exit_drop_index; @@ -114551,7 +116754,7 @@ SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists if( sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb) ){ goto exit_drop_index; } - if( !OMIT_TEMPDB && iDb ) code = SQLITE_DROP_TEMP_INDEX; + if( !OMIT_TEMPDB && iDb==1 ) code = SQLITE_DROP_TEMP_INDEX; if( sqlite3AuthCheck(pParse, code, pIndex->zName, pTab->zName, zDb) ){ goto exit_drop_index; } @@ -114801,7 +117004,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppend( Token *pTable, /* Table to append */ Token *pDatabase /* Database of the table */ ){ - struct SrcList_item *pItem; + SrcItem *pItem; sqlite3 *db; assert( pDatabase==0 || pTable!=0 ); /* Cannot have C without B */ assert( pParse!=0 ); @@ -114842,11 +117045,11 @@ */ SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse *pParse, SrcList *pList){ int i; - struct SrcList_item *pItem; - assert(pList || pParse->db->mallocFailed ); - if( pList ){ + SrcItem *pItem; + assert( pList || pParse->db->mallocFailed ); + if( ALWAYS(pList) ){ for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){ - if( pItem->iCursor>=0 ) break; + if( pItem->iCursor>=0 ) continue; pItem->iCursor = pParse->nTab++; if( pItem->pSelect ){ sqlite3SrcListAssignCursors(pParse, pItem->pSelect->pSrc); @@ -114860,18 +117063,18 @@ */ SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){ int i; - struct SrcList_item *pItem; + SrcItem *pItem; if( pList==0 ) return; for(pItem=pList->a, i=0; i<pList->nSrc; i++, pItem++){ - sqlite3DbFree(db, pItem->zDatabase); + if( pItem->zDatabase ) sqlite3DbFreeNN(db, pItem->zDatabase); sqlite3DbFree(db, pItem->zName); - sqlite3DbFree(db, pItem->zAlias); + if( pItem->zAlias ) sqlite3DbFreeNN(db, pItem->zAlias); if( pItem->fg.isIndexedBy ) sqlite3DbFree(db, pItem->u1.zIndexedBy); if( pItem->fg.isTabFunc ) sqlite3ExprListDelete(db, pItem->u1.pFuncArg); sqlite3DeleteTable(db, pItem->pTab); - sqlite3SelectDelete(db, pItem->pSelect); - sqlite3ExprDelete(db, pItem->pOn); - sqlite3IdListDelete(db, pItem->pUsing); + if( pItem->pSelect ) sqlite3SelectDelete(db, pItem->pSelect); + if( pItem->pOn ) sqlite3ExprDelete(db, pItem->pOn); + if( pItem->pUsing ) sqlite3IdListDelete(db, pItem->pUsing); } sqlite3DbFreeNN(db, pList); } @@ -114902,7 +117105,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm( Expr *pOn, /* The ON clause of a join */ IdList *pUsing
/* The USING clause of a join */ ){ - struct SrcList_item *pItem; + SrcItem *pItem; sqlite3 *db = pParse->db; if( !p && (pOn || pUsing) ){ sqlite3ErrorMsg(pParse, "a JOIN clause is required before %s", @@ -114946,7 +117149,7 @@ SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *pParse, SrcList *p, Token *pIndexedBy){ assert( pIndexedBy!=0 ); if( p && pIndexedBy->n>0 ){ - struct SrcList_item *pItem; + SrcItem *pItem; assert( p->nSrc>0 ); pItem = &p->a[p->nSrc-1]; assert( pItem->fg.notIndexed==0 ); @@ -114976,7 +117179,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendList(Parse *pParse, SrcList *p1, Src sqlite3SrcListDelete(pParse->db, p2); }else{ p1 = pNew; - memcpy(&p1->a[1], p2->a, p2->nSrc*sizeof(struct SrcList_item)); + memcpy(&p1->a[1], p2->a, p2->nSrc*sizeof(SrcItem)); sqlite3DbFree(pParse->db, p2); } } @@ -114989,7 +117192,7 @@ */ SQLITE_PRIVATE void sqlite3SrcListFuncArgs(Parse *pParse, SrcList *p, ExprList *pList){ if( p ){ - struct SrcList_item *pItem = &p->a[p->nSrc-1]; + SrcItem *pItem = &p->a[p->nSrc-1]; assert( pItem->fg.notIndexed==0 ); assert( pItem->fg.isIndexedBy==0 ); assert( pItem->fg.isTabFunc==0 ); @@ -115043,7 +117246,16 @@ SQLITE_PRIVATE void sqlite3BeginTransaction(Parse *pParse, int type){ if( !v ) return; if( type!=TK_DEFERRED ){ for(i=0; i<db->nDb; i++){ - sqlite3VdbeAddOp2(v, OP_Transaction, i, (type==TK_EXCLUSIVE)+1); + int eTxnType; + Btree *pBt = db->aDb[i].pBt; + if( pBt && sqlite3BtreeIsReadonly(pBt) ){ + eTxnType = 0; /* Read txn */ + }else if( type==TK_EXCLUSIVE ){ + eTxnType = 2; /* Exclusive txn */ + }else{ + eTxnType = 1; /* Write txn */ + } + sqlite3VdbeAddOp2(v, OP_Transaction, i, eTxnType); sqlite3VdbeUsesBtree(v, i); } } @@ -115132,13 +117344,11 @@ SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *pParse){ ** will occur at the end of the top-level VDBE and will be generated ** later, by sqlite3FinishCoding().
*/ -SQLITE_PRIVATE void sqlite3CodeVerifySchema(Parse *pParse, int iDb){ - Parse *pToplevel = sqlite3ParseToplevel(pParse); - - assert( iDb>=0 && iDb<pParse->db->nDb ); - assert( pParse->db->aDb[iDb].pBt!=0 || iDb==1 ); - assert( iDb<SQLITE_MAX_ATTACHED+2 ); - assert( sqlite3SchemaMutexHeld(pParse->db, iDb, 0) ); +static void sqlite3CodeVerifySchemaAtToplevel(Parse *pToplevel, int iDb){ + assert( iDb>=0 && iDb<pToplevel->db->nDb ); + assert( pToplevel->db->aDb[iDb].pBt!=0 || iDb==1 ); + assert( iDb<SQLITE_MAX_ATTACHED+2 ); + assert( sqlite3SchemaMutexHeld(pToplevel->db, iDb, 0) ); if( DbMaskTest(pToplevel->cookieMask, iDb)==0 ){ DbMaskSet(pToplevel->cookieMask, iDb); if( !OMIT_TEMPDB && iDb==1 ){ @@ -115146,6 +117356,10 @@ } } } +SQLITE_PRIVATE void sqlite3CodeVerifySchema(Parse *pParse, int iDb){ + sqlite3CodeVerifySchemaAtToplevel(sqlite3ParseToplevel(pParse), iDb); +} + /* ** If argument zDb is NULL, then call sqlite3CodeVerifySchema() for each @@ -115177,7 +117391,7 @@ SQLITE_PRIVATE void sqlite3CodeVerifyNamedSchema(Parse *pParse, const char *zDb) */ SQLITE_PRIVATE void sqlite3BeginWriteOperation(Parse *pParse, int setStatement, int iDb){ Parse *pToplevel = sqlite3ParseToplevel(pParse); - sqlite3CodeVerifySchema(pParse, iDb); + sqlite3CodeVerifySchemaAtToplevel(pToplevel, iDb); DbMaskSet(pToplevel->writeMask, iDb); pToplevel->isMultiWrite |= setStatement; } @@ -115228,7 +117442,9 @@ SQLITE_PRIVATE void sqlite3HaltConstraint( i8 p4type, /* P4_STATIC or P4_TRANSIENT */ u8 p5Errmsg /* P5_ErrMsg type */ ){ - Vdbe *v = sqlite3GetVdbe(pParse); + Vdbe *v; + assert( pParse->pVdbe!=0 ); + v = sqlite3GetVdbe(pParse); assert( (errCode&0xff)==SQLITE_CONSTRAINT || pParse->nested ); if( onError==OE_Abort ){ sqlite3MayAbort(pParse); @@ -115473,24 +117689,76 @@ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse *pParse, Index *pIdx){ } #ifndef SQLITE_OMIT_CTE +/* +** Create a new CTE object +*/ +SQLITE_PRIVATE Cte *sqlite3CteNew( + Parse *pParse, /* Parsing context */ + Token *pName, /* Name of the common-table */ + ExprList *pArglist, /* Optional column name list for the table */ + Select *pQuery, /* Query used to initialize the table */ + u8 eM10d /* The MATERIALIZED flag */ +){ + Cte *pNew; + sqlite3 *db = pParse->db; + + pNew = sqlite3DbMallocZero(db, sizeof(*pNew)); + assert( pNew!=0 || db->mallocFailed ); + + if( db->mallocFailed ){ + sqlite3ExprListDelete(db, pArglist); + sqlite3SelectDelete(db, pQuery); + }else{ + pNew->pSelect = pQuery; + pNew->pCols = pArglist; + pNew->zName = sqlite3NameFromToken(pParse->db, pName); + pNew->eM10d = eM10d; + } + return pNew; +} + +/* +** Clear information from a Cte object, but do not deallocate storage +** for the object itself. +*/ +static void cteClear(sqlite3 *db, Cte *pCte){ + assert( pCte!=0 ); + sqlite3ExprListDelete(db, pCte->pCols); + sqlite3SelectDelete(db, pCte->pSelect); + sqlite3DbFree(db, pCte->zName); +} + +/* +** Free the contents of the CTE object passed as the second argument. +*/ +SQLITE_PRIVATE void sqlite3CteDelete(sqlite3 *db, Cte *pCte){ + assert( pCte!=0 ); + cteClear(db, pCte); + sqlite3DbFree(db, pCte); +} + /* ** This routine is invoked once per CTE by the parser while parsing a -** WITH clause. +** WITH clause. The CTE described by the third argument is added to +** the WITH clause of the second argument. If the second argument is +** NULL, then a new WITH argument is created.
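+**
+** For example, the (hypothetical) statement
+**
+**     WITH c(x) AS MATERIALIZED (SELECT 1) SELECT x FROM c;
+**
+** reaches this routine once, with pCte carrying the name "c", the column
+** list "(x)", the subquery, and the MATERIALIZED flag in Cte.eM10d.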
*/ SQLITE_PRIVATE With *sqlite3WithAdd( Parse *pParse, /* Parsing context */ With *pWith, /* Existing WITH clause, or NULL */ - Token *pName, /* Name of the common-table */ - ExprList *pArglist, /* Optional column name list for the table */ - Select *pQuery /* Query used to initialize the table */ + Cte *pCte /* CTE to add to the WITH clause */ ){ sqlite3 *db = pParse->db; With *pNew; char *zName; + if( pCte==0 ){ + return pWith; + } + /* Check that the CTE name is unique within this WITH clause. If ** not, store an error in the Parse structure. */ - zName = sqlite3NameFromToken(pParse->db, pName); + zName = pCte->zName; if( zName && pWith ){ int i; for(i=0; i<pWith->nCte; i++){ @@ -115509,16 +117777,11 @@ SQLITE_PRIVATE With *sqlite3WithAdd( assert( (pNew!=0 && zName!=0) || db->mallocFailed ); if( db->mallocFailed ){ - sqlite3ExprListDelete(db, pArglist); - sqlite3SelectDelete(db, pQuery); - sqlite3DbFree(db, zName); + sqlite3CteDelete(db, pCte); pNew = pWith; }else{ - pNew->a[pNew->nCte].pSelect = pQuery; - pNew->a[pNew->nCte].pCols = pArglist; - pNew->a[pNew->nCte].zName = zName; - pNew->a[pNew->nCte].zCteErr = 0; - pNew->nCte++; + pNew->a[pNew->nCte++] = *pCte; + sqlite3DbFree(db, pCte); } return pNew; @@ -115531,10 +117794,7 @@ SQLITE_PRIVATE void sqlite3WithDelete(sqlite3 *db, With *pWith){ if( pWith ){ int i; for(i=0; i<pWith->nCte; i++){ - struct Cte *pCte = &pWith->a[i]; - sqlite3ExprListDelete(db, pCte->pCols); - sqlite3SelectDelete(db, pCte->pSelect); - sqlite3DbFree(db, pCte->zName); + cteClear(db, &pWith->a[i]); } sqlite3DbFree(db, pWith); } @@ -116113,7 +118373,7 @@ SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *db, Btree *pBt){ ** */ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ - struct SrcList_item *pItem = pSrc->a; + SrcItem *pItem = pSrc->a; Table *pTab; assert( pItem && pSrc->nSrc>=1 ); pTab = sqlite3LocateTableItem(pParse, 0, pItem); @@ -116121,9 +118381,9 @@ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ pItem->pTab = pTab; if( pTab ){ pTab->nTabRef++; - } - if( sqlite3IndexedByLookup(pParse, pItem) ){ - pTab = 0; + if( pItem->fg.isIndexedBy && sqlite3IndexedByLookup(pParse, pItem) ){ + pTab = 0; + } } return pTab; } @@ -116291,9 +118551,15 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( /* duplicate the FROM clause as it is needed by both the DELETE/UPDATE tree ** and the SELECT subtree. */ pSrc->a[0].pTab = 0; - pSelectSrc = sqlite3SrcListDup(pParse->db, pSrc, 0); + pSelectSrc = sqlite3SrcListDup(db, pSrc, 0); pSrc->a[0].pTab = pTab; - pSrc->a[0].pIBIndex = 0; + if( pSrc->a[0].fg.isIndexedBy ){ + pSrc->a[0].u2.pIBIndex = 0; + pSrc->a[0].fg.isIndexedBy = 0; + sqlite3DbFree(db, pSrc->a[0].u1.zIndexedBy); + }else if( pSrc->a[0].fg.isCte ){ + pSrc->a[0].u2.pCteUse->nUse++; + } /* generate the SELECT expression tree. */ pSelect = sqlite3SelectNew(pParse, pEList, pSelectSrc, pWhere, 0 ,0, @@ -116471,6 +118737,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( if( (db->flags & SQLITE_CountRows)!=0 && !pParse->nested && !pParse->pTriggerTab + && !pParse->bReturning ){ memCnt = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Integer, 0, memCnt); @@ -116505,11 +118772,14 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ assert( pIdx->pSchema==pTab->pSchema ); sqlite3VdbeAddOp2(v, OP_Clear, pIdx->tnum, iDb); + if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){ + sqlite3VdbeChangeP3(v, -1, memCnt ?
memCnt : -1); + } } }else #endif /* SQLITE_OMIT_TRUNCATE_OPTIMIZATION */ { - u16 wcf = WHERE_ONEPASS_DESIRED|WHERE_DUPLICATES_OK|WHERE_SEEK_TABLE; + u16 wcf = WHERE_ONEPASS_DESIRED|WHERE_DUPLICATES_OK; if( sNC.ncFlags & NC_VarSelect ) bComplex = 1; wcf |= (bComplex ? 0 : WHERE_ONEPASS_MULTIROW); if( HasRowid(pTab) ){ @@ -116545,6 +118815,9 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( assert( IsVirtual(pTab)==0 || eOnePass!=ONEPASS_MULTI ); assert( IsVirtual(pTab) || bComplex || eOnePass!=ONEPASS_OFF ); if( eOnePass!=ONEPASS_SINGLE ) sqlite3MultiWrite(pParse); + if( sqlite3WhereUsesDeferredSeek(pWInfo) ){ + sqlite3VdbeAddOp1(v, OP_FinishSeek, iTabCur); + } /* Keep track of the number of rows to be deleted */ if( memCnt ){ @@ -116579,6 +118852,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( if( aiCurOnePass[0]>=0 ) aToOpen[aiCurOnePass[0]-iTabCur] = 0; if( aiCurOnePass[1]>=0 ) aToOpen[aiCurOnePass[1]-iTabCur] = 0; if( addrEphOpen ) sqlite3VdbeChangeToNoop(v, addrEphOpen); + addrBypass = sqlite3VdbeMakeLabel(pParse); }else{ if( pPk ){ /* Add the PK key for this row to the temporary table */ @@ -116592,13 +118866,6 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( nKey = 1; /* OP_DeferredSeek always uses a single rowid */ sqlite3VdbeAddOp2(v, OP_RowSetAdd, iRowSet, iKey); } - } - - /* If this DELETE cannot use the ONEPASS strategy, this is the - ** end of the WHERE loop */ - if( eOnePass!=ONEPASS_OFF ){ - addrBypass = sqlite3VdbeMakeLabel(pParse); - }else{ sqlite3WhereEnd(pWInfo); } @@ -116695,7 +118962,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( ** invoke the callback function. */ if( memCnt ){ - sqlite3VdbeAddOp2(v, OP_ResultRow, memCnt, 1); + sqlite3VdbeAddOp2(v, OP_ChngCntRow, memCnt, 1); sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows deleted", SQLITE_STATIC); } @@ -117019,20 +119286,18 @@ SQLITE_PRIVATE int sqlite3GenerateIndexKey( continue; } sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iDataCur, j, regBase+j); - /* If the column affinity is REAL but the number is an integer, then it - ** might be stored in the table as an integer (using a compact - ** representation) then converted to REAL by an OP_RealAffinity opcode. - ** But we are getting ready to store this value back into an index, where - ** it should be converted back to INTEGER again. So omit the OP_RealAffinity - ** opcode if it is present */ - sqlite3VdbeDeletePriorOpcode(v, OP_RealAffinity); + if( pIdx->aiColumn[j]>=0 ){ + /* If the column affinity is REAL but the number is an integer, then it + ** might be stored in the table as an integer (using a compact + ** representation) then converted to REAL by an OP_RealAffinity opcode. + ** But we are getting ready to store this value back into an index, where + ** it should be converted back to INTEGER again. So omit the + ** OP_RealAffinity opcode if it is present */ + sqlite3VdbeDeletePriorOpcode(v, OP_RealAffinity); + } } if( regOut ){ sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regOut); - if( pIdx->pTable->pSelect ){ - const char *zAff = sqlite3IndexAffinityStr(pParse->db, pIdx); - sqlite3VdbeChangeP4(v, -1, zAff, P4_TRANSIENT); - } } sqlite3ReleaseTempRange(pParse, regBase, nCol); return regBase; @@ -117747,7 +120012,8 @@ static int patternCompare( /* Skip over multiple "*" characters in the pattern. If there ** are also "?" characters, skip those as well, but consume a ** single character of the input string for each "?"
skipped */ - while( (c=Utf8Read(zPattern)) == matchAll || c == matchOne ){ + while( (c=Utf8Read(zPattern)) == matchAll + || (c == matchOne && matchOne!=0) ){ if( c==matchOne && sqlite3Utf8Read(&zString)==0 ){ return SQLITE_NOWILDCARDMATCH; } @@ -118368,10 +120634,10 @@ static void trimFunc( ){ const unsigned char *zIn; /* Input string */ const unsigned char *zCharSet; /* Set of characters to trim */ - int nIn; /* Number of bytes in input */ + unsigned int nIn; /* Number of bytes in input */ int flags; /* 1: trimleft 2: trimright 3: trim */ int i; /* Loop counter */ - unsigned char *aLen = 0; /* Length of each character in zCharSet */ + unsigned int *aLen = 0; /* Length of each character in zCharSet */ unsigned char **azChar = 0; /* Individual characters in zCharSet */ int nChar; /* Number of characters in zCharSet */ @@ -118380,13 +120646,13 @@ } zIn = sqlite3_value_text(argv[0]); if( zIn==0 ) return; - nIn = sqlite3_value_bytes(argv[0]); + nIn = (unsigned)sqlite3_value_bytes(argv[0]); assert( zIn==sqlite3_value_text(argv[0]) ); if( argc==1 ){ - static const unsigned char lenOne[] = { 1 }; + static const unsigned lenOne[] = { 1 }; static unsigned char * const azOne[] = { (u8*)" " }; nChar = 1; - aLen = (u8*)lenOne; + aLen = (unsigned*)lenOne; azChar = (unsigned char **)azOne; zCharSet = 0; }else if( (zCharSet = sqlite3_value_text(argv[1]))==0 ){ @@ -118397,15 +120663,16 @@ SQLITE_SKIP_UTF8(z); } if( nChar>0 ){ - azChar = contextMalloc(context, ((i64)nChar)*(sizeof(char*)+1)); + azChar = contextMalloc(context, + ((i64)nChar)*(sizeof(char*)+sizeof(unsigned))); if( azChar==0 ){ return; } - aLen = (unsigned char*)&azChar[nChar]; + aLen = (unsigned*)&azChar[nChar]; for(z=zCharSet, nChar=0; *z; nChar++){ azChar[nChar] = (unsigned char *)z; SQLITE_SKIP_UTF8(z); - aLen[nChar] = (u8)(z - azChar[nChar]); + aLen[nChar] = (unsigned)(z - azChar[nChar]); } } } @@ -118413,7 +120680,7 @@ flags = SQLITE_PTR_TO_INT(sqlite3_user_data(context)); if( flags & 1 ){ while( nIn>0 ){ - int len = 0; + unsigned int len = 0; for(i=0; i<nChar; i++){ len = aLen[i]; if( len<=nIn && memcmp(zIn, azChar[i], len)==0 ) break; } if( i>=nChar ) break; zIn += len; nIn -= len; } } if( flags & 2 ){ while( nIn>0 ){ - int len = 0; + unsigned int len = 0; for(i=0; i<nChar; i++){ len = aLen[i]; if( len<=nIn && memcmp(&zIn[nIn-len],azChar[i],len)==0 ) break; } if( i>=nChar ) break; nIn -= len; } } - if( pExpr->op!=TK_FUNCTION || !pExpr->x.pList ){ + assert( pExpr!=0 ); + assert( pExpr->op==TK_FUNCTION ); + if( !pExpr->x.pList ){ return 0; } assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); @@ -118957,6 +121226,201 @@ SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3 *db, Expr *pExpr, int *pIsNocas return 1; } +/* Mathematical Constants */ +#ifndef M_PI +# define M_PI 3.141592653589793238462643383279502884 +#endif +#ifndef M_LN10 +# define M_LN10 2.302585092994045684017991454684364208 +#endif +#ifndef M_LN2 +# define M_LN2 0.693147180559945309417232121458176568 +#endif + + +/* Extra math functions that require linking with -lm +*/ +#ifdef SQLITE_ENABLE_MATH_FUNCTIONS +/* +** Implementation of SQL functions: +** +** ceil(X) +** ceiling(X) +** floor(X) +** +** The sqlite3_user_data() pointer is a pointer to the libm implementation +** of the underlying C function.
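+**
+** For example, ceil(1.5) evaluates to 2.0 and floor(-1.5) to -2.0, while
+** an integer argument such as ceil(2) is passed through unchanged.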
+*/ +static void ceilingFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + assert( argc==1 ); + switch( sqlite3_value_numeric_type(argv[0]) ){ + case SQLITE_INTEGER: { + sqlite3_result_int64(context, sqlite3_value_int64(argv[0])); + break; + } + case SQLITE_FLOAT: { + double (*x)(double) = (double(*)(double))sqlite3_user_data(context); + sqlite3_result_double(context, x(sqlite3_value_double(argv[0]))); + break; + } + default: { + break; + } + } +} + +/* +** On some systems, ceil() and floor() are intrinsic functions. You are +** unable to take a pointer to these functions. Hence, we here wrap them +** in our own actual functions. +*/ +static double xCeil(double x){ return ceil(x); } +static double xFloor(double x){ return floor(x); } + +/* +** Implementation of SQL functions: +** +** ln(X) - natural logarithm +** log(X) - log X base 10 +** log10(X) - log X base 10 +** log(B,X) - log X base B +*/ +static void logFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + double x, b, ans; + assert( argc==1 || argc==2 ); + switch( sqlite3_value_numeric_type(argv[0]) ){ + case SQLITE_INTEGER: + case SQLITE_FLOAT: + x = sqlite3_value_double(argv[0]); + if( x<=0.0 ) return; + break; + default: + return; + } + if( argc==2 ){ + switch( sqlite3_value_numeric_type(argv[0]) ){ + case SQLITE_INTEGER: + case SQLITE_FLOAT: + b = log(x); + if( b<=0.0 ) return; + x = sqlite3_value_double(argv[1]); + if( x<=0.0 ) return; + break; + default: + return; + } + ans = log(x)/b; + }else{ + ans = log(x); + switch( SQLITE_PTR_TO_INT(sqlite3_user_data(context)) ){ + case 1: + /* Convert from natural logarithm to log base 10 */ + ans *= 1.0/M_LN10; + break; + case 2: + /* Convert from natural logarithm to log base 2 */ + ans *= 1.0/M_LN2; + break; + default: + break; + } + } + sqlite3_result_double(context, ans); +} + +/* +** Functions to convert degrees to radians and radians to degrees. +*/ +static double degToRad(double x){ return x*(M_PI/180.0); } +static double radToDeg(double x){ return x*(180.0/M_PI); } + +/* +** Implementation of 1-argument SQL math functions: +** +** exp(X) - Compute e to the X-th power +*/ +static void math1Func( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int type0; + double v0, ans; + double (*x)(double); + assert( argc==1 ); + type0 = sqlite3_value_numeric_type(argv[0]); + if( type0!=SQLITE_INTEGER && type0!=SQLITE_FLOAT ) return; + v0 = sqlite3_value_double(argv[0]); + x = (double(*)(double))sqlite3_user_data(context); + ans = x(v0); + sqlite3_result_double(context, ans); +} + +/* +** Implementation of 2-argument SQL math functions: +** +** power(X,Y) - Compute X to the Y-th power +*/ +static void math2Func( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int type0, type1; + double v0, v1, ans; + double (*x)(double,double); + assert( argc==2 ); + type0 = sqlite3_value_numeric_type(argv[0]); + if( type0!=SQLITE_INTEGER && type0!=SQLITE_FLOAT ) return; + type1 = sqlite3_value_numeric_type(argv[1]); + if( type1!=SQLITE_INTEGER && type1!=SQLITE_FLOAT ) return; + v0 = sqlite3_value_double(argv[0]); + v1 = sqlite3_value_double(argv[1]); + x = (double(*)(double,double))sqlite3_user_data(context); + ans = x(v0, v1); + sqlite3_result_double(context, ans); +} + +/* +** Implementation of 0-argument pi() function.
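+**
+** For example, SELECT pi(); evaluates to approximately 3.141592653589793.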
+*/ +static void piFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + assert( argc==0 ); + sqlite3_result_double(context, M_PI); +} + +#endif /* SQLITE_ENABLE_MATH_FUNCTIONS */ + +/* +** Implementation of sign(X) function. +*/ +static void signFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int type0; + double x; + UNUSED_PARAMETER(argc); + assert( argc==1 ); + type0 = sqlite3_value_numeric_type(argv[0]); + if( type0!=SQLITE_INTEGER && type0!=SQLITE_FLOAT ) return; + x = sqlite3_value_double(argv[0]); + sqlite3_result_int(context, x<0.0 ? -1 : x>0.0 ? +1 : 0); +} + /* ** All of the FuncDef structures in the aBuiltinFunc[] array above ** to the global function hash table. This occurs at start-time (as @@ -119048,6 +121512,8 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ FUNCTION(zeroblob, 1, 0, 0, zeroblobFunc ), FUNCTION(substr, 2, 0, 0, substrFunc ), FUNCTION(substr, 3, 0, 0, substrFunc ), + FUNCTION(substring, 2, 0, 0, substrFunc ), + FUNCTION(substring, 3, 0, 0, substrFunc ), WAGGREGATE(sum, 1,0,0, sumStep, sumFinalize, sumFinalize, sumInverse, 0), WAGGREGATE(total, 1,0,0, sumStep,totalFinalize,totalFinalize,sumInverse, 0), WAGGREGATE(avg, 1,0,0, sumStep, avgFinalize, avgFinalize, sumInverse, 0), @@ -119073,6 +121539,43 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ #endif FUNCTION(coalesce, 1, 0, 0, 0 ), FUNCTION(coalesce, 0, 0, 0, 0 ), +#ifdef SQLITE_ENABLE_MATH_FUNCTIONS + MFUNCTION(ceil, 1, xCeil, ceilingFunc ), + MFUNCTION(ceiling, 1, xCeil, ceilingFunc ), + MFUNCTION(floor, 1, xFloor, ceilingFunc ), +#if SQLITE_HAVE_C99_MATH_FUNCS + MFUNCTION(trunc, 1, trunc, ceilingFunc ), +#endif + FUNCTION(ln, 1, 0, 0, logFunc ), + FUNCTION(log, 1, 1, 0, logFunc ), + FUNCTION(log10, 1, 1, 0, logFunc ), + FUNCTION(log2, 1, 2, 0, logFunc ), + FUNCTION(log, 2, 0, 0, logFunc ), + MFUNCTION(exp, 1, exp, math1Func ), + MFUNCTION(pow, 2, pow, math2Func ), + MFUNCTION(power, 2, pow, math2Func ), + MFUNCTION(mod, 2, fmod, math2Func ), + MFUNCTION(acos, 1, acos, math1Func ), + MFUNCTION(asin, 1, asin, math1Func ), + MFUNCTION(atan, 1, atan, math1Func ), + MFUNCTION(atan2, 2, atan2, math2Func ), + MFUNCTION(cos, 1, cos, math1Func ), + MFUNCTION(sin, 1, sin, math1Func ), + MFUNCTION(tan, 1, tan, math1Func ), + MFUNCTION(cosh, 1, cosh, math1Func ), + MFUNCTION(sinh, 1, sinh, math1Func ), + MFUNCTION(tanh, 1, tanh, math1Func ), +#if SQLITE_HAVE_C99_MATH_FUNCS + MFUNCTION(acosh, 1, acosh, math1Func ), + MFUNCTION(asinh, 1, asinh, math1Func ), + MFUNCTION(atanh, 1, atanh, math1Func ), +#endif + MFUNCTION(sqrt, 1, sqrt, math1Func ), + MFUNCTION(radians, 1, degToRad, math1Func ), + MFUNCTION(degrees, 1, radToDeg, math1Func ), + FUNCTION(pi, 0, 0, 0, piFunc ), +#endif /* SQLITE_ENABLE_MATH_FUNCTIONS */ + FUNCTION(sign, 1, 0, 0, signFunc ), INLINE_FUNC(coalesce, -1, INLINEFUNC_coalesce, 0 ), INLINE_FUNC(iif, 3, INLINEFUNC_iif, 0 ), }; @@ -120128,7 +122631,7 @@ SQLITE_PRIVATE void sqlite3FkCheck( ** child table as a SrcList for sqlite3WhereBegin() */ pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0); if( pSrc ){ - struct SrcList_item *pItem = pSrc->a; + SrcItem *pItem = pSrc->a; pItem->pTab = pFKey->pFrom; pItem->zName = pFKey->pFrom->zName; pItem->pTab->nTabRef++; @@ -120216,7 +122719,9 @@ SQLITE_PRIVATE u32 sqlite3FkOldmask( ** ** For an UPDATE, this function returns 2 if: ** -** * There are any FKs for which pTab is the child and the parent table, or +** * There are any FKs for which pTab is the child and the parent table +** and any FK 
processing at all is required (even of a different FK), or +** ** * the UPDATE modifies one or more parent keys for which the action is ** not "NO ACTION" (i.e. is CASCADE, SET DEFAULT or SET NULL). ** @@ -120228,13 +122733,14 @@ SQLITE_PRIVATE int sqlite3FkRequired( int *aChange, /* Non-NULL for UPDATE operations */ int chngRowid /* True for UPDATE that affects rowid */ ){ - int eRet = 0; + int eRet = 1; /* Value to return if bHaveFK is true */ + int bHaveFK = 0; /* If FK processing is required */ if( pParse->db->flags&SQLITE_ForeignKeys ){ if( !aChange ){ /* A DELETE operation. Foreign key processing is required if the ** table in question is either the child or parent table for any ** foreign key constraint. */ - eRet = (sqlite3FkReferences(pTab) || pTab->pFKey); + bHaveFK = (sqlite3FkReferences(pTab) || pTab->pFKey); }else{ /* This is an UPDATE. Foreign key processing is only required if the ** operation modifies one or more child or parent key columns. */ @@ -120242,9 +122748,9 @@ SQLITE_PRIVATE int sqlite3FkRequired( /* Check if any child key columns are being modified. */ for(p=pTab->pFKey; p; p=p->pNextFrom){ - if( 0==sqlite3_stricmp(pTab->zName, p->zTo) ) return 2; if( fkChildIsModified(pTab, p, aChange, chngRowid) ){ - eRet = 1; + if( 0==sqlite3_stricmp(pTab->zName, p->zTo) ) eRet = 2; + bHaveFK = 1; } } @@ -120252,12 +122758,12 @@ SQLITE_PRIVATE int sqlite3FkRequired( for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){ if( fkParentIsModified(pTab, p, aChange, chngRowid) ){ if( p->aAction[1]!=OE_None ) return 2; - eRet = 1; + bHaveFK = 1; } } } } - return eRet; + return bHaveFK ? eRet : 0; } /* @@ -120588,7 +123094,8 @@ SQLITE_PRIVATE void sqlite3OpenTable( ){ Vdbe *v; assert( !IsVirtual(pTab) ); - v = sqlite3GetVdbe(pParse); + assert( pParse->pVdbe!=0 ); + v = pParse->pVdbe; assert( opcode==OP_OpenWrite || opcode==OP_OpenRead ); sqlite3TableLock(pParse, iDb, pTab->tnum, (opcode==OP_OpenWrite)?1:0, pTab->zName); @@ -120913,7 +123420,7 @@ static int autoIncBegin( ** Ticket d8dc2b3a58cd5dc2918a1d4acb 2018-05-23 */ if( pSeqTab==0 || !HasRowid(pSeqTab) - || IsVirtual(pSeqTab) + || NEVER(IsVirtual(pSeqTab)) || pSeqTab->nCol!=2 ){ pParse->nErr++; @@ -120925,7 +123432,9 @@ static int autoIncBegin( while( pInfo && pInfo->pTab!=pTab ){ pInfo = pInfo->pNext; } if( pInfo==0 ){ pInfo = sqlite3DbMallocRawNN(pParse->db, sizeof(*pInfo)); - if( pInfo==0 ) return 0; + sqlite3ParserAddCleanup(pToplevel, sqlite3DbFree, pInfo); + testcase( pParse->earlyCleanup ); + if( pParse->db->mallocFailed ) return 0; pInfo->pNext = pToplevel->pAinc; pToplevel->pAinc = pInfo; pInfo->pTab = pTab; @@ -121370,7 +123879,7 @@ SQLITE_PRIVATE void sqlite3Insert( bIdListInOrder = 0; }else{ sqlite3ErrorMsg(pParse, "table %S has no column named %s", - pTabList, 0, pColumn->a[i].zName); + pTabList->a, pColumn->a[i].zName); pParse->checkSchema = 1; goto insert_cleanup; } @@ -121483,19 +123992,24 @@ SQLITE_PRIVATE void sqlite3Insert( } } #endif - } - /* Make sure the number of columns in the source data matches the number - ** of columns to be inserted into the table. - */ - for(i=0; inCol; i++){ - if( pTab->aCol[i].colFlags & COLFLAG_NOINSERT ) nHidden++; - } - if( pColumn==0 && nColumn && nColumn!=(pTab->nCol-nHidden) ){ - sqlite3ErrorMsg(pParse, - "table %S has %d columns but %d values were supplied", - pTabList, 0, pTab->nCol-nHidden, nColumn); - goto insert_cleanup; + /* Make sure the number of columns in the source data matches the number + ** of columns to be inserted into the table. 
+    */
+    assert( TF_HasHidden==COLFLAG_HIDDEN );
+    assert( TF_HasGenerated==COLFLAG_GENERATED );
+    assert( COLFLAG_NOINSERT==(COLFLAG_GENERATED|COLFLAG_HIDDEN) );
+    if( (pTab->tabFlags & (TF_HasGenerated|TF_HasHidden))!=0 ){
+      for(i=0; i<pTab->nCol; i++){
+        if( pTab->aCol[i].colFlags & COLFLAG_NOINSERT ) nHidden++;
+      }
+    }
+    if( nColumn!=(pTab->nCol-nHidden) ){
+      sqlite3ErrorMsg(pParse,
+         "table %S has %d columns but %d values were supplied",
+         pTabList->a, pTab->nCol-nHidden, nColumn);
+      goto insert_cleanup;
+    }
   }
   if( pColumn!=0 && nColumn!=pColumn->nId ){
     sqlite3ErrorMsg(pParse, "%d values for %d columns", nColumn, pColumn->nId);
@@ -121507,6 +124021,7 @@ SQLITE_PRIVATE void sqlite3Insert(
   if( (db->flags & SQLITE_CountRows)!=0
    && !pParse->nested
    && !pParse->pTriggerTab
+   && !pParse->bReturning
   ){
     regRowCount = ++pParse->nMem;
     sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount);
@@ -121530,6 +124045,7 @@ SQLITE_PRIVATE void sqlite3Insert(
   }
 #ifndef SQLITE_OMIT_UPSERT
   if( pUpsert ){
+    Upsert *pNx;
     if( IsVirtual(pTab) ){
       sqlite3ErrorMsg(pParse, "UPSERT not implemented for virtual table \"%s\"",
               pTab->zName);
@@ -121543,13 +124059,19 @@ SQLITE_PRIVATE void sqlite3Insert(
       goto insert_cleanup;
     }
     pTabList->a[0].iCursor = iDataCur;
-    pUpsert->pUpsertSrc = pTabList;
-    pUpsert->regData = regData;
-    pUpsert->iDataCur = iDataCur;
-    pUpsert->iIdxCur = iIdxCur;
-    if( pUpsert->pUpsertTarget ){
-      sqlite3UpsertAnalyzeTarget(pParse, pTabList, pUpsert);
-    }
+    pNx = pUpsert;
+    do{
+      pNx->pUpsertSrc = pTabList;
+      pNx->regData = regData;
+      pNx->iDataCur = iDataCur;
+      pNx->iIdxCur = iIdxCur;
+      if( pNx->pUpsertTarget ){
+        if( sqlite3UpsertAnalyzeTarget(pParse, pTabList, pNx) ){
+          goto insert_cleanup;
+        }
+      }
+      pNx = pNx->pNextUpsert;
+    }while( pNx!=0 );
   }
 #endif
@@ -121690,11 +124212,6 @@ SQLITE_PRIVATE void sqlite3Insert(
       sqlite3VdbeAddOp1(v, OP_MustBeInt, regCols); VdbeCoverage(v);
     }
-    /* Cannot have triggers on a virtual table.  If it were possible,
-    ** this block would have to account for hidden column.
-    */
-    assert( !IsVirtual(pTab) );
-
     /* Copy the new data already generated. */
     assert( pTab->nNVCol>0 );
     sqlite3VdbeAddOp3(v, OP_Copy, regRowid+1, regCols+1, pTab->nNVCol-1);
@@ -121793,7 +124310,7 @@ SQLITE_PRIVATE void sqlite3Insert(
     }else
 #endif
     {
-      int isReplace;    /* Set to true if constraints may cause a replace */
+      int isReplace = 0;/* Set to true if constraints may cause a replace */
       int bUseSeek;     /* True to use OPFLAG_SEEKRESULT */
       sqlite3GenerateConstraintChecks(pParse, pTab, aRegIdx, iDataCur, iIdxCur,
           regIns, 0, ipkColumn>=0, onError, endOfLoop, &isReplace, 0, pUpsert
@@ -121813,6 +124330,13 @@ SQLITE_PRIVATE void sqlite3Insert(
           regIns, aRegIdx, 0, appendFlag, bUseSeek
       );
     }
+#ifdef SQLITE_ALLOW_ROWID_IN_VIEW
+  }else if( pParse->bReturning ){
+    /* If there is a RETURNING clause, populate the rowid register with
+    ** constant value -1, in case one or more of the returned expressions
+    ** refer to the "rowid" of the view.  */
+    sqlite3VdbeAddOp2(v, OP_Integer, -1, regRowid);
+#endif
   }
 
   /* Update the count of rows that are inserted
@@ -121849,7 +124373,9 @@ SQLITE_PRIVATE void sqlite3Insert(
     sqlite3VdbeJumpHere(v, addrInsTop);
   }
 
+#ifndef SQLITE_OMIT_XFER_OPT
 insert_end:
+#endif /* SQLITE_OMIT_XFER_OPT */
   /* Update the sqlite_sequence table by storing the content of the
   ** maximum rowid counter values recorded while inserting into
   ** autoincrement tables.
@@ -121864,7 +124390,7 @@ insert_end:
   ** invoke the callback function.
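**
** (Editorial note: with the !pParse->bReturning test added above, this
** count row is skipped entirely when a RETURNING clause owns the statement
** output, and it is now emitted with OP_ChngCntRow rather than OP_ResultRow
** just below.  From the C API the count is still read the usual way, e.g.:
**
**       sqlite3_exec(db, "INSERT INTO t VALUES(1),(2),(3);", 0, 0, 0);
**       printf("%d rows inserted\n", sqlite3_changes(db));
** )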
*/ if( regRowCount ){ - sqlite3VdbeAddOp2(v, OP_ResultRow, regRowCount, 1); + sqlite3VdbeAddOp2(v, OP_ChngCntRow, regRowCount, 1); sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows inserted", SQLITE_STATIC); } @@ -121954,6 +124480,70 @@ SQLITE_PRIVATE int sqlite3ExprReferencesUpdatedColumn( return w.eCode!=0; } +/* +** The sqlite3GenerateConstraintChecks() routine usually wants to visit +** the indexes of a table in the order provided in the Table->pIndex list. +** However, sometimes (rarely - when there is an upsert) it wants to visit +** the indexes in a different order. The following data structures accomplish +** this. +** +** The IndexIterator object is used to walk through all of the indexes +** of a table in either Index.pNext order, or in some other order established +** by an array of IndexListTerm objects. +*/ +typedef struct IndexListTerm IndexListTerm; +typedef struct IndexIterator IndexIterator; +struct IndexIterator { + int eType; /* 0 for Index.pNext list. 1 for an array of IndexListTerm */ + int i; /* Index of the current item from the list */ + union { + struct { /* Use this object for eType==0: A Index.pNext list */ + Index *pIdx; /* The current Index */ + } lx; + struct { /* Use this object for eType==1; Array of IndexListTerm */ + int nIdx; /* Size of the array */ + IndexListTerm *aIdx; /* Array of IndexListTerms */ + } ax; + } u; +}; + +/* When IndexIterator.eType==1, then each index is an array of instances +** of the following object +*/ +struct IndexListTerm { + Index *p; /* The index */ + int ix; /* Which entry in the original Table.pIndex list is this index*/ +}; + +/* Return the first index on the list */ +static Index *indexIteratorFirst(IndexIterator *pIter, int *pIx){ + assert( pIter->i==0 ); + if( pIter->eType ){ + *pIx = pIter->u.ax.aIdx[0].ix; + return pIter->u.ax.aIdx[0].p; + }else{ + *pIx = 0; + return pIter->u.lx.pIdx; + } +} + +/* Return the next index from the list. Return NULL when out of indexes */ +static Index *indexIteratorNext(IndexIterator *pIter, int *pIx){ + if( pIter->eType ){ + int i = ++pIter->i; + if( i>=pIter->u.ax.nIdx ){ + *pIx = i; + return 0; + } + *pIx = pIter->u.ax.aIdx[i].ix; + return pIter->u.ax.aIdx[i].p; + }else{ + ++(*pIx); + pIter->u.lx.pIdx = pIter->u.lx.pIdx->pNext; + return pIter->u.lx.pIdx; + } +} + /* ** Generate code to do constraint checks prior to an INSERT or an UPDATE ** on table pTab. @@ -122062,7 +124652,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( ){ Vdbe *v; /* VDBE under constrution */ Index *pIdx; /* Pointer to one of the indices */ - Index *pPk = 0; /* The PRIMARY KEY index */ + Index *pPk = 0; /* The PRIMARY KEY index for WITHOUT ROWID tables */ sqlite3 *db; /* Database connection */ int i; /* loop counter */ int ix; /* Index loop counter */ @@ -122070,11 +124660,11 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( int onError; /* Conflict resolution strategy */ int seenReplace = 0; /* True if REPLACE is used to resolve INT PK conflict */ int nPkField; /* Number of fields in PRIMARY KEY. 
1 for ROWID tables */ - Index *pUpIdx = 0; /* Index to which to apply the upsert */ - u8 isUpdate; /* True if this is an UPDATE operation */ + Upsert *pUpsertClause = 0; /* The specific ON CONFLICT clause for pIdx */ + u8 isUpdate; /* True if this is an UPDATE operation */ u8 bAffinityDone = 0; /* True if the OP_Affinity operation has been run */ - int upsertBypass = 0; /* Address of Goto to bypass upsert subroutine */ - int upsertJump = 0; /* Address of Goto that jumps into upsert subroutine */ + int upsertIpkReturn = 0; /* Address of Goto at end of IPK uniqueness check */ + int upsertIpkDelay = 0; /* Address of Goto to bypass initial IPK check */ int ipkTop = 0; /* Top of the IPK uniqueness check */ int ipkBottom = 0; /* OP_Goto at the end of the IPK uniqueness check */ /* Variables associated with retesting uniqueness constraints after @@ -122084,10 +124674,11 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( int lblRecheckOk = 0; /* Each recheck jumps to this label if it passes */ Trigger *pTrigger; /* List of DELETE triggers on the table pTab */ int nReplaceTrig = 0; /* Number of replace triggers coded */ + IndexIterator sIdxIter; /* Index iterator */ isUpdate = regOldData!=0; db = pParse->db; - v = sqlite3GetVdbe(pParse); + v = pParse->pVdbe; assert( v!=0 ); assert( pTab->pSelect==0 ); /* This table is not a VIEW */ nCol = pTab->nCol; @@ -122241,7 +124832,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( sqlite3VdbeGoto(v, ignoreDest); }else{ char *zName = pCheck->a[i].zEName; - if( zName==0 ) zName = pTab->zName; + assert( zName!=0 || pParse->db->mallocFailed ); if( onError==OE_Replace ) onError = OE_Abort; /* IMP: R-26383-51744 */ sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_CHECK, onError, zName, P4_TRANSIENT, @@ -122281,19 +124872,63 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( ** list of indexes attached to a table puts all OE_Replace indexes last ** in the list. See sqlite3CreateIndex() for where that happens. */ - + sIdxIter.eType = 0; + sIdxIter.i = 0; + sIdxIter.u.ax.aIdx = 0; /* Silence harmless compiler warning */ + sIdxIter.u.lx.pIdx = pTab->pIndex; if( pUpsert ){ if( pUpsert->pUpsertTarget==0 ){ - /* An ON CONFLICT DO NOTHING clause, without a constraint-target. - ** Make all unique constraint resolution be OE_Ignore */ - assert( pUpsert->pUpsertSet==0 ); - overrideError = OE_Ignore; - pUpsert = 0; - }else if( (pUpIdx = pUpsert->pUpsertIdx)!=0 ){ - /* If the constraint-target uniqueness check must be run first. - ** Jump to that uniqueness check now */ - upsertJump = sqlite3VdbeAddOp0(v, OP_Goto); - VdbeComment((v, "UPSERT constraint goes first")); + /* There is just on ON CONFLICT clause and it has no constraint-target */ + assert( pUpsert->pNextUpsert==0 ); + if( pUpsert->isDoUpdate==0 ){ + /* A single ON CONFLICT DO NOTHING clause, without a constraint-target. + ** Make all unique constraint resolution be OE_Ignore */ + overrideError = OE_Ignore; + pUpsert = 0; + }else{ + /* A single ON CONFLICT DO UPDATE. Make all resolutions OE_Update */ + overrideError = OE_Update; + } + }else if( pTab->pIndex!=0 ){ + /* Otherwise, we'll need to run the IndexListTerm array version of the + ** iterator to ensure that all of the ON CONFLICT conditions are + ** checked first and in order. 
*/ + int nIdx, jj; + u64 nByte; + Upsert *pTerm; + u8 *bUsed; + for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){ + assert( aRegIdx[nIdx]>0 ); + } + sIdxIter.eType = 1; + sIdxIter.u.ax.nIdx = nIdx; + nByte = (sizeof(IndexListTerm)+1)*nIdx + nIdx; + sIdxIter.u.ax.aIdx = sqlite3DbMallocZero(db, nByte); + if( sIdxIter.u.ax.aIdx==0 ) return; /* OOM */ + bUsed = (u8*)&sIdxIter.u.ax.aIdx[nIdx]; + pUpsert->pToFree = sIdxIter.u.ax.aIdx; + for(i=0, pTerm=pUpsert; pTerm; pTerm=pTerm->pNextUpsert){ + if( pTerm->pUpsertTarget==0 ) break; + if( pTerm->pUpsertIdx==0 ) continue; /* Skip ON CONFLICT for the IPK */ + jj = 0; + pIdx = pTab->pIndex; + while( ALWAYS(pIdx!=0) && pIdx!=pTerm->pUpsertIdx ){ + pIdx = pIdx->pNext; + jj++; + } + if( bUsed[jj] ) continue; /* Duplicate ON CONFLICT clause ignored */ + bUsed[jj] = 1; + sIdxIter.u.ax.aIdx[i].p = pIdx; + sIdxIter.u.ax.aIdx[i].ix = jj; + i++; + } + for(jj=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, jj++){ + if( bUsed[jj] ) continue; + sIdxIter.u.ax.aIdx[i].p = pIdx; + sIdxIter.u.ax.aIdx[i].ix = jj; + i++; + } + assert( i==nIdx ); } } @@ -122356,11 +124991,20 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( } /* figure out whether or not upsert applies in this case */ - if( pUpsert && pUpsert->pUpsertIdx==0 ){ - if( pUpsert->pUpsertSet==0 ){ - onError = OE_Ignore; /* DO NOTHING is the same as INSERT OR IGNORE */ - }else{ - onError = OE_Update; /* DO UPDATE */ + if( pUpsert ){ + pUpsertClause = sqlite3UpsertOfIndex(pUpsert,0); + if( pUpsertClause!=0 ){ + if( pUpsertClause->isDoUpdate==0 ){ + onError = OE_Ignore; /* DO NOTHING is the same as INSERT OR IGNORE */ + }else{ + onError = OE_Update; /* DO UPDATE */ + } + } + if( pUpsertClause!=pUpsert ){ + /* The first ON CONFLICT clause has a conflict target other than + ** the IPK. We have to jump ahead to that first ON CONFLICT clause + ** and then come back here and deal with the IPK afterwards */ + upsertIpkDelay = sqlite3VdbeAddOp0(v, OP_Goto); } } @@ -122370,7 +125014,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( ** the UNIQUE constraints have run. */ if( onError==OE_Replace /* IPK rule is REPLACE */ - && onError!=overrideError /* Rules for other contraints are different */ + && onError!=overrideError /* Rules for other constraints are different */ && pTab->pIndex /* There exist other constraints */ ){ ipkTop = sqlite3VdbeAddOp0(v, OP_Goto)+1; @@ -122467,7 +125111,9 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( } } sqlite3VdbeResolveLabel(v, addrRowidOk); - if( ipkTop ){ + if( pUpsert && pUpsertClause!=pUpsert ){ + upsertIpkReturn = sqlite3VdbeAddOp0(v, OP_Goto); + }else if( ipkTop ){ ipkBottom = sqlite3VdbeAddOp0(v, OP_Goto); sqlite3VdbeJumpHere(v, ipkTop-1); } @@ -122480,7 +125126,10 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( ** This loop also handles the case of the PRIMARY KEY index for a ** WITHOUT ROWID table. 
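**
** (Editorial example, not upstream text: the multi-clause ON CONFLICT
** grammar this patch supports is what the IndexListTerm ordering above
** exists to serve.
**
**       CREATE TABLE t(a INTEGER PRIMARY KEY, b UNIQUE, c);
**       INSERT INTO t VALUES(1,2,3)
**         ON CONFLICT(b) DO UPDATE SET c=excluded.c
**         ON CONFLICT(a) DO NOTHING;
**
** The clause targeting b must be tested before the rowid (IPK) check, so
** the iterator visits the index on b first, and the upsertIpkDelay /
** upsertIpkReturn jumps above splice the IPK test back in afterwards.)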
*/ - for(ix=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, ix++){ + for(pIdx = indexIteratorFirst(&sIdxIter, &ix); + pIdx; + pIdx = indexIteratorNext(&sIdxIter, &ix) + ){ int regIdx; /* Range of registers hold conent for pIdx */ int regR; /* Range of registers holding conflicting PK */ int iThisCur; /* Cursor for this UNIQUE index */ @@ -122488,15 +125137,14 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( int addrConflictCk; /* First opcode in the conflict check logic */ if( aRegIdx[ix]==0 ) continue; /* Skip indices that do not change */ - if( pUpIdx==pIdx ){ - addrUniqueOk = upsertJump+1; - upsertBypass = sqlite3VdbeGoto(v, 0); - VdbeComment((v, "Skip upsert subroutine")); - sqlite3VdbeJumpHere(v, upsertJump); - }else{ - addrUniqueOk = sqlite3VdbeMakeLabel(pParse); + if( pUpsert ){ + pUpsertClause = sqlite3UpsertOfIndex(pUpsert, pIdx); + if( upsertIpkDelay && pUpsertClause==pUpsert ){ + sqlite3VdbeJumpHere(v, upsertIpkDelay); + } } - if( bAffinityDone==0 && (pUpIdx==0 || pUpIdx==pIdx) ){ + addrUniqueOk = sqlite3VdbeMakeLabel(pParse); + if( bAffinityDone==0 ){ sqlite3TableAffinity(v, pTab, regNewData+1); bAffinityDone = 1; } @@ -122567,8 +125215,8 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( } /* Figure out if the upsert clause applies to this index */ - if( pUpIdx==pIdx ){ - if( pUpsert->pUpsertSet==0 ){ + if( pUpsertClause ){ + if( pUpsertClause->isDoUpdate==0 ){ onError = OE_Ignore; /* DO NOTHING is the same as INSERT OR IGNORE */ }else{ onError = OE_Update; /* DO UPDATE */ @@ -122606,7 +125254,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( regIdx, pIdx->nKeyCol); VdbeCoverage(v); /* Generate code to handle collisions */ - regR = (pIdx==pPk) ? regIdx : sqlite3GetTempRange(pParse, nPkField); + regR = pIdx==pPk ? regIdx : sqlite3GetTempRange(pParse, nPkField); if( isUpdate || onError==OE_Replace ){ if( HasRowid(pTab) ){ sqlite3VdbeAddOp2(v, OP_IdxRowid, iThisCur, regR); @@ -122758,13 +125406,16 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( break; } } - if( pUpIdx==pIdx ){ - sqlite3VdbeGoto(v, upsertJump+1); - sqlite3VdbeJumpHere(v, upsertBypass); - }else{ - sqlite3VdbeResolveLabel(v, addrUniqueOk); - } + sqlite3VdbeResolveLabel(v, addrUniqueOk); if( regR!=regIdx ) sqlite3ReleaseTempRange(pParse, regR, nPkField); + if( pUpsertClause + && upsertIpkReturn + && sqlite3UpsertNextIsIPK(pUpsertClause) + ){ + sqlite3VdbeGoto(v, upsertIpkDelay+1); + sqlite3VdbeJumpHere(v, upsertIpkReturn); + upsertIpkReturn = 0; + } } /* If the IPK constraint is a REPLACE, run it last */ @@ -122830,6 +125481,32 @@ SQLITE_PRIVATE void sqlite3SetMakeRecordP5(Vdbe *v, Table *pTab){ } #endif +/* +** Table pTab is a WITHOUT ROWID table that is being written to. The cursor +** number is iCur, and register regData contains the new record for the +** PK index. This function adds code to invoke the pre-update hook, +** if one is registered. 
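**
** (Editorial sketch of the consumer side of that hook, using only the
** public API; it requires a build with SQLITE_ENABLE_PREUPDATE_HOOK, and
** xAudit is an illustrative name, not part of this patch:
**
**       static void xAudit(void *pCtx, sqlite3 *db, int op,
**                          const char *zDb, const char *zTab,
**                          sqlite3_int64 iKey1, sqlite3_int64 iKey2){
**         fprintf(stderr, "%s on %s.%s\n",
**                 op==SQLITE_INSERT ? "INSERT" :
**                 op==SQLITE_DELETE ? "DELETE" : "UPDATE", zDb, zTab);
**       }
**       ...
**       sqlite3_preupdate_hook(db, xAudit, 0);
**
** For WITHOUT ROWID tables there is no rowid-keyed OP_Insert to fire the
** hook, so the helper below emits an OPFLAG_ISNOOP OP_Insert solely to
** invoke it.)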
+*/ +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +static void codeWithoutRowidPreupdate( + Parse *pParse, /* Parse context */ + Table *pTab, /* Table being updated */ + int iCur, /* Cursor number for table */ + int regData /* Data containing new record */ +){ + Vdbe *v = pParse->pVdbe; + int r = sqlite3GetTempReg(pParse); + assert( !HasRowid(pTab) ); + assert( 0==(pParse->db->mDbFlags & DBFLAG_Vacuum) || CORRUPT_DB ); + sqlite3VdbeAddOp2(v, OP_Integer, 0, r); + sqlite3VdbeAddOp4(v, OP_Insert, iCur, regData, r, (char*)pTab, P4_TABLE); + sqlite3VdbeChangeP5(v, OPFLAG_ISNOOP); + sqlite3ReleaseTempReg(pParse, r); +} +#else +# define codeWithoutRowidPreupdate(a,b,c,d) +#endif + /* ** This routine generates code to finish the INSERT or UPDATE operation ** that was started by a prior call to sqlite3GenerateConstraintChecks. @@ -122860,7 +125537,7 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion( || update_flags==(OPFLAG_ISUPDATE|OPFLAG_SAVEPOSITION) ); - v = sqlite3GetVdbe(pParse); + v = pParse->pVdbe; assert( v!=0 ); assert( pTab->pSelect==0 ); /* This table is not a VIEW */ for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ @@ -122878,17 +125555,9 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion( assert( pParse->nested==0 ); pik_flags |= OPFLAG_NCHANGE; pik_flags |= (update_flags & OPFLAG_SAVEPOSITION); -#ifdef SQLITE_ENABLE_PREUPDATE_HOOK if( update_flags==0 ){ - int r = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp2(v, OP_Integer, 0, r); - sqlite3VdbeAddOp4(v, OP_Insert, - iIdxCur+i, aRegIdx[i], r, (char*)pTab, P4_TABLE - ); - sqlite3VdbeChangeP5(v, OPFLAG_ISNOOP); - sqlite3ReleaseTempReg(pParse, r); + codeWithoutRowidPreupdate(pParse, pTab, iIdxCur+i, aRegIdx[i]); } -#endif } sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iIdxCur+i, aRegIdx[i], aRegIdx[i]+1, @@ -122961,7 +125630,7 @@ SQLITE_PRIVATE int sqlite3OpenTableAndIndices( return 0; } iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); - v = sqlite3GetVdbe(pParse); + v = pParse->pVdbe; assert( v!=0 ); if( iBase<0 ) iBase = pParse->nTab; iDataCur = iBase++; @@ -123086,7 +125755,7 @@ static int xferOptimization( ExprList *pEList; /* The result set of the SELECT */ Table *pSrc; /* The table in the FROM clause of SELECT */ Index *pSrcIdx, *pDestIdx; /* Source and destination indices */ - struct SrcList_item *pItem; /* An element of pSelect->pSrc */ + SrcItem *pItem; /* An element of pSelect->pSrc */ int i; /* Loop counter */ int iDbSrc; /* The database of pSrc */ int iSrc, iDest; /* Cursors from source and destination */ @@ -123303,6 +125972,7 @@ static int xferOptimization( iDest = pParse->nTab++; regAutoinc = autoIncBegin(pParse, iDbDest, pDest); regData = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp2(v, OP_Null, 0, regData); regRowid = sqlite3GetTempReg(pParse); sqlite3OpenTable(pParse, iDest, iDbDest, pDest, OP_OpenWrite); assert( HasRowid(pDest) || destHasUniqueIdx ); @@ -123338,11 +126008,13 @@ static int xferOptimization( emptySrcTest = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v); if( pDest->iPKey>=0 ){ addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid); - sqlite3VdbeVerifyAbortable(v, onError); - addr2 = sqlite3VdbeAddOp3(v, OP_NotExists, iDest, 0, regRowid); - VdbeCoverage(v); - sqlite3RowidConstraint(pParse, onError, pDest); - sqlite3VdbeJumpHere(v, addr2); + if( (db->mDbFlags & DBFLAG_Vacuum)==0 ){ + sqlite3VdbeVerifyAbortable(v, onError); + addr2 = sqlite3VdbeAddOp3(v, OP_NotExists, iDest, 0, regRowid); + VdbeCoverage(v); + sqlite3RowidConstraint(pParse, onError, pDest); + sqlite3VdbeJumpHere(v, addr2); + } 
autoIncStep(pParse, regAutoinc, regRowid); }else if( pDest->pIndex==0 && !(db->mDbFlags & DBFLAG_VacuumInto) ){ addr1 = sqlite3VdbeAddOp2(v, OP_NewRowid, iDest, regRowid); @@ -123350,16 +126022,28 @@ static int xferOptimization( addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid); assert( (pDest->tabFlags & TF_Autoincrement)==0 ); } + if( db->mDbFlags & DBFLAG_Vacuum ){ sqlite3VdbeAddOp1(v, OP_SeekEnd, iDest); - insFlags = OPFLAG_APPEND|OPFLAG_USESEEKRESULT; + insFlags = OPFLAG_APPEND|OPFLAG_USESEEKRESULT|OPFLAG_PREFORMAT; }else{ - insFlags = OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND; + insFlags = OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND|OPFLAG_PREFORMAT; + } +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + if( (db->mDbFlags & DBFLAG_Vacuum)==0 ){ + sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1); + insFlags &= ~OPFLAG_PREFORMAT; + }else +#endif + { + sqlite3VdbeAddOp3(v, OP_RowCell, iDest, iSrc, regRowid); + } + sqlite3VdbeAddOp3(v, OP_Insert, iDest, regData, regRowid); + if( (db->mDbFlags & DBFLAG_Vacuum)==0 ){ + sqlite3VdbeChangeP4(v, -1, (char*)pDest, P4_TABLE); } - sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1); - sqlite3VdbeAddOp4(v, OP_Insert, iDest, regData, regRowid, - (char*)pDest, P4_TABLE); sqlite3VdbeChangeP5(v, insFlags); + sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0); sqlite3VdbeAddOp2(v, OP_Close, iDest, 0); @@ -123401,13 +126085,22 @@ static int xferOptimization( if( sqlite3_stricmp(sqlite3StrBINARY, zColl) ) break; } if( i==pSrcIdx->nColumn ){ - idxInsFlags = OPFLAG_USESEEKRESULT; + idxInsFlags = OPFLAG_USESEEKRESULT|OPFLAG_PREFORMAT; sqlite3VdbeAddOp1(v, OP_SeekEnd, iDest); + sqlite3VdbeAddOp2(v, OP_RowCell, iDest, iSrc); } }else if( !HasRowid(pSrc) && pDestIdx->idxType==SQLITE_IDXTYPE_PRIMARYKEY ){ idxInsFlags |= OPFLAG_NCHANGE; } - sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1); + if( idxInsFlags!=(OPFLAG_USESEEKRESULT|OPFLAG_PREFORMAT) ){ + sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1); + if( (db->mDbFlags & DBFLAG_Vacuum)==0 + && !HasRowid(pDest) + && IsPrimaryKeyIndex(pDestIdx) + ){ + codeWithoutRowidPreupdate(pParse, pDest, iDest, regData); + } + } sqlite3VdbeAddOp2(v, OP_IdxInsert, iDest, regData); sqlite3VdbeChangeP5(v, idxInsFlags|OPFLAG_APPEND); sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1+1); VdbeCoverage(v); @@ -123933,6 +126626,8 @@ struct sqlite3_api_routines { int,const char**); void (*free_filename)(char*); sqlite3_file *(*database_file_object)(const char*); + /* Version 3.34.0 and later */ + int (*txn_state)(sqlite3*,const char*); }; /* @@ -124237,6 +126932,8 @@ typedef int (*sqlite3_loadext_entry)( #define sqlite3_create_filename sqlite3_api->create_filename #define sqlite3_free_filename sqlite3_api->free_filename #define sqlite3_database_file_object sqlite3_api->database_file_object +/* Version 3.34.0 and later */ +#define sqlite3_txn_state sqlite3_api->txn_state #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) @@ -124719,6 +127416,8 @@ static const sqlite3_api_routines sqlite3Apis = { sqlite3_create_filename, sqlite3_free_filename, sqlite3_database_file_object, + /* Version 3.34.0 and later */ + sqlite3_txn_state, }; /* True if x is the directory separator character @@ -124754,7 +127453,7 @@ static int sqlite3LoadExtension( const char *zEntry; char *zAltEntry = 0; void **aHandle; - u64 nMsg = 300 + sqlite3Strlen30(zFile); + u64 nMsg = strlen(zFile); int ii; int rc; @@ -124788,6 +127487,12 @@ 
static int sqlite3LoadExtension( zEntry = zProc ? zProc : "sqlite3_extension_init"; + /* tag-20210611-1. Some dlopen() implementations will segfault if given + ** an oversize filename. Most filesystems have a pathname limit of 4K, + ** so limit the extension filename length to about twice that. + ** https://sqlite.org/forum/forumpost/08a0d6d9bf */ + if( nMsg>SQLITE_MAX_PATHLEN ) goto extension_not_found; + handle = sqlite3OsDlOpen(pVfs, zFile); #if SQLITE_OS_UNIX || SQLITE_OS_WIN for(ii=0; iiaExtension[db->nExtension++] = handle; return SQLITE_OK; + +extension_not_found: + if( pzErrMsg ){ + nMsg += 300; + *pzErrMsg = zErrmsg = sqlite3_malloc64(nMsg); + if( zErrmsg ){ + assert( nMsg<0x7fffffff ); /* zErrmsg would be NULL if not so */ + sqlite3_snprintf((int)nMsg, zErrmsg, + "unable to open shared library [%.*s]", SQLITE_MAX_PATHLEN, zFile); + sqlite3OsDlError(pVfs, nMsg-1, zErrmsg); + } + } + return SQLITE_ERROR; } SQLITE_API int sqlite3_load_extension( sqlite3 *db, /* Load the extension into this database connection */ @@ -125879,7 +128588,9 @@ static int getTempStore(const char *z){ static int invalidateTempStorage(Parse *pParse){ sqlite3 *db = pParse->db; if( db->aDb[1].pBt!=0 ){ - if( !db->autoCommit || sqlite3BtreeIsInReadTrans(db->aDb[1].pBt) ){ + if( !db->autoCommit + || sqlite3BtreeTxnState(db->aDb[1].pBt)!=SQLITE_TXN_NONE + ){ sqlite3ErrorMsg(pParse, "temporary storage cannot be changed " "from within a transaction"); return SQLITE_ERROR; @@ -127199,7 +129910,7 @@ SQLITE_PRIVATE void sqlite3Pragma( aiCols = 0; if( pParent ){ x = sqlite3FkLocateIndex(pParse, pParent, pFK, &pIdx, &aiCols); - assert( x==0 ); + assert( x==0 || db->mallocFailed ); } addrOk = sqlite3VdbeMakeLabel(pParse); @@ -127224,7 +129935,7 @@ SQLITE_PRIVATE void sqlite3Pragma( int jmp = sqlite3VdbeCurrentAddr(v)+2; sqlite3VdbeAddOp3(v, OP_SeekRowid, i, jmp, regRow); VdbeCoverage(v); sqlite3VdbeGoto(v, addrOk); - assert( pFK->nCol==1 ); + assert( pFK->nCol==1 || db->mallocFailed ); } /* Generate code to report an FK violation to the caller. */ @@ -127715,7 +130426,7 @@ SQLITE_PRIVATE void sqlite3Pragma( ** Checkpoint the database. */ case PragTyp_WAL_CHECKPOINT: { - int iBt = (pId2->z?iDb:SQLITE_MAX_ATTACHED); + int iBt = (pId2->z?iDb:SQLITE_MAX_DB); int eMode = SQLITE_CHECKPOINT_PASSIVE; if( zRight ){ if( sqlite3StrICmp(zRight, "full")==0 ){ @@ -128363,7 +131074,7 @@ SQLITE_PRIVATE Module *sqlite3PragmaVtabRegister(sqlite3 *db, const char *zName) */ static void corruptSchema( InitData *pData, /* Initialization context */ - const char *zObj, /* Object being parsed at the point of error */ + char **azObj, /* Type and name of object being parsed */ const char *zExtra /* Error information */ ){ sqlite3 *db = pData->db; @@ -128371,14 +131082,18 @@ static void corruptSchema( pData->rc = SQLITE_NOMEM_BKPT; }else if( pData->pzErrMsg[0]!=0 ){ /* A error message has already been generated. Do not overwrite it */ - }else if( pData->mInitFlags & INITFLAG_AlterTable ){ - *pData->pzErrMsg = sqlite3DbStrDup(db, zExtra); + }else if( pData->mInitFlags & (INITFLAG_AlterRename|INITFLAG_AlterDrop) ){ + *pData->pzErrMsg = sqlite3MPrintf(db, + "error in %s %s after %s: %s", azObj[0], azObj[1], + (pData->mInitFlags & INITFLAG_AlterRename) ? "rename" : "drop column", + zExtra + ); pData->rc = SQLITE_ERROR; }else if( db->flags & SQLITE_WriteSchema ){ pData->rc = SQLITE_CORRUPT_BKPT; }else{ char *z; - if( zObj==0 ) zObj = "?"; + const char *zObj = azObj[1] ? 
azObj[1] : "?"; z = sqlite3MPrintf(db, "malformed database schema (%s)", zObj); if( zExtra && zExtra[0] ) z = sqlite3MPrintf(db, "%z - %s", z, zExtra); *pData->pzErrMsg = z; @@ -128434,21 +131149,28 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char UNUSED_PARAMETER2(NotUsed, argc); assert( sqlite3_mutex_held(db->mutex) ); db->mDbFlags |= DBFLAG_EncodingFixed; + if( argv==0 ) return 0; /* Might happen if EMPTY_RESULT_CALLBACKS are on */ pData->nInitRow++; if( db->mallocFailed ){ - corruptSchema(pData, argv[1], 0); + corruptSchema(pData, argv, 0); return 1; } assert( iDb>=0 && iDbnDb ); - if( argv==0 ) return 0; /* Might happen if EMPTY_RESULT_CALLBACKS are on */ if( argv[3]==0 ){ - corruptSchema(pData, argv[1], 0); - }else if( sqlite3_strnicmp(argv[4],"create ",7)==0 ){ + corruptSchema(pData, argv, 0); + }else if( argv[4] + && 'c'==sqlite3UpperToLower[(unsigned char)argv[4][0]] + && 'r'==sqlite3UpperToLower[(unsigned char)argv[4][1]] ){ /* Call the parser to process a CREATE TABLE, INDEX or VIEW. ** But because db->init.busy is set to 1, no VDBE code is generated ** or executed. All the parser does is build the internal data ** structures that describe the table, index, or view. + ** + ** No other valid SQL statement, other than the variable CREATE statements, + ** can begin with the letters "C" and "R". Thus, it is not possible run + ** any other kind of statement while parsing the schema, even a corrupt + ** schema. */ int rc; u8 saved_iDb = db->init.iDb; @@ -128461,7 +131183,7 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char || (db->init.newTnum>pData->mxPage && pData->mxPage>0) ){ if( sqlite3Config.bExtraSchemaChecks ){ - corruptSchema(pData, argv[1], "invalid rootpage"); + corruptSchema(pData, argv, "invalid rootpage"); } } db->init.orphanTrigger = 0; @@ -128480,13 +131202,13 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char if( rc==SQLITE_NOMEM ){ sqlite3OomFault(db); }else if( rc!=SQLITE_INTERRUPT && (rc&0xFF)!=SQLITE_LOCKED ){ - corruptSchema(pData, argv[1], sqlite3_errmsg(db)); + corruptSchema(pData, argv, sqlite3_errmsg(db)); } } } sqlite3_finalize(pStmt); }else if( argv[1]==0 || (argv[4]!=0 && argv[4][0]!=0) ){ - corruptSchema(pData, argv[1], 0); + corruptSchema(pData, argv, 0); }else{ /* If the SQL column is blank it means this is an index that ** was created to be the PRIMARY KEY or to fulfill a UNIQUE @@ -128497,7 +131219,7 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char Index *pIndex; pIndex = sqlite3FindIndex(db, argv[1], db->aDb[iDb].zDbSName); if( pIndex==0 ){ - corruptSchema(pData, argv[1], "orphan index"); + corruptSchema(pData, argv, "orphan index"); }else if( sqlite3GetUInt32(argv[3],&pIndex->tnum)==0 || pIndex->tnum<2 @@ -128505,7 +131227,7 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char || sqlite3IndexHasDuplicateRootPage(pIndex) ){ if( sqlite3Config.bExtraSchemaChecks ){ - corruptSchema(pData, argv[1], "invalid rootpage"); + corruptSchema(pData, argv, "invalid rootpage"); } } } @@ -128582,7 +131304,7 @@ SQLITE_PRIVATE int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg, u32 mFl ** on the b-tree database, open one now. If a transaction is opened, it ** will be closed before this function returns. 
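**
** (Editorial aside: the sqlite3BtreeTxnState()/SQLITE_TXN_* interface used
** here is mirrored by the public sqlite3_txn_state() API that this patch
** also exports through the extension function table.  A minimal sketch of
** its behavior, assuming an open connection db:
**
**       sqlite3_exec(db, "BEGIN; SELECT count(*) FROM sqlite_schema;",
**                    0, 0, 0);
**       assert( sqlite3_txn_state(db, "main")==SQLITE_TXN_READ );
**
** A write statement would move the state to SQLITE_TXN_WRITE, and COMMIT
** or ROLLBACK returns it to SQLITE_TXN_NONE.)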
*/ sqlite3BtreeEnter(pDb->pBt); - if( !sqlite3BtreeIsInReadTrans(pDb->pBt) ){ + if( sqlite3BtreeTxnState(pDb->pBt)==SQLITE_TXN_NONE ){ rc = sqlite3BtreeBeginTrans(pDb->pBt, 0, 0); if( rc!=SQLITE_OK ){ sqlite3SetString(pzErrMsg, db, sqlite3ErrStr(rc)); @@ -128708,18 +131430,22 @@ SQLITE_PRIVATE int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg, u32 mFl } #endif } + assert( pDb == &(db->aDb[iDb]) ); if( db->mallocFailed ){ rc = SQLITE_NOMEM_BKPT; sqlite3ResetAllSchemasOfConnection(db); - } + pDb = &db->aDb[iDb]; + }else if( rc==SQLITE_OK || (db->flags&SQLITE_NoSchemaError)){ - /* Black magic: If the SQLITE_NoSchemaError flag is set, then consider - ** the schema loaded, even if errors occurred. In this situation the - ** current sqlite3_prepare() operation will fail, but the following one - ** will attempt to compile the supplied statement against whatever subset - ** of the schema was loaded before the error occurred. The primary - ** purpose of this is to allow access to the sqlite_schema table - ** even when its contents have been corrupted. + /* Hack: If the SQLITE_NoSchemaError flag is set, then consider + ** the schema loaded, even if errors (other than OOM) occurred. In + ** this situation the current sqlite3_prepare() operation will fail, + ** but the following one will attempt to compile the supplied statement + ** against whatever subset of the schema was loaded before the error + ** occurred. + ** + ** The primary purpose of this is to allow access to the sqlite_schema + ** table even when its contents have been corrupted. */ DbSetProperty(db, iDb, DB_SchemaLoaded); rc = SQLITE_OK; @@ -128825,10 +131551,11 @@ static void schemaIsValid(Parse *pParse){ /* If there is not already a read-only (or read-write) transaction opened ** on the b-tree database, open one now. If a transaction is opened, it ** will be closed immediately after reading the meta-value. */ - if( !sqlite3BtreeIsInReadTrans(pBt) ){ + if( sqlite3BtreeTxnState(pBt)==SQLITE_TXN_NONE ){ rc = sqlite3BtreeBeginTrans(pBt, 0, 0); if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ sqlite3OomFault(db); + pParse->rc = SQLITE_NOMEM; } if( rc!=SQLITE_OK ) return; openedTransaction = 1; @@ -128885,28 +131612,21 @@ SQLITE_PRIVATE int sqlite3SchemaToIndex(sqlite3 *db, Schema *pSchema){ return i; } -/* -** Deallocate a single AggInfo object -*/ -static void agginfoFree(sqlite3 *db, AggInfo *p){ - sqlite3DbFree(db, p->aCol); - sqlite3DbFree(db, p->aFunc); - sqlite3DbFree(db, p); -} - /* ** Free all memory allocations in the pParse object */ SQLITE_PRIVATE void sqlite3ParserReset(Parse *pParse){ sqlite3 *db = pParse->db; - AggInfo *pThis = pParse->pAggList; - while( pThis ){ - AggInfo *pNext = pThis->pNext; - agginfoFree(db, pThis); - pThis = pNext; + while( pParse->pCleanup ){ + ParseCleanup *pCleanup = pParse->pCleanup; + pParse->pCleanup = pCleanup->pNext; + pCleanup->xCleanup(db, pCleanup->pPtr); + sqlite3DbFreeNN(db, pCleanup); } sqlite3DbFree(db, pParse->aLabel); - sqlite3ExprListDelete(db, pParse->pConstExpr); + if( pParse->pConstExpr ){ + sqlite3ExprListDelete(db, pParse->pConstExpr); + } if( db ){ assert( db->lookaside.bDisable >= pParse->disableLookaside ); db->lookaside.bDisable -= pParse->disableLookaside; @@ -128915,6 +131635,55 @@ SQLITE_PRIVATE void sqlite3ParserReset(Parse *pParse){ pParse->disableLookaside = 0; } +/* +** Add a new cleanup operation to a Parser. The cleanup should happen when +** the parser object is destroyed. But, beware: the cleanup might happen +** immediately. 
+**
+** Use this mechanism for uncommon cleanups.  There is a higher setup
+** cost for this mechanism (an extra malloc), so it should not be used
+** for common cleanups that happen on most calls.  But for less
+** common cleanups, we save a single NULL-pointer comparison in
+** sqlite3ParserReset(), which reduces the total CPU cycle count.
+**
+** If a memory allocation error occurs, then the cleanup happens immediately.
+** When either SQLITE_DEBUG or SQLITE_COVERAGE_TEST is defined, the
+** pParse->earlyCleanup flag is set in that case.  Calling code should verify
+** that test cases exist for which this happens, to guard against possible
+** use-after-free errors following an OOM.  The preferred way to do this is
+** to immediately follow the call to this routine with:
+**
+**       testcase( pParse->earlyCleanup );
+**
+** This routine returns a copy of its pPtr input (the third parameter)
+** except if an early cleanup occurs, in which case it returns NULL.  So
+** another way to check for early cleanup is to check the return value.
+** Or, stop using the pPtr parameter with this call and use only its
+** return value thereafter.  Something like this:
+**
+**       pObj = sqlite3ParserAddCleanup(pParse, destructor, pObj);
+*/
+SQLITE_PRIVATE void *sqlite3ParserAddCleanup(
+  Parse *pParse,                      /* Destroy when this Parser finishes */
+  void (*xCleanup)(sqlite3*,void*),   /* The cleanup routine */
+  void *pPtr                          /* Pointer to object to be cleaned up */
+){
+  ParseCleanup *pCleanup = sqlite3DbMallocRaw(pParse->db, sizeof(*pCleanup));
+  if( pCleanup ){
+    pCleanup->pNext = pParse->pCleanup;
+    pParse->pCleanup = pCleanup;
+    pCleanup->pPtr = pPtr;
+    pCleanup->xCleanup = xCleanup;
+  }else{
+    xCleanup(pParse->db, pPtr);
+    pPtr = 0;
+#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST)
+    pParse->earlyCleanup = 1;
+#endif
+  }
+  return pPtr;
+}
+
 /*
 ** Compile the UTF-8 encoded SQL statement zSql into a statement handle.
 */
@@ -129013,12 +131782,6 @@ static int sqlite3Prepare(
   }
   assert( 0==sParse.nQueryLoop );
 
-  if( sParse.rc==SQLITE_DONE ){
-    sParse.rc = SQLITE_OK;
-  }
-  if( sParse.checkSchema ){
-    schemaIsValid(&sParse);
-  }
   if( pzTail ){
     *pzTail = sParse.zTail;
   }
@@ -129028,21 +131791,30 @@ static int sqlite3Prepare(
   }
   if( db->mallocFailed ){
     sParse.rc = SQLITE_NOMEM_BKPT;
+    sParse.checkSchema = 0;
   }
-  rc = sParse.rc;
-  if( rc!=SQLITE_OK ){
-    if( sParse.pVdbe ) sqlite3VdbeFinalize(sParse.pVdbe);
-    assert(!(*ppStmt));
+  if( sParse.rc!=SQLITE_OK && sParse.rc!=SQLITE_DONE ){
+    if( sParse.checkSchema ){
+      schemaIsValid(&sParse);
+    }
+    if( sParse.pVdbe ){
+      sqlite3VdbeFinalize(sParse.pVdbe);
+    }
+    assert( 0==(*ppStmt) );
+    rc = sParse.rc;
+    if( zErrMsg ){
+      sqlite3ErrorWithMsg(db, rc, "%s", zErrMsg);
+      sqlite3DbFree(db, zErrMsg);
+    }else{
+      sqlite3Error(db, rc);
+    }
   }else{
+    assert( zErrMsg==0 );
     *ppStmt = (sqlite3_stmt*)sParse.pVdbe;
+    rc = SQLITE_OK;
+    sqlite3ErrorClear(db);
   }
-  if( zErrMsg ){
-    sqlite3ErrorWithMsg(db, rc, "%s", zErrMsg);
-    sqlite3DbFree(db, zErrMsg);
-  }else{
-    sqlite3Error(db, rc);
-  }
 
   /* Delete any TriggerPrg structures allocated while parsing this statement.
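**
** (Worked example of the cleanup mechanism added above, for orientation:
** elsewhere in this patch, autoIncBegin() registers its AutoincInfo with
**
**       sqlite3ParserAddCleanup(pToplevel, sqlite3DbFree, pInfo);
**       testcase( pParse->earlyCleanup );
**
** so the allocation is released by the pCleanup loop in sqlite3ParserReset()
** even on error paths, rather than by ad-hoc teardown like this TriggerPrg
** walk.)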
*/ while( sParse.pTriggerPrg ){ @@ -129088,6 +131860,7 @@ static int sqlite3LockAndPrepare( sqlite3BtreeLeaveAll(db); rc = sqlite3ApiExit(db, rc); assert( (rc&db->errMask)==rc ); + db->busyHandler.nBusy = 0; sqlite3_mutex_leave(db->mutex); return rc; } @@ -129387,12 +132160,16 @@ static void clearSelect(sqlite3 *db, Select *p, int bFree){ sqlite3ExprDelete(db, p->pHaving); sqlite3ExprListDelete(db, p->pOrderBy); sqlite3ExprDelete(db, p->pLimit); + if( OK_IF_ALWAYS_TRUE(p->pWith) ) sqlite3WithDelete(db, p->pWith); #ifndef SQLITE_OMIT_WINDOWFUNC if( OK_IF_ALWAYS_TRUE(p->pWinDefn) ){ sqlite3WindowListDelete(db, p->pWinDefn); } + while( p->pWin ){ + assert( p->pWin->ppThis==&p->pWin ); + sqlite3WindowUnlinkFromSelect(p->pWin); + } #endif - if( OK_IF_ALWAYS_TRUE(p->pWith) ) sqlite3WithDelete(db, p->pWith); if( bFree ) sqlite3DbFreeNN(db, p); p = pPrior; bFree = 1; @@ -129564,7 +132341,7 @@ SQLITE_PRIVATE int sqlite3JoinType(Parse *pParse, Token *pA, Token *pB, Token *p ** Return the index of a column in a table. Return -1 if the column ** is not contained in the table. */ -static int columnIndex(Table *pTab, const char *zCol){ +SQLITE_PRIVATE int sqlite3ColumnIndex(Table *pTab, const char *zCol){ int i; u8 h = sqlite3StrIHash(zCol); Column *pCol; @@ -129596,7 +132373,7 @@ static int tableAndColumnIndex( assert( (piTab==0)==(piCol==0) ); /* Both or neither are NULL */ for(i=0; ia[i].pTab, zCol); + iCol = sqlite3ColumnIndex(pSrc->a[i].pTab, zCol); if( iCol>=0 && (bIgnoreHidden==0 || IsHiddenColumn(&pSrc->a[i].pTab->aCol[iCol])==0) ){ @@ -129649,7 +132426,7 @@ static void addWhereTerm( ExprSetProperty(pEq, EP_FromJoin); assert( !ExprHasProperty(pEq, EP_TokenOnly|EP_Reduced) ); ExprSetVVAProperty(pEq, EP_NoReduce); - pEq->iRightJoinTable = (i16)pE2->iTable; + pEq->iRightJoinTable = pE2->iTable; } *ppWhere = sqlite3ExprAnd(pParse, *ppWhere, pEq); } @@ -129685,7 +132462,7 @@ SQLITE_PRIVATE void sqlite3SetJoinExpr(Expr *p, int iTable){ ExprSetProperty(p, EP_FromJoin); assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) ); ExprSetVVAProperty(p, EP_NoReduce); - p->iRightJoinTable = (i16)iTable; + p->iRightJoinTable = iTable; if( p->op==TK_FUNCTION && p->x.pList ){ int i; for(i=0; ix.pList->nExpr; i++){ @@ -129709,6 +132486,9 @@ static void unsetJoinExpr(Expr *p, int iTable){ && (iTable<0 || p->iRightJoinTable==iTable) ){ ExprClearProperty(p, EP_FromJoin); } + if( p->op==TK_COLUMN && p->iTable==iTable ){ + ExprClearProperty(p, EP_CanBeNull); + } if( p->op==TK_FUNCTION && p->x.pList ){ int i; for(i=0; ix.pList->nExpr; i++){ @@ -129737,8 +132517,8 @@ static void unsetJoinExpr(Expr *p, int iTable){ static int sqliteProcessJoin(Parse *pParse, Select *p){ SrcList *pSrc; /* All tables in the FROM clause */ int i, j; /* Loop counters */ - struct SrcList_item *pLeft; /* Left table being joined */ - struct SrcList_item *pRight; /* Right table being joined */ + SrcItem *pLeft; /* Left table being joined */ + SrcItem *pRight; /* Right table being joined */ pSrc = p->pSrc; pLeft = &pSrc->a[0]; @@ -129806,7 +132586,7 @@ static int sqliteProcessJoin(Parse *pParse, Select *p){ int iRightCol; /* Column number of matching column on the right */ zName = pList->a[j].zName; - iRightCol = columnIndex(pRightTab, zName); + iRightCol = sqlite3ColumnIndex(pRightTab, zName); if( iRightCol<0 || !tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol, 0) ){ @@ -130036,31 +132816,155 @@ static void codeOffset( } /* -** Add code that will check to make sure the N registers starting at iMem -** form a distinct entry. 
iTab is a sorting index that holds previously
-** seen combinations of the N values.  A new entry is made in iTab
-** if the current N values are new.
+** Add code that will check to make sure the array of registers starting at
+** iMem form a distinct entry.  This is used by both "SELECT DISTINCT ..." and
+** distinct aggregates ("SELECT count(DISTINCT <expr>) ...").  Three strategies
+** are available.  Which is used depends on the value of parameter eTnctType,
+** as follows:
 **
-** A jump to addrRepeat is made and the N+1 values are popped from the
-** stack if the top N elements are not distinct.
-*/
-static void codeDistinct(
+**    WHERE_DISTINCT_UNORDERED/WHERE_DISTINCT_NOOP:
+**      Build an ephemeral table that contains all entries seen before and
+**      skip entries which have been seen before.
+**
+**      Parameter iTab is the cursor number of an ephemeral table that must
+**      be opened before the VM code generated by this routine is executed.
+**      The ephemeral cursor table is queried for a record identical to the
+**      record formed by the current array of registers.  If one is found,
+**      jump to VM address addrRepeat.  Otherwise, insert a new record into
+**      the ephemeral cursor and proceed.
+**
+**      The returned value in this case is a copy of parameter iTab.
+**
+**    WHERE_DISTINCT_ORDERED:
+**      In this case rows are being delivered in sorted order.  The ephemeral
+**      table is not required.  Instead, the current set of values
+**      is compared against the previous row.  If they match, the new row
+**      is not distinct and control jumps to VM address addrRepeat.  Otherwise,
+**      the VM program proceeds with processing the new row.
+**
+**      The returned value in this case is the number of the first register
+**      in an array of registers used to store the previous result row so
+**      that it can be compared to the next.  The caller must ensure that
+**      this register is initialized to NULL.  (The fixDistinctOpenEph()
+**      routine will take care of this initialization.)
+**
+**    WHERE_DISTINCT_UNIQUE:
+**      In this case it has already been determined that the rows are
+**      distinct.  No special action is required.  The return value is zero.
+**
+** Parameter pEList is the list of expressions used to generate the
+** contents of each row.  It is used by this routine to determine (a)
+** how many elements there are in the array of registers and (b) the
+** collation sequences that should be used for the comparisons if
+** eTnctType is WHERE_DISTINCT_ORDERED.
+*/
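/*
** (Illustrative queries, not from the upstream sources.  Which strategy the
** planner picks depends on the available indexes and the chosen scan, so
** these are indicative only:
**
**       CREATE TABLE t(a,b);
**       CREATE UNIQUE INDEX tua ON t(a);
**       CREATE INDEX tb ON t(b);
**
**       SELECT DISTINCT a FROM t;      -- a is already unique:
**                                      --   WHERE_DISTINCT_UNIQUE
**       SELECT DISTINCT b FROM t;      -- a scan of index tb delivers b in
**                                      --   sorted order:
**                                      --   WHERE_DISTINCT_ORDERED
**       SELECT DISTINCT b, a FROM t;   -- no covering order: ephemeral-table
**                                      --   strategy (WHERE_DISTINCT_UNORDERED)
** )
*/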
+static int codeDistinct(
+  Parse *pParse,     /* Parsing and code generating context */
+  int eTnctType,     /* WHERE_DISTINCT_* value */
+  int iTab,          /* A sorting index used to test for distinctness */
+  int addrRepeat,    /* Jump to here if not distinct */
-  int N,             /* Number of elements */
-  int iMem           /* First element */
+  ExprList *pEList,  /* Expression for each element */
+  int regElem        /* First element */
 ){
-  Vdbe *v;
-  int r1;
+  int iRet = 0;
+  int nResultCol = pEList->nExpr;
+  Vdbe *v = pParse->pVdbe;
 
-  v = pParse->pVdbe;
-  r1 = sqlite3GetTempReg(pParse);
-  sqlite3VdbeAddOp4Int(v, OP_Found, iTab, addrRepeat, iMem, N); VdbeCoverage(v);
-  sqlite3VdbeAddOp3(v, OP_MakeRecord, iMem, N, r1);
-  sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iTab, r1, iMem, N);
-  sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
-  sqlite3ReleaseTempReg(pParse, r1);
+  switch( eTnctType ){
+    case WHERE_DISTINCT_ORDERED: {
+      int i;
+      int iJump;              /* Jump destination */
+      int regPrev;            /* Previous row content */
+
+      /* Allocate space for the previous row */
+      iRet = regPrev = pParse->nMem+1;
+      pParse->nMem += nResultCol;
+
+      iJump = sqlite3VdbeCurrentAddr(v) + nResultCol;
+      for(i=0; i<nResultCol; i++){
+        CollSeq *pColl = sqlite3ExprCollSeq(pParse, pEList->a[i].pExpr);
+        if( i<nResultCol-1 ){
+          sqlite3VdbeAddOp3(v, OP_Ne, regElem+i, iJump, regPrev+i);
+          VdbeCoverage(v);
+        }else{
+          sqlite3VdbeAddOp3(v, OP_Eq, regElem+i, addrRepeat, regPrev+i);
+          VdbeCoverage(v);
+        }
+        sqlite3VdbeChangeP4(v, -1, (const char *)pColl, P4_COLLSEQ);
+        sqlite3VdbeChangeP5(v, SQLITE_NULLEQ);
+      }
+      assert( sqlite3VdbeCurrentAddr(v)==iJump || pParse->db->mallocFailed );
+      sqlite3VdbeAddOp3(v, OP_Copy, regElem, regPrev, nResultCol-1);
+      break;
+    }
+
+    case WHERE_DISTINCT_UNIQUE: {
+      /* nothing to do */
+      break;
+    }
+
+    default: {
+      int r1 = sqlite3GetTempReg(pParse);
+      sqlite3VdbeAddOp4Int(v, OP_Found, iTab, addrRepeat, regElem, nResultCol);
+      VdbeCoverage(v);
+      sqlite3VdbeAddOp3(v, OP_MakeRecord, regElem, nResultCol, r1);
+      sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iTab, r1, regElem, nResultCol);
+      sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
+      sqlite3ReleaseTempReg(pParse, r1);
+      iRet = iTab;
+      break;
+    }
+  }
+
+  return iRet;
+}
+
+/*
+** This routine runs after codeDistinct().  It makes necessary
+** adjustments to the OP_OpenEphemeral opcode that the codeDistinct()
+** routine made use of.  This processing must be done separately since
+** sometimes codeDistinct is called before the OP_OpenEphemeral is actually
+** laid down.
+**
+** WHERE_DISTINCT_NOOP:
+** WHERE_DISTINCT_UNORDERED:
+**
+**     No adjustments necessary.  This function is a no-op.
+**
+** WHERE_DISTINCT_UNIQUE:
+**
+**     The ephemeral table is not needed.  So change the
+**     OP_OpenEphemeral opcode into an OP_Noop.
+**
+** WHERE_DISTINCT_ORDERED:
+**
+**     The ephemeral table is not needed.  But we do need register
+**     iVal to be initialized to NULL.  So change the OP_OpenEphemeral
+**     into an OP_Null on the iVal register.
+*/
+static void fixDistinctOpenEph(
+  Parse *pParse,     /* Parsing and code generating context */
+  int eTnctType,     /* WHERE_DISTINCT_* value */
+  int iVal,          /* Value returned by codeDistinct() */
+  int iOpenEphAddr   /* Address of OP_OpenEphemeral instruction for iTab */
+){
+  if( eTnctType==WHERE_DISTINCT_UNIQUE || eTnctType==WHERE_DISTINCT_ORDERED ){
+    Vdbe *v = pParse->pVdbe;
+    sqlite3VdbeChangeToNoop(v, iOpenEphAddr);
+    if( sqlite3VdbeGetOp(v, iOpenEphAddr+1)->opcode==OP_Explain ){
+      sqlite3VdbeChangeToNoop(v, iOpenEphAddr+1);
+    }
+    if( eTnctType==WHERE_DISTINCT_ORDERED ){
+      /* Change the OP_OpenEphemeral to an OP_Null that sets the MEM_Cleared
+      ** bit on the first register of the previous value.  This will cause the
+      ** OP_Ne added in codeDistinct() to always fail on the first iteration of
+      ** the loop even if the first row is all NULLs.
+      */
+      VdbeOp *pOp = sqlite3VdbeGetOp(v, iOpenEphAddr);
+      pOp->opcode = OP_Null;
+      pOp->p1 = 1;
+      pOp->p2 = iVal;
+    }
+  }
+}
 
 #ifdef SQLITE_ENABLE_SORTER_REFERENCES
@@ -130308,59 +133212,11 @@ static void selectInnerLoop(
   ** part of the result.
   */
   if( hasDistinct ){
-    switch( pDistinct->eTnctType ){
-      case WHERE_DISTINCT_ORDERED: {
-        VdbeOp *pOp;            /* No longer required OpenEphemeral instr. */
-        int iJump;              /* Jump destination */
-        int regPrev;            /* Previous row content */
-
-        /* Allocate space for the previous row */
-        regPrev = pParse->nMem+1;
-        pParse->nMem += nResultCol;
-
-        /* Change the OP_OpenEphemeral coded earlier to an OP_Null
-        ** sets the MEM_Cleared bit on the first register of the
-        ** previous value.  This will cause the OP_Ne below to always
-        ** fail on the first iteration of the loop even if the first
-        ** row is all NULLs.
-        */
-        sqlite3VdbeChangeToNoop(v, pDistinct->addrTnct);
-        pOp = sqlite3VdbeGetOp(v, pDistinct->addrTnct);
-        pOp->opcode = OP_Null;
-        pOp->p1 = 1;
-        pOp->p2 = regPrev;
-        pOp = 0; /* Ensure pOp is not used after sqlite3VdbeAddOp() */
-
-        iJump = sqlite3VdbeCurrentAddr(v) + nResultCol;
-        for(i=0; i<nResultCol; i++){
-          CollSeq *pColl = sqlite3ExprCollSeq(pParse, p->pEList->a[i].pExpr);
-          if( i<nResultCol-1 ){
-            sqlite3VdbeAddOp3(v, OP_Ne, regResult+i, iJump, regPrev+i);
-            VdbeCoverage(v);
-          }else{
-            sqlite3VdbeAddOp3(v, OP_Eq, regResult+i, iContinue, regPrev+i);
-            VdbeCoverage(v);
-          }
-          sqlite3VdbeChangeP4(v, -1, (const char *)pColl, P4_COLLSEQ);
-          sqlite3VdbeChangeP5(v, SQLITE_NULLEQ);
-        }
-        assert( sqlite3VdbeCurrentAddr(v)==iJump || pParse->db->mallocFailed );
-        sqlite3VdbeAddOp3(v, OP_Copy, regResult, regPrev, nResultCol-1);
-        break;
-      }
-
-      case WHERE_DISTINCT_UNIQUE: {
-        sqlite3VdbeChangeToNoop(v, pDistinct->addrTnct);
-        break;
-      }
-
-      default: {
-        assert( pDistinct->eTnctType==WHERE_DISTINCT_UNORDERED );
-        codeDistinct(pParse, pDistinct->tabTnct, iContinue, nResultCol,
-                     regResult);
-        break;
-      }
-    }
+    int eType = pDistinct->eTnctType;
+    int iTab = pDistinct->tabTnct;
+    assert( nResultCol==p->pEList->nExpr );
+    iTab = codeDistinct(pParse, eType, iTab, iContinue, p->pEList, regResult);
+    fixDistinctOpenEph(pParse, eType, iTab, pDistinct->addrTnct);
     if( pSort==0 ){
       codeOffset(v, p->iOffset, iContinue);
     }
@@ -130685,7 +133541,7 @@ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoFromExprList(
 /*
 ** Name of the connection operator, used for error messages.
 */
-static const char *selectOpName(int id){
+SQLITE_PRIVATE const char *sqlite3SelectOpName(int id){
   char *z;
   switch( id ){
     case TK_ALL: z = "UNION ALL";   break;
@@ -131026,7 +133882,13 @@ static const char *columnTypeImpl(
     ** of the SELECT statement. Return the declaration type and origin
     ** data for the result-set column of the sub-select.
     */
-    if( iCol>=0 && iCol<pS->pEList->nExpr ){
+    if( iCol<pS->pEList->nExpr
+#ifdef SQLITE_ALLOW_ROWID_IN_VIEW
+     && iCol>=0
+#else
+     && ALWAYS(iCol>=0)
+#endif
+    ){
       /* If iCol is less than zero, then the expression requests the
       ** rowid of the sub-select or view. This expression is legal (see
       ** test case misc2.2.2) - it always evaluates to NULL.
@@ -131168,7 +134030,7 @@ static void generateColumnTypes(
 ** then the result column name with the table name
 ** prefix, ex: TABLE.COLUMN.  Otherwise use zSpan.
 */
-static void generateColumnNames(
+SQLITE_PRIVATE void sqlite3GenerateColumnNames(
   Parse *pParse,      /* Parser context */
   Select *pSelect     /* Generate column names for this SELECT statement */
 ){
@@ -131258,7 +134120,7 @@
 ** and will break if those assumptions changes.  Hence, use extreme caution
 ** when modifying this routine to avoid breaking legacy.
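**
** (Illustrative behavior, not upstream text — under the default
** short_column_names setting the rules above give:
**
**       CREATE TABLE t1(x);
**       SELECT x, t1.x AS y, x+1 FROM t1;
**
** column names "x" for the bare column, "y" because an AS alias always
** wins, and "x+1", the text of the expression.)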
** -** See Also: generateColumnNames() +** See Also: sqlite3GenerateColumnNames() */ SQLITE_PRIVATE int sqlite3ColumnsFromExprList( Parse *pParse, /* Parsing context */ @@ -131274,13 +134136,14 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList( char *zName; /* Column name */ int nName; /* Size of name in zName[] */ Hash ht; /* Hash table of column names */ + Table *pTab; sqlite3HashInit(&ht); if( pEList ){ nCol = pEList->nExpr; aCol = sqlite3DbMallocZero(db, sizeof(aCol[0])*nCol); testcase( aCol==0 ); - if( nCol>32767 ) nCol = 32767; + if( NEVER(nCol>32767) ) nCol = 32767; }else{ nCol = 0; aCol = 0; @@ -131296,15 +134159,13 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList( /* If the column contains an "AS " phrase, use as the name */ }else{ Expr *pColExpr = sqlite3ExprSkipCollateAndLikely(pEList->a[i].pExpr); - while( pColExpr->op==TK_DOT ){ + while( ALWAYS(pColExpr!=0) && pColExpr->op==TK_DOT ){ pColExpr = pColExpr->pRight; assert( pColExpr!=0 ); } - if( pColExpr->op==TK_COLUMN ){ + if( pColExpr->op==TK_COLUMN && (pTab = pColExpr->y.pTab)!=0 ){ /* For columns use the column name name */ int iCol = pColExpr->iColumn; - Table *pTab = pColExpr->y.pTab; - assert( pTab!=0 ); if( iCol<0 ) iCol = pTab->iPKey; zName = iCol>=0 ? pTab->aCol[iCol].zName : "rowid"; }else if( pColExpr->op==TK_ID ){ @@ -131389,6 +134250,7 @@ SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation( for(i=0, pCol=pTab->aCol; inCol; i++, pCol++){ const char *zType; int n, m; + pTab->tabFlags |= (pCol->colFlags & COLFLAG_NOINSERT); p = a[i].pExpr; zType = columnType(&sNC, p, 0, 0, 0); /* pCol->szEst = ... // Column size est for SELECT tables never used */ @@ -131642,6 +134504,7 @@ static void generateWithRecursiveQuery( int nCol = p->pEList->nExpr; /* Number of columns in the recursive table */ Vdbe *v = pParse->pVdbe; /* The prepared statement under construction */ Select *pSetup = p->pPrior; /* The setup query */ + Select *pFirstRec; /* Left-most recursive term */ int addrTop; /* Top of the loop */ int addrCont, addrBreak; /* CONTINUE and BREAK addresses */ int iCurrent = 0; /* The Current table */ @@ -131717,7 +134580,25 @@ static void generateWithRecursiveQuery( /* Detach the ORDER BY clause from the compound SELECT */ p->pOrderBy = 0; + /* Figure out how many elements of the compound SELECT are part of the + ** recursive query. Make sure no recursive elements use aggregate + ** functions. Mark the recursive elements as UNION ALL even if they + ** are really UNION because the distinctness will be enforced by the + ** iDistinct table. pFirstRec is left pointing to the left-most + ** recursive term of the CTE. + */ + pFirstRec = p; + for(pFirstRec=p; ALWAYS(pFirstRec!=0); pFirstRec=pFirstRec->pPrior){ + if( pFirstRec->selFlags & SF_Aggregate ){ + sqlite3ErrorMsg(pParse, "recursive aggregate queries not supported"); + goto end_of_recursive_query; + } + pFirstRec->op = TK_ALL; + if( (pFirstRec->pPrior->selFlags & SF_Recursive)==0 ) break; + } + /* Store the results of the setup-query in Queue. */ + pSetup = pFirstRec->pPrior; pSetup->pNext = 0; ExplainQueryPlan((pParse, 1, "SETUP")); rc = sqlite3Select(pParse, pSetup, &destQueue); @@ -131750,15 +134631,11 @@ static void generateWithRecursiveQuery( /* Execute the recursive SELECT taking the single row in Current as ** the value for the recursive-table. Store the results in the Queue. 
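**
** (Worked example, editorial: for
**
**       WITH RECURSIVE cnt(x) AS (
**         SELECT 1
**         UNION ALL
**         SELECT x+1 FROM cnt WHERE x<5
**       )
**       SELECT x FROM cnt;
**
** "SELECT 1" is the setup query whose rows seed the Queue; each pass of
** the loop below pulls one row into Current and runs the recursive term,
** pushing the "x+1" rows back onto the Queue until it drains, yielding
** 1 through 5.)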
*/ - if( p->selFlags & SF_Aggregate ){ - sqlite3ErrorMsg(pParse, "recursive aggregate queries not supported"); - }else{ - p->pPrior = 0; - ExplainQueryPlan((pParse, 1, "RECURSIVE STEP")); - sqlite3Select(pParse, p, &destQueue); - assert( p->pPrior==0 ); - p->pPrior = pSetup; - } + pFirstRec->pPrior = 0; + ExplainQueryPlan((pParse, 1, "RECURSIVE STEP")); + sqlite3Select(pParse, p, &destQueue); + assert( pFirstRec->pPrior==0 ); + pFirstRec->pPrior = pSetup; /* Keep running the loop until the Queue is empty */ sqlite3VdbeGoto(v, addrTop); @@ -131827,6 +134704,16 @@ static int multiSelectValues( return rc; } +/* +** Return true if the SELECT statement which is known to be the recursive +** part of a recursive CTE still has its anchor terms attached. If the +** anchor terms have already been removed, then return false. +*/ +static int hasAnchor(Select *p){ + while( p && (p->selFlags & SF_Recursive)!=0 ){ p = p->pPrior; } + return p!=0; +} + /* ** This routine is called to process a compound query form from ** two or more separate queries using UNION, UNION ALL, EXCEPT, or @@ -131879,12 +134766,8 @@ static int multiSelect( db = pParse->db; pPrior = p->pPrior; dest = *pDest; - if( pPrior->pOrderBy || pPrior->pLimit ){ - sqlite3ErrorMsg(pParse,"%s clause should come after %s not before", - pPrior->pOrderBy!=0 ? "ORDER BY" : "LIMIT", selectOpName(p->op)); - rc = 1; - goto multi_select_end; - } + assert( pPrior->pOrderBy==0 ); + assert( pPrior->pLimit==0 ); v = sqlite3GetVdbe(pParse); assert( v!=0 ); /* The VDBE already created by calling function */ @@ -131912,7 +134795,7 @@ static int multiSelect( assert( p->pEList->nExpr==pPrior->pEList->nExpr ); #ifndef SQLITE_OMIT_CTE - if( p->selFlags & SF_Recursive ){ + if( (p->selFlags & SF_Recursive)!=0 && hasAnchor(p) ){ generateWithRecursiveQuery(pParse, p, &dest); }else #endif @@ -131935,13 +134818,14 @@ static int multiSelect( switch( p->op ){ case TK_ALL: { int addr = 0; - int nLimit; + int nLimit = 0; /* Initialize to suppress harmless compiler warning */ assert( !pPrior->pLimit ); pPrior->iLimit = p->iLimit; pPrior->iOffset = p->iOffset; pPrior->pLimit = p->pLimit; + SELECTTRACE(1, pParse, p, ("multiSelect UNION ALL left...\n")); rc = sqlite3Select(pParse, pPrior, &dest); - p->pLimit = 0; + pPrior->pLimit = 0; if( rc ){ goto multi_select_end; } @@ -131957,13 +134841,14 @@ static int multiSelect( } } ExplainQueryPlan((pParse, 1, "UNION ALL")); + SELECTTRACE(1, pParse, p, ("multiSelect UNION ALL right...\n")); rc = sqlite3Select(pParse, p, &dest); testcase( rc!=SQLITE_OK ); pDelete = p->pPrior; p->pPrior = pPrior; p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow); - if( pPrior->pLimit - && sqlite3ExprIsInteger(pPrior->pLimit->pLeft, &nLimit) + if( p->pLimit + && sqlite3ExprIsInteger(p->pLimit->pLeft, &nLimit) && nLimit>0 && p->nSelectRow > sqlite3LogEst((u64)nLimit) ){ p->nSelectRow = sqlite3LogEst((u64)nLimit); @@ -132004,10 +134889,12 @@ static int multiSelect( assert( p->pEList ); } + /* Code the SELECT statements to our left */ assert( !pPrior->pOrderBy ); sqlite3SelectDestInit(&uniondest, priorOp, unionTab); + SELECTTRACE(1, pParse, p, ("multiSelect EXCEPT/UNION left...\n")); rc = sqlite3Select(pParse, pPrior, &uniondest); if( rc ){ goto multi_select_end; @@ -132026,7 +134913,8 @@ static int multiSelect( p->pLimit = 0; uniondest.eDest = op; ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE", - selectOpName(p->op))); + sqlite3SelectOpName(p->op))); + SELECTTRACE(1, pParse, p, ("multiSelect EXCEPT/UNION right...\n")); rc = 
sqlite3Select(pParse, p, &uniondest); testcase( rc!=SQLITE_OK ); assert( p->pOrderBy==0 ); @@ -132087,6 +134975,7 @@ static int multiSelect( /* Code the SELECTs to our left into temporary table "tab1". */ sqlite3SelectDestInit(&intersectdest, SRT_Union, tab1); + SELECTTRACE(1, pParse, p, ("multiSelect INTERSECT left...\n")); rc = sqlite3Select(pParse, pPrior, &intersectdest); if( rc ){ goto multi_select_end; @@ -132102,7 +134991,8 @@ static int multiSelect( p->pLimit = 0; intersectdest.iSDParm = tab2; ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE", - selectOpName(p->op))); + sqlite3SelectOpName(p->op))); + SELECTTRACE(1, pParse, p, ("multiSelect INTERSECT right...\n")); rc = sqlite3Select(pParse, p, &intersectdest); testcase( rc!=SQLITE_OK ); pDelete = p->pPrior; @@ -132211,7 +135101,8 @@ SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p){ sqlite3ErrorMsg(pParse, "all VALUES must have the same number of terms"); }else{ sqlite3ErrorMsg(pParse, "SELECTs to the left and right of %s" - " do not have the same number of result columns", selectOpName(p->op)); + " do not have the same number of result columns", + sqlite3SelectOpName(p->op)); } } @@ -132308,10 +135199,8 @@ static int generateOutputSubroutine( ** if it is the RHS of a row-value IN operator. */ case SRT_Mem: { - if( pParse->nErr==0 ){ - testcase( pIn->nSdst>1 ); - sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSDParm, pIn->nSdst); - } + testcase( pIn->nSdst>1 ); + sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSDParm, pIn->nSdst); /* The LIMIT clause will jump out of the loop for us */ break; } @@ -132603,7 +135492,7 @@ static int multiSelectOrderBy( sqlite3SelectDestInit(&destA, SRT_Coroutine, regAddrA); sqlite3SelectDestInit(&destB, SRT_Coroutine, regAddrB); - ExplainQueryPlan((pParse, 1, "MERGE (%s)", selectOpName(p->op))); + ExplainQueryPlan((pParse, 1, "MERGE (%s)", sqlite3SelectOpName(p->op))); /* Generate a coroutine to evaluate the SELECT statement to the ** left of the compound operator - the "A" select. @@ -132737,6 +135626,9 @@ static int multiSelectOrderBy( p->pPrior = pPrior; pPrior->pNext = p; + sqlite3ExprListDelete(db, pPrior->pOrderBy); + pPrior->pOrderBy = 0; + /*** TBD: Insert subroutine calls to close cursors on incomplete **** subqueries ****/ ExplainQueryPlanPop(pParse); @@ -132791,9 +135683,12 @@ static Expr *substExpr( && pExpr->iTable==pSubst->iTable && !ExprHasProperty(pExpr, EP_FixedCol) ){ +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW if( pExpr->iColumn<0 ){ pExpr->op = TK_NULL; - }else{ + }else +#endif + { Expr *pNew; Expr *pCopy = pSubst->pEList->a[pExpr->iColumn].pExpr; Expr ifNullRow; @@ -132808,32 +135703,33 @@ static Expr *substExpr( ifNullRow.op = TK_IF_NULL_ROW; ifNullRow.pLeft = pCopy; ifNullRow.iTable = pSubst->iNewTable; - ifNullRow.flags = EP_Skip; + ifNullRow.flags = EP_IfNullRow; pCopy = &ifNullRow; } testcase( ExprHasProperty(pCopy, EP_Subquery) ); pNew = sqlite3ExprDup(db, pCopy, 0); - if( pNew && pSubst->isLeftJoin ){ + if( db->mallocFailed ){ + sqlite3ExprDelete(db, pNew); + return pExpr; + } + if( pSubst->isLeftJoin ){ ExprSetProperty(pNew, EP_CanBeNull); } - if( pNew && ExprHasProperty(pExpr,EP_FromJoin) ){ - pNew->iRightJoinTable = pExpr->iRightJoinTable; - ExprSetProperty(pNew, EP_FromJoin); + if( ExprHasProperty(pExpr,EP_FromJoin) ){ + sqlite3SetJoinExpr(pNew, pExpr->iRightJoinTable); } sqlite3ExprDelete(db, pExpr); pExpr = pNew; /* Ensure that the expression now has an implicit collation sequence, ** just as it did when it was a column of a view or sub-query. 
*/ - if( pExpr ){ - if( pExpr->op!=TK_COLUMN && pExpr->op!=TK_COLLATE ){ - CollSeq *pColl = sqlite3ExprCollSeq(pSubst->pParse, pExpr); - pExpr = sqlite3ExprAddCollateString(pSubst->pParse, pExpr, - (pColl ? pColl->zName : "BINARY") - ); - } - ExprClearProperty(pExpr, EP_Collate); + if( pExpr->op!=TK_COLUMN && pExpr->op!=TK_COLLATE ){ + CollSeq *pColl = sqlite3ExprCollSeq(pSubst->pParse, pExpr); + pExpr = sqlite3ExprAddCollateString(pSubst->pParse, pExpr, + (pColl ? pColl->zName : "BINARY") + ); } + ExprClearProperty(pExpr, EP_Collate); } } }else{ @@ -132874,7 +135770,7 @@ static void substSelect( int doPrior /* Do substitutes on p->pPrior too */ ){ SrcList *pSrc; - struct SrcList_item *pItem; + SrcItem *pItem; int i; if( !p ) return; do{ @@ -132904,7 +135800,7 @@ static void substSelect( ** pSrcItem->colUsed mask. */ static int recomputeColumnsUsedExpr(Walker *pWalker, Expr *pExpr){ - struct SrcList_item *pItem; + SrcItem *pItem; if( pExpr->op!=TK_COLUMN ) return WRC_Continue; pItem = pWalker->u.pSrcItem; if( pItem->iCursor!=pExpr->iTable ) return WRC_Continue; @@ -132914,7 +135810,7 @@ static int recomputeColumnsUsedExpr(Walker *pWalker, Expr *pExpr){ } static void recomputeColumnsUsed( Select *pSelect, /* The complete SELECT statement */ - struct SrcList_item *pSrcItem /* Which FROM clause item to recompute */ + SrcItem *pSrcItem /* Which FROM clause item to recompute */ ){ Walker w; if( NEVER(pSrcItem->pTab==0) ) return; @@ -132927,6 +135823,92 @@ static void recomputeColumnsUsed( } #endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ +#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) +/* +** Assign new cursor numbers to each of the items in pSrc. For each +** new cursor number assigned, set an entry in the aCsrMap[] array +** to map the old cursor number to the new: +** +** aCsrMap[iOld] = iNew; +** +** The array is guaranteed by the caller to be large enough for all +** existing cursor numbers in pSrc. +** +** If pSrc contains any sub-selects, call this routine recursively +** on the FROM clause of each such sub-select, with iExcept set to -1. +*/ +static void srclistRenumberCursors( + Parse *pParse, /* Parse context */ + int *aCsrMap, /* Array to store cursor mappings in */ + SrcList *pSrc, /* FROM clause to renumber */ + int iExcept /* FROM clause item to skip */ +){ + int i; + SrcItem *pItem; + for(i=0, pItem=pSrc->a; inSrc; i++, pItem++){ + if( i!=iExcept ){ + Select *p; + if( !pItem->fg.isRecursive || aCsrMap[pItem->iCursor]==0 ){ + aCsrMap[pItem->iCursor] = pParse->nTab++; + } + pItem->iCursor = aCsrMap[pItem->iCursor]; + for(p=pItem->pSelect; p; p=p->pPrior){ + srclistRenumberCursors(pParse, aCsrMap, p->pSrc, -1); + } + } + } +} + +/* +** Expression walker callback used by renumberCursors() to update +** Expr objects to match newly assigned cursor numbers. +*/ +static int renumberCursorsCb(Walker *pWalker, Expr *pExpr){ + int *aCsrMap = pWalker->u.aiCol; + int op = pExpr->op; + if( (op==TK_COLUMN || op==TK_IF_NULL_ROW) && aCsrMap[pExpr->iTable] ){ + pExpr->iTable = aCsrMap[pExpr->iTable]; + } + if( ExprHasProperty(pExpr, EP_FromJoin) && aCsrMap[pExpr->iRightJoinTable] ){ + pExpr->iRightJoinTable = aCsrMap[pExpr->iRightJoinTable]; + } + return WRC_Continue; +} + +/* +** Assign a new cursor number to each cursor in the FROM clause (Select.pSrc) +** of the SELECT statement passed as the second argument, and to each +** cursor in the FROM clause of any FROM clause sub-selects, recursively. 
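+**
+** (A hedged illustration added editorially; the SQL below is an assumed
+** example, not part of the upstream comment.) Renumbering matters when a
+** compound subquery is flattened into a join such as
+**
+**     SELECT * FROM t1 JOIN (SELECT x FROM t2 UNION ALL SELECT y FROM t3)
+**
+** because the outer query is duplicated once per arm of the compound,
+** and each duplicate needs a fresh cursor number for t1 so that the
+** copies do not all reuse t1's original cursor.
+**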
+** Except, do not assign a new cursor number to the iExcept'th element in +** the FROM clause of (*p). Update all expressions and other references +** to refer to the new cursor numbers. +** +** Argument aCsrMap is an array that may be used for temporary working +** space. Two guarantees are made by the caller: +** +** * the array is larger than the largest cursor number used within the +** select statement passed as an argument, and +** +** * the array entries for all cursor numbers that do *not* appear in +** FROM clauses of the select statement as described above are +** initialized to zero. +*/ +static void renumberCursors( + Parse *pParse, /* Parse context */ + Select *p, /* Select to renumber cursors within */ + int iExcept, /* FROM clause item to skip */ + int *aCsrMap /* Working space */ +){ + Walker w; + srclistRenumberCursors(pParse, aCsrMap, p->pSrc, iExcept); + memset(&w, 0, sizeof(w)); + w.u.aiCol = aCsrMap; + w.xExprCallback = renumberCursorsCb; + w.xSelectCallback = sqlite3SelectWalkNoop; + sqlite3WalkSelect(&w, p); +} +#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ + #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* ** This routine attempts to flatten subqueries as a performance optimization. @@ -133021,9 +136003,9 @@ static void recomputeColumnsUsed( ** (17c) every term within the subquery compound must have a FROM clause ** (17d) the outer query may not be ** (17d1) aggregate, or -** (17d2) DISTINCT, or -** (17d3) a join. -** (17e) the subquery may not contain window functions +** (17d2) DISTINCT +** (17e) the subquery may not contain window functions, and +** (17f) the subquery must not be the RHS of a LEFT JOIN. ** ** The parent and sub-query may contain WHERE clauses. Subject to ** rules (11), (13) and (14), they may also contain ORDER BY, @@ -133039,8 +136021,8 @@ static void recomputeColumnsUsed( ** syntax error and return a detailed message. ** ** (18) If the sub-query is a compound select, then all terms of the -** ORDER BY clause of the parent must be simple references to -** columns of the sub-query. +** ORDER BY clause of the parent must be copies of a term returned +** by the parent query. ** ** (19) If the subquery uses LIMIT then the outer query may not ** have a WHERE clause. @@ -133056,9 +136038,8 @@ static void recomputeColumnsUsed( ** ** (22) The subquery may not be a recursive CTE. ** -** (**) Subsumed into restriction (17d3). Was: If the outer query is -** a recursive CTE, then the sub-query may not be a compound query. -** This restriction is because transforming the +** (23) If the outer query is a recursive CTE, then the sub-query may not be +** a compound query. This restriction is because transforming the ** parent to a compound query confuses the code that handles ** recursive queries in multiSelect(). ** @@ -133100,9 +136081,10 @@ static int flattenSubquery( int isLeftJoin = 0; /* True if pSub is the right side of a LEFT JOIN */ int i; /* Loop counter */ Expr *pWhere; /* The WHERE clause */ - struct SrcList_item *pSubitem; /* The subquery */ + SrcItem *pSubitem; /* The subquery */ sqlite3 *db = pParse->db; Walker w; /* Walker to persist agginfo data */ + int *aCsrMap = 0; /* Check to see if flattening is permitted. Return 0 if not. 
*/ @@ -133198,13 +136180,14 @@ if( pSub->pOrderBy ){ return 0; /* Restriction (20) */ } - if( isAgg || (p->selFlags & SF_Distinct)!=0 || pSrc->nSrc!=1 ){ - return 0; /* (17d1), (17d2), or (17d3) */ + if( isAgg || (p->selFlags & SF_Distinct)!=0 || isLeftJoin>0 ){ + return 0; /* (17d1), (17d2), or (17f) */ } for(pSub1=pSub; pSub1; pSub1=pSub1->pPrior){ testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct ); testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate ); assert( pSub->pSrc!=0 ); + assert( (pSub->selFlags & SF_Recursive)==0 ); assert( pSub->pEList->nExpr==pSub1->pEList->nExpr ); if( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))!=0 /* (17b) */ || (pSub1->pPrior && pSub1->op!=TK_ALL) /* (17a) */ @@ -133225,15 +136208,15 @@ if( p->pOrderBy->a[ii].u.x.iOrderByCol==0 ) return 0; } } - } - /* Ex-restriction (23): - ** The only way that the recursive part of a CTE can contain a compound - ** subquery is for the subquery to be one term of a join. But if the - ** subquery is a join, then the flattening has already been stopped by - ** restriction (17d3) - */ - assert( (p->selFlags & SF_Recursive)==0 || pSub->pPrior==0 ); + /* Restriction (23) */ + if( (p->selFlags & SF_Recursive) ) return 0; + + if( pSrc->nSrc>1 ){ + if( pParse->nSelect>500 ) return 0; + aCsrMap = sqlite3DbMallocZero(db, pParse->nTab*sizeof(int)); + } + } /***** If we reach this point, flattening is permitted. *****/ SELECTTRACE(1,pParse,p,("flatten %u.%p from term %d\n", @@ -133245,6 +136228,17 @@ testcase( i==SQLITE_DENY ); pParse->zAuthContext = zSavedAuthContext; + /* Delete the transient structures associated with the subquery */ + pSub1 = pSubitem->pSelect; + sqlite3DbFree(db, pSubitem->zDatabase); + sqlite3DbFree(db, pSubitem->zName); + sqlite3DbFree(db, pSubitem->zAlias); + pSubitem->zDatabase = 0; + pSubitem->zName = 0; + pSubitem->zAlias = 0; + pSubitem->pSelect = 0; + assert( pSubitem->pOn==0 ); + /* If the sub-query is a compound SELECT statement, then (by restrictions ** 17 and 18 above) it must be a UNION ALL and the parent query must ** be of the form: @@ -133283,18 +136277,23 @@ ExprList *pOrderBy = p->pOrderBy; Expr *pLimit = p->pLimit; Select *pPrior = p->pPrior; + Table *pItemTab = pSubitem->pTab; + pSubitem->pTab = 0; p->pOrderBy = 0; - p->pSrc = 0; p->pPrior = 0; p->pLimit = 0; pNew = sqlite3SelectDup(db, p, 0); p->pLimit = pLimit; p->pOrderBy = pOrderBy; - p->pSrc = pSrc; p->op = TK_ALL; + pSubitem->pTab = pItemTab; if( pNew==0 ){ p->pPrior = pPrior; }else{ + pNew->selId = ++pParse->nSelect; + if( aCsrMap && ALWAYS(db->mallocFailed==0) ){ + renumberCursors(pParse, pNew, iFrom, aCsrMap); + } pNew->pPrior = pPrior; if( pPrior ) pPrior->pNext = pNew; pNew->pNext = p; @@ -133302,24 +136301,13 @@ SELECTTRACE(2,pParse,p,("compound-subquery flattener" " creates %u as peer\n",pNew->selId)); } - if( db->mallocFailed ) return 1; + assert( pSubitem->pSelect==0 ); + } + sqlite3DbFree(db, aCsrMap); + if( db->mallocFailed ){ + pSubitem->pSelect = pSub1; + return 1; } - - /* Begin flattening the iFrom-th entry of the FROM clause ** in the outer query.
- */ - pSub = pSub1 = pSubitem->pSelect; - - /* Delete the transient table structure associated with the - ** subquery - */ - sqlite3DbFree(db, pSubitem->zDatabase); - sqlite3DbFree(db, pSubitem->zName); - sqlite3DbFree(db, pSubitem->zAlias); - pSubitem->zDatabase = 0; - pSubitem->zName = 0; - pSubitem->zAlias = 0; - pSubitem->pSelect = 0; /* Defer deleting the Table object associated with the ** subquery until code generation is @@ -133332,8 +136320,10 @@ static int flattenSubquery( Table *pTabToDel = pSubitem->pTab; if( pTabToDel->nTabRef==1 ){ Parse *pToplevel = sqlite3ParseToplevel(pParse); - pTabToDel->pNextZombie = pToplevel->pZombieTab; - pToplevel->pZombieTab = pTabToDel; + sqlite3ParserAddCleanup(pToplevel, + (void(*)(sqlite3*,void*))sqlite3DeleteTable, + pTabToDel); + testcase( pToplevel->earlyCleanup ); }else{ pTabToDel->nTabRef--; } @@ -133353,6 +136343,7 @@ static int flattenSubquery( ** those references with expressions that resolve to the subquery FROM ** elements we are now copying in. */ + pSub = pSub1; for(pParent=p; pParent; pParent=pParent->pPrior, pSub=pSub->pPrior){ int nSubSrc; u8 jointype = 0; @@ -133361,14 +136352,8 @@ static int flattenSubquery( nSubSrc = pSubSrc->nSrc; /* Number of terms in subquery FROM clause */ pSrc = pParent->pSrc; /* FROM clause of the outer query */ - if( pSrc ){ - assert( pParent==p ); /* First time through the loop */ - jointype = pSubitem->fg.jointype; - }else{ - assert( pParent!=p ); /* 2nd and subsequent times through the loop */ - pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0); - if( pSrc==0 ) break; - pParent->pSrc = pSrc; + if( pParent==p ){ + jointype = pSubitem->fg.jointype; /* First time through the loop */ } /* The subquery uses a single slot of the FROM clause of the outer @@ -133488,7 +136473,7 @@ static int flattenSubquery( sqlite3SelectDelete(db, pSub1); #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x100 ){ + if( sqlite3SelectTrace & 0x100 ){ SELECTTRACE(0x100,pParse,p,("After flattening:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -133505,8 +136490,10 @@ static int flattenSubquery( typedef struct WhereConst WhereConst; struct WhereConst { Parse *pParse; /* Parsing context */ + u8 *pOomFault; /* Pointer to pParse->db->mallocFailed */ int nConst; /* Number for COLUMN=CONSTANT terms */ int nChng; /* Number of times a constant is propagated */ + int bHasAffBlob; /* At least one column in apExpr[] as affinity BLOB */ Expr **apExpr; /* [i*2] is COLUMN and [i*2+1] is VALUE */ }; @@ -133545,6 +136532,9 @@ static void constInsert( return; /* Already present. Return without doing anything. */ } } + if( sqlite3ExprAffinity(pColumn)==SQLITE_AFF_BLOB ){ + pConst->bHasAffBlob = 1; + } pConst->nConst++; pConst->apExpr = sqlite3DbReallocOrFree(pConst->pParse->db, pConst->apExpr, @@ -133565,7 +136555,7 @@ static void constInsert( */ static void findConstInWhere(WhereConst *pConst, Expr *pExpr){ Expr *pRight, *pLeft; - if( pExpr==0 ) return; + if( NEVER(pExpr==0) ) return; if( ExprHasProperty(pExpr, EP_FromJoin) ) return; if( pExpr->op==TK_AND ){ findConstInWhere(pConst, pExpr->pRight); @@ -133586,37 +136576,83 @@ static void findConstInWhere(WhereConst *pConst, Expr *pExpr){ } /* -** This is a Walker expression callback. pExpr is a candidate expression -** to be replaced by a value. If pExpr is equivalent to one of the -** columns named in pWalker->u.pConst, then overwrite it with its -** corresponding value. +** This is a helper function for Walker callback propagateConstantExprRewrite(). 
+** +** Argument pExpr is a candidate expression to be replaced by a value. If +** pExpr is equivalent to one of the columns named in pWalker->u.pConst, +** then overwrite it with the corresponding value. Except, do not do so +** if argument bIgnoreAffBlob is non-zero and the affinity of pExpr +** is SQLITE_AFF_BLOB. */ -static int propagateConstantExprRewrite(Walker *pWalker, Expr *pExpr){ +static int propagateConstantExprRewriteOne( + WhereConst *pConst, + Expr *pExpr, + int bIgnoreAffBlob +){ int i; - WhereConst *pConst; + if( pConst->pOomFault[0] ) return WRC_Prune; if( pExpr->op!=TK_COLUMN ) return WRC_Continue; if( ExprHasProperty(pExpr, EP_FixedCol|EP_FromJoin) ){ testcase( ExprHasProperty(pExpr, EP_FixedCol) ); testcase( ExprHasProperty(pExpr, EP_FromJoin) ); return WRC_Continue; } - pConst = pWalker->u.pConst; for(i=0; inConst; i++){ Expr *pColumn = pConst->apExpr[i*2]; if( pColumn==pExpr ) continue; if( pColumn->iTable!=pExpr->iTable ) continue; if( pColumn->iColumn!=pExpr->iColumn ) continue; + if( bIgnoreAffBlob && sqlite3ExprAffinity(pColumn)==SQLITE_AFF_BLOB ){ + break; + } /* A match is found. Add the EP_FixedCol property */ pConst->nChng++; ExprClearProperty(pExpr, EP_Leaf); ExprSetProperty(pExpr, EP_FixedCol); assert( pExpr->pLeft==0 ); pExpr->pLeft = sqlite3ExprDup(pConst->pParse->db, pConst->apExpr[i*2+1], 0); + if( pConst->pParse->db->mallocFailed ) return WRC_Prune; break; } return WRC_Prune; } +/* +** This is a Walker expression callback. pExpr is a node from the WHERE +** clause of a SELECT statement. This function examines pExpr to see if +** any substitutions based on the contents of pWalker->u.pConst should +** be made to pExpr or its immediate children. +** +** A substitution is made if: +** +** + pExpr is a column with an affinity other than BLOB that matches +** one of the columns in pWalker->u.pConst, or +** +** + pExpr is a binary comparison operator (=, <=, >=, <, >) that +** uses an affinity other than TEXT and one of its immediate +** children is a column that matches one of the columns in +** pWalker->u.pConst. +*/ +static int propagateConstantExprRewrite(Walker *pWalker, Expr *pExpr){ + WhereConst *pConst = pWalker->u.pConst; + assert( TK_GT==TK_EQ+1 ); + assert( TK_LE==TK_EQ+2 ); + assert( TK_LT==TK_EQ+3 ); + assert( TK_GE==TK_EQ+4 ); + if( pConst->bHasAffBlob ){ + if( (pExpr->op>=TK_EQ && pExpr->op<=TK_GE) + || pExpr->op==TK_IS + ){ + propagateConstantExprRewriteOne(pConst, pExpr->pLeft, 0); + if( pConst->pOomFault[0] ) return WRC_Prune; + if( sqlite3ExprAffinity(pExpr->pLeft)!=SQLITE_AFF_TEXT ){ + propagateConstantExprRewriteOne(pConst, pExpr->pRight, 0); + } + } + } + return propagateConstantExprRewriteOne(pConst, pExpr, pConst->bHasAffBlob); +} + /* ** The WHERE-clause constant propagation optimization. ** @@ -133652,6 +136688,21 @@ static int propagateConstantExprRewrite(Walker *pWalker, Expr *pExpr){ ** routines know to generate the constant "123" instead of looking up the ** column value. Also, to avoid collation problems, this optimization is ** only attempted if the "a=123" term uses the default BINARY collation. +** +** 2021-05-25 forum post 6a06202608: Another troublesome case is... +** +** CREATE TABLE t1(x); +** INSERT INTO t1 VALUES(10.0); +** SELECT 1 FROM t1 WHERE x=10 AND x LIKE 10; +** +** The query should return no rows, because the t1.x value is '10.0' not '10' +** and '10.0' is not LIKE '10'. 
But if we are not careful, the first WHERE +** term "x=10" will cause the second WHERE term to become "10 LIKE 10", +** resulting in a false positive. To avoid this, constant propagation for +** columns with BLOB affinity is only allowed if the constant is used with +** operators ==, <=, <, >=, >, or IS in a way that will cause the correct +** type conversions to occur. See logic associated with the bHasAffBlob flag +** for details. */ static int propagateConstants( Parse *pParse, /* The parsing context */ @@ -133661,10 +136712,12 @@ Walker w; int nChng = 0; x.pParse = pParse; + x.pOomFault = &pParse->db->mallocFailed; do{ x.nConst = 0; x.nChng = 0; x.apExpr = 0; + x.bHasAffBlob = 0; findConstInWhere(&x, p->pWhere); if( x.nConst ){ memset(&w, 0, sizeof(w)); @@ -133682,6 +136735,35 @@ return nChng; } +#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) +# if !defined(SQLITE_OMIT_WINDOWFUNC) +/* +** This function is called to determine whether or not it is safe to +** push WHERE clause expression pExpr down to FROM clause sub-query +** pSubq, which contains at least one window function. Return 1 +** if it is safe and the expression should be pushed down, or 0 +** otherwise. +** +** It is only safe to push the expression down if it consists only +** of constants and copies of expressions that appear in the PARTITION +** BY clause of all window functions used by the sub-query. It is safe +** to filter out entire partitions, but not rows within partitions, as +** this may change the results of the window functions. +** +** At the time this function is called it is guaranteed that +** +** * the sub-query uses only one distinct window frame, and +** * that the window frame has a PARTITION BY clause. +*/ +static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){ + assert( pSubq->pWin->pPartition ); + assert( (pSubq->selFlags & SF_MultiPart)==0 ); + assert( pSubq->pPrior==0 ); + return sqlite3ExprIsConstantOrGroupBy(pParse, pExpr, pSubq->pWin->pPartition); +} +# endif /* SQLITE_OMIT_WINDOWFUNC */ +#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ + #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* ** Make copies of relevant WHERE clause terms of the outer query into @@ -133729,9 +136811,24 @@ ** But if the (b2=2) term were to be pushed down into the bb subquery, ** then the (1,1,NULL) row would be suppressed. ** -** (6) The inner query features one or more window-functions (since -** changes to the WHERE clause of the inner query could change the -** window over which window functions are calculated). +** (6) Window functions make things tricky, as changes to the WHERE clause +** of the inner query could change the window over which window +** functions are calculated. Therefore, do not attempt the optimization +** if: +** +** (6a) The inner query uses multiple incompatible window partitions. +** +** (6b) The inner query is a compound and uses window-functions. +** +** (6c) The WHERE clause does not consist entirely of constants and +** copies of expressions found in the PARTITION BY clause of +** all window-functions used by the sub-query. It is safe to +** filter out entire partitions, as this does not change the +** window over which any window-function is calculated. +** +** (7) The inner query is a Common Table Expression (CTE) that should +** be materialized. (This restriction is implemented in the calling +** routine.)
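+**
+** (A hedged illustration of (6c), added editorially; the SQL below is an
+** assumed example, not from the upstream comment.) Given
+**
+**     SELECT * FROM (
+**       SELECT a, b, sum(b) OVER (PARTITION BY a) AS s FROM t1
+**     ) WHERE a=5 AND b=5;
+**
+** the term "a=5" names only the PARTITION BY column, so pushing it down
+** merely removes whole partitions and is safe, while "b=5" would drop
+** rows from inside a partition and change sum(b), so it must stay in
+** the outer query.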
** ** Return 0 if no changes are made and non-zero if one or more WHERE clause ** terms are duplicated into the subquery. @@ -133745,13 +136842,17 @@ static int pushDownWhereTerms( ){ Expr *pNew; int nChng = 0; - Select *pSel; if( pWhere==0 ) return 0; - if( pSubq->selFlags & SF_Recursive ) return 0; /* restriction (2) */ + if( pSubq->selFlags & (SF_Recursive|SF_MultiPart) ) return 0; #ifndef SQLITE_OMIT_WINDOWFUNC - for(pSel=pSubq; pSel; pSel=pSel->pPrior){ - if( pSel->pWin ) return 0; /* restriction (6) */ + if( pSubq->pPrior ){ + Select *pSel; + for(pSel=pSubq; pSel; pSel=pSel->pPrior){ + if( pSel->pWin ) return 0; /* restriction (6b) */ + } + }else{ + if( pSubq->pWin && pSubq->pWin->pPartition==0 ) return 0; } #endif @@ -133787,6 +136888,7 @@ static int pushDownWhereTerms( } if( sqlite3ExprIsTableConstant(pWhere, iCursor) ){ nChng++; + pSubq->selFlags |= SF_PushDown; while( pSubq ){ SubstContext x; pNew = sqlite3ExprDup(pParse->db, pWhere, 0); @@ -133797,6 +136899,14 @@ static int pushDownWhereTerms( x.isLeftJoin = 0; x.pEList = pSubq->pEList; pNew = substExpr(&x, pNew); +#ifndef SQLITE_OMIT_WINDOWFUNC + if( pSubq->pWin && 0==pushDownWindowCheck(pParse, pSubq, pNew) ){ + /* Restriction 6c has prevented push-down in this case */ + sqlite3ExprDelete(pParse->db, pNew); + nChng--; + break; + } +#endif if( pSubq->selFlags & SF_Aggregate ){ pSubq->pHaving = sqlite3ExprAnd(pParse, pSubq->pHaving, pNew); }else{ @@ -133835,7 +136945,11 @@ static u8 minMaxQuery(sqlite3 *db, Expr *pFunc, ExprList **ppMinMax){ assert( *ppMinMax==0 ); assert( pFunc->op==TK_AGG_FUNCTION ); assert( !IsWindowFunc(pFunc) ); - if( pEList==0 || pEList->nExpr!=1 || ExprHasProperty(pFunc, EP_WinFunc) ){ + if( pEList==0 + || pEList->nExpr!=1 + || ExprHasProperty(pFunc, EP_WinFunc) + || OptimizationDisabled(db, SQLITE_MinMaxOpt) + ){ return eRet; } zFunc = pFunc->u.zToken; @@ -133898,24 +137012,26 @@ static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){ ** SQLITE_ERROR and leave an error in pParse. Otherwise, populate ** pFrom->pIndex and return SQLITE_OK. */ -SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, struct SrcList_item *pFrom){ - if( pFrom->pTab && pFrom->fg.isIndexedBy ){ - Table *pTab = pFrom->pTab; - char *zIndexedBy = pFrom->u1.zIndexedBy; - Index *pIdx; - for(pIdx=pTab->pIndex; - pIdx && sqlite3StrICmp(pIdx->zName, zIndexedBy); - pIdx=pIdx->pNext - ); - if( !pIdx ){ - sqlite3ErrorMsg(pParse, "no such index: %s", zIndexedBy, 0); - pParse->checkSchema = 1; - return SQLITE_ERROR; - } - pFrom->pIBIndex = pIdx; +SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, SrcItem *pFrom){ + Table *pTab = pFrom->pTab; + char *zIndexedBy = pFrom->u1.zIndexedBy; + Index *pIdx; + assert( pTab!=0 ); + assert( pFrom->fg.isIndexedBy!=0 ); + + for(pIdx=pTab->pIndex; + pIdx && sqlite3StrICmp(pIdx->zName, zIndexedBy); + pIdx=pIdx->pNext + ); + if( !pIdx ){ + sqlite3ErrorMsg(pParse, "no such index: %s", zIndexedBy, 0); + pParse->checkSchema = 1; + return SQLITE_ERROR; } + pFrom->u2.pIBIndex = pIdx; return SQLITE_OK; } + /* ** Detect compound SELECT statements that use an ORDER BY clause with ** an alternative collating sequence. @@ -134002,7 +137118,7 @@ static int convertCompoundSelectToSubquery(Walker *pWalker, Select *p){ ** arguments. If it does, leave an error message in pParse and return ** non-zero, since pFrom is not allowed to be a table-valued function. 
*/ -static int cannotBeFunction(Parse *pParse, struct SrcList_item *pFrom){ +static int cannotBeFunction(Parse *pParse, SrcItem *pFrom){ if( pFrom->fg.isTabFunc ){ sqlite3ErrorMsg(pParse, "'%s' is not a function", pFrom->zName); return 1; @@ -134023,21 +137139,22 @@ static int cannotBeFunction(Parse *pParse, struct SrcList_item *pFrom){ */ static struct Cte *searchWith( With *pWith, /* Current innermost WITH clause */ - struct SrcList_item *pItem, /* FROM clause element to resolve */ + SrcItem *pItem, /* FROM clause element to resolve */ With **ppContext /* OUT: WITH clause return value belongs to */ ){ - const char *zName; - if( pItem->zDatabase==0 && (zName = pItem->zName)!=0 ){ - With *p; - for(p=pWith; p; p=p->pOuter){ - int i; - for(i=0; i<p->nCte; i++){ - if( sqlite3StrICmp(zName, p->a[i].zName)==0 ){ - *ppContext = p; - return &p->a[i]; - } + const char *zName = pItem->zName; + With *p; + assert( pItem->zDatabase==0 ); + assert( zName!=0 ); + for(p=pWith; p; p=p->pOuter){ + int i; + for(i=0; i<p->nCte; i++){ + if( sqlite3StrICmp(zName, p->a[i].zName)==0 ){ + *ppContext = p; + return &p->a[i]; } } + if( p->bView ) break; } return 0; } @@ -134047,58 +137164,92 @@ static struct Cte *searchWith( ** ** This routine pushes the WITH clause passed as the second argument ** onto the top of the stack. If argument bFree is true, then this -** WITH clause will never be popped from the stack. In this case it -** should be freed along with the Parse object. In other cases, when +** WITH clause will never be popped from the stack but should instead +** be freed along with the Parse object. In other cases, when ** bFree==0, the With object will be freed along with the SELECT ** statement with which it is associated. +** +** This routine returns a copy of pWith. Or, if bFree is true and +** the pWith object is destroyed immediately due to an OOM condition, +** then this routine returns NULL. +** +** If bFree is true, do not continue to use the pWith pointer after +** calling this routine. Instead, use only the return value. */ -SQLITE_PRIVATE void sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){ - assert( bFree==0 || (pParse->pWith==0 && pParse->pWithToFree==0) ); +SQLITE_PRIVATE With *sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){ if( pWith ){ - assert( pParse->pWith!=pWith ); - pWith->pOuter = pParse->pWith; - pParse->pWith = pWith; - if( bFree ) pParse->pWithToFree = pWith; + if( bFree ){ + pWith = (With*)sqlite3ParserAddCleanup(pParse, + (void(*)(sqlite3*,void*))sqlite3WithDelete, + pWith); + if( pWith==0 ) return 0; + } + if( pParse->nErr==0 ){ + assert( pParse->pWith!=pWith ); + pWith->pOuter = pParse->pWith; + pParse->pWith = pWith; + } } + return pWith; } /* ** This function checks if argument pFrom refers to a CTE declared by -** a WITH clause on the stack currently maintained by the parser. And, -** if currently processing a CTE expression, if it is a recursive -** reference to the current CTE. +** a WITH clause on the stack currently maintained by the parser (on the +** pParse->pWith linked list). And if currently processing a CTE +** expression, this routine checks to see if the reference is +** a recursive reference to the CTE. ** -** If pFrom falls into either of the two categories above, pFrom->pTab -** and other fields are populated accordingly. The caller should check -** (pFrom->pTab!=0) to determine whether or not a successful match -** was found.
+** If pFrom matches a CTE according to either of the two cases above, +** pFrom->pTab and other fields are populated accordingly. ** -** Whether or not a match is found, SQLITE_OK is returned if no error -** occurs. If an error does occur, an error message is stored in the -** parser and some error code other than SQLITE_OK returned. +** Return 0 if no match is found. +** Return 1 if a match is found. +** Return 2 if an error condition is detected. */ -static int withExpand( - Walker *pWalker, - struct SrcList_item *pFrom +static int resolveFromTermToCte( + Parse *pParse, /* The parsing context */ + Walker *pWalker, /* Current tree walker */ + SrcItem *pFrom /* The FROM clause term to check */ ){ - Parse *pParse = pWalker->pParse; - sqlite3 *db = pParse->db; - struct Cte *pCte; /* Matched CTE (or NULL if no match) */ - With *pWith; /* WITH clause that pCte belongs to */ + Cte *pCte; /* Matched CTE (or NULL if no match) */ + With *pWith; /* The matching WITH */ assert( pFrom->pTab==0 ); + if( pParse->pWith==0 ){ + /* There are no WITH clauses in the stack. No match is possible */ + return 0; + } if( pParse->nErr ){ - return SQLITE_ERROR; + /* Prior errors might have left pParse->pWith in a goofy state, so + ** go no further. */ + return 0; + } + if( pFrom->zDatabase!=0 ){ + /* The FROM term contains a schema qualifier (ex: main.t1) and so + ** it cannot possibly be a CTE reference. */ + return 0; + } + if( pFrom->fg.notCte ){ + /* The FROM term is specifically excluded from matching a CTE. + ** (1) It is part of a trigger that used to have zDatabase but had + ** zDatabase removed by sqlite3FixTriggerStep(). + ** (2) This is the first term in the FROM clause of an UPDATE. + */ + return 0; } - pCte = searchWith(pParse->pWith, pFrom, &pWith); if( pCte ){ + sqlite3 *db = pParse->db; Table *pTab; ExprList *pEList; Select *pSel; Select *pLeft; /* Left-most SELECT statement */ + Select *pRecTerm; /* Left-most recursive term */ int bMayRecursive; /* True if compound joined by UNION [ALL] */ With *pSavedWith; /* Initial value of pParse->pWith */ + int iRecTab = -1; /* Cursor for recursive table */ + CteUse *pCteUse; /* If pCte->zCteErr is non-NULL at this point, then this is an illegal ** recursive reference to CTE pCte. Leave an error in pParse and return ** early. If pCte->zCteErr is NULL, then this is not an illegal recursive reference. ** In this case, proceed.
*/ if( pCte->zCteErr ){ sqlite3ErrorMsg(pParse, pCte->zCteErr, pCte->zName); - return SQLITE_ERROR; + return 2; } - if( cannotBeFunction(pParse, pFrom) ) return SQLITE_ERROR; + if( cannotBeFunction(pParse, pFrom) ) return 2; assert( pFrom->pTab==0 ); - pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table)); - if( pTab==0 ) return WRC_Abort; + pTab = sqlite3DbMallocZero(db, sizeof(Table)); + if( pTab==0 ) return 2; + pCteUse = pCte->pUse; + if( pCteUse==0 ){ + pCte->pUse = pCteUse = sqlite3DbMallocZero(db, sizeof(pCteUse[0])); + if( pCteUse==0 + || sqlite3ParserAddCleanup(pParse,sqlite3DbFree,pCteUse)==0 + ){ + sqlite3DbFree(db, pTab); + return 2; + } + pCteUse->eM10d = pCte->eM10d; + } + pFrom->pTab = pTab; pTab->nTabRef = 1; pTab->zName = sqlite3DbStrDup(db, pCte->zName); pTab->iPKey = -1; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); pTab->tabFlags |= TF_Ephemeral | TF_NoVisibleRowid; pFrom->pSelect = sqlite3SelectDup(db, pCte->pSelect, 0); - if( db->mallocFailed ) return SQLITE_NOMEM_BKPT; + if( db->mallocFailed ) return 2; + pFrom->pSelect->selFlags |= SF_CopyCte; assert( pFrom->pSelect ); + pFrom->fg.isCte = 1; + pFrom->u2.pCteUse = pCteUse; + pCteUse->nUse++; + if( pCteUse->nUse>=2 && pCteUse->eM10d==M10d_Any ){ + pCteUse->eM10d = M10d_Yes; + } /* Check if this is a recursive CTE. */ - pSel = pFrom->pSelect; + pRecTerm = pSel = pFrom->pSelect; bMayRecursive = ( pSel->op==TK_ALL || pSel->op==TK_UNION ); - if( bMayRecursive ){ + while( bMayRecursive && pRecTerm->op==pSel->op ){ int i; - SrcList *pSrc = pFrom->pSelect->pSrc; + SrcList *pSrc = pRecTerm->pSrc; + assert( pRecTerm->pPrior!=0 ); for(i=0; inSrc; i++){ - struct SrcList_item *pItem = &pSrc->a[i]; + SrcItem *pItem = &pSrc->a[i]; if( pItem->zDatabase==0 && pItem->zName!=0 && 0==sqlite3StrICmp(pItem->zName, pCte->zName) - ){ + ){ pItem->pTab = pTab; - pItem->fg.isRecursive = 1; pTab->nTabRef++; - pSel->selFlags |= SF_Recursive; + pItem->fg.isRecursive = 1; + if( pRecTerm->selFlags & SF_Recursive ){ + sqlite3ErrorMsg(pParse, + "multiple references to recursive table: %s", pCte->zName + ); + return 2; + } + pRecTerm->selFlags |= SF_Recursive; + if( iRecTab<0 ) iRecTab = pParse->nTab++; + pItem->iCursor = iRecTab; } } + if( (pRecTerm->selFlags & SF_Recursive)==0 ) break; + pRecTerm = pRecTerm->pPrior; } - /* Only one recursive reference is permitted. 
*/ - if( pTab->nTabRef>2 ){ - sqlite3ErrorMsg( - pParse, "multiple references to recursive table: %s", pCte->zName - ); - return SQLITE_ERROR; - } - assert( pTab->nTabRef==1 || - ((pSel->selFlags&SF_Recursive) && pTab->nTabRef==2 )); - pCte->zCteErr = "circular reference: %s"; pSavedWith = pParse->pWith; pParse->pWith = pWith; - if( bMayRecursive ){ - Select *pPrior = pSel->pPrior; - assert( pPrior->pWith==0 ); - pPrior->pWith = pSel->pWith; - sqlite3WalkSelect(pWalker, pPrior); - pPrior->pWith = 0; + if( pSel->selFlags & SF_Recursive ){ + int rc; + assert( pRecTerm!=0 ); + assert( (pRecTerm->selFlags & SF_Recursive)==0 ); + assert( pRecTerm->pNext!=0 ); + assert( (pRecTerm->pNext->selFlags & SF_Recursive)!=0 ); + assert( pRecTerm->pWith==0 ); + pRecTerm->pWith = pSel->pWith; + rc = sqlite3WalkSelect(pWalker, pRecTerm); + pRecTerm->pWith = 0; + if( rc ){ + pParse->pWith = pSavedWith; + return 2; + } }else{ - sqlite3WalkSelect(pWalker, pSel); + if( sqlite3WalkSelect(pWalker, pSel) ){ + pParse->pWith = pSavedWith; + return 2; + } } pParse->pWith = pWith; @@ -134174,7 +137356,7 @@ static int withExpand( pCte->zName, pEList->nExpr, pCte->pCols->nExpr ); pParse->pWith = pSavedWith; - return SQLITE_ERROR; + return 2; } pEList = pCte->pCols; } @@ -134190,9 +137372,9 @@ static int withExpand( } pCte->zCteErr = 0; pParse->pWith = pSavedWith; + return 1; /* Success */ } - - return SQLITE_OK; + return 0; /* No match */ } #endif @@ -134205,7 +137387,7 @@ static int withExpand( ** sqlite3SelectExpand() when walking a SELECT tree to resolve table ** names and other FROM clause elements. */ -static void selectPopWith(Walker *pWalker, Select *p){ +SQLITE_PRIVATE void sqlite3SelectPopWith(Walker *pWalker, Select *p){ Parse *pParse = pWalker->pParse; if( OK_IF_ALWAYS_TRUE(pParse->pWith) && p->pPrior==0 ){ With *pWith = findRightmost(p)->pWith; @@ -134215,8 +137397,6 @@ static void selectPopWith(Walker *pWalker, Select *p){ } } } -#else -#define selectPopWith 0 #endif /* @@ -134226,7 +137406,7 @@ static void selectPopWith(Walker *pWalker, Select *p){ ** SQLITE_OK is returned. Otherwise, if an OOM error is encountered, ** SQLITE_NOMEM. */ -SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, struct SrcList_item *pFrom){ +SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, SrcItem *pFrom){ Select *pSel = pFrom->pSelect; Table *pTab; @@ -134243,7 +137423,13 @@ SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, struct SrcList_item *pFr sqlite3ColumnsFromExprList(pParse, pSel->pEList,&pTab->nCol,&pTab->aCol); pTab->iPKey = -1; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); - pTab->tabFlags |= TF_Ephemeral; +#ifndef SQLITE_ALLOW_ROWID_IN_VIEW + /* The usual case - do not allow ROWID on a subquery */ + pTab->tabFlags |= TF_Ephemeral | TF_NoVisibleRowid; +#else + pTab->tabFlags |= TF_Ephemeral; /* Legacy compatibility mode */ +#endif + return pParse->nErr ? 
SQLITE_ERROR : SQLITE_OK; } @@ -134274,10 +137460,10 @@ SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, struct SrcList_item *pFr */ static int selectExpander(Walker *pWalker, Select *p){ Parse *pParse = pWalker->pParse; - int i, j, k; + int i, j, k, rc; SrcList *pTabList; ExprList *pEList; - struct SrcList_item *pFrom; + SrcItem *pFrom; sqlite3 *db = pParse->db; Expr *pE, *pRight, *pExpr; u16 selFlags = p->selFlags; @@ -134297,6 +137483,15 @@ static int selectExpander(Walker *pWalker, Select *p){ } pTabList = p->pSrc; pEList = p->pEList; + if( pParse->pWith && (p->selFlags & SF_View) ){ + if( p->pWith==0 ){ + p->pWith = (With*)sqlite3DbMallocZero(db, sizeof(With)); + if( p->pWith==0 ){ + return WRC_Abort; + } + } + p->pWith->bView = 1; + } sqlite3WithPush(pParse, p->pWith, 0); /* Make sure cursor numbers have been assigned to all entries in @@ -134313,10 +137508,6 @@ static int selectExpander(Walker *pWalker, Select *p){ assert( pFrom->fg.isRecursive==0 || pFrom->pTab!=0 ); if( pFrom->pTab ) continue; assert( pFrom->fg.isRecursive==0 ); -#ifndef SQLITE_OMIT_CTE - if( withExpand(pWalker, pFrom) ) return WRC_Abort; - if( pFrom->pTab ) {} else -#endif if( pFrom->zName==0 ){ #ifndef SQLITE_OMIT_SUBQUERY Select *pSel = pFrom->pSelect; @@ -134325,6 +137516,12 @@ static int selectExpander(Walker *pWalker, Select *p){ assert( pFrom->pTab==0 ); if( sqlite3WalkSelect(pWalker, pSel) ) return WRC_Abort; if( sqlite3ExpandSubquery(pParse, pFrom) ) return WRC_Abort; +#endif +#ifndef SQLITE_OMIT_CTE + }else if( (rc = resolveFromTermToCte(pParse, pWalker, pFrom))!=0 ){ + if( rc>1 ) return WRC_Abort; + pTab = pFrom->pTab; + assert( pTab!=0 ); #endif }else{ /* An ordinary table or view name in the FROM clause */ @@ -134347,11 +137544,15 @@ static int selectExpander(Walker *pWalker, Select *p){ u8 eCodeOrig = pWalker->eCode; if( sqlite3ViewGetColumnNames(pParse, pTab) ) return WRC_Abort; assert( pFrom->pSelect==0 ); - if( pTab->pSelect && (db->flags & SQLITE_EnableView)==0 ){ + if( pTab->pSelect + && (db->flags & SQLITE_EnableView)==0 + && pTab->pSchema!=db->aDb[1].pSchema + ){ sqlite3ErrorMsg(pParse, "access to view \"%s\" prohibited", pTab->zName); } #ifndef SQLITE_OMIT_VIRTUALTABLE + assert( SQLITE_VTABRISK_Normal==1 && SQLITE_VTABRISK_High==2 ); if( IsVirtual(pTab) && pFrom->fg.fromDDL && ALWAYS(pTab->pVTable!=0) @@ -134373,7 +137574,7 @@ static int selectExpander(Walker *pWalker, Select *p){ } /* Locate the index named by the INDEXED BY clause, if any. 
*/ - if( sqlite3IndexedByLookup(pParse, pFrom) ){ + if( pFrom->fg.isIndexedBy && sqlite3IndexedByLookup(pParse, pFrom) ){ return WRC_Abort; } } @@ -134592,7 +137793,7 @@ static void sqlite3SelectExpand(Parse *pParse, Select *pSelect){ sqlite3WalkSelect(&w, pSelect); } w.xSelectCallback = selectExpander; - w.xSelectCallback2 = selectPopWith; + w.xSelectCallback2 = sqlite3SelectPopWith; w.eCode = 0; sqlite3WalkSelect(&w, pSelect); } @@ -134616,7 +137817,7 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ Parse *pParse; int i; SrcList *pTabList; - struct SrcList_item *pFrom; + SrcItem *pFrom; assert( p->selFlags & SF_Resolved ); if( p->selFlags & SF_HasTypeInfo ) return; @@ -134725,8 +137926,10 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){ pFunc->iDistinct = -1; }else{ KeyInfo *pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pE->x.pList,0,0); - sqlite3VdbeAddOp4(v, OP_OpenEphemeral, pFunc->iDistinct, 0, 0, - (char*)pKeyInfo, P4_KEYINFO); + pFunc->iDistAddr = sqlite3VdbeAddOp4(v, OP_OpenEphemeral, + pFunc->iDistinct, 0, 0, (char*)pKeyInfo, P4_KEYINFO); + ExplainQueryPlan((pParse, 0, "USE TEMP B-TREE FOR %s(DISTINCT)", + pFunc->pFunc->zName)); } } } @@ -134758,7 +137961,12 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ ** registers if register regAcc contains 0. The caller will take care ** of setting and clearing regAcc. */ -static void updateAccumulator(Parse *pParse, int regAcc, AggInfo *pAggInfo){ +static void updateAccumulator( + Parse *pParse, + int regAcc, + AggInfo *pAggInfo, + int eDistinctType +){ Vdbe *v = pParse->pVdbe; int i; int regHit = 0; @@ -134804,13 +138012,12 @@ static void updateAccumulator(Parse *pParse, int regAcc, AggInfo *pAggInfo){ nArg = 0; regAgg = 0; } - if( pF->iDistinct>=0 ){ + if( pF->iDistinct>=0 && pList ){ if( addrNext==0 ){ addrNext = sqlite3VdbeMakeLabel(pParse); } - testcase( nArg==0 ); /* Error condition */ - testcase( nArg>1 ); /* Also an error */ - codeDistinct(pParse, pF->iDistinct, addrNext, 1, regAgg); + pF->iDistinct = codeDistinct(pParse, eDistinctType, + pF->iDistinct, addrNext, pList, regAgg); } if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){ CollSeq *pColl = 0; @@ -134862,7 +138069,7 @@ static void explainSimpleCount( ){ if( pParse->explain==2 ){ int bCover = (pIdx!=0 && (HasRowid(pTab) || !IsPrimaryKeyIndex(pIdx))); - sqlite3VdbeExplain(pParse, 0, "SCAN TABLE %s%s%s", + sqlite3VdbeExplain(pParse, 0, "SCAN %s%s%s", pTab->zName, bCover ? " USING COVERING INDEX " : "", bCover ? pIdx->zName : "" @@ -134887,7 +138094,9 @@ static void explainSimpleCount( static int havingToWhereExprCb(Walker *pWalker, Expr *pExpr){ if( pExpr->op!=TK_AND ){ Select *pS = pWalker->u.pSelect; - if( sqlite3ExprIsConstantOrGroupBy(pWalker->pParse, pExpr, pS->pGroupBy) ){ + if( sqlite3ExprIsConstantOrGroupBy(pWalker->pParse, pExpr, pS->pGroupBy) + && ExprAlwaysFalse(pExpr)==0 + ){ sqlite3 *db = pWalker->pParse->db; Expr *pNew = sqlite3Expr(db, TK_INTEGER, "1"); if( pNew ){ @@ -134926,7 +138135,7 @@ static void havingToWhere(Parse *pParse, Select *p){ sWalker.u.pSelect = p; sqlite3WalkExpr(&sWalker, p->pHaving); #if SELECTTRACE_ENABLED - if( sWalker.eCode && (sqlite3_unsupported_selecttrace & 0x100)!=0 ){ + if( sWalker.eCode && (sqlite3SelectTrace & 0x100)!=0 ){ SELECTTRACE(0x100,pParse,p,("Move HAVING terms into WHERE:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -134938,11 +138147,13 @@ static void havingToWhere(Parse *pParse, Select *p){ ** If it is, then return the SrcList_item for the prior view. 
If it is not, ** then return 0. */ -static struct SrcList_item *isSelfJoinView( +static SrcItem *isSelfJoinView( SrcList *pTabList, /* Search for self-joins in this FROM clause */ - struct SrcList_item *pThis /* Search for prior reference to this subquery */ + SrcItem *pThis /* Search for prior reference to this subquery */ ){ - struct SrcList_item *pItem; + SrcItem *pItem; + assert( pThis->pSelect!=0 ); + if( pThis->pSelect->selFlags & SF_PushDown ) return 0; for(pItem = pTabList->a; pItempSelect==0 ) continue; @@ -134958,9 +138169,7 @@ static struct SrcList_item *isSelfJoinView( ** names in the same FROM clause. */ continue; } - if( sqlite3ExprCompare(0, pThis->pSelect->pWhere, pS1->pWhere, -1) - || sqlite3ExprCompare(0, pThis->pSelect->pHaving, pS1->pHaving, -1) - ){ + if( pItem->pSelect->selFlags & SF_PushDown ){ /* The view was modified by some other optimization such as ** pushDownWhereTerms() */ continue; @@ -134970,6 +138179,15 @@ static struct SrcList_item *isSelfJoinView( return 0; } +/* +** Deallocate a single AggInfo object +*/ +static void agginfoFree(sqlite3 *db, AggInfo *p){ + sqlite3DbFree(db, p->aCol); + sqlite3DbFree(db, p->aFunc); + sqlite3DbFreeNN(db, p); +} + #ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION /* ** Attempt to transform a query of the form @@ -135048,7 +138266,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ p->selFlags &= ~SF_Aggregate; #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x400 ){ + if( sqlite3SelectTrace & 0x400 ){ SELECTTRACE(0x400,pParse,p,("After count-of-view optimization:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -135101,7 +138319,7 @@ SQLITE_PRIVATE int sqlite3Select( if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1; #if SELECTTRACE_ENABLED SELECTTRACE(1,pParse,p, ("begin processing:\n", pParse->addrExplain)); - if( sqlite3_unsupported_selecttrace & 0x100 ){ + if( sqlite3SelectTrace & 0x100 ){ sqlite3TreeViewSelect(0, p, 0); } #endif @@ -135110,15 +138328,24 @@ SQLITE_PRIVATE int sqlite3Select( assert( p->pOrderBy==0 || pDest->eDest!=SRT_Fifo ); assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistQueue ); assert( p->pOrderBy==0 || pDest->eDest!=SRT_Queue ); - if( IgnorableOrderby(pDest) ){ - assert(pDest->eDest==SRT_Exists || pDest->eDest==SRT_Union || - pDest->eDest==SRT_Except || pDest->eDest==SRT_Discard || - pDest->eDest==SRT_Queue || pDest->eDest==SRT_DistFifo || - pDest->eDest==SRT_DistQueue || pDest->eDest==SRT_Fifo); - /* If ORDER BY makes no difference in the output then neither does - ** DISTINCT so it can be removed too. 
*/ - sqlite3ExprListDelete(db, p->pOrderBy); - p->pOrderBy = 0; + if( IgnorableDistinct(pDest) ){ + assert(pDest->eDest==SRT_Exists || pDest->eDest==SRT_Union || + pDest->eDest==SRT_Except || pDest->eDest==SRT_Discard || + pDest->eDest==SRT_DistQueue || pDest->eDest==SRT_DistFifo ); + /* All of these destinations are also able to ignore the ORDER BY clause */ + if( p->pOrderBy ){ +#if SELECTTRACE_ENABLED + SELECTTRACE(1,pParse,p, ("dropping superfluous ORDER BY:\n")); + if( sqlite3SelectTrace & 0x100 ){ + sqlite3TreeViewExprList(0, p->pOrderBy, 0, "ORDERBY"); + } +#endif + sqlite3ParserAddCleanup(pParse, + (void(*)(sqlite3*,void*))sqlite3ExprListDelete, + p->pOrderBy); + testcase( pParse->earlyCleanup ); + p->pOrderBy = 0; + } p->selFlags &= ~SF_Distinct; p->selFlags |= SF_NoopOrderBy; } @@ -135128,7 +138355,7 @@ SQLITE_PRIVATE int sqlite3Select( } assert( p->pEList!=0 ); #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x104 ){ + if( sqlite3SelectTrace & 0x104 ){ SELECTTRACE(0x104,pParse,p, ("after name resolution:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -135139,9 +138366,9 @@ SQLITE_PRIVATE int sqlite3Select( ** In this case, it is an error if the target object (pSrc->a[0]) name ** or alias is duplicated within FROM clause (pSrc->a[1..n]). */ if( p->selFlags & SF_UpdateFrom ){ - struct SrcList_item *p0 = &p->pSrc->a[0]; + SrcItem *p0 = &p->pSrc->a[0]; for(i=1; ipSrc->nSrc; i++){ - struct SrcList_item *p1 = &p->pSrc->a[i]; + SrcItem *p1 = &p->pSrc->a[i]; if( p0->pTab==p1->pTab && 0==sqlite3_stricmp(p0->zAlias, p1->zAlias) ){ sqlite3ErrorMsg(pParse, "target object/alias may not appear in FROM clause: %s", @@ -135153,17 +138380,16 @@ SQLITE_PRIVATE int sqlite3Select( } if( pDest->eDest==SRT_Output ){ - generateColumnNames(pParse, p); + sqlite3GenerateColumnNames(pParse, p); } #ifndef SQLITE_OMIT_WINDOWFUNC - rc = sqlite3WindowRewrite(pParse, p); - if( rc ){ + if( sqlite3WindowRewrite(pParse, p) ){ assert( db->mallocFailed || pParse->nErr>0 ); goto select_end; } #if SELECTTRACE_ENABLED - if( p->pWin && (sqlite3_unsupported_selecttrace & 0x108)!=0 ){ + if( p->pWin && (sqlite3SelectTrace & 0x108)!=0 ){ SELECTTRACE(0x104,pParse,p, ("after window rewrite:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -135179,7 +138405,7 @@ SQLITE_PRIVATE int sqlite3Select( */ #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) for(i=0; !p->pPrior && inSrc; i++){ - struct SrcList_item *pItem = &pTabList->a[i]; + SrcItem *pItem = &pTabList->a[i]; Select *pSub = pItem->pSelect; Table *pTab = pItem->pTab; @@ -135270,7 +138496,7 @@ SQLITE_PRIVATE int sqlite3Select( rc = multiSelect(pParse, p, pDest); #if SELECTTRACE_ENABLED SELECTTRACE(0x1,pParse,p,("end compound-select processing\n")); - if( (sqlite3_unsupported_selecttrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){ + if( (sqlite3SelectTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){ sqlite3TreeViewSelect(0, p, 0); } #endif @@ -135284,12 +138510,13 @@ SQLITE_PRIVATE int sqlite3Select( ** as the equivalent optimization will be handled by query planner in ** sqlite3WhereBegin(). 
*/ - if( pTabList->nSrc>1 + if( p->pWhere!=0 + && p->pWhere->op==TK_AND && OptimizationEnabled(db, SQLITE_PropagateConst) && propagateConstants(pParse, p) ){ #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x100 ){ + if( sqlite3SelectTrace & 0x100 ){ SELECTTRACE(0x100,pParse,p,("After constant propagation:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -135313,7 +138540,8 @@ SQLITE_PRIVATE int sqlite3Select( ** (2) Generate code for all sub-queries */ for(i=0; inSrc; i++){ - struct SrcList_item *pItem = &pTabList->a[i]; + SrcItem *pItem = &pTabList->a[i]; + SrcItem *pPrior; SelectDest dest; Select *pSub; #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) @@ -135346,19 +138574,8 @@ SQLITE_PRIVATE int sqlite3Select( pSub = pItem->pSelect; if( pSub==0 ) continue; - /* The code for a subquery should only be generated once, though it is - ** technically harmless for it to be generated multiple times. The - ** following assert() will detect if something changes to cause - ** the same subquery to be coded multiple times, as a signal to the - ** developers to try to optimize the situation. - ** - ** Update 2019-07-24: - ** See ticket https://sqlite.org/src/tktview/c52b09c7f38903b1311cec40. - ** The dbsqlfuzz fuzzer found a case where the same subquery gets - ** coded twice. So this assert() now becomes a testcase(). It should - ** be very rare, though. - */ - testcase( pItem->addrFillSub!=0 ); + /* The code for a subquery should only be generated once. */ + assert( pItem->addrFillSub==0 ); /* Increment Parse.nHeight by the height of the largest expression ** tree referred to by this, the parent select. The child select @@ -135373,16 +138590,18 @@ SQLITE_PRIVATE int sqlite3Select( ** inside the subquery. This can help the subquery to run more efficiently. */ if( OptimizationEnabled(db, SQLITE_PushDown) + && (pItem->fg.isCte==0 || pItem->u2.pCteUse->eM10d!=M10d_Yes) && pushDownWhereTerms(pParse, pSub, p->pWhere, pItem->iCursor, (pItem->fg.jointype & JT_OUTER)!=0) ){ #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x100 ){ + if( sqlite3SelectTrace & 0x100 ){ SELECTTRACE(0x100,pParse,p, ("After WHERE-clause push-down into subquery %d:\n", pSub->selId)); sqlite3TreeViewSelect(0, p, 0); } #endif + assert( pItem->pSelect && (pItem->pSelect->selFlags & SF_PushDown)!=0 ); }else{ SELECTTRACE(0x100,pParse,p,("Push-down not possible\n")); } @@ -135392,16 +138611,18 @@ SQLITE_PRIVATE int sqlite3Select( /* Generate code to implement the subquery ** - ** The subquery is implemented as a co-routine if the subquery is - ** guaranteed to be the outer loop (so that it does not need to be - ** computed more than once) + ** The subquery is implemented as a co-routine if: + ** (1) the subquery is guaranteed to be the outer loop (so that + ** it does not need to be computed more than once), and + ** (2) the subquery is not a CTE that should be materialized ** - ** TODO: Are there other reasons beside (1) to use a co-routine + ** TODO: Are there other reasons beside (1) and (2) to use a co-routine ** implementation? */ if( i==0 && (pTabList->nSrc==1 || (pTabList->a[1].fg.jointype&(JT_LEFT|JT_CROSS))!=0) /* (1) */ + && (pItem->fg.isCte==0 || pItem->u2.pCteUse->eM10d!=M10d_Yes) /* (2) */ ){ /* Implement a co-routine that will return a single row of the result ** set on each invocation. 
@@ -135410,10 +138631,10 @@ SQLITE_PRIVATE int sqlite3Select( pItem->regReturn = ++pParse->nMem; sqlite3VdbeAddOp3(v, OP_InitCoroutine, pItem->regReturn, 0, addrTop); - VdbeComment((v, "%s", pItem->pTab->zName)); + VdbeComment((v, "%!S", pItem)); pItem->addrFillSub = addrTop; sqlite3SelectDestInit(&dest, SRT_Coroutine, pItem->regReturn); - ExplainQueryPlan((pParse, 1, "CO-ROUTINE %u", pSub->selId)); + ExplainQueryPlan((pParse, 1, "CO-ROUTINE %!S", pItem)); sqlite3Select(pParse, pSub, &dest); pItem->pTab->nRowLogEst = pSub->nSelectRow; pItem->fg.viaCoroutine = 1; @@ -135421,18 +138642,33 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3VdbeEndCoroutine(v, pItem->regReturn); sqlite3VdbeJumpHere(v, addrTop-1); sqlite3ClearTempRegCache(pParse); - }else{ - /* Generate a subroutine that will fill an ephemeral table with - ** the content of this subquery. pItem->addrFillSub will point - ** to the address of the generated subroutine. pItem->regReturn - ** is a register allocated to hold the subroutine return address - */ + }else if( pItem->fg.isCte && pItem->u2.pCteUse->addrM9e>0 ){ + /* This is a CTE for which materialization code has already been + ** generated. Invoke the subroutine to compute the materialization, + ** the make the pItem->iCursor be a copy of the ephemerial table that + ** holds the result of the materialization. */ + CteUse *pCteUse = pItem->u2.pCteUse; + sqlite3VdbeAddOp2(v, OP_Gosub, pCteUse->regRtn, pCteUse->addrM9e); + if( pItem->iCursor!=pCteUse->iCur ){ + sqlite3VdbeAddOp2(v, OP_OpenDup, pItem->iCursor, pCteUse->iCur); + } + pSub->nSelectRow = pCteUse->nRowEst; + }else if( (pPrior = isSelfJoinView(pTabList, pItem))!=0 ){ + /* This view has already been materialized by a prior entry in + ** this same FROM clause. Reuse it. */ + if( pPrior->addrFillSub ){ + sqlite3VdbeAddOp2(v, OP_Gosub, pPrior->regReturn, pPrior->addrFillSub); + } + sqlite3VdbeAddOp2(v, OP_OpenDup, pItem->iCursor, pPrior->iCursor); + pSub->nSelectRow = pPrior->pSelect->nSelectRow; + }else{ + /* Materialize the view. If the view is not correlated, generate a + ** subroutine to do the materialization so that subsequent uses of + ** the same view can reuse the materialization. */ int topAddr; int onceAddr = 0; int retAddr; - struct SrcList_item *pPrior; - testcase( pItem->addrFillSub==0 ); /* Ticket c52b09c7f38903b1311 */ pItem->regReturn = ++pParse->nMem; topAddr = sqlite3VdbeAddOp2(v, OP_Integer, 0, pItem->regReturn); pItem->addrFillSub = topAddr+1; @@ -135441,26 +138677,26 @@ SQLITE_PRIVATE int sqlite3Select( ** a trigger, then we only need to compute the value of the subquery ** once. 
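The two added branches avoid recomputing a subquery: a CTE whose materialization subroutine already exists is re-entered with OP_Gosub and its ephemeral result duplicated with OP_OpenDup, and a view that appears a second time in the same FROM clause reuses the earlier entry's cursor the same way. A hedged fragment of the query shape that exercises the first branch, written as a helper that assumes the db handle and print_row() callback from the previous sketch (plan wording may differ between builds):

#include <sqlite3.h>

/* A CTE referenced twice is materialized once; the second reference
** duplicates the ephemeral cursor rather than re-running the subquery. */
static void show_cte_reuse(sqlite3 *db, sqlite3_callback print_row){
  sqlite3_exec(db,
      "EXPLAIN QUERY PLAN "
      "WITH c(n) AS (SELECT b FROM t) "
      "SELECT * FROM c AS c1, c AS c2 WHERE c1.n=c2.n;",
      print_row, 0, 0);
}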
*/ onceAddr = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); - VdbeComment((v, "materialize \"%s\"", pItem->pTab->zName)); + VdbeComment((v, "materialize %!S", pItem)); }else{ - VdbeNoopComment((v, "materialize \"%s\"", pItem->pTab->zName)); - } - pPrior = isSelfJoinView(pTabList, pItem); - if( pPrior ){ - sqlite3VdbeAddOp2(v, OP_OpenDup, pItem->iCursor, pPrior->iCursor); - assert( pPrior->pSelect!=0 ); - pSub->nSelectRow = pPrior->pSelect->nSelectRow; - }else{ - sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor); - ExplainQueryPlan((pParse, 1, "MATERIALIZE %u", pSub->selId)); - sqlite3Select(pParse, pSub, &dest); + VdbeNoopComment((v, "materialize %!S", pItem)); } + sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor); + ExplainQueryPlan((pParse, 1, "MATERIALIZE %!S", pItem)); + sqlite3Select(pParse, pSub, &dest); pItem->pTab->nRowLogEst = pSub->nSelectRow; if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr); retAddr = sqlite3VdbeAddOp1(v, OP_Return, pItem->regReturn); - VdbeComment((v, "end %s", pItem->pTab->zName)); + VdbeComment((v, "end %!S", pItem)); sqlite3VdbeChangeP1(v, topAddr, retAddr); sqlite3ClearTempRegCache(pParse); + if( pItem->fg.isCte && pItem->fg.isCorrelated==0 ){ + CteUse *pCteUse = pItem->u2.pCteUse; + pCteUse->addrM9e = pItem->addrFillSub; + pCteUse->regRtn = pItem->regReturn; + pCteUse->iCur = pItem->iCursor; + pCteUse->nRowEst = pSub->nSelectRow; + } } if( db->mallocFailed ) goto select_end; pParse->nHeight -= sqlite3SelectExprHeight(p); @@ -135477,7 +138713,7 @@ SQLITE_PRIVATE int sqlite3Select( sDistinct.isTnct = (p->selFlags & SF_Distinct)!=0; #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x400 ){ + if( sqlite3SelectTrace & 0x400 ){ SELECTTRACE(0x400,pParse,p,("After all FROM-clause analysis:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -135513,7 +138749,7 @@ SQLITE_PRIVATE int sqlite3Select( assert( sDistinct.isTnct ); #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x400 ){ + if( sqlite3SelectTrace & 0x400 ){ SELECTTRACE(0x400,pParse,p,("Transform DISTINCT into GROUP BY:\n")); sqlite3TreeViewSelect(0, p, 0); } @@ -135605,6 +138841,7 @@ SQLITE_PRIVATE int sqlite3Select( sSort.pOrderBy = 0; } } + SELECTTRACE(1,pParse,p,("WhereBegin returns\n")); /* If sorting index that was created by a prior OP_OpenEphemeral ** instruction ended up not being needed, then change the OP_OpenEphemeral @@ -135643,6 +138880,7 @@ SQLITE_PRIVATE int sqlite3Select( /* End the database scan loop. */ + SELECTTRACE(1,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); } }else{ @@ -135713,11 +138951,14 @@ SQLITE_PRIVATE int sqlite3Select( ** SELECT statement. 
*/ pAggInfo = sqlite3DbMallocZero(db, sizeof(*pAggInfo) ); - if( pAggInfo==0 ){ + if( pAggInfo ){ + sqlite3ParserAddCleanup(pParse, + (void(*)(sqlite3*,void*))agginfoFree, pAggInfo); + testcase( pParse->earlyCleanup ); + } + if( db->mallocFailed ){ goto select_end; } - pAggInfo->pNext = pParse->pAggList; - pParse->pAggList = pAggInfo; pAggInfo->selId = p->selId; memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pParse; @@ -135761,10 +139002,14 @@ SQLITE_PRIVATE int sqlite3Select( pAggInfo->mxReg = pParse->nMem; if( db->mallocFailed ) goto select_end; #if SELECTTRACE_ENABLED - if( sqlite3_unsupported_selecttrace & 0x400 ){ + if( sqlite3SelectTrace & 0x400 ){ int ii; SELECTTRACE(0x400,pParse,p,("After aggregate analysis %p:\n", pAggInfo)); sqlite3TreeViewSelect(0, p, 0); + if( minMaxFlag ){ + sqlite3DebugPrintf("MIN/MAX Optimization (0x%02x) adds:\n", minMaxFlag); + sqlite3TreeViewExprList(0, pMinMaxOrderBy, 0, "ORDERBY"); + } for(ii=0; iinColumn; ii++){ sqlite3DebugPrintf("agg-column[%d] iMem=%d\n", ii, pAggInfo->aCol[ii].iMem); @@ -135792,6 +139037,20 @@ SQLITE_PRIVATE int sqlite3Select( int addrSortingIdx; /* The OP_OpenEphemeral for the sorting index */ int addrReset; /* Subroutine for resetting the accumulator */ int regReset; /* Return address register for reset subroutine */ + ExprList *pDistinct = 0; + u16 distFlag = 0; + int eDist = WHERE_DISTINCT_NOOP; + + if( pAggInfo->nFunc==1 + && pAggInfo->aFunc[0].iDistinct>=0 + && pAggInfo->aFunc[0].pFExpr->x.pList + ){ + Expr *pExpr = pAggInfo->aFunc[0].pFExpr->x.pList->a[0].pExpr; + pExpr = sqlite3ExprDup(db, pExpr, 0); + pDistinct = sqlite3ExprListDup(db, pGroupBy, 0); + pDistinct = sqlite3ExprListAppend(pParse, pDistinct, pExpr); + distFlag = pDistinct ? (WHERE_WANT_DISTINCT|WHERE_AGG_DISTINCT) : 0; + } /* If there is a GROUP BY clause we might need a sorting index to ** implement it. Allocate that sorting index now. If it turns out @@ -135828,10 +139087,15 @@ SQLITE_PRIVATE int sqlite3Select( */ sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); SELECTTRACE(1,pParse,p,("WhereBegin\n")); - pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, 0, - WHERE_GROUPBY | (orderByGrp ? WHERE_SORTBYGROUP : 0), 0 + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, pDistinct, + WHERE_GROUPBY | (orderByGrp ? WHERE_SORTBYGROUP : 0) | distFlag, 0 ); - if( pWInfo==0 ) goto select_end; + if( pWInfo==0 ){ + sqlite3ExprListDelete(db, pDistinct); + goto select_end; + } + eDist = sqlite3WhereIsDistinct(pWInfo); + SELECTTRACE(1,pParse,p,("WhereBegin returns\n")); if( sqlite3WhereIsOrdered(pWInfo)==pGroupBy->nExpr ){ /* The optimizer is able to deliver rows in group by order so ** we do not have to sort. 
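The pDistinct/distFlag/eDist plumbing lets a query with exactly one aggregate taking a DISTINCT argument push its deduplication into sqlite3WhereBegin() via WHERE_WANT_DISTINCT|WHERE_AGG_DISTINCT; sqlite3WhereIsDistinct() then reports which strategy the planner chose, and fixDistinctOpenEph() patches the ephemeral-table opcodes if one turned out to be needed after all. A fragment of the query shape involved, reusing db and print_row() from the earlier sketch; whether an index actually absorbs the DISTINCT depends on the schema:

#include <sqlite3.h>

static void show_distinct_agg(sqlite3 *db, sqlite3_callback print_row){
  /* The GROUP BY columns plus the DISTINCT argument form the candidate
  ** distinct list handed to the WHERE planner by the hunks above. */
  sqlite3_exec(db, "CREATE INDEX IF NOT EXISTS t_ab ON t(a,b);", 0, 0, 0);
  sqlite3_exec(db,
      "EXPLAIN QUERY PLAN "
      "SELECT count(DISTINCT b) FROM t GROUP BY a;",
      print_row, 0, 0);
}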
The OP_OpenEphemeral table will be @@ -135880,6 +139144,7 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3VdbeAddOp2(v, OP_SorterInsert, pAggInfo->sortingIdx, regRecord); sqlite3ReleaseTempReg(pParse, regRecord); sqlite3ReleaseTempRange(pParse, regBase, nCol); + SELECTTRACE(1,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); pAggInfo->sortingIdxPTab = sortPTab = pParse->nTab++; sortOut = sqlite3GetTempReg(pParse); @@ -135947,19 +139212,21 @@ SQLITE_PRIVATE int sqlite3Select( ** the current row */ sqlite3VdbeJumpHere(v, addr1); - updateAccumulator(pParse, iUseFlag, pAggInfo); + updateAccumulator(pParse, iUseFlag, pAggInfo, eDist); sqlite3VdbeAddOp2(v, OP_Integer, 1, iUseFlag); VdbeComment((v, "indicate data in accumulator")); /* End of the loop */ if( groupBySort ){ - sqlite3VdbeAddOp2(v, OP_SorterNext, pAggInfo->sortingIdx, addrTopOfLoop); + sqlite3VdbeAddOp2(v, OP_SorterNext, pAggInfo->sortingIdx,addrTopOfLoop); VdbeCoverage(v); }else{ + SELECTTRACE(1,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); sqlite3VdbeChangeToNoop(v, addrSortingIdx); } + sqlite3ExprListDelete(db, pDistinct); /* Output the final row of result */ @@ -136003,6 +139270,10 @@ SQLITE_PRIVATE int sqlite3Select( VdbeComment((v, "indicate accumulator empty")); sqlite3VdbeAddOp1(v, OP_Return, regReset); + if( eDist!=WHERE_DISTINCT_NOOP ){ + struct AggInfo_func *pF = &pAggInfo->aFunc[0]; + fixDistinctOpenEph(pParse, eDist, pF->iDistinct, pF->iDistAddr); + } } /* endif pGroupBy. Begin aggregate queries without GROUP BY: */ else { Table *pTab; @@ -136066,7 +139337,9 @@ SQLITE_PRIVATE int sqlite3Select( explainSimpleCount(pParse, pTab, pBest); }else{ int regAcc = 0; /* "populate accumulators" flag */ - int addrSkip; + ExprList *pDistinct = 0; + u16 distFlag = 0; + int eDist; /* If there are accumulator registers but no min() or max() functions ** without FILTER clauses, allocate register regAcc. Register regAcc @@ -136090,6 +139363,9 @@ SQLITE_PRIVATE int sqlite3Select( regAcc = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Integer, 0, regAcc); } + }else if( pAggInfo->nFunc==1 && pAggInfo->aFunc[0].iDistinct>=0 ){ + pDistinct = pAggInfo->aFunc[0].pFExpr->x.pList; + distFlag = pDistinct ? (WHERE_WANT_DISTINCT|WHERE_AGG_DISTINCT) : 0; } /* This case runs if the aggregate has no GROUP BY clause. The @@ -136109,16 +139385,23 @@ SQLITE_PRIVATE int sqlite3Select( SELECTTRACE(1,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMaxOrderBy, - 0, minMaxFlag, 0); + pDistinct, minMaxFlag|distFlag, 0); if( pWInfo==0 ){ goto select_end; } - updateAccumulator(pParse, regAcc, pAggInfo); + SELECTTRACE(1,pParse,p,("WhereBegin returns\n")); + eDist = sqlite3WhereIsDistinct(pWInfo); + updateAccumulator(pParse, regAcc, pAggInfo, eDist); + if( eDist!=WHERE_DISTINCT_NOOP ){ + struct AggInfo_func *pF = &pAggInfo->aFunc[0]; + fixDistinctOpenEph(pParse, eDist, pF->iDistinct, pF->iDistAddr); + } + if( regAcc ) sqlite3VdbeAddOp2(v, OP_Integer, 1, regAcc); - addrSkip = sqlite3WhereOrderByLimitOptLabel(pWInfo); - if( addrSkip!=sqlite3WhereContinueLabel(pWInfo) ){ - sqlite3VdbeGoto(v, addrSkip); + if( minMaxFlag ){ + sqlite3WhereMinMaxOptEarlyOut(v, pWInfo); } + SELECTTRACE(1,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); finalizeAggFunctions(pParse, pAggInfo); } @@ -136158,20 +139441,20 @@ SQLITE_PRIVATE int sqlite3Select( ** successful coding of the SELECT. 
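sqlite3WhereMinMaxOptEarlyOut() above replaces the earlier addrSkip / sqlite3WhereOrderByLimitOptLabel() arrangement for bare min()/max() aggregates: when the WHERE loop walks a suitable index in the right direction, the first row it delivers is the answer and the loop exits early. A fragment of the classic shape, reusing db and print_row() from the earlier sketch; on an INTEGER PRIMARY KEY this typically plans as a single b-tree probe:

#include <sqlite3.h>

static void show_minmax(sqlite3 *db, sqlite3_callback print_row){
  sqlite3_exec(db, "EXPLAIN QUERY PLAN SELECT max(a) FROM t;",
               print_row, 0, 0);
  sqlite3_exec(db, "SELECT max(a) FROM t;", print_row, 0, 0);
}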
*/ select_end: + assert( db->mallocFailed==0 || db->mallocFailed==1 ); + pParse->nErr += db->mallocFailed; sqlite3ExprListDelete(db, pMinMaxOrderBy); #ifdef SQLITE_DEBUG if( pAggInfo && !db->mallocFailed ){ for(i=0; inColumn; i++){ Expr *pExpr = pAggInfo->aCol[i].pCExpr; - assert( pExpr!=0 || db->mallocFailed ); - if( pExpr==0 ) continue; + assert( pExpr!=0 ); assert( pExpr->pAggInfo==pAggInfo ); assert( pExpr->iAgg==i ); } for(i=0; inFunc; i++){ Expr *pExpr = pAggInfo->aFunc[i].pFExpr; - assert( pExpr!=0 || db->mallocFailed ); - if( pExpr==0 ) continue; + assert( pExpr!=0 ); assert( pExpr->pAggInfo==pAggInfo ); assert( pExpr->iAgg==i ); } @@ -136180,7 +139463,7 @@ select_end: #if SELECTTRACE_ENABLED SELECTTRACE(0x1,pParse,p,("end processing\n")); - if( (sqlite3_unsupported_selecttrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){ + if( (sqlite3SelectTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){ sqlite3TreeViewSelect(0, p, 0); } #endif @@ -136441,28 +139724,51 @@ SQLITE_PRIVATE void sqlite3DeleteTriggerStep(sqlite3 *db, TriggerStep *pTriggerS ** pTab as well as the triggers lised in pTab->pTrigger. */ SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *pParse, Table *pTab){ - Schema * const pTmpSchema = pParse->db->aDb[1].pSchema; - Trigger *pList = 0; /* List of triggers to return */ + Schema *pTmpSchema; /* Schema of the pTab table */ + Trigger *pList; /* List of triggers to return */ + HashElem *p; /* Loop variable for TEMP triggers */ if( pParse->disableTriggers ){ return 0; } - - if( pTmpSchema!=pTab->pSchema ){ - HashElem *p; - assert( sqlite3SchemaMutexHeld(pParse->db, 0, pTmpSchema) ); - for(p=sqliteHashFirst(&pTmpSchema->trigHash); p; p=sqliteHashNext(p)){ - Trigger *pTrig = (Trigger *)sqliteHashData(p); - if( pTrig->pTabSchema==pTab->pSchema - && 0==sqlite3StrICmp(pTrig->table, pTab->zName) - ){ - pTrig->pNext = (pList ? pList : pTab->pTrigger); - pList = pTrig; - } + pTmpSchema = pParse->db->aDb[1].pSchema; + p = sqliteHashFirst(&pTmpSchema->trigHash); + pList = pTab->pTrigger; + while( p ){ + Trigger *pTrig = (Trigger *)sqliteHashData(p); + if( pTrig->pTabSchema==pTab->pSchema + && pTrig->table + && 0==sqlite3StrICmp(pTrig->table, pTab->zName) + && pTrig->pTabSchema!=pTmpSchema + ){ + pTrig->pNext = pList; + pList = pTrig; + }else if( pTrig->op==TK_RETURNING +#ifndef SQLITE_OMIT_VIRTUALTABLE + && pParse->db->pVtabCtx==0 +#endif + ){ + assert( pParse->bReturning ); + assert( &(pParse->u1.pReturning->retTrig) == pTrig ); + pTrig->table = pTab->zName; + pTrig->pTabSchema = pTab->pSchema; + pTrig->pNext = pList; + pList = pTrig; } + p = sqliteHashNext(p); } - - return (pList ? pList : pTab->pTrigger); +#if 0 + if( pList ){ + Trigger *pX; + printf("Triggers for %s:", pTab->zName); + for(pX=pList; pX; pX=pX->pNext){ + printf(" %s", pX->zName); + } + printf("\n"); + fflush(stdout); + } +#endif + return pList; } /* @@ -136550,22 +139856,11 @@ SQLITE_PRIVATE void sqlite3BeginTrigger( pTab = sqlite3SrcListLookup(pParse, pTableName); if( !pTab ){ /* The table does not exist. */ - if( db->init.iDb==1 ){ - /* Ticket #3810. - ** Normally, whenever a table is dropped, all associated triggers are - ** dropped too. But if a TEMP trigger is created on a non-TEMP table - ** and the table is dropped by a different database connection, the - ** trigger is not visible to the database connection that does the - ** drop so the trigger cannot be dropped. This results in an - ** "orphaned trigger" - a trigger whose associated table is missing. 
- */ - db->init.orphanTrigger = 1; - } - goto trigger_cleanup; + goto trigger_orphan_error; } if( IsVirtual(pTab) ){ sqlite3ErrorMsg(pParse, "cannot create triggers on virtual tables"); - goto trigger_cleanup; + goto trigger_orphan_error; } /* Check that the trigger name is not reserved and that no trigger of the @@ -136602,13 +139897,13 @@ SQLITE_PRIVATE void sqlite3BeginTrigger( */ if( pTab->pSelect && tr_tm!=TK_INSTEAD ){ sqlite3ErrorMsg(pParse, "cannot create %s trigger on view: %S", - (tr_tm == TK_BEFORE)?"BEFORE":"AFTER", pTableName, 0); - goto trigger_cleanup; + (tr_tm == TK_BEFORE)?"BEFORE":"AFTER", pTableName->a); + goto trigger_orphan_error; } if( !pTab->pSelect && tr_tm==TK_INSTEAD ){ sqlite3ErrorMsg(pParse, "cannot create INSTEAD OF" - " trigger on table: %S", pTableName, 0); - goto trigger_cleanup; + " trigger on table: %S", pTableName->a); + goto trigger_orphan_error; } #ifndef SQLITE_OMIT_AUTHORIZATION @@ -136668,6 +139963,23 @@ trigger_cleanup: }else{ assert( pParse->pNewTrigger==pTrigger ); } + return; + +trigger_orphan_error: + if( db->init.iDb==1 ){ + /* Ticket #3810. + ** Normally, whenever a table is dropped, all associated triggers are + ** dropped too. But if a TEMP trigger is created on a non-TEMP table + ** and the table is dropped by a different database connection, the + ** trigger is not visible to the database connection that does the + ** drop so the trigger cannot be dropped. This results in an + ** "orphaned trigger" - a trigger whose associated table is missing. + ** + ** 2020-11-05 see also https://sqlite.org/forum/forumpost/157dc791df + */ + db->init.orphanTrigger = 1; + } + goto trigger_cleanup; } /* @@ -136732,7 +140044,7 @@ SQLITE_PRIVATE void sqlite3FinishTrigger( sqlite3DbFree(db, z); sqlite3ChangeCookie(pParse, iDb); sqlite3VdbeAddParseSchemaOp(v, iDb, - sqlite3MPrintf(db, "type='trigger' AND name='%q'", zName)); + sqlite3MPrintf(db, "type='trigger' AND name='%q'", zName), 0); } if( db->init.busy ){ @@ -136945,7 +140257,7 @@ SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep( ** Recursively delete a Trigger structure */ SQLITE_PRIVATE void sqlite3DeleteTrigger(sqlite3 *db, Trigger *pTrigger){ - if( pTrigger==0 ) return; + if( pTrigger==0 || pTrigger->bReturning ) return; sqlite3DeleteTriggerStep(db, pTrigger->step_list); sqlite3DbFree(db, pTrigger->zName); sqlite3DbFree(db, pTrigger->table); @@ -136987,7 +140299,7 @@ SQLITE_PRIVATE void sqlite3DropTrigger(Parse *pParse, SrcList *pName, int noErr) } if( !pTrigger ){ if( !noErr ){ - sqlite3ErrorMsg(pParse, "no such trigger: %S", pName, 0); + sqlite3ErrorMsg(pParse, "no such trigger: %S", pName->a); }else{ sqlite3CodeVerifyNamedSchema(pParse, zDb); } @@ -137110,15 +140422,53 @@ SQLITE_PRIVATE Trigger *sqlite3TriggersExist( Trigger *pList = 0; Trigger *p; - if( (pParse->db->flags & SQLITE_EnableTrigger)!=0 ){ - pList = sqlite3TriggerList(pParse, pTab); - } - assert( pList==0 || IsVirtual(pTab)==0 ); - for(p=pList; p; p=p->pNext){ - if( p->op==op && checkColumnOverlap(p->pColumns, pChanges) ){ - mask |= p->tr_tm; + pList = sqlite3TriggerList(pParse, pTab); + assert( pList==0 || IsVirtual(pTab)==0 + || (pList->bReturning && pList->pNext==0) ); + if( pList!=0 ){ + p = pList; + if( (pParse->db->flags & SQLITE_EnableTrigger)==0 + && pTab->pTrigger!=0 + ){ + /* The SQLITE_DBCONFIG_ENABLE_TRIGGER setting is off. That means that + ** only TEMP triggers are allowed. 
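The sub-case opening here (the comment resumes just below with the truncation detail) covers SQLITE_DBCONFIG_ENABLE_TRIGGER being switched off: TEMP triggers sit at the head of the list assembled above, so cutting the chain just before pTab->pTrigger leaves only the TEMP ones visible. A sketch of flipping that setting through the public API; the dbconfig verb is real, error handling is elided:

#include <sqlite3.h>

/* Disable persistent (non-TEMP) triggers on this one connection.
** The previous setting is written into prev. */
static int disable_persistent_triggers(sqlite3 *db){
  int prev = 0;
  return sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_TRIGGER, 0, &prev);
}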
Truncate the pList so that it + ** includes only TEMP triggers */ + if( pList==pTab->pTrigger ){ + pList = 0; + goto exit_triggers_exist; + } + while( ALWAYS(p->pNext) && p->pNext!=pTab->pTrigger ) p = p->pNext; + p->pNext = 0; + p = pList; } + do{ + if( p->op==op && checkColumnOverlap(p->pColumns, pChanges) ){ + mask |= p->tr_tm; + }else if( p->op==TK_RETURNING ){ + /* The first time a RETURNING trigger is seen, the "op" value tells + ** us what time of trigger it should be. */ + assert( sqlite3IsToplevel(pParse) ); + p->op = op; + if( IsVirtual(pTab) ){ + if( op!=TK_INSERT ){ + sqlite3ErrorMsg(pParse, + "%s RETURNING is not available on virtual tables", + op==TK_DELETE ? "DELETE" : "UPDATE"); + } + p->tr_tm = TRIGGER_BEFORE; + }else{ + p->tr_tm = TRIGGER_AFTER; + } + mask |= p->tr_tm; + }else if( p->bReturning && p->op==TK_INSERT && op==TK_UPDATE + && sqlite3IsToplevel(pParse) ){ + /* Also fire a RETURNING trigger for an UPSERT */ + mask |= p->tr_tm; + } + p = p->pNext; + }while( p ); } +exit_triggers_exist: if( pMask ){ *pMask = mask; } @@ -137161,6 +140511,137 @@ SQLITE_PRIVATE SrcList *sqlite3TriggerStepSrc( return pSrc; } +/* +** Return true if the pExpr term from the RETURNING clause argument +** list is of the form "*". Raise an error if the terms if of the +** form "table.*". +*/ +static int isAsteriskTerm( + Parse *pParse, /* Parsing context */ + Expr *pTerm /* A term in the RETURNING clause */ +){ + assert( pTerm!=0 ); + if( pTerm->op==TK_ASTERISK ) return 1; + if( pTerm->op!=TK_DOT ) return 0; + assert( pTerm->pRight!=0 ); + assert( pTerm->pLeft!=0 ); + if( pTerm->pRight->op!=TK_ASTERISK ) return 0; + sqlite3ErrorMsg(pParse, "RETURNING may not use \"TABLE.*\" wildcards"); + return 1; +} + +/* The input list pList is the list of result set terms from a RETURNING +** clause. The table that we are returning from is pTab. +** +** This routine makes a copy of the pList, and at the same time expands +** any "*" wildcards to be the complete set of columns from pTab. +*/ +static ExprList *sqlite3ExpandReturning( + Parse *pParse, /* Parsing context */ + ExprList *pList, /* The arguments to RETURNING */ + Table *pTab /* The table being updated */ +){ + ExprList *pNew = 0; + sqlite3 *db = pParse->db; + int i; + + for(i=0; inExpr; i++){ + Expr *pOldExpr = pList->a[i].pExpr; + if( NEVER(pOldExpr==0) ) continue; + if( isAsteriskTerm(pParse, pOldExpr) ){ + int jj; + for(jj=0; jjnCol; jj++){ + Expr *pNewExpr; + if( IsHiddenColumn(pTab->aCol+jj) ) continue; + pNewExpr = sqlite3Expr(db, TK_ID, pTab->aCol[jj].zName); + pNew = sqlite3ExprListAppend(pParse, pNew, pNewExpr); + if( !db->mallocFailed ){ + struct ExprList_item *pItem = &pNew->a[pNew->nExpr-1]; + pItem->zEName = sqlite3DbStrDup(db, pTab->aCol[jj].zName); + pItem->eEName = ENAME_NAME; + } + } + }else{ + Expr *pNewExpr = sqlite3ExprDup(db, pOldExpr, 0); + pNew = sqlite3ExprListAppend(pParse, pNew, pNewExpr); + if( !db->mallocFailed && ALWAYS(pList->a[i].zEName!=0) ){ + struct ExprList_item *pItem = &pNew->a[pNew->nExpr-1]; + pItem->zEName = sqlite3DbStrDup(db, pList->a[i].zEName); + pItem->eEName = pList->a[i].eEName; + } + } + } + return pNew; +} + +/* +** Generate code for the RETURNING trigger. Unlike other triggers +** that invoke a subprogram in the bytecode, the code for RETURNING +** is generated in-line. 
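sqlite3ExpandReturning() above gives a RETURNING list the same "*" expansion as an ordinary result set, skipping hidden columns, while isAsteriskTerm() rejects the qualified "table.*" form with the error string visible above. A short end-to-end sketch against the public API, assuming a 3.35-era build where RETURNING exists:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(a INTEGER PRIMARY KEY, b);", 0, 0, 0);
  /* RETURNING * expands to the visible columns of t. */
  sqlite3_prepare_v2(db,
      "INSERT INTO t(b) VALUES('x') RETURNING *;", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("a=%lld b=%s\n",
           (long long)sqlite3_column_int64(pStmt, 0),
           (const char*)sqlite3_column_text(pStmt, 1));
  }
  sqlite3_finalize(pStmt);
  /* The "t.*" form is rejected by isAsteriskTerm() at prepare time. */
  if( sqlite3_prepare_v2(db,
          "INSERT INTO t(b) VALUES('y') RETURNING t.*;", -1, &pStmt, 0)
      !=SQLITE_OK ){
    printf("%s\n", sqlite3_errmsg(db));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}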
+*/ +static void codeReturningTrigger( + Parse *pParse, /* Parse context */ + Trigger *pTrigger, /* The trigger step that defines the RETURNING */ + Table *pTab, /* The table to code triggers from */ + int regIn /* The first in an array of registers */ +){ + Vdbe *v = pParse->pVdbe; + sqlite3 *db = pParse->db; + ExprList *pNew; + Returning *pReturning; + Select sSelect; + SrcList sFrom; + + assert( v!=0 ); + assert( pParse->bReturning ); + pReturning = pParse->u1.pReturning; + assert( pTrigger == &(pReturning->retTrig) ); + memset(&sSelect, 0, sizeof(sSelect)); + memset(&sFrom, 0, sizeof(sFrom)); + sSelect.pEList = sqlite3ExprListDup(db, pReturning->pReturnEL, 0); + sSelect.pSrc = &sFrom; + sFrom.nSrc = 1; + sFrom.a[0].pTab = pTab; + sqlite3SelectPrep(pParse, &sSelect, 0); + if( db->mallocFailed==0 && pParse->nErr==0 ){ + sqlite3GenerateColumnNames(pParse, &sSelect); + } + sqlite3ExprListDelete(db, sSelect.pEList); + pNew = sqlite3ExpandReturning(pParse, pReturning->pReturnEL, pTab); + if( pNew ){ + NameContext sNC; + memset(&sNC, 0, sizeof(sNC)); + if( pReturning->nRetCol==0 ){ + pReturning->nRetCol = pNew->nExpr; + pReturning->iRetCur = pParse->nTab++; + } + sNC.pParse = pParse; + sNC.uNC.iBaseReg = regIn; + sNC.ncFlags = NC_UBaseReg; + pParse->eTriggerOp = pTrigger->op; + pParse->pTriggerTab = pTab; + if( sqlite3ResolveExprListNames(&sNC, pNew)==SQLITE_OK ){ + int i; + int nCol = pNew->nExpr; + int reg = pParse->nMem+1; + pParse->nMem += nCol+2; + pReturning->iRetReg = reg; + for(i=0; ia[i].pExpr; + sqlite3ExprCodeFactorable(pParse, pCol, reg+i); + } + sqlite3VdbeAddOp3(v, OP_MakeRecord, reg, i, reg+i); + sqlite3VdbeAddOp2(v, OP_NewRowid, pReturning->iRetCur, reg+i+1); + sqlite3VdbeAddOp3(v, OP_Insert, pReturning->iRetCur, reg+i, reg+i+1); + } + sqlite3ExprListDelete(db, pNew); + pParse->eTriggerOp = 0; + pParse->pTriggerTab = 0; + } +} + + + /* ** Generate VDBE code for the statements inside the body of a single ** trigger. @@ -137210,6 +140691,7 @@ static int codeTriggerProgram( sqlite3ExprDup(db, pStep->pWhere, 0), pParse->eOrconf, 0, 0, 0 ); + sqlite3VdbeAddOp0(v, OP_ResetCount); break; } case TK_INSERT: { @@ -137220,6 +140702,7 @@ static int codeTriggerProgram( pParse->eOrconf, sqlite3UpsertDup(db, pStep->pUpsert) ); + sqlite3VdbeAddOp0(v, OP_ResetCount); break; } case TK_DELETE: { @@ -137227,6 +140710,7 @@ static int codeTriggerProgram( sqlite3TriggerStepSrc(pParse, pStep), sqlite3ExprDup(db, pStep->pWhere, 0), 0, 0 ); + sqlite3VdbeAddOp0(v, OP_ResetCount); break; } default: assert( pStep->op==TK_SELECT ); { @@ -137238,9 +140722,6 @@ static int codeTriggerProgram( break; } } - if( pStep->op!=TK_SELECT ){ - sqlite3VdbeAddOp0(v, OP_ResetCount); - } } return 0; @@ -137356,8 +140837,8 @@ static TriggerPrg *codeRowTrigger( ** OP_Halt inserted at the end of the program. */ if( pTrigger->pWhen ){ pWhen = sqlite3ExprDup(db, pTrigger->pWhen, 0); - if( SQLITE_OK==sqlite3ResolveExprNames(&sNC, pWhen) - && db->mallocFailed==0 + if( db->mallocFailed==0 + && SQLITE_OK==sqlite3ResolveExprNames(&sNC, pWhen) ){ iEndTrigger = sqlite3VdbeMakeLabel(pSubParse); sqlite3ExprIfFalse(pSubParse, pWhen, iEndTrigger, SQLITE_JUMPIFNULL); @@ -137387,7 +140868,6 @@ static TriggerPrg *codeRowTrigger( sqlite3VdbeDelete(v); } - assert( !pSubParse->pAinc && !pSubParse->pZombieTab ); assert( !pSubParse->pTriggerPrg && !pSubParse->nMaxArg ); sqlite3ParserReset(pSubParse); sqlite3StackFree(db, pSubParse); @@ -137489,7 +140969,7 @@ SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect( ** ... ... 
** reg+N OLD.* value of right-most column of pTab ** reg+N+1 NEW.rowid -** reg+N+2 OLD.* value of left-most column of pTab +** reg+N+2 NEW.* value of left-most column of pTab ** ... ... ** reg+N+N+1 NEW.* value of right-most column of pTab ** @@ -137534,12 +141014,20 @@ SQLITE_PRIVATE void sqlite3CodeRowTrigger( assert( p->pSchema==p->pTabSchema || p->pSchema==pParse->db->aDb[1].pSchema ); - /* Determine whether we should code this trigger */ - if( p->op==op + /* Determine whether we should code this trigger. One of two choices: + ** 1. The trigger is an exact match to the current DML statement + ** 2. This is a RETURNING trigger for INSERT but we are currently + ** doing the UPDATE part of an UPSERT. + */ + if( (p->op==op || (p->bReturning && p->op==TK_INSERT && op==TK_UPDATE)) && p->tr_tm==tr_tm && checkColumnOverlap(p->pColumns, pChanges) ){ - sqlite3CodeRowTriggerDirect(pParse, p, pTab, reg, orconf, ignoreJump); + if( !p->bReturning ){ + sqlite3CodeRowTriggerDirect(pParse, p, pTab, reg, orconf, ignoreJump); + }else if( sqlite3IsToplevel(pParse) ){ + codeReturningTrigger(pParse, p, pTab, reg); + } } } } @@ -137584,13 +141072,18 @@ SQLITE_PRIVATE u32 sqlite3TriggerColmask( assert( isNew==1 || isNew==0 ); for(p=pTrigger; p; p=p->pNext){ - if( p->op==op && (tr_tm&p->tr_tm) + if( p->op==op + && (tr_tm&p->tr_tm) && checkColumnOverlap(p->pColumns,pChanges) ){ - TriggerPrg *pPrg; - pPrg = getRowTrigger(pParse, p, pTab, orconf); - if( pPrg ){ - mask |= pPrg->aColmask[isNew]; + if( p->bReturning ){ + mask = 0xffffffff; + }else{ + TriggerPrg *pPrg; + pPrg = getRowTrigger(pParse, p, pTab, orconf); + if( pPrg ){ + mask |= pPrg->aColmask[isNew]; + } } } } @@ -137824,6 +141317,7 @@ static void updateFromSelect( assert( pTabList->nSrc>1 ); if( pSrc ){ + pSrc->a[0].fg.notCte = 1; pSrc->a[0].iCursor = -1; pSrc->a[0].pTab->nTabRef--; pSrc->a[0].pTab = 0; @@ -137838,7 +141332,7 @@ static void updateFromSelect( #endif pList = sqlite3ExprListAppend(pParse, pList, pNew); } - eDest = SRT_Upfrom; + eDest = IsVirtual(pTab) ? SRT_Table : SRT_Upfrom; }else if( pTab->pSelect ){ for(i=0; inCol; i++){ pList = sqlite3ExprListAppend(pParse, pList, exprRowColumn(pParse, i)); @@ -137853,7 +141347,8 @@ static void updateFromSelect( } #endif } - if( ALWAYS(pChanges) ){ + assert( pChanges!=0 || pParse->db->mallocFailed ); + if( pChanges ){ for(i=0; inExpr; i++){ pList = sqlite3ExprListAppend(pParse, pList, sqlite3ExprDup(db, pChanges->a[i].pExpr, 0) @@ -138247,6 +141742,7 @@ SQLITE_PRIVATE void sqlite3Update( if( (db->flags&SQLITE_CountRows)!=0 && !pParse->pTriggerTab && !pParse->nested + && !pParse->bReturning && pUpsert==0 ){ regRowCount = ++pParse->nMem; @@ -138255,6 +141751,8 @@ SQLITE_PRIVATE void sqlite3Update( if( nChangeFrom==0 && HasRowid(pTab) ){ sqlite3VdbeAddOp3(v, OP_Null, 0, regRowSet, regOldRowid); + iEph = pParse->nTab++; + addrOpen = sqlite3VdbeAddOp3(v, OP_OpenEphemeral, iEph, 0, regRowSet); }else{ assert( pPk!=0 || HasRowid(pTab) ); nPk = pPk ? pPk->nKeyCol : 0; @@ -138309,7 +141807,7 @@ SQLITE_PRIVATE void sqlite3Update( ** be deleted as a result of REPLACE conflict handling. Any of these ** things might disturb a cursor being used to scan through the table ** or index, causing a single-pass approach to malfunction. 
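Two of the hunks above make RETURNING cooperate with UPSERT: sqlite3CodeRowTrigger() also fires the RETURNING pseudo-trigger when an INSERT's conflict resolution runs its UPDATE arm, and sqlite3TriggerColmask() answers 0xffffffff for it, presumably because a RETURNING list may touch any column. A fragment of the statement shape this enables (3.35-era syntax), reusing db and print_row() from the earlier sketch:

#include <sqlite3.h>

static void show_upsert_returning(sqlite3 *db, sqlite3_callback print_row){
  /* Whether this takes the INSERT or the UPDATE arm, the RETURNING
  ** rows come back through the same callback. */
  sqlite3_exec(db,
      "INSERT INTO t(a,b) VALUES(1,'new') "
      "ON CONFLICT(a) DO UPDATE SET b=excluded.b "
      "RETURNING a, b;", print_row, 0, 0);
}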
*/ - flags = WHERE_ONEPASS_DESIRED|WHERE_SEEK_UNIQ_TABLE; + flags = WHERE_ONEPASS_DESIRED; if( !pParse->nested && !pTrigger && !hasFK && !chngKey && !bReplace ){ flags |= WHERE_ONEPASS_MULTIROW; } @@ -138346,9 +141844,10 @@ SQLITE_PRIVATE void sqlite3Update( ** leave it in register regOldRowid. */ sqlite3VdbeAddOp2(v, OP_Rowid, iDataCur, regOldRowid); if( eOnePass==ONEPASS_OFF ){ - /* We need to use regRowSet, so reallocate aRegIdx[nAllIdx] */ aRegIdx[nAllIdx] = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_RowSetAdd, regRowSet, regOldRowid); + sqlite3VdbeAddOp3(v, OP_Insert, iEph, regRowSet, regOldRowid); + }else{ + if( ALWAYS(addrOpen) ) sqlite3VdbeChangeToNoop(v, addrOpen); } }else{ /* Read the PK of the current row into an array of registers. In @@ -138399,7 +141898,12 @@ SQLITE_PRIVATE void sqlite3Update( /* Top of the update loop */ if( eOnePass!=ONEPASS_OFF ){ - if( !isView && aiCurOnePass[0]!=iDataCur && aiCurOnePass[1]!=iDataCur ){ + if( aiCurOnePass[0]!=iDataCur + && aiCurOnePass[1]!=iDataCur +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + && !isView +#endif + ){ assert( pPk ); sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelBreak, regKey,nKey); VdbeCoverage(v); @@ -138436,8 +141940,9 @@ SQLITE_PRIVATE void sqlite3Update( VdbeCoverage(v); } }else{ - labelContinue = sqlite3VdbeAddOp3(v, OP_RowSetRead, regRowSet,labelBreak, - regOldRowid); + sqlite3VdbeAddOp2(v, OP_Rewind, iEph, labelBreak); VdbeCoverage(v); + labelContinue = sqlite3VdbeMakeLabel(pParse); + addrTop = sqlite3VdbeAddOp2(v, OP_Rowid, iEph, regOldRowid); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, labelContinue, regOldRowid); VdbeCoverage(v); @@ -138687,11 +142192,9 @@ SQLITE_PRIVATE void sqlite3Update( }else if( eOnePass==ONEPASS_MULTI ){ sqlite3VdbeResolveLabel(v, labelContinue); sqlite3WhereEnd(pWInfo); - }else if( pPk || nChangeFrom ){ + }else{ sqlite3VdbeResolveLabel(v, labelContinue); sqlite3VdbeAddOp2(v, OP_Next, iEph, addrTop); VdbeCoverage(v); - }else{ - sqlite3VdbeGoto(v, labelContinue); } sqlite3VdbeResolveLabel(v, labelBreak); @@ -138708,7 +142211,7 @@ SQLITE_PRIVATE void sqlite3Update( ** that information. 
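The UPDATE rework above swaps the old RowSet opcodes (OP_RowSetAdd/OP_RowSetRead) for an ephemeral table: pass one inserts each qualifying rowid into iEph, pass two rewinds that table and re-seeks the base table row by row, giving rowid and WITHOUT ROWID targets the same Rewind/Next loop shape. Together with the updateFromSelect() changes it underpins the UPDATE ... FROM form, available since the 3.33-era; a fragment reusing db from the earlier sketch:

#include <sqlite3.h>

static void show_update_from(sqlite3 *db){
  sqlite3_exec(db,
      "CREATE TABLE IF NOT EXISTS fixes(a INTEGER PRIMARY KEY, b);"
      "INSERT OR REPLACE INTO fixes VALUES(1,'patched');", 0, 0, 0);
  /* The driving rows come from the joined table; each matching t.a
  ** row is updated once. */
  sqlite3_exec(db,
      "UPDATE t SET b = fixes.b FROM fixes WHERE t.a = fixes.a;",
      0, 0, 0);
}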
*/ if( regRowCount ){ - sqlite3VdbeAddOp2(v, OP_ResultRow, regRowCount, 1); + sqlite3VdbeAddOp2(v, OP_ChngCntRow, regRowCount, 1); sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows updated", SQLITE_STATIC); } @@ -138791,12 +142294,26 @@ static void updateVirtualTable( regArg = pParse->nMem + 1; pParse->nMem += nArg; if( pSrc->nSrc>1 ){ + Index *pPk = 0; Expr *pRow; ExprList *pList; - if( pRowid ){ - pRow = sqlite3ExprDup(db, pRowid, 0); + if( HasRowid(pTab) ){ + if( pRowid ){ + pRow = sqlite3ExprDup(db, pRowid, 0); + }else{ + pRow = sqlite3PExpr(pParse, TK_ROW, 0, 0); + } }else{ - pRow = sqlite3PExpr(pParse, TK_ROW, 0, 0); + i16 iPk; /* PRIMARY KEY column */ + pPk = sqlite3PrimaryKeyIndex(pTab); + assert( pPk!=0 ); + assert( pPk->nKeyCol==1 ); + iPk = pPk->aiColumn[0]; + if( aXRef[iPk]>=0 ){ + pRow = sqlite3ExprDup(db, pChanges->a[aXRef[iPk]].pExpr, 0); + }else{ + pRow = exprRowColumn(pParse, iPk); + } } pList = sqlite3ExprListAppend(pParse, 0, pRow); @@ -138810,7 +142327,7 @@ static void updateVirtualTable( } } - updateFromSelect(pParse, ephemTab, 0, pList, pSrc, pWhere, 0, 0); + updateFromSelect(pParse, ephemTab, pPk, pList, pSrc, pWhere, 0, 0); sqlite3ExprListDelete(db, pList); eOnePass = ONEPASS_OFF; }else{ @@ -138929,16 +142446,23 @@ static void updateVirtualTable( /* ** Free a list of Upsert objects */ -SQLITE_PRIVATE void sqlite3UpsertDelete(sqlite3 *db, Upsert *p){ - if( p ){ +static void SQLITE_NOINLINE upsertDelete(sqlite3 *db, Upsert *p){ + do{ + Upsert *pNext = p->pNextUpsert; sqlite3ExprListDelete(db, p->pUpsertTarget); sqlite3ExprDelete(db, p->pUpsertTargetWhere); sqlite3ExprListDelete(db, p->pUpsertSet); sqlite3ExprDelete(db, p->pUpsertWhere); + sqlite3DbFree(db, p->pToFree); sqlite3DbFree(db, p); - } + p = pNext; + }while( p ); +} +SQLITE_PRIVATE void sqlite3UpsertDelete(sqlite3 *db, Upsert *p){ + if( p ) upsertDelete(db, p); } + /* ** Duplicate an Upsert object. 
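sqlite3UpsertNew() and sqlite3UpsertDelete() now manage a linked list through pNextUpsert, which is what allows one INSERT to carry several ON CONFLICT clauses, matched in order against the table's indexes by the analysis loop that follows. A fragment of the generalized 3.35-era syntax, reusing db from the earlier sketch:

#include <sqlite3.h>

static void show_multi_upsert(sqlite3 *db){
  sqlite3_exec(db,
      "CREATE TABLE IF NOT EXISTS u(a INTEGER PRIMARY KEY, b UNIQUE, c);",
      0, 0, 0);
  /* The clauses are considered in order; the first whose target
  ** matches the failing constraint is applied. */
  sqlite3_exec(db,
      "INSERT INTO u VALUES(1,'k',0) "
      "ON CONFLICT(a) DO UPDATE SET c=c+1 "
      "ON CONFLICT(b) DO NOTHING;", 0, 0, 0);
}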
*/ @@ -138948,7 +142472,8 @@ SQLITE_PRIVATE Upsert *sqlite3UpsertDup(sqlite3 *db, Upsert *p){ sqlite3ExprListDup(db, p->pUpsertTarget, 0), sqlite3ExprDup(db, p->pUpsertTargetWhere, 0), sqlite3ExprListDup(db, p->pUpsertSet, 0), - sqlite3ExprDup(db, p->pUpsertWhere, 0) + sqlite3ExprDup(db, p->pUpsertWhere, 0), + sqlite3UpsertDup(db, p->pNextUpsert) ); } @@ -138960,22 +142485,25 @@ SQLITE_PRIVATE Upsert *sqlite3UpsertNew( ExprList *pTarget, /* Target argument to ON CONFLICT, or NULL */ Expr *pTargetWhere, /* Optional WHERE clause on the target */ ExprList *pSet, /* UPDATE columns, or NULL for a DO NOTHING */ - Expr *pWhere /* WHERE clause for the ON CONFLICT UPDATE */ + Expr *pWhere, /* WHERE clause for the ON CONFLICT UPDATE */ + Upsert *pNext /* Next ON CONFLICT clause in the list */ ){ Upsert *pNew; - pNew = sqlite3DbMallocRaw(db, sizeof(Upsert)); + pNew = sqlite3DbMallocZero(db, sizeof(Upsert)); if( pNew==0 ){ sqlite3ExprListDelete(db, pTarget); sqlite3ExprDelete(db, pTargetWhere); sqlite3ExprListDelete(db, pSet); sqlite3ExprDelete(db, pWhere); + sqlite3UpsertDelete(db, pNext); return 0; }else{ pNew->pUpsertTarget = pTarget; pNew->pUpsertTargetWhere = pTargetWhere; pNew->pUpsertSet = pSet; pNew->pUpsertWhere = pWhere; - pNew->pUpsertIdx = 0; + pNew->isDoUpdate = pSet!=0; + pNew->pNextUpsert = pNext; } return pNew; } @@ -139000,6 +142528,7 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget( Expr *pTerm; /* One term of the conflict-target clause */ NameContext sNC; /* Context for resolving symbolic names */ Expr sCol[2]; /* Index column converted into an Expr */ + int nClause = 0; /* Counter of ON CONFLICT clauses */ assert( pTabList->nSrc==1 ); assert( pTabList->a[0].pTab!=0 ); @@ -139013,87 +142542,131 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget( memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pParse; sNC.pSrcList = pTabList; - rc = sqlite3ResolveExprListNames(&sNC, pUpsert->pUpsertTarget); - if( rc ) return rc; - rc = sqlite3ResolveExprNames(&sNC, pUpsert->pUpsertTargetWhere); - if( rc ) return rc; + for(; pUpsert && pUpsert->pUpsertTarget; + pUpsert=pUpsert->pNextUpsert, nClause++){ + rc = sqlite3ResolveExprListNames(&sNC, pUpsert->pUpsertTarget); + if( rc ) return rc; + rc = sqlite3ResolveExprNames(&sNC, pUpsert->pUpsertTargetWhere); + if( rc ) return rc; - /* Check to see if the conflict target matches the rowid. */ - pTab = pTabList->a[0].pTab; - pTarget = pUpsert->pUpsertTarget; - iCursor = pTabList->a[0].iCursor; - if( HasRowid(pTab) - && pTarget->nExpr==1 - && (pTerm = pTarget->a[0].pExpr)->op==TK_COLUMN - && pTerm->iColumn==XN_ROWID - ){ - /* The conflict-target is the rowid of the primary table */ - assert( pUpsert->pUpsertIdx==0 ); - return SQLITE_OK; - } + /* Check to see if the conflict target matches the rowid. */ + pTab = pTabList->a[0].pTab; + pTarget = pUpsert->pUpsertTarget; + iCursor = pTabList->a[0].iCursor; + if( HasRowid(pTab) + && pTarget->nExpr==1 + && (pTerm = pTarget->a[0].pExpr)->op==TK_COLUMN + && pTerm->iColumn==XN_ROWID + ){ + /* The conflict-target is the rowid of the primary table */ + assert( pUpsert->pUpsertIdx==0 ); + continue; + } - /* Initialize sCol[0..1] to be an expression parse tree for a - ** single column of an index. The sCol[0] node will be the TK_COLLATE - ** operator and sCol[1] will be the TK_COLUMN operator. Code below - ** will populate the specific collation and column number values - ** prior to comparing against the conflict-target expression. 
- */ - memset(sCol, 0, sizeof(sCol)); - sCol[0].op = TK_COLLATE; - sCol[0].pLeft = &sCol[1]; - sCol[1].op = TK_COLUMN; - sCol[1].iTable = pTabList->a[0].iCursor; + /* Initialize sCol[0..1] to be an expression parse tree for a + ** single column of an index. The sCol[0] node will be the TK_COLLATE + ** operator and sCol[1] will be the TK_COLUMN operator. Code below + ** will populate the specific collation and column number values + ** prior to comparing against the conflict-target expression. + */ + memset(sCol, 0, sizeof(sCol)); + sCol[0].op = TK_COLLATE; + sCol[0].pLeft = &sCol[1]; + sCol[1].op = TK_COLUMN; + sCol[1].iTable = pTabList->a[0].iCursor; - /* Check for matches against other indexes */ - for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - int ii, jj, nn; - if( !IsUniqueIndex(pIdx) ) continue; - if( pTarget->nExpr!=pIdx->nKeyCol ) continue; - if( pIdx->pPartIdxWhere ){ - if( pUpsert->pUpsertTargetWhere==0 ) continue; - if( sqlite3ExprCompare(pParse, pUpsert->pUpsertTargetWhere, - pIdx->pPartIdxWhere, iCursor)!=0 ){ - continue; + /* Check for matches against other indexes */ + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + int ii, jj, nn; + if( !IsUniqueIndex(pIdx) ) continue; + if( pTarget->nExpr!=pIdx->nKeyCol ) continue; + if( pIdx->pPartIdxWhere ){ + if( pUpsert->pUpsertTargetWhere==0 ) continue; + if( sqlite3ExprCompare(pParse, pUpsert->pUpsertTargetWhere, + pIdx->pPartIdxWhere, iCursor)!=0 ){ + continue; + } } - } - nn = pIdx->nKeyCol; - for(ii=0; iiazColl[ii]; - if( pIdx->aiColumn[ii]==XN_EXPR ){ - assert( pIdx->aColExpr!=0 ); - assert( pIdx->aColExpr->nExpr>ii ); - pExpr = pIdx->aColExpr->a[ii].pExpr; - if( pExpr->op!=TK_COLLATE ){ - sCol[0].pLeft = pExpr; + nn = pIdx->nKeyCol; + for(ii=0; iiazColl[ii]; + if( pIdx->aiColumn[ii]==XN_EXPR ){ + assert( pIdx->aColExpr!=0 ); + assert( pIdx->aColExpr->nExpr>ii ); + pExpr = pIdx->aColExpr->a[ii].pExpr; + if( pExpr->op!=TK_COLLATE ){ + sCol[0].pLeft = pExpr; + pExpr = &sCol[0]; + } + }else{ + sCol[0].pLeft = &sCol[1]; + sCol[1].iColumn = pIdx->aiColumn[ii]; pExpr = &sCol[0]; } - }else{ - sCol[0].pLeft = &sCol[1]; - sCol[1].iColumn = pIdx->aiColumn[ii]; - pExpr = &sCol[0]; - } - for(jj=0; jja[jj].pExpr, pExpr,iCursor)<2 ){ - break; /* Column ii of the index matches column jj of target */ + for(jj=0; jja[jj].pExpr,pExpr,iCursor)<2 ){ + break; /* Column ii of the index matches column jj of target */ + } + } + if( jj>=nn ){ + /* The target contains no match for column jj of the index */ + break; } } - if( jj>=nn ){ - /* The target contains no match for column jj of the index */ - break; + if( iipUpsertIdx = pIdx; + break; } - if( iipUpsertIdx==0 ){ + char zWhich[16]; + if( nClause==0 && pUpsert->pNextUpsert==0 ){ + zWhich[0] = 0; + }else{ + sqlite3_snprintf(sizeof(zWhich),zWhich,"%r ", nClause+1); + } + sqlite3ErrorMsg(pParse, "%sON CONFLICT clause does not match any " + "PRIMARY KEY or UNIQUE constraint", zWhich); + return SQLITE_ERROR; } - pUpsert->pUpsertIdx = pIdx; - return SQLITE_OK; } - sqlite3ErrorMsg(pParse, "ON CONFLICT clause does not match any " - "PRIMARY KEY or UNIQUE constraint"); - return SQLITE_ERROR; + return SQLITE_OK; +} + +/* +** Return true if pUpsert is the last ON CONFLICT clause with a +** conflict target, or if pUpsert is followed by another ON CONFLICT +** clause that targets the INTEGER PRIMARY KEY. 
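With several clauses in play, the message gains an ordinal prefix through the "%r" format so the offending clause can be identified; a conflict target that matches no PRIMARY KEY or UNIQUE constraint still fails at prepare time. A fragment that trips it, assuming the u table from the previous sketch (column c carries no unique index):

#include <stdio.h>
#include <sqlite3.h>

static void show_bad_target(sqlite3 *db){
  char *zErr = 0;
  sqlite3_exec(db,
      "INSERT INTO u VALUES(2,'m',0) ON CONFLICT(c) DO NOTHING;",
      0, 0, &zErr);
  if( zErr ){
    /* "... does not match any PRIMARY KEY or UNIQUE constraint" */
    printf("%s\n", zErr);
    sqlite3_free(zErr);
  }
}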
+*/ +SQLITE_PRIVATE int sqlite3UpsertNextIsIPK(Upsert *pUpsert){ + Upsert *pNext; + if( NEVER(pUpsert==0) ) return 0; + pNext = pUpsert->pNextUpsert; + if( pNext==0 ) return 1; + if( pNext->pUpsertTarget==0 ) return 1; + if( pNext->pUpsertIdx==0 ) return 1; + return 0; +} + +/* +** Given the list of ON CONFLICT clauses described by pUpsert, and +** a particular index pIdx, return a pointer to the particular ON CONFLICT +** clause that applies to the index. Or, if the index is not subject to +** any ON CONFLICT clause, return NULL. +*/ +SQLITE_PRIVATE Upsert *sqlite3UpsertOfIndex(Upsert *pUpsert, Index *pIdx){ + while( + pUpsert + && pUpsert->pUpsertTarget!=0 + && pUpsert->pUpsertIdx!=pIdx + ){ + pUpsert = pUpsert->pNextUpsert; + } + return pUpsert; } /* @@ -139117,11 +142690,13 @@ SQLITE_PRIVATE void sqlite3UpsertDoUpdate( SrcList *pSrc; /* FROM clause for the UPDATE */ int iDataCur; int i; + Upsert *pTop = pUpsert; assert( v!=0 ); assert( pUpsert!=0 ); - VdbeNoopComment((v, "Begin DO UPDATE of UPSERT")); iDataCur = pUpsert->iDataCur; + pUpsert = sqlite3UpsertOfIndex(pTop, pIdx); + VdbeNoopComment((v, "Begin DO UPDATE of UPSERT")); if( pIdx && iCur!=iDataCur ){ if( HasRowid(pTab) ){ int regRowid = sqlite3GetTempReg(pParse); @@ -139151,19 +142726,17 @@ SQLITE_PRIVATE void sqlite3UpsertDoUpdate( sqlite3VdbeJumpHere(v, i); } } - /* pUpsert does not own pUpsertSrc - the outer INSERT statement does. So - ** we have to make a copy before passing it down into sqlite3Update() */ - pSrc = sqlite3SrcListDup(db, pUpsert->pUpsertSrc, 0); + /* pUpsert does not own pTop->pUpsertSrc - the outer INSERT statement does. + ** So we have to make a copy before passing it down into sqlite3Update() */ + pSrc = sqlite3SrcListDup(db, pTop->pUpsertSrc, 0); /* excluded.* columns of type REAL need to be converted to a hard real */ for(i=0; inCol; i++){ if( pTab->aCol[i].affinity==SQLITE_AFF_REAL ){ - sqlite3VdbeAddOp1(v, OP_RealAffinity, pUpsert->regData+i); + sqlite3VdbeAddOp1(v, OP_RealAffinity, pTop->regData+i); } } - sqlite3Update(pParse, pSrc, pUpsert->pUpsertSet, - pUpsert->pUpsertWhere, OE_Abort, 0, 0, pUpsert); - pUpsert->pUpsertSet = 0; /* Will have been deleted by sqlite3Update() */ - pUpsert->pUpsertWhere = 0; /* Will have been deleted by sqlite3Update() */ + sqlite3Update(pParse, pSrc, sqlite3ExprListDup(db,pUpsert->pUpsertSet,0), + sqlite3ExprDup(db,pUpsert->pUpsertWhere,0), OE_Abort, 0, 0, pUpsert); VdbeNoopComment((v, "End DO UPDATE of UPSERT")); } @@ -139512,8 +143085,8 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( BTREE_APPLICATION_ID, 0, /* Preserve the application id */ }; - assert( 1==sqlite3BtreeIsInTrans(pTemp) ); - assert( pOut!=0 || 1==sqlite3BtreeIsInTrans(pMain) ); + assert( SQLITE_TXN_WRITE==sqlite3BtreeTxnState(pTemp) ); + assert( pOut!=0 || SQLITE_TXN_WRITE==sqlite3BtreeTxnState(pMain) ); /* Copy Btree meta values */ for(i=0; izName, zStmt); - sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere); + sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere, 0); sqlite3DbFree(db, zStmt); iReg = ++pParse->nMem; @@ -140240,6 +143813,7 @@ static int vtabCallConstructor( zType[i-1] = '\0'; } pTab->aCol[iCol].colFlags |= COLFLAG_HIDDEN; + pTab->tabFlags |= TF_HasHidden; oooHidden = TF_OOOHidden; }else{ pTab->tabFlags |= oooHidden; @@ -140408,7 +143982,7 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ Table *pNew = sParse.pNewTable; Index *pIdx; pTab->aCol = pNew->aCol; - pTab->nCol = pNew->nCol; + pTab->nNVCol = pTab->nCol = pNew->nCol; pTab->tabFlags |= pNew->tabFlags & 
(TF_WithoutRowid|TF_NoVisibleRowid); pNew->nCol = 0; pNew->aCol = 0; @@ -140800,6 +144374,7 @@ SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse *pParse, Module *pMod){ pTab->pSchema = db->aDb[0].pSchema; assert( pTab->nModuleArg==0 ); pTab->iPKey = -1; + pTab->tabFlags |= TF_Eponymous; addModuleArgument(pParse, pTab, sqlite3DbStrDup(db, pTab->zName)); addModuleArgument(pParse, pTab, 0); addModuleArgument(pParse, pTab, sqlite3DbStrDup(db, pTab->zName)); @@ -140940,19 +144515,6 @@ SQLITE_API int sqlite3_vtab_config(sqlite3 *db, int op, ...){ #ifndef SQLITE_WHEREINT_H #define SQLITE_WHEREINT_H -/* -** Trace output macros -*/ -#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) -/***/ extern int sqlite3WhereTrace; -#endif -#if defined(SQLITE_DEBUG) \ - && (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_WHERETRACE)) -# define WHERETRACE(K,X) if(sqlite3WhereTrace&(K)) sqlite3DebugPrintf X -# define WHERETRACE_ENABLED 1 -#else -# define WHERETRACE(K,X) -#endif /* Forward references */ @@ -141184,9 +144746,11 @@ struct WhereTerm { u8 eMatchOp; /* Op for vtab MATCH/LIKE/GLOB/REGEXP terms */ int iParent; /* Disable pWC->a[iParent] when this term disabled */ int leftCursor; /* Cursor number of X in "X " */ - int iField; /* Field in (?,?,?) IN (SELECT...) vector */ union { - int leftColumn; /* Column number of X in "X " */ + struct { + int leftColumn; /* Column number of X in "X " */ + int iField; /* Field in (?,?,?) IN (SELECT...) vector */ + } x; /* Opcode other than OP_OR or OP_AND */ WhereOrInfo *pOrInfo; /* Extra information if (eOperator & WO_OR)!=0 */ WhereAndInfo *pAndInfo; /* Extra information if (eOperator& WO_AND)!=0 */ } u; @@ -141204,11 +144768,7 @@ struct WhereTerm { #define TERM_ORINFO 0x0010 /* Need to free the WhereTerm.u.pOrInfo object */ #define TERM_ANDINFO 0x0020 /* Need to free the WhereTerm.u.pAndInfo obj */ #define TERM_OR_OK 0x0040 /* Used during OR-clause processing */ -#ifdef SQLITE_ENABLE_STAT4 -# define TERM_VNULL 0x0080 /* Manufactured x>NULL or x<=NULL term */ -#else -# define TERM_VNULL 0x0000 /* Disabled if not using stat4 */ -#endif +#define TERM_VNULL 0x0080 /* Manufactured x>NULL or x<=NULL term */ #define TERM_LIKEOPT 0x0100 /* Virtual terms from the LIKE optimization */ #define TERM_LIKECOND 0x0200 /* Conditionally this LIKE operator term */ #define TERM_LIKE 0x0400 /* The original LIKE operator */ @@ -141231,8 +144791,8 @@ struct WhereScan { const char *zCollName; /* Required collating sequence, if not NULL */ Expr *pIdxExpr; /* Search for this index expression */ char idxaff; /* Must match this affinity, if zCollName!=NULL */ - unsigned char nEquiv; /* Number of entries in aEquiv[] */ - unsigned char iEquiv; /* Next unused slot in aEquiv[] */ + unsigned char nEquiv; /* Number of entries in aiCur[] and aiColumn[] */ + unsigned char iEquiv; /* Next unused slot in aiCur[] and aiColumn[] */ u32 opMask; /* Acceptable operators */ int k; /* Resume scanning at this->pWC->a[this->k] */ int aiCur[11]; /* Cursors in the equivalence class */ @@ -141411,6 +144971,7 @@ struct WhereInfo { unsigned sorted :1; /* True if really sorted (not just grouped) */ LogEst nRowOut; /* Estimated number of output rows */ int iTop; /* The very beginning of the WHERE loop */ + int iEndWhere; /* End of the WHERE clause itself */ WhereLoop *pLoops; /* List of all WhereLoop objects */ WhereExprMod *pExprMods; /* Expression modifications */ Bitmask revMask; /* Mask of ORDER BY terms that need reversing */ @@ -141477,7 +145038,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet*, 
Expr*); SQLITE_PRIVATE Bitmask sqlite3WhereExprUsageNN(WhereMaskSet*, Expr*); SQLITE_PRIVATE Bitmask sqlite3WhereExprListUsage(WhereMaskSet*, ExprList*); SQLITE_PRIVATE void sqlite3WhereExprAnalyze(SrcList*, WhereClause*); -SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, struct SrcList_item*, WhereClause*); +SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, SrcItem*, WhereClause*); @@ -141539,6 +145100,8 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, struct SrcList_item*, WhereC #define WHERE_PARTIALIDX 0x00020000 /* The automatic index is partial */ #define WHERE_IN_EARLYOUT 0x00040000 /* Perhaps quit IN loops early */ #define WHERE_BIGNULL_SORT 0x00080000 /* Column nEq of index is BIGNULL */ +#define WHERE_IN_SEEKSCAN 0x00100000 /* Seek-scan optimization for IN */ +#define WHERE_TRANSCONS 0x00200000 /* Uses a transitive constraint */ #endif /* !defined(SQLITE_WHEREINT_H) */ @@ -141654,7 +145217,7 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( if( sqlite3ParseToplevel(pParse)->explain==2 ) #endif { - struct SrcList_item *pItem = &pTabList->a[pLevel->iFrom]; + SrcItem *pItem = &pTabList->a[pLevel->iFrom]; Vdbe *v = pParse->pVdbe; /* VM being constructed */ sqlite3 *db = pParse->db; /* Database handle */ int isSearch; /* True for a SEARCH. False for SCAN. */ @@ -141673,16 +145236,8 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( || (wctrlFlags&(WHERE_ORDERBY_MIN|WHERE_ORDERBY_MAX)); sqlite3StrAccumInit(&str, db, zBuf, sizeof(zBuf), SQLITE_MAX_LENGTH); - sqlite3_str_appendall(&str, isSearch ? "SEARCH" : "SCAN"); - if( pItem->pSelect ){ - sqlite3_str_appendf(&str, " SUBQUERY %u", pItem->pSelect->selId); - }else{ - sqlite3_str_appendf(&str, " TABLE %s", pItem->zName); - } - - if( pItem->zAlias ){ - sqlite3_str_appendf(&str, " AS %s", pItem->zAlias); - } + str.printfFlags = SQLITE_PRINTF_INTERNAL; + sqlite3_str_appendf(&str, "%s %S", isSearch ? 
"SEARCH" : "SCAN", pItem); if( (flags & (WHERE_IPK|WHERE_VIRTUALTABLE))==0 ){ const char *zFmt = 0; Index *pIdx; @@ -141830,6 +145385,12 @@ static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){ }else{ pTerm->wtFlags |= TERM_CODED; } +#ifdef WHERETRACE_ENABLED + if( sqlite3WhereTrace & 0x20000 ){ + sqlite3DebugPrintf("DISABLE-"); + sqlite3WhereTermPrint(pTerm, (int)(pTerm - (pTerm->pWC->a))); + } +#endif if( pTerm->iParent<0 ) break; pTerm = &pTerm->pWC->a[pTerm->iParent]; assert( pTerm!=0 ); @@ -141952,7 +145513,7 @@ static Expr *removeUnindexableInClauseTerms( for(i=iEq; inLTerm; i++){ if( pLoop->aLTerm[i]->pExpr==pX ){ - int iField = pLoop->aLTerm[i]->iField - 1; + int iField = pLoop->aLTerm[i]->u.x.iField - 1; if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */ pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr); pOrigRhs->a[iField].pExpr = 0; @@ -142095,6 +145656,9 @@ static int codeEqualityTerm( if( pLevel->u.in.nIn==0 ){ pLevel->addrNxt = sqlite3VdbeMakeLabel(pParse); } + if( iEq>0 && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 ){ + pLoop->wsFlags |= WHERE_IN_EARLYOUT; + } i = pLevel->u.in.nIn; pLevel->u.in.nIn += nEq; @@ -142121,7 +145685,6 @@ static int codeEqualityTerm( if( iEq>0 ){ pIn->iBase = iReg - i; pIn->nPrefix = i; - pLoop->wsFlags |= WHERE_IN_EARLYOUT; }else{ pIn->nPrefix = 0; } @@ -142131,13 +145694,36 @@ static int codeEqualityTerm( pIn++; } } + testcase( iEq>0 + && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 + && (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ); + if( iEq>0 + && (pLoop->wsFlags & (WHERE_IN_SEEKSCAN|WHERE_VIRTUALTABLE))==0 + ){ + sqlite3VdbeAddOp3(v, OP_SeekHit, pLevel->iIdxCur, 0, iEq); + } }else{ pLevel->u.in.nIn = 0; } sqlite3DbFree(pParse->db, aiMap); #endif } - disableTerm(pLevel, pTerm); + + /* As an optimization, try to disable the WHERE clause term that is + ** driving the index as it will always be true. The correct answer is + ** obtained regardless, but we might get the answer with fewer CPU cycles + ** by omitting the term. + ** + ** But do not disable the term unless we are certain that the term is + ** not a transitive constraint. For an example of where that does not + ** work, see https://sqlite.org/forum/forumpost/eb8613976a (2021-05-04) + */ + if( (pLevel->pWLoop->wsFlags & WHERE_TRANSCONS)==0 + || (pTerm->eOperator & WO_EQUIV)==0 + ){ + disableTerm(pLevel, pTerm); + } + return iReg; } @@ -142223,6 +145809,7 @@ static int codeAllEqualityTerms( if( nSkip ){ int iIdxCur = pLevel->iIdxCur; + sqlite3VdbeAddOp3(v, OP_Null, 0, regBase, regBase+nSkip-1); sqlite3VdbeAddOp1(v, (bRev?OP_Last:OP_Rewind), iIdxCur); VdbeCoverageIf(v, bRev==0); VdbeCoverageIf(v, bRev!=0); @@ -142257,7 +145844,7 @@ static int codeAllEqualityTerms( sqlite3ReleaseTempReg(pParse, regBase); regBase = r1; }else{ - sqlite3VdbeAddOp2(v, OP_SCopy, r1, regBase+j); + sqlite3VdbeAddOp2(v, OP_Copy, r1, regBase+j); } } if( pTerm->eOperator & WO_IN ){ @@ -142274,7 +145861,7 @@ static int codeAllEqualityTerms( sqlite3VdbeAddOp2(v, OP_IsNull, regBase+j, pLevel->addrBrk); VdbeCoverage(v); } - if( zAff ){ + if( pParse->db->mallocFailed==0 && pParse->nErr==0 ){ if( sqlite3CompareAffinity(pRight, zAff[j])==SQLITE_AFF_BLOB ){ zAff[j] = SQLITE_AFF_BLOB; } @@ -142437,7 +146024,7 @@ static int codeCursorHintFixExpr(Walker *pWalker, Expr *pExpr){ ** Insert an OP_CursorHint instruction if it is appropriate to do so. 
*/ static void codeCursorHint( - struct SrcList_item *pTabItem, /* FROM clause item */ + SrcItem *pTabItem, /* FROM clause item */ WhereInfo *pWInfo, /* The where clause */ WhereLevel *pLevel, /* Which loop to provide hints for */ WhereTerm *pEndRange /* Hint this end-of-scan boundary term if not NULL */ @@ -142623,7 +146210,7 @@ static void codeExprOrVector(Parse *pParse, Expr *p, int iReg, int nReg){ } } }else{ - assert( nReg==1 ); + assert( nReg==1 || pParse->nErr ); sqlite3ExprCode(pParse, p, iReg); } } @@ -142812,7 +146399,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( WhereClause *pWC; /* Decomposition of the entire WHERE clause */ WhereTerm *pTerm; /* A WHERE clause term */ sqlite3 *db; /* Database connection */ - struct SrcList_item *pTabItem; /* FROM clause term being coded */ + SrcItem *pTabItem; /* FROM clause term being coded */ int addrBrk; /* Jump here to break out of the loop */ int addrHalt; /* addrBrk for the outermost loop */ int addrCont; /* Jump here to continue with next cycle */ @@ -142917,6 +146504,9 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( pLoop->u.vtab.needFree ? P4_DYNAMIC : P4_STATIC); VdbeCoverage(v); pLoop->u.vtab.needFree = 0; + /* An OOM inside of AddOp4(OP_VFilter) instruction above might have freed + ** the u.vtab.idxStr. NULL it out to prevent a use-after-free */ + if( db->mallocFailed ) pLoop->u.vtab.idxStr = 0; pLevel->p1 = iCur; pLevel->op = pWInfo->eOnePass ? OP_Noop : OP_VNext; pLevel->p2 = sqlite3VdbeCurrentAddr(v); @@ -143175,6 +146765,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( u8 bStopAtNull = 0; /* Add condition to terminate at NULLs */ int omitTable; /* True if we use the index only */ int regBignull = 0; /* big-null flag register */ + int addrSeekScan = 0; /* Opcode of the OP_SeekScan, if any */ pIdx = pLoop->u.btree.pIndex; iIdxCur = pLevel->iIdxCur; @@ -143246,14 +146837,18 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** a forward order scan on a descending index, interchange the ** start and end terms (pRangeStart and pRangeEnd). */ - if( (nEqnKeyCol && bRev==(pIdx->aSortOrder[nEq]==SQLITE_SO_ASC)) - || (bRev && pIdx->nKeyCol==nEq) - ){ + if( (nEqnColumn && bRev==(pIdx->aSortOrder[nEq]==SQLITE_SO_ASC)) ){ SWAP(WhereTerm *, pRangeEnd, pRangeStart); SWAP(u8, bSeekPastNull, bStopAtNull); SWAP(u8, nBtm, nTop); } + if( iLevel>0 && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)!=0 ){ + /* In case OP_SeekScan is used, ensure that the index cursor does not + ** point to a valid row for the first iteration of this loop. */ + sqlite3VdbeAddOp1(v, OP_NullRow, iIdxCur); + } + /* Generate code to evaluate all constraint terms using == or IN ** and store the values of those terms in an array of registers ** starting at regBase. @@ -143313,9 +146908,6 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** above has already left the cursor sitting on the correct row, ** so no further seeking is needed */ }else{ - if( pLoop->wsFlags & WHERE_IN_EARLYOUT ){ - sqlite3VdbeAddOp1(v, OP_SeekHit, iIdxCur); - } if( regBignull ){ sqlite3VdbeAddOp2(v, OP_Integer, 1, regBignull); VdbeComment((v, "NULL-scan pass ctr")); @@ -143323,6 +146915,20 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev]; assert( op!=0 ); + if( (pLoop->wsFlags & WHERE_IN_SEEKSCAN)!=0 && op==OP_SeekGE ){ + assert( regBignull==0 ); + /* TUNING: The OP_SeekScan opcode seeks to reduce the number + ** of expensive seek operations by replacing a single seek with + ** 1 or more step operations. 
The question is, how many steps + ** should we try before giving up and going with a seek. The cost + ** of a seek is proportional to the logarithm of the of the number + ** of entries in the tree, so basing the number of steps to try + ** on the estimated number of rows in the btree seems like a good + ** guess. */ + addrSeekScan = sqlite3VdbeAddOp1(v, OP_SeekScan, + (pIdx->aiRowLogEst[0]+9)/10); + VdbeCoverage(v); + } sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint); VdbeCoverage(v); VdbeCoverageIf(v, op==OP_Rewind); testcase( op==OP_Rewind ); @@ -143405,6 +147011,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( testcase( op==OP_IdxGE ); VdbeCoverageIf(v, op==OP_IdxGE ); testcase( op==OP_IdxLT ); VdbeCoverageIf(v, op==OP_IdxLT ); testcase( op==OP_IdxLE ); VdbeCoverageIf(v, op==OP_IdxLE ); + if( addrSeekScan ) sqlite3VdbeJumpHere(v, addrSeekScan); } if( regBignull ){ /* During a NULL-scan, check to see if we have reached the end of @@ -143424,8 +147031,8 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( testcase( op==OP_IdxLE ); VdbeCoverageIf(v, op==OP_IdxLE ); } - if( pLoop->wsFlags & WHERE_IN_EARLYOUT ){ - sqlite3VdbeAddOp2(v, OP_SeekHit, iIdxCur, 1); + if( (pLoop->wsFlags & WHERE_IN_EARLYOUT)!=0 ){ + sqlite3VdbeAddOp3(v, OP_SeekHit, iIdxCur, nEq, nEq); } /* Seek the table cursor, if required */ @@ -143434,17 +147041,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( if( omitTable ){ /* pIdx is a covering index. No need to access the main table. */ }else if( HasRowid(pIdx->pTable) ){ - if( (pWInfo->wctrlFlags & WHERE_SEEK_TABLE) - || ( (pWInfo->wctrlFlags & WHERE_SEEK_UNIQ_TABLE)!=0 - && (pWInfo->eOnePass==ONEPASS_SINGLE || pLoop->nLTerm==0) ) - ){ - iRowidReg = ++pParse->nMem; - sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, iRowidReg); - sqlite3VdbeAddOp3(v, OP_NotExists, iCur, 0, iRowidReg); - VdbeCoverage(v); - }else{ - codeDeferredSeek(pWInfo, pIdx, iCur, iIdxCur); - } + codeDeferredSeek(pWInfo, pIdx, iCur, iIdxCur); }else if( iCur!=iIdxCur ){ Index *pPk = sqlite3PrimaryKeyIndex(pIdx->pTable); iRowidReg = sqlite3GetTempRange(pParse, pPk->nKeyCol); @@ -143571,7 +147168,6 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( int iRetInit; /* Address of regReturn init */ int untestedTerms = 0; /* Some terms not completely tested */ int ii; /* Loop counter */ - u16 wctrlFlags; /* Flags for sub-WHERE clause */ Expr *pAndExpr = 0; /* An ".. AND (...)" expression */ Table *pTab = pTabItem->pTab; @@ -143589,7 +147185,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( */ if( pWInfo->nLevel>1 ){ int nNotReady; /* The number of notReady tables */ - struct SrcList_item *origSrc; /* Original list of tables */ + SrcItem *origSrc; /* Original list of tables */ nNotReady = pWInfo->nLevel - iLevel - 1; pOrTab = sqlite3StackAllocRaw(db, sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0])); @@ -143662,7 +147258,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( /* The extra 0x10000 bit on the opcode is masked off and does not ** become part of the new Expr.op. However, it does make the ** op==TK_AND comparison inside of sqlite3PExpr() false, and this - ** prevents sqlite3PExpr() from implementing AND short-circuit + ** prevents sqlite3PExpr() from applying the AND short-circuit ** optimization, which we do not want here. 
*/ pAndExpr = sqlite3PExpr(pParse, TK_AND|0x10000, 0, pAndExpr); } @@ -143672,17 +147268,22 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** eliminating duplicates from other WHERE clauses, the action for each ** sub-WHERE clause is to to invoke the main loop body as a subroutine. */ - wctrlFlags = WHERE_OR_SUBCLAUSE | (pWInfo->wctrlFlags & WHERE_SEEK_TABLE); ExplainQueryPlan((pParse, 1, "MULTI-INDEX OR")); for(ii=0; iinTerm; ii++){ WhereTerm *pOrTerm = &pOrWc->a[ii]; if( pOrTerm->leftCursor==iCur || (pOrTerm->eOperator & WO_AND)!=0 ){ WhereInfo *pSubWInfo; /* Info for single OR-term scan */ Expr *pOrExpr = pOrTerm->pExpr; /* Current OR clause term */ + Expr *pDelete; /* Local copy of OR clause term */ int jmp1 = 0; /* Address of jump operation */ testcase( (pTabItem[0].fg.jointype & JT_LEFT)!=0 && !ExprHasProperty(pOrExpr, EP_FromJoin) ); /* See TH3 vtab25.400 and ticket 614b25314c766238 */ + pDelete = pOrExpr = sqlite3ExprDup(db, pOrExpr, 0); + if( db->mallocFailed ){ + sqlite3ExprDelete(db, pDelete); + continue; + } if( pAndExpr ){ pAndExpr->pLeft = pOrExpr; pOrExpr = pAndExpr; @@ -143691,7 +147292,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ExplainQueryPlan((pParse, 1, "INDEX %d", ii+1)); WHERETRACE(0xffff, ("Subplan for OR-clause:\n")); pSubWInfo = sqlite3WhereBegin(pParse, pOrTab, pOrExpr, 0, 0, - wctrlFlags, iCovCur); + WHERE_OR_SUBCLAUSE, iCovCur); assert( pSubWInfo || pParse->nErr || db->mallocFailed ); if( pSubWInfo ){ WhereLoop *pSubLoop; @@ -143789,11 +147390,15 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( }else{ pCov = 0; } + if( sqlite3WhereUsesDeferredSeek(pSubWInfo) ){ + pWInfo->bDeferredSeek = 1; + } /* Finish the loop through table entries that match term pOrTerm. */ sqlite3WhereEnd(pSubWInfo); ExplainQueryPlanPop(pParse); } + sqlite3ExprDelete(db, pDelete); } } ExplainQueryPlanPop(pParse); @@ -143941,7 +147546,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( #endif assert( !ExprHasProperty(pE, EP_FromJoin) ); assert( (pTerm->prereqRight & pLevel->notReady)!=0 ); - pAlt = sqlite3WhereFindTerm(pWC, iCur, pTerm->u.leftColumn, notReady, + pAlt = sqlite3WhereFindTerm(pWC, iCur, pTerm->u.x.leftColumn, notReady, WO_EQ|WO_IN|WO_IS, 0); if( pAlt==0 ) continue; if( pAlt->wtFlags & (TERM_CODED) ) continue; @@ -143958,6 +147563,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( sEAlt = *pAlt->pExpr; sEAlt.pLeft = pE->pLeft; sqlite3ExprIfFalse(pParse, &sEAlt, addrCont, SQLITE_JUMPIFNULL); + pAlt->wtFlags |= TERM_CODED; } /* For a LEFT OUTER JOIN, generate code that will record the fact that @@ -144510,6 +148116,7 @@ static void whereCombineDisjuncts( int op; /* Operator for the combined expression */ int idxNew; /* Index in pWC of the next virtual term */ + if( (pOne->wtFlags | pTwo->wtFlags) & TERM_VNULL ) return; if( (pOne->eOperator & (WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE))==0 ) return; if( (pTwo->eOperator & (WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE))==0 ) return; if( (eOp & (WO_EQ|WO_LT|WO_LE))!=eOp @@ -144797,7 +148404,7 @@ static void exprAnalyzeOrTerm( assert( pOrTerm->wtFlags & (TERM_COPIED|TERM_VIRTUAL) ); continue; } - iColumn = pOrTerm->u.leftColumn; + iColumn = pOrTerm->u.x.leftColumn; iCursor = pOrTerm->leftCursor; pLeft = pOrTerm->pExpr->pLeft; break; @@ -144819,7 +148426,7 @@ static void exprAnalyzeOrTerm( assert( pOrTerm->eOperator & WO_EQ ); if( pOrTerm->leftCursor!=iCursor ){ pOrTerm->wtFlags &= ~TERM_OR_OK; - }else if( pOrTerm->u.leftColumn!=iColumn || (iColumn==XN_EXPR + }else if( pOrTerm->u.x.leftColumn!=iColumn || 
(iColumn==XN_EXPR && sqlite3ExprCompare(pParse, pOrTerm->pExpr->pLeft, pLeft, -1) )){ okToChngToIN = 0; @@ -144854,7 +148461,7 @@ static void exprAnalyzeOrTerm( if( (pOrTerm->wtFlags & TERM_OR_OK)==0 ) continue; assert( pOrTerm->eOperator & WO_EQ ); assert( pOrTerm->leftCursor==iCursor ); - assert( pOrTerm->u.leftColumn==iColumn ); + assert( pOrTerm->u.x.leftColumn==iColumn ); pDup = sqlite3ExprDup(db, pOrTerm->pExpr->pRight, 0); pList = sqlite3ExprListAppend(pWInfo->pParse, pList, pDup); pLeft = pOrTerm->pExpr->pLeft; @@ -144870,7 +148477,7 @@ static void exprAnalyzeOrTerm( idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC); testcase( idxNew==0 ); exprAnalyze(pSrc, pWC, idxNew); - /* pTerm = &pWC->a[idxTerm]; // would be needed if pTerm where used again */ + /* pTerm = &pWC->a[idxTerm]; // would be needed if pTerm where reused */ markTermAsChild(pWC, idxNew, idxTerm); }else{ sqlite3ExprListDelete(db, pList); @@ -144994,6 +148601,7 @@ static int exprMightBeIndexed( assert( op<=TK_GE ); if( pExpr->op==TK_VECTOR && (op>=TK_GT && ALWAYS(op<=TK_GE)) ){ pExpr = pExpr->x.pList->a[0].pExpr; + } if( pExpr->op==TK_COLUMN ){ @@ -145006,6 +148614,7 @@ static int exprMightBeIndexed( return exprMightBeIndexed2(pFrom,mPrereq,aiCurCol,pExpr); } + /* ** The input to this routine is an WhereTerm structure with only the ** "pExpr" field filled in. The job of this routine is to analyze the @@ -145090,25 +148699,26 @@ static void exprAnalyze( Expr *pRight = sqlite3ExprSkipCollate(pExpr->pRight); u16 opMask = (pTerm->prereqRight & prereqLeft)==0 ? WO_ALL : WO_EQUIV; - if( pTerm->iField>0 ){ + if( pTerm->u.x.iField>0 ){ assert( op==TK_IN ); assert( pLeft->op==TK_VECTOR ); - pLeft = pLeft->x.pList->a[pTerm->iField-1].pExpr; + pLeft = pLeft->x.pList->a[pTerm->u.x.iField-1].pExpr; } if( exprMightBeIndexed(pSrc, prereqLeft, aiCurCol, pLeft, op) ){ pTerm->leftCursor = aiCurCol[0]; - pTerm->u.leftColumn = aiCurCol[1]; + pTerm->u.x.leftColumn = aiCurCol[1]; pTerm->eOperator = operatorMask(op) & opMask; } if( op==TK_IS ) pTerm->wtFlags |= TERM_IS; if( pRight && exprMightBeIndexed(pSrc, pTerm->prereqRight, aiCurCol, pRight, op) + && !ExprHasProperty(pRight, EP_FixedCol) ){ WhereTerm *pNew; Expr *pDup; u16 eExtraOp = 0; /* Extra bits for pNew->eOperator */ - assert( pTerm->iField==0 ); + assert( pTerm->u.x.iField==0 ); if( pTerm->leftCursor>=0 ){ int idxNew; pDup = sqlite3ExprDup(db, pExpr, 0); @@ -145134,11 +148744,17 @@ static void exprAnalyze( } pNew->wtFlags |= exprCommute(pParse, pDup); pNew->leftCursor = aiCurCol[0]; - pNew->u.leftColumn = aiCurCol[1]; + pNew->u.x.leftColumn = aiCurCol[1]; testcase( (prereqLeft | extraRight) != prereqLeft ); pNew->prereqRight = prereqLeft | extraRight; pNew->prereqAll = prereqAll; pNew->eOperator = (operatorMask(pDup->op) + eExtraOp) & opMask; + }else if( op==TK_ISNULL && 0==sqlite3ExprCanBeNull(pLeft) ){ + pExpr->op = TK_TRUEFALSE; + pExpr->u.zToken = "false"; + ExprSetProperty(pExpr, EP_IsFalse); + pTerm->prereqAll = 0; + pTerm->eOperator = 0; } } @@ -145190,6 +148806,42 @@ static void exprAnalyze( pTerm = &pWC->a[idxTerm]; } #endif /* SQLITE_OMIT_OR_OPTIMIZATION */ + /* The form "x IS NOT NULL" can sometimes be evaluated more efficiently + ** as "x>NULL" if x is not an INTEGER PRIMARY KEY. So construct a + ** virtual term of that form. + ** + ** The virtual term must be tagged with TERM_VNULL. 
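+ ** For example, with (say) an index i1 on t1(b), the query
+ ** "SELECT * FROM t1 WHERE b IS NOT NULL" can be coded as a scan of
+ ** i1 that starts just past the NULL entries at the head of the index,
+ ** driven by the synthesized term "b>NULL". The TERM_VNULL tag keeps
+ ** this synthetic term from being treated as a real user constraint
+ ** elsewhere, e.g. by whereCombineDisjuncts() or when qualifying a
+ ** partial index.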
+ */ + else if( pExpr->op==TK_NOTNULL ){ + if( pExpr->pLeft->op==TK_COLUMN + && pExpr->pLeft->iColumn>=0 + && !ExprHasProperty(pExpr, EP_FromJoin) + ){ + Expr *pNewExpr; + Expr *pLeft = pExpr->pLeft; + int idxNew; + WhereTerm *pNewTerm; + + pNewExpr = sqlite3PExpr(pParse, TK_GT, + sqlite3ExprDup(db, pLeft, 0), + sqlite3ExprAlloc(db, TK_NULL, 0, 0)); + + idxNew = whereClauseInsert(pWC, pNewExpr, + TERM_VIRTUAL|TERM_DYNAMIC|TERM_VNULL); + if( idxNew ){ + pNewTerm = &pWC->a[idxNew]; + pNewTerm->prereqRight = 0; + pNewTerm->leftCursor = pLeft->iTable; + pNewTerm->u.x.leftColumn = pLeft->iColumn; + pNewTerm->eOperator = WO_GT; + markTermAsChild(pWC, idxNew, idxTerm); + pTerm = &pWC->a[idxTerm]; + pTerm->wtFlags |= TERM_COPIED; + pNewTerm->prereqAll = pTerm->prereqAll; + } + } + } + #ifndef SQLITE_OMIT_LIKE_OPTIMIZATION /* Add constraints to reduce the search space on a LIKE or GLOB @@ -145205,7 +148857,8 @@ static void exprAnalyze( ** bound is made all lowercase so that the bounds also work when comparing ** BLOBs. */ - if( pWC->op==TK_AND + else if( pExpr->op==TK_FUNCTION + && pWC->op==TK_AND && isLikeOrGlob(pParse, pExpr, &pStr1, &isComplete, &noCase) ){ Expr *pLeft; /* LHS of LIKE/GLOB operator */ @@ -145275,6 +148928,65 @@ static void exprAnalyze( } #endif /* SQLITE_OMIT_LIKE_OPTIMIZATION */ + /* If there is a vector == or IS term - e.g. "(a, b) == (?, ?)" - create + ** new terms for each component comparison - "a = ?" and "b = ?". The + ** new terms completely replace the original vector comparison, which is + ** no longer used. + ** + ** This is only required if at least one side of the comparison operation + ** is not a sub-select. */ + if( (pExpr->op==TK_EQ || pExpr->op==TK_IS) + && (nLeft = sqlite3ExprVectorSize(pExpr->pLeft))>1 + && sqlite3ExprVectorSize(pExpr->pRight)==nLeft + && ( (pExpr->pLeft->flags & EP_xIsSelect)==0 + || (pExpr->pRight->flags & EP_xIsSelect)==0) + && pWC->op==TK_AND + ){ + int i; + for(i=0; ipLeft, i); + Expr *pRight = sqlite3ExprForVectorField(pParse, pExpr->pRight, i); + + pNew = sqlite3PExpr(pParse, pExpr->op, pLeft, pRight); + transferJoinMarkings(pNew, pExpr); + idxNew = whereClauseInsert(pWC, pNew, TERM_DYNAMIC); + exprAnalyze(pSrc, pWC, idxNew); + } + pTerm = &pWC->a[idxTerm]; + pTerm->wtFlags |= TERM_CODED|TERM_VIRTUAL; /* Disable the original */ + pTerm->eOperator = 0; + } + + /* If there is a vector IN term - e.g. "(a, b) IN (SELECT ...)" - create + ** a virtual term for each vector component. The expression object + ** used by each such virtual term is pExpr (the full vector IN(...) + ** expression). The WhereTerm.u.x.iField variable identifies the index within + ** the vector on the LHS that the virtual term represents. + ** + ** This only works if the RHS is a simple SELECT (not a compound) that does + ** not use window functions. + */ + else if( pExpr->op==TK_IN + && pTerm->u.x.iField==0 + && pExpr->pLeft->op==TK_VECTOR + && pExpr->x.pSelect->pPrior==0 +#ifndef SQLITE_OMIT_WINDOWFUNC + && pExpr->x.pSelect->pWin==0 +#endif + && pWC->op==TK_AND + ){ + int i; + for(i=0; ipLeft); i++){ + int idxNew; + idxNew = whereClauseInsert(pWC, pExpr, TERM_VIRTUAL); + pWC->a[idxNew].u.x.iField = i+1; + exprAnalyze(pSrc, pWC, idxNew); + markTermAsChild(pWC, idxNew, idxTerm); + } + } + #ifndef SQLITE_OMIT_VIRTUALTABLE /* Add a WO_AUX auxiliary term to the constraint set if the ** current expression is of the form "column OP expr" where OP @@ -145285,7 +148997,7 @@ static void exprAnalyze( ** virtual tables. 
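** For example, "ft MATCH 'sqlite'" against an FTS virtual table
** becomes a WO_AUX constraint that is offered to the table's
** xBestIndex method.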
The native query optimizer does not attempt ** to do anything with MATCH functions. */ - if( pWC->op==TK_AND ){ + else if( pWC->op==TK_AND ){ Expr *pRight = 0, *pLeft = 0; int res = isAuxiliaryVtabOperator(db, pExpr, &eOp2, &pLeft, &pRight); while( res-- > 0 ){ @@ -145308,7 +149020,7 @@ static void exprAnalyze( pNewTerm = &pWC->a[idxNew]; pNewTerm->prereqRight = prereqExpr; pNewTerm->leftCursor = pLeft->iTable; - pNewTerm->u.leftColumn = pLeft->iColumn; + pNewTerm->u.x.leftColumn = pLeft->iColumn; pNewTerm->eOperator = WO_AUX; pNewTerm->eMatchOp = eOp2; markTermAsChild(pWC, idxNew, idxTerm); @@ -145321,102 +149033,6 @@ static void exprAnalyze( } #endif /* SQLITE_OMIT_VIRTUALTABLE */ - /* If there is a vector == or IS term - e.g. "(a, b) == (?, ?)" - create - ** new terms for each component comparison - "a = ?" and "b = ?". The - ** new terms completely replace the original vector comparison, which is - ** no longer used. - ** - ** This is only required if at least one side of the comparison operation - ** is not a sub-select. */ - if( pWC->op==TK_AND - && (pExpr->op==TK_EQ || pExpr->op==TK_IS) - && (nLeft = sqlite3ExprVectorSize(pExpr->pLeft))>1 - && sqlite3ExprVectorSize(pExpr->pRight)==nLeft - && ( (pExpr->pLeft->flags & EP_xIsSelect)==0 - || (pExpr->pRight->flags & EP_xIsSelect)==0) - ){ - int i; - for(i=0; ipLeft, i); - Expr *pRight = sqlite3ExprForVectorField(pParse, pExpr->pRight, i); - - pNew = sqlite3PExpr(pParse, pExpr->op, pLeft, pRight); - transferJoinMarkings(pNew, pExpr); - idxNew = whereClauseInsert(pWC, pNew, TERM_DYNAMIC); - exprAnalyze(pSrc, pWC, idxNew); - } - pTerm = &pWC->a[idxTerm]; - pTerm->wtFlags |= TERM_CODED|TERM_VIRTUAL; /* Disable the original */ - pTerm->eOperator = 0; - } - - /* If there is a vector IN term - e.g. "(a, b) IN (SELECT ...)" - create - ** a virtual term for each vector component. The expression object - ** used by each such virtual term is pExpr (the full vector IN(...) - ** expression). The WhereTerm.iField variable identifies the index within - ** the vector on the LHS that the virtual term represents. - ** - ** This only works if the RHS is a simple SELECT (not a compound) that does - ** not use window functions. - */ - if( pWC->op==TK_AND && pExpr->op==TK_IN && pTerm->iField==0 - && pExpr->pLeft->op==TK_VECTOR - && pExpr->x.pSelect->pPrior==0 -#ifndef SQLITE_OMIT_WINDOWFUNC - && pExpr->x.pSelect->pWin==0 -#endif - ){ - int i; - for(i=0; ipLeft); i++){ - int idxNew; - idxNew = whereClauseInsert(pWC, pExpr, TERM_VIRTUAL); - pWC->a[idxNew].iField = i+1; - exprAnalyze(pSrc, pWC, idxNew); - markTermAsChild(pWC, idxNew, idxTerm); - } - } - -#ifdef SQLITE_ENABLE_STAT4 - /* When sqlite_stat4 histogram data is available an operator of the - ** form "x IS NOT NULL" can sometimes be evaluated more efficiently - ** as "x>NULL" if x is not an INTEGER PRIMARY KEY. So construct a - ** virtual term of that form. - ** - ** Note that the virtual term must be tagged with TERM_VNULL. 
- */ - if( pExpr->op==TK_NOTNULL - && pExpr->pLeft->op==TK_COLUMN - && pExpr->pLeft->iColumn>=0 - && !ExprHasProperty(pExpr, EP_FromJoin) - && OptimizationEnabled(db, SQLITE_Stat4) - ){ - Expr *pNewExpr; - Expr *pLeft = pExpr->pLeft; - int idxNew; - WhereTerm *pNewTerm; - - pNewExpr = sqlite3PExpr(pParse, TK_GT, - sqlite3ExprDup(db, pLeft, 0), - sqlite3ExprAlloc(db, TK_NULL, 0, 0)); - - idxNew = whereClauseInsert(pWC, pNewExpr, - TERM_VIRTUAL|TERM_DYNAMIC|TERM_VNULL); - if( idxNew ){ - pNewTerm = &pWC->a[idxNew]; - pNewTerm->prereqRight = 0; - pNewTerm->leftCursor = pLeft->iTable; - pNewTerm->u.leftColumn = pLeft->iColumn; - pNewTerm->eOperator = WO_GT; - markTermAsChild(pWC, idxNew, idxTerm); - pTerm = &pWC->a[idxTerm]; - pTerm->wtFlags |= TERM_COPIED; - pNewTerm->prereqAll = pTerm->prereqAll; - } - } -#endif /* SQLITE_ENABLE_STAT4 */ - /* Prevent ON clause terms of a LEFT JOIN from being used to drive ** an index for tables to the left of the join. */ @@ -145450,6 +149066,7 @@ static void exprAnalyze( SQLITE_PRIVATE void sqlite3WhereSplit(WhereClause *pWC, Expr *pExpr, u8 op){ Expr *pE2 = sqlite3ExprSkipCollateAndLikely(pExpr); pWC->op = op; + assert( pE2!=0 || pExpr==0 ); if( pE2==0 ) return; if( pE2->op!=op ){ whereClauseInsert(pWC, pExpr, 0); @@ -145574,7 +149191,7 @@ SQLITE_PRIVATE void sqlite3WhereExprAnalyze( */ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs( Parse *pParse, /* Parsing context */ - struct SrcList_item *pItem, /* The FROM clause term to process */ + SrcItem *pItem, /* The FROM clause term to process */ WhereClause *pWC /* Xfer function arguments to here */ ){ Table *pTab; @@ -145651,12 +149268,6 @@ struct HiddenIndexInfo { /* Forward declaration of methods */ static int whereLoopResize(sqlite3*, WhereLoop*, int); -/* Test variable that can be set to enable WHERE tracing */ -#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) -/***/ int sqlite3WhereTrace = 0; -#endif - - /* ** Return the estimated number of output rows from a WHERE clause */ @@ -145719,6 +149330,32 @@ SQLITE_PRIVATE int sqlite3WhereOrderByLimitOptLabel(WhereInfo *pWInfo){ return pInner->addrNxt; } +/* +** While generating code for the min/max optimization, after handling +** the aggregate-step call to min() or max(), check to see if any +** additional looping is required. If the output order is such that +** we are certain that the correct answer has already been found, then +** code an OP_Goto to by pass subsequent processing. +** +** Any extra OP_Goto that is coded here is an optimization. The +** correct answer should be obtained regardless. This OP_Goto just +** makes the answer appear faster. +*/ +SQLITE_PRIVATE void sqlite3WhereMinMaxOptEarlyOut(Vdbe *v, WhereInfo *pWInfo){ + WhereLevel *pInner; + int i; + if( !pWInfo->bOrderedInnerLoop ) return; + if( pWInfo->nOBSat==0 ) return; + for(i=pWInfo->nLevel-1; i>=0; i--){ + pInner = &pWInfo->a[i]; + if( (pInner->pWLoop->wsFlags & WHERE_COLUMN_IN)!=0 ){ + sqlite3VdbeGoto(v, pInner->addrNxt); + return; + } + } + sqlite3VdbeGoto(v, pWInfo->iBreak); +} + /* ** Return the VDBE address or label to jump to in order to continue ** immediately with the next row of a WHERE clause. @@ -145849,6 +149486,18 @@ static void createMask(WhereMaskSet *pMaskSet, int iCursor){ pMaskSet->ix[pMaskSet->n++] = iCursor; } +/* +** If the right-hand branch of the expression is a TK_COLUMN, then return +** a pointer to the right-hand branch. Otherwise, return NULL. 
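+** For example, for a term like "t1.a = t2.b" this returns the Expr for
+** t2.b, which whereScanNext() below uses to grow the equivalence class
+** (WO_EQUIV) of columns that can drive an index. Columns tagged
+** EP_FixedCol are skipped, since those have already been bound to
+** constant values.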
+*/ +static Expr *whereRightSubexprIsColumn(Expr *p){ + p = sqlite3ExprSkipCollateAndLikely(p->pRight); + if( ALWAYS(p!=0) && p->op==TK_COLUMN && !ExprHasProperty(p, EP_FixedCol) ){ + return p; + } + return 0; +} + /* ** Advance to the next WhereTerm that matches according to the criteria ** established when the pScan object was initialized by whereScanInit(). @@ -145871,7 +149520,7 @@ static WhereTerm *whereScanNext(WhereScan *pScan){ do{ for(pTerm=pWC->a+k; knTerm; k++, pTerm++){ if( pTerm->leftCursor==iCur - && pTerm->u.leftColumn==iColumn + && pTerm->u.x.leftColumn==iColumn && (iColumn!=XN_EXPR || sqlite3ExprCompareSkip(pTerm->pExpr->pLeft, pScan->pIdxExpr,iCur)==0) @@ -145879,8 +149528,7 @@ static WhereTerm *whereScanNext(WhereScan *pScan){ ){ if( (pTerm->eOperator & WO_EQUIV)!=0 && pScan->nEquivaiCur) - && (pX = sqlite3ExprSkipCollateAndLikely(pTerm->pExpr->pRight))->op - ==TK_COLUMN + && (pX = whereRightSubexprIsColumn(pTerm->pExpr))!=0 ){ int j; for(j=0; jnEquiv; j++){ @@ -145921,6 +149569,18 @@ static WhereTerm *whereScanNext(WhereScan *pScan){ } pScan->pWC = pWC; pScan->k = k+1; +#ifdef WHERETRACE_ENABLED + if( sqlite3WhereTrace & 0x20000 ){ + int ii; + sqlite3DebugPrintf("SCAN-TERM %p: nEquiv=%d", + pTerm, pScan->nEquiv); + for(ii=0; iinEquiv; ii++){ + sqlite3DebugPrintf(" {%d:%d}", + pScan->aiCur[ii], pScan->aiColumn[ii]); + } + sqlite3DebugPrintf("\n"); + } +#endif return pTerm; } } @@ -146076,7 +149736,8 @@ static int findIndexCol( for(i=0; inExpr; i++){ Expr *p = sqlite3ExprSkipCollateAndLikely(pList->a[i].pExpr); - if( p->op==TK_COLUMN + if( ALWAYS(p!=0) + && (p->op==TK_COLUMN || p->op==TK_AGG_COLUMN) && p->iColumn==pIdx->aiColumn[iCol] && p->iTable==iBase ){ @@ -146140,7 +149801,9 @@ static int isDistinctRedundant( */ for(i=0; inExpr; i++){ Expr *p = sqlite3ExprSkipCollateAndLikely(pDistinct->a[i].pExpr); - if( p->op==TK_COLUMN && p->iTable==iBase && p->iColumn<0 ) return 1; + if( NEVER(p==0) ) continue; + if( p->op!=TK_COLUMN && p->op!=TK_AGG_COLUMN ) continue; + if( p->iTable==iBase && p->iColumn<0 ) return 1; } /* Loop through all indices on the table, checking each to see if it makes @@ -146158,6 +149821,7 @@ static int isDistinctRedundant( */ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ if( !IsUniqueIndex(pIdx) ) continue; + if( pIdx->pPartIdxWhere ) continue; for(i=0; inKeyCol; i++){ if( 0==sqlite3WhereFindTerm(pWC, iBase, i, ~(Bitmask)0, WO_EQ, pIdx) ){ if( findIndexCol(pParse, pDistinct, iBase, pIdx, i)<0 ) break; @@ -146212,14 +149876,14 @@ static void translateColumnToCopy( pOp->p2 = pOp->p3; pOp->p3 = 0; }else if( pOp->opcode==OP_Rowid ){ - if( iAutoidxCur ){ - pOp->opcode = OP_Sequence; - pOp->p1 = iAutoidxCur; - }else{ + pOp->opcode = OP_Sequence; + pOp->p1 = iAutoidxCur; +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + if( iAutoidxCur==0 ){ pOp->opcode = OP_Null; - pOp->p1 = 0; pOp->p3 = 0; } +#endif } } } @@ -146277,7 +149941,7 @@ static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){ */ static int termCanDriveIndex( WhereTerm *pTerm, /* WHERE clause term to check */ - struct SrcList_item *pSrc, /* Table we are trying to access */ + SrcItem *pSrc, /* Table we are trying to access */ Bitmask notReady /* Tables in outer loops of the join */ ){ char aff; @@ -146293,8 +149957,8 @@ static int termCanDriveIndex( return 0; } if( (pTerm->prereqRight & notReady)!=0 ) return 0; - if( pTerm->u.leftColumn<0 ) return 0; - aff = pSrc->pTab->aCol[pTerm->u.leftColumn].affinity; + if( pTerm->u.x.leftColumn<0 ) return 0; + aff = 
pSrc->pTab->aCol[pTerm->u.x.leftColumn].affinity; if( !sqlite3IndexAffinityOk(pTerm->pExpr, aff) ) return 0; testcase( pTerm->pExpr->op==TK_IS ); return 1; @@ -146311,7 +149975,7 @@ static int termCanDriveIndex( static void constructAutomaticIndex( Parse *pParse, /* The parsing context */ WhereClause *pWC, /* The WHERE clause */ - struct SrcList_item *pSrc, /* The FROM clause term to get the next index */ + SrcItem *pSrc, /* The FROM clause term to get the next index */ Bitmask notReady, /* Mask of cursors that are not available */ WhereLevel *pLevel /* Write new index here */ ){ @@ -146335,7 +149999,7 @@ static void constructAutomaticIndex( u8 sentWarning = 0; /* True if a warnning has been issued */ Expr *pPartial = 0; /* Partial Index Expression */ int iContinue = 0; /* Jump here to skip excluded rows */ - struct SrcList_item *pTabItem; /* FROM clause term being indexed */ + SrcItem *pTabItem; /* FROM clause term being indexed */ int addrCounter = 0; /* Address where integer counter is initialized */ int regBase; /* Array of registers where record is assembled */ @@ -146365,7 +150029,7 @@ static void constructAutomaticIndex( sqlite3ExprDup(pParse->db, pExpr, 0)); } if( termCanDriveIndex(pTerm, pSrc, notReady) ){ - int iCol = pTerm->u.leftColumn; + int iCol = pTerm->u.x.leftColumn; Bitmask cMask = iCol>=BMS ? MASKBIT(BMS-1) : MASKBIT(iCol); testcase( iCol==BMS ); testcase( iCol==BMS-1 ); @@ -146384,7 +150048,7 @@ static void constructAutomaticIndex( } } } - assert( nKeyCol>0 ); + assert( nKeyCol>0 || pParse->db->mallocFailed ); pLoop->u.btree.nEq = pLoop->nLTerm = nKeyCol; pLoop->wsFlags = WHERE_COLUMN_EQ | WHERE_IDX_ONLY | WHERE_INDEXED | WHERE_AUTO_INDEX; @@ -146418,14 +150082,14 @@ static void constructAutomaticIndex( idxCols = 0; for(pTerm=pWC->a; pTermu.leftColumn; + int iCol = pTerm->u.x.leftColumn; Bitmask cMask = iCol>=BMS ? MASKBIT(BMS-1) : MASKBIT(iCol); testcase( iCol==BMS-1 ); testcase( iCol==BMS ); if( (idxCols & cMask)==0 ){ Expr *pX = pTerm->pExpr; idxCols |= cMask; - pIdx->aiColumn[n] = pTerm->u.leftColumn; + pIdx->aiColumn[n] = pTerm->u.x.leftColumn; pColl = sqlite3ExprCompareCollSeq(pParse, pX); assert( pColl!=0 || pParse->nErr>0 ); /* TH3 collate01.800 */ pIdx->azColl[n] = pColl ? 
pColl->zName : sqlite3StrBINARY; @@ -146519,7 +150183,7 @@ static sqlite3_index_info *allocateIndexInfo( Parse *pParse, /* The parsing context */ WhereClause *pWC, /* The WHERE clause being analyzed */ Bitmask mUnusable, /* Ignore terms with these prereqs */ - struct SrcList_item *pSrc, /* The FROM clause term that is the vtab */ + SrcItem *pSrc, /* The FROM clause term that is the vtab */ ExprList *pOrderBy, /* The ORDER BY clause */ u16 *pmNoOmit /* Mask of terms not to omit */ ){ @@ -146546,7 +150210,7 @@ static sqlite3_index_info *allocateIndexInfo( testcase( pTerm->eOperator & WO_ALL ); if( (pTerm->eOperator & ~(WO_EQUIV))==0 ) continue; if( pTerm->wtFlags & TERM_VNULL ) continue; - assert( pTerm->u.leftColumn>=(-1) ); + assert( pTerm->u.x.leftColumn>=(-1) ); nTerm++; } @@ -146606,8 +150270,8 @@ static sqlite3_index_info *allocateIndexInfo( ){ continue; } - assert( pTerm->u.leftColumn>=(-1) ); - pIdxCons[j].iColumn = pTerm->u.leftColumn; + assert( pTerm->u.x.leftColumn>=(-1) ); + pIdxCons[j].iColumn = pTerm->u.x.leftColumn; pIdxCons[j].iTermOffset = i; op = pTerm->eOperator & WO_ALL; if( op==WO_IN ) op = WO_EQ; @@ -147370,9 +151034,9 @@ SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm){ if( pTerm->wtFlags & TERM_CODED ) zType[3] = 'C'; if( pTerm->eOperator & WO_SINGLE ){ sqlite3_snprintf(sizeof(zLeft),zLeft,"left={%d:%d}", - pTerm->leftCursor, pTerm->u.leftColumn); + pTerm->leftCursor, pTerm->u.x.leftColumn); }else if( (pTerm->eOperator & WO_OR)!=0 && pTerm->u.pOrInfo!=0 ){ - sqlite3_snprintf(sizeof(zLeft),zLeft,"indexable=0x%lld", + sqlite3_snprintf(sizeof(zLeft),zLeft,"indexable=0x%llx", pTerm->u.pOrInfo->indexable); }else{ sqlite3_snprintf(sizeof(zLeft),zLeft,"left=%d", pTerm->leftCursor); @@ -147386,8 +151050,8 @@ SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm){ sqlite3DebugPrintf(" prob=%-3d prereq=%llx,%llx", pTerm->truthProb, (u64)pTerm->prereqAll, (u64)pTerm->prereqRight); } - if( pTerm->iField ){ - sqlite3DebugPrintf(" iField=%d", pTerm->iField); + if( pTerm->u.x.iField ){ + sqlite3DebugPrintf(" iField=%d", pTerm->u.x.iField); } if( pTerm->iParent>=0 ){ sqlite3DebugPrintf(" iParent=%d", pTerm->iParent); @@ -147417,7 +151081,7 @@ SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC){ SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){ WhereInfo *pWInfo = pWC->pWInfo; int nb = 1+(pWInfo->pTabList->nSrc+3)/4; - struct SrcList_item *pItem = pWInfo->pTabList->a + p->iTab; + SrcItem *pItem = pWInfo->pTabList->a + p->iTab; Table *pTab = pItem->pTab; Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1; sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId, @@ -147521,7 +151185,7 @@ static int whereLoopResize(sqlite3 *db, WhereLoop *p, int n){ static int whereLoopXfer(sqlite3 *db, WhereLoop *pTo, WhereLoop *pFrom){ whereLoopClearUnion(db, pTo); if( whereLoopResize(db, pTo, pFrom->nLTerm) ){ - memset(&pTo->u, 0, sizeof(pTo->u)); + memset(pTo, 0, WHERE_LOOP_XFER_SZ); return SQLITE_NOMEM_BKPT; } memcpy(pTo, pFrom, WHERE_LOOP_XFER_SZ); @@ -147564,6 +151228,17 @@ static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){ sqlite3DbFreeNN(db, pWInfo); } +/* Undo all Expr node modifications +*/ +static void whereUndoExprMods(WhereInfo *pWInfo){ + while( pWInfo->pExprMods ){ + WhereExprMod *p = pWInfo->pExprMods; + pWInfo->pExprMods = p->pNext; + memcpy(p->pExpr, &p->orig, sizeof(p->orig)); + sqlite3DbFree(pWInfo->pParse->db, p); + } +} + /* ** Return TRUE if all of the following are true: ** @@ -148028,7 +151703,7 @@ static int 
whereRangeVectorLen( */ static int whereLoopAddBtreeIndex( WhereLoopBuilder *pBuilder, /* The WhereLoop factory */ - struct SrcList_item *pSrc, /* FROM clause term being analyzed */ + SrcItem *pSrc, /* FROM clause term being analyzed */ Index *pProbe, /* An index on pSrc */ LogEst nInMul /* log(Number of iterations due to IN) */ ){ @@ -148054,9 +151729,9 @@ static int whereLoopAddBtreeIndex( pNew = pBuilder->pNew; if( db->mallocFailed ) return SQLITE_NOMEM_BKPT; - WHERETRACE(0x800, ("BEGIN %s.addBtreeIdx(%s), nEq=%d, nSkip=%d\n", + WHERETRACE(0x800, ("BEGIN %s.addBtreeIdx(%s), nEq=%d, nSkip=%d, rRun=%d\n", pProbe->pTable->zName,pProbe->zName, - pNew->u.btree.nEq, pNew->nSkip)); + pNew->u.btree.nEq, pNew->nSkip, pNew->rRun)); assert( (pNew->wsFlags & WHERE_VIRTUALTABLE)==0 ); assert( (pNew->wsFlags & WHERE_TOP_LIMIT)==0 ); @@ -148069,6 +151744,8 @@ static int whereLoopAddBtreeIndex( if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); assert( pNew->u.btree.nEqnColumn ); + assert( pNew->u.btree.nEqnKeyCol + || pProbe->idxType!=SQLITE_IDXTYPE_PRIMARYKEY ); saved_nEq = pNew->u.btree.nEq; saved_nBtm = pNew->u.btree.nBtm; @@ -148150,8 +151827,8 @@ static int whereLoopAddBtreeIndex( /* "x IN (value, value, ...)" */ nIn = sqlite3LogEst(pExpr->x.pList->nExpr); } - if( pProbe->hasStat1 ){ - LogEst M, logK, safetyMargin; + if( pProbe->hasStat1 && rLogSize>=10 ){ + LogEst M, logK, x; /* Let: ** N = the total number of rows in the table ** K = the number of entries on the RHS of the IN operator @@ -148169,20 +151846,30 @@ static int whereLoopAddBtreeIndex( ** a safety margin of 2 (LogEst: 10) that favors using the IN operator ** with the index, as using an index has better worst-case behavior. ** If we do not have real sqlite_stat1 data, always prefer to use - ** the index. + ** the index. Do not bother with this optimization on very small + ** tables (less than 2 rows) as it is pointless in that case. 
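+ ** Roughly speaking: when the IN term is on the left-most indexed
+ ** column, M is close to N, x comes out positive, and the K separate
+ ** indexed lookups are kept. When earlier == terms already narrow the
+ ** range to a handful of rows, x goes negative and it is cheaper to
+ ** step through the range instead: with OP_SeekScan assistance for a
+ ** top-level IN (nInMul<2), or by dropping the IN constraint from the
+ ** index plan altogether.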
*/ M = pProbe->aiRowLogEst[saved_nEq]; logK = estLog(nIn); - safetyMargin = 10; /* TUNING: extra weight for indexed IN */ - if( M + logK + safetyMargin < nIn + rLogSize ){ + /* TUNING v----- 10 to bias toward indexed IN */ + x = M + logK + 10 - (nIn + rLogSize); + if( x>=0 ){ WHERETRACE(0x40, - ("Scan preferred over IN operator on column %d of \"%s\" (%d<%d)\n", - saved_nEq, pProbe->zName, M+logK+10, nIn+rLogSize)); - continue; + ("IN operator (N=%d M=%d logK=%d nIn=%d rLogSize=%d x=%d) " + "prefers indexed lookup\n", + saved_nEq, M, logK, nIn, rLogSize, x)); + }else if( nInMul<2 && OptimizationEnabled(db, SQLITE_SeekScan) ){ + WHERETRACE(0x40, + ("IN operator (N=%d M=%d logK=%d nIn=%d rLogSize=%d x=%d" + " nInMul=%d) prefers skip-scan\n", + saved_nEq, M, logK, nIn, rLogSize, x, nInMul)); + pNew->wsFlags |= WHERE_IN_SEEKSCAN; }else{ WHERETRACE(0x40, - ("IN operator preferred on column %d of \"%s\" (%d>=%d)\n", - saved_nEq, pProbe->zName, M+logK+10, nIn+rLogSize)); + ("IN operator (N=%d M=%d logK=%d nIn=%d rLogSize=%d x=%d" + " nInMul=%d) prefers normal scan\n", + saved_nEq, M, logK, nIn, rLogSize, x, nInMul)); + continue; } } pNew->wsFlags |= WHERE_COLUMN_IN; @@ -148201,6 +151888,7 @@ static int whereLoopAddBtreeIndex( pNew->wsFlags |= WHERE_UNQ_WANTED; } } + if( scan.iEquiv>1 ) pNew->wsFlags |= WHERE_TRANSCONS; }else if( eOp & WO_ISNULL ){ pNew->wsFlags |= WHERE_COLUMN_NULL; }else if( eOp & (WO_GT|WO_GE) ){ @@ -148213,7 +151901,7 @@ static int whereLoopAddBtreeIndex( pBtm = pTerm; pTop = 0; if( pTerm->wtFlags & TERM_LIKEOPT ){ - /* Range contraints that come from the LIKE optimization are + /* Range constraints that come from the LIKE optimization are ** always used in pairs. */ pTop = &pTerm[1]; assert( (pTop-(pTerm->pWC->a))pWC->nTerm ); @@ -148262,7 +151950,7 @@ static int whereLoopAddBtreeIndex( tRowcnt nOut = 0; if( nInMul==0 && pProbe->nSample - && pNew->u.btree.nEq<=pProbe->nSampleCol + && ALWAYS(pNew->u.btree.nEq<=pProbe->nSampleCol) && ((eOp & WO_IN)==0 || !ExprHasProperty(pTerm->pExpr, EP_xIsSelect)) && OptimizationEnabled(db, SQLITE_Stat4) ){ @@ -148344,6 +152032,8 @@ static int whereLoopAddBtreeIndex( if( (pNew->wsFlags & WHERE_TOP_LIMIT)==0 && pNew->u.btree.nEqnColumn + && (pNew->u.btree.nEqnKeyCol || + pProbe->idxType!=SQLITE_IDXTYPE_PRIMARYKEY) ){ whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nInMul+nIn); } @@ -148424,6 +152114,7 @@ static int indexMightHelpWithOrderBy( if( (pOB = pBuilder->pWInfo->pOrderBy)==0 ) return 0; for(ii=0; iinExpr; ii++){ Expr *pExpr = sqlite3ExprSkipCollateAndLikely(pOB->a[ii].pExpr); + if( NEVER(pExpr==0) ) continue; if( pExpr->op==TK_COLUMN && pExpr->iTable==iCursor ){ if( pExpr->iColumn<0 ) return 1; for(jj=0; jjnKeyCol; jj++){ @@ -148464,6 +152155,7 @@ static int whereUsablePartialIndex( if( (!ExprHasProperty(pExpr, EP_FromJoin) || pExpr->iRightJoinTable==iTab) && (isLeft==0 || ExprHasProperty(pExpr, EP_FromJoin)) && sqlite3ExprImpliesExpr(pParse, pExpr, pWhere, iTab) + && (pTerm->wtFlags & TERM_VNULL)==0 ){ return 1; } @@ -148517,7 +152209,7 @@ static int whereLoopAddBtree( LogEst aiRowEstPk[2]; /* The aiRowLogEst[] value for the sPk index */ i16 aiColumnPk = -1; /* The aColumn[] value for the sPk index */ SrcList *pTabList; /* The FROM clause */ - struct SrcList_item *pSrc; /* The FROM clause btree term to add */ + SrcItem *pSrc; /* The FROM clause btree term to add */ WhereLoop *pNew; /* Template WhereLoop object */ int rc = SQLITE_OK; /* Return code */ int iSortIdx = 1; /* Index number */ @@ -148535,9 +152227,9 @@ static int 
whereLoopAddBtree( pWC = pBuilder->pWC; assert( !IsVirtual(pSrc->pTab) ); - if( pSrc->pIBIndex ){ + if( pSrc->fg.isIndexedBy ){ /* An INDEXED BY clause specifies a particular index to use */ - pProbe = pSrc->pIBIndex; + pProbe = pSrc->u2.pIBIndex; }else if( !HasRowid(pTab) ){ pProbe = pTab->pIndex; }else{ @@ -148573,7 +152265,7 @@ static int whereLoopAddBtree( if( !pBuilder->pOrSet /* Not part of an OR optimization */ && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0 && (pWInfo->pParse->db->flags & SQLITE_AutoIndex)!=0 - && pSrc->pIBIndex==0 /* Has no INDEXED BY clause */ + && !pSrc->fg.isIndexedBy /* Has no INDEXED BY clause */ && !pSrc->fg.notIndexed /* Has no NOT INDEXED clause */ && HasRowid(pTab) /* Not WITHOUT ROWID table. (FIXME: Why not?) */ && !pSrc->fg.isCorrelated /* Not a correlated subquery */ @@ -148623,7 +152315,7 @@ static int whereLoopAddBtree( /* Loop over all indices. If there was an INDEXED BY clause, then only ** consider index pProbe. */ for(; rc==SQLITE_OK && pProbe; - pProbe=(pSrc->pIBIndex ? 0 : pProbe->pNext), iSortIdx++ + pProbe=(pSrc->fg.isIndexedBy ? 0 : pProbe->pNext), iSortIdx++ ){ int isLeft = (pSrc->fg.jointype & JT_OUTER)!=0; if( pProbe->pPartIdxWhere!=0 @@ -148655,8 +152347,23 @@ static int whereLoopAddBtree( /* Full table scan */ pNew->iSortIdx = b ? iSortIdx : 0; - /* TUNING: Cost of full table scan is (N*3.0). */ + /* TUNING: Cost of full table scan is 3.0*N. The 3.0 factor is an + ** extra cost designed to discourage the use of full table scans, + ** since index lookups have better worst-case performance if our + ** stat guesses are wrong. Reduce the 3.0 penalty slightly + ** (to 2.75) if we have valid STAT4 information for the table. + ** At 2.75, a full table scan is preferred over using an index on + ** a column with just two distinct values where each value has about + ** an equal number of appearances. Without STAT4 data, we still want + ** to use an index in that case, since the constraint might be for + ** the scarcer of the two values, and in that case an index lookup is + ** better. + */ +#ifdef SQLITE_ENABLE_STAT4 + pNew->rRun = rSize + 16 - 2*((pTab->tabFlags & TF_HasStat4)!=0); +#else pNew->rRun = rSize + 16; +#endif ApplyCostMultiplier(pNew->rRun, pTab->costMult); whereLoopOutputAdjust(pWC, pNew, rSize); rc = whereLoopInsert(pBuilder, pNew); @@ -148783,7 +152490,7 @@ static int whereLoopAddVirtualOne( int rc = SQLITE_OK; WhereLoop *pNew = pBuilder->pNew; Parse *pParse = pBuilder->pWInfo->pParse; - struct SrcList_item *pSrc = &pBuilder->pWInfo->pTabList->a[pNew->iTab]; + SrcItem *pSrc = &pBuilder->pWInfo->pTabList->a[pNew->iTab]; int nConstraint = pIdxInfo->nConstraint; assert( (mUsable & mPrereq)==mPrereq ); @@ -148975,7 +152682,7 @@ static int whereLoopAddVirtual( WhereInfo *pWInfo; /* WHERE analysis context */ Parse *pParse; /* The parsing context */ WhereClause *pWC; /* The WHERE clause */ - struct SrcList_item *pSrc; /* The FROM clause term to search */ + SrcItem *pSrc; /* The FROM clause term to search */ sqlite3_index_info *p; /* Object to pass to xBestIndex() */ int nConstraint; /* Number of constraints in p */ int bIn; /* True if plan uses IN(...) 
operator */ @@ -149103,7 +152810,7 @@ static int whereLoopAddOr( WhereClause tempWC; WhereLoopBuilder sSubBuild; WhereOrSet sSum, sCur; - struct SrcList_item *pItem; + SrcItem *pItem; pWC = pBuilder->pWC; pWCEnd = pWC->a + pWC->nTerm; @@ -149159,7 +152866,9 @@ static int whereLoopAddOr( if( rc==SQLITE_OK ){ rc = whereLoopAddOr(&sSubBuild, mPrereq, mUnusable); } - assert( rc==SQLITE_OK || rc==SQLITE_DONE || sCur.n==0 ); + assert( rc==SQLITE_OK || rc==SQLITE_DONE || sCur.n==0 + || rc==SQLITE_NOMEM ); + testcase( rc==SQLITE_NOMEM && sCur.n>0 ); testcase( rc==SQLITE_DONE ); if( sCur.n==0 ){ sSum.n = 0; @@ -149219,8 +152928,8 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){ Bitmask mPrior = 0; int iTab; SrcList *pTabList = pWInfo->pTabList; - struct SrcList_item *pItem; - struct SrcList_item *pEnd = &pTabList->a[pWInfo->nLevel]; + SrcItem *pItem; + SrcItem *pEnd = &pTabList->a[pWInfo->nLevel]; sqlite3 *db = pWInfo->pParse->db; int rc = SQLITE_OK; WhereLoop *pNew; @@ -149243,7 +152952,7 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){ } #ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pItem->pTab) ){ - struct SrcList_item *p; + SrcItem *p; for(p=&pItem[1]; pfg.jointype & (JT_LEFT|JT_CROSS)) ){ mUnusable |= sqlite3WhereGetMask(&pWInfo->sMaskSet, p->iCursor); @@ -149387,7 +153096,8 @@ static i8 wherePathSatisfiesOrderBy( for(i=0; ia[i].pExpr); - if( pOBExpr->op!=TK_COLUMN ) continue; + if( NEVER(pOBExpr==0) ) continue; + if( pOBExpr->op!=TK_COLUMN && pOBExpr->op!=TK_AGG_COLUMN ) continue; if( pOBExpr->iTable!=iCur ) continue; pTerm = sqlite3WhereFindTerm(&pWInfo->sWC, iCur, pOBExpr->iColumn, ~ready, eqOpMask, 0); @@ -149427,6 +153137,10 @@ static i8 wherePathSatisfiesOrderBy( assert( nColumn==nKeyCol+1 || !HasRowid(pIndex->pTable) ); assert( pIndex->aiColumn[nColumn-1]==XN_ROWID || !HasRowid(pIndex->pTable)); + /* All relevant terms of the index must also be non-NULL in order + ** for isOrderDistinct to be true. So the isOrderDistint value + ** computed here might be a false positive. Corrections will be + ** made at tag-20210426-1 below */ isOrderDistinct = IsUniqueIndex(pIndex) && (pLoop->wsFlags & WHERE_SKIPSCAN)==0; } @@ -149494,14 +153208,18 @@ static i8 wherePathSatisfiesOrderBy( } /* An unconstrained column that might be NULL means that this - ** WhereLoop is not well-ordered + ** WhereLoop is not well-ordered. tag-20210426-1 */ - if( isOrderDistinct - && iColumn>=0 - && j>=pLoop->u.btree.nEq - && pIndex->pTable->aCol[iColumn].notNull==0 - ){ - isOrderDistinct = 0; + if( isOrderDistinct ){ + if( iColumn>=0 + && j>=pLoop->u.btree.nEq + && pIndex->pTable->aCol[iColumn].notNull==0 + ){ + isOrderDistinct = 0; + } + if( iColumn==XN_EXPR ){ + isOrderDistinct = 0; + } } /* Find the ORDER BY term that corresponds to the j-th column @@ -149513,9 +153231,10 @@ static i8 wherePathSatisfiesOrderBy( pOBExpr = sqlite3ExprSkipCollateAndLikely(pOrderBy->a[i].pExpr); testcase( wctrlFlags & WHERE_GROUPBY ); testcase( wctrlFlags & WHERE_DISTINCTBY ); + if( NEVER(pOBExpr==0) ) continue; if( (wctrlFlags & (WHERE_GROUPBY|WHERE_DISTINCTBY))==0 ) bOnce = 0; if( iColumn>=XN_ROWID ){ - if( pOBExpr->op!=TK_COLUMN ) continue; + if( pOBExpr->op!=TK_COLUMN && pOBExpr->op!=TK_AGG_COLUMN ) continue; if( pOBExpr->iTable!=iCur ) continue; if( pOBExpr->iColumn!=iColumn ) continue; }else{ @@ -149667,16 +153386,24 @@ static LogEst whereSortingCost( ** cost = (3.0 * N * log(N)) * (Y/X) ** ** The (Y/X) term is implemented using stack variable rScale - ** below. */ + ** below. 
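+ ** For example, with nOrderBy=3 and nSorted=2: (1*100)/3 = 33, and
+ ** sqlite3LogEst(33)-66 is about -16, i.e. a multiplier of roughly
+ ** 2^(-1.6) or 1/3, which matches Y/X. The constant 16 added below is
+ ** sqlite3LogEst(3), the 3.0 factor from the formula above.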
+ */ LogEst rScale, rSortCost; assert( nOrderBy>0 && 66==sqlite3LogEst(100) ); rScale = sqlite3LogEst((nOrderBy-nSorted)*100/nOrderBy) - 66; rSortCost = nRow + rScale + 16; /* Multiple by log(M) where M is the number of output rows. - ** Use the LIMIT for M if it is smaller */ + ** Use the LIMIT for M if it is smaller. Or if this sort is for + ** a DISTINCT operator, M will be the number of distinct output + ** rows, so fudge it downwards a bit. + */ if( (pWInfo->wctrlFlags & WHERE_USE_LIMIT)!=0 && pWInfo->iLimitiLimit; + }else if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT) ){ + /* TUNING: In the sort for a DISTINCT operator, assume that the DISTINCT + ** reduces the number of output rows by a factor of 2 */ + if( nRow>10 ){ nRow -= 10; assert( 10==sqlite3LogEst(2) ); } } rSortCost += estLog(nRow); return rSortCost; @@ -150088,7 +153815,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ */ static int whereShortCut(WhereLoopBuilder *pBuilder){ WhereInfo *pWInfo; - struct SrcList_item *pItem; + SrcItem *pItem; WhereClause *pWC; WhereTerm *pTerm; WhereLoop *pLoop; @@ -150547,7 +154274,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( if( pWInfo->pOrderBy==0 && (db->flags & SQLITE_ReverseOrder)!=0 ){ pWInfo->revMask = ALLBITS; } - if( pParse->nErr || NEVER(db->mallocFailed) ){ + if( pParse->nErr || db->mallocFailed ){ goto whereBeginError; } #ifdef WHERETRACE_ENABLED @@ -150608,7 +154335,8 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( */ notReady = ~(Bitmask)0; if( pWInfo->nLevel>=2 - && pResultSet!=0 /* guarantees condition (1) above */ + && pResultSet!=0 /* these two combine to guarantee */ + && 0==(wctrlFlags & WHERE_AGG_DISTINCT) /* condition (1) above */ && OptimizationEnabled(db, SQLITE_OmitNoopJoin) ){ int i; @@ -150618,7 +154346,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( } for(i=pWInfo->nLevel-1; i>=1; i--){ WhereTerm *pTerm, *pEnd; - struct SrcList_item *pItem; + SrcItem *pItem; pLoop = pWInfo->a[i].pWLoop; pItem = &pWInfo->pTabList->a[pLoop->iTab]; if( (pItem->fg.jointype & JT_LEFT)==0 ) continue; @@ -150708,7 +154436,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( for(ii=0, pLevel=pWInfo->a; iia[pLevel->iFrom]; pTab = pTabItem->pTab; @@ -150803,6 +154531,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( if( (pLoop->wsFlags & WHERE_CONSTRAINT)!=0 && (pLoop->wsFlags & (WHERE_COLUMN_RANGE|WHERE_SKIPSCAN))==0 && (pLoop->wsFlags & WHERE_BIGNULL_SORT)==0 + && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 && (pWInfo->wctrlFlags&WHERE_ORDERBY_MIN)==0 && pWInfo->eDistinct!=WHERE_DISTINCT_ORDERED ){ @@ -150860,11 +154589,14 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( /* Done. */ VdbeModuleComment((v, "Begin WHERE-core")); + pWInfo->iEndWhere = sqlite3VdbeCurrentAddr(v); return pWInfo; /* Jump here if malloc fails */ whereBeginError: if( pWInfo ){ + testcase( pWInfo->pExprMods!=0 ); + whereUndoExprMods(pWInfo); pParse->nQueryLoop = pWInfo->savedNQueryLoop; whereInfoFree(db, pWInfo); } @@ -150903,6 +154635,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ WhereLoop *pLoop; SrcList *pTabList = pWInfo->pTabList; sqlite3 *db = pParse->db; + int iEnd = sqlite3VdbeCurrentAddr(v); /* Generate loop termination code. 
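** That is: for each WhereLevel, resolve the addrNxt/addrCont labels,
** emit the OP_Next/OP_Prev (or OP_VNext) that advances the loop
** cursor, and unwind any IN(...) driver loops, working from the
** innermost level outward.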
*/ @@ -150960,10 +154693,14 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ int j; sqlite3VdbeResolveLabel(v, pLevel->addrNxt); for(j=pLevel->u.in.nIn, pIn=&pLevel->u.in.aInLoop[j-1]; j>0; j--, pIn--){ + assert( sqlite3VdbeGetOp(v, pIn->addrInTop+1)->opcode==OP_IsNull + || pParse->db->mallocFailed ); sqlite3VdbeJumpHere(v, pIn->addrInTop+1); if( pIn->eEndLoopOp!=OP_Noop ){ if( pIn->nPrefix ){ - assert( pLoop->wsFlags & WHERE_IN_EARLYOUT ); + int bEarlyOut = + (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 + && (pLoop->wsFlags & WHERE_IN_EARLYOUT)!=0; if( pLevel->iLeftJoin ){ /* For LEFT JOIN queries, cursor pIn->iCur may not have been ** opened yet. This occurs for WHERE clauses such as @@ -150974,16 +154711,19 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ ** jump over the OP_Next or OP_Prev instruction about to ** be coded. */ sqlite3VdbeAddOp2(v, OP_IfNotOpen, pIn->iCur, - sqlite3VdbeCurrentAddr(v) + 2 + - ((pLoop->wsFlags & WHERE_VIRTUALTABLE)==0) - ); + sqlite3VdbeCurrentAddr(v) + 2 + bEarlyOut); VdbeCoverage(v); } - if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 ){ + if( bEarlyOut ){ sqlite3VdbeAddOp4Int(v, OP_IfNoHope, pLevel->iIdxCur, sqlite3VdbeCurrentAddr(v)+2, pIn->iBase, pIn->nPrefix); VdbeCoverage(v); + /* Retarget the OP_IsNull against the left operand of IN so + ** it jumps past the OP_IfNoHope. This is because the + ** OP_IsNull also bypasses the OP_Affinity opcode that is + ** required by OP_IfNoHope. */ + sqlite3VdbeJumpHere(v, pIn->addrInTop+1); } } sqlite3VdbeAddOp2(v, pIn->eEndLoopOp, pIn->iCur, pIn->addrInTop); @@ -151040,9 +154780,9 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ assert( pWInfo->nLevel<=pTabList->nSrc ); for(i=0, pLevel=pWInfo->a; inLevel; i++, pLevel++){ int k, last; - VdbeOp *pOp; + VdbeOp *pOp, *pLastOp; Index *pIdx = 0; - struct SrcList_item *pTabItem = &pTabList->a[pLevel->iFrom]; + SrcItem *pTabItem = &pTabList->a[pLevel->iFrom]; Table *pTab = pTabItem->pTab; assert( pTab!=0 ); pLoop = pLevel->pWLoop; @@ -151098,20 +154838,31 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ pIdx = pLevel->u.pCovidx; } if( pIdx - && (pWInfo->eOnePass==ONEPASS_OFF || !HasRowid(pIdx->pTable)) && !db->mallocFailed ){ - last = sqlite3VdbeCurrentAddr(v); - k = pLevel->addrBody; + if( pWInfo->eOnePass==ONEPASS_OFF || !HasRowid(pIdx->pTable) ){ + last = iEnd; + }else{ + last = pWInfo->iEndWhere; + } + k = pLevel->addrBody + 1; #ifdef SQLITE_DEBUG if( db->flags & SQLITE_VdbeAddopTrace ){ printf("TRANSLATE opcodes in range %d..%d\n", k, last-1); } + /* Proof that the "+1" on the k value above is safe */ + pOp = sqlite3VdbeGetOp(v, k - 1); + assert( pOp->opcode!=OP_Column || pOp->p1!=pLevel->iTabCur ); + assert( pOp->opcode!=OP_Rowid || pOp->p1!=pLevel->iTabCur ); + assert( pOp->opcode!=OP_IfNullRow || pOp->p1!=pLevel->iTabCur ); #endif pOp = sqlite3VdbeGetOp(v, k); - for(; kp1!=pLevel->iTabCur ) continue; - if( pOp->opcode==OP_Column + pLastOp = pOp + (last - k); + assert( pOp<=pLastOp ); + do{ + if( pOp->p1!=pLevel->iTabCur ){ + /* no-op */ + }else if( pOp->opcode==OP_Column #ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC || pOp->opcode==OP_Offset #endif @@ -151142,23 +154893,19 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ pOp->p1 = pLevel->iIdxCur; OpcodeRewriteTrace(db, k, pOp); } - } +#ifdef SQLITE_DEBUG + k++; +#endif + }while( (++pOp)flags & SQLITE_VdbeAddopTrace ) printf("TRANSLATE complete\n"); #endif } } - /* Undo all Expr node modifications */ - while( pWInfo->pExprMods ){ - WhereExprMod *p = pWInfo->pExprMods; - 
pWInfo->pExprMods = p->pNext; - memcpy(p->pExpr, &p->orig, sizeof(p->orig)); - sqlite3DbFree(db, p); - } - /* Final cleanup */ + if( pWInfo->pExprMods ) whereUndoExprMods(pWInfo); pParse->nQueryLoop = pWInfo->savedNQueryLoop; whereInfoFree(db, pWInfo); return; @@ -151956,6 +155703,7 @@ static int selectWindowRewriteExprCb(Walker *pWalker, Expr *pExpr){ case TK_AGG_FUNCTION: case TK_COLUMN: { int iCol = -1; + if( pParse->db->mallocFailed ) return WRC_Abort; if( p->pSub ){ int i; for(i=0; ipSub->nExpr; i++){ @@ -152065,9 +155813,14 @@ static ExprList *exprListAppendList( int i; int nInit = pList ? pList->nExpr : 0; for(i=0; inExpr; i++){ - Expr *pDup = sqlite3ExprDup(pParse->db, pAppend->a[i].pExpr, 0); + sqlite3 *db = pParse->db; + Expr *pDup = sqlite3ExprDup(db, pAppend->a[i].pExpr, 0); assert( pDup==0 || !ExprHasProperty(pDup, EP_MemToken) ); - if( bIntToNull && pDup ){ + if( db->mallocFailed ){ + sqlite3ExprDelete(db, pDup); + break; + } + if( bIntToNull ){ int iDummy; Expr *pSub; for(pSub=pDup; ExprHasProperty(pSub, EP_Skip); pSub=pSub->pLeft){ @@ -152103,6 +155856,14 @@ static int sqlite3WindowExtraAggFuncDepth(Walker *pWalker, Expr *pExpr){ return WRC_Continue; } +static int disallowAggregatesInOrderByCb(Walker *pWalker, Expr *pExpr){ + if( pExpr->op==TK_AGG_FUNCTION && pExpr->pAggInfo==0 ){ + sqlite3ErrorMsg(pWalker->pParse, + "misuse of aggregate: %s()", pExpr->u.zToken); + } + return WRC_Continue; +} + /* ** If the SELECT statement passed as the second argument does not invoke ** any SQL window functions, this function is a no-op. Otherwise, it @@ -152112,7 +155873,7 @@ static int sqlite3WindowExtraAggFuncDepth(Walker *pWalker, Expr *pExpr){ */ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ int rc = SQLITE_OK; - if( p->pWin && p->pPrior==0 && (p->selFlags & SF_WinRewrite)==0 ){ + if( p->pWin && p->pPrior==0 && ALWAYS((p->selFlags & SF_WinRewrite)==0) ){ Vdbe *v = sqlite3GetVdbe(pParse); sqlite3 *db = pParse->db; Select *pSub = 0; /* The subquery */ @@ -152136,6 +155897,11 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ } sqlite3AggInfoPersistWalkerInit(&w, pParse); sqlite3WalkSelect(&w, p); + if( (p->selFlags & SF_Aggregate)==0 ){ + w.xExprCallback = disallowAggregatesInOrderByCb; + w.xSelectCallback = 0; + sqlite3WalkExprList(&w, p->pOrderBy); + } p->pSrc = 0; p->pWhere = 0; @@ -152472,15 +156238,19 @@ SQLITE_PRIVATE void sqlite3WindowAttach(Parse *pParse, Expr *p, Window *pWin){ ** SELECT, or (b) the windows already linked use a compatible window frame. 
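** For example, in
**   SELECT sum(x) OVER win, avg(x) OVER win FROM t1
**   WINDOW win AS (ORDER BY y)
** both calls end up on the same linked list and are computed in a
** single pass. If an incompatible window with a different PARTITION BY
** is seen, the SF_MultiPart flag is set on the SELECT instead.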
*/ SQLITE_PRIVATE void sqlite3WindowLink(Select *pSel, Window *pWin){ - if( pSel!=0 - && (0==pSel->pWin || 0==sqlite3WindowCompare(0, pSel->pWin, pWin, 0)) - ){ - pWin->pNextWin = pSel->pWin; - if( pSel->pWin ){ - pSel->pWin->ppThis = &pWin->pNextWin; + if( pSel ){ + if( 0==pSel->pWin || 0==sqlite3WindowCompare(0, pSel->pWin, pWin, 0) ){ + pWin->pNextWin = pSel->pWin; + if( pSel->pWin ){ + pSel->pWin->ppThis = &pWin->pNextWin; + } + pSel->pWin = pWin; + pWin->ppThis = &pSel->pWin; + }else{ + if( sqlite3ExprListCompare(pWin->pPartition, pSel->pWin->pPartition,-1) ){ + pSel->selFlags |= SF_MultiPart; + } } - pSel->pWin = pWin; - pWin->ppThis = &pSel->pWin; } } @@ -152633,6 +156403,7 @@ static void windowCheckValue(Parse *pParse, int reg, int eCond){ VdbeCoverageIf(v, eCond==2); } sqlite3VdbeAddOp3(v, aOp[eCond], regZero, sqlite3VdbeCurrentAddr(v)+2, reg); + sqlite3VdbeChangeP5(v, SQLITE_AFF_NUMERIC); VdbeCoverageNeverNullIf(v, eCond==0); /* NULL case captured by */ VdbeCoverageNeverNullIf(v, eCond==1); /* the OP_MustBeInt */ VdbeCoverageNeverNullIf(v, eCond==2); @@ -152727,6 +156498,7 @@ struct WindowCodeArg { int regGosub; /* Register used with OP_Gosub(addrGosub) */ int regArg; /* First in array of accumulator registers */ int eDelete; /* See above */ + int regRowid; WindowCsrAndReg start; WindowCsrAndReg current; @@ -152843,15 +156615,15 @@ static void windowAggStep( } if( pWin->bExprArgs ){ - int iStart = sqlite3VdbeCurrentAddr(v); - VdbeOp *pOp, *pEnd; + int iOp = sqlite3VdbeCurrentAddr(v); + int iEnd; nArg = pWin->pOwner->x.pList->nExpr; regArg = sqlite3GetTempRange(pParse, nArg); sqlite3ExprCodeExprList(pParse, pWin->pOwner->x.pList, regArg, 0, 0); - pEnd = sqlite3VdbeGetOp(v, -1); - for(pOp=sqlite3VdbeGetOp(v, iStart); pOp<=pEnd; pOp++){ + for(iEnd=sqlite3VdbeCurrentAddr(v); iOpopcode==OP_Column && pOp->p1==pWin->iEphCsr ){ pOp->p1 = csr; } @@ -153210,7 +156982,7 @@ static void windowIfNewPeer( ** if( csr1.peerVal - regVal <= csr2.peerVal ) goto lbl; ** ** A special type of arithmetic is used such that if csr1.peerVal is not -** a numeric type (real or integer), then the result of the addition addition +** a numeric type (real or integer), then the result of the addition ** or subtraction is a a copy of csr1.peerVal. */ static void windowCodeRangeTest( @@ -153229,6 +157001,12 @@ static void windowCodeRangeTest( int regString = ++pParse->nMem; /* Reg. for constant value '' */ int arith = OP_Add; /* OP_Add or OP_Subtract */ int addrGe; /* Jump destination */ + int addrDone = sqlite3VdbeMakeLabel(pParse); /* Address past OP_Ge */ + CollSeq *pColl; + + /* Read the peer-value from each cursor into a register */ + windowReadPeerValues(p, csr1, reg1); + windowReadPeerValues(p, csr2, reg2); assert( op==OP_Ge || op==OP_Gt || op==OP_Le ); assert( pOrderBy && pOrderBy->nExpr==1 ); @@ -153241,34 +157019,11 @@ static void windowCodeRangeTest( arith = OP_Subtract; } - /* Read the peer-value from each cursor into a register */ - windowReadPeerValues(p, csr1, reg1); - windowReadPeerValues(p, csr2, reg2); - VdbeModuleComment((v, "CodeRangeTest: if( R%d %s R%d %s R%d ) goto lbl", reg1, (arith==OP_Add ? "+" : "-"), regVal, ((op==OP_Ge) ? ">=" : (op==OP_Le) ? "<=" : (op==OP_Gt) ? ">" : "<"), reg2 )); - /* Register reg1 currently contains csr1.peerVal (the peer-value from csr1). - ** This block adds (or subtracts for DESC) the numeric value in regVal - ** from it. Or, if reg1 is not numeric (it is a NULL, a text value or a blob), - ** then leave reg1 as it is. 
In pseudo-code, this is implemented as: - ** - ** if( reg1>='' ) goto addrGe; - ** reg1 = reg1 +/- regVal - ** addrGe: - ** - ** Since all strings and blobs are greater-than-or-equal-to an empty string, - ** the add/subtract is skipped for these, as required. If reg1 is a NULL, - ** then the arithmetic is performed, but since adding or subtracting from - ** NULL is always NULL anyway, this case is handled as required too. */ - sqlite3VdbeAddOp4(v, OP_String8, 0, regString, 0, "", P4_STATIC); - addrGe = sqlite3VdbeAddOp3(v, OP_Ge, regString, 0, reg1); - VdbeCoverage(v); - sqlite3VdbeAddOp3(v, arith, regVal, reg1, reg1); - sqlite3VdbeJumpHere(v, addrGe); - /* If the BIGNULL flag is set for the ORDER BY, then it is required to ** consider NULL values to be larger than all other values, instead of ** the usual smaller. The VDBE opcodes OP_Ge and so on do not handle this @@ -153305,21 +157060,46 @@ static void windowCodeRangeTest( break; default: assert( op==OP_Lt ); /* no-op */ break; } - sqlite3VdbeAddOp2(v, OP_Goto, 0, sqlite3VdbeCurrentAddr(v)+3); + sqlite3VdbeAddOp2(v, OP_Goto, 0, addrDone); /* This block runs if reg1 is not NULL, but reg2 is. */ sqlite3VdbeJumpHere(v, addr); sqlite3VdbeAddOp2(v, OP_IsNull, reg2, lbl); VdbeCoverage(v); if( op==OP_Gt || op==OP_Ge ){ - sqlite3VdbeChangeP2(v, -1, sqlite3VdbeCurrentAddr(v)+1); + sqlite3VdbeChangeP2(v, -1, addrDone); } } + /* Register reg1 currently contains csr1.peerVal (the peer-value from csr1). + ** This block adds (or subtracts for DESC) the numeric value in regVal + ** from it. Or, if reg1 is not numeric (it is a NULL, a text value or a blob), + ** then leave reg1 as it is. In pseudo-code, this is implemented as: + ** + ** if( reg1>='' ) goto addrGe; + ** reg1 = reg1 +/- regVal + ** addrGe: + ** + ** Since all strings and blobs are greater-than-or-equal-to an empty string, + ** the add/subtract is skipped for these, as required. If reg1 is a NULL, + ** then the arithmetic is performed, but since adding or subtracting from + ** NULL is always NULL anyway, this case is handled as required too. */ + sqlite3VdbeAddOp4(v, OP_String8, 0, regString, 0, "", P4_STATIC); + addrGe = sqlite3VdbeAddOp3(v, OP_Ge, regString, 0, reg1); + VdbeCoverage(v); + if( (op==OP_Ge && arith==OP_Add) || (op==OP_Le && arith==OP_Subtract) ){ + sqlite3VdbeAddOp3(v, op, reg2, lbl, reg1); VdbeCoverage(v); + } + sqlite3VdbeAddOp3(v, arith, regVal, reg1, reg1); + sqlite3VdbeJumpHere(v, addrGe); + /* Compare registers reg2 and reg1, taking the jump if required. Note that ** control skips over this test if the BIGNULL flag is set and either ** reg1 or reg2 contain a NULL value. */ sqlite3VdbeAddOp3(v, op, reg2, lbl, reg1); VdbeCoverage(v); + pColl = sqlite3ExprNNCollSeq(pParse, pOrderBy->a[0].pExpr); + sqlite3VdbeAppendP4(v, (void*)pColl, P4_COLLSEQ); sqlite3VdbeChangeP5(v, SQLITE_NULLEQ); + sqlite3VdbeResolveLabel(v, addrDone); assert( op==OP_Ge || op==OP_Gt || op==OP_Lt || op==OP_Le ); testcase(op==OP_Ge); VdbeCoverageIf(v, op==OP_Ge); @@ -153395,16 +157175,24 @@ static int windowCodeOp( /* If this is a (RANGE BETWEEN a FOLLOWING AND b FOLLOWING) or ** (RANGE BETWEEN b PRECEDING AND a PRECEDING) frame, ensure the ** start cursor does not advance past the end cursor within the - ** temporary table. It otherwise might, if (a>b). */ + ** temporary table. It otherwise might, if (a>b). Also ensure that, + ** if the input cursor is still finding new rows, that the end + ** cursor does not go past it to EOF. 
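** For example, with RANGE BETWEEN 1 FOLLOWING AND 10 FOLLOWING the
** frame ends ahead of the current row, so while the partition is still
** being loaded the end cursor has to wait for rows that have not yet
** been inserted into the ephemeral table; the regRowid test below
** enforces that.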
*/ if( pMWin->eStart==pMWin->eEnd && regCountdown - && pMWin->eFrmType==TK_RANGE && op==WINDOW_AGGINVERSE + && pMWin->eFrmType==TK_RANGE ){ int regRowid1 = sqlite3GetTempReg(pParse); int regRowid2 = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp2(v, OP_Rowid, p->start.csr, regRowid1); - sqlite3VdbeAddOp2(v, OP_Rowid, p->end.csr, regRowid2); - sqlite3VdbeAddOp3(v, OP_Ge, regRowid2, lblDone, regRowid1); - VdbeCoverage(v); + if( op==WINDOW_AGGINVERSE ){ + sqlite3VdbeAddOp2(v, OP_Rowid, p->start.csr, regRowid1); + sqlite3VdbeAddOp2(v, OP_Rowid, p->end.csr, regRowid2); + sqlite3VdbeAddOp3(v, OP_Ge, regRowid2, lblDone, regRowid1); + VdbeCoverage(v); + }else if( p->regRowid ){ + sqlite3VdbeAddOp2(v, OP_Rowid, p->end.csr, regRowid1); + sqlite3VdbeAddOp3(v, OP_Ge, p->regRowid, lblDone, regRowid1); + VdbeCoverageNeverNull(v); + } sqlite3ReleaseTempReg(pParse, regRowid1); sqlite3ReleaseTempReg(pParse, regRowid2); assert( pMWin->eStart==TK_PRECEDING || pMWin->eStart==TK_FOLLOWING ); @@ -153901,7 +157689,6 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( int addrEmpty; /* Address of OP_Rewind in flush: */ int regNew; /* Array of registers holding new input row */ int regRecord; /* regNew array in record form */ - int regRowid; /* Rowid for regRecord in eph table */ int regNewPeer = 0; /* Peer values for new row (part of regNew) */ int regPeer = 0; /* Peer values for current row */ int regFlushPart = 0; /* Register for "Gosub flush_partition" */ @@ -153973,7 +157760,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( regNew = pParse->nMem+1; pParse->nMem += nInput; regRecord = ++pParse->nMem; - regRowid = ++pParse->nMem; + s.regRowid = ++pParse->nMem; /* If the window frame contains an " PRECEDING" or " FOLLOWING" ** clause, allocate registers to store the results of evaluating each @@ -154029,9 +157816,9 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( } /* Insert the new row into the ephemeral table */ - sqlite3VdbeAddOp2(v, OP_NewRowid, csrWrite, regRowid); - sqlite3VdbeAddOp3(v, OP_Insert, csrWrite, regRecord, regRowid); - addrNe = sqlite3VdbeAddOp3(v, OP_Ne, pMWin->regOne, 0, regRowid); + sqlite3VdbeAddOp2(v, OP_NewRowid, csrWrite, s.regRowid); + sqlite3VdbeAddOp3(v, OP_Insert, csrWrite, regRecord, s.regRowid); + addrNe = sqlite3VdbeAddOp3(v, OP_Ne, pMWin->regOne, 0, s.regRowid); VdbeCoverageNeverNull(v); /* This block is run for the first row of each partition */ @@ -154149,6 +157936,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( sqlite3VdbeJumpHere(v, addrGosubFlush); } + s.regRowid = 0; addrEmpty = sqlite3VdbeAddOp1(v, OP_Rewind, csrWrite); VdbeCoverage(v); if( pMWin->eEnd==TK_PRECEDING ){ @@ -154211,8 +157999,10 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( /************** End of window.c **********************************************/ /************** Begin file parse.c *******************************************/ +/* This file is automatically generated by Lemon from input grammar +** source file "parse.y". */ /* -** 2000-05-29 +** 2001-09-15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: @@ -154222,22 +158012,15 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( ** May you share freely, never taking more than you give. ** ************************************************************************* -** Driver template for the LEMON parser generator. +** This file contains SQLite's SQL parser. ** -** The "lemon" program processes an LALR(1) input grammar file, then uses -** this template to construct a parser. 
@@ -154211,8 +157999,10 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep(

 /************** End of window.c **********************************************/
 /************** Begin file parse.c *******************************************/
+/* This file is automatically generated by Lemon from input grammar
+** source file "parse.y". */
 /*
-** 2000-05-29
+** 2001-09-15
 **
 ** The author disclaims copyright to this source code.  In place of
 ** a legal notice, here is a blessing:
@@ -154222,22 +158012,15 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep(
 ** May you share freely, never taking more than you give.
 **
 *************************************************************************
-** Driver template for the LEMON parser generator.
+** This file contains SQLite's SQL parser.
 **
-** The "lemon" program processes an LALR(1) input grammar file, then uses
-** this template to construct a parser.  The "lemon" program inserts text
-** at each "%%" line.  Also, any "P-a-r-s-e" identifer prefix (without the
-** interstitial "-" characters) contained in this template is changed into
-** the value of the %name directive from the grammar.  Otherwise, the content
-** of this template is copied straight through into the generate parser
-** source file.
-**
-** The following is the concatenation of all %include directives from the
-** input grammar file:
+** The canonical source code to this file ("parse.y") is a Lemon grammar
+** file that specifies the input grammar and actions to take while parsing.
+** That input file is processed by Lemon to generate a C-language
+** implementation of a parser for the given grammar.  You might be reading
+** this comment as part of the translated C-code.  Edits should be made
+** to the original parse.y sources.
 */
-/* #include <stdio.h> */
-/* #include <assert.h> */
-/************ Begin %include sections from the grammar ************************/
 /* #include "sqliteInt.h" */
@@ -154330,11 +158113,21 @@ static void updateDeleteLimitError(
 static void parserDoubleLinkSelect(Parse *pParse, Select *p){
   assert( p!=0 );
   if( p->pPrior ){
-    Select *pNext = 0, *pLoop;
-    int mxSelect, cnt = 0;
-    for(pLoop=p; pLoop; pNext=pLoop, pLoop=pLoop->pPrior, cnt++){
+    Select *pNext = 0, *pLoop = p;
+    int mxSelect, cnt = 1;
+    while(1){
       pLoop->pNext = pNext;
       pLoop->selFlags |= SF_Compound;
+      pNext = pLoop;
+      pLoop = pLoop->pPrior;
+      if( pLoop==0 ) break;
+      cnt++;
+      if( pLoop->pOrderBy || pLoop->pLimit ){
+        sqlite3ErrorMsg(pParse,"%s clause should come after %s not before",
+            pLoop->pOrderBy!=0 ? "ORDER BY" : "LIMIT",
+            sqlite3SelectOpName(pNext->op));
+        break;
+      }
     }
     if( (p->selFlags & SF_MultiValue)==0 &&
       (mxSelect = pParse->db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT])>0 &&
@@ -154345,6 +158138,19 @@ static void updateDeleteLimitError(
     }
   }

+  /* Attach a With object describing the WITH clause to a Select
+  ** object describing the query for which the WITH clause is a prefix.
+  */
+  static Select *attachWithToSelect(Parse *pParse, Select *pSelect, With *pWith){
+    if( pSelect ){
+      pSelect->pWith = pWith;
+      parserDoubleLinkSelect(pParse, pSelect);
+    }else{
+      sqlite3WithDelete(pParse->db, pWith);
+    }
+    return pSelect;
+  }
+
  /* Construct a new Expr object from a single identifier.  Use the
  ** new Expr to populate pOut.  Set the span of pOut to be the identifier
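The reworked loop above reports a misplaced ORDER BY or LIMIT as soon as the compound is linked, naming the operator via sqlite3SelectOpName(). A short sketch (illustrative only, not part of the patch) that triggers the message:

    /* Compile with: cc misplaced.c -lsqlite3 */
    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *pStmt = 0;
      sqlite3_open(":memory:", &db);
      /* The ORDER BY belongs after the whole compound, not before UNION. */
      if( sqlite3_prepare_v2(db,
            "SELECT 1 ORDER BY 1 UNION SELECT 2", -1, &pStmt, 0)!=SQLITE_OK ){
        /* Expected wording, per the format string in the code above:
        ** "ORDER BY clause should come after UNION not before" */
        printf("error: %s\n", sqlite3_errmsg(db));
      }
      sqlite3_finalize(pStmt);
      sqlite3_close(db);
      return 0;
    }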
@@ -154421,11 +158227,194 @@ static void updateDeleteLimitError(
 #  error too many tokens in the grammar
 #endif
 /**************** End of %include directives **********************************/
-/* These constants specify the various numeric values for terminal symbols
-** in a format understandable to "makeheaders".  This section is blank unless
-** "lemon" is run with the "-m" command-line option.
-***************** Begin makeheaders token definitions *************************/
-/**************** End makeheaders token definitions ***************************/
+/* These constants specify the various numeric values for terminal symbols.
+***************** Begin token definitions *************************************/
+#ifndef TK_SEMI
+#define TK_SEMI 1
+#define TK_EXPLAIN 2
+#define TK_QUERY 3
+#define TK_PLAN 4
+#define TK_BEGIN 5
+#define TK_TRANSACTION 6
+#define TK_DEFERRED 7
+#define TK_IMMEDIATE 8
+#define TK_EXCLUSIVE 9
+#define TK_COMMIT 10
+#define TK_END 11
+#define TK_ROLLBACK 12
+#define TK_SAVEPOINT 13
+#define TK_RELEASE 14
+#define TK_TO 15
+#define TK_TABLE 16
+#define TK_CREATE 17
+#define TK_IF 18
+#define TK_NOT 19
+#define TK_EXISTS 20
+#define TK_TEMP 21
+#define TK_LP 22
+#define TK_RP 23
+#define TK_AS 24
+#define TK_WITHOUT 25
+#define TK_COMMA 26
+#define TK_ABORT 27
+#define TK_ACTION 28
+#define TK_AFTER 29
+#define TK_ANALYZE 30
+#define TK_ASC 31
+#define TK_ATTACH 32
+#define TK_BEFORE 33
+#define TK_BY 34
+#define TK_CASCADE 35
+#define TK_CAST 36
+#define TK_CONFLICT 37
+#define TK_DATABASE 38
+#define TK_DESC 39
+#define TK_DETACH 40
+#define TK_EACH 41
+#define TK_FAIL 42
+#define TK_OR 43
+#define TK_AND 44
+#define TK_IS 45
+#define TK_MATCH 46
+#define TK_LIKE_KW 47
+#define TK_BETWEEN 48
+#define TK_IN 49
+#define TK_ISNULL 50
+#define TK_NOTNULL 51
+#define TK_NE 52
+#define TK_EQ 53
+#define TK_GT 54
+#define TK_LE 55
+#define TK_LT 56
+#define TK_GE 57
+#define TK_ESCAPE 58
+#define TK_ID 59
+#define TK_COLUMNKW 60
+#define TK_DO 61
+#define TK_FOR 62
+#define TK_IGNORE 63
+#define TK_INITIALLY 64
+#define TK_INSTEAD 65
+#define TK_NO 66
+#define TK_KEY 67
+#define TK_OF 68
+#define TK_OFFSET 69
+#define TK_PRAGMA 70
+#define TK_RAISE 71
+#define TK_RECURSIVE 72
+#define TK_REPLACE 73
+#define TK_RESTRICT 74
+#define TK_ROW 75
+#define TK_ROWS 76
+#define TK_TRIGGER 77
+#define TK_VACUUM 78
+#define TK_VIEW 79
+#define TK_VIRTUAL 80
+#define TK_WITH 81
+#define TK_NULLS 82
+#define TK_FIRST 83
+#define TK_LAST 84
+#define TK_CURRENT 85
+#define TK_FOLLOWING 86
+#define TK_PARTITION 87
+#define TK_PRECEDING 88
+#define TK_RANGE 89
+#define TK_UNBOUNDED 90
+#define TK_EXCLUDE 91
+#define TK_GROUPS 92
+#define TK_OTHERS 93
+#define TK_TIES 94
+#define TK_GENERATED 95
+#define TK_ALWAYS 96
+#define TK_MATERIALIZED 97
+#define TK_REINDEX 98
+#define TK_RENAME 99
+#define TK_CTIME_KW 100
+#define TK_ANY 101
+#define TK_BITAND 102
+#define TK_BITOR 103
+#define TK_LSHIFT 104
+#define TK_RSHIFT 105
+#define TK_PLUS 106
+#define TK_MINUS 107
+#define TK_STAR 108
+#define TK_SLASH 109
+#define TK_REM 110
+#define TK_CONCAT 111
+#define TK_COLLATE 112
+#define TK_BITNOT 113
+#define TK_ON 114
+#define TK_INDEXED 115
+#define TK_STRING 116
+#define TK_JOIN_KW 117
+#define TK_CONSTRAINT 118
+#define TK_DEFAULT 119
+#define TK_NULL 120
+#define TK_PRIMARY 121
+#define TK_UNIQUE 122
+#define TK_CHECK 123
+#define TK_REFERENCES 124
+#define TK_AUTOINCR 125
+#define TK_INSERT 126
+#define TK_DELETE 127
+#define TK_UPDATE 128
+#define TK_SET 129
+#define TK_DEFERRABLE 130
+#define TK_FOREIGN 131
+#define TK_DROP 132
+#define TK_UNION 133
+#define TK_ALL 134
+#define TK_EXCEPT 135
+#define TK_INTERSECT 136
+#define TK_SELECT 137
+#define TK_VALUES 138
+#define TK_DISTINCT 139
+#define TK_DOT 140
+#define TK_FROM 141
+#define TK_JOIN 142
+#define TK_USING 143
+#define TK_ORDER 144
+#define TK_GROUP 145
+#define TK_HAVING 146
+#define TK_LIMIT 147
+#define TK_WHERE 148
+#define TK_RETURNING 149
+#define TK_INTO 150
+#define TK_NOTHING 151
+#define TK_FLOAT 152
+#define TK_BLOB 153
+#define TK_INTEGER 154
+#define TK_VARIABLE 155
+#define TK_CASE 156
+#define TK_WHEN 157
+#define TK_THEN 158
+#define TK_ELSE 159
+#define TK_INDEX 160
+#define TK_ALTER 161
+#define TK_ADD 162
+#define TK_WINDOW 163
+#define TK_OVER 164
+#define TK_FILTER 165
+#define TK_COLUMN 166
+#define TK_AGG_FUNCTION 167
+#define TK_AGG_COLUMN 168
+#define TK_TRUEFALSE 169
+#define TK_ISNOT 170
+#define TK_FUNCTION 171
+#define TK_UMINUS 172
+#define TK_UPLUS 173
+#define TK_TRUTH 174
+#define TK_REGISTER 175
+#define TK_VECTOR 176
+#define TK_SELECT_COLUMN 177
+#define TK_IF_NULL_ROW 178
+#define TK_ASTERISK 179
+#define TK_SPAN 180
+#define TK_ERROR 181
+#define TK_SPACE 182
+#define TK_ILLEGAL 183
+#endif
+/**************** End token definitions ***************************************/
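The TK_* codes above are internal token numbers assigned by Lemon and are renumbered whenever the grammar is regenerated, as this diff shows. For inspecting the keyword set from outside, SQLite exposes a public keyword API; a minimal sketch (illustrative only, not part of the patch):

    /* Compile with: cc keywords.c -lsqlite3 */
    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      int i, n = sqlite3_keyword_count();
      printf("%d keywords in this build\n", n);
      for(i=0; i<n; i++){
        const char *z;
        int nByte;
        if( sqlite3_keyword_name(i, &z, &nByte)==SQLITE_OK ){
          printf("  %.*s\n", nByte, z);
        }
      }
      /* Point query for the MATERIALIZED keyword (TK_MATERIALIZED above,
      ** new in this grammar): */
      printf("MATERIALIZED is %sa keyword\n",
          sqlite3_keyword_check("MATERIALIZED", 12) ? "" : "not ");
      return 0;
    }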

 /* The next section is a series of control #defines.
 ** various aspects of the generated parser.
@@ -154483,28 +158472,29 @@ static void updateDeleteLimitError(
 #endif
 /************* Begin control #defines *****************************************/
 #define YYCODETYPE unsigned short int
-#define YYNOCODE 310
+#define YYNOCODE 317
 #define YYACTIONTYPE unsigned short int
-#define YYWILDCARD 100
+#define YYWILDCARD 101
 #define sqlite3ParserTOKENTYPE Token
 typedef union {
   int yyinit;
   sqlite3ParserTOKENTYPE yy0;
-  SrcList* yy47;
-  u8 yy58;
-  struct FrameBound yy77;
-  With* yy131;
-  int yy192;
-  Expr* yy202;
-  struct {int value; int mask;} yy207;
-  struct TrigEvent yy230;
-  ExprList* yy242;
-  Window* yy303;
-  Upsert* yy318;
-  const char* yy436;
-  TriggerStep* yy447;
-  Select* yy539;
-  IdList* yy600;
+  Window* yy49;
+  ExprList* yy70;
+  Select* yy81;
+  With* yy103;
+  struct FrameBound yy117;
+  struct {int value; int mask;} yy139;
+  SrcList* yy153;
+  TriggerStep* yy157;
+  Upsert* yy190;
+  struct TrigEvent yy262;
+  Cte* yy329;
+  int yy376;
+  Expr* yy404;
+  IdList* yy436;
+  const char* yy504;
+  u8 yy552;
 } YYMINORTYPE;
 #ifndef YYSTACKDEPTH
 #define YYSTACKDEPTH 100
@@ -154520,18 +158510,18 @@ typedef union {
 #define sqlite3ParserCTX_FETCH Parse *pParse=yypParser->pParse;
 #define sqlite3ParserCTX_STORE yypParser->pParse=pParse;
 #define YYFALLBACK 1
-#define YYNSTATE 557
-#define YYNRULE 385
-#define YYNRULE_WITH_ACTION 325
-#define YYNTOKEN 181
-#define YY_MAX_SHIFT 556
-#define YY_MIN_SHIFTREDUCE 807
-#define YY_MAX_SHIFTREDUCE 1191
-#define YY_ERROR_ACTION 1192
-#define YY_ACCEPT_ACTION 1193
-#define YY_NO_ACTION 1194
-#define YY_MIN_REDUCE 1195
-#define YY_MAX_REDUCE 1579
+#define YYNSTATE 574
+#define YYNRULE 398
+#define YYNRULE_WITH_ACTION 337
+#define YYNTOKEN 184
+#define YY_MAX_SHIFT 573
+#define YY_MIN_SHIFTREDUCE 829
+#define YY_MAX_SHIFTREDUCE 1226
+#define YY_ERROR_ACTION 1227
+#define YY_ACCEPT_ACTION 1228
+#define YY_NO_ACTION 1229
+#define YY_MIN_REDUCE 1230
+#define YY_MAX_REDUCE 1627
 /************* End control #defines *******************************************/
 #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
@@ -154598,588 +158588,603 @@ typedef union {
 ** yy_default[] Default action for each state.
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (1974) +#define YY_ACTTAB_COUNT (2025) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 550, 1226, 550, 455, 1264, 550, 1243, 550, 114, 111, - /* 10 */ 211, 550, 1541, 550, 1264, 527, 114, 111, 211, 396, - /* 20 */ 1236, 348, 42, 42, 42, 42, 1229, 42, 42, 71, - /* 30 */ 71, 941, 1228, 71, 71, 71, 71, 1466, 1497, 942, - /* 40 */ 824, 457, 6, 121, 122, 112, 1169, 1169, 1010, 1013, - /* 50 */ 1003, 1003, 119, 119, 120, 120, 120, 120, 1547, 396, - /* 60 */ 1362, 1521, 556, 2, 1197, 194, 532, 440, 143, 291, - /* 70 */ 532, 136, 532, 375, 261, 508, 272, 389, 1277, 531, - /* 80 */ 507, 497, 164, 121, 122, 112, 1169, 1169, 1010, 1013, - /* 90 */ 1003, 1003, 119, 119, 120, 120, 120, 120, 1362, 446, - /* 100 */ 1518, 118, 118, 118, 118, 117, 117, 116, 116, 116, - /* 110 */ 115, 428, 266, 266, 266, 266, 1502, 362, 1504, 439, - /* 120 */ 361, 1502, 521, 528, 1489, 547, 1118, 547, 1118, 396, - /* 130 */ 409, 241, 208, 114, 111, 211, 98, 290, 541, 221, - /* 140 */ 1033, 118, 118, 118, 118, 117, 117, 116, 116, 116, - /* 150 */ 115, 428, 1146, 121, 122, 112, 1169, 1169, 1010, 1013, - /* 160 */ 1003, 1003, 119, 119, 120, 120, 120, 120, 410, 432, - /* 170 */ 117, 117, 116, 116, 116, 115, 428, 1422, 472, 123, - /* 180 */ 118, 118, 118, 118, 117, 117, 116, 116, 116, 115, - /* 190 */ 428, 116, 116, 116, 115, 428, 544, 544, 544, 396, - /* 200 */ 509, 120, 120, 120, 120, 113, 1055, 1146, 1147, 1148, - /* 210 */ 1055, 118, 118, 118, 118, 117, 117, 116, 116, 116, - /* 220 */ 115, 428, 1465, 121, 122, 112, 1169, 1169, 1010, 1013, - /* 230 */ 1003, 1003, 119, 119, 120, 120, 120, 120, 396, 448, - /* 240 */ 320, 83, 467, 81, 363, 386, 1146, 80, 118, 118, - /* 250 */ 118, 118, 117, 117, 116, 116, 116, 115, 428, 179, - /* 260 */ 438, 428, 121, 122, 112, 1169, 1169, 1010, 1013, 1003, - /* 270 */ 1003, 119, 119, 120, 120, 120, 120, 438, 437, 266, - /* 280 */ 266, 118, 118, 118, 118, 117, 117, 116, 116, 116, - /* 290 */ 115, 428, 547, 1113, 907, 510, 1146, 114, 111, 211, - /* 300 */ 1435, 1146, 1147, 1148, 206, 495, 1113, 396, 453, 1113, - /* 310 */ 549, 334, 120, 120, 120, 120, 298, 1435, 1437, 17, - /* 320 */ 118, 118, 118, 118, 117, 117, 116, 116, 116, 115, - /* 330 */ 428, 121, 122, 112, 1169, 1169, 1010, 1013, 1003, 1003, - /* 340 */ 119, 119, 120, 120, 120, 120, 396, 1362, 438, 1146, - /* 350 */ 486, 1146, 1147, 1148, 1000, 1000, 1011, 1014, 449, 118, - /* 360 */ 118, 118, 118, 117, 117, 116, 116, 116, 115, 428, - /* 370 */ 121, 122, 112, 1169, 1169, 1010, 1013, 1003, 1003, 119, - /* 380 */ 119, 120, 120, 120, 120, 1058, 1058, 469, 1435, 118, - /* 390 */ 118, 118, 118, 117, 117, 116, 116, 116, 115, 428, - /* 400 */ 1146, 455, 550, 1430, 1146, 1147, 1148, 233, 970, 1146, - /* 410 */ 485, 482, 481, 171, 364, 396, 164, 411, 418, 846, - /* 420 */ 480, 164, 185, 338, 71, 71, 1247, 1004, 118, 118, - /* 430 */ 118, 118, 117, 117, 116, 116, 116, 115, 428, 121, - /* 440 */ 122, 112, 1169, 1169, 1010, 1013, 1003, 1003, 119, 119, - /* 450 */ 120, 120, 120, 120, 396, 1146, 1147, 1148, 839, 12, - /* 460 */ 318, 513, 163, 360, 1146, 1147, 1148, 114, 111, 211, - /* 470 */ 512, 290, 541, 550, 276, 180, 290, 541, 121, 122, - /* 480 */ 112, 1169, 1169, 1010, 1013, 1003, 1003, 119, 119, 120, - /* 490 */ 120, 120, 120, 349, 488, 71, 71, 118, 118, 118, - /* 500 */ 118, 117, 117, 116, 116, 116, 115, 428, 1146, 209, - /* 510 */ 415, 527, 1146, 1113, 1575, 382, 252, 269, 346, 491, - /* 520 */ 341, 490, 238, 396, 517, 368, 1113, 
1131, 337, 1113, - /* 530 */ 191, 413, 286, 32, 461, 447, 118, 118, 118, 118, - /* 540 */ 117, 117, 116, 116, 116, 115, 428, 121, 122, 112, - /* 550 */ 1169, 1169, 1010, 1013, 1003, 1003, 119, 119, 120, 120, - /* 560 */ 120, 120, 396, 1146, 1147, 1148, 991, 1146, 1147, 1148, - /* 570 */ 1146, 233, 496, 1496, 485, 482, 481, 6, 163, 550, - /* 580 */ 516, 550, 115, 428, 480, 5, 121, 122, 112, 1169, - /* 590 */ 1169, 1010, 1013, 1003, 1003, 119, 119, 120, 120, 120, - /* 600 */ 120, 13, 13, 13, 13, 118, 118, 118, 118, 117, - /* 610 */ 117, 116, 116, 116, 115, 428, 407, 506, 412, 550, - /* 620 */ 1490, 548, 1146, 896, 896, 1146, 1147, 1148, 1477, 1146, - /* 630 */ 275, 396, 812, 813, 814, 975, 426, 426, 426, 16, - /* 640 */ 16, 55, 55, 1246, 118, 118, 118, 118, 117, 117, - /* 650 */ 116, 116, 116, 115, 428, 121, 122, 112, 1169, 1169, - /* 660 */ 1010, 1013, 1003, 1003, 119, 119, 120, 120, 120, 120, - /* 670 */ 396, 1193, 1, 1, 556, 2, 1197, 1146, 1147, 1148, - /* 680 */ 194, 291, 902, 136, 1146, 1147, 1148, 901, 525, 1496, - /* 690 */ 1277, 3, 384, 6, 121, 122, 112, 1169, 1169, 1010, - /* 700 */ 1013, 1003, 1003, 119, 119, 120, 120, 120, 120, 862, - /* 710 */ 550, 928, 550, 118, 118, 118, 118, 117, 117, 116, - /* 720 */ 116, 116, 115, 428, 266, 266, 1096, 1573, 1146, 555, - /* 730 */ 1573, 1197, 13, 13, 13, 13, 291, 547, 136, 396, - /* 740 */ 489, 425, 424, 970, 348, 1277, 472, 414, 863, 279, - /* 750 */ 140, 221, 118, 118, 118, 118, 117, 117, 116, 116, - /* 760 */ 116, 115, 428, 121, 122, 112, 1169, 1169, 1010, 1013, - /* 770 */ 1003, 1003, 119, 119, 120, 120, 120, 120, 550, 266, - /* 780 */ 266, 432, 396, 1146, 1147, 1148, 1176, 834, 1176, 472, - /* 790 */ 435, 145, 547, 1150, 405, 318, 443, 304, 842, 1494, - /* 800 */ 71, 71, 416, 6, 1094, 477, 221, 100, 112, 1169, - /* 810 */ 1169, 1010, 1013, 1003, 1003, 119, 119, 120, 120, 120, - /* 820 */ 120, 118, 118, 118, 118, 117, 117, 116, 116, 116, - /* 830 */ 115, 428, 237, 1429, 550, 455, 432, 287, 990, 550, - /* 840 */ 236, 235, 234, 834, 97, 533, 433, 1269, 1269, 1150, - /* 850 */ 498, 311, 434, 842, 981, 550, 71, 71, 980, 1245, - /* 860 */ 550, 51, 51, 300, 118, 118, 118, 118, 117, 117, - /* 870 */ 116, 116, 116, 115, 428, 194, 103, 70, 70, 266, - /* 880 */ 266, 550, 71, 71, 266, 266, 30, 395, 348, 980, - /* 890 */ 980, 982, 547, 532, 1113, 332, 396, 547, 499, 401, - /* 900 */ 1474, 195, 534, 13, 13, 1362, 240, 1113, 277, 280, - /* 910 */ 1113, 280, 308, 461, 310, 337, 396, 31, 188, 423, - /* 920 */ 121, 122, 112, 1169, 1169, 1010, 1013, 1003, 1003, 119, - /* 930 */ 119, 120, 120, 120, 120, 142, 396, 369, 461, 990, - /* 940 */ 121, 122, 112, 1169, 1169, 1010, 1013, 1003, 1003, 119, - /* 950 */ 119, 120, 120, 120, 120, 981, 327, 1146, 330, 980, - /* 960 */ 121, 110, 112, 1169, 1169, 1010, 1013, 1003, 1003, 119, - /* 970 */ 119, 120, 120, 120, 120, 468, 381, 1189, 118, 118, - /* 980 */ 118, 118, 117, 117, 116, 116, 116, 115, 428, 1146, - /* 990 */ 980, 980, 982, 309, 9, 370, 244, 366, 118, 118, - /* 1000 */ 118, 118, 117, 117, 116, 116, 116, 115, 428, 317, - /* 1010 */ 550, 348, 1146, 1147, 1148, 299, 290, 541, 118, 118, - /* 1020 */ 118, 118, 117, 117, 116, 116, 116, 115, 428, 1267, - /* 1030 */ 1267, 1167, 13, 13, 278, 425, 424, 472, 396, 927, - /* 1040 */ 260, 260, 289, 1173, 1146, 1147, 1148, 189, 1175, 266, - /* 1050 */ 266, 472, 394, 547, 1190, 550, 1174, 263, 144, 493, - /* 1060 */ 926, 550, 547, 122, 112, 1169, 1169, 1010, 1013, 1003, - /* 1070 */ 1003, 119, 119, 120, 120, 120, 120, 71, 71, 1146, - /* 1080 */ 1176, 1276, 1176, 13, 13, 
902, 1074, 1167, 550, 472, - /* 1090 */ 901, 107, 542, 1495, 4, 1272, 1113, 6, 529, 1053, - /* 1100 */ 12, 1075, 1096, 1574, 316, 459, 1574, 524, 545, 1113, - /* 1110 */ 56, 56, 1113, 1493, 427, 1362, 1076, 6, 349, 285, - /* 1120 */ 118, 118, 118, 118, 117, 117, 116, 116, 116, 115, - /* 1130 */ 428, 429, 1275, 325, 1146, 1147, 1148, 882, 266, 266, - /* 1140 */ 1281, 107, 542, 539, 4, 1492, 293, 883, 1215, 6, - /* 1150 */ 210, 547, 547, 164, 294, 500, 420, 204, 545, 267, - /* 1160 */ 267, 1218, 402, 515, 503, 204, 266, 266, 400, 535, - /* 1170 */ 8, 990, 547, 523, 550, 926, 462, 105, 105, 547, - /* 1180 */ 1094, 429, 266, 266, 106, 421, 429, 552, 551, 266, - /* 1190 */ 266, 980, 522, 539, 1377, 547, 15, 15, 266, 266, - /* 1200 */ 460, 1124, 547, 266, 266, 1074, 1376, 519, 290, 541, - /* 1210 */ 550, 547, 518, 97, 448, 320, 547, 550, 926, 125, - /* 1220 */ 1075, 990, 980, 980, 982, 983, 27, 105, 105, 405, - /* 1230 */ 347, 1515, 44, 44, 106, 1076, 429, 552, 551, 57, - /* 1240 */ 57, 980, 347, 1515, 107, 542, 550, 4, 466, 405, - /* 1250 */ 214, 1124, 463, 297, 381, 1095, 538, 1313, 550, 543, - /* 1260 */ 402, 545, 290, 541, 104, 244, 102, 530, 58, 58, - /* 1270 */ 550, 199, 980, 980, 982, 983, 27, 1520, 1135, 431, - /* 1280 */ 59, 59, 270, 237, 429, 138, 95, 379, 379, 378, - /* 1290 */ 255, 376, 60, 60, 821, 1184, 539, 550, 273, 550, - /* 1300 */ 1167, 1312, 393, 392, 550, 442, 550, 215, 210, 296, - /* 1310 */ 519, 853, 550, 265, 208, 520, 1480, 295, 274, 61, - /* 1320 */ 61, 62, 62, 312, 990, 109, 45, 45, 46, 46, - /* 1330 */ 105, 105, 1190, 926, 47, 47, 345, 106, 550, 429, - /* 1340 */ 552, 551, 1546, 550, 980, 871, 344, 217, 550, 941, - /* 1350 */ 401, 107, 542, 218, 4, 156, 1167, 942, 158, 550, - /* 1360 */ 49, 49, 1166, 550, 268, 50, 50, 550, 545, 1454, - /* 1370 */ 63, 63, 550, 1453, 216, 980, 980, 982, 983, 27, - /* 1380 */ 450, 64, 64, 550, 464, 65, 65, 550, 322, 14, - /* 1390 */ 14, 429, 1309, 550, 66, 66, 1091, 550, 141, 383, - /* 1400 */ 38, 550, 967, 539, 326, 127, 127, 550, 397, 67, - /* 1410 */ 67, 550, 329, 290, 541, 52, 52, 519, 550, 68, - /* 1420 */ 68, 849, 518, 69, 69, 403, 165, 861, 860, 53, - /* 1430 */ 53, 990, 315, 151, 151, 97, 436, 105, 105, 331, - /* 1440 */ 152, 152, 530, 1052, 106, 1052, 429, 552, 551, 1135, - /* 1450 */ 431, 980, 1036, 270, 972, 239, 333, 243, 379, 379, - /* 1460 */ 378, 255, 376, 944, 945, 821, 1300, 550, 220, 550, - /* 1470 */ 107, 542, 550, 4, 550, 1260, 199, 849, 215, 1040, - /* 1480 */ 296, 1534, 980, 980, 982, 983, 27, 545, 295, 76, - /* 1490 */ 76, 54, 54, 984, 72, 72, 128, 128, 868, 869, - /* 1500 */ 107, 542, 550, 4, 1051, 550, 1051, 537, 473, 550, - /* 1510 */ 429, 550, 454, 1244, 550, 243, 550, 545, 217, 550, - /* 1520 */ 456, 197, 539, 243, 73, 73, 156, 129, 129, 158, - /* 1530 */ 340, 130, 130, 126, 126, 1040, 150, 150, 149, 149, - /* 1540 */ 429, 134, 134, 321, 478, 216, 97, 239, 335, 984, - /* 1550 */ 990, 97, 539, 350, 351, 550, 105, 105, 906, 935, - /* 1560 */ 550, 899, 243, 106, 109, 429, 552, 551, 550, 1509, - /* 1570 */ 980, 832, 99, 542, 139, 4, 550, 133, 133, 397, - /* 1580 */ 990, 1321, 131, 131, 290, 541, 105, 105, 1361, 545, - /* 1590 */ 132, 132, 1296, 106, 1307, 429, 552, 551, 75, 75, - /* 1600 */ 980, 980, 980, 982, 983, 27, 550, 436, 900, 1293, - /* 1610 */ 536, 109, 429, 1367, 550, 1225, 1217, 1206, 258, 550, - /* 1620 */ 353, 550, 1205, 11, 539, 1207, 1528, 355, 77, 77, - /* 1630 */ 380, 980, 980, 982, 983, 27, 74, 74, 357, 213, - /* 1640 */ 303, 43, 43, 48, 48, 441, 314, 201, 307, 1354, - /* 1650 */ 319, 359, 990, 
458, 483, 1243, 343, 192, 105, 105, - /* 1660 */ 1426, 1425, 193, 540, 205, 106, 1531, 429, 552, 551, - /* 1670 */ 1184, 167, 980, 270, 247, 1473, 1471, 1181, 379, 379, - /* 1680 */ 378, 255, 376, 200, 373, 821, 404, 83, 79, 82, - /* 1690 */ 1431, 452, 177, 124, 530, 1346, 95, 301, 215, 302, - /* 1700 */ 296, 161, 169, 980, 980, 982, 983, 27, 295, 1343, - /* 1710 */ 305, 306, 444, 445, 1351, 172, 35, 173, 174, 175, - /* 1720 */ 476, 223, 387, 385, 36, 451, 465, 1357, 181, 388, - /* 1730 */ 88, 471, 227, 1420, 186, 474, 259, 1442, 217, 229, - /* 1740 */ 230, 324, 328, 390, 492, 231, 156, 1263, 1208, 158, - /* 1750 */ 417, 1254, 90, 853, 1262, 1261, 206, 419, 1514, 511, - /* 1760 */ 1304, 94, 1545, 352, 354, 216, 1305, 1303, 283, 1233, - /* 1770 */ 284, 391, 1232, 1544, 342, 1231, 1543, 356, 245, 1302, - /* 1780 */ 1253, 502, 505, 358, 246, 1500, 1499, 422, 10, 367, - /* 1790 */ 101, 1328, 1327, 514, 1406, 96, 253, 1214, 34, 397, - /* 1800 */ 553, 1141, 254, 365, 290, 541, 256, 257, 554, 1286, - /* 1810 */ 372, 196, 1285, 371, 1203, 1198, 153, 1458, 137, 1459, - /* 1820 */ 154, 1457, 281, 1456, 155, 808, 430, 436, 202, 398, - /* 1830 */ 203, 78, 288, 198, 292, 212, 271, 1050, 135, 1048, - /* 1840 */ 964, 157, 219, 168, 170, 885, 313, 1064, 222, 176, - /* 1850 */ 968, 159, 406, 84, 408, 178, 85, 86, 87, 160, - /* 1860 */ 1067, 224, 1063, 225, 146, 166, 399, 18, 226, 323, - /* 1870 */ 1056, 1178, 470, 243, 182, 228, 183, 37, 823, 475, - /* 1880 */ 344, 232, 479, 487, 184, 89, 19, 851, 336, 20, - /* 1890 */ 339, 484, 91, 282, 162, 147, 864, 92, 494, 93, - /* 1900 */ 1129, 148, 1016, 1099, 39, 501, 1100, 40, 504, 207, - /* 1910 */ 262, 264, 934, 187, 929, 109, 1119, 1115, 1117, 7, - /* 1920 */ 242, 1103, 33, 1123, 21, 526, 22, 23, 24, 1122, - /* 1930 */ 25, 190, 97, 26, 1031, 1017, 1015, 1019, 1073, 1020, - /* 1940 */ 1072, 249, 248, 28, 41, 895, 985, 833, 108, 29, - /* 1950 */ 377, 546, 250, 374, 1137, 1136, 1194, 1194, 251, 1194, - /* 1960 */ 1194, 1194, 1194, 1194, 1194, 1194, 1194, 1194, 1194, 1194, - /* 1970 */ 1194, 1194, 1536, 1535, + /* 0 */ 567, 1299, 567, 1278, 168, 1261, 115, 112, 218, 377, + /* 10 */ 567, 1299, 378, 567, 492, 567, 115, 112, 218, 410, + /* 20 */ 1304, 1304, 41, 41, 41, 41, 518, 1508, 524, 1302, + /* 30 */ 1302, 963, 41, 41, 1264, 71, 71, 51, 51, 964, + /* 40 */ 561, 561, 561, 122, 123, 113, 1204, 1204, 1039, 1042, + /* 50 */ 1032, 1032, 120, 120, 121, 121, 121, 121, 418, 410, + /* 60 */ 273, 273, 273, 273, 115, 112, 218, 115, 112, 218, + /* 70 */ 197, 268, 549, 564, 519, 564, 211, 567, 389, 248, + /* 80 */ 215, 525, 403, 122, 123, 113, 1204, 1204, 1039, 1042, + /* 90 */ 1032, 1032, 120, 120, 121, 121, 121, 121, 544, 13, + /* 100 */ 13, 1263, 119, 119, 119, 119, 118, 118, 117, 117, + /* 110 */ 117, 116, 445, 1180, 423, 197, 450, 324, 516, 1543, + /* 120 */ 1549, 376, 1551, 6, 375, 1180, 1152, 398, 1152, 410, + /* 130 */ 1549, 538, 115, 112, 218, 1419, 99, 30, 121, 121, + /* 140 */ 121, 121, 119, 119, 119, 119, 118, 118, 117, 117, + /* 150 */ 117, 116, 445, 122, 123, 113, 1204, 1204, 1039, 1042, + /* 160 */ 1032, 1032, 120, 120, 121, 121, 121, 121, 31, 1180, + /* 170 */ 1181, 1182, 241, 361, 1562, 505, 502, 501, 321, 124, + /* 180 */ 323, 1180, 1181, 1182, 1180, 500, 119, 119, 119, 119, + /* 190 */ 118, 118, 117, 117, 117, 116, 445, 139, 96, 410, + /* 200 */ 121, 121, 121, 121, 114, 117, 117, 117, 116, 445, + /* 210 */ 545, 1536, 119, 119, 119, 119, 118, 118, 117, 117, + /* 220 */ 117, 116, 445, 122, 123, 113, 1204, 1204, 1039, 1042, + /* 230 */ 1032, 1032, 120, 120, 
121, 121, 121, 121, 410, 445, + /* 240 */ 1180, 1181, 1182, 81, 443, 443, 443, 80, 119, 119, + /* 250 */ 119, 119, 118, 118, 117, 117, 117, 116, 445, 492, + /* 260 */ 1180, 322, 122, 123, 113, 1204, 1204, 1039, 1042, 1032, + /* 270 */ 1032, 120, 120, 121, 121, 121, 121, 497, 1029, 1029, + /* 280 */ 1040, 1043, 119, 119, 119, 119, 118, 118, 117, 117, + /* 290 */ 117, 116, 445, 1588, 999, 1228, 1, 1, 573, 2, + /* 300 */ 1232, 1271, 137, 1507, 245, 305, 477, 140, 410, 864, + /* 310 */ 565, 1180, 918, 918, 1312, 363, 1180, 1181, 1182, 466, + /* 320 */ 334, 119, 119, 119, 119, 118, 118, 117, 117, 117, + /* 330 */ 116, 445, 122, 123, 113, 1204, 1204, 1039, 1042, 1032, + /* 340 */ 1032, 120, 120, 121, 121, 121, 121, 332, 273, 273, + /* 350 */ 1019, 83, 1033, 429, 1568, 573, 2, 1232, 304, 558, + /* 360 */ 929, 564, 305, 948, 140, 864, 1010, 1180, 1181, 1182, + /* 370 */ 1009, 1312, 415, 213, 515, 229, 119, 119, 119, 119, + /* 380 */ 118, 118, 117, 117, 117, 116, 445, 523, 351, 116, + /* 390 */ 445, 119, 119, 119, 119, 118, 118, 117, 117, 117, + /* 400 */ 116, 445, 1009, 1009, 1011, 273, 273, 449, 567, 16, + /* 410 */ 16, 1594, 567, 1544, 567, 410, 1180, 6, 564, 348, + /* 420 */ 182, 118, 118, 117, 117, 117, 116, 445, 420, 142, + /* 430 */ 71, 71, 229, 567, 71, 71, 55, 55, 203, 122, + /* 440 */ 123, 113, 1204, 1204, 1039, 1042, 1032, 1032, 120, 120, + /* 450 */ 121, 121, 121, 121, 217, 13, 13, 1180, 410, 572, + /* 460 */ 1404, 1232, 506, 137, 449, 168, 305, 549, 140, 1184, + /* 470 */ 428, 549, 1180, 1181, 1182, 1312, 548, 442, 441, 948, + /* 480 */ 517, 456, 122, 123, 113, 1204, 1204, 1039, 1042, 1032, + /* 490 */ 1032, 120, 120, 121, 121, 121, 121, 315, 119, 119, + /* 500 */ 119, 119, 118, 118, 117, 117, 117, 116, 445, 273, + /* 510 */ 273, 1147, 420, 1180, 1181, 1182, 547, 567, 1147, 304, + /* 520 */ 558, 1565, 564, 1211, 1147, 1211, 1184, 1147, 410, 534, + /* 530 */ 425, 1147, 868, 183, 1147, 143, 229, 566, 32, 71, + /* 540 */ 71, 119, 119, 119, 119, 118, 118, 117, 117, 117, + /* 550 */ 116, 445, 122, 123, 113, 1204, 1204, 1039, 1042, 1032, + /* 560 */ 1032, 120, 120, 121, 121, 121, 121, 410, 449, 241, + /* 570 */ 1180, 861, 505, 502, 501, 1180, 530, 189, 245, 542, + /* 580 */ 1543, 282, 500, 374, 6, 567, 533, 481, 5, 279, + /* 590 */ 1019, 122, 123, 113, 1204, 1204, 1039, 1042, 1032, 1032, + /* 600 */ 120, 120, 121, 121, 121, 121, 1010, 13, 13, 1418, + /* 610 */ 1009, 119, 119, 119, 119, 118, 118, 117, 117, 117, + /* 620 */ 116, 445, 430, 273, 273, 1180, 1180, 1181, 1182, 1623, + /* 630 */ 396, 1180, 1181, 1182, 1180, 346, 564, 410, 529, 365, + /* 640 */ 434, 1165, 1009, 1009, 1011, 352, 415, 361, 1562, 492, + /* 650 */ 119, 119, 119, 119, 118, 118, 117, 117, 117, 116, + /* 660 */ 445, 122, 123, 113, 1204, 1204, 1039, 1042, 1032, 1032, + /* 670 */ 120, 120, 121, 121, 121, 121, 410, 834, 835, 836, + /* 680 */ 1020, 1180, 1181, 1182, 400, 285, 148, 1316, 304, 558, + /* 690 */ 1180, 1181, 1182, 1471, 216, 3, 341, 137, 344, 564, + /* 700 */ 122, 123, 113, 1204, 1204, 1039, 1042, 1032, 1032, 120, + /* 710 */ 120, 121, 121, 121, 121, 567, 508, 950, 273, 273, + /* 720 */ 119, 119, 119, 119, 118, 118, 117, 117, 117, 116, + /* 730 */ 445, 564, 1180, 431, 567, 455, 98, 13, 13, 259, + /* 740 */ 276, 360, 511, 355, 510, 246, 410, 365, 473, 1534, + /* 750 */ 1004, 351, 293, 304, 558, 1593, 71, 71, 893, 119, + /* 760 */ 119, 119, 119, 118, 118, 117, 117, 117, 116, 445, + /* 770 */ 122, 123, 113, 1204, 1204, 1039, 1042, 1032, 1032, 120, + /* 780 */ 120, 121, 121, 121, 121, 410, 1147, 1082, 1180, 1181, + /* 
790 */ 1182, 420, 1084, 300, 150, 999, 1084, 365, 365, 1147, + /* 800 */ 365, 382, 1147, 481, 567, 244, 243, 242, 1282, 122, + /* 810 */ 123, 113, 1204, 1204, 1039, 1042, 1032, 1032, 120, 120, + /* 820 */ 121, 121, 121, 121, 567, 884, 13, 13, 487, 119, + /* 830 */ 119, 119, 119, 118, 118, 117, 117, 117, 116, 445, + /* 840 */ 1180, 191, 544, 567, 147, 149, 13, 13, 332, 461, + /* 850 */ 318, 1087, 1087, 489, 1541, 410, 509, 1534, 6, 1518, + /* 860 */ 284, 192, 1281, 145, 885, 71, 71, 492, 119, 119, + /* 870 */ 119, 119, 118, 118, 117, 117, 117, 116, 445, 122, + /* 880 */ 123, 113, 1204, 1204, 1039, 1042, 1032, 1032, 120, 120, + /* 890 */ 121, 121, 121, 121, 567, 475, 1180, 1181, 1182, 410, + /* 900 */ 856, 331, 301, 466, 334, 1520, 270, 1534, 1534, 948, + /* 910 */ 1535, 1311, 313, 9, 846, 251, 71, 71, 481, 432, + /* 920 */ 146, 492, 38, 949, 101, 113, 1204, 1204, 1039, 1042, + /* 930 */ 1032, 1032, 120, 120, 121, 121, 121, 121, 119, 119, + /* 940 */ 119, 119, 118, 118, 117, 117, 117, 116, 445, 567, + /* 950 */ 1201, 1103, 567, 440, 567, 1537, 567, 856, 1126, 1621, + /* 960 */ 458, 290, 1621, 550, 251, 1307, 1104, 267, 267, 281, + /* 970 */ 408, 70, 70, 464, 71, 71, 71, 71, 13, 13, + /* 980 */ 564, 1105, 119, 119, 119, 119, 118, 118, 117, 117, + /* 990 */ 117, 116, 445, 546, 104, 273, 273, 273, 273, 1201, + /* 1000 */ 217, 1472, 904, 475, 454, 567, 1477, 1201, 564, 451, + /* 1010 */ 564, 549, 905, 444, 410, 1062, 292, 274, 274, 198, + /* 1020 */ 551, 454, 453, 1477, 1479, 948, 459, 56, 56, 414, + /* 1030 */ 564, 1126, 1622, 383, 410, 1622, 408, 1124, 122, 123, + /* 1040 */ 113, 1204, 1204, 1039, 1042, 1032, 1032, 120, 120, 121, + /* 1050 */ 121, 121, 121, 1464, 410, 12, 1201, 1516, 122, 123, + /* 1060 */ 113, 1204, 1204, 1039, 1042, 1032, 1032, 120, 120, 121, + /* 1070 */ 121, 121, 121, 308, 475, 126, 363, 286, 122, 111, + /* 1080 */ 113, 1204, 1204, 1039, 1042, 1032, 1032, 120, 120, 121, + /* 1090 */ 121, 121, 121, 309, 454, 475, 1477, 119, 119, 119, + /* 1100 */ 119, 118, 118, 117, 117, 117, 116, 445, 1180, 567, + /* 1110 */ 1124, 486, 567, 312, 437, 483, 197, 119, 119, 119, + /* 1120 */ 119, 118, 118, 117, 117, 117, 116, 445, 409, 12, + /* 1130 */ 540, 15, 15, 482, 43, 43, 513, 119, 119, 119, + /* 1140 */ 119, 118, 118, 117, 117, 117, 116, 445, 289, 539, + /* 1150 */ 294, 567, 294, 395, 1224, 442, 441, 410, 1158, 407, + /* 1160 */ 406, 1404, 924, 1208, 1180, 1181, 1182, 923, 1210, 291, + /* 1170 */ 1310, 1253, 416, 57, 57, 492, 1209, 567, 560, 416, + /* 1180 */ 1180, 1348, 123, 113, 1204, 1204, 1039, 1042, 1032, 1032, + /* 1190 */ 120, 120, 121, 121, 121, 121, 1404, 1147, 567, 44, + /* 1200 */ 44, 1211, 194, 1211, 273, 273, 1404, 465, 541, 1158, + /* 1210 */ 1147, 108, 559, 1147, 4, 395, 1125, 564, 1542, 339, + /* 1220 */ 58, 58, 6, 1250, 1103, 384, 1404, 380, 562, 1540, + /* 1230 */ 567, 426, 1225, 6, 304, 558, 1180, 1181, 1182, 1104, + /* 1240 */ 119, 119, 119, 119, 118, 118, 117, 117, 117, 116, + /* 1250 */ 445, 446, 59, 59, 1105, 520, 1539, 273, 273, 567, + /* 1260 */ 6, 567, 110, 556, 567, 532, 427, 417, 169, 552, + /* 1270 */ 564, 108, 559, 137, 4, 555, 488, 272, 215, 222, + /* 1280 */ 211, 60, 60, 61, 61, 98, 62, 62, 562, 273, + /* 1290 */ 273, 567, 1019, 471, 1225, 567, 438, 567, 106, 106, + /* 1300 */ 8, 924, 564, 273, 273, 107, 923, 446, 569, 568, + /* 1310 */ 567, 446, 1009, 45, 45, 468, 564, 46, 46, 47, + /* 1320 */ 47, 84, 202, 556, 1219, 408, 472, 567, 205, 304, + /* 1330 */ 558, 567, 49, 49, 567, 526, 408, 536, 567, 871, + /* 1340 */ 567, 105, 535, 103, 1009, 1009, 1011, 
1012, 27, 50, + /* 1350 */ 50, 567, 1019, 63, 63, 479, 64, 64, 106, 106, + /* 1360 */ 65, 65, 14, 14, 17, 107, 567, 446, 569, 568, + /* 1370 */ 567, 303, 1009, 66, 66, 567, 226, 567, 963, 567, + /* 1380 */ 547, 408, 1200, 1347, 875, 278, 964, 460, 128, 128, + /* 1390 */ 567, 1069, 67, 67, 567, 206, 871, 52, 52, 68, + /* 1400 */ 68, 69, 69, 421, 1009, 1009, 1011, 1012, 27, 1567, + /* 1410 */ 1169, 448, 53, 53, 277, 1523, 156, 156, 307, 393, + /* 1420 */ 393, 392, 262, 390, 1169, 448, 843, 325, 277, 108, + /* 1430 */ 559, 527, 4, 393, 393, 392, 262, 390, 567, 223, + /* 1440 */ 843, 311, 330, 1496, 1121, 98, 562, 397, 1069, 310, + /* 1450 */ 567, 480, 567, 223, 567, 311, 883, 882, 1013, 277, + /* 1460 */ 157, 157, 467, 310, 393, 393, 392, 262, 390, 446, + /* 1470 */ 522, 843, 76, 76, 54, 54, 72, 72, 359, 225, + /* 1480 */ 567, 556, 275, 567, 223, 329, 311, 161, 358, 469, + /* 1490 */ 135, 567, 228, 225, 310, 536, 567, 206, 890, 891, + /* 1500 */ 537, 161, 129, 129, 135, 73, 73, 224, 966, 967, + /* 1510 */ 1019, 567, 287, 130, 130, 1013, 106, 106, 131, 131, + /* 1520 */ 567, 224, 567, 107, 225, 446, 569, 568, 1001, 1280, + /* 1530 */ 1009, 250, 161, 127, 127, 135, 108, 559, 1081, 4, + /* 1540 */ 1081, 411, 155, 155, 154, 154, 304, 558, 1130, 567, + /* 1550 */ 1335, 567, 224, 562, 474, 411, 567, 250, 567, 1495, + /* 1560 */ 304, 558, 1009, 1009, 1011, 1012, 27, 567, 484, 336, + /* 1570 */ 452, 136, 136, 134, 134, 1344, 446, 340, 132, 132, + /* 1580 */ 133, 133, 567, 1080, 452, 1080, 411, 567, 556, 75, + /* 1590 */ 75, 304, 558, 343, 345, 347, 108, 559, 567, 4, + /* 1600 */ 1581, 299, 536, 567, 77, 77, 1295, 535, 476, 74, + /* 1610 */ 74, 250, 1279, 562, 354, 452, 335, 1019, 364, 98, + /* 1620 */ 42, 42, 1356, 106, 106, 48, 48, 1403, 498, 1331, + /* 1630 */ 107, 247, 446, 569, 568, 349, 446, 1009, 98, 1065, + /* 1640 */ 957, 921, 247, 250, 110, 1556, 554, 854, 556, 922, + /* 1650 */ 144, 1342, 110, 553, 1409, 1260, 1252, 1241, 1240, 1242, + /* 1660 */ 1575, 1328, 208, 394, 493, 265, 367, 200, 369, 1009, + /* 1670 */ 1009, 1011, 1012, 27, 11, 280, 221, 1019, 327, 478, + /* 1680 */ 1278, 371, 212, 106, 106, 928, 1390, 328, 288, 317, + /* 1690 */ 107, 457, 446, 569, 568, 283, 333, 1009, 1395, 503, + /* 1700 */ 357, 320, 1468, 108, 559, 1467, 4, 1578, 1394, 401, + /* 1710 */ 1219, 171, 254, 373, 387, 207, 195, 196, 1515, 557, + /* 1720 */ 562, 1513, 419, 1216, 100, 559, 83, 4, 204, 1009, + /* 1730 */ 1009, 1011, 1012, 27, 180, 125, 547, 219, 79, 82, + /* 1740 */ 1385, 562, 316, 446, 35, 1391, 166, 173, 1378, 319, + /* 1750 */ 462, 175, 463, 1473, 496, 556, 176, 231, 96, 177, + /* 1760 */ 178, 1399, 399, 1397, 446, 36, 1396, 184, 485, 235, + /* 1770 */ 470, 402, 1462, 491, 1484, 188, 556, 89, 512, 266, + /* 1780 */ 237, 338, 494, 342, 1019, 238, 404, 433, 1243, 239, + /* 1790 */ 106, 106, 91, 1289, 1298, 1592, 1591, 107, 875, 446, + /* 1800 */ 569, 568, 1297, 213, 1009, 1019, 1296, 435, 1288, 1561, + /* 1810 */ 521, 106, 106, 405, 1268, 1267, 356, 1266, 107, 1590, + /* 1820 */ 446, 569, 568, 297, 298, 1009, 436, 362, 366, 528, + /* 1830 */ 95, 252, 253, 439, 1339, 1547, 1009, 1009, 1011, 1012, + /* 1840 */ 27, 10, 381, 302, 1363, 1546, 102, 1321, 97, 531, + /* 1850 */ 260, 1249, 34, 570, 1340, 1338, 1175, 1009, 1009, 1011, + /* 1860 */ 1012, 27, 368, 1320, 370, 1362, 1337, 372, 379, 199, + /* 1870 */ 385, 386, 261, 263, 264, 158, 1500, 1501, 571, 1238, + /* 1880 */ 1499, 1233, 1498, 159, 209, 78, 1448, 141, 295, 210, + /* 1890 */ 830, 447, 201, 306, 220, 1079, 160, 138, 1077, 314, + /* 1900 */ 172, 
162, 1200, 174, 227, 907, 230, 326, 1093, 179, + /* 1910 */ 163, 164, 422, 85, 412, 413, 181, 170, 86, 424, + /* 1920 */ 87, 165, 88, 1096, 232, 233, 1092, 151, 18, 234, + /* 1930 */ 1085, 337, 185, 1213, 250, 490, 236, 186, 37, 845, + /* 1940 */ 495, 358, 240, 350, 499, 187, 90, 167, 19, 20, + /* 1950 */ 504, 873, 353, 507, 92, 93, 296, 886, 152, 514, + /* 1960 */ 94, 1163, 153, 1045, 1132, 39, 190, 214, 1131, 269, + /* 1970 */ 271, 956, 951, 1153, 110, 1149, 249, 7, 1157, 21, + /* 1980 */ 1137, 1151, 33, 22, 23, 24, 1156, 25, 543, 193, + /* 1990 */ 26, 98, 1060, 1046, 1044, 1048, 1102, 1049, 1101, 256, + /* 2000 */ 255, 28, 40, 257, 1014, 855, 109, 29, 917, 563, + /* 2010 */ 388, 391, 1171, 258, 1170, 1229, 1229, 1229, 1583, 1229, + /* 2020 */ 1229, 1229, 1229, 1229, 1582, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 189, 211, 189, 189, 218, 189, 220, 189, 267, 268, - /* 10 */ 269, 189, 210, 189, 228, 189, 267, 268, 269, 19, - /* 20 */ 218, 189, 211, 212, 211, 212, 211, 211, 212, 211, - /* 30 */ 212, 31, 211, 211, 212, 211, 212, 288, 300, 39, - /* 40 */ 21, 189, 304, 43, 44, 45, 46, 47, 48, 49, - /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 225, 19, - /* 60 */ 189, 183, 184, 185, 186, 189, 248, 263, 236, 191, - /* 70 */ 248, 193, 248, 197, 208, 257, 262, 201, 200, 257, - /* 80 */ 200, 257, 81, 43, 44, 45, 46, 47, 48, 49, - /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 189, 80, - /* 100 */ 189, 101, 102, 103, 104, 105, 106, 107, 108, 109, - /* 110 */ 110, 111, 234, 235, 234, 235, 305, 306, 305, 118, - /* 120 */ 307, 305, 306, 297, 298, 247, 86, 247, 88, 19, - /* 130 */ 259, 251, 252, 267, 268, 269, 26, 136, 137, 261, - /* 140 */ 121, 101, 102, 103, 104, 105, 106, 107, 108, 109, - /* 150 */ 110, 111, 59, 43, 44, 45, 46, 47, 48, 49, - /* 160 */ 50, 51, 52, 53, 54, 55, 56, 57, 259, 291, - /* 170 */ 105, 106, 107, 108, 109, 110, 111, 158, 189, 69, - /* 180 */ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 190 */ 111, 107, 108, 109, 110, 111, 205, 206, 207, 19, - /* 200 */ 19, 54, 55, 56, 57, 58, 29, 114, 115, 116, - /* 210 */ 33, 101, 102, 103, 104, 105, 106, 107, 108, 109, - /* 220 */ 110, 111, 233, 43, 44, 45, 46, 47, 48, 49, - /* 230 */ 50, 51, 52, 53, 54, 55, 56, 57, 19, 126, - /* 240 */ 127, 148, 65, 24, 214, 200, 59, 67, 101, 102, - /* 250 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 22, - /* 260 */ 189, 111, 43, 44, 45, 46, 47, 48, 49, 50, - /* 270 */ 51, 52, 53, 54, 55, 56, 57, 206, 207, 234, - /* 280 */ 235, 101, 102, 103, 104, 105, 106, 107, 108, 109, - /* 290 */ 110, 111, 247, 76, 107, 114, 59, 267, 268, 269, - /* 300 */ 189, 114, 115, 116, 162, 163, 89, 19, 263, 92, - /* 310 */ 189, 23, 54, 55, 56, 57, 189, 206, 207, 22, - /* 320 */ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 330 */ 111, 43, 44, 45, 46, 47, 48, 49, 50, 51, - /* 340 */ 52, 53, 54, 55, 56, 57, 19, 189, 277, 59, - /* 350 */ 23, 114, 115, 116, 46, 47, 48, 49, 61, 101, - /* 360 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - /* 370 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, - /* 380 */ 53, 54, 55, 56, 57, 125, 126, 127, 277, 101, - /* 390 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, - /* 400 */ 59, 189, 189, 276, 114, 115, 116, 117, 73, 59, - /* 410 */ 120, 121, 122, 72, 214, 19, 81, 259, 19, 23, - /* 420 */ 130, 81, 72, 24, 211, 212, 221, 119, 101, 102, - /* 430 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 43, + /* 0 */ 192, 221, 192, 223, 192, 214, 272, 273, 274, 217, + /* 10 */ 192, 231, 217, 192, 192, 192, 272, 273, 274, 19, + /* 20 */ 233, 234, 214, 215, 214, 215, 203, 
293, 203, 233, + /* 30 */ 234, 31, 214, 215, 214, 214, 215, 214, 215, 39, + /* 40 */ 208, 209, 210, 43, 44, 45, 46, 47, 48, 49, + /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 236, 19, + /* 60 */ 237, 238, 237, 238, 272, 273, 274, 272, 273, 274, + /* 70 */ 192, 211, 251, 250, 251, 250, 26, 192, 200, 254, + /* 80 */ 255, 260, 204, 43, 44, 45, 46, 47, 48, 49, + /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 192, 214, + /* 100 */ 215, 214, 102, 103, 104, 105, 106, 107, 108, 109, + /* 110 */ 110, 111, 112, 59, 229, 192, 294, 16, 306, 307, + /* 120 */ 312, 313, 312, 311, 314, 59, 86, 204, 88, 19, + /* 130 */ 312, 313, 272, 273, 274, 271, 26, 22, 54, 55, + /* 140 */ 56, 57, 102, 103, 104, 105, 106, 107, 108, 109, + /* 150 */ 110, 111, 112, 43, 44, 45, 46, 47, 48, 49, + /* 160 */ 50, 51, 52, 53, 54, 55, 56, 57, 53, 115, + /* 170 */ 116, 117, 118, 309, 310, 121, 122, 123, 77, 69, + /* 180 */ 79, 115, 116, 117, 59, 131, 102, 103, 104, 105, + /* 190 */ 106, 107, 108, 109, 110, 111, 112, 72, 148, 19, + /* 200 */ 54, 55, 56, 57, 58, 108, 109, 110, 111, 112, + /* 210 */ 304, 305, 102, 103, 104, 105, 106, 107, 108, 109, + /* 220 */ 110, 111, 112, 43, 44, 45, 46, 47, 48, 49, + /* 230 */ 50, 51, 52, 53, 54, 55, 56, 57, 19, 112, + /* 240 */ 115, 116, 117, 24, 208, 209, 210, 67, 102, 103, + /* 250 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 192, + /* 260 */ 59, 160, 43, 44, 45, 46, 47, 48, 49, 50, + /* 270 */ 51, 52, 53, 54, 55, 56, 57, 19, 46, 47, + /* 280 */ 48, 49, 102, 103, 104, 105, 106, 107, 108, 109, + /* 290 */ 110, 111, 112, 213, 73, 184, 185, 186, 187, 188, + /* 300 */ 189, 221, 81, 236, 46, 194, 192, 196, 19, 59, + /* 310 */ 133, 59, 135, 136, 203, 192, 115, 116, 117, 127, + /* 320 */ 128, 102, 103, 104, 105, 106, 107, 108, 109, 110, + /* 330 */ 111, 112, 43, 44, 45, 46, 47, 48, 49, 50, + /* 340 */ 51, 52, 53, 54, 55, 56, 57, 126, 237, 238, + /* 350 */ 100, 150, 120, 230, 186, 187, 188, 189, 137, 138, + /* 360 */ 108, 250, 194, 26, 196, 115, 116, 115, 116, 117, + /* 370 */ 120, 203, 114, 164, 165, 264, 102, 103, 104, 105, + /* 380 */ 106, 107, 108, 109, 110, 111, 112, 192, 130, 111, + /* 390 */ 112, 102, 103, 104, 105, 106, 107, 108, 109, 110, + /* 400 */ 111, 112, 152, 153, 154, 237, 238, 296, 192, 214, + /* 410 */ 215, 228, 192, 307, 192, 19, 59, 311, 250, 23, + /* 420 */ 22, 106, 107, 108, 109, 110, 111, 112, 192, 72, + /* 430 */ 214, 215, 264, 192, 214, 215, 214, 215, 149, 43, /* 440 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - /* 450 */ 54, 55, 56, 57, 19, 114, 115, 116, 23, 208, - /* 460 */ 125, 248, 189, 189, 114, 115, 116, 267, 268, 269, - /* 470 */ 189, 136, 137, 189, 262, 22, 136, 137, 43, 44, - /* 480 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, - /* 490 */ 55, 56, 57, 189, 95, 211, 212, 101, 102, 103, - /* 500 */ 104, 105, 106, 107, 108, 109, 110, 111, 59, 189, - /* 510 */ 111, 189, 59, 76, 294, 295, 117, 118, 119, 120, - /* 520 */ 121, 122, 123, 19, 87, 189, 89, 23, 129, 92, - /* 530 */ 279, 227, 248, 22, 189, 284, 101, 102, 103, 104, - /* 540 */ 105, 106, 107, 108, 109, 110, 111, 43, 44, 45, - /* 550 */ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, - /* 560 */ 56, 57, 19, 114, 115, 116, 23, 114, 115, 116, - /* 570 */ 59, 117, 299, 300, 120, 121, 122, 304, 189, 189, - /* 580 */ 143, 189, 110, 111, 130, 22, 43, 44, 45, 46, - /* 590 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 600 */ 57, 211, 212, 211, 212, 101, 102, 103, 104, 105, - /* 610 */ 106, 107, 108, 109, 110, 111, 226, 189, 226, 189, - /* 620 */ 298, 132, 59, 134, 135, 114, 115, 116, 189, 59, - /* 630 */ 285, 19, 7, 8, 9, 23, 205, 206, 
207, 211, - /* 640 */ 212, 211, 212, 221, 101, 102, 103, 104, 105, 106, - /* 650 */ 107, 108, 109, 110, 111, 43, 44, 45, 46, 47, - /* 660 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 670 */ 19, 181, 182, 183, 184, 185, 186, 114, 115, 116, - /* 680 */ 189, 191, 133, 193, 114, 115, 116, 138, 299, 300, - /* 690 */ 200, 22, 201, 304, 43, 44, 45, 46, 47, 48, - /* 700 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 35, - /* 710 */ 189, 141, 189, 101, 102, 103, 104, 105, 106, 107, - /* 720 */ 108, 109, 110, 111, 234, 235, 22, 23, 59, 184, - /* 730 */ 26, 186, 211, 212, 211, 212, 191, 247, 193, 19, - /* 740 */ 66, 105, 106, 73, 189, 200, 189, 226, 74, 226, - /* 750 */ 22, 261, 101, 102, 103, 104, 105, 106, 107, 108, - /* 760 */ 109, 110, 111, 43, 44, 45, 46, 47, 48, 49, - /* 770 */ 50, 51, 52, 53, 54, 55, 56, 57, 189, 234, - /* 780 */ 235, 291, 19, 114, 115, 116, 150, 59, 152, 189, - /* 790 */ 233, 236, 247, 59, 189, 125, 126, 127, 59, 300, - /* 800 */ 211, 212, 128, 304, 100, 19, 261, 156, 45, 46, - /* 810 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 820 */ 57, 101, 102, 103, 104, 105, 106, 107, 108, 109, - /* 830 */ 110, 111, 46, 233, 189, 189, 291, 248, 99, 189, - /* 840 */ 125, 126, 127, 115, 26, 200, 289, 230, 231, 115, - /* 850 */ 200, 16, 189, 114, 115, 189, 211, 212, 119, 221, - /* 860 */ 189, 211, 212, 258, 101, 102, 103, 104, 105, 106, - /* 870 */ 107, 108, 109, 110, 111, 189, 156, 211, 212, 234, - /* 880 */ 235, 189, 211, 212, 234, 235, 22, 201, 189, 150, - /* 890 */ 151, 152, 247, 248, 76, 16, 19, 247, 248, 113, - /* 900 */ 189, 24, 257, 211, 212, 189, 26, 89, 262, 223, - /* 910 */ 92, 225, 77, 189, 79, 129, 19, 53, 226, 248, - /* 920 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, - /* 930 */ 53, 54, 55, 56, 57, 236, 19, 271, 189, 99, - /* 940 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, - /* 950 */ 53, 54, 55, 56, 57, 115, 77, 59, 79, 119, - /* 960 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, - /* 970 */ 53, 54, 55, 56, 57, 259, 22, 23, 101, 102, - /* 980 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 59, - /* 990 */ 150, 151, 152, 158, 22, 244, 24, 246, 101, 102, - /* 1000 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 285, - /* 1010 */ 189, 189, 114, 115, 116, 200, 136, 137, 101, 102, - /* 1020 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 230, - /* 1030 */ 231, 59, 211, 212, 285, 105, 106, 189, 19, 141, - /* 1040 */ 234, 235, 239, 113, 114, 115, 116, 226, 118, 234, - /* 1050 */ 235, 189, 249, 247, 100, 189, 126, 23, 236, 107, - /* 1060 */ 26, 189, 247, 44, 45, 46, 47, 48, 49, 50, - /* 1070 */ 51, 52, 53, 54, 55, 56, 57, 211, 212, 59, - /* 1080 */ 150, 233, 152, 211, 212, 133, 12, 115, 189, 189, - /* 1090 */ 138, 19, 20, 300, 22, 233, 76, 304, 226, 11, - /* 1100 */ 208, 27, 22, 23, 200, 19, 26, 87, 36, 89, - /* 1110 */ 211, 212, 92, 300, 248, 189, 42, 304, 189, 250, - /* 1120 */ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, - /* 1130 */ 111, 59, 200, 233, 114, 115, 116, 63, 234, 235, - /* 1140 */ 235, 19, 20, 71, 22, 300, 189, 73, 200, 304, - /* 1150 */ 116, 247, 247, 81, 189, 200, 227, 26, 36, 234, - /* 1160 */ 235, 203, 204, 143, 200, 26, 234, 235, 194, 200, - /* 1170 */ 48, 99, 247, 66, 189, 141, 284, 105, 106, 247, - /* 1180 */ 100, 59, 234, 235, 112, 259, 114, 115, 116, 234, - /* 1190 */ 235, 119, 85, 71, 266, 247, 211, 212, 234, 235, - /* 1200 */ 114, 94, 247, 234, 235, 12, 266, 85, 136, 137, - /* 1210 */ 189, 247, 90, 26, 126, 127, 247, 189, 26, 22, - /* 1220 */ 27, 99, 150, 151, 152, 153, 154, 105, 106, 189, - /* 1230 */ 302, 303, 211, 212, 112, 42, 114, 115, 116, 211, - /* 
1240 */ 212, 119, 302, 303, 19, 20, 189, 22, 274, 189, - /* 1250 */ 15, 144, 278, 189, 22, 23, 63, 189, 189, 203, - /* 1260 */ 204, 36, 136, 137, 155, 24, 157, 143, 211, 212, - /* 1270 */ 189, 140, 150, 151, 152, 153, 154, 0, 1, 2, - /* 1280 */ 211, 212, 5, 46, 59, 161, 147, 10, 11, 12, - /* 1290 */ 13, 14, 211, 212, 17, 60, 71, 189, 258, 189, - /* 1300 */ 59, 189, 105, 106, 189, 189, 189, 30, 116, 32, - /* 1310 */ 85, 124, 189, 251, 252, 90, 189, 40, 258, 211, - /* 1320 */ 212, 211, 212, 189, 99, 26, 211, 212, 211, 212, - /* 1330 */ 105, 106, 100, 141, 211, 212, 119, 112, 189, 114, - /* 1340 */ 115, 116, 23, 189, 119, 26, 129, 70, 189, 31, - /* 1350 */ 113, 19, 20, 24, 22, 78, 115, 39, 81, 189, - /* 1360 */ 211, 212, 26, 189, 22, 211, 212, 189, 36, 189, - /* 1370 */ 211, 212, 189, 189, 97, 150, 151, 152, 153, 154, - /* 1380 */ 127, 211, 212, 189, 189, 211, 212, 189, 189, 211, - /* 1390 */ 212, 59, 189, 189, 211, 212, 23, 189, 22, 26, - /* 1400 */ 24, 189, 149, 71, 189, 211, 212, 189, 131, 211, - /* 1410 */ 212, 189, 189, 136, 137, 211, 212, 85, 189, 211, - /* 1420 */ 212, 59, 90, 211, 212, 292, 293, 118, 119, 211, - /* 1430 */ 212, 99, 23, 211, 212, 26, 159, 105, 106, 189, - /* 1440 */ 211, 212, 143, 150, 112, 152, 114, 115, 116, 1, - /* 1450 */ 2, 119, 23, 5, 23, 26, 189, 26, 10, 11, - /* 1460 */ 12, 13, 14, 83, 84, 17, 253, 189, 139, 189, - /* 1470 */ 19, 20, 189, 22, 189, 189, 140, 115, 30, 59, - /* 1480 */ 32, 139, 150, 151, 152, 153, 154, 36, 40, 211, - /* 1490 */ 212, 211, 212, 59, 211, 212, 211, 212, 7, 8, - /* 1500 */ 19, 20, 189, 22, 150, 189, 152, 231, 281, 189, - /* 1510 */ 59, 189, 23, 189, 189, 26, 189, 36, 70, 189, - /* 1520 */ 23, 237, 71, 26, 211, 212, 78, 211, 212, 81, - /* 1530 */ 189, 211, 212, 211, 212, 115, 211, 212, 211, 212, - /* 1540 */ 59, 211, 212, 23, 23, 97, 26, 26, 23, 115, - /* 1550 */ 99, 26, 71, 189, 189, 189, 105, 106, 107, 23, - /* 1560 */ 189, 23, 26, 112, 26, 114, 115, 116, 189, 309, - /* 1570 */ 119, 23, 19, 20, 26, 22, 189, 211, 212, 131, - /* 1580 */ 99, 189, 211, 212, 136, 137, 105, 106, 189, 36, - /* 1590 */ 211, 212, 189, 112, 189, 114, 115, 116, 211, 212, - /* 1600 */ 119, 150, 151, 152, 153, 154, 189, 159, 23, 250, - /* 1610 */ 189, 26, 59, 189, 189, 189, 189, 189, 280, 189, - /* 1620 */ 250, 189, 189, 238, 71, 189, 189, 250, 211, 212, - /* 1630 */ 187, 150, 151, 152, 153, 154, 211, 212, 250, 290, - /* 1640 */ 240, 211, 212, 211, 212, 254, 286, 209, 254, 241, - /* 1650 */ 240, 254, 99, 286, 215, 220, 214, 244, 105, 106, - /* 1660 */ 214, 214, 244, 273, 224, 112, 192, 114, 115, 116, - /* 1670 */ 60, 290, 119, 5, 139, 196, 196, 38, 10, 11, - /* 1680 */ 12, 13, 14, 238, 240, 17, 196, 148, 287, 287, - /* 1690 */ 276, 113, 22, 146, 143, 245, 147, 244, 30, 241, - /* 1700 */ 32, 43, 229, 150, 151, 152, 153, 154, 40, 245, - /* 1710 */ 244, 241, 18, 196, 265, 232, 264, 232, 232, 232, - /* 1720 */ 18, 195, 265, 241, 264, 241, 196, 229, 229, 241, - /* 1730 */ 155, 62, 195, 241, 22, 216, 196, 283, 70, 195, - /* 1740 */ 195, 282, 196, 216, 113, 195, 78, 213, 196, 81, - /* 1750 */ 64, 222, 22, 124, 213, 213, 162, 111, 303, 142, - /* 1760 */ 256, 113, 219, 255, 255, 97, 256, 256, 275, 213, - /* 1770 */ 275, 216, 215, 219, 213, 213, 213, 255, 196, 256, - /* 1780 */ 222, 216, 216, 255, 91, 308, 308, 82, 22, 196, - /* 1790 */ 155, 260, 260, 144, 270, 145, 25, 199, 26, 131, - /* 1800 */ 198, 13, 190, 244, 136, 137, 190, 6, 188, 245, - /* 1810 */ 241, 243, 245, 242, 188, 188, 202, 208, 217, 208, - /* 1820 */ 202, 208, 217, 208, 202, 4, 3, 159, 209, 296, - /* 1830 
*/ 209, 208, 272, 22, 160, 15, 98, 23, 16, 23, - /* 1840 */ 137, 128, 24, 148, 140, 20, 16, 1, 142, 140, - /* 1850 */ 149, 128, 61, 53, 37, 148, 53, 53, 53, 128, - /* 1860 */ 114, 34, 1, 139, 5, 293, 296, 22, 113, 158, - /* 1870 */ 68, 75, 41, 26, 68, 139, 113, 24, 20, 19, - /* 1880 */ 129, 123, 67, 96, 22, 22, 22, 59, 23, 22, - /* 1890 */ 24, 67, 22, 67, 37, 23, 28, 147, 22, 26, - /* 1900 */ 23, 23, 23, 23, 22, 24, 23, 22, 24, 139, - /* 1910 */ 23, 23, 114, 22, 141, 26, 75, 88, 86, 44, - /* 1920 */ 34, 23, 22, 75, 34, 24, 34, 34, 34, 93, - /* 1930 */ 34, 26, 26, 34, 23, 23, 23, 23, 23, 11, - /* 1940 */ 23, 22, 26, 22, 22, 133, 23, 23, 22, 22, - /* 1950 */ 15, 26, 139, 23, 1, 1, 310, 310, 139, 310, - /* 1960 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 1970 */ 310, 310, 139, 139, 310, 310, 310, 310, 310, 310, - /* 1980 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 1990 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2000 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2010 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2020 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2030 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2040 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2050 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2060 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2070 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2080 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2090 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2100 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2110 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2120 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2130 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2140 */ 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, - /* 2150 */ 310, 310, 310, 310, 310, + /* 450 */ 54, 55, 56, 57, 117, 214, 215, 59, 19, 187, + /* 460 */ 192, 189, 23, 81, 296, 192, 194, 251, 196, 59, + /* 470 */ 229, 251, 115, 116, 117, 203, 260, 106, 107, 142, + /* 480 */ 260, 267, 43, 44, 45, 46, 47, 48, 49, 50, + /* 490 */ 51, 52, 53, 54, 55, 56, 57, 261, 102, 103, + /* 500 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 237, + /* 510 */ 238, 76, 192, 115, 116, 117, 144, 192, 76, 137, + /* 520 */ 138, 192, 250, 152, 89, 154, 116, 92, 19, 87, + /* 530 */ 262, 89, 23, 22, 92, 163, 264, 192, 22, 214, + /* 540 */ 215, 102, 103, 104, 105, 106, 107, 108, 109, 110, + /* 550 */ 111, 112, 43, 44, 45, 46, 47, 48, 49, 50, + /* 560 */ 51, 52, 53, 54, 55, 56, 57, 19, 296, 118, + /* 570 */ 59, 23, 121, 122, 123, 59, 251, 26, 46, 306, + /* 580 */ 307, 261, 131, 192, 311, 192, 144, 192, 22, 203, + /* 590 */ 100, 43, 44, 45, 46, 47, 48, 49, 50, 51, + /* 600 */ 52, 53, 54, 55, 56, 57, 116, 214, 215, 271, + /* 610 */ 120, 102, 103, 104, 105, 106, 107, 108, 109, 110, + /* 620 */ 111, 112, 229, 237, 238, 59, 115, 116, 117, 299, + /* 630 */ 300, 115, 116, 117, 59, 16, 250, 19, 192, 192, + /* 640 */ 19, 23, 152, 153, 154, 24, 114, 309, 310, 192, + /* 650 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + /* 660 */ 112, 43, 44, 45, 46, 47, 48, 49, 50, 51, + /* 670 */ 52, 53, 54, 55, 56, 57, 19, 7, 8, 9, + /* 680 */ 23, 115, 116, 117, 203, 290, 239, 238, 137, 138, + /* 690 */ 115, 116, 117, 236, 192, 22, 77, 81, 79, 250, + /* 700 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + /* 710 */ 53, 54, 55, 56, 57, 192, 95, 142, 237, 238, + /* 720 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 
111, + /* 730 */ 112, 250, 59, 112, 192, 119, 26, 214, 215, 118, + /* 740 */ 119, 120, 121, 122, 123, 124, 19, 192, 267, 302, + /* 750 */ 23, 130, 229, 137, 138, 23, 214, 215, 26, 102, + /* 760 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 770 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + /* 780 */ 53, 54, 55, 56, 57, 19, 76, 11, 115, 116, + /* 790 */ 117, 192, 29, 251, 239, 73, 33, 192, 192, 89, + /* 800 */ 192, 192, 92, 192, 192, 126, 127, 128, 224, 43, + /* 810 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + /* 820 */ 54, 55, 56, 57, 192, 35, 214, 215, 65, 102, + /* 830 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + /* 840 */ 59, 229, 192, 192, 239, 239, 214, 215, 126, 127, + /* 850 */ 128, 126, 127, 128, 307, 19, 66, 302, 311, 192, + /* 860 */ 261, 229, 224, 22, 74, 214, 215, 192, 102, 103, + /* 870 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 43, + /* 880 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + /* 890 */ 54, 55, 56, 57, 192, 192, 115, 116, 117, 19, + /* 900 */ 59, 290, 251, 127, 128, 192, 23, 302, 302, 26, + /* 910 */ 302, 236, 192, 22, 21, 24, 214, 215, 192, 129, + /* 920 */ 22, 192, 24, 142, 158, 45, 46, 47, 48, 49, + /* 930 */ 50, 51, 52, 53, 54, 55, 56, 57, 102, 103, + /* 940 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 192, + /* 950 */ 59, 12, 192, 251, 192, 305, 192, 116, 22, 23, + /* 960 */ 242, 203, 26, 203, 24, 236, 27, 237, 238, 266, + /* 970 */ 252, 214, 215, 80, 214, 215, 214, 215, 214, 215, + /* 980 */ 250, 42, 102, 103, 104, 105, 106, 107, 108, 109, + /* 990 */ 110, 111, 112, 229, 158, 237, 238, 237, 238, 59, + /* 1000 */ 117, 281, 63, 192, 192, 192, 192, 116, 250, 192, + /* 1010 */ 250, 251, 73, 251, 19, 122, 290, 237, 238, 24, + /* 1020 */ 260, 209, 210, 209, 210, 142, 242, 214, 215, 197, + /* 1030 */ 250, 22, 23, 276, 19, 26, 252, 101, 43, 44, + /* 1040 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + /* 1050 */ 55, 56, 57, 160, 19, 211, 116, 192, 43, 44, + /* 1060 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + /* 1070 */ 55, 56, 57, 192, 192, 22, 192, 266, 43, 44, + /* 1080 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + /* 1090 */ 55, 56, 57, 192, 282, 192, 282, 102, 103, 104, + /* 1100 */ 105, 106, 107, 108, 109, 110, 111, 112, 59, 192, + /* 1110 */ 101, 279, 192, 192, 230, 283, 192, 102, 103, 104, + /* 1120 */ 105, 106, 107, 108, 109, 110, 111, 112, 204, 211, + /* 1130 */ 66, 214, 215, 289, 214, 215, 108, 102, 103, 104, + /* 1140 */ 105, 106, 107, 108, 109, 110, 111, 112, 266, 85, + /* 1150 */ 226, 192, 228, 22, 23, 106, 107, 19, 94, 106, + /* 1160 */ 107, 192, 134, 114, 115, 116, 117, 139, 119, 266, + /* 1170 */ 203, 206, 207, 214, 215, 192, 127, 192, 206, 207, + /* 1180 */ 59, 192, 44, 45, 46, 47, 48, 49, 50, 51, + /* 1190 */ 52, 53, 54, 55, 56, 57, 192, 76, 192, 214, + /* 1200 */ 215, 152, 284, 154, 237, 238, 192, 289, 87, 145, + /* 1210 */ 89, 19, 20, 92, 22, 22, 23, 250, 307, 236, + /* 1220 */ 214, 215, 311, 203, 12, 247, 192, 249, 36, 307, + /* 1230 */ 192, 262, 101, 311, 137, 138, 115, 116, 117, 27, + /* 1240 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + /* 1250 */ 112, 59, 214, 215, 42, 203, 307, 237, 238, 192, + /* 1260 */ 311, 192, 26, 71, 192, 144, 262, 297, 298, 203, + /* 1270 */ 250, 19, 20, 81, 22, 63, 262, 254, 255, 15, + /* 1280 */ 26, 214, 215, 214, 215, 26, 214, 215, 36, 237, + /* 1290 */ 238, 192, 100, 114, 101, 192, 262, 192, 106, 107, + /* 1300 */ 48, 134, 250, 237, 238, 113, 139, 115, 116, 117, + /* 1310 */ 192, 59, 120, 214, 215, 242, 250, 214, 215, 214, + /* 1320 */ 215, 148, 149, 71, 60, 252, 242, 192, 149, 137, 
+ /* 1330 */ 138, 192, 214, 215, 192, 19, 252, 85, 192, 59, + /* 1340 */ 192, 157, 90, 159, 152, 153, 154, 155, 156, 214, + /* 1350 */ 215, 192, 100, 214, 215, 19, 214, 215, 106, 107, + /* 1360 */ 214, 215, 214, 215, 22, 113, 192, 115, 116, 117, + /* 1370 */ 192, 242, 120, 214, 215, 192, 24, 192, 31, 192, + /* 1380 */ 144, 252, 26, 192, 125, 99, 39, 192, 214, 215, + /* 1390 */ 192, 59, 214, 215, 192, 141, 116, 214, 215, 214, + /* 1400 */ 215, 214, 215, 61, 152, 153, 154, 155, 156, 0, + /* 1410 */ 1, 2, 214, 215, 5, 192, 214, 215, 132, 10, + /* 1420 */ 11, 12, 13, 14, 1, 2, 17, 192, 5, 19, + /* 1430 */ 20, 115, 22, 10, 11, 12, 13, 14, 192, 30, + /* 1440 */ 17, 32, 23, 192, 23, 26, 36, 26, 116, 40, + /* 1450 */ 192, 115, 192, 30, 192, 32, 119, 120, 59, 5, + /* 1460 */ 214, 215, 128, 40, 10, 11, 12, 13, 14, 59, + /* 1470 */ 19, 17, 214, 215, 214, 215, 214, 215, 120, 70, + /* 1480 */ 192, 71, 22, 192, 30, 151, 32, 78, 130, 128, + /* 1490 */ 81, 192, 140, 70, 40, 85, 192, 141, 7, 8, + /* 1500 */ 90, 78, 214, 215, 81, 214, 215, 98, 83, 84, + /* 1510 */ 100, 192, 151, 214, 215, 116, 106, 107, 214, 215, + /* 1520 */ 192, 98, 192, 113, 70, 115, 116, 117, 23, 224, + /* 1530 */ 120, 26, 78, 214, 215, 81, 19, 20, 152, 22, + /* 1540 */ 154, 132, 214, 215, 214, 215, 137, 138, 97, 192, + /* 1550 */ 256, 192, 98, 36, 23, 132, 192, 26, 192, 192, + /* 1560 */ 137, 138, 152, 153, 154, 155, 156, 192, 192, 192, + /* 1570 */ 161, 214, 215, 214, 215, 192, 59, 192, 214, 215, + /* 1580 */ 214, 215, 192, 152, 161, 154, 132, 192, 71, 214, + /* 1590 */ 215, 137, 138, 192, 192, 192, 19, 20, 192, 22, + /* 1600 */ 140, 253, 85, 192, 214, 215, 192, 90, 23, 214, + /* 1610 */ 215, 26, 192, 36, 192, 161, 23, 100, 192, 26, + /* 1620 */ 214, 215, 192, 106, 107, 214, 215, 192, 23, 192, + /* 1630 */ 113, 26, 115, 116, 117, 23, 59, 120, 26, 23, + /* 1640 */ 23, 23, 26, 26, 26, 316, 234, 23, 71, 23, + /* 1650 */ 26, 192, 26, 192, 192, 192, 192, 192, 192, 192, + /* 1660 */ 192, 253, 212, 190, 286, 285, 253, 240, 253, 152, + /* 1670 */ 153, 154, 155, 156, 241, 243, 295, 100, 291, 291, + /* 1680 */ 223, 253, 227, 106, 107, 108, 269, 244, 244, 265, + /* 1690 */ 113, 257, 115, 116, 117, 257, 243, 120, 269, 218, + /* 1700 */ 217, 265, 217, 19, 20, 217, 22, 195, 269, 269, + /* 1710 */ 60, 295, 140, 257, 243, 241, 247, 247, 199, 278, + /* 1720 */ 36, 199, 199, 38, 19, 20, 150, 22, 149, 152, + /* 1730 */ 153, 154, 155, 156, 22, 147, 144, 295, 292, 292, + /* 1740 */ 248, 36, 247, 59, 268, 270, 43, 232, 248, 247, + /* 1750 */ 18, 235, 199, 281, 18, 71, 235, 198, 148, 235, + /* 1760 */ 235, 232, 244, 270, 59, 268, 270, 232, 199, 198, + /* 1770 */ 244, 244, 244, 62, 288, 22, 71, 157, 114, 199, + /* 1780 */ 198, 287, 219, 199, 100, 198, 219, 64, 199, 198, + /* 1790 */ 106, 107, 22, 225, 216, 222, 222, 113, 125, 115, + /* 1800 */ 116, 117, 216, 164, 120, 100, 216, 24, 225, 310, + /* 1810 */ 303, 106, 107, 219, 216, 218, 216, 216, 113, 216, + /* 1820 */ 115, 116, 117, 280, 280, 120, 112, 219, 258, 143, + /* 1830 */ 114, 199, 91, 82, 259, 315, 152, 153, 154, 155, + /* 1840 */ 156, 22, 199, 277, 263, 315, 157, 248, 146, 145, + /* 1850 */ 25, 202, 26, 201, 259, 259, 13, 152, 153, 154, + /* 1860 */ 155, 156, 258, 248, 258, 263, 259, 258, 247, 246, + /* 1870 */ 245, 244, 193, 193, 6, 205, 211, 211, 191, 191, + /* 1880 */ 211, 191, 211, 205, 212, 211, 275, 220, 220, 212, + /* 1890 */ 4, 3, 22, 162, 15, 23, 205, 16, 23, 138, + /* 1900 */ 150, 129, 26, 141, 24, 20, 143, 16, 1, 141, + /* 1910 */ 129, 129, 61, 53, 301, 301, 150, 298, 53, 37, + /* 1920 */ 
53, 129, 53, 115, 34, 140, 1, 5, 22, 114, + /* 1930 */ 68, 160, 68, 75, 26, 41, 140, 114, 24, 20, + /* 1940 */ 19, 130, 124, 23, 67, 22, 22, 37, 22, 22, + /* 1950 */ 67, 59, 24, 96, 22, 148, 67, 28, 23, 22, + /* 1960 */ 26, 23, 23, 23, 23, 22, 22, 140, 97, 23, + /* 1970 */ 23, 115, 142, 75, 26, 88, 34, 44, 75, 34, + /* 1980 */ 23, 86, 22, 34, 34, 34, 93, 34, 24, 26, + /* 1990 */ 34, 26, 23, 23, 23, 23, 23, 11, 23, 22, + /* 2000 */ 26, 22, 22, 140, 23, 23, 22, 22, 134, 26, + /* 2010 */ 23, 15, 1, 140, 1, 317, 317, 317, 140, 317, + /* 2020 */ 317, 317, 317, 317, 140, 317, 317, 317, 317, 317, + /* 2030 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2040 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2050 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2060 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2070 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2080 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2090 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2100 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2110 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2120 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2130 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2140 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2150 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2160 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2170 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2180 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2190 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, 317, + /* 2200 */ 317, 317, 317, 317, 317, 317, 317, 317, 317, }; -#define YY_SHIFT_COUNT (556) +#define YY_SHIFT_COUNT (573) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (1954) +#define YY_SHIFT_MAX (2013) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 1448, 1277, 1668, 1072, 1072, 340, 1122, 1225, 1332, 1481, - /* 10 */ 1481, 1481, 335, 0, 0, 180, 897, 1481, 1481, 1481, - /* 20 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, - /* 30 */ 930, 930, 1020, 1020, 290, 1, 340, 340, 340, 340, - /* 40 */ 340, 340, 40, 110, 219, 288, 327, 396, 435, 504, - /* 50 */ 543, 612, 651, 720, 877, 897, 897, 897, 897, 897, - /* 60 */ 897, 897, 897, 897, 897, 897, 897, 897, 897, 897, - /* 70 */ 897, 897, 897, 917, 897, 1019, 763, 763, 1451, 1481, - /* 80 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, - /* 90 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, - /* 100 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, - /* 110 */ 1481, 1481, 1553, 1481, 1481, 1481, 1481, 1481, 1481, 1481, - /* 120 */ 1481, 1481, 1481, 1481, 1481, 1481, 147, 258, 258, 258, - /* 130 */ 258, 258, 79, 65, 84, 449, 19, 786, 449, 636, - /* 140 */ 636, 449, 880, 880, 880, 880, 113, 142, 142, 472, - /* 150 */ 150, 1974, 1974, 399, 399, 399, 93, 237, 341, 237, - /* 160 */ 237, 1074, 1074, 437, 350, 704, 1080, 449, 449, 449, - /* 170 */ 449, 449, 449, 449, 449, 449, 449, 449, 449, 449, - /* 180 */ 449, 449, 449, 449, 449, 449, 449, 449, 818, 818, - /* 190 */ 449, 1088, 217, 217, 734, 734, 1124, 1126, 1974, 1974, - /* 200 */ 1974, 739, 840, 840, 453, 454, 511, 187, 563, 570, - /* 210 */ 898, 669, 449, 449, 449, 449, 449, 449, 449, 449, - /* 220 */ 449, 670, 449, 449, 449, 449, 449, 449, 449, 449, - /* 230 */ 449, 449, 449, 449, 674, 674, 674, 449, 449, 449, - /* 240 */ 449, 1034, 449, 449, 449, 972, 1107, 449, 449, 1193, - /* 250 */ 
449, 449, 449, 449, 449, 449, 449, 449, 260, 177, - /* 260 */ 489, 1241, 1241, 1241, 1241, 1192, 489, 489, 952, 1197, - /* 270 */ 625, 1235, 1131, 181, 181, 1086, 1139, 1131, 1086, 1187, - /* 280 */ 1319, 1237, 1318, 1318, 1318, 181, 1299, 1299, 1109, 1336, - /* 290 */ 549, 1376, 1610, 1535, 1535, 1639, 1639, 1535, 1539, 1578, - /* 300 */ 1670, 1547, 1551, 1549, 1658, 1547, 1551, 1549, 1694, 1694, - /* 310 */ 1694, 1694, 1535, 1702, 1549, 1549, 1578, 1670, 1658, 1549, - /* 320 */ 1658, 1549, 1535, 1702, 1575, 1669, 1535, 1702, 1712, 1535, - /* 330 */ 1702, 1535, 1702, 1712, 1631, 1631, 1631, 1686, 1730, 1730, - /* 340 */ 1712, 1631, 1629, 1631, 1686, 1631, 1631, 1594, 1712, 1646, - /* 350 */ 1646, 1712, 1617, 1648, 1617, 1648, 1617, 1648, 1617, 1648, - /* 360 */ 1535, 1693, 1693, 1705, 1705, 1547, 1551, 1766, 1535, 1635, - /* 370 */ 1547, 1650, 1649, 1549, 1771, 1772, 1788, 1788, 1801, 1801, - /* 380 */ 1801, 1974, 1974, 1974, 1974, 1974, 1974, 1974, 1974, 1974, - /* 390 */ 1974, 1974, 1974, 1974, 1974, 1974, 308, 835, 954, 1232, - /* 400 */ 879, 715, 728, 1373, 864, 1329, 1253, 1409, 297, 1431, - /* 410 */ 1489, 1497, 1520, 1521, 1525, 1362, 1309, 1491, 1217, 1420, - /* 420 */ 1429, 1536, 1380, 1538, 1293, 1354, 1548, 1585, 1434, 1342, - /* 430 */ 1821, 1823, 1811, 1674, 1820, 1738, 1822, 1814, 1816, 1703, - /* 440 */ 1695, 1713, 1818, 1704, 1825, 1706, 1830, 1846, 1709, 1701, - /* 450 */ 1723, 1791, 1817, 1707, 1800, 1803, 1804, 1805, 1731, 1746, - /* 460 */ 1827, 1724, 1861, 1859, 1845, 1755, 1711, 1802, 1847, 1806, - /* 470 */ 1796, 1831, 1736, 1763, 1853, 1858, 1860, 1751, 1758, 1862, - /* 480 */ 1815, 1863, 1864, 1865, 1867, 1824, 1828, 1866, 1787, 1868, - /* 490 */ 1870, 1826, 1857, 1872, 1750, 1876, 1877, 1878, 1879, 1873, - /* 500 */ 1880, 1882, 1881, 1883, 1885, 1884, 1770, 1887, 1888, 1798, - /* 510 */ 1886, 1891, 1773, 1889, 1890, 1892, 1893, 1894, 1829, 1841, - /* 520 */ 1832, 1875, 1848, 1836, 1896, 1898, 1900, 1901, 1905, 1906, - /* 530 */ 1899, 1911, 1889, 1912, 1913, 1914, 1915, 1916, 1917, 1919, - /* 540 */ 1928, 1921, 1922, 1923, 1924, 1926, 1927, 1925, 1812, 1813, - /* 550 */ 1819, 1833, 1834, 1930, 1935, 1953, 1954, + /* 0 */ 1423, 1409, 1454, 1192, 1192, 382, 1252, 1410, 1517, 1684, + /* 10 */ 1684, 1684, 221, 0, 0, 180, 1015, 1684, 1684, 1684, + /* 20 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, + /* 30 */ 1049, 1049, 1121, 1121, 54, 616, 382, 382, 382, 382, + /* 40 */ 382, 40, 110, 219, 289, 396, 439, 509, 548, 618, + /* 50 */ 657, 727, 766, 836, 995, 1015, 1015, 1015, 1015, 1015, + /* 60 */ 1015, 1015, 1015, 1015, 1015, 1015, 1015, 1015, 1015, 1015, + /* 70 */ 1015, 1015, 1015, 1035, 1015, 1138, 880, 880, 1577, 1684, + /* 80 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, + /* 90 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, + /* 100 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, 1684, + /* 110 */ 1684, 1684, 1684, 1705, 1684, 1684, 1684, 1684, 1684, 1684, + /* 120 */ 1684, 1684, 1684, 1684, 1684, 1684, 1684, 146, 84, 84, + /* 130 */ 84, 84, 84, 274, 315, 125, 97, 357, 66, 66, + /* 140 */ 893, 258, 66, 66, 371, 371, 66, 551, 551, 551, + /* 150 */ 551, 192, 209, 209, 278, 127, 2025, 2025, 621, 621, + /* 160 */ 621, 201, 398, 398, 398, 398, 939, 939, 442, 936, + /* 170 */ 1009, 66, 66, 66, 66, 66, 66, 66, 66, 66, + /* 180 */ 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, + /* 190 */ 66, 710, 710, 66, 776, 435, 435, 410, 410, 372, + /* 200 */ 1097, 2025, 2025, 2025, 2025, 2025, 2025, 2025, 250, 490, + /* 210 */ 490, 
511, 451, 516, 252, 566, 575, 781, 673, 66, + /* 220 */ 66, 66, 66, 66, 66, 66, 66, 66, 66, 722, + /* 230 */ 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, + /* 240 */ 66, 66, 790, 790, 790, 66, 66, 66, 883, 66, + /* 250 */ 66, 66, 891, 1064, 66, 66, 1212, 66, 66, 66, + /* 260 */ 66, 66, 66, 66, 66, 725, 763, 177, 940, 940, + /* 270 */ 940, 940, 337, 177, 177, 1028, 1053, 670, 1264, 1179, + /* 280 */ 1173, 1254, 1316, 1173, 1316, 1336, 50, 1179, 1179, 50, + /* 290 */ 1179, 1254, 1336, 1259, 732, 532, 1347, 1347, 1347, 1316, + /* 300 */ 1236, 1236, 1184, 1356, 1167, 898, 1650, 1650, 1572, 1572, + /* 310 */ 1685, 1685, 1572, 1576, 1579, 1712, 1588, 1592, 1703, 1588, + /* 320 */ 1592, 1732, 1732, 1732, 1732, 1572, 1736, 1610, 1579, 1579, + /* 330 */ 1610, 1712, 1703, 1610, 1703, 1610, 1572, 1736, 1620, 1711, + /* 340 */ 1572, 1736, 1753, 1572, 1736, 1572, 1736, 1753, 1664, 1664, + /* 350 */ 1664, 1723, 1770, 1770, 1753, 1664, 1673, 1664, 1723, 1664, + /* 360 */ 1664, 1639, 1783, 1714, 1714, 1753, 1686, 1716, 1686, 1716, + /* 370 */ 1686, 1716, 1686, 1716, 1572, 1741, 1741, 1751, 1751, 1588, + /* 380 */ 1592, 1819, 1572, 1689, 1588, 1702, 1704, 1610, 1825, 1826, + /* 390 */ 1843, 1843, 1868, 1868, 1868, 2025, 2025, 2025, 2025, 2025, + /* 400 */ 2025, 2025, 2025, 2025, 2025, 2025, 2025, 2025, 2025, 2025, + /* 410 */ 232, 101, 1131, 1193, 619, 679, 841, 1421, 1286, 115, + /* 420 */ 1352, 1334, 1361, 1419, 1342, 1505, 1531, 1585, 1593, 1605, + /* 430 */ 1612, 1280, 1337, 1491, 1358, 1451, 1332, 1616, 1617, 1425, + /* 440 */ 1618, 1386, 1431, 1624, 1626, 1399, 1460, 1886, 1888, 1870, + /* 450 */ 1731, 1879, 1881, 1872, 1875, 1761, 1750, 1772, 1876, 1876, + /* 460 */ 1880, 1762, 1885, 1763, 1891, 1907, 1768, 1781, 1876, 1782, + /* 470 */ 1851, 1882, 1876, 1766, 1860, 1865, 1867, 1869, 1792, 1808, + /* 480 */ 1890, 1785, 1925, 1922, 1906, 1815, 1771, 1862, 1908, 1864, + /* 490 */ 1858, 1894, 1796, 1823, 1914, 1919, 1921, 1811, 1818, 1923, + /* 500 */ 1877, 1924, 1926, 1920, 1927, 1883, 1892, 1928, 1857, 1929, + /* 510 */ 1932, 1889, 1910, 1935, 1807, 1937, 1938, 1939, 1940, 1934, + /* 520 */ 1941, 1943, 1871, 1827, 1946, 1947, 1856, 1942, 1944, 1830, + /* 530 */ 1948, 1945, 1949, 1950, 1951, 1887, 1898, 1895, 1933, 1903, + /* 540 */ 1893, 1953, 1957, 1960, 1964, 1963, 1965, 1956, 1969, 1948, + /* 550 */ 1970, 1971, 1972, 1973, 1974, 1975, 1977, 1986, 1979, 1980, + /* 560 */ 1981, 1982, 1984, 1985, 1983, 1874, 1863, 1873, 1878, 1884, + /* 570 */ 1987, 1996, 2011, 2013, }; -#define YY_REDUCE_COUNT (395) -#define YY_REDUCE_MIN (-262) -#define YY_REDUCE_MAX (1627) +#define YY_REDUCE_COUNT (409) +#define YY_REDUCE_MIN (-266) +#define YY_REDUCE_MAX (1691) static const short yy_reduce_ofst[] = { - /* 0 */ 490, -122, 545, 645, 650, -120, -189, -187, -184, -182, - /* 10 */ -178, -176, 45, 30, 200, -251, -134, 390, 392, 521, - /* 20 */ 523, 213, 692, 821, 284, 589, 872, 666, 671, 866, - /* 30 */ 71, 111, 273, 389, 686, 815, 904, 932, 948, 955, - /* 40 */ 964, 969, -259, -259, -259, -259, -259, -259, -259, -259, - /* 50 */ -259, -259, -259, -259, -259, -259, -259, -259, -259, -259, - /* 60 */ -259, -259, -259, -259, -259, -259, -259, -259, -259, -259, - /* 70 */ -259, -259, -259, -259, -259, -259, -259, -259, 428, 430, - /* 80 */ 899, 985, 1021, 1028, 1057, 1069, 1081, 1108, 1110, 1115, - /* 90 */ 1117, 1123, 1149, 1154, 1159, 1170, 1174, 1178, 1183, 1194, - /* 100 */ 1198, 1204, 1208, 1212, 1218, 1222, 1229, 1278, 1280, 1283, - /* 110 */ 1285, 1313, 1316, 1320, 1322, 1325, 1327, 1330, 1366, 1371, - /* 120 */ 1379, 
1387, 1417, 1425, 1430, 1432, -259, -259, -259, -259, - /* 130 */ -259, -259, -259, -259, -259, 557, 974, -214, -174, -9, - /* 140 */ 431, -124, 806, 925, 806, 925, 251, 928, 940, -259, - /* 150 */ -259, -259, -259, -198, -198, -198, 127, -186, -168, 212, - /* 160 */ 646, 617, 799, -262, 555, 220, 220, 491, 605, 1040, - /* 170 */ 1060, 699, -11, 600, 848, 862, 345, -129, 724, -91, - /* 180 */ 158, 749, 716, 900, 304, 822, 929, 926, 499, 793, - /* 190 */ 322, 892, 813, 845, 958, 1056, 751, 905, 1133, 1062, - /* 200 */ 803, -210, -185, -179, -148, -167, -89, 121, 274, 281, - /* 210 */ 320, 336, 439, 663, 711, 957, 965, 1064, 1068, 1112, - /* 220 */ 1116, -196, 1127, 1134, 1180, 1184, 1195, 1199, 1203, 1215, - /* 230 */ 1223, 1250, 1267, 1286, 205, 422, 638, 1324, 1341, 1364, - /* 240 */ 1365, 1213, 1392, 1399, 1403, 869, 1260, 1405, 1421, 1276, - /* 250 */ 1424, 121, 1426, 1427, 1428, 1433, 1436, 1437, 1227, 1338, - /* 260 */ 1284, 1359, 1370, 1377, 1388, 1213, 1284, 1284, 1385, 1438, - /* 270 */ 1443, 1349, 1400, 1391, 1394, 1360, 1408, 1410, 1367, 1439, - /* 280 */ 1440, 1435, 1442, 1446, 1447, 1397, 1413, 1418, 1390, 1444, - /* 290 */ 1445, 1474, 1381, 1479, 1480, 1401, 1402, 1490, 1414, 1449, - /* 300 */ 1452, 1450, 1453, 1458, 1473, 1464, 1466, 1470, 1483, 1485, - /* 310 */ 1486, 1487, 1517, 1526, 1482, 1484, 1457, 1460, 1498, 1488, - /* 320 */ 1499, 1492, 1530, 1537, 1454, 1459, 1540, 1544, 1519, 1546, - /* 330 */ 1545, 1552, 1550, 1527, 1534, 1541, 1542, 1529, 1543, 1554, - /* 340 */ 1555, 1556, 1557, 1561, 1558, 1562, 1563, 1455, 1565, 1493, - /* 350 */ 1495, 1566, 1504, 1508, 1510, 1509, 1511, 1522, 1523, 1528, - /* 360 */ 1582, 1477, 1478, 1531, 1532, 1564, 1559, 1524, 1593, 1560, - /* 370 */ 1567, 1568, 1571, 1569, 1598, 1602, 1612, 1616, 1620, 1626, - /* 380 */ 1627, 1533, 1570, 1572, 1614, 1609, 1611, 1613, 1615, 1618, - /* 390 */ 1601, 1605, 1619, 1621, 1623, 1622, + /* 0 */ 111, 168, 272, 760, -177, -175, -192, -190, -182, -179, + /* 10 */ 216, 220, 481, -208, -205, -266, -140, -115, 241, 393, + /* 20 */ 523, 325, 612, 632, 542, 651, 764, 757, 702, 762, + /* 30 */ 812, 814, -188, 273, 924, 386, 758, 967, 1020, 1052, + /* 40 */ 1066, -256, -256, -256, -256, -256, -256, -256, -256, -256, + /* 50 */ -256, -256, -256, -256, -256, -256, -256, -256, -256, -256, + /* 60 */ -256, -256, -256, -256, -256, -256, -256, -256, -256, -256, + /* 70 */ -256, -256, -256, -256, -256, -256, -256, -256, 195, 222, + /* 80 */ 813, 917, 920, 959, 985, 1006, 1038, 1067, 1069, 1072, + /* 90 */ 1099, 1103, 1105, 1118, 1135, 1139, 1142, 1146, 1148, 1159, + /* 100 */ 1174, 1178, 1183, 1185, 1187, 1198, 1202, 1246, 1258, 1260, + /* 110 */ 1262, 1288, 1291, 1299, 1304, 1319, 1328, 1330, 1357, 1359, + /* 120 */ 1364, 1366, 1375, 1390, 1395, 1406, 1411, -256, -256, -256, + /* 130 */ -256, -256, -256, -256, -256, 447, -256, 555, -178, 605, + /* 140 */ 832, -220, 606, -94, -168, 36, -122, 730, 780, 730, + /* 150 */ 780, 918, -136, 338, -256, -256, -256, -256, 80, 80, + /* 160 */ 80, 720, 703, 811, 882, 903, -213, -204, 106, 330, + /* 170 */ 330, -77, 236, 320, 599, 67, 457, 675, 729, 395, + /* 180 */ 268, 611, 969, 1004, 726, 1014, 983, 123, 884, 608, + /* 190 */ 1034, 547, 911, 650, 844, 922, 949, 965, 972, 978, + /* 200 */ 449, 970, 718, 784, 1073, 1084, 1023, 1129, -209, -180, + /* 210 */ -113, 114, 183, 329, 345, 391, 446, 502, 609, 667, + /* 220 */ 713, 817, 865, 881, 901, 921, 989, 1191, 1195, 214, + /* 230 */ 1223, 1235, 1251, 1367, 1376, 1377, 1383, 1385, 1401, 1402, + /* 240 */ 1403, 1414, 584, 638, 
1305, 1420, 1422, 1426, 1294, 1430, + /* 250 */ 1435, 1437, 1348, 1329, 1459, 1461, 1412, 1462, 345, 1463, + /* 260 */ 1464, 1465, 1466, 1467, 1468, 1378, 1380, 1427, 1408, 1413, + /* 270 */ 1415, 1428, 1294, 1427, 1427, 1433, 1450, 1473, 1381, 1417, + /* 280 */ 1424, 1432, 1434, 1436, 1438, 1387, 1443, 1429, 1439, 1444, + /* 290 */ 1440, 1453, 1388, 1481, 1455, 1457, 1483, 1485, 1488, 1456, + /* 300 */ 1469, 1470, 1441, 1471, 1474, 1512, 1416, 1442, 1519, 1522, + /* 310 */ 1446, 1447, 1523, 1472, 1475, 1476, 1492, 1495, 1515, 1500, + /* 320 */ 1502, 1516, 1521, 1524, 1525, 1553, 1559, 1518, 1493, 1496, + /* 330 */ 1526, 1497, 1529, 1527, 1535, 1528, 1569, 1571, 1486, 1494, + /* 340 */ 1580, 1582, 1563, 1584, 1587, 1589, 1591, 1567, 1578, 1586, + /* 350 */ 1590, 1568, 1573, 1574, 1594, 1598, 1597, 1600, 1583, 1601, + /* 360 */ 1603, 1499, 1507, 1543, 1544, 1608, 1575, 1570, 1595, 1604, + /* 370 */ 1596, 1606, 1607, 1609, 1632, 1520, 1530, 1581, 1602, 1599, + /* 380 */ 1621, 1611, 1643, 1566, 1615, 1623, 1625, 1627, 1649, 1652, + /* 390 */ 1679, 1680, 1687, 1688, 1690, 1613, 1614, 1619, 1670, 1665, + /* 400 */ 1666, 1669, 1671, 1678, 1667, 1668, 1672, 1677, 1674, 1691, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1579, 1579, 1579, 1415, 1192, 1301, 1192, 1192, 1192, 1415, - /* 10 */ 1415, 1415, 1192, 1331, 1331, 1468, 1223, 1192, 1192, 1192, - /* 20 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1414, 1192, 1192, - /* 30 */ 1192, 1192, 1498, 1498, 1192, 1192, 1192, 1192, 1192, 1192, - /* 40 */ 1192, 1192, 1192, 1340, 1192, 1192, 1192, 1192, 1192, 1192, - /* 50 */ 1416, 1417, 1192, 1192, 1192, 1467, 1469, 1432, 1350, 1349, - /* 60 */ 1348, 1347, 1450, 1318, 1345, 1338, 1342, 1410, 1411, 1409, - /* 70 */ 1413, 1417, 1416, 1192, 1341, 1381, 1395, 1380, 1192, 1192, - /* 80 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 90 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 100 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 110 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 120 */ 1192, 1192, 1192, 1192, 1192, 1192, 1389, 1394, 1400, 1393, - /* 130 */ 1390, 1383, 1382, 1384, 1385, 1192, 1213, 1265, 1192, 1192, - /* 140 */ 1192, 1192, 1486, 1485, 1192, 1192, 1223, 1375, 1374, 1386, - /* 150 */ 1387, 1397, 1396, 1475, 1533, 1532, 1433, 1192, 1192, 1192, - /* 160 */ 1192, 1192, 1192, 1498, 1192, 1192, 1192, 1192, 1192, 1192, - /* 170 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 180 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1498, 1498, - /* 190 */ 1192, 1223, 1498, 1498, 1219, 1219, 1325, 1192, 1481, 1301, - /* 200 */ 1292, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 210 */ 1192, 1192, 1192, 1192, 1192, 1472, 1470, 1192, 1192, 1192, - /* 220 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 230 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 240 */ 1192, 1192, 1192, 1192, 1192, 1297, 1192, 1192, 1192, 1192, - /* 250 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1527, 1192, 1445, - /* 260 */ 1279, 1297, 1297, 1297, 1297, 1299, 1280, 1278, 1291, 1224, - /* 270 */ 1199, 1571, 1298, 1320, 1320, 1568, 1344, 1298, 1568, 1240, - /* 280 */ 1549, 1235, 1331, 1331, 1331, 1320, 1325, 1325, 1412, 1298, - /* 290 */ 1291, 1192, 1571, 1306, 1306, 1570, 1570, 1306, 1433, 1353, - /* 300 */ 1359, 1339, 1325, 1344, 1268, 1339, 1325, 1344, 1274, 1274, - /* 310 */ 1274, 1274, 1306, 1210, 1344, 1344, 1353, 1359, 1268, 1344, - /* 320 */ 1268, 1344, 
1306, 1210, 1449, 1565, 1306, 1210, 1423, 1306, - /* 330 */ 1210, 1306, 1210, 1423, 1266, 1266, 1266, 1255, 1192, 1192, - /* 340 */ 1423, 1266, 1240, 1266, 1255, 1266, 1266, 1516, 1423, 1427, - /* 350 */ 1427, 1423, 1324, 1319, 1324, 1319, 1324, 1319, 1324, 1319, - /* 360 */ 1306, 1508, 1508, 1334, 1334, 1339, 1325, 1418, 1306, 1192, - /* 370 */ 1339, 1337, 1335, 1344, 1216, 1258, 1530, 1530, 1526, 1526, - /* 380 */ 1526, 1576, 1576, 1481, 1542, 1223, 1223, 1223, 1223, 1542, - /* 390 */ 1242, 1242, 1224, 1224, 1223, 1542, 1192, 1192, 1192, 1192, - /* 400 */ 1192, 1192, 1537, 1192, 1434, 1310, 1192, 1192, 1192, 1192, - /* 410 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 420 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1364, - /* 430 */ 1192, 1195, 1478, 1192, 1192, 1476, 1192, 1192, 1192, 1192, - /* 440 */ 1192, 1192, 1311, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 450 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 460 */ 1192, 1567, 1192, 1192, 1192, 1192, 1192, 1192, 1448, 1447, - /* 470 */ 1192, 1192, 1308, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 480 */ 1192, 1192, 1192, 1192, 1192, 1192, 1238, 1192, 1192, 1192, - /* 490 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 500 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 510 */ 1192, 1192, 1192, 1336, 1192, 1192, 1192, 1192, 1192, 1192, - /* 520 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1513, 1326, - /* 530 */ 1192, 1192, 1558, 1192, 1192, 1192, 1192, 1192, 1192, 1192, - /* 540 */ 1192, 1192, 1192, 1192, 1192, 1192, 1192, 1553, 1282, 1366, - /* 550 */ 1192, 1365, 1369, 1192, 1204, 1192, 1192, + /* 0 */ 1627, 1627, 1627, 1457, 1227, 1336, 1227, 1227, 1227, 1457, + /* 10 */ 1457, 1457, 1227, 1366, 1366, 1510, 1258, 1227, 1227, 1227, + /* 20 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1456, 1227, 1227, + /* 30 */ 1227, 1227, 1545, 1545, 1227, 1227, 1227, 1227, 1227, 1227, + /* 40 */ 1227, 1227, 1375, 1227, 1382, 1227, 1227, 1227, 1227, 1227, + /* 50 */ 1458, 1459, 1227, 1227, 1227, 1509, 1511, 1474, 1389, 1388, + /* 60 */ 1387, 1386, 1492, 1353, 1380, 1373, 1377, 1452, 1453, 1451, + /* 70 */ 1455, 1459, 1458, 1227, 1376, 1423, 1437, 1422, 1227, 1227, + /* 80 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 90 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 100 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 110 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 120 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1431, 1436, 1442, + /* 130 */ 1435, 1432, 1425, 1424, 1426, 1227, 1427, 1227, 1227, 1227, + /* 140 */ 1248, 1300, 1227, 1227, 1227, 1227, 1227, 1529, 1528, 1227, + /* 150 */ 1227, 1258, 1417, 1416, 1428, 1429, 1439, 1438, 1517, 1580, + /* 160 */ 1579, 1475, 1227, 1227, 1227, 1227, 1227, 1227, 1545, 1227, + /* 170 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 180 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 190 */ 1227, 1545, 1545, 1227, 1258, 1545, 1545, 1254, 1254, 1360, + /* 200 */ 1227, 1524, 1327, 1327, 1327, 1327, 1336, 1327, 1227, 1227, + /* 210 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 220 */ 1227, 1227, 1227, 1514, 1512, 1227, 1227, 1227, 1227, 1227, + /* 230 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 240 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 250 */ 1227, 1227, 1332, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 
260 */ 1227, 1227, 1227, 1227, 1574, 1227, 1487, 1314, 1332, 1332, + /* 270 */ 1332, 1332, 1334, 1315, 1313, 1326, 1259, 1234, 1619, 1392, + /* 280 */ 1381, 1333, 1355, 1381, 1355, 1616, 1379, 1392, 1392, 1379, + /* 290 */ 1392, 1333, 1616, 1275, 1596, 1270, 1366, 1366, 1366, 1355, + /* 300 */ 1360, 1360, 1454, 1333, 1326, 1227, 1619, 1619, 1341, 1341, + /* 310 */ 1618, 1618, 1341, 1475, 1603, 1401, 1374, 1360, 1303, 1374, + /* 320 */ 1360, 1309, 1309, 1309, 1309, 1341, 1245, 1379, 1603, 1603, + /* 330 */ 1379, 1401, 1303, 1379, 1303, 1379, 1341, 1245, 1491, 1613, + /* 340 */ 1341, 1245, 1465, 1341, 1245, 1341, 1245, 1465, 1301, 1301, + /* 350 */ 1301, 1290, 1227, 1227, 1465, 1301, 1275, 1301, 1290, 1301, + /* 360 */ 1301, 1563, 1227, 1469, 1469, 1465, 1359, 1354, 1359, 1354, + /* 370 */ 1359, 1354, 1359, 1354, 1341, 1555, 1555, 1369, 1369, 1374, + /* 380 */ 1360, 1460, 1341, 1227, 1374, 1372, 1370, 1379, 1251, 1293, + /* 390 */ 1577, 1577, 1573, 1573, 1573, 1624, 1624, 1524, 1589, 1258, + /* 400 */ 1258, 1258, 1258, 1589, 1277, 1277, 1259, 1259, 1258, 1589, + /* 410 */ 1227, 1227, 1227, 1227, 1227, 1227, 1584, 1227, 1519, 1476, + /* 420 */ 1345, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 430 */ 1227, 1227, 1227, 1227, 1227, 1530, 1227, 1227, 1227, 1227, + /* 440 */ 1227, 1227, 1227, 1227, 1227, 1227, 1406, 1227, 1230, 1521, + /* 450 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1383, 1384, + /* 460 */ 1346, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1398, 1227, + /* 470 */ 1227, 1227, 1393, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 480 */ 1227, 1615, 1227, 1227, 1227, 1227, 1227, 1227, 1490, 1489, + /* 490 */ 1227, 1227, 1343, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 500 */ 1227, 1227, 1227, 1227, 1227, 1227, 1273, 1227, 1227, 1227, + /* 510 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 520 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 530 */ 1371, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 540 */ 1227, 1227, 1227, 1227, 1227, 1560, 1361, 1227, 1227, 1606, + /* 550 */ 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, 1227, + /* 560 */ 1227, 1227, 1227, 1227, 1600, 1317, 1408, 1227, 1407, 1411, + /* 570 */ 1227, 1239, 1227, 1227, }; /********** End of lemon-generated parsing tables *****************************/ @@ -155296,6 +159301,7 @@ static const YYCODETYPE yyFallback[] = { 59, /* TIES => ID */ 59, /* GENERATED => ID */ 59, /* ALWAYS => ID */ + 59, /* MATERIALIZED => ID */ 59, /* REINDEX => ID */ 59, /* RENAME => ID */ 59, /* CTIME_KW => ID */ @@ -155347,6 +159353,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* HAVING => nothing */ 0, /* LIMIT => nothing */ 0, /* WHERE => nothing */ + 0, /* RETURNING => nothing */ 0, /* INTO => nothing */ 0, /* NOTHING => nothing */ 0, /* FLOAT => nothing */ @@ -155378,6 +159385,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* IF_NULL_ROW => nothing */ 0, /* ASTERISK => nothing */ 0, /* SPAN => nothing */ + 0, /* ERROR => nothing */ 0, /* SPACE => nothing */ 0, /* ILLEGAL => nothing */ }; @@ -155433,6 +159441,7 @@ typedef struct yyParser yyParser; #ifndef NDEBUG /* #include <stdio.h> */ +/* #include <assert.h> */ static FILE *yyTraceFILE = 0; static char *yyTracePrompt = 0; #endif /* NDEBUG */ @@ -155564,219 +159573,226 @@ static const char *const yyTokenName[] = { /* 94 */ "TIES", /* 95 */ "GENERATED", /* 96 */ "ALWAYS", - /* 97 */ "REINDEX", - /* 98 */ "RENAME", - /* 99 */ "CTIME_KW", - /* 100 */ "ANY", - /* 101 */ "BITAND", - /* 102 */ "BITOR", - /* 103 */ "LSHIFT", -
/* 104 */ "RSHIFT", - /* 105 */ "PLUS", - /* 106 */ "MINUS", - /* 107 */ "STAR", - /* 108 */ "SLASH", - /* 109 */ "REM", - /* 110 */ "CONCAT", - /* 111 */ "COLLATE", - /* 112 */ "BITNOT", - /* 113 */ "ON", - /* 114 */ "INDEXED", - /* 115 */ "STRING", - /* 116 */ "JOIN_KW", - /* 117 */ "CONSTRAINT", - /* 118 */ "DEFAULT", - /* 119 */ "NULL", - /* 120 */ "PRIMARY", - /* 121 */ "UNIQUE", - /* 122 */ "CHECK", - /* 123 */ "REFERENCES", - /* 124 */ "AUTOINCR", - /* 125 */ "INSERT", - /* 126 */ "DELETE", - /* 127 */ "UPDATE", - /* 128 */ "SET", - /* 129 */ "DEFERRABLE", - /* 130 */ "FOREIGN", - /* 131 */ "DROP", - /* 132 */ "UNION", - /* 133 */ "ALL", - /* 134 */ "EXCEPT", - /* 135 */ "INTERSECT", - /* 136 */ "SELECT", - /* 137 */ "VALUES", - /* 138 */ "DISTINCT", - /* 139 */ "DOT", - /* 140 */ "FROM", - /* 141 */ "JOIN", - /* 142 */ "USING", - /* 143 */ "ORDER", - /* 144 */ "GROUP", - /* 145 */ "HAVING", - /* 146 */ "LIMIT", - /* 147 */ "WHERE", - /* 148 */ "INTO", - /* 149 */ "NOTHING", - /* 150 */ "FLOAT", - /* 151 */ "BLOB", - /* 152 */ "INTEGER", - /* 153 */ "VARIABLE", - /* 154 */ "CASE", - /* 155 */ "WHEN", - /* 156 */ "THEN", - /* 157 */ "ELSE", - /* 158 */ "INDEX", - /* 159 */ "ALTER", - /* 160 */ "ADD", - /* 161 */ "WINDOW", - /* 162 */ "OVER", - /* 163 */ "FILTER", - /* 164 */ "COLUMN", - /* 165 */ "AGG_FUNCTION", - /* 166 */ "AGG_COLUMN", - /* 167 */ "TRUEFALSE", - /* 168 */ "ISNOT", - /* 169 */ "FUNCTION", - /* 170 */ "UMINUS", - /* 171 */ "UPLUS", - /* 172 */ "TRUTH", - /* 173 */ "REGISTER", - /* 174 */ "VECTOR", - /* 175 */ "SELECT_COLUMN", - /* 176 */ "IF_NULL_ROW", - /* 177 */ "ASTERISK", - /* 178 */ "SPAN", - /* 179 */ "SPACE", - /* 180 */ "ILLEGAL", - /* 181 */ "input", - /* 182 */ "cmdlist", - /* 183 */ "ecmd", - /* 184 */ "cmdx", - /* 185 */ "explain", - /* 186 */ "cmd", - /* 187 */ "transtype", - /* 188 */ "trans_opt", - /* 189 */ "nm", - /* 190 */ "savepoint_opt", - /* 191 */ "create_table", - /* 192 */ "create_table_args", - /* 193 */ "createkw", - /* 194 */ "temp", - /* 195 */ "ifnotexists", - /* 196 */ "dbnm", - /* 197 */ "columnlist", - /* 198 */ "conslist_opt", - /* 199 */ "table_options", - /* 200 */ "select", - /* 201 */ "columnname", - /* 202 */ "carglist", - /* 203 */ "typetoken", - /* 204 */ "typename", - /* 205 */ "signed", - /* 206 */ "plus_num", - /* 207 */ "minus_num", - /* 208 */ "scanpt", - /* 209 */ "scantok", - /* 210 */ "ccons", - /* 211 */ "term", - /* 212 */ "expr", - /* 213 */ "onconf", - /* 214 */ "sortorder", - /* 215 */ "autoinc", - /* 216 */ "eidlist_opt", - /* 217 */ "refargs", - /* 218 */ "defer_subclause", - /* 219 */ "generated", - /* 220 */ "refarg", - /* 221 */ "refact", - /* 222 */ "init_deferred_pred_opt", - /* 223 */ "conslist", - /* 224 */ "tconscomma", - /* 225 */ "tcons", - /* 226 */ "sortlist", - /* 227 */ "eidlist", - /* 228 */ "defer_subclause_opt", - /* 229 */ "orconf", - /* 230 */ "resolvetype", - /* 231 */ "raisetype", - /* 232 */ "ifexists", - /* 233 */ "fullname", - /* 234 */ "selectnowith", - /* 235 */ "oneselect", - /* 236 */ "wqlist", - /* 237 */ "multiselect_op", - /* 238 */ "distinct", - /* 239 */ "selcollist", - /* 240 */ "from", - /* 241 */ "where_opt", - /* 242 */ "groupby_opt", - /* 243 */ "having_opt", - /* 244 */ "orderby_opt", - /* 245 */ "limit_opt", - /* 246 */ "window_clause", - /* 247 */ "values", - /* 248 */ "nexprlist", - /* 249 */ "sclp", - /* 250 */ "as", - /* 251 */ "seltablist", - /* 252 */ "stl_prefix", - /* 253 */ "joinop", - /* 254 */ "indexed_opt", - /* 255 */ "on_opt", - /* 256 */ "using_opt", - /* 257 
*/ "exprlist", - /* 258 */ "xfullname", - /* 259 */ "idlist", - /* 260 */ "nulls", - /* 261 */ "with", - /* 262 */ "setlist", - /* 263 */ "insert_cmd", - /* 264 */ "idlist_opt", - /* 265 */ "upsert", - /* 266 */ "filter_over", - /* 267 */ "likeop", - /* 268 */ "between_op", - /* 269 */ "in_op", - /* 270 */ "paren_exprlist", - /* 271 */ "case_operand", - /* 272 */ "case_exprlist", - /* 273 */ "case_else", - /* 274 */ "uniqueflag", - /* 275 */ "collate", - /* 276 */ "vinto", - /* 277 */ "nmnum", - /* 278 */ "trigger_decl", - /* 279 */ "trigger_cmd_list", - /* 280 */ "trigger_time", - /* 281 */ "trigger_event", - /* 282 */ "foreach_clause", - /* 283 */ "when_clause", - /* 284 */ "trigger_cmd", - /* 285 */ "trnm", - /* 286 */ "tridxby", - /* 287 */ "database_kw_opt", - /* 288 */ "key_opt", - /* 289 */ "add_column_fullname", - /* 290 */ "kwcolumn_opt", - /* 291 */ "create_vtab", - /* 292 */ "vtabarglist", - /* 293 */ "vtabarg", - /* 294 */ "vtabargtoken", - /* 295 */ "lp", - /* 296 */ "anylist", - /* 297 */ "windowdefn_list", - /* 298 */ "windowdefn", - /* 299 */ "window", - /* 300 */ "frame_opt", - /* 301 */ "part_opt", - /* 302 */ "filter_clause", - /* 303 */ "over_clause", - /* 304 */ "range_or_rows", - /* 305 */ "frame_bound", - /* 306 */ "frame_bound_s", - /* 307 */ "frame_bound_e", - /* 308 */ "frame_exclude_opt", - /* 309 */ "frame_exclude", + /* 97 */ "MATERIALIZED", + /* 98 */ "REINDEX", + /* 99 */ "RENAME", + /* 100 */ "CTIME_KW", + /* 101 */ "ANY", + /* 102 */ "BITAND", + /* 103 */ "BITOR", + /* 104 */ "LSHIFT", + /* 105 */ "RSHIFT", + /* 106 */ "PLUS", + /* 107 */ "MINUS", + /* 108 */ "STAR", + /* 109 */ "SLASH", + /* 110 */ "REM", + /* 111 */ "CONCAT", + /* 112 */ "COLLATE", + /* 113 */ "BITNOT", + /* 114 */ "ON", + /* 115 */ "INDEXED", + /* 116 */ "STRING", + /* 117 */ "JOIN_KW", + /* 118 */ "CONSTRAINT", + /* 119 */ "DEFAULT", + /* 120 */ "NULL", + /* 121 */ "PRIMARY", + /* 122 */ "UNIQUE", + /* 123 */ "CHECK", + /* 124 */ "REFERENCES", + /* 125 */ "AUTOINCR", + /* 126 */ "INSERT", + /* 127 */ "DELETE", + /* 128 */ "UPDATE", + /* 129 */ "SET", + /* 130 */ "DEFERRABLE", + /* 131 */ "FOREIGN", + /* 132 */ "DROP", + /* 133 */ "UNION", + /* 134 */ "ALL", + /* 135 */ "EXCEPT", + /* 136 */ "INTERSECT", + /* 137 */ "SELECT", + /* 138 */ "VALUES", + /* 139 */ "DISTINCT", + /* 140 */ "DOT", + /* 141 */ "FROM", + /* 142 */ "JOIN", + /* 143 */ "USING", + /* 144 */ "ORDER", + /* 145 */ "GROUP", + /* 146 */ "HAVING", + /* 147 */ "LIMIT", + /* 148 */ "WHERE", + /* 149 */ "RETURNING", + /* 150 */ "INTO", + /* 151 */ "NOTHING", + /* 152 */ "FLOAT", + /* 153 */ "BLOB", + /* 154 */ "INTEGER", + /* 155 */ "VARIABLE", + /* 156 */ "CASE", + /* 157 */ "WHEN", + /* 158 */ "THEN", + /* 159 */ "ELSE", + /* 160 */ "INDEX", + /* 161 */ "ALTER", + /* 162 */ "ADD", + /* 163 */ "WINDOW", + /* 164 */ "OVER", + /* 165 */ "FILTER", + /* 166 */ "COLUMN", + /* 167 */ "AGG_FUNCTION", + /* 168 */ "AGG_COLUMN", + /* 169 */ "TRUEFALSE", + /* 170 */ "ISNOT", + /* 171 */ "FUNCTION", + /* 172 */ "UMINUS", + /* 173 */ "UPLUS", + /* 174 */ "TRUTH", + /* 175 */ "REGISTER", + /* 176 */ "VECTOR", + /* 177 */ "SELECT_COLUMN", + /* 178 */ "IF_NULL_ROW", + /* 179 */ "ASTERISK", + /* 180 */ "SPAN", + /* 181 */ "ERROR", + /* 182 */ "SPACE", + /* 183 */ "ILLEGAL", + /* 184 */ "input", + /* 185 */ "cmdlist", + /* 186 */ "ecmd", + /* 187 */ "cmdx", + /* 188 */ "explain", + /* 189 */ "cmd", + /* 190 */ "transtype", + /* 191 */ "trans_opt", + /* 192 */ "nm", + /* 193 */ "savepoint_opt", + /* 194 */ "create_table", + /* 195 */ 
"create_table_args", + /* 196 */ "createkw", + /* 197 */ "temp", + /* 198 */ "ifnotexists", + /* 199 */ "dbnm", + /* 200 */ "columnlist", + /* 201 */ "conslist_opt", + /* 202 */ "table_options", + /* 203 */ "select", + /* 204 */ "columnname", + /* 205 */ "carglist", + /* 206 */ "typetoken", + /* 207 */ "typename", + /* 208 */ "signed", + /* 209 */ "plus_num", + /* 210 */ "minus_num", + /* 211 */ "scanpt", + /* 212 */ "scantok", + /* 213 */ "ccons", + /* 214 */ "term", + /* 215 */ "expr", + /* 216 */ "onconf", + /* 217 */ "sortorder", + /* 218 */ "autoinc", + /* 219 */ "eidlist_opt", + /* 220 */ "refargs", + /* 221 */ "defer_subclause", + /* 222 */ "generated", + /* 223 */ "refarg", + /* 224 */ "refact", + /* 225 */ "init_deferred_pred_opt", + /* 226 */ "conslist", + /* 227 */ "tconscomma", + /* 228 */ "tcons", + /* 229 */ "sortlist", + /* 230 */ "eidlist", + /* 231 */ "defer_subclause_opt", + /* 232 */ "orconf", + /* 233 */ "resolvetype", + /* 234 */ "raisetype", + /* 235 */ "ifexists", + /* 236 */ "fullname", + /* 237 */ "selectnowith", + /* 238 */ "oneselect", + /* 239 */ "wqlist", + /* 240 */ "multiselect_op", + /* 241 */ "distinct", + /* 242 */ "selcollist", + /* 243 */ "from", + /* 244 */ "where_opt", + /* 245 */ "groupby_opt", + /* 246 */ "having_opt", + /* 247 */ "orderby_opt", + /* 248 */ "limit_opt", + /* 249 */ "window_clause", + /* 250 */ "values", + /* 251 */ "nexprlist", + /* 252 */ "sclp", + /* 253 */ "as", + /* 254 */ "seltablist", + /* 255 */ "stl_prefix", + /* 256 */ "joinop", + /* 257 */ "indexed_opt", + /* 258 */ "on_opt", + /* 259 */ "using_opt", + /* 260 */ "exprlist", + /* 261 */ "xfullname", + /* 262 */ "idlist", + /* 263 */ "nulls", + /* 264 */ "with", + /* 265 */ "where_opt_ret", + /* 266 */ "setlist", + /* 267 */ "insert_cmd", + /* 268 */ "idlist_opt", + /* 269 */ "upsert", + /* 270 */ "returning", + /* 271 */ "filter_over", + /* 272 */ "likeop", + /* 273 */ "between_op", + /* 274 */ "in_op", + /* 275 */ "paren_exprlist", + /* 276 */ "case_operand", + /* 277 */ "case_exprlist", + /* 278 */ "case_else", + /* 279 */ "uniqueflag", + /* 280 */ "collate", + /* 281 */ "vinto", + /* 282 */ "nmnum", + /* 283 */ "trigger_decl", + /* 284 */ "trigger_cmd_list", + /* 285 */ "trigger_time", + /* 286 */ "trigger_event", + /* 287 */ "foreach_clause", + /* 288 */ "when_clause", + /* 289 */ "trigger_cmd", + /* 290 */ "trnm", + /* 291 */ "tridxby", + /* 292 */ "database_kw_opt", + /* 293 */ "key_opt", + /* 294 */ "add_column_fullname", + /* 295 */ "kwcolumn_opt", + /* 296 */ "create_vtab", + /* 297 */ "vtabarglist", + /* 298 */ "vtabarg", + /* 299 */ "vtabargtoken", + /* 300 */ "lp", + /* 301 */ "anylist", + /* 302 */ "wqitem", + /* 303 */ "wqas", + /* 304 */ "windowdefn_list", + /* 305 */ "windowdefn", + /* 306 */ "window", + /* 307 */ "frame_opt", + /* 308 */ "part_opt", + /* 309 */ "filter_clause", + /* 310 */ "over_clause", + /* 311 */ "range_or_rows", + /* 312 */ "frame_bound", + /* 313 */ "frame_bound_s", + /* 314 */ "frame_bound_e", + /* 315 */ "frame_exclude_opt", + /* 316 */ "frame_exclude", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -155932,243 +159948,256 @@ static const char *const yyRuleName[] = { /* 145 */ "limit_opt ::= LIMIT expr", /* 146 */ "limit_opt ::= LIMIT expr OFFSET expr", /* 147 */ "limit_opt ::= LIMIT expr COMMA expr", - /* 148 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt orderby_opt limit_opt", + /* 148 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt", /* 149 */ "where_opt ::=", /* 150 
*/ "where_opt ::= WHERE expr", - /* 151 */ "cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt orderby_opt limit_opt", - /* 152 */ "setlist ::= setlist COMMA nm EQ expr", - /* 153 */ "setlist ::= setlist COMMA LP idlist RP EQ expr", - /* 154 */ "setlist ::= nm EQ expr", - /* 155 */ "setlist ::= LP idlist RP EQ expr", - /* 156 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert", - /* 157 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES", - /* 158 */ "upsert ::=", - /* 159 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt", - /* 160 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING", - /* 161 */ "upsert ::= ON CONFLICT DO NOTHING", - /* 162 */ "insert_cmd ::= INSERT orconf", - /* 163 */ "insert_cmd ::= REPLACE", - /* 164 */ "idlist_opt ::=", - /* 165 */ "idlist_opt ::= LP idlist RP", - /* 166 */ "idlist ::= idlist COMMA nm", - /* 167 */ "idlist ::= nm", - /* 168 */ "expr ::= LP expr RP", - /* 169 */ "expr ::= ID|INDEXED", - /* 170 */ "expr ::= JOIN_KW", - /* 171 */ "expr ::= nm DOT nm", - /* 172 */ "expr ::= nm DOT nm DOT nm", - /* 173 */ "term ::= NULL|FLOAT|BLOB", - /* 174 */ "term ::= STRING", - /* 175 */ "term ::= INTEGER", - /* 176 */ "expr ::= VARIABLE", - /* 177 */ "expr ::= expr COLLATE ID|STRING", - /* 178 */ "expr ::= CAST LP expr AS typetoken RP", - /* 179 */ "expr ::= ID|INDEXED LP distinct exprlist RP", - /* 180 */ "expr ::= ID|INDEXED LP STAR RP", - /* 181 */ "expr ::= ID|INDEXED LP distinct exprlist RP filter_over", - /* 182 */ "expr ::= ID|INDEXED LP STAR RP filter_over", - /* 183 */ "term ::= CTIME_KW", - /* 184 */ "expr ::= LP nexprlist COMMA expr RP", - /* 185 */ "expr ::= expr AND expr", - /* 186 */ "expr ::= expr OR expr", - /* 187 */ "expr ::= expr LT|GT|GE|LE expr", - /* 188 */ "expr ::= expr EQ|NE expr", - /* 189 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", - /* 190 */ "expr ::= expr PLUS|MINUS expr", - /* 191 */ "expr ::= expr STAR|SLASH|REM expr", - /* 192 */ "expr ::= expr CONCAT expr", - /* 193 */ "likeop ::= NOT LIKE_KW|MATCH", - /* 194 */ "expr ::= expr likeop expr", - /* 195 */ "expr ::= expr likeop expr ESCAPE expr", - /* 196 */ "expr ::= expr ISNULL|NOTNULL", - /* 197 */ "expr ::= expr NOT NULL", - /* 198 */ "expr ::= expr IS expr", - /* 199 */ "expr ::= expr IS NOT expr", - /* 200 */ "expr ::= NOT expr", - /* 201 */ "expr ::= BITNOT expr", - /* 202 */ "expr ::= PLUS|MINUS expr", - /* 203 */ "between_op ::= BETWEEN", - /* 204 */ "between_op ::= NOT BETWEEN", - /* 205 */ "expr ::= expr between_op expr AND expr", - /* 206 */ "in_op ::= IN", - /* 207 */ "in_op ::= NOT IN", - /* 208 */ "expr ::= expr in_op LP exprlist RP", - /* 209 */ "expr ::= LP select RP", - /* 210 */ "expr ::= expr in_op LP select RP", - /* 211 */ "expr ::= expr in_op nm dbnm paren_exprlist", - /* 212 */ "expr ::= EXISTS LP select RP", - /* 213 */ "expr ::= CASE case_operand case_exprlist case_else END", - /* 214 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", - /* 215 */ "case_exprlist ::= WHEN expr THEN expr", - /* 216 */ "case_else ::= ELSE expr", - /* 217 */ "case_else ::=", - /* 218 */ "case_operand ::= expr", - /* 219 */ "case_operand ::=", - /* 220 */ "exprlist ::=", - /* 221 */ "nexprlist ::= nexprlist COMMA expr", - /* 222 */ "nexprlist ::= expr", - /* 223 */ "paren_exprlist ::=", - /* 224 */ "paren_exprlist ::= LP exprlist RP", - /* 225 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", - /* 226 */ "uniqueflag ::= 
UNIQUE", - /* 227 */ "uniqueflag ::=", - /* 228 */ "eidlist_opt ::=", - /* 229 */ "eidlist_opt ::= LP eidlist RP", - /* 230 */ "eidlist ::= eidlist COMMA nm collate sortorder", - /* 231 */ "eidlist ::= nm collate sortorder", - /* 232 */ "collate ::=", - /* 233 */ "collate ::= COLLATE ID|STRING", - /* 234 */ "cmd ::= DROP INDEX ifexists fullname", - /* 235 */ "cmd ::= VACUUM vinto", - /* 236 */ "cmd ::= VACUUM nm vinto", - /* 237 */ "vinto ::= INTO expr", - /* 238 */ "vinto ::=", - /* 239 */ "cmd ::= PRAGMA nm dbnm", - /* 240 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", - /* 241 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", - /* 242 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", - /* 243 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", - /* 244 */ "plus_num ::= PLUS INTEGER|FLOAT", - /* 245 */ "minus_num ::= MINUS INTEGER|FLOAT", - /* 246 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", - /* 247 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", - /* 248 */ "trigger_time ::= BEFORE|AFTER", - /* 249 */ "trigger_time ::= INSTEAD OF", - /* 250 */ "trigger_time ::=", - /* 251 */ "trigger_event ::= DELETE|INSERT", - /* 252 */ "trigger_event ::= UPDATE", - /* 253 */ "trigger_event ::= UPDATE OF idlist", - /* 254 */ "when_clause ::=", - /* 255 */ "when_clause ::= WHEN expr", - /* 256 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", - /* 257 */ "trigger_cmd_list ::= trigger_cmd SEMI", - /* 258 */ "trnm ::= nm DOT nm", - /* 259 */ "tridxby ::= INDEXED BY nm", - /* 260 */ "tridxby ::= NOT INDEXED", - /* 261 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt", - /* 262 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt", - /* 263 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", - /* 264 */ "trigger_cmd ::= scanpt select scanpt", - /* 265 */ "expr ::= RAISE LP IGNORE RP", - /* 266 */ "expr ::= RAISE LP raisetype COMMA nm RP", - /* 267 */ "raisetype ::= ROLLBACK", - /* 268 */ "raisetype ::= ABORT", - /* 269 */ "raisetype ::= FAIL", - /* 270 */ "cmd ::= DROP TRIGGER ifexists fullname", - /* 271 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", - /* 272 */ "cmd ::= DETACH database_kw_opt expr", - /* 273 */ "key_opt ::=", - /* 274 */ "key_opt ::= KEY expr", - /* 275 */ "cmd ::= REINDEX", - /* 276 */ "cmd ::= REINDEX nm dbnm", - /* 277 */ "cmd ::= ANALYZE", - /* 278 */ "cmd ::= ANALYZE nm dbnm", - /* 279 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", - /* 280 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", - /* 281 */ "add_column_fullname ::= fullname", - /* 282 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm", - /* 283 */ "cmd ::= create_vtab", - /* 284 */ "cmd ::= create_vtab LP vtabarglist RP", - /* 285 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", - /* 286 */ "vtabarg ::=", - /* 287 */ "vtabargtoken ::= ANY", - /* 288 */ "vtabargtoken ::= lp anylist RP", - /* 289 */ "lp ::= LP", - /* 290 */ "with ::= WITH wqlist", - /* 291 */ "with ::= WITH RECURSIVE wqlist", - /* 292 */ "wqlist ::= nm eidlist_opt AS LP select RP", - /* 293 */ "wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP", - /* 294 */ "windowdefn_list ::= windowdefn", - /* 295 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", - /* 296 */ "windowdefn ::= nm AS LP window RP", - /* 297 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", - /* 298 */ "window ::= nm PARTITION BY nexprlist 
orderby_opt frame_opt", - /* 299 */ "window ::= ORDER BY sortlist frame_opt", - /* 300 */ "window ::= nm ORDER BY sortlist frame_opt", - /* 301 */ "window ::= frame_opt", - /* 302 */ "window ::= nm frame_opt", - /* 303 */ "frame_opt ::=", - /* 304 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", - /* 305 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", - /* 306 */ "range_or_rows ::= RANGE|ROWS|GROUPS", - /* 307 */ "frame_bound_s ::= frame_bound", - /* 308 */ "frame_bound_s ::= UNBOUNDED PRECEDING", - /* 309 */ "frame_bound_e ::= frame_bound", - /* 310 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", - /* 311 */ "frame_bound ::= expr PRECEDING|FOLLOWING", - /* 312 */ "frame_bound ::= CURRENT ROW", - /* 313 */ "frame_exclude_opt ::=", - /* 314 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", - /* 315 */ "frame_exclude ::= NO OTHERS", - /* 316 */ "frame_exclude ::= CURRENT ROW", - /* 317 */ "frame_exclude ::= GROUP|TIES", - /* 318 */ "window_clause ::= WINDOW windowdefn_list", - /* 319 */ "filter_over ::= filter_clause over_clause", - /* 320 */ "filter_over ::= over_clause", - /* 321 */ "filter_over ::= filter_clause", - /* 322 */ "over_clause ::= OVER LP window RP", - /* 323 */ "over_clause ::= OVER nm", - /* 324 */ "filter_clause ::= FILTER LP WHERE expr RP", - /* 325 */ "input ::= cmdlist", - /* 326 */ "cmdlist ::= cmdlist ecmd", - /* 327 */ "cmdlist ::= ecmd", - /* 328 */ "ecmd ::= SEMI", - /* 329 */ "ecmd ::= cmdx SEMI", - /* 330 */ "ecmd ::= explain cmdx SEMI", - /* 331 */ "trans_opt ::=", - /* 332 */ "trans_opt ::= TRANSACTION", - /* 333 */ "trans_opt ::= TRANSACTION nm", - /* 334 */ "savepoint_opt ::= SAVEPOINT", - /* 335 */ "savepoint_opt ::=", - /* 336 */ "cmd ::= create_table create_table_args", - /* 337 */ "columnlist ::= columnlist COMMA columnname carglist", - /* 338 */ "columnlist ::= columnname carglist", - /* 339 */ "nm ::= ID|INDEXED", - /* 340 */ "nm ::= STRING", - /* 341 */ "nm ::= JOIN_KW", - /* 342 */ "typetoken ::= typename", - /* 343 */ "typename ::= ID|STRING", - /* 344 */ "signed ::= plus_num", - /* 345 */ "signed ::= minus_num", - /* 346 */ "carglist ::= carglist ccons", - /* 347 */ "carglist ::=", - /* 348 */ "ccons ::= NULL onconf", - /* 349 */ "ccons ::= GENERATED ALWAYS AS generated", - /* 350 */ "ccons ::= AS generated", - /* 351 */ "conslist_opt ::= COMMA conslist", - /* 352 */ "conslist ::= conslist tconscomma tcons", - /* 353 */ "conslist ::= tcons", - /* 354 */ "tconscomma ::=", - /* 355 */ "defer_subclause_opt ::= defer_subclause", - /* 356 */ "resolvetype ::= raisetype", - /* 357 */ "selectnowith ::= oneselect", - /* 358 */ "oneselect ::= values", - /* 359 */ "sclp ::= selcollist COMMA", - /* 360 */ "as ::= ID|STRING", - /* 361 */ "expr ::= term", - /* 362 */ "likeop ::= LIKE_KW|MATCH", - /* 363 */ "exprlist ::= nexprlist", - /* 364 */ "nmnum ::= plus_num", - /* 365 */ "nmnum ::= nm", - /* 366 */ "nmnum ::= ON", - /* 367 */ "nmnum ::= DELETE", - /* 368 */ "nmnum ::= DEFAULT", - /* 369 */ "plus_num ::= INTEGER|FLOAT", - /* 370 */ "foreach_clause ::=", - /* 371 */ "foreach_clause ::= FOR EACH ROW", - /* 372 */ "trnm ::= nm", - /* 373 */ "tridxby ::=", - /* 374 */ "database_kw_opt ::= DATABASE", - /* 375 */ "database_kw_opt ::=", - /* 376 */ "kwcolumn_opt ::=", - /* 377 */ "kwcolumn_opt ::= COLUMNKW", - /* 378 */ "vtabarglist ::= vtabarg", - /* 379 */ "vtabarglist ::= vtabarglist COMMA vtabarg", - /* 380 */ "vtabarg ::= vtabarg vtabargtoken", - /* 381 */ "anylist ::=", - /* 382 */ "anylist ::= anylist 
LP anylist RP", - /* 383 */ "anylist ::= anylist ANY", - /* 384 */ "with ::=", + /* 151 */ "where_opt_ret ::=", + /* 152 */ "where_opt_ret ::= WHERE expr", + /* 153 */ "where_opt_ret ::= RETURNING selcollist", + /* 154 */ "where_opt_ret ::= WHERE expr RETURNING selcollist", + /* 155 */ "cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt", + /* 156 */ "setlist ::= setlist COMMA nm EQ expr", + /* 157 */ "setlist ::= setlist COMMA LP idlist RP EQ expr", + /* 158 */ "setlist ::= nm EQ expr", + /* 159 */ "setlist ::= LP idlist RP EQ expr", + /* 160 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert", + /* 161 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning", + /* 162 */ "upsert ::=", + /* 163 */ "upsert ::= RETURNING selcollist", + /* 164 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert", + /* 165 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert", + /* 166 */ "upsert ::= ON CONFLICT DO NOTHING returning", + /* 167 */ "upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning", + /* 168 */ "returning ::= RETURNING selcollist", + /* 169 */ "insert_cmd ::= INSERT orconf", + /* 170 */ "insert_cmd ::= REPLACE", + /* 171 */ "idlist_opt ::=", + /* 172 */ "idlist_opt ::= LP idlist RP", + /* 173 */ "idlist ::= idlist COMMA nm", + /* 174 */ "idlist ::= nm", + /* 175 */ "expr ::= LP expr RP", + /* 176 */ "expr ::= ID|INDEXED", + /* 177 */ "expr ::= JOIN_KW", + /* 178 */ "expr ::= nm DOT nm", + /* 179 */ "expr ::= nm DOT nm DOT nm", + /* 180 */ "term ::= NULL|FLOAT|BLOB", + /* 181 */ "term ::= STRING", + /* 182 */ "term ::= INTEGER", + /* 183 */ "expr ::= VARIABLE", + /* 184 */ "expr ::= expr COLLATE ID|STRING", + /* 185 */ "expr ::= CAST LP expr AS typetoken RP", + /* 186 */ "expr ::= ID|INDEXED LP distinct exprlist RP", + /* 187 */ "expr ::= ID|INDEXED LP STAR RP", + /* 188 */ "expr ::= ID|INDEXED LP distinct exprlist RP filter_over", + /* 189 */ "expr ::= ID|INDEXED LP STAR RP filter_over", + /* 190 */ "term ::= CTIME_KW", + /* 191 */ "expr ::= LP nexprlist COMMA expr RP", + /* 192 */ "expr ::= expr AND expr", + /* 193 */ "expr ::= expr OR expr", + /* 194 */ "expr ::= expr LT|GT|GE|LE expr", + /* 195 */ "expr ::= expr EQ|NE expr", + /* 196 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", + /* 197 */ "expr ::= expr PLUS|MINUS expr", + /* 198 */ "expr ::= expr STAR|SLASH|REM expr", + /* 199 */ "expr ::= expr CONCAT expr", + /* 200 */ "likeop ::= NOT LIKE_KW|MATCH", + /* 201 */ "expr ::= expr likeop expr", + /* 202 */ "expr ::= expr likeop expr ESCAPE expr", + /* 203 */ "expr ::= expr ISNULL|NOTNULL", + /* 204 */ "expr ::= expr NOT NULL", + /* 205 */ "expr ::= expr IS expr", + /* 206 */ "expr ::= expr IS NOT expr", + /* 207 */ "expr ::= NOT expr", + /* 208 */ "expr ::= BITNOT expr", + /* 209 */ "expr ::= PLUS|MINUS expr", + /* 210 */ "between_op ::= BETWEEN", + /* 211 */ "between_op ::= NOT BETWEEN", + /* 212 */ "expr ::= expr between_op expr AND expr", + /* 213 */ "in_op ::= IN", + /* 214 */ "in_op ::= NOT IN", + /* 215 */ "expr ::= expr in_op LP exprlist RP", + /* 216 */ "expr ::= LP select RP", + /* 217 */ "expr ::= expr in_op LP select RP", + /* 218 */ "expr ::= expr in_op nm dbnm paren_exprlist", + /* 219 */ "expr ::= EXISTS LP select RP", + /* 220 */ "expr ::= CASE case_operand case_exprlist case_else END", + /* 221 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", + /* 222 */ "case_exprlist ::= WHEN expr THEN expr", + /* 
223 */ "case_else ::= ELSE expr", + /* 224 */ "case_else ::=", + /* 225 */ "case_operand ::= expr", + /* 226 */ "case_operand ::=", + /* 227 */ "exprlist ::=", + /* 228 */ "nexprlist ::= nexprlist COMMA expr", + /* 229 */ "nexprlist ::= expr", + /* 230 */ "paren_exprlist ::=", + /* 231 */ "paren_exprlist ::= LP exprlist RP", + /* 232 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", + /* 233 */ "uniqueflag ::= UNIQUE", + /* 234 */ "uniqueflag ::=", + /* 235 */ "eidlist_opt ::=", + /* 236 */ "eidlist_opt ::= LP eidlist RP", + /* 237 */ "eidlist ::= eidlist COMMA nm collate sortorder", + /* 238 */ "eidlist ::= nm collate sortorder", + /* 239 */ "collate ::=", + /* 240 */ "collate ::= COLLATE ID|STRING", + /* 241 */ "cmd ::= DROP INDEX ifexists fullname", + /* 242 */ "cmd ::= VACUUM vinto", + /* 243 */ "cmd ::= VACUUM nm vinto", + /* 244 */ "vinto ::= INTO expr", + /* 245 */ "vinto ::=", + /* 246 */ "cmd ::= PRAGMA nm dbnm", + /* 247 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", + /* 248 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", + /* 249 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", + /* 250 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", + /* 251 */ "plus_num ::= PLUS INTEGER|FLOAT", + /* 252 */ "minus_num ::= MINUS INTEGER|FLOAT", + /* 253 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", + /* 254 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", + /* 255 */ "trigger_time ::= BEFORE|AFTER", + /* 256 */ "trigger_time ::= INSTEAD OF", + /* 257 */ "trigger_time ::=", + /* 258 */ "trigger_event ::= DELETE|INSERT", + /* 259 */ "trigger_event ::= UPDATE", + /* 260 */ "trigger_event ::= UPDATE OF idlist", + /* 261 */ "when_clause ::=", + /* 262 */ "when_clause ::= WHEN expr", + /* 263 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", + /* 264 */ "trigger_cmd_list ::= trigger_cmd SEMI", + /* 265 */ "trnm ::= nm DOT nm", + /* 266 */ "tridxby ::= INDEXED BY nm", + /* 267 */ "tridxby ::= NOT INDEXED", + /* 268 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt", + /* 269 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt", + /* 270 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt", + /* 271 */ "trigger_cmd ::= scanpt select scanpt", + /* 272 */ "expr ::= RAISE LP IGNORE RP", + /* 273 */ "expr ::= RAISE LP raisetype COMMA nm RP", + /* 274 */ "raisetype ::= ROLLBACK", + /* 275 */ "raisetype ::= ABORT", + /* 276 */ "raisetype ::= FAIL", + /* 277 */ "cmd ::= DROP TRIGGER ifexists fullname", + /* 278 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", + /* 279 */ "cmd ::= DETACH database_kw_opt expr", + /* 280 */ "key_opt ::=", + /* 281 */ "key_opt ::= KEY expr", + /* 282 */ "cmd ::= REINDEX", + /* 283 */ "cmd ::= REINDEX nm dbnm", + /* 284 */ "cmd ::= ANALYZE", + /* 285 */ "cmd ::= ANALYZE nm dbnm", + /* 286 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", + /* 287 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", + /* 288 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm", + /* 289 */ "add_column_fullname ::= fullname", + /* 290 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm", + /* 291 */ "cmd ::= create_vtab", + /* 292 */ "cmd ::= create_vtab LP vtabarglist RP", + /* 293 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", + /* 294 */ "vtabarg ::=", + /* 295 */ "vtabargtoken ::= ANY", + /* 296 */ "vtabargtoken ::= lp anylist RP", + /* 
297 */ "lp ::= LP", + /* 298 */ "with ::= WITH wqlist", + /* 299 */ "with ::= WITH RECURSIVE wqlist", + /* 300 */ "wqas ::= AS", + /* 301 */ "wqas ::= AS MATERIALIZED", + /* 302 */ "wqas ::= AS NOT MATERIALIZED", + /* 303 */ "wqitem ::= nm eidlist_opt wqas LP select RP", + /* 304 */ "wqlist ::= wqitem", + /* 305 */ "wqlist ::= wqlist COMMA wqitem", + /* 306 */ "windowdefn_list ::= windowdefn", + /* 307 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", + /* 308 */ "windowdefn ::= nm AS LP window RP", + /* 309 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", + /* 310 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", + /* 311 */ "window ::= ORDER BY sortlist frame_opt", + /* 312 */ "window ::= nm ORDER BY sortlist frame_opt", + /* 313 */ "window ::= frame_opt", + /* 314 */ "window ::= nm frame_opt", + /* 315 */ "frame_opt ::=", + /* 316 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", + /* 317 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", + /* 318 */ "range_or_rows ::= RANGE|ROWS|GROUPS", + /* 319 */ "frame_bound_s ::= frame_bound", + /* 320 */ "frame_bound_s ::= UNBOUNDED PRECEDING", + /* 321 */ "frame_bound_e ::= frame_bound", + /* 322 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", + /* 323 */ "frame_bound ::= expr PRECEDING|FOLLOWING", + /* 324 */ "frame_bound ::= CURRENT ROW", + /* 325 */ "frame_exclude_opt ::=", + /* 326 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", + /* 327 */ "frame_exclude ::= NO OTHERS", + /* 328 */ "frame_exclude ::= CURRENT ROW", + /* 329 */ "frame_exclude ::= GROUP|TIES", + /* 330 */ "window_clause ::= WINDOW windowdefn_list", + /* 331 */ "filter_over ::= filter_clause over_clause", + /* 332 */ "filter_over ::= over_clause", + /* 333 */ "filter_over ::= filter_clause", + /* 334 */ "over_clause ::= OVER LP window RP", + /* 335 */ "over_clause ::= OVER nm", + /* 336 */ "filter_clause ::= FILTER LP WHERE expr RP", + /* 337 */ "input ::= cmdlist", + /* 338 */ "cmdlist ::= cmdlist ecmd", + /* 339 */ "cmdlist ::= ecmd", + /* 340 */ "ecmd ::= SEMI", + /* 341 */ "ecmd ::= cmdx SEMI", + /* 342 */ "ecmd ::= explain cmdx SEMI", + /* 343 */ "trans_opt ::=", + /* 344 */ "trans_opt ::= TRANSACTION", + /* 345 */ "trans_opt ::= TRANSACTION nm", + /* 346 */ "savepoint_opt ::= SAVEPOINT", + /* 347 */ "savepoint_opt ::=", + /* 348 */ "cmd ::= create_table create_table_args", + /* 349 */ "columnlist ::= columnlist COMMA columnname carglist", + /* 350 */ "columnlist ::= columnname carglist", + /* 351 */ "nm ::= ID|INDEXED", + /* 352 */ "nm ::= STRING", + /* 353 */ "nm ::= JOIN_KW", + /* 354 */ "typetoken ::= typename", + /* 355 */ "typename ::= ID|STRING", + /* 356 */ "signed ::= plus_num", + /* 357 */ "signed ::= minus_num", + /* 358 */ "carglist ::= carglist ccons", + /* 359 */ "carglist ::=", + /* 360 */ "ccons ::= NULL onconf", + /* 361 */ "ccons ::= GENERATED ALWAYS AS generated", + /* 362 */ "ccons ::= AS generated", + /* 363 */ "conslist_opt ::= COMMA conslist", + /* 364 */ "conslist ::= conslist tconscomma tcons", + /* 365 */ "conslist ::= tcons", + /* 366 */ "tconscomma ::=", + /* 367 */ "defer_subclause_opt ::= defer_subclause", + /* 368 */ "resolvetype ::= raisetype", + /* 369 */ "selectnowith ::= oneselect", + /* 370 */ "oneselect ::= values", + /* 371 */ "sclp ::= selcollist COMMA", + /* 372 */ "as ::= ID|STRING", + /* 373 */ "returning ::=", + /* 374 */ "expr ::= term", + /* 375 */ "likeop ::= LIKE_KW|MATCH", + /* 376 */ "exprlist ::= nexprlist", + /* 377 */ "nmnum ::= 
plus_num", + /* 378 */ "nmnum ::= nm", + /* 379 */ "nmnum ::= ON", + /* 380 */ "nmnum ::= DELETE", + /* 381 */ "nmnum ::= DEFAULT", + /* 382 */ "plus_num ::= INTEGER|FLOAT", + /* 383 */ "foreach_clause ::=", + /* 384 */ "foreach_clause ::= FOR EACH ROW", + /* 385 */ "trnm ::= nm", + /* 386 */ "tridxby ::=", + /* 387 */ "database_kw_opt ::= DATABASE", + /* 388 */ "database_kw_opt ::=", + /* 389 */ "kwcolumn_opt ::=", + /* 390 */ "kwcolumn_opt ::= COLUMNKW", + /* 391 */ "vtabarglist ::= vtabarg", + /* 392 */ "vtabarglist ::= vtabarglist COMMA vtabarg", + /* 393 */ "vtabarg ::= vtabarg vtabargtoken", + /* 394 */ "anylist ::=", + /* 395 */ "anylist ::= anylist LP anylist RP", + /* 396 */ "anylist ::= anylist ANY", + /* 397 */ "with ::=", }; #endif /* NDEBUG */ @@ -156294,98 +160323,99 @@ static void yy_destructor( ** inside the C code. */ /********* Begin destructor definitions ***************************************/ - case 200: /* select */ - case 234: /* selectnowith */ - case 235: /* oneselect */ - case 247: /* values */ + case 203: /* select */ + case 237: /* selectnowith */ + case 238: /* oneselect */ + case 250: /* values */ { -sqlite3SelectDelete(pParse->db, (yypminor->yy539)); -} - break; - case 211: /* term */ - case 212: /* expr */ - case 241: /* where_opt */ - case 243: /* having_opt */ - case 255: /* on_opt */ - case 271: /* case_operand */ - case 273: /* case_else */ - case 276: /* vinto */ - case 283: /* when_clause */ - case 288: /* key_opt */ - case 302: /* filter_clause */ +sqlite3SelectDelete(pParse->db, (yypminor->yy81)); +} + break; + case 214: /* term */ + case 215: /* expr */ + case 244: /* where_opt */ + case 246: /* having_opt */ + case 258: /* on_opt */ + case 265: /* where_opt_ret */ + case 276: /* case_operand */ + case 278: /* case_else */ + case 281: /* vinto */ + case 288: /* when_clause */ + case 293: /* key_opt */ + case 309: /* filter_clause */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy202)); -} - break; - case 216: /* eidlist_opt */ - case 226: /* sortlist */ - case 227: /* eidlist */ - case 239: /* selcollist */ - case 242: /* groupby_opt */ - case 244: /* orderby_opt */ - case 248: /* nexprlist */ - case 249: /* sclp */ - case 257: /* exprlist */ - case 262: /* setlist */ - case 270: /* paren_exprlist */ - case 272: /* case_exprlist */ - case 301: /* part_opt */ +sqlite3ExprDelete(pParse->db, (yypminor->yy404)); +} + break; + case 219: /* eidlist_opt */ + case 229: /* sortlist */ + case 230: /* eidlist */ + case 242: /* selcollist */ + case 245: /* groupby_opt */ + case 247: /* orderby_opt */ + case 251: /* nexprlist */ + case 252: /* sclp */ + case 260: /* exprlist */ + case 266: /* setlist */ + case 275: /* paren_exprlist */ + case 277: /* case_exprlist */ + case 308: /* part_opt */ { -sqlite3ExprListDelete(pParse->db, (yypminor->yy242)); +sqlite3ExprListDelete(pParse->db, (yypminor->yy70)); } break; - case 233: /* fullname */ - case 240: /* from */ - case 251: /* seltablist */ - case 252: /* stl_prefix */ - case 258: /* xfullname */ + case 236: /* fullname */ + case 243: /* from */ + case 254: /* seltablist */ + case 255: /* stl_prefix */ + case 261: /* xfullname */ { -sqlite3SrcListDelete(pParse->db, (yypminor->yy47)); +sqlite3SrcListDelete(pParse->db, (yypminor->yy153)); } break; - case 236: /* wqlist */ + case 239: /* wqlist */ { -sqlite3WithDelete(pParse->db, (yypminor->yy131)); +sqlite3WithDelete(pParse->db, (yypminor->yy103)); } break; - case 246: /* window_clause */ - case 297: /* windowdefn_list */ + case 249: /* window_clause */ + case 304: 
/* windowdefn_list */ { -sqlite3WindowListDelete(pParse->db, (yypminor->yy303)); +sqlite3WindowListDelete(pParse->db, (yypminor->yy49)); } break; - case 256: /* using_opt */ - case 259: /* idlist */ - case 264: /* idlist_opt */ + case 259: /* using_opt */ + case 262: /* idlist */ + case 268: /* idlist_opt */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy600)); +sqlite3IdListDelete(pParse->db, (yypminor->yy436)); } break; - case 266: /* filter_over */ - case 298: /* windowdefn */ - case 299: /* window */ - case 300: /* frame_opt */ - case 303: /* over_clause */ + case 271: /* filter_over */ + case 305: /* windowdefn */ + case 306: /* window */ + case 307: /* frame_opt */ + case 310: /* over_clause */ { -sqlite3WindowDelete(pParse->db, (yypminor->yy303)); +sqlite3WindowDelete(pParse->db, (yypminor->yy49)); } break; - case 279: /* trigger_cmd_list */ - case 284: /* trigger_cmd */ + case 284: /* trigger_cmd_list */ + case 289: /* trigger_cmd */ { -sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy447)); +sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy157)); } break; - case 281: /* trigger_event */ + case 286: /* trigger_event */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy230).b); +sqlite3IdListDelete(pParse->db, (yypminor->yy262).b); } break; - case 305: /* frame_bound */ - case 306: /* frame_bound_s */ - case 307: /* frame_bound_e */ + case 312: /* frame_bound */ + case 313: /* frame_bound_s */ + case 314: /* frame_bound_e */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy77).pExpr); +sqlite3ExprDelete(pParse->db, (yypminor->yy117).pExpr); } break; /********* End destructor definitions *****************************************/ @@ -156552,7 +160582,7 @@ static YYACTIONTYPE yy_find_shift_action( #endif /* YYWILDCARD */ return yy_default[stateno]; }else{ - assert( i>=0 && i<sizeof(yy_action)/sizeof(yy_action[0]) ); + assert( i>=0 && i<(int)(sizeof(yy_action)/sizeof(yy_action[0])) ); return yy_action[i]; } }while(1); @@ -156676,391 +160706,404 @@ static void yy_shift( /* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side ** of that rule */ static const YYCODETYPE yyRuleInfoLhs[] = { - 185, /* (0) explain ::= EXPLAIN */ - 185, /* (1) explain ::= EXPLAIN QUERY PLAN */ - 184, /* (2) cmdx ::= cmd */ - 186, /* (3) cmd ::= BEGIN transtype trans_opt */ - 187, /* (4) transtype ::= */ - 187, /* (5) transtype ::= DEFERRED */ - 187, /* (6) transtype ::= IMMEDIATE */ - 187, /* (7) transtype ::= EXCLUSIVE */ - 186, /* (8) cmd ::= COMMIT|END trans_opt */ - 186, /* (9) cmd ::= ROLLBACK trans_opt */ - 186, /* (10) cmd ::= SAVEPOINT nm */ - 186, /* (11) cmd ::= RELEASE savepoint_opt nm */ - 186, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ - 191, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ - 193, /* (14) createkw ::= CREATE */ - 195, /* (15) ifnotexists ::= */ - 195, /* (16) ifnotexists ::= IF NOT EXISTS */ - 194, /* (17) temp ::= TEMP */ - 194, /* (18) temp ::= */ - 192, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_options */ - 192, /* (20) create_table_args ::= AS select */ - 199, /* (21) table_options ::= */ - 199, /* (22) table_options ::= WITHOUT nm */ - 201, /* (23) columnname ::= nm typetoken */ - 203, /* (24) typetoken ::= */ - 203, /* (25) typetoken ::= typename LP signed RP */ - 203, /* (26) typetoken ::= typename LP signed COMMA signed RP */ - 204, /* (27) typename ::= typename ID|STRING */ - 208, /* (28) scanpt ::= */ - 209, /* (29) scantok ::= */ - 210, /* (30) ccons ::= CONSTRAINT nm */ - 210, /* (31) ccons ::= DEFAULT scantok term */ - 210, /* (32) ccons ::= DEFAULT
LP expr RP */ - 210, /* (33) ccons ::= DEFAULT PLUS scantok term */ - 210, /* (34) ccons ::= DEFAULT MINUS scantok term */ - 210, /* (35) ccons ::= DEFAULT scantok ID|INDEXED */ - 210, /* (36) ccons ::= NOT NULL onconf */ - 210, /* (37) ccons ::= PRIMARY KEY sortorder onconf autoinc */ - 210, /* (38) ccons ::= UNIQUE onconf */ - 210, /* (39) ccons ::= CHECK LP expr RP */ - 210, /* (40) ccons ::= REFERENCES nm eidlist_opt refargs */ - 210, /* (41) ccons ::= defer_subclause */ - 210, /* (42) ccons ::= COLLATE ID|STRING */ - 219, /* (43) generated ::= LP expr RP */ - 219, /* (44) generated ::= LP expr RP ID */ - 215, /* (45) autoinc ::= */ - 215, /* (46) autoinc ::= AUTOINCR */ - 217, /* (47) refargs ::= */ - 217, /* (48) refargs ::= refargs refarg */ - 220, /* (49) refarg ::= MATCH nm */ - 220, /* (50) refarg ::= ON INSERT refact */ - 220, /* (51) refarg ::= ON DELETE refact */ - 220, /* (52) refarg ::= ON UPDATE refact */ - 221, /* (53) refact ::= SET NULL */ - 221, /* (54) refact ::= SET DEFAULT */ - 221, /* (55) refact ::= CASCADE */ - 221, /* (56) refact ::= RESTRICT */ - 221, /* (57) refact ::= NO ACTION */ - 218, /* (58) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ - 218, /* (59) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ - 222, /* (60) init_deferred_pred_opt ::= */ - 222, /* (61) init_deferred_pred_opt ::= INITIALLY DEFERRED */ - 222, /* (62) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ - 198, /* (63) conslist_opt ::= */ - 224, /* (64) tconscomma ::= COMMA */ - 225, /* (65) tcons ::= CONSTRAINT nm */ - 225, /* (66) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ - 225, /* (67) tcons ::= UNIQUE LP sortlist RP onconf */ - 225, /* (68) tcons ::= CHECK LP expr RP onconf */ - 225, /* (69) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ - 228, /* (70) defer_subclause_opt ::= */ - 213, /* (71) onconf ::= */ - 213, /* (72) onconf ::= ON CONFLICT resolvetype */ - 229, /* (73) orconf ::= */ - 229, /* (74) orconf ::= OR resolvetype */ - 230, /* (75) resolvetype ::= IGNORE */ - 230, /* (76) resolvetype ::= REPLACE */ - 186, /* (77) cmd ::= DROP TABLE ifexists fullname */ - 232, /* (78) ifexists ::= IF EXISTS */ - 232, /* (79) ifexists ::= */ - 186, /* (80) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ - 186, /* (81) cmd ::= DROP VIEW ifexists fullname */ - 186, /* (82) cmd ::= select */ - 200, /* (83) select ::= WITH wqlist selectnowith */ - 200, /* (84) select ::= WITH RECURSIVE wqlist selectnowith */ - 200, /* (85) select ::= selectnowith */ - 234, /* (86) selectnowith ::= selectnowith multiselect_op oneselect */ - 237, /* (87) multiselect_op ::= UNION */ - 237, /* (88) multiselect_op ::= UNION ALL */ - 237, /* (89) multiselect_op ::= EXCEPT|INTERSECT */ - 235, /* (90) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ - 235, /* (91) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ - 247, /* (92) values ::= VALUES LP nexprlist RP */ - 247, /* (93) values ::= values COMMA LP nexprlist RP */ - 238, /* (94) distinct ::= DISTINCT */ - 238, /* (95) distinct ::= ALL */ - 238, /* (96) distinct ::= */ - 249, /* (97) sclp ::= */ - 239, /* (98) selcollist ::= sclp scanpt expr scanpt as */ - 239, /* (99) selcollist ::= sclp scanpt STAR */ - 239, /* (100) selcollist ::= sclp scanpt nm DOT STAR */ - 250, /* (101) as ::= AS nm */ - 250, /* (102) as ::= */ - 240, /* (103) from 
::= */ - 240, /* (104) from ::= FROM seltablist */ - 252, /* (105) stl_prefix ::= seltablist joinop */ - 252, /* (106) stl_prefix ::= */ - 251, /* (107) seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */ - 251, /* (108) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */ - 251, /* (109) seltablist ::= stl_prefix LP select RP as on_opt using_opt */ - 251, /* (110) seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ - 196, /* (111) dbnm ::= */ - 196, /* (112) dbnm ::= DOT nm */ - 233, /* (113) fullname ::= nm */ - 233, /* (114) fullname ::= nm DOT nm */ - 258, /* (115) xfullname ::= nm */ - 258, /* (116) xfullname ::= nm DOT nm */ - 258, /* (117) xfullname ::= nm DOT nm AS nm */ - 258, /* (118) xfullname ::= nm AS nm */ - 253, /* (119) joinop ::= COMMA|JOIN */ - 253, /* (120) joinop ::= JOIN_KW JOIN */ - 253, /* (121) joinop ::= JOIN_KW nm JOIN */ - 253, /* (122) joinop ::= JOIN_KW nm nm JOIN */ - 255, /* (123) on_opt ::= ON expr */ - 255, /* (124) on_opt ::= */ - 254, /* (125) indexed_opt ::= */ - 254, /* (126) indexed_opt ::= INDEXED BY nm */ - 254, /* (127) indexed_opt ::= NOT INDEXED */ - 256, /* (128) using_opt ::= USING LP idlist RP */ - 256, /* (129) using_opt ::= */ - 244, /* (130) orderby_opt ::= */ - 244, /* (131) orderby_opt ::= ORDER BY sortlist */ - 226, /* (132) sortlist ::= sortlist COMMA expr sortorder nulls */ - 226, /* (133) sortlist ::= expr sortorder nulls */ - 214, /* (134) sortorder ::= ASC */ - 214, /* (135) sortorder ::= DESC */ - 214, /* (136) sortorder ::= */ - 260, /* (137) nulls ::= NULLS FIRST */ - 260, /* (138) nulls ::= NULLS LAST */ - 260, /* (139) nulls ::= */ - 242, /* (140) groupby_opt ::= */ - 242, /* (141) groupby_opt ::= GROUP BY nexprlist */ - 243, /* (142) having_opt ::= */ - 243, /* (143) having_opt ::= HAVING expr */ - 245, /* (144) limit_opt ::= */ - 245, /* (145) limit_opt ::= LIMIT expr */ - 245, /* (146) limit_opt ::= LIMIT expr OFFSET expr */ - 245, /* (147) limit_opt ::= LIMIT expr COMMA expr */ - 186, /* (148) cmd ::= with DELETE FROM xfullname indexed_opt where_opt orderby_opt limit_opt */ - 241, /* (149) where_opt ::= */ - 241, /* (150) where_opt ::= WHERE expr */ - 186, /* (151) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt orderby_opt limit_opt */ - 262, /* (152) setlist ::= setlist COMMA nm EQ expr */ - 262, /* (153) setlist ::= setlist COMMA LP idlist RP EQ expr */ - 262, /* (154) setlist ::= nm EQ expr */ - 262, /* (155) setlist ::= LP idlist RP EQ expr */ - 186, /* (156) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ - 186, /* (157) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES */ - 265, /* (158) upsert ::= */ - 265, /* (159) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt */ - 265, /* (160) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING */ - 265, /* (161) upsert ::= ON CONFLICT DO NOTHING */ - 263, /* (162) insert_cmd ::= INSERT orconf */ - 263, /* (163) insert_cmd ::= REPLACE */ - 264, /* (164) idlist_opt ::= */ - 264, /* (165) idlist_opt ::= LP idlist RP */ - 259, /* (166) idlist ::= idlist COMMA nm */ - 259, /* (167) idlist ::= nm */ - 212, /* (168) expr ::= LP expr RP */ - 212, /* (169) expr ::= ID|INDEXED */ - 212, /* (170) expr ::= JOIN_KW */ - 212, /* (171) expr ::= nm DOT nm */ - 212, /* (172) expr ::= nm DOT nm DOT nm */ - 211, /* (173) term ::= NULL|FLOAT|BLOB */ - 211, /* (174) term ::= STRING */ - 211, /* (175) term ::= INTEGER */ - 212, /* (176) expr ::= 
VARIABLE */ - 212, /* (177) expr ::= expr COLLATE ID|STRING */ - 212, /* (178) expr ::= CAST LP expr AS typetoken RP */ - 212, /* (179) expr ::= ID|INDEXED LP distinct exprlist RP */ - 212, /* (180) expr ::= ID|INDEXED LP STAR RP */ - 212, /* (181) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ - 212, /* (182) expr ::= ID|INDEXED LP STAR RP filter_over */ - 211, /* (183) term ::= CTIME_KW */ - 212, /* (184) expr ::= LP nexprlist COMMA expr RP */ - 212, /* (185) expr ::= expr AND expr */ - 212, /* (186) expr ::= expr OR expr */ - 212, /* (187) expr ::= expr LT|GT|GE|LE expr */ - 212, /* (188) expr ::= expr EQ|NE expr */ - 212, /* (189) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - 212, /* (190) expr ::= expr PLUS|MINUS expr */ - 212, /* (191) expr ::= expr STAR|SLASH|REM expr */ - 212, /* (192) expr ::= expr CONCAT expr */ - 267, /* (193) likeop ::= NOT LIKE_KW|MATCH */ - 212, /* (194) expr ::= expr likeop expr */ - 212, /* (195) expr ::= expr likeop expr ESCAPE expr */ - 212, /* (196) expr ::= expr ISNULL|NOTNULL */ - 212, /* (197) expr ::= expr NOT NULL */ - 212, /* (198) expr ::= expr IS expr */ - 212, /* (199) expr ::= expr IS NOT expr */ - 212, /* (200) expr ::= NOT expr */ - 212, /* (201) expr ::= BITNOT expr */ - 212, /* (202) expr ::= PLUS|MINUS expr */ - 268, /* (203) between_op ::= BETWEEN */ - 268, /* (204) between_op ::= NOT BETWEEN */ - 212, /* (205) expr ::= expr between_op expr AND expr */ - 269, /* (206) in_op ::= IN */ - 269, /* (207) in_op ::= NOT IN */ - 212, /* (208) expr ::= expr in_op LP exprlist RP */ - 212, /* (209) expr ::= LP select RP */ - 212, /* (210) expr ::= expr in_op LP select RP */ - 212, /* (211) expr ::= expr in_op nm dbnm paren_exprlist */ - 212, /* (212) expr ::= EXISTS LP select RP */ - 212, /* (213) expr ::= CASE case_operand case_exprlist case_else END */ - 272, /* (214) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - 272, /* (215) case_exprlist ::= WHEN expr THEN expr */ - 273, /* (216) case_else ::= ELSE expr */ - 273, /* (217) case_else ::= */ - 271, /* (218) case_operand ::= expr */ - 271, /* (219) case_operand ::= */ - 257, /* (220) exprlist ::= */ - 248, /* (221) nexprlist ::= nexprlist COMMA expr */ - 248, /* (222) nexprlist ::= expr */ - 270, /* (223) paren_exprlist ::= */ - 270, /* (224) paren_exprlist ::= LP exprlist RP */ - 186, /* (225) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - 274, /* (226) uniqueflag ::= UNIQUE */ - 274, /* (227) uniqueflag ::= */ - 216, /* (228) eidlist_opt ::= */ - 216, /* (229) eidlist_opt ::= LP eidlist RP */ - 227, /* (230) eidlist ::= eidlist COMMA nm collate sortorder */ - 227, /* (231) eidlist ::= nm collate sortorder */ - 275, /* (232) collate ::= */ - 275, /* (233) collate ::= COLLATE ID|STRING */ - 186, /* (234) cmd ::= DROP INDEX ifexists fullname */ - 186, /* (235) cmd ::= VACUUM vinto */ - 186, /* (236) cmd ::= VACUUM nm vinto */ - 276, /* (237) vinto ::= INTO expr */ - 276, /* (238) vinto ::= */ - 186, /* (239) cmd ::= PRAGMA nm dbnm */ - 186, /* (240) cmd ::= PRAGMA nm dbnm EQ nmnum */ - 186, /* (241) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - 186, /* (242) cmd ::= PRAGMA nm dbnm EQ minus_num */ - 186, /* (243) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - 206, /* (244) plus_num ::= PLUS INTEGER|FLOAT */ - 207, /* (245) minus_num ::= MINUS INTEGER|FLOAT */ - 186, /* (246) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - 278, /* (247) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname 
foreach_clause when_clause */ - 280, /* (248) trigger_time ::= BEFORE|AFTER */ - 280, /* (249) trigger_time ::= INSTEAD OF */ - 280, /* (250) trigger_time ::= */ - 281, /* (251) trigger_event ::= DELETE|INSERT */ - 281, /* (252) trigger_event ::= UPDATE */ - 281, /* (253) trigger_event ::= UPDATE OF idlist */ - 283, /* (254) when_clause ::= */ - 283, /* (255) when_clause ::= WHEN expr */ - 279, /* (256) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - 279, /* (257) trigger_cmd_list ::= trigger_cmd SEMI */ - 285, /* (258) trnm ::= nm DOT nm */ - 286, /* (259) tridxby ::= INDEXED BY nm */ - 286, /* (260) tridxby ::= NOT INDEXED */ - 284, /* (261) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - 284, /* (262) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - 284, /* (263) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - 284, /* (264) trigger_cmd ::= scanpt select scanpt */ - 212, /* (265) expr ::= RAISE LP IGNORE RP */ - 212, /* (266) expr ::= RAISE LP raisetype COMMA nm RP */ - 231, /* (267) raisetype ::= ROLLBACK */ - 231, /* (268) raisetype ::= ABORT */ - 231, /* (269) raisetype ::= FAIL */ - 186, /* (270) cmd ::= DROP TRIGGER ifexists fullname */ - 186, /* (271) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - 186, /* (272) cmd ::= DETACH database_kw_opt expr */ - 288, /* (273) key_opt ::= */ - 288, /* (274) key_opt ::= KEY expr */ - 186, /* (275) cmd ::= REINDEX */ - 186, /* (276) cmd ::= REINDEX nm dbnm */ - 186, /* (277) cmd ::= ANALYZE */ - 186, /* (278) cmd ::= ANALYZE nm dbnm */ - 186, /* (279) cmd ::= ALTER TABLE fullname RENAME TO nm */ - 186, /* (280) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - 289, /* (281) add_column_fullname ::= fullname */ - 186, /* (282) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - 186, /* (283) cmd ::= create_vtab */ - 186, /* (284) cmd ::= create_vtab LP vtabarglist RP */ - 291, /* (285) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 293, /* (286) vtabarg ::= */ - 294, /* (287) vtabargtoken ::= ANY */ - 294, /* (288) vtabargtoken ::= lp anylist RP */ - 295, /* (289) lp ::= LP */ - 261, /* (290) with ::= WITH wqlist */ - 261, /* (291) with ::= WITH RECURSIVE wqlist */ - 236, /* (292) wqlist ::= nm eidlist_opt AS LP select RP */ - 236, /* (293) wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */ - 297, /* (294) windowdefn_list ::= windowdefn */ - 297, /* (295) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - 298, /* (296) windowdefn ::= nm AS LP window RP */ - 299, /* (297) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - 299, /* (298) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - 299, /* (299) window ::= ORDER BY sortlist frame_opt */ - 299, /* (300) window ::= nm ORDER BY sortlist frame_opt */ - 299, /* (301) window ::= frame_opt */ - 299, /* (302) window ::= nm frame_opt */ - 300, /* (303) frame_opt ::= */ - 300, /* (304) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - 300, /* (305) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - 304, /* (306) range_or_rows ::= RANGE|ROWS|GROUPS */ - 306, /* (307) frame_bound_s ::= frame_bound */ - 306, /* (308) frame_bound_s ::= UNBOUNDED PRECEDING */ - 307, /* (309) frame_bound_e ::= frame_bound */ - 307, /* (310) frame_bound_e ::= UNBOUNDED FOLLOWING */ - 305, /* (311) frame_bound ::= expr PRECEDING|FOLLOWING */ - 305, /* (312) frame_bound 
::= CURRENT ROW */ - 308, /* (313) frame_exclude_opt ::= */ - 308, /* (314) frame_exclude_opt ::= EXCLUDE frame_exclude */ - 309, /* (315) frame_exclude ::= NO OTHERS */ - 309, /* (316) frame_exclude ::= CURRENT ROW */ - 309, /* (317) frame_exclude ::= GROUP|TIES */ - 246, /* (318) window_clause ::= WINDOW windowdefn_list */ - 266, /* (319) filter_over ::= filter_clause over_clause */ - 266, /* (320) filter_over ::= over_clause */ - 266, /* (321) filter_over ::= filter_clause */ - 303, /* (322) over_clause ::= OVER LP window RP */ - 303, /* (323) over_clause ::= OVER nm */ - 302, /* (324) filter_clause ::= FILTER LP WHERE expr RP */ - 181, /* (325) input ::= cmdlist */ - 182, /* (326) cmdlist ::= cmdlist ecmd */ - 182, /* (327) cmdlist ::= ecmd */ - 183, /* (328) ecmd ::= SEMI */ - 183, /* (329) ecmd ::= cmdx SEMI */ - 183, /* (330) ecmd ::= explain cmdx SEMI */ - 188, /* (331) trans_opt ::= */ - 188, /* (332) trans_opt ::= TRANSACTION */ - 188, /* (333) trans_opt ::= TRANSACTION nm */ - 190, /* (334) savepoint_opt ::= SAVEPOINT */ - 190, /* (335) savepoint_opt ::= */ - 186, /* (336) cmd ::= create_table create_table_args */ - 197, /* (337) columnlist ::= columnlist COMMA columnname carglist */ - 197, /* (338) columnlist ::= columnname carglist */ - 189, /* (339) nm ::= ID|INDEXED */ - 189, /* (340) nm ::= STRING */ - 189, /* (341) nm ::= JOIN_KW */ - 203, /* (342) typetoken ::= typename */ - 204, /* (343) typename ::= ID|STRING */ - 205, /* (344) signed ::= plus_num */ - 205, /* (345) signed ::= minus_num */ - 202, /* (346) carglist ::= carglist ccons */ - 202, /* (347) carglist ::= */ - 210, /* (348) ccons ::= NULL onconf */ - 210, /* (349) ccons ::= GENERATED ALWAYS AS generated */ - 210, /* (350) ccons ::= AS generated */ - 198, /* (351) conslist_opt ::= COMMA conslist */ - 223, /* (352) conslist ::= conslist tconscomma tcons */ - 223, /* (353) conslist ::= tcons */ - 224, /* (354) tconscomma ::= */ - 228, /* (355) defer_subclause_opt ::= defer_subclause */ - 230, /* (356) resolvetype ::= raisetype */ - 234, /* (357) selectnowith ::= oneselect */ - 235, /* (358) oneselect ::= values */ - 249, /* (359) sclp ::= selcollist COMMA */ - 250, /* (360) as ::= ID|STRING */ - 212, /* (361) expr ::= term */ - 267, /* (362) likeop ::= LIKE_KW|MATCH */ - 257, /* (363) exprlist ::= nexprlist */ - 277, /* (364) nmnum ::= plus_num */ - 277, /* (365) nmnum ::= nm */ - 277, /* (366) nmnum ::= ON */ - 277, /* (367) nmnum ::= DELETE */ - 277, /* (368) nmnum ::= DEFAULT */ - 206, /* (369) plus_num ::= INTEGER|FLOAT */ - 282, /* (370) foreach_clause ::= */ - 282, /* (371) foreach_clause ::= FOR EACH ROW */ - 285, /* (372) trnm ::= nm */ - 286, /* (373) tridxby ::= */ - 287, /* (374) database_kw_opt ::= DATABASE */ - 287, /* (375) database_kw_opt ::= */ - 290, /* (376) kwcolumn_opt ::= */ - 290, /* (377) kwcolumn_opt ::= COLUMNKW */ - 292, /* (378) vtabarglist ::= vtabarg */ - 292, /* (379) vtabarglist ::= vtabarglist COMMA vtabarg */ - 293, /* (380) vtabarg ::= vtabarg vtabargtoken */ - 296, /* (381) anylist ::= */ - 296, /* (382) anylist ::= anylist LP anylist RP */ - 296, /* (383) anylist ::= anylist ANY */ - 261, /* (384) with ::= */ + 188, /* (0) explain ::= EXPLAIN */ + 188, /* (1) explain ::= EXPLAIN QUERY PLAN */ + 187, /* (2) cmdx ::= cmd */ + 189, /* (3) cmd ::= BEGIN transtype trans_opt */ + 190, /* (4) transtype ::= */ + 190, /* (5) transtype ::= DEFERRED */ + 190, /* (6) transtype ::= IMMEDIATE */ + 190, /* (7) transtype ::= EXCLUSIVE */ + 189, /* (8) cmd ::= COMMIT|END trans_opt */ + 189, /* 
(9) cmd ::= ROLLBACK trans_opt */ + 189, /* (10) cmd ::= SAVEPOINT nm */ + 189, /* (11) cmd ::= RELEASE savepoint_opt nm */ + 189, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ + 194, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */ + 196, /* (14) createkw ::= CREATE */ + 198, /* (15) ifnotexists ::= */ + 198, /* (16) ifnotexists ::= IF NOT EXISTS */ + 197, /* (17) temp ::= TEMP */ + 197, /* (18) temp ::= */ + 195, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_options */ + 195, /* (20) create_table_args ::= AS select */ + 202, /* (21) table_options ::= */ + 202, /* (22) table_options ::= WITHOUT nm */ + 204, /* (23) columnname ::= nm typetoken */ + 206, /* (24) typetoken ::= */ + 206, /* (25) typetoken ::= typename LP signed RP */ + 206, /* (26) typetoken ::= typename LP signed COMMA signed RP */ + 207, /* (27) typename ::= typename ID|STRING */ + 211, /* (28) scanpt ::= */ + 212, /* (29) scantok ::= */ + 213, /* (30) ccons ::= CONSTRAINT nm */ + 213, /* (31) ccons ::= DEFAULT scantok term */ + 213, /* (32) ccons ::= DEFAULT LP expr RP */ + 213, /* (33) ccons ::= DEFAULT PLUS scantok term */ + 213, /* (34) ccons ::= DEFAULT MINUS scantok term */ + 213, /* (35) ccons ::= DEFAULT scantok ID|INDEXED */ + 213, /* (36) ccons ::= NOT NULL onconf */ + 213, /* (37) ccons ::= PRIMARY KEY sortorder onconf autoinc */ + 213, /* (38) ccons ::= UNIQUE onconf */ + 213, /* (39) ccons ::= CHECK LP expr RP */ + 213, /* (40) ccons ::= REFERENCES nm eidlist_opt refargs */ + 213, /* (41) ccons ::= defer_subclause */ + 213, /* (42) ccons ::= COLLATE ID|STRING */ + 222, /* (43) generated ::= LP expr RP */ + 222, /* (44) generated ::= LP expr RP ID */ + 218, /* (45) autoinc ::= */ + 218, /* (46) autoinc ::= AUTOINCR */ + 220, /* (47) refargs ::= */ + 220, /* (48) refargs ::= refargs refarg */ + 223, /* (49) refarg ::= MATCH nm */ + 223, /* (50) refarg ::= ON INSERT refact */ + 223, /* (51) refarg ::= ON DELETE refact */ + 223, /* (52) refarg ::= ON UPDATE refact */ + 224, /* (53) refact ::= SET NULL */ + 224, /* (54) refact ::= SET DEFAULT */ + 224, /* (55) refact ::= CASCADE */ + 224, /* (56) refact ::= RESTRICT */ + 224, /* (57) refact ::= NO ACTION */ + 221, /* (58) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ + 221, /* (59) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ + 225, /* (60) init_deferred_pred_opt ::= */ + 225, /* (61) init_deferred_pred_opt ::= INITIALLY DEFERRED */ + 225, /* (62) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ + 201, /* (63) conslist_opt ::= */ + 227, /* (64) tconscomma ::= COMMA */ + 228, /* (65) tcons ::= CONSTRAINT nm */ + 228, /* (66) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ + 228, /* (67) tcons ::= UNIQUE LP sortlist RP onconf */ + 228, /* (68) tcons ::= CHECK LP expr RP onconf */ + 228, /* (69) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ + 231, /* (70) defer_subclause_opt ::= */ + 216, /* (71) onconf ::= */ + 216, /* (72) onconf ::= ON CONFLICT resolvetype */ + 232, /* (73) orconf ::= */ + 232, /* (74) orconf ::= OR resolvetype */ + 233, /* (75) resolvetype ::= IGNORE */ + 233, /* (76) resolvetype ::= REPLACE */ + 189, /* (77) cmd ::= DROP TABLE ifexists fullname */ + 235, /* (78) ifexists ::= IF EXISTS */ + 235, /* (79) ifexists ::= */ + 189, /* (80) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ + 189, /* (81) cmd ::= DROP VIEW ifexists fullname */ + 189, /* (82) cmd ::= select */ + 203, /* (83) select ::= 
WITH wqlist selectnowith */ + 203, /* (84) select ::= WITH RECURSIVE wqlist selectnowith */ + 203, /* (85) select ::= selectnowith */ + 237, /* (86) selectnowith ::= selectnowith multiselect_op oneselect */ + 240, /* (87) multiselect_op ::= UNION */ + 240, /* (88) multiselect_op ::= UNION ALL */ + 240, /* (89) multiselect_op ::= EXCEPT|INTERSECT */ + 238, /* (90) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ + 238, /* (91) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ + 250, /* (92) values ::= VALUES LP nexprlist RP */ + 250, /* (93) values ::= values COMMA LP nexprlist RP */ + 241, /* (94) distinct ::= DISTINCT */ + 241, /* (95) distinct ::= ALL */ + 241, /* (96) distinct ::= */ + 252, /* (97) sclp ::= */ + 242, /* (98) selcollist ::= sclp scanpt expr scanpt as */ + 242, /* (99) selcollist ::= sclp scanpt STAR */ + 242, /* (100) selcollist ::= sclp scanpt nm DOT STAR */ + 253, /* (101) as ::= AS nm */ + 253, /* (102) as ::= */ + 243, /* (103) from ::= */ + 243, /* (104) from ::= FROM seltablist */ + 255, /* (105) stl_prefix ::= seltablist joinop */ + 255, /* (106) stl_prefix ::= */ + 254, /* (107) seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */ + 254, /* (108) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */ + 254, /* (109) seltablist ::= stl_prefix LP select RP as on_opt using_opt */ + 254, /* (110) seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ + 199, /* (111) dbnm ::= */ + 199, /* (112) dbnm ::= DOT nm */ + 236, /* (113) fullname ::= nm */ + 236, /* (114) fullname ::= nm DOT nm */ + 261, /* (115) xfullname ::= nm */ + 261, /* (116) xfullname ::= nm DOT nm */ + 261, /* (117) xfullname ::= nm DOT nm AS nm */ + 261, /* (118) xfullname ::= nm AS nm */ + 256, /* (119) joinop ::= COMMA|JOIN */ + 256, /* (120) joinop ::= JOIN_KW JOIN */ + 256, /* (121) joinop ::= JOIN_KW nm JOIN */ + 256, /* (122) joinop ::= JOIN_KW nm nm JOIN */ + 258, /* (123) on_opt ::= ON expr */ + 258, /* (124) on_opt ::= */ + 257, /* (125) indexed_opt ::= */ + 257, /* (126) indexed_opt ::= INDEXED BY nm */ + 257, /* (127) indexed_opt ::= NOT INDEXED */ + 259, /* (128) using_opt ::= USING LP idlist RP */ + 259, /* (129) using_opt ::= */ + 247, /* (130) orderby_opt ::= */ + 247, /* (131) orderby_opt ::= ORDER BY sortlist */ + 229, /* (132) sortlist ::= sortlist COMMA expr sortorder nulls */ + 229, /* (133) sortlist ::= expr sortorder nulls */ + 217, /* (134) sortorder ::= ASC */ + 217, /* (135) sortorder ::= DESC */ + 217, /* (136) sortorder ::= */ + 263, /* (137) nulls ::= NULLS FIRST */ + 263, /* (138) nulls ::= NULLS LAST */ + 263, /* (139) nulls ::= */ + 245, /* (140) groupby_opt ::= */ + 245, /* (141) groupby_opt ::= GROUP BY nexprlist */ + 246, /* (142) having_opt ::= */ + 246, /* (143) having_opt ::= HAVING expr */ + 248, /* (144) limit_opt ::= */ + 248, /* (145) limit_opt ::= LIMIT expr */ + 248, /* (146) limit_opt ::= LIMIT expr OFFSET expr */ + 248, /* (147) limit_opt ::= LIMIT expr COMMA expr */ + 189, /* (148) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ + 244, /* (149) where_opt ::= */ + 244, /* (150) where_opt ::= WHERE expr */ + 265, /* (151) where_opt_ret ::= */ + 265, /* (152) where_opt_ret ::= WHERE expr */ + 265, /* (153) where_opt_ret ::= RETURNING selcollist */ + 265, /* (154) where_opt_ret ::= WHERE expr RETURNING selcollist */ + 189, /* (155) cmd ::= with UPDATE orconf 
xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */ + 266, /* (156) setlist ::= setlist COMMA nm EQ expr */ + 266, /* (157) setlist ::= setlist COMMA LP idlist RP EQ expr */ + 266, /* (158) setlist ::= nm EQ expr */ + 266, /* (159) setlist ::= LP idlist RP EQ expr */ + 189, /* (160) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + 189, /* (161) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + 269, /* (162) upsert ::= */ + 269, /* (163) upsert ::= RETURNING selcollist */ + 269, /* (164) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt upsert */ + 269, /* (165) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ + 269, /* (166) upsert ::= ON CONFLICT DO NOTHING returning */ + 269, /* (167) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ + 270, /* (168) returning ::= RETURNING selcollist */ + 267, /* (169) insert_cmd ::= INSERT orconf */ + 267, /* (170) insert_cmd ::= REPLACE */ + 268, /* (171) idlist_opt ::= */ + 268, /* (172) idlist_opt ::= LP idlist RP */ + 262, /* (173) idlist ::= idlist COMMA nm */ + 262, /* (174) idlist ::= nm */ + 215, /* (175) expr ::= LP expr RP */ + 215, /* (176) expr ::= ID|INDEXED */ + 215, /* (177) expr ::= JOIN_KW */ + 215, /* (178) expr ::= nm DOT nm */ + 215, /* (179) expr ::= nm DOT nm DOT nm */ + 214, /* (180) term ::= NULL|FLOAT|BLOB */ + 214, /* (181) term ::= STRING */ + 214, /* (182) term ::= INTEGER */ + 215, /* (183) expr ::= VARIABLE */ + 215, /* (184) expr ::= expr COLLATE ID|STRING */ + 215, /* (185) expr ::= CAST LP expr AS typetoken RP */ + 215, /* (186) expr ::= ID|INDEXED LP distinct exprlist RP */ + 215, /* (187) expr ::= ID|INDEXED LP STAR RP */ + 215, /* (188) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ + 215, /* (189) expr ::= ID|INDEXED LP STAR RP filter_over */ + 214, /* (190) term ::= CTIME_KW */ + 215, /* (191) expr ::= LP nexprlist COMMA expr RP */ + 215, /* (192) expr ::= expr AND expr */ + 215, /* (193) expr ::= expr OR expr */ + 215, /* (194) expr ::= expr LT|GT|GE|LE expr */ + 215, /* (195) expr ::= expr EQ|NE expr */ + 215, /* (196) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + 215, /* (197) expr ::= expr PLUS|MINUS expr */ + 215, /* (198) expr ::= expr STAR|SLASH|REM expr */ + 215, /* (199) expr ::= expr CONCAT expr */ + 272, /* (200) likeop ::= NOT LIKE_KW|MATCH */ + 215, /* (201) expr ::= expr likeop expr */ + 215, /* (202) expr ::= expr likeop expr ESCAPE expr */ + 215, /* (203) expr ::= expr ISNULL|NOTNULL */ + 215, /* (204) expr ::= expr NOT NULL */ + 215, /* (205) expr ::= expr IS expr */ + 215, /* (206) expr ::= expr IS NOT expr */ + 215, /* (207) expr ::= NOT expr */ + 215, /* (208) expr ::= BITNOT expr */ + 215, /* (209) expr ::= PLUS|MINUS expr */ + 273, /* (210) between_op ::= BETWEEN */ + 273, /* (211) between_op ::= NOT BETWEEN */ + 215, /* (212) expr ::= expr between_op expr AND expr */ + 274, /* (213) in_op ::= IN */ + 274, /* (214) in_op ::= NOT IN */ + 215, /* (215) expr ::= expr in_op LP exprlist RP */ + 215, /* (216) expr ::= LP select RP */ + 215, /* (217) expr ::= expr in_op LP select RP */ + 215, /* (218) expr ::= expr in_op nm dbnm paren_exprlist */ + 215, /* (219) expr ::= EXISTS LP select RP */ + 215, /* (220) expr ::= CASE case_operand case_exprlist case_else END */ + 277, /* (221) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + 277, /* (222) case_exprlist ::= WHEN expr THEN expr */ + 278, /* (223) case_else ::= ELSE expr */ + 278, /* 
(224) case_else ::= */ + 276, /* (225) case_operand ::= expr */ + 276, /* (226) case_operand ::= */ + 260, /* (227) exprlist ::= */ + 251, /* (228) nexprlist ::= nexprlist COMMA expr */ + 251, /* (229) nexprlist ::= expr */ + 275, /* (230) paren_exprlist ::= */ + 275, /* (231) paren_exprlist ::= LP exprlist RP */ + 189, /* (232) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + 279, /* (233) uniqueflag ::= UNIQUE */ + 279, /* (234) uniqueflag ::= */ + 219, /* (235) eidlist_opt ::= */ + 219, /* (236) eidlist_opt ::= LP eidlist RP */ + 230, /* (237) eidlist ::= eidlist COMMA nm collate sortorder */ + 230, /* (238) eidlist ::= nm collate sortorder */ + 280, /* (239) collate ::= */ + 280, /* (240) collate ::= COLLATE ID|STRING */ + 189, /* (241) cmd ::= DROP INDEX ifexists fullname */ + 189, /* (242) cmd ::= VACUUM vinto */ + 189, /* (243) cmd ::= VACUUM nm vinto */ + 281, /* (244) vinto ::= INTO expr */ + 281, /* (245) vinto ::= */ + 189, /* (246) cmd ::= PRAGMA nm dbnm */ + 189, /* (247) cmd ::= PRAGMA nm dbnm EQ nmnum */ + 189, /* (248) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + 189, /* (249) cmd ::= PRAGMA nm dbnm EQ minus_num */ + 189, /* (250) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + 209, /* (251) plus_num ::= PLUS INTEGER|FLOAT */ + 210, /* (252) minus_num ::= MINUS INTEGER|FLOAT */ + 189, /* (253) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + 283, /* (254) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + 285, /* (255) trigger_time ::= BEFORE|AFTER */ + 285, /* (256) trigger_time ::= INSTEAD OF */ + 285, /* (257) trigger_time ::= */ + 286, /* (258) trigger_event ::= DELETE|INSERT */ + 286, /* (259) trigger_event ::= UPDATE */ + 286, /* (260) trigger_event ::= UPDATE OF idlist */ + 288, /* (261) when_clause ::= */ + 288, /* (262) when_clause ::= WHEN expr */ + 284, /* (263) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + 284, /* (264) trigger_cmd_list ::= trigger_cmd SEMI */ + 290, /* (265) trnm ::= nm DOT nm */ + 291, /* (266) tridxby ::= INDEXED BY nm */ + 291, /* (267) tridxby ::= NOT INDEXED */ + 289, /* (268) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + 289, /* (269) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + 289, /* (270) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + 289, /* (271) trigger_cmd ::= scanpt select scanpt */ + 215, /* (272) expr ::= RAISE LP IGNORE RP */ + 215, /* (273) expr ::= RAISE LP raisetype COMMA nm RP */ + 234, /* (274) raisetype ::= ROLLBACK */ + 234, /* (275) raisetype ::= ABORT */ + 234, /* (276) raisetype ::= FAIL */ + 189, /* (277) cmd ::= DROP TRIGGER ifexists fullname */ + 189, /* (278) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + 189, /* (279) cmd ::= DETACH database_kw_opt expr */ + 293, /* (280) key_opt ::= */ + 293, /* (281) key_opt ::= KEY expr */ + 189, /* (282) cmd ::= REINDEX */ + 189, /* (283) cmd ::= REINDEX nm dbnm */ + 189, /* (284) cmd ::= ANALYZE */ + 189, /* (285) cmd ::= ANALYZE nm dbnm */ + 189, /* (286) cmd ::= ALTER TABLE fullname RENAME TO nm */ + 189, /* (287) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + 189, /* (288) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + 294, /* (289) add_column_fullname ::= fullname */ + 189, /* (290) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + 189, /* (291) cmd ::= create_vtab */ + 189, /* (292) cmd ::= 
create_vtab LP vtabarglist RP */ + 296, /* (293) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 298, /* (294) vtabarg ::= */ + 299, /* (295) vtabargtoken ::= ANY */ + 299, /* (296) vtabargtoken ::= lp anylist RP */ + 300, /* (297) lp ::= LP */ + 264, /* (298) with ::= WITH wqlist */ + 264, /* (299) with ::= WITH RECURSIVE wqlist */ + 303, /* (300) wqas ::= AS */ + 303, /* (301) wqas ::= AS MATERIALIZED */ + 303, /* (302) wqas ::= AS NOT MATERIALIZED */ + 302, /* (303) wqitem ::= nm eidlist_opt wqas LP select RP */ + 239, /* (304) wqlist ::= wqitem */ + 239, /* (305) wqlist ::= wqlist COMMA wqitem */ + 304, /* (306) windowdefn_list ::= windowdefn */ + 304, /* (307) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + 305, /* (308) windowdefn ::= nm AS LP window RP */ + 306, /* (309) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + 306, /* (310) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + 306, /* (311) window ::= ORDER BY sortlist frame_opt */ + 306, /* (312) window ::= nm ORDER BY sortlist frame_opt */ + 306, /* (313) window ::= frame_opt */ + 306, /* (314) window ::= nm frame_opt */ + 307, /* (315) frame_opt ::= */ + 307, /* (316) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + 307, /* (317) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + 311, /* (318) range_or_rows ::= RANGE|ROWS|GROUPS */ + 313, /* (319) frame_bound_s ::= frame_bound */ + 313, /* (320) frame_bound_s ::= UNBOUNDED PRECEDING */ + 314, /* (321) frame_bound_e ::= frame_bound */ + 314, /* (322) frame_bound_e ::= UNBOUNDED FOLLOWING */ + 312, /* (323) frame_bound ::= expr PRECEDING|FOLLOWING */ + 312, /* (324) frame_bound ::= CURRENT ROW */ + 315, /* (325) frame_exclude_opt ::= */ + 315, /* (326) frame_exclude_opt ::= EXCLUDE frame_exclude */ + 316, /* (327) frame_exclude ::= NO OTHERS */ + 316, /* (328) frame_exclude ::= CURRENT ROW */ + 316, /* (329) frame_exclude ::= GROUP|TIES */ + 249, /* (330) window_clause ::= WINDOW windowdefn_list */ + 271, /* (331) filter_over ::= filter_clause over_clause */ + 271, /* (332) filter_over ::= over_clause */ + 271, /* (333) filter_over ::= filter_clause */ + 310, /* (334) over_clause ::= OVER LP window RP */ + 310, /* (335) over_clause ::= OVER nm */ + 309, /* (336) filter_clause ::= FILTER LP WHERE expr RP */ + 184, /* (337) input ::= cmdlist */ + 185, /* (338) cmdlist ::= cmdlist ecmd */ + 185, /* (339) cmdlist ::= ecmd */ + 186, /* (340) ecmd ::= SEMI */ + 186, /* (341) ecmd ::= cmdx SEMI */ + 186, /* (342) ecmd ::= explain cmdx SEMI */ + 191, /* (343) trans_opt ::= */ + 191, /* (344) trans_opt ::= TRANSACTION */ + 191, /* (345) trans_opt ::= TRANSACTION nm */ + 193, /* (346) savepoint_opt ::= SAVEPOINT */ + 193, /* (347) savepoint_opt ::= */ + 189, /* (348) cmd ::= create_table create_table_args */ + 200, /* (349) columnlist ::= columnlist COMMA columnname carglist */ + 200, /* (350) columnlist ::= columnname carglist */ + 192, /* (351) nm ::= ID|INDEXED */ + 192, /* (352) nm ::= STRING */ + 192, /* (353) nm ::= JOIN_KW */ + 206, /* (354) typetoken ::= typename */ + 207, /* (355) typename ::= ID|STRING */ + 208, /* (356) signed ::= plus_num */ + 208, /* (357) signed ::= minus_num */ + 205, /* (358) carglist ::= carglist ccons */ + 205, /* (359) carglist ::= */ + 213, /* (360) ccons ::= NULL onconf */ + 213, /* (361) ccons ::= GENERATED ALWAYS AS generated */ + 213, /* (362) ccons ::= AS generated */ + 201, /* (363) conslist_opt ::= COMMA conslist */ + 226, /* 
(364) conslist ::= conslist tconscomma tcons */ + 226, /* (365) conslist ::= tcons */ + 227, /* (366) tconscomma ::= */ + 231, /* (367) defer_subclause_opt ::= defer_subclause */ + 233, /* (368) resolvetype ::= raisetype */ + 237, /* (369) selectnowith ::= oneselect */ + 238, /* (370) oneselect ::= values */ + 252, /* (371) sclp ::= selcollist COMMA */ + 253, /* (372) as ::= ID|STRING */ + 270, /* (373) returning ::= */ + 215, /* (374) expr ::= term */ + 272, /* (375) likeop ::= LIKE_KW|MATCH */ + 260, /* (376) exprlist ::= nexprlist */ + 282, /* (377) nmnum ::= plus_num */ + 282, /* (378) nmnum ::= nm */ + 282, /* (379) nmnum ::= ON */ + 282, /* (380) nmnum ::= DELETE */ + 282, /* (381) nmnum ::= DEFAULT */ + 209, /* (382) plus_num ::= INTEGER|FLOAT */ + 287, /* (383) foreach_clause ::= */ + 287, /* (384) foreach_clause ::= FOR EACH ROW */ + 290, /* (385) trnm ::= nm */ + 291, /* (386) tridxby ::= */ + 292, /* (387) database_kw_opt ::= DATABASE */ + 292, /* (388) database_kw_opt ::= */ + 295, /* (389) kwcolumn_opt ::= */ + 295, /* (390) kwcolumn_opt ::= COLUMNKW */ + 297, /* (391) vtabarglist ::= vtabarg */ + 297, /* (392) vtabarglist ::= vtabarglist COMMA vtabarg */ + 298, /* (393) vtabarg ::= vtabarg vtabargtoken */ + 301, /* (394) anylist ::= */ + 301, /* (395) anylist ::= anylist LP anylist RP */ + 301, /* (396) anylist ::= anylist ANY */ + 264, /* (397) with ::= */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -157214,243 +161257,256 @@ static const signed char yyRuleInfoNRhs[] = { -2, /* (145) limit_opt ::= LIMIT expr */ -4, /* (146) limit_opt ::= LIMIT expr OFFSET expr */ -4, /* (147) limit_opt ::= LIMIT expr COMMA expr */ - -8, /* (148) cmd ::= with DELETE FROM xfullname indexed_opt where_opt orderby_opt limit_opt */ + -8, /* (148) cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ 0, /* (149) where_opt ::= */ -2, /* (150) where_opt ::= WHERE expr */ - -11, /* (151) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt orderby_opt limit_opt */ - -5, /* (152) setlist ::= setlist COMMA nm EQ expr */ - -7, /* (153) setlist ::= setlist COMMA LP idlist RP EQ expr */ - -3, /* (154) setlist ::= nm EQ expr */ - -5, /* (155) setlist ::= LP idlist RP EQ expr */ - -7, /* (156) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ - -7, /* (157) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES */ - 0, /* (158) upsert ::= */ - -11, /* (159) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt */ - -8, /* (160) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING */ - -4, /* (161) upsert ::= ON CONFLICT DO NOTHING */ - -2, /* (162) insert_cmd ::= INSERT orconf */ - -1, /* (163) insert_cmd ::= REPLACE */ - 0, /* (164) idlist_opt ::= */ - -3, /* (165) idlist_opt ::= LP idlist RP */ - -3, /* (166) idlist ::= idlist COMMA nm */ - -1, /* (167) idlist ::= nm */ - -3, /* (168) expr ::= LP expr RP */ - -1, /* (169) expr ::= ID|INDEXED */ - -1, /* (170) expr ::= JOIN_KW */ - -3, /* (171) expr ::= nm DOT nm */ - -5, /* (172) expr ::= nm DOT nm DOT nm */ - -1, /* (173) term ::= NULL|FLOAT|BLOB */ - -1, /* (174) term ::= STRING */ - -1, /* (175) term ::= INTEGER */ - -1, /* (176) expr ::= VARIABLE */ - -3, /* (177) expr ::= expr COLLATE ID|STRING */ - -6, /* (178) expr ::= CAST LP expr AS typetoken RP */ - -5, /* (179) expr ::= ID|INDEXED LP distinct exprlist RP */ - -4, /* (180) expr ::= ID|INDEXED LP STAR RP */ - -6, /* (181) expr ::= ID|INDEXED LP distinct 
exprlist RP filter_over */ - -5, /* (182) expr ::= ID|INDEXED LP STAR RP filter_over */ - -1, /* (183) term ::= CTIME_KW */ - -5, /* (184) expr ::= LP nexprlist COMMA expr RP */ - -3, /* (185) expr ::= expr AND expr */ - -3, /* (186) expr ::= expr OR expr */ - -3, /* (187) expr ::= expr LT|GT|GE|LE expr */ - -3, /* (188) expr ::= expr EQ|NE expr */ - -3, /* (189) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - -3, /* (190) expr ::= expr PLUS|MINUS expr */ - -3, /* (191) expr ::= expr STAR|SLASH|REM expr */ - -3, /* (192) expr ::= expr CONCAT expr */ - -2, /* (193) likeop ::= NOT LIKE_KW|MATCH */ - -3, /* (194) expr ::= expr likeop expr */ - -5, /* (195) expr ::= expr likeop expr ESCAPE expr */ - -2, /* (196) expr ::= expr ISNULL|NOTNULL */ - -3, /* (197) expr ::= expr NOT NULL */ - -3, /* (198) expr ::= expr IS expr */ - -4, /* (199) expr ::= expr IS NOT expr */ - -2, /* (200) expr ::= NOT expr */ - -2, /* (201) expr ::= BITNOT expr */ - -2, /* (202) expr ::= PLUS|MINUS expr */ - -1, /* (203) between_op ::= BETWEEN */ - -2, /* (204) between_op ::= NOT BETWEEN */ - -5, /* (205) expr ::= expr between_op expr AND expr */ - -1, /* (206) in_op ::= IN */ - -2, /* (207) in_op ::= NOT IN */ - -5, /* (208) expr ::= expr in_op LP exprlist RP */ - -3, /* (209) expr ::= LP select RP */ - -5, /* (210) expr ::= expr in_op LP select RP */ - -5, /* (211) expr ::= expr in_op nm dbnm paren_exprlist */ - -4, /* (212) expr ::= EXISTS LP select RP */ - -5, /* (213) expr ::= CASE case_operand case_exprlist case_else END */ - -5, /* (214) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - -4, /* (215) case_exprlist ::= WHEN expr THEN expr */ - -2, /* (216) case_else ::= ELSE expr */ - 0, /* (217) case_else ::= */ - -1, /* (218) case_operand ::= expr */ - 0, /* (219) case_operand ::= */ - 0, /* (220) exprlist ::= */ - -3, /* (221) nexprlist ::= nexprlist COMMA expr */ - -1, /* (222) nexprlist ::= expr */ - 0, /* (223) paren_exprlist ::= */ - -3, /* (224) paren_exprlist ::= LP exprlist RP */ - -12, /* (225) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ - -1, /* (226) uniqueflag ::= UNIQUE */ - 0, /* (227) uniqueflag ::= */ - 0, /* (228) eidlist_opt ::= */ - -3, /* (229) eidlist_opt ::= LP eidlist RP */ - -5, /* (230) eidlist ::= eidlist COMMA nm collate sortorder */ - -3, /* (231) eidlist ::= nm collate sortorder */ - 0, /* (232) collate ::= */ - -2, /* (233) collate ::= COLLATE ID|STRING */ - -4, /* (234) cmd ::= DROP INDEX ifexists fullname */ - -2, /* (235) cmd ::= VACUUM vinto */ - -3, /* (236) cmd ::= VACUUM nm vinto */ - -2, /* (237) vinto ::= INTO expr */ - 0, /* (238) vinto ::= */ - -3, /* (239) cmd ::= PRAGMA nm dbnm */ - -5, /* (240) cmd ::= PRAGMA nm dbnm EQ nmnum */ - -6, /* (241) cmd ::= PRAGMA nm dbnm LP nmnum RP */ - -5, /* (242) cmd ::= PRAGMA nm dbnm EQ minus_num */ - -6, /* (243) cmd ::= PRAGMA nm dbnm LP minus_num RP */ - -2, /* (244) plus_num ::= PLUS INTEGER|FLOAT */ - -2, /* (245) minus_num ::= MINUS INTEGER|FLOAT */ - -5, /* (246) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ - -11, /* (247) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ - -1, /* (248) trigger_time ::= BEFORE|AFTER */ - -2, /* (249) trigger_time ::= INSTEAD OF */ - 0, /* (250) trigger_time ::= */ - -1, /* (251) trigger_event ::= DELETE|INSERT */ - -1, /* (252) trigger_event ::= UPDATE */ - -3, /* (253) trigger_event ::= UPDATE OF idlist */ - 0, /* (254) when_clause ::= */ - -2, /* (255) 
when_clause ::= WHEN expr */ - -3, /* (256) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ - -2, /* (257) trigger_cmd_list ::= trigger_cmd SEMI */ - -3, /* (258) trnm ::= nm DOT nm */ - -3, /* (259) tridxby ::= INDEXED BY nm */ - -2, /* (260) tridxby ::= NOT INDEXED */ - -9, /* (261) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ - -8, /* (262) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ - -6, /* (263) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ - -3, /* (264) trigger_cmd ::= scanpt select scanpt */ - -4, /* (265) expr ::= RAISE LP IGNORE RP */ - -6, /* (266) expr ::= RAISE LP raisetype COMMA nm RP */ - -1, /* (267) raisetype ::= ROLLBACK */ - -1, /* (268) raisetype ::= ABORT */ - -1, /* (269) raisetype ::= FAIL */ - -4, /* (270) cmd ::= DROP TRIGGER ifexists fullname */ - -6, /* (271) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ - -3, /* (272) cmd ::= DETACH database_kw_opt expr */ - 0, /* (273) key_opt ::= */ - -2, /* (274) key_opt ::= KEY expr */ - -1, /* (275) cmd ::= REINDEX */ - -3, /* (276) cmd ::= REINDEX nm dbnm */ - -1, /* (277) cmd ::= ANALYZE */ - -3, /* (278) cmd ::= ANALYZE nm dbnm */ - -6, /* (279) cmd ::= ALTER TABLE fullname RENAME TO nm */ - -7, /* (280) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ - -1, /* (281) add_column_fullname ::= fullname */ - -8, /* (282) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ - -1, /* (283) cmd ::= create_vtab */ - -4, /* (284) cmd ::= create_vtab LP vtabarglist RP */ - -8, /* (285) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ - 0, /* (286) vtabarg ::= */ - -1, /* (287) vtabargtoken ::= ANY */ - -3, /* (288) vtabargtoken ::= lp anylist RP */ - -1, /* (289) lp ::= LP */ - -2, /* (290) with ::= WITH wqlist */ - -3, /* (291) with ::= WITH RECURSIVE wqlist */ - -6, /* (292) wqlist ::= nm eidlist_opt AS LP select RP */ - -8, /* (293) wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */ - -1, /* (294) windowdefn_list ::= windowdefn */ - -3, /* (295) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - -5, /* (296) windowdefn ::= nm AS LP window RP */ - -5, /* (297) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - -6, /* (298) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - -4, /* (299) window ::= ORDER BY sortlist frame_opt */ - -5, /* (300) window ::= nm ORDER BY sortlist frame_opt */ - -1, /* (301) window ::= frame_opt */ - -2, /* (302) window ::= nm frame_opt */ - 0, /* (303) frame_opt ::= */ - -3, /* (304) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - -6, /* (305) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - -1, /* (306) range_or_rows ::= RANGE|ROWS|GROUPS */ - -1, /* (307) frame_bound_s ::= frame_bound */ - -2, /* (308) frame_bound_s ::= UNBOUNDED PRECEDING */ - -1, /* (309) frame_bound_e ::= frame_bound */ - -2, /* (310) frame_bound_e ::= UNBOUNDED FOLLOWING */ - -2, /* (311) frame_bound ::= expr PRECEDING|FOLLOWING */ - -2, /* (312) frame_bound ::= CURRENT ROW */ - 0, /* (313) frame_exclude_opt ::= */ - -2, /* (314) frame_exclude_opt ::= EXCLUDE frame_exclude */ - -2, /* (315) frame_exclude ::= NO OTHERS */ - -2, /* (316) frame_exclude ::= CURRENT ROW */ - -1, /* (317) frame_exclude ::= GROUP|TIES */ - -2, /* (318) window_clause ::= WINDOW windowdefn_list */ - -2, /* (319) filter_over ::= filter_clause over_clause */ - -1, /* (320) filter_over ::= over_clause 
*/ - -1, /* (321) filter_over ::= filter_clause */ - -4, /* (322) over_clause ::= OVER LP window RP */ - -2, /* (323) over_clause ::= OVER nm */ - -5, /* (324) filter_clause ::= FILTER LP WHERE expr RP */ - -1, /* (325) input ::= cmdlist */ - -2, /* (326) cmdlist ::= cmdlist ecmd */ - -1, /* (327) cmdlist ::= ecmd */ - -1, /* (328) ecmd ::= SEMI */ - -2, /* (329) ecmd ::= cmdx SEMI */ - -3, /* (330) ecmd ::= explain cmdx SEMI */ - 0, /* (331) trans_opt ::= */ - -1, /* (332) trans_opt ::= TRANSACTION */ - -2, /* (333) trans_opt ::= TRANSACTION nm */ - -1, /* (334) savepoint_opt ::= SAVEPOINT */ - 0, /* (335) savepoint_opt ::= */ - -2, /* (336) cmd ::= create_table create_table_args */ - -4, /* (337) columnlist ::= columnlist COMMA columnname carglist */ - -2, /* (338) columnlist ::= columnname carglist */ - -1, /* (339) nm ::= ID|INDEXED */ - -1, /* (340) nm ::= STRING */ - -1, /* (341) nm ::= JOIN_KW */ - -1, /* (342) typetoken ::= typename */ - -1, /* (343) typename ::= ID|STRING */ - -1, /* (344) signed ::= plus_num */ - -1, /* (345) signed ::= minus_num */ - -2, /* (346) carglist ::= carglist ccons */ - 0, /* (347) carglist ::= */ - -2, /* (348) ccons ::= NULL onconf */ - -4, /* (349) ccons ::= GENERATED ALWAYS AS generated */ - -2, /* (350) ccons ::= AS generated */ - -2, /* (351) conslist_opt ::= COMMA conslist */ - -3, /* (352) conslist ::= conslist tconscomma tcons */ - -1, /* (353) conslist ::= tcons */ - 0, /* (354) tconscomma ::= */ - -1, /* (355) defer_subclause_opt ::= defer_subclause */ - -1, /* (356) resolvetype ::= raisetype */ - -1, /* (357) selectnowith ::= oneselect */ - -1, /* (358) oneselect ::= values */ - -2, /* (359) sclp ::= selcollist COMMA */ - -1, /* (360) as ::= ID|STRING */ - -1, /* (361) expr ::= term */ - -1, /* (362) likeop ::= LIKE_KW|MATCH */ - -1, /* (363) exprlist ::= nexprlist */ - -1, /* (364) nmnum ::= plus_num */ - -1, /* (365) nmnum ::= nm */ - -1, /* (366) nmnum ::= ON */ - -1, /* (367) nmnum ::= DELETE */ - -1, /* (368) nmnum ::= DEFAULT */ - -1, /* (369) plus_num ::= INTEGER|FLOAT */ - 0, /* (370) foreach_clause ::= */ - -3, /* (371) foreach_clause ::= FOR EACH ROW */ - -1, /* (372) trnm ::= nm */ - 0, /* (373) tridxby ::= */ - -1, /* (374) database_kw_opt ::= DATABASE */ - 0, /* (375) database_kw_opt ::= */ - 0, /* (376) kwcolumn_opt ::= */ - -1, /* (377) kwcolumn_opt ::= COLUMNKW */ - -1, /* (378) vtabarglist ::= vtabarg */ - -3, /* (379) vtabarglist ::= vtabarglist COMMA vtabarg */ - -2, /* (380) vtabarg ::= vtabarg vtabargtoken */ - 0, /* (381) anylist ::= */ - -4, /* (382) anylist ::= anylist LP anylist RP */ - -2, /* (383) anylist ::= anylist ANY */ - 0, /* (384) with ::= */ + 0, /* (151) where_opt_ret ::= */ + -2, /* (152) where_opt_ret ::= WHERE expr */ + -2, /* (153) where_opt_ret ::= RETURNING selcollist */ + -4, /* (154) where_opt_ret ::= WHERE expr RETURNING selcollist */ + -11, /* (155) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */ + -5, /* (156) setlist ::= setlist COMMA nm EQ expr */ + -7, /* (157) setlist ::= setlist COMMA LP idlist RP EQ expr */ + -3, /* (158) setlist ::= nm EQ expr */ + -5, /* (159) setlist ::= LP idlist RP EQ expr */ + -7, /* (160) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + -8, /* (161) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ + 0, /* (162) upsert ::= */ + -2, /* (163) upsert ::= RETURNING selcollist */ + -12, /* (164) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist 
where_opt upsert */ + -9, /* (165) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ + -5, /* (166) upsert ::= ON CONFLICT DO NOTHING returning */ + -8, /* (167) upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ + -2, /* (168) returning ::= RETURNING selcollist */ + -2, /* (169) insert_cmd ::= INSERT orconf */ + -1, /* (170) insert_cmd ::= REPLACE */ + 0, /* (171) idlist_opt ::= */ + -3, /* (172) idlist_opt ::= LP idlist RP */ + -3, /* (173) idlist ::= idlist COMMA nm */ + -1, /* (174) idlist ::= nm */ + -3, /* (175) expr ::= LP expr RP */ + -1, /* (176) expr ::= ID|INDEXED */ + -1, /* (177) expr ::= JOIN_KW */ + -3, /* (178) expr ::= nm DOT nm */ + -5, /* (179) expr ::= nm DOT nm DOT nm */ + -1, /* (180) term ::= NULL|FLOAT|BLOB */ + -1, /* (181) term ::= STRING */ + -1, /* (182) term ::= INTEGER */ + -1, /* (183) expr ::= VARIABLE */ + -3, /* (184) expr ::= expr COLLATE ID|STRING */ + -6, /* (185) expr ::= CAST LP expr AS typetoken RP */ + -5, /* (186) expr ::= ID|INDEXED LP distinct exprlist RP */ + -4, /* (187) expr ::= ID|INDEXED LP STAR RP */ + -6, /* (188) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ + -5, /* (189) expr ::= ID|INDEXED LP STAR RP filter_over */ + -1, /* (190) term ::= CTIME_KW */ + -5, /* (191) expr ::= LP nexprlist COMMA expr RP */ + -3, /* (192) expr ::= expr AND expr */ + -3, /* (193) expr ::= expr OR expr */ + -3, /* (194) expr ::= expr LT|GT|GE|LE expr */ + -3, /* (195) expr ::= expr EQ|NE expr */ + -3, /* (196) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + -3, /* (197) expr ::= expr PLUS|MINUS expr */ + -3, /* (198) expr ::= expr STAR|SLASH|REM expr */ + -3, /* (199) expr ::= expr CONCAT expr */ + -2, /* (200) likeop ::= NOT LIKE_KW|MATCH */ + -3, /* (201) expr ::= expr likeop expr */ + -5, /* (202) expr ::= expr likeop expr ESCAPE expr */ + -2, /* (203) expr ::= expr ISNULL|NOTNULL */ + -3, /* (204) expr ::= expr NOT NULL */ + -3, /* (205) expr ::= expr IS expr */ + -4, /* (206) expr ::= expr IS NOT expr */ + -2, /* (207) expr ::= NOT expr */ + -2, /* (208) expr ::= BITNOT expr */ + -2, /* (209) expr ::= PLUS|MINUS expr */ + -1, /* (210) between_op ::= BETWEEN */ + -2, /* (211) between_op ::= NOT BETWEEN */ + -5, /* (212) expr ::= expr between_op expr AND expr */ + -1, /* (213) in_op ::= IN */ + -2, /* (214) in_op ::= NOT IN */ + -5, /* (215) expr ::= expr in_op LP exprlist RP */ + -3, /* (216) expr ::= LP select RP */ + -5, /* (217) expr ::= expr in_op LP select RP */ + -5, /* (218) expr ::= expr in_op nm dbnm paren_exprlist */ + -4, /* (219) expr ::= EXISTS LP select RP */ + -5, /* (220) expr ::= CASE case_operand case_exprlist case_else END */ + -5, /* (221) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + -4, /* (222) case_exprlist ::= WHEN expr THEN expr */ + -2, /* (223) case_else ::= ELSE expr */ + 0, /* (224) case_else ::= */ + -1, /* (225) case_operand ::= expr */ + 0, /* (226) case_operand ::= */ + 0, /* (227) exprlist ::= */ + -3, /* (228) nexprlist ::= nexprlist COMMA expr */ + -1, /* (229) nexprlist ::= expr */ + 0, /* (230) paren_exprlist ::= */ + -3, /* (231) paren_exprlist ::= LP exprlist RP */ + -12, /* (232) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + -1, /* (233) uniqueflag ::= UNIQUE */ + 0, /* (234) uniqueflag ::= */ + 0, /* (235) eidlist_opt ::= */ + -3, /* (236) eidlist_opt ::= LP eidlist RP */ + -5, /* (237) eidlist ::= eidlist COMMA nm collate sortorder */ + -3, /* (238) eidlist ::= nm collate sortorder */ + 0, /* (239) collate 
::= */ + -2, /* (240) collate ::= COLLATE ID|STRING */ + -4, /* (241) cmd ::= DROP INDEX ifexists fullname */ + -2, /* (242) cmd ::= VACUUM vinto */ + -3, /* (243) cmd ::= VACUUM nm vinto */ + -2, /* (244) vinto ::= INTO expr */ + 0, /* (245) vinto ::= */ + -3, /* (246) cmd ::= PRAGMA nm dbnm */ + -5, /* (247) cmd ::= PRAGMA nm dbnm EQ nmnum */ + -6, /* (248) cmd ::= PRAGMA nm dbnm LP nmnum RP */ + -5, /* (249) cmd ::= PRAGMA nm dbnm EQ minus_num */ + -6, /* (250) cmd ::= PRAGMA nm dbnm LP minus_num RP */ + -2, /* (251) plus_num ::= PLUS INTEGER|FLOAT */ + -2, /* (252) minus_num ::= MINUS INTEGER|FLOAT */ + -5, /* (253) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + -11, /* (254) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + -1, /* (255) trigger_time ::= BEFORE|AFTER */ + -2, /* (256) trigger_time ::= INSTEAD OF */ + 0, /* (257) trigger_time ::= */ + -1, /* (258) trigger_event ::= DELETE|INSERT */ + -1, /* (259) trigger_event ::= UPDATE */ + -3, /* (260) trigger_event ::= UPDATE OF idlist */ + 0, /* (261) when_clause ::= */ + -2, /* (262) when_clause ::= WHEN expr */ + -3, /* (263) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + -2, /* (264) trigger_cmd_list ::= trigger_cmd SEMI */ + -3, /* (265) trnm ::= nm DOT nm */ + -3, /* (266) tridxby ::= INDEXED BY nm */ + -2, /* (267) tridxby ::= NOT INDEXED */ + -9, /* (268) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */ + -8, /* (269) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */ + -6, /* (270) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */ + -3, /* (271) trigger_cmd ::= scanpt select scanpt */ + -4, /* (272) expr ::= RAISE LP IGNORE RP */ + -6, /* (273) expr ::= RAISE LP raisetype COMMA nm RP */ + -1, /* (274) raisetype ::= ROLLBACK */ + -1, /* (275) raisetype ::= ABORT */ + -1, /* (276) raisetype ::= FAIL */ + -4, /* (277) cmd ::= DROP TRIGGER ifexists fullname */ + -6, /* (278) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + -3, /* (279) cmd ::= DETACH database_kw_opt expr */ + 0, /* (280) key_opt ::= */ + -2, /* (281) key_opt ::= KEY expr */ + -1, /* (282) cmd ::= REINDEX */ + -3, /* (283) cmd ::= REINDEX nm dbnm */ + -1, /* (284) cmd ::= ANALYZE */ + -3, /* (285) cmd ::= ANALYZE nm dbnm */ + -6, /* (286) cmd ::= ALTER TABLE fullname RENAME TO nm */ + -7, /* (287) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + -6, /* (288) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */ + -1, /* (289) add_column_fullname ::= fullname */ + -8, /* (290) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */ + -1, /* (291) cmd ::= create_vtab */ + -4, /* (292) cmd ::= create_vtab LP vtabarglist RP */ + -8, /* (293) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + 0, /* (294) vtabarg ::= */ + -1, /* (295) vtabargtoken ::= ANY */ + -3, /* (296) vtabargtoken ::= lp anylist RP */ + -1, /* (297) lp ::= LP */ + -2, /* (298) with ::= WITH wqlist */ + -3, /* (299) with ::= WITH RECURSIVE wqlist */ + -1, /* (300) wqas ::= AS */ + -2, /* (301) wqas ::= AS MATERIALIZED */ + -3, /* (302) wqas ::= AS NOT MATERIALIZED */ + -6, /* (303) wqitem ::= nm eidlist_opt wqas LP select RP */ + -1, /* (304) wqlist ::= wqitem */ + -3, /* (305) wqlist ::= wqlist COMMA wqitem */ + -1, /* (306) windowdefn_list ::= windowdefn */ + -3, /* (307) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + -5, /* (308) windowdefn ::= nm AS 
LP window RP */ + -5, /* (309) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + -6, /* (310) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + -4, /* (311) window ::= ORDER BY sortlist frame_opt */ + -5, /* (312) window ::= nm ORDER BY sortlist frame_opt */ + -1, /* (313) window ::= frame_opt */ + -2, /* (314) window ::= nm frame_opt */ + 0, /* (315) frame_opt ::= */ + -3, /* (316) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + -6, /* (317) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + -1, /* (318) range_or_rows ::= RANGE|ROWS|GROUPS */ + -1, /* (319) frame_bound_s ::= frame_bound */ + -2, /* (320) frame_bound_s ::= UNBOUNDED PRECEDING */ + -1, /* (321) frame_bound_e ::= frame_bound */ + -2, /* (322) frame_bound_e ::= UNBOUNDED FOLLOWING */ + -2, /* (323) frame_bound ::= expr PRECEDING|FOLLOWING */ + -2, /* (324) frame_bound ::= CURRENT ROW */ + 0, /* (325) frame_exclude_opt ::= */ + -2, /* (326) frame_exclude_opt ::= EXCLUDE frame_exclude */ + -2, /* (327) frame_exclude ::= NO OTHERS */ + -2, /* (328) frame_exclude ::= CURRENT ROW */ + -1, /* (329) frame_exclude ::= GROUP|TIES */ + -2, /* (330) window_clause ::= WINDOW windowdefn_list */ + -2, /* (331) filter_over ::= filter_clause over_clause */ + -1, /* (332) filter_over ::= over_clause */ + -1, /* (333) filter_over ::= filter_clause */ + -4, /* (334) over_clause ::= OVER LP window RP */ + -2, /* (335) over_clause ::= OVER nm */ + -5, /* (336) filter_clause ::= FILTER LP WHERE expr RP */ + -1, /* (337) input ::= cmdlist */ + -2, /* (338) cmdlist ::= cmdlist ecmd */ + -1, /* (339) cmdlist ::= ecmd */ + -1, /* (340) ecmd ::= SEMI */ + -2, /* (341) ecmd ::= cmdx SEMI */ + -3, /* (342) ecmd ::= explain cmdx SEMI */ + 0, /* (343) trans_opt ::= */ + -1, /* (344) trans_opt ::= TRANSACTION */ + -2, /* (345) trans_opt ::= TRANSACTION nm */ + -1, /* (346) savepoint_opt ::= SAVEPOINT */ + 0, /* (347) savepoint_opt ::= */ + -2, /* (348) cmd ::= create_table create_table_args */ + -4, /* (349) columnlist ::= columnlist COMMA columnname carglist */ + -2, /* (350) columnlist ::= columnname carglist */ + -1, /* (351) nm ::= ID|INDEXED */ + -1, /* (352) nm ::= STRING */ + -1, /* (353) nm ::= JOIN_KW */ + -1, /* (354) typetoken ::= typename */ + -1, /* (355) typename ::= ID|STRING */ + -1, /* (356) signed ::= plus_num */ + -1, /* (357) signed ::= minus_num */ + -2, /* (358) carglist ::= carglist ccons */ + 0, /* (359) carglist ::= */ + -2, /* (360) ccons ::= NULL onconf */ + -4, /* (361) ccons ::= GENERATED ALWAYS AS generated */ + -2, /* (362) ccons ::= AS generated */ + -2, /* (363) conslist_opt ::= COMMA conslist */ + -3, /* (364) conslist ::= conslist tconscomma tcons */ + -1, /* (365) conslist ::= tcons */ + 0, /* (366) tconscomma ::= */ + -1, /* (367) defer_subclause_opt ::= defer_subclause */ + -1, /* (368) resolvetype ::= raisetype */ + -1, /* (369) selectnowith ::= oneselect */ + -1, /* (370) oneselect ::= values */ + -2, /* (371) sclp ::= selcollist COMMA */ + -1, /* (372) as ::= ID|STRING */ + 0, /* (373) returning ::= */ + -1, /* (374) expr ::= term */ + -1, /* (375) likeop ::= LIKE_KW|MATCH */ + -1, /* (376) exprlist ::= nexprlist */ + -1, /* (377) nmnum ::= plus_num */ + -1, /* (378) nmnum ::= nm */ + -1, /* (379) nmnum ::= ON */ + -1, /* (380) nmnum ::= DELETE */ + -1, /* (381) nmnum ::= DEFAULT */ + -1, /* (382) plus_num ::= INTEGER|FLOAT */ + 0, /* (383) foreach_clause ::= */ + -3, /* (384) foreach_clause ::= FOR EACH ROW */ + -1, /* (385) trnm ::= 
nm */ + 0, /* (386) tridxby ::= */ + -1, /* (387) database_kw_opt ::= DATABASE */ + 0, /* (388) database_kw_opt ::= */ + 0, /* (389) kwcolumn_opt ::= */ + -1, /* (390) kwcolumn_opt ::= COLUMNKW */ + -1, /* (391) vtabarglist ::= vtabarg */ + -3, /* (392) vtabarglist ::= vtabarglist COMMA vtabarg */ + -2, /* (393) vtabarg ::= vtabarg vtabargtoken */ + 0, /* (394) anylist ::= */ + -4, /* (395) anylist ::= anylist LP anylist RP */ + -2, /* (396) anylist ::= anylist ANY */ + 0, /* (397) with ::= */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -157480,54 +161536,6 @@ static YYACTIONTYPE yy_reduce( (void)yyLookahead; (void)yyLookaheadToken; yymsp = yypParser->yytos; -#ifndef NDEBUG - if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ - yysize = yyRuleInfoNRhs[yyruleno]; - if( yysize ){ - fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", - yyTracePrompt, - yyruleno, yyRuleName[yyruleno], - yyruleno<YYNRULE ? "" : " /* without external action */", - yymsp[yysize].stateno); - } - } -#endif /* NDEBUG */ - - /* Check that the stack is large enough to grow by a single entry - ** if the RHS of the rule is empty.  This ensures that there is room - ** enough on the stack to push the LHS value */ - if( yyRuleInfoNRhs[yyruleno]==0 ){ -#ifdef YYTRACKMAXSTACKDEPTH - if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){ - yypParser->yyhwm++; - assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack)); - } -#endif -#if YYSTACKDEPTH>0 - if( yypParser->yytos>=yypParser->yystackEnd ){ - yyStackOverflow(yypParser); - /* The call to yyStackOverflow() above pops the stack until it is - ** empty, causing the main parser loop to exit. So the return value - ** is never used and does not matter. */ - return 0; - } -#else - if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ - if( yyGrowStack(yypParser) ){ - yyStackOverflow(yypParser); - /* The call to yyStackOverflow() above pops the stack until it is - ** empty, causing the main parser loop to exit. So the return value - ** is never used and does not matter. */ - return 0; - } - yymsp = yypParser->yytos; - } -#endif - } switch( yyruleno ){ /* Beginning here are the reduction cases.
A typical example @@ -157550,16 +161558,16 @@ static YYACTIONTYPE yy_reduce( { sqlite3FinishCoding(pParse); } break; case 3: /* cmd ::= BEGIN transtype trans_opt */ -{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy192);} +{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy376);} break; case 4: /* transtype ::= */ -{yymsp[1].minor.yy192 = TK_DEFERRED;} +{yymsp[1].minor.yy376 = TK_DEFERRED;} break; case 5: /* transtype ::= DEFERRED */ case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6); case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7); - case 306: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==306); -{yymsp[0].minor.yy192 = yymsp[0].major; /*A-overwrites-X*/} + case 318: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==318); +{yymsp[0].minor.yy376 = yymsp[0].major; /*A-overwrites-X*/} break; case 8: /* cmd ::= COMMIT|END trans_opt */ case 9: /* cmd ::= ROLLBACK trans_opt */ yytestcase(yyruleno==9); @@ -157582,7 +161590,7 @@ static YYACTIONTYPE yy_reduce( break; case 13: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */ { - sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy192,0,0,yymsp[-2].minor.yy192); + sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy376,0,0,yymsp[-2].minor.yy376); } break; case 14: /* createkw ::= CREATE */ @@ -157596,33 +161604,32 @@ static YYACTIONTYPE yy_reduce( case 70: /* defer_subclause_opt ::= */ yytestcase(yyruleno==70); case 79: /* ifexists ::= */ yytestcase(yyruleno==79); case 96: /* distinct ::= */ yytestcase(yyruleno==96); - case 232: /* collate ::= */ yytestcase(yyruleno==232); -{yymsp[1].minor.yy192 = 0;} + case 239: /* collate ::= */ yytestcase(yyruleno==239); +{yymsp[1].minor.yy376 = 0;} break; case 16: /* ifnotexists ::= IF NOT EXISTS */ -{yymsp[-2].minor.yy192 = 1;} +{yymsp[-2].minor.yy376 = 1;} break; case 17: /* temp ::= TEMP */ - case 46: /* autoinc ::= AUTOINCR */ yytestcase(yyruleno==46); -{yymsp[0].minor.yy192 = 1;} +{yymsp[0].minor.yy376 = pParse->db->init.busy==0;} break; case 19: /* create_table_args ::= LP columnlist conslist_opt RP table_options */ { - sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy192,0); + sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy376,0); } break; case 20: /* create_table_args ::= AS select */ { - sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy539); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy539); + sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy81); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy81); } break; case 22: /* table_options ::= WITHOUT nm */ { if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){ - yymsp[-1].minor.yy192 = TF_WithoutRowid | TF_NoVisibleRowid; + yymsp[-1].minor.yy376 = TF_WithoutRowid | TF_NoVisibleRowid; }else{ - yymsp[-1].minor.yy192 = 0; + yymsp[-1].minor.yy376 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } @@ -157651,7 +161658,7 @@ static YYACTIONTYPE yy_reduce( case 28: /* scanpt ::= */ { assert( yyLookahead!=YYNOCODE ); - yymsp[1].minor.yy436 = yyLookaheadToken.z; + yymsp[1].minor.yy504 = yyLookaheadToken.z; } break; case 29: /* scantok ::= */ @@ -157665,17 +161672,17 @@ static YYACTIONTYPE yy_reduce( {pParse->constraintName = yymsp[0].minor.yy0;} break; case 31: /* ccons ::= DEFAULT scantok term */ 
-{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy202,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy404,yymsp[-1].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 32: /* ccons ::= DEFAULT LP expr RP */ -{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy202,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} +{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy404,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);} break; case 33: /* ccons ::= DEFAULT PLUS scantok term */ -{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy202,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} +{sqlite3AddDefaultValue(pParse,yymsp[0].minor.yy404,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]);} break; case 34: /* ccons ::= DEFAULT MINUS scantok term */ { - Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy202, 0); + Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy404, 0); sqlite3AddDefaultValue(pParse,p,yymsp[-2].minor.yy0.z,&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]); } break; @@ -157690,176 +161697,161 @@ static YYACTIONTYPE yy_reduce( } break; case 36: /* ccons ::= NOT NULL onconf */ -{sqlite3AddNotNull(pParse, yymsp[0].minor.yy192);} +{sqlite3AddNotNull(pParse, yymsp[0].minor.yy376);} break; case 37: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */ -{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy192,yymsp[0].minor.yy192,yymsp[-2].minor.yy192);} +{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy376,yymsp[0].minor.yy376,yymsp[-2].minor.yy376);} break; case 38: /* ccons ::= UNIQUE onconf */ -{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy192,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy376,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 39: /* ccons ::= CHECK LP expr RP */ -{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy202);} +{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy404,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy0.z);} break; case 40: /* ccons ::= REFERENCES nm eidlist_opt refargs */ -{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy242,yymsp[0].minor.yy192);} +{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy70,yymsp[0].minor.yy376);} break; case 41: /* ccons ::= defer_subclause */ -{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy192);} +{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy376);} break; case 42: /* ccons ::= COLLATE ID|STRING */ {sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);} break; case 43: /* generated ::= LP expr RP */ -{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy202,0);} +{sqlite3AddGenerated(pParse,yymsp[-1].minor.yy404,0);} break; case 44: /* generated ::= LP expr RP ID */ -{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy202,&yymsp[0].minor.yy0);} +{sqlite3AddGenerated(pParse,yymsp[-2].minor.yy404,&yymsp[0].minor.yy0);} + break; + case 46: /* autoinc ::= AUTOINCR */ +{yymsp[0].minor.yy376 = 1;} break; case 47: /* refargs ::= */ -{ yymsp[1].minor.yy192 = OE_None*0x0101; /* EV: R-19803-45884 */} +{ yymsp[1].minor.yy376 = OE_None*0x0101; /* EV: R-19803-45884 */} break; case 48: /* refargs ::= refargs refarg */ -{ yymsp[-1].minor.yy192 = (yymsp[-1].minor.yy192 & ~yymsp[0].minor.yy207.mask) | yymsp[0].minor.yy207.value; } +{ yymsp[-1].minor.yy376 = (yymsp[-1].minor.yy376 & ~yymsp[0].minor.yy139.mask) | yymsp[0].minor.yy139.value; } break; case 49: /* refarg ::= MATCH nm */ -{ yymsp[-1].minor.yy207.value = 0; yymsp[-1].minor.yy207.mask = 0x000000; } +{ 
yymsp[-1].minor.yy139.value = 0; yymsp[-1].minor.yy139.mask = 0x000000; } break; case 50: /* refarg ::= ON INSERT refact */ -{ yymsp[-2].minor.yy207.value = 0; yymsp[-2].minor.yy207.mask = 0x000000; } +{ yymsp[-2].minor.yy139.value = 0; yymsp[-2].minor.yy139.mask = 0x000000; } break; case 51: /* refarg ::= ON DELETE refact */ -{ yymsp[-2].minor.yy207.value = yymsp[0].minor.yy192; yymsp[-2].minor.yy207.mask = 0x0000ff; } +{ yymsp[-2].minor.yy139.value = yymsp[0].minor.yy376; yymsp[-2].minor.yy139.mask = 0x0000ff; } break; case 52: /* refarg ::= ON UPDATE refact */ -{ yymsp[-2].minor.yy207.value = yymsp[0].minor.yy192<<8; yymsp[-2].minor.yy207.mask = 0x00ff00; } +{ yymsp[-2].minor.yy139.value = yymsp[0].minor.yy376<<8; yymsp[-2].minor.yy139.mask = 0x00ff00; } break; case 53: /* refact ::= SET NULL */ -{ yymsp[-1].minor.yy192 = OE_SetNull; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy376 = OE_SetNull; /* EV: R-33326-45252 */} break; case 54: /* refact ::= SET DEFAULT */ -{ yymsp[-1].minor.yy192 = OE_SetDflt; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy376 = OE_SetDflt; /* EV: R-33326-45252 */} break; case 55: /* refact ::= CASCADE */ -{ yymsp[0].minor.yy192 = OE_Cascade; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy376 = OE_Cascade; /* EV: R-33326-45252 */} break; case 56: /* refact ::= RESTRICT */ -{ yymsp[0].minor.yy192 = OE_Restrict; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy376 = OE_Restrict; /* EV: R-33326-45252 */} break; case 57: /* refact ::= NO ACTION */ -{ yymsp[-1].minor.yy192 = OE_None; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy376 = OE_None; /* EV: R-33326-45252 */} break; case 58: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ -{yymsp[-2].minor.yy192 = 0;} +{yymsp[-2].minor.yy376 = 0;} break; case 59: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ case 74: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==74); - case 162: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==162); -{yymsp[-1].minor.yy192 = yymsp[0].minor.yy192;} + case 169: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==169); +{yymsp[-1].minor.yy376 = yymsp[0].minor.yy376;} break; case 61: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ case 78: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==78); - case 204: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==204); - case 207: /* in_op ::= NOT IN */ yytestcase(yyruleno==207); - case 233: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==233); -{yymsp[-1].minor.yy192 = 1;} + case 211: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==211); + case 214: /* in_op ::= NOT IN */ yytestcase(yyruleno==214); + case 240: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==240); +{yymsp[-1].minor.yy376 = 1;} break; case 62: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ -{yymsp[-1].minor.yy192 = 0;} +{yymsp[-1].minor.yy376 = 0;} break; case 64: /* tconscomma ::= COMMA */ {pParse->constraintName.n = 0;} break; case 66: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ -{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy242,yymsp[0].minor.yy192,yymsp[-2].minor.yy192,0);} +{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy70,yymsp[0].minor.yy376,yymsp[-2].minor.yy376,0);} break; case 67: /* tcons ::= UNIQUE LP sortlist RP onconf */ -{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy242,yymsp[0].minor.yy192,0,0,0,0, +{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy70,yymsp[0].minor.yy376,0,0,0,0, SQLITE_IDXTYPE_UNIQUE);} break; case 68: /* tcons ::= CHECK LP expr RP onconf */ 
-{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy202);} +{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy404,yymsp[-3].minor.yy0.z,yymsp[-1].minor.yy0.z);} break; case 69: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ { - sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy242, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy242, yymsp[-1].minor.yy192); - sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy192); + sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy70, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy70, yymsp[-1].minor.yy376); + sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy376); } break; case 71: /* onconf ::= */ case 73: /* orconf ::= */ yytestcase(yyruleno==73); -{yymsp[1].minor.yy192 = OE_Default;} +{yymsp[1].minor.yy376 = OE_Default;} break; case 72: /* onconf ::= ON CONFLICT resolvetype */ -{yymsp[-2].minor.yy192 = yymsp[0].minor.yy192;} +{yymsp[-2].minor.yy376 = yymsp[0].minor.yy376;} break; case 75: /* resolvetype ::= IGNORE */ -{yymsp[0].minor.yy192 = OE_Ignore;} +{yymsp[0].minor.yy376 = OE_Ignore;} break; case 76: /* resolvetype ::= REPLACE */ - case 163: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==163); -{yymsp[0].minor.yy192 = OE_Replace;} + case 170: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==170); +{yymsp[0].minor.yy376 = OE_Replace;} break; case 77: /* cmd ::= DROP TABLE ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy47, 0, yymsp[-1].minor.yy192); + sqlite3DropTable(pParse, yymsp[0].minor.yy153, 0, yymsp[-1].minor.yy376); } break; case 80: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ { - sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy242, yymsp[0].minor.yy539, yymsp[-7].minor.yy192, yymsp[-5].minor.yy192); + sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy70, yymsp[0].minor.yy81, yymsp[-7].minor.yy376, yymsp[-5].minor.yy376); } break; case 81: /* cmd ::= DROP VIEW ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy47, 1, yymsp[-1].minor.yy192); + sqlite3DropTable(pParse, yymsp[0].minor.yy153, 1, yymsp[-1].minor.yy376); } break; case 82: /* cmd ::= select */ { SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0, 0}; - sqlite3Select(pParse, yymsp[0].minor.yy539, &dest); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy539); + sqlite3Select(pParse, yymsp[0].minor.yy81, &dest); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy81); } break; case 83: /* select ::= WITH wqlist selectnowith */ -{ - Select *p = yymsp[0].minor.yy539; - if( p ){ - p->pWith = yymsp[-1].minor.yy131; - parserDoubleLinkSelect(pParse, p); - }else{ - sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy131); - } - yymsp[-2].minor.yy539 = p; -} +{yymsp[-2].minor.yy81 = attachWithToSelect(pParse,yymsp[0].minor.yy81,yymsp[-1].minor.yy103);} break; case 84: /* select ::= WITH RECURSIVE wqlist selectnowith */ -{ - Select *p = yymsp[0].minor.yy539; - if( p ){ - p->pWith = yymsp[-1].minor.yy131; - parserDoubleLinkSelect(pParse, p); - }else{ - sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy131); - } - yymsp[-3].minor.yy539 = p; -} +{yymsp[-3].minor.yy81 = attachWithToSelect(pParse,yymsp[0].minor.yy81,yymsp[-1].minor.yy103);} break; case 85: /* select ::= selectnowith */ { - Select *p = yymsp[0].minor.yy539; + Select *p = yymsp[0].minor.yy81; if( p ){ parserDoubleLinkSelect(pParse, p); } - yymsp[0].minor.yy539 = p; /*A-overwrites-X*/ + yymsp[0].minor.yy81 = p; 
/*A-overwrites-X*/ } break; case 86: /* selectnowith ::= selectnowith multiselect_op oneselect */ { - Select *pRhs = yymsp[0].minor.yy539; - Select *pLhs = yymsp[-2].minor.yy539; + Select *pRhs = yymsp[0].minor.yy81; + Select *pLhs = yymsp[-2].minor.yy81; if( pRhs && pRhs->pPrior ){ SrcList *pFrom; Token x; @@ -157869,83 +161861,83 @@ static YYACTIONTYPE yy_reduce( pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0); } if( pRhs ){ - pRhs->op = (u8)yymsp[-1].minor.yy192; + pRhs->op = (u8)yymsp[-1].minor.yy376; pRhs->pPrior = pLhs; if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue; pRhs->selFlags &= ~SF_MultiValue; - if( yymsp[-1].minor.yy192!=TK_ALL ) pParse->hasCompound = 1; + if( yymsp[-1].minor.yy376!=TK_ALL ) pParse->hasCompound = 1; }else{ sqlite3SelectDelete(pParse->db, pLhs); } - yymsp[-2].minor.yy539 = pRhs; + yymsp[-2].minor.yy81 = pRhs; } break; case 87: /* multiselect_op ::= UNION */ case 89: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==89); -{yymsp[0].minor.yy192 = yymsp[0].major; /*A-overwrites-OP*/} +{yymsp[0].minor.yy376 = yymsp[0].major; /*A-overwrites-OP*/} break; case 88: /* multiselect_op ::= UNION ALL */ -{yymsp[-1].minor.yy192 = TK_ALL;} +{yymsp[-1].minor.yy376 = TK_ALL;} break; case 90: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ { - yymsp[-8].minor.yy539 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy242,yymsp[-5].minor.yy47,yymsp[-4].minor.yy202,yymsp[-3].minor.yy242,yymsp[-2].minor.yy202,yymsp[-1].minor.yy242,yymsp[-7].minor.yy192,yymsp[0].minor.yy202); + yymsp[-8].minor.yy81 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy70,yymsp[-5].minor.yy153,yymsp[-4].minor.yy404,yymsp[-3].minor.yy70,yymsp[-2].minor.yy404,yymsp[-1].minor.yy70,yymsp[-7].minor.yy376,yymsp[0].minor.yy404); } break; case 91: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */ { - yymsp[-9].minor.yy539 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy242,yymsp[-6].minor.yy47,yymsp[-5].minor.yy202,yymsp[-4].minor.yy242,yymsp[-3].minor.yy202,yymsp[-1].minor.yy242,yymsp[-8].minor.yy192,yymsp[0].minor.yy202); - if( yymsp[-9].minor.yy539 ){ - yymsp[-9].minor.yy539->pWinDefn = yymsp[-2].minor.yy303; + yymsp[-9].minor.yy81 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy70,yymsp[-6].minor.yy153,yymsp[-5].minor.yy404,yymsp[-4].minor.yy70,yymsp[-3].minor.yy404,yymsp[-1].minor.yy70,yymsp[-8].minor.yy376,yymsp[0].minor.yy404); + if( yymsp[-9].minor.yy81 ){ + yymsp[-9].minor.yy81->pWinDefn = yymsp[-2].minor.yy49; }else{ - sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy303); + sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy49); } } break; case 92: /* values ::= VALUES LP nexprlist RP */ { - yymsp[-3].minor.yy539 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy242,0,0,0,0,0,SF_Values,0); + yymsp[-3].minor.yy81 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy70,0,0,0,0,0,SF_Values,0); } break; case 93: /* values ::= values COMMA LP nexprlist RP */ { - Select *pRight, *pLeft = yymsp[-4].minor.yy539; - pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy242,0,0,0,0,0,SF_Values|SF_MultiValue,0); + Select *pRight, *pLeft = yymsp[-4].minor.yy81; + pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy70,0,0,0,0,0,SF_Values|SF_MultiValue,0); if( ALWAYS(pLeft) ) pLeft->selFlags &= ~SF_MultiValue; if( pRight ){ pRight->op = TK_ALL; pRight->pPrior = pLeft; - yymsp[-4].minor.yy539 = pRight; + yymsp[-4].minor.yy81 = pRight; }else{ - yymsp[-4].minor.yy539 = pLeft; + yymsp[-4].minor.yy81 = 
pLeft; } } break; case 94: /* distinct ::= DISTINCT */ -{yymsp[0].minor.yy192 = SF_Distinct;} +{yymsp[0].minor.yy376 = SF_Distinct;} break; case 95: /* distinct ::= ALL */ -{yymsp[0].minor.yy192 = SF_All;} +{yymsp[0].minor.yy376 = SF_All;} break; case 97: /* sclp ::= */ case 130: /* orderby_opt ::= */ yytestcase(yyruleno==130); case 140: /* groupby_opt ::= */ yytestcase(yyruleno==140); - case 220: /* exprlist ::= */ yytestcase(yyruleno==220); - case 223: /* paren_exprlist ::= */ yytestcase(yyruleno==223); - case 228: /* eidlist_opt ::= */ yytestcase(yyruleno==228); -{yymsp[1].minor.yy242 = 0;} + case 227: /* exprlist ::= */ yytestcase(yyruleno==227); + case 230: /* paren_exprlist ::= */ yytestcase(yyruleno==230); + case 235: /* eidlist_opt ::= */ yytestcase(yyruleno==235); +{yymsp[1].minor.yy70 = 0;} break; case 98: /* selcollist ::= sclp scanpt expr scanpt as */ { - yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy242, yymsp[-2].minor.yy202); - if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy242, &yymsp[0].minor.yy0, 1); - sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy242,yymsp[-3].minor.yy436,yymsp[-1].minor.yy436); + yymsp[-4].minor.yy70 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy70, yymsp[-2].minor.yy404); + if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy70, &yymsp[0].minor.yy0, 1); + sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy70,yymsp[-3].minor.yy504,yymsp[-1].minor.yy504); } break; case 99: /* selcollist ::= sclp scanpt STAR */ { Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0); - yymsp[-2].minor.yy242 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy242, p); + yymsp[-2].minor.yy70 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy70, p); } break; case 100: /* selcollist ::= sclp scanpt nm DOT STAR */ @@ -157953,56 +161945,56 @@ static YYACTIONTYPE yy_reduce( Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0); Expr *pLeft = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1); Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); - yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy242, pDot); + yymsp[-4].minor.yy70 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy70, pDot); } break; case 101: /* as ::= AS nm */ case 112: /* dbnm ::= DOT nm */ yytestcase(yyruleno==112); - case 244: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==244); - case 245: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==245); + case 251: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==251); + case 252: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==252); {yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;} break; case 103: /* from ::= */ case 106: /* stl_prefix ::= */ yytestcase(yyruleno==106); -{yymsp[1].minor.yy47 = 0;} +{yymsp[1].minor.yy153 = 0;} break; case 104: /* from ::= FROM seltablist */ { - yymsp[-1].minor.yy47 = yymsp[0].minor.yy47; - sqlite3SrcListShiftJoinType(yymsp[-1].minor.yy47); + yymsp[-1].minor.yy153 = yymsp[0].minor.yy153; + sqlite3SrcListShiftJoinType(yymsp[-1].minor.yy153); } break; case 105: /* stl_prefix ::= seltablist joinop */ { - if( ALWAYS(yymsp[-1].minor.yy47 && yymsp[-1].minor.yy47->nSrc>0) ) yymsp[-1].minor.yy47->a[yymsp[-1].minor.yy47->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy192; + if( ALWAYS(yymsp[-1].minor.yy153 && yymsp[-1].minor.yy153->nSrc>0) ) yymsp[-1].minor.yy153->a[yymsp[-1].minor.yy153->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy376; } break; case 107: /* seltablist ::= stl_prefix nm dbnm as 
indexed_opt on_opt using_opt */ { - yymsp[-6].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy47,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy202,yymsp[0].minor.yy600); - sqlite3SrcListIndexedBy(pParse, yymsp[-6].minor.yy47, &yymsp[-2].minor.yy0); + yymsp[-6].minor.yy153 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy153,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy404,yymsp[0].minor.yy436); + sqlite3SrcListIndexedBy(pParse, yymsp[-6].minor.yy153, &yymsp[-2].minor.yy0); } break; case 108: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */ { - yymsp[-8].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-8].minor.yy47,&yymsp[-7].minor.yy0,&yymsp[-6].minor.yy0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy202,yymsp[0].minor.yy600); - sqlite3SrcListFuncArgs(pParse, yymsp[-8].minor.yy47, yymsp[-4].minor.yy242); + yymsp[-8].minor.yy153 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-8].minor.yy153,&yymsp[-7].minor.yy0,&yymsp[-6].minor.yy0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy404,yymsp[0].minor.yy436); + sqlite3SrcListFuncArgs(pParse, yymsp[-8].minor.yy153, yymsp[-4].minor.yy70); } break; case 109: /* seltablist ::= stl_prefix LP select RP as on_opt using_opt */ { - yymsp[-6].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy47,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy539,yymsp[-1].minor.yy202,yymsp[0].minor.yy600); + yymsp[-6].minor.yy153 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy153,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy81,yymsp[-1].minor.yy404,yymsp[0].minor.yy436); } break; case 110: /* seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ { - if( yymsp[-6].minor.yy47==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy202==0 && yymsp[0].minor.yy600==0 ){ - yymsp[-6].minor.yy47 = yymsp[-4].minor.yy47; - }else if( yymsp[-4].minor.yy47->nSrc==1 ){ - yymsp[-6].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy47,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy202,yymsp[0].minor.yy600); - if( yymsp[-6].minor.yy47 ){ - struct SrcList_item *pNew = &yymsp[-6].minor.yy47->a[yymsp[-6].minor.yy47->nSrc-1]; - struct SrcList_item *pOld = yymsp[-4].minor.yy47->a; + if( yymsp[-6].minor.yy153==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy404==0 && yymsp[0].minor.yy436==0 ){ + yymsp[-6].minor.yy153 = yymsp[-4].minor.yy153; + }else if( yymsp[-4].minor.yy153->nSrc==1 ){ + yymsp[-6].minor.yy153 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy153,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy404,yymsp[0].minor.yy436); + if( yymsp[-6].minor.yy153 ){ + SrcItem *pNew = &yymsp[-6].minor.yy153->a[yymsp[-6].minor.yy153->nSrc-1]; + SrcItem *pOld = yymsp[-4].minor.yy153->a; pNew->zName = pOld->zName; pNew->zDatabase = pOld->zDatabase; pNew->pSelect = pOld->pSelect; @@ -158015,12 +162007,12 @@ static YYACTIONTYPE yy_reduce( pOld->zName = pOld->zDatabase = 0; pOld->pSelect = 0; } - sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy47); + sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy153); }else{ Select *pSubquery; - sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy47); - pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy47,0,0,0,0,SF_NestedFrom,0); - yymsp[-6].minor.yy47 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy47,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy202,yymsp[0].minor.yy600); + sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy153); + pSubquery = 
sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy153,0,0,0,0,SF_NestedFrom,0); + yymsp[-6].minor.yy153 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy153,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy404,yymsp[0].minor.yy436); } } break; @@ -158030,63 +162022,65 @@ static YYACTIONTYPE yy_reduce( break; case 113: /* fullname ::= nm */ { - yylhsminor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); - if( IN_RENAME_OBJECT && yylhsminor.yy47 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy47->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy153 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); + if( IN_RENAME_OBJECT && yylhsminor.yy153 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy153->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy47 = yylhsminor.yy47; + yymsp[0].minor.yy153 = yylhsminor.yy153; break; case 114: /* fullname ::= nm DOT nm */ { - yylhsminor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); - if( IN_RENAME_OBJECT && yylhsminor.yy47 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy47->a[0].zName, &yymsp[0].minor.yy0); + yylhsminor.yy153 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); + if( IN_RENAME_OBJECT && yylhsminor.yy153 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy153->a[0].zName, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy47 = yylhsminor.yy47; + yymsp[-2].minor.yy153 = yylhsminor.yy153; break; case 115: /* xfullname ::= nm */ -{yymsp[0].minor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} +{yymsp[0].minor.yy153 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/} break; case 116: /* xfullname ::= nm DOT nm */ -{yymsp[-2].minor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} +{yymsp[-2].minor.yy153 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} break; case 117: /* xfullname ::= nm DOT nm AS nm */ { - yymsp[-4].minor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ - if( yymsp[-4].minor.yy47 ) yymsp[-4].minor.yy47->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-4].minor.yy153 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/ + if( yymsp[-4].minor.yy153 ) yymsp[-4].minor.yy153->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; case 118: /* xfullname ::= nm AS nm */ { - yymsp[-2].minor.yy47 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ - if( yymsp[-2].minor.yy47 ) yymsp[-2].minor.yy47->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); + yymsp[-2].minor.yy153 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/ + if( yymsp[-2].minor.yy153 ) yymsp[-2].minor.yy153->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0); } break; case 119: /* joinop ::= COMMA|JOIN */ -{ yymsp[0].minor.yy192 = JT_INNER; } +{ yymsp[0].minor.yy376 = JT_INNER; } break; case 120: /* joinop ::= JOIN_KW JOIN */ -{yymsp[-1].minor.yy192 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} +{yymsp[-1].minor.yy376 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} break; case 121: /* joinop ::= JOIN_KW nm JOIN */ -{yymsp[-2].minor.yy192 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} +{yymsp[-2].minor.yy376 = 
sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} break; case 122: /* joinop ::= JOIN_KW nm nm JOIN */ -{yymsp[-3].minor.yy192 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} +{yymsp[-3].minor.yy376 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} break; case 123: /* on_opt ::= ON expr */ case 143: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==143); case 150: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==150); - case 216: /* case_else ::= ELSE expr */ yytestcase(yyruleno==216); - case 237: /* vinto ::= INTO expr */ yytestcase(yyruleno==237); -{yymsp[-1].minor.yy202 = yymsp[0].minor.yy202;} + case 152: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==152); + case 223: /* case_else ::= ELSE expr */ yytestcase(yyruleno==223); + case 244: /* vinto ::= INTO expr */ yytestcase(yyruleno==244); +{yymsp[-1].minor.yy404 = yymsp[0].minor.yy404;} break; case 124: /* on_opt ::= */ case 142: /* having_opt ::= */ yytestcase(yyruleno==142); case 144: /* limit_opt ::= */ yytestcase(yyruleno==144); case 149: /* where_opt ::= */ yytestcase(yyruleno==149); - case 217: /* case_else ::= */ yytestcase(yyruleno==217); - case 219: /* case_operand ::= */ yytestcase(yyruleno==219); - case 238: /* vinto ::= */ yytestcase(yyruleno==238); -{yymsp[1].minor.yy202 = 0;} + case 151: /* where_opt_ret ::= */ yytestcase(yyruleno==151); + case 224: /* case_else ::= */ yytestcase(yyruleno==224); + case 226: /* case_operand ::= */ yytestcase(yyruleno==226); + case 245: /* vinto ::= */ yytestcase(yyruleno==245); +{yymsp[1].minor.yy404 = 0;} break; case 126: /* indexed_opt ::= INDEXED BY nm */ {yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;} @@ -158095,143 +162089,158 @@ static YYACTIONTYPE yy_reduce( {yymsp[-1].minor.yy0.z=0; yymsp[-1].minor.yy0.n=1;} break; case 128: /* using_opt ::= USING LP idlist RP */ -{yymsp[-3].minor.yy600 = yymsp[-1].minor.yy600;} +{yymsp[-3].minor.yy436 = yymsp[-1].minor.yy436;} break; case 129: /* using_opt ::= */ - case 164: /* idlist_opt ::= */ yytestcase(yyruleno==164); -{yymsp[1].minor.yy600 = 0;} + case 171: /* idlist_opt ::= */ yytestcase(yyruleno==171); +{yymsp[1].minor.yy436 = 0;} break; case 131: /* orderby_opt ::= ORDER BY sortlist */ case 141: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==141); -{yymsp[-2].minor.yy242 = yymsp[0].minor.yy242;} +{yymsp[-2].minor.yy70 = yymsp[0].minor.yy70;} break; case 132: /* sortlist ::= sortlist COMMA expr sortorder nulls */ { - yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy242,yymsp[-2].minor.yy202); - sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy242,yymsp[-1].minor.yy192,yymsp[0].minor.yy192); + yymsp[-4].minor.yy70 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy70,yymsp[-2].minor.yy404); + sqlite3ExprListSetSortOrder(yymsp[-4].minor.yy70,yymsp[-1].minor.yy376,yymsp[0].minor.yy376); } break; case 133: /* sortlist ::= expr sortorder nulls */ { - yymsp[-2].minor.yy242 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy202); /*A-overwrites-Y*/ - sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy242,yymsp[-1].minor.yy192,yymsp[0].minor.yy192); + yymsp[-2].minor.yy70 = sqlite3ExprListAppend(pParse,0,yymsp[-2].minor.yy404); /*A-overwrites-Y*/ + sqlite3ExprListSetSortOrder(yymsp[-2].minor.yy70,yymsp[-1].minor.yy376,yymsp[0].minor.yy376); } break; case 134: /* sortorder ::= ASC */ -{yymsp[0].minor.yy192 = SQLITE_SO_ASC;} +{yymsp[0].minor.yy376 = 
SQLITE_SO_ASC;} break; case 135: /* sortorder ::= DESC */ -{yymsp[0].minor.yy192 = SQLITE_SO_DESC;} +{yymsp[0].minor.yy376 = SQLITE_SO_DESC;} break; case 136: /* sortorder ::= */ case 139: /* nulls ::= */ yytestcase(yyruleno==139); -{yymsp[1].minor.yy192 = SQLITE_SO_UNDEFINED;} +{yymsp[1].minor.yy376 = SQLITE_SO_UNDEFINED;} break; case 137: /* nulls ::= NULLS FIRST */ -{yymsp[-1].minor.yy192 = SQLITE_SO_ASC;} +{yymsp[-1].minor.yy376 = SQLITE_SO_ASC;} break; case 138: /* nulls ::= NULLS LAST */ -{yymsp[-1].minor.yy192 = SQLITE_SO_DESC;} +{yymsp[-1].minor.yy376 = SQLITE_SO_DESC;} break; case 145: /* limit_opt ::= LIMIT expr */ -{yymsp[-1].minor.yy202 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy202,0);} +{yymsp[-1].minor.yy404 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy404,0);} break; case 146: /* limit_opt ::= LIMIT expr OFFSET expr */ -{yymsp[-3].minor.yy202 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy202,yymsp[0].minor.yy202);} +{yymsp[-3].minor.yy404 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy404,yymsp[0].minor.yy404);} break; case 147: /* limit_opt ::= LIMIT expr COMMA expr */ -{yymsp[-3].minor.yy202 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy202,yymsp[-2].minor.yy202);} +{yymsp[-3].minor.yy404 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy404,yymsp[-2].minor.yy404);} break; - case 148: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt orderby_opt limit_opt */ + case 148: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt_ret orderby_opt limit_opt */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy47, &yymsp[-3].minor.yy0); + sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy153, &yymsp[-3].minor.yy0); #ifndef SQLITE_ENABLE_UPDATE_DELETE_LIMIT - if( yymsp[-1].minor.yy242 || yymsp[0].minor.yy202 ){ - updateDeleteLimitError(pParse,yymsp[-1].minor.yy242,yymsp[0].minor.yy202); - yymsp[-1].minor.yy242 = 0; - yymsp[0].minor.yy202 = 0; + if( yymsp[-1].minor.yy70 || yymsp[0].minor.yy404 ){ + updateDeleteLimitError(pParse,yymsp[-1].minor.yy70,yymsp[0].minor.yy404); + yymsp[-1].minor.yy70 = 0; + yymsp[0].minor.yy404 = 0; } #endif - sqlite3DeleteFrom(pParse,yymsp[-4].minor.yy47,yymsp[-2].minor.yy202,yymsp[-1].minor.yy242,yymsp[0].minor.yy202); + sqlite3DeleteFrom(pParse,yymsp[-4].minor.yy153,yymsp[-2].minor.yy404,yymsp[-1].minor.yy70,yymsp[0].minor.yy404); } break; - case 151: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt orderby_opt limit_opt */ + case 153: /* where_opt_ret ::= RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy70); yymsp[-1].minor.yy404 = 0;} + break; + case 154: /* where_opt_ret ::= WHERE expr RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy70); yymsp[-3].minor.yy404 = yymsp[-2].minor.yy404;} + break; + case 155: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */ { - sqlite3SrcListIndexedBy(pParse, yymsp[-7].minor.yy47, &yymsp[-6].minor.yy0); - yymsp[-7].minor.yy47 = sqlite3SrcListAppendList(pParse, yymsp[-7].minor.yy47, yymsp[-3].minor.yy47); - sqlite3ExprListCheckLength(pParse,yymsp[-4].minor.yy242,"set list"); + sqlite3SrcListIndexedBy(pParse, yymsp[-7].minor.yy153, &yymsp[-6].minor.yy0); + yymsp[-7].minor.yy153 = sqlite3SrcListAppendList(pParse, yymsp[-7].minor.yy153, yymsp[-3].minor.yy153); + sqlite3ExprListCheckLength(pParse,yymsp[-4].minor.yy70,"set list"); #ifndef SQLITE_ENABLE_UPDATE_DELETE_LIMIT - if( yymsp[-1].minor.yy242 || yymsp[0].minor.yy202 ){ - 
updateDeleteLimitError(pParse,yymsp[-1].minor.yy242,yymsp[0].minor.yy202); - yymsp[-1].minor.yy242 = 0; - yymsp[0].minor.yy202 = 0; + if( yymsp[-1].minor.yy70 || yymsp[0].minor.yy404 ){ + updateDeleteLimitError(pParse,yymsp[-1].minor.yy70,yymsp[0].minor.yy404); + yymsp[-1].minor.yy70 = 0; + yymsp[0].minor.yy404 = 0; } #endif - sqlite3Update(pParse,yymsp[-7].minor.yy47,yymsp[-4].minor.yy242,yymsp[-2].minor.yy202,yymsp[-8].minor.yy192,yymsp[-1].minor.yy242,yymsp[0].minor.yy202,0); + sqlite3Update(pParse,yymsp[-7].minor.yy153,yymsp[-4].minor.yy70,yymsp[-2].minor.yy404,yymsp[-8].minor.yy376,yymsp[-1].minor.yy70,yymsp[0].minor.yy404,0); } break; - case 152: /* setlist ::= setlist COMMA nm EQ expr */ + case 156: /* setlist ::= setlist COMMA nm EQ expr */ { - yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy242, yymsp[0].minor.yy202); - sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy242, &yymsp[-2].minor.yy0, 1); + yymsp[-4].minor.yy70 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy70, yymsp[0].minor.yy404); + sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy70, &yymsp[-2].minor.yy0, 1); } break; - case 153: /* setlist ::= setlist COMMA LP idlist RP EQ expr */ + case 157: /* setlist ::= setlist COMMA LP idlist RP EQ expr */ { - yymsp[-6].minor.yy242 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy242, yymsp[-3].minor.yy600, yymsp[0].minor.yy202); + yymsp[-6].minor.yy70 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy70, yymsp[-3].minor.yy436, yymsp[0].minor.yy404); } break; - case 154: /* setlist ::= nm EQ expr */ + case 158: /* setlist ::= nm EQ expr */ { - yylhsminor.yy242 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy202); - sqlite3ExprListSetName(pParse, yylhsminor.yy242, &yymsp[-2].minor.yy0, 1); + yylhsminor.yy70 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy404); + sqlite3ExprListSetName(pParse, yylhsminor.yy70, &yymsp[-2].minor.yy0, 1); } - yymsp[-2].minor.yy242 = yylhsminor.yy242; + yymsp[-2].minor.yy70 = yylhsminor.yy70; break; - case 155: /* setlist ::= LP idlist RP EQ expr */ + case 159: /* setlist ::= LP idlist RP EQ expr */ { - yymsp[-4].minor.yy242 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy600, yymsp[0].minor.yy202); + yymsp[-4].minor.yy70 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy436, yymsp[0].minor.yy404); } break; - case 156: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ + case 160: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */ { - sqlite3Insert(pParse, yymsp[-3].minor.yy47, yymsp[-1].minor.yy539, yymsp[-2].minor.yy600, yymsp[-5].minor.yy192, yymsp[0].minor.yy318); + sqlite3Insert(pParse, yymsp[-3].minor.yy153, yymsp[-1].minor.yy81, yymsp[-2].minor.yy436, yymsp[-5].minor.yy376, yymsp[0].minor.yy190); } break; - case 157: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES */ + case 161: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES returning */ { - sqlite3Insert(pParse, yymsp[-3].minor.yy47, 0, yymsp[-2].minor.yy600, yymsp[-5].minor.yy192, 0); + sqlite3Insert(pParse, yymsp[-4].minor.yy153, 0, yymsp[-3].minor.yy436, yymsp[-6].minor.yy376, 0); } break; - case 158: /* upsert ::= */ -{ yymsp[1].minor.yy318 = 0; } + case 162: /* upsert ::= */ +{ yymsp[1].minor.yy190 = 0; } + break; + case 163: /* upsert ::= RETURNING selcollist */ +{ yymsp[-1].minor.yy190 = 0; sqlite3AddReturning(pParse,yymsp[0].minor.yy70); } + break; + case 164: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist 
where_opt upsert */ +{ yymsp[-11].minor.yy190 = sqlite3UpsertNew(pParse->db,yymsp[-8].minor.yy70,yymsp[-6].minor.yy404,yymsp[-2].minor.yy70,yymsp[-1].minor.yy404,yymsp[0].minor.yy190);} + break; + case 165: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING upsert */ +{ yymsp[-8].minor.yy190 = sqlite3UpsertNew(pParse->db,yymsp[-5].minor.yy70,yymsp[-3].minor.yy404,0,0,yymsp[0].minor.yy190); } break; - case 159: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt */ -{ yymsp[-10].minor.yy318 = sqlite3UpsertNew(pParse->db,yymsp[-7].minor.yy242,yymsp[-5].minor.yy202,yymsp[-1].minor.yy242,yymsp[0].minor.yy202);} + case 166: /* upsert ::= ON CONFLICT DO NOTHING returning */ +{ yymsp[-4].minor.yy190 = sqlite3UpsertNew(pParse->db,0,0,0,0,0); } break; - case 160: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING */ -{ yymsp[-7].minor.yy318 = sqlite3UpsertNew(pParse->db,yymsp[-4].minor.yy242,yymsp[-2].minor.yy202,0,0); } + case 167: /* upsert ::= ON CONFLICT DO UPDATE SET setlist where_opt returning */ +{ yymsp[-7].minor.yy190 = sqlite3UpsertNew(pParse->db,0,0,yymsp[-2].minor.yy70,yymsp[-1].minor.yy404,0);} break; - case 161: /* upsert ::= ON CONFLICT DO NOTHING */ -{ yymsp[-3].minor.yy318 = sqlite3UpsertNew(pParse->db,0,0,0,0); } + case 168: /* returning ::= RETURNING selcollist */ +{sqlite3AddReturning(pParse,yymsp[0].minor.yy70);} break; - case 165: /* idlist_opt ::= LP idlist RP */ -{yymsp[-2].minor.yy600 = yymsp[-1].minor.yy600;} + case 172: /* idlist_opt ::= LP idlist RP */ +{yymsp[-2].minor.yy436 = yymsp[-1].minor.yy436;} break; - case 166: /* idlist ::= idlist COMMA nm */ -{yymsp[-2].minor.yy600 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy600,&yymsp[0].minor.yy0);} + case 173: /* idlist ::= idlist COMMA nm */ +{yymsp[-2].minor.yy436 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy436,&yymsp[0].minor.yy0);} break; - case 167: /* idlist ::= nm */ -{yymsp[0].minor.yy600 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} + case 174: /* idlist ::= nm */ +{yymsp[0].minor.yy436 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} break; - case 168: /* expr ::= LP expr RP */ -{yymsp[-2].minor.yy202 = yymsp[-1].minor.yy202;} + case 175: /* expr ::= LP expr RP */ +{yymsp[-2].minor.yy404 = yymsp[-1].minor.yy404;} break; - case 169: /* expr ::= ID|INDEXED */ - case 170: /* expr ::= JOIN_KW */ yytestcase(yyruleno==170); -{yymsp[0].minor.yy202=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} + case 176: /* expr ::= ID|INDEXED */ + case 177: /* expr ::= JOIN_KW */ yytestcase(yyruleno==177); +{yymsp[0].minor.yy404=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 171: /* expr ::= nm DOT nm */ + case 178: /* expr ::= nm DOT nm */ { Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1); Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1); @@ -158239,11 +162248,11 @@ static YYACTIONTYPE yy_reduce( sqlite3RenameTokenMap(pParse, (void*)temp2, &yymsp[0].minor.yy0); sqlite3RenameTokenMap(pParse, (void*)temp1, &yymsp[-2].minor.yy0); } - yylhsminor.yy202 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); + yylhsminor.yy404 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); } - yymsp[-2].minor.yy202 = yylhsminor.yy202; + yymsp[-2].minor.yy404 = yylhsminor.yy404; break; - case 172: /* expr ::= nm DOT nm DOT nm */ + case 179: /* expr ::= nm DOT nm DOT nm */ { Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-4].minor.yy0, 1); Expr *temp2 = 
sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1); @@ -158253,26 +162262,26 @@ static YYACTIONTYPE yy_reduce( sqlite3RenameTokenMap(pParse, (void*)temp3, &yymsp[0].minor.yy0); sqlite3RenameTokenMap(pParse, (void*)temp2, &yymsp[-2].minor.yy0); } - yylhsminor.yy202 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); + yylhsminor.yy404 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); } - yymsp[-4].minor.yy202 = yylhsminor.yy202; + yymsp[-4].minor.yy404 = yylhsminor.yy404; break; - case 173: /* term ::= NULL|FLOAT|BLOB */ - case 174: /* term ::= STRING */ yytestcase(yyruleno==174); -{yymsp[0].minor.yy202=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} + case 180: /* term ::= NULL|FLOAT|BLOB */ + case 181: /* term ::= STRING */ yytestcase(yyruleno==181); +{yymsp[0].minor.yy404=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 175: /* term ::= INTEGER */ + case 182: /* term ::= INTEGER */ { - yylhsminor.yy202 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); + yylhsminor.yy404 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); } - yymsp[0].minor.yy202 = yylhsminor.yy202; + yymsp[0].minor.yy404 = yylhsminor.yy404; break; - case 176: /* expr ::= VARIABLE */ + case 183: /* expr ::= VARIABLE */ { if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){ u32 n = yymsp[0].minor.yy0.n; - yymsp[0].minor.yy202 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); - sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy202, n); + yymsp[0].minor.yy404 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0); + sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy404, n); }else{ /* When doing a nested parse, one can include terms in an expression ** that look like this: #1 #2 ... 
These terms refer to registers @@ -158281,159 +162290,159 @@ static YYACTIONTYPE yy_reduce( assert( t.n>=2 ); if( pParse->nested==0 ){ sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t); - yymsp[0].minor.yy202 = 0; + yymsp[0].minor.yy404 = 0; }else{ - yymsp[0].minor.yy202 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); - if( yymsp[0].minor.yy202 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy202->iTable); + yymsp[0].minor.yy404 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); + if( yymsp[0].minor.yy404 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy404->iTable); } } } break; - case 177: /* expr ::= expr COLLATE ID|STRING */ + case 184: /* expr ::= expr COLLATE ID|STRING */ { - yymsp[-2].minor.yy202 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy202, &yymsp[0].minor.yy0, 1); + yymsp[-2].minor.yy404 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy404, &yymsp[0].minor.yy0, 1); } break; - case 178: /* expr ::= CAST LP expr AS typetoken RP */ + case 185: /* expr ::= CAST LP expr AS typetoken RP */ { - yymsp[-5].minor.yy202 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); - sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy202, yymsp[-3].minor.yy202, 0); + yymsp[-5].minor.yy404 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); + sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy404, yymsp[-3].minor.yy404, 0); } break; - case 179: /* expr ::= ID|INDEXED LP distinct exprlist RP */ + case 186: /* expr ::= ID|INDEXED LP distinct exprlist RP */ { - yylhsminor.yy202 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy242, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy192); + yylhsminor.yy404 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy70, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy376); } - yymsp[-4].minor.yy202 = yylhsminor.yy202; + yymsp[-4].minor.yy404 = yylhsminor.yy404; break; - case 180: /* expr ::= ID|INDEXED LP STAR RP */ + case 187: /* expr ::= ID|INDEXED LP STAR RP */ { - yylhsminor.yy202 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); + yylhsminor.yy404 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); } - yymsp[-3].minor.yy202 = yylhsminor.yy202; + yymsp[-3].minor.yy404 = yylhsminor.yy404; break; - case 181: /* expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ + case 188: /* expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ { - yylhsminor.yy202 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy242, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy192); - sqlite3WindowAttach(pParse, yylhsminor.yy202, yymsp[0].minor.yy303); + yylhsminor.yy404 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy70, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy376); + sqlite3WindowAttach(pParse, yylhsminor.yy404, yymsp[0].minor.yy49); } - yymsp[-5].minor.yy202 = yylhsminor.yy202; + yymsp[-5].minor.yy404 = yylhsminor.yy404; break; - case 182: /* expr ::= ID|INDEXED LP STAR RP filter_over */ + case 189: /* expr ::= ID|INDEXED LP STAR RP filter_over */ { - yylhsminor.yy202 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); - sqlite3WindowAttach(pParse, yylhsminor.yy202, yymsp[0].minor.yy303); + yylhsminor.yy404 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); + sqlite3WindowAttach(pParse, yylhsminor.yy404, yymsp[0].minor.yy49); } - yymsp[-4].minor.yy202 = yylhsminor.yy202; + yymsp[-4].minor.yy404 = yylhsminor.yy404; break; - case 183: /* term ::= CTIME_KW */ + case 190: /* term ::= CTIME_KW */ { - yylhsminor.yy202 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); + yylhsminor.yy404 = sqlite3ExprFunction(pParse, 0, 
&yymsp[0].minor.yy0, 0); } - yymsp[0].minor.yy202 = yylhsminor.yy202; + yymsp[0].minor.yy404 = yylhsminor.yy404; break; - case 184: /* expr ::= LP nexprlist COMMA expr RP */ + case 191: /* expr ::= LP nexprlist COMMA expr RP */ { - ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy242, yymsp[-1].minor.yy202); - yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); - if( yymsp[-4].minor.yy202 ){ - yymsp[-4].minor.yy202->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy70, yymsp[-1].minor.yy404); + yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); + if( yymsp[-4].minor.yy404 ){ + yymsp[-4].minor.yy404->x.pList = pList; if( ALWAYS(pList->nExpr) ){ - yymsp[-4].minor.yy202->flags |= pList->a[0].pExpr->flags & EP_Propagate; + yymsp[-4].minor.yy404->flags |= pList->a[0].pExpr->flags & EP_Propagate; } }else{ sqlite3ExprListDelete(pParse->db, pList); } } break; - case 185: /* expr ::= expr AND expr */ -{yymsp[-2].minor.yy202=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy202,yymsp[0].minor.yy202);} + case 192: /* expr ::= expr AND expr */ +{yymsp[-2].minor.yy404=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy404,yymsp[0].minor.yy404);} break; - case 186: /* expr ::= expr OR expr */ - case 187: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==187); - case 188: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==188); - case 189: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==189); - case 190: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==190); - case 191: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==191); - case 192: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==192); -{yymsp[-2].minor.yy202=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy202,yymsp[0].minor.yy202);} + case 193: /* expr ::= expr OR expr */ + case 194: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==194); + case 195: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==195); + case 196: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==196); + case 197: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==197); + case 198: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==198); + case 199: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==199); +{yymsp[-2].minor.yy404=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy404,yymsp[0].minor.yy404);} break; - case 193: /* likeop ::= NOT LIKE_KW|MATCH */ + case 200: /* likeop ::= NOT LIKE_KW|MATCH */ {yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/} break; - case 194: /* expr ::= expr likeop expr */ + case 201: /* expr ::= expr likeop expr */ { ExprList *pList; int bNot = yymsp[-1].minor.yy0.n & 0x80000000; yymsp[-1].minor.yy0.n &= 0x7fffffff; - pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy202); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy202); - yymsp[-2].minor.yy202 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); - if( bNot ) yymsp[-2].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy202, 0); - if( yymsp[-2].minor.yy202 ) yymsp[-2].minor.yy202->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy404); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy404); + yymsp[-2].minor.yy404 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0); + if( bNot ) yymsp[-2].minor.yy404 = sqlite3PExpr(pParse, TK_NOT, 
yymsp[-2].minor.yy404, 0); + if( yymsp[-2].minor.yy404 ) yymsp[-2].minor.yy404->flags |= EP_InfixFunc; } break; - case 195: /* expr ::= expr likeop expr ESCAPE expr */ + case 202: /* expr ::= expr likeop expr ESCAPE expr */ { ExprList *pList; int bNot = yymsp[-3].minor.yy0.n & 0x80000000; yymsp[-3].minor.yy0.n &= 0x7fffffff; - pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy202); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy202); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy202); - yymsp[-4].minor.yy202 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); - if( bNot ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0); - if( yymsp[-4].minor.yy202 ) yymsp[-4].minor.yy202->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy404); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy404); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy404); + yymsp[-4].minor.yy404 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0); + if( bNot ) yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy404, 0); + if( yymsp[-4].minor.yy404 ) yymsp[-4].minor.yy404->flags |= EP_InfixFunc; } break; - case 196: /* expr ::= expr ISNULL|NOTNULL */ -{yymsp[-1].minor.yy202 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy202,0);} + case 203: /* expr ::= expr ISNULL|NOTNULL */ +{yymsp[-1].minor.yy404 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy404,0);} break; - case 197: /* expr ::= expr NOT NULL */ -{yymsp[-2].minor.yy202 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy202,0);} + case 204: /* expr ::= expr NOT NULL */ +{yymsp[-2].minor.yy404 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy404,0);} break; - case 198: /* expr ::= expr IS expr */ + case 205: /* expr ::= expr IS expr */ { - yymsp[-2].minor.yy202 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy202,yymsp[0].minor.yy202); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy202, yymsp[-2].minor.yy202, TK_ISNULL); + yymsp[-2].minor.yy404 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy404,yymsp[0].minor.yy404); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy404, yymsp[-2].minor.yy404, TK_ISNULL); } break; - case 199: /* expr ::= expr IS NOT expr */ + case 206: /* expr ::= expr IS NOT expr */ { - yymsp[-3].minor.yy202 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy202,yymsp[0].minor.yy202); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy202, yymsp[-3].minor.yy202, TK_NOTNULL); + yymsp[-3].minor.yy404 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy404,yymsp[0].minor.yy404); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy404, yymsp[-3].minor.yy404, TK_NOTNULL); } break; - case 200: /* expr ::= NOT expr */ - case 201: /* expr ::= BITNOT expr */ yytestcase(yyruleno==201); -{yymsp[-1].minor.yy202 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy202, 0);/*A-overwrites-B*/} + case 207: /* expr ::= NOT expr */ + case 208: /* expr ::= BITNOT expr */ yytestcase(yyruleno==208); +{yymsp[-1].minor.yy404 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy404, 0);/*A-overwrites-B*/} break; - case 202: /* expr ::= PLUS|MINUS expr */ + case 209: /* expr ::= PLUS|MINUS expr */ { - yymsp[-1].minor.yy202 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy202, 0); + yymsp[-1].minor.yy404 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? 
   /*A-overwrites-B*/
 }
        break;
-      case 203: /* between_op ::= BETWEEN */
-      case 206: /* in_op ::= IN */ yytestcase(yyruleno==206);
-{yymsp[0].minor.yy192 = 0;}
+      case 210: /* between_op ::= BETWEEN */
+      case 213: /* in_op ::= IN */ yytestcase(yyruleno==213);
+{yymsp[0].minor.yy376 = 0;}
        break;
-      case 205: /* expr ::= expr between_op expr AND expr */
+      case 212: /* expr ::= expr between_op expr AND expr */
 {
-  ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy202);
-  pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy202);
-  yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy202, 0);
-  if( yymsp[-4].minor.yy202 ){
-    yymsp[-4].minor.yy202->x.pList = pList;
+  ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy404);
+  pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy404);
+  yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy404, 0);
+  if( yymsp[-4].minor.yy404 ){
+    yymsp[-4].minor.yy404->x.pList = pList;
   }else{
     sqlite3ExprListDelete(pParse->db, pList);
   }
-  if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0);
+  if( yymsp[-3].minor.yy376 ) yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy404, 0);
 }
        break;
-      case 208: /* expr ::= expr in_op LP exprlist RP */
+      case 215: /* expr ::= expr in_op LP exprlist RP */
 {
-  if( yymsp[-1].minor.yy242==0 ){
+  if( yymsp[-1].minor.yy70==0 ){
     /* Expressions of the form
     **
     **      expr1 IN ()
@@ -158442,197 +162451,197 @@ static YYACTIONTYPE yy_reduce(
     ** simplify to constants 0 (false) and 1 (true), respectively,
     ** regardless of the value of expr1.
     */
-    sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy202);
-    yymsp[-4].minor.yy202 = sqlite3Expr(pParse->db, TK_INTEGER, yymsp[-3].minor.yy192 ? "1" : "0");
-  }else if( yymsp[-1].minor.yy242->nExpr==1 && sqlite3ExprIsConstant(yymsp[-1].minor.yy242->a[0].pExpr) ){
-    Expr *pRHS = yymsp[-1].minor.yy242->a[0].pExpr;
-    yymsp[-1].minor.yy242->a[0].pExpr = 0;
-    sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy242);
+    sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy404);
+    yymsp[-4].minor.yy404 = sqlite3Expr(pParse->db, TK_INTEGER, yymsp[-3].minor.yy376 ? "1" : "0");
+  }else if( yymsp[-1].minor.yy70->nExpr==1 && sqlite3ExprIsConstant(yymsp[-1].minor.yy70->a[0].pExpr) ){
+    Expr *pRHS = yymsp[-1].minor.yy70->a[0].pExpr;
+    yymsp[-1].minor.yy70->a[0].pExpr = 0;
+    sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy70);
     pRHS = sqlite3PExpr(pParse, TK_UPLUS, pRHS, 0);
-    yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy202, pRHS);
-    if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0);
+    yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy404, pRHS);
+    if( yymsp[-3].minor.yy376 ) yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy404, 0);
   }else{
-    yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy202, 0);
-    if( yymsp[-4].minor.yy202 ){
-      yymsp[-4].minor.yy202->x.pList = yymsp[-1].minor.yy242;
-      sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy202);
+    yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy404, 0);
+    if( yymsp[-4].minor.yy404 ){
+      yymsp[-4].minor.yy404->x.pList = yymsp[-1].minor.yy70;
+      sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy404);
     }else{
-      sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy242);
+      sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy70);
     }
-    if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0);
+    if( yymsp[-3].minor.yy376 ) yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy404, 0);
   }
 }
        break;
-      case 209: /* expr ::= LP select RP */
+      case 216: /* expr ::= LP select RP */
 {
-  yymsp[-2].minor.yy202 = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
-  sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy202, yymsp[-1].minor.yy539);
+  yymsp[-2].minor.yy404 = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
+  sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy404, yymsp[-1].minor.yy81);
 }
        break;
-      case 210: /* expr ::= expr in_op LP select RP */
+      case 217: /* expr ::= expr in_op LP select RP */
 {
-  yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy202, 0);
-  sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy202, yymsp[-1].minor.yy539);
-  if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0);
+  yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy404, 0);
+  sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy404, yymsp[-1].minor.yy81);
+  if( yymsp[-3].minor.yy376 ) yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy404, 0);
 }
        break;
-      case 211: /* expr ::= expr in_op nm dbnm paren_exprlist */
+      case 218: /* expr ::= expr in_op nm dbnm paren_exprlist */
 {
   SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);
   Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0);
-  if( yymsp[0].minor.yy242 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy242);
-  yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy202, 0);
-  sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy202, pSelect);
-  if( yymsp[-3].minor.yy192 ) yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy202, 0);
+  if( yymsp[0].minor.yy70 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy70);
+  yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy404, 0);
+  sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy404, pSelect);
+  if( yymsp[-3].minor.yy376 ) yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy404, 0);
 }
        break;
-      case 212: /* expr ::= EXISTS LP select RP */
+      case 219: /* expr ::= EXISTS LP select RP */
 {
   Expr *p;
-  p = yymsp[-3].minor.yy202 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0);
-  sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy539);
+  p = yymsp[-3].minor.yy404 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0);
+  sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy81);
 }
        break;
-      case 213: /* expr ::= CASE case_operand case_exprlist case_else END */
+      case 220: /* expr ::= CASE case_operand case_exprlist case_else END */
 {
-  yymsp[-4].minor.yy202 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy202, 0);
-  if( yymsp[-4].minor.yy202 ){
-    yymsp[-4].minor.yy202->x.pList = yymsp[-1].minor.yy202 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy242,yymsp[-1].minor.yy202) : yymsp[-2].minor.yy242;
-    sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy202);
+  yymsp[-4].minor.yy404 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy404, 0);
+  if( yymsp[-4].minor.yy404 ){
+    yymsp[-4].minor.yy404->x.pList = yymsp[-1].minor.yy404 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy70,yymsp[-1].minor.yy404) : yymsp[-2].minor.yy70;
+    sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy404);
   }else{
-    sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy242);
-    sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy202);
+    sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy70);
+    sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy404);
   }
 }
        break;
-      case 214: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
+      case 221: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
 {
-  yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy242, yymsp[-2].minor.yy202);
-  yymsp[-4].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy242, yymsp[0].minor.yy202);
+  yymsp[-4].minor.yy70 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy70, yymsp[-2].minor.yy404);
+  yymsp[-4].minor.yy70 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy70, yymsp[0].minor.yy404);
 }
        break;
-      case 215: /* case_exprlist ::= WHEN expr THEN expr */
+      case 222: /* case_exprlist ::= WHEN expr THEN expr */
 {
-  yymsp[-3].minor.yy242 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy202);
-  yymsp[-3].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy242, yymsp[0].minor.yy202);
+  yymsp[-3].minor.yy70 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy404);
+  yymsp[-3].minor.yy70 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy70, yymsp[0].minor.yy404);
 }
        break;
-      case 218: /* case_operand ::= expr */
-{yymsp[0].minor.yy202 = yymsp[0].minor.yy202; /*A-overwrites-X*/}
+      case 225: /* case_operand ::= expr */
+{yymsp[0].minor.yy404 = yymsp[0].minor.yy404; /*A-overwrites-X*/}
        break;
-      case 221: /* nexprlist ::= nexprlist COMMA expr */
-{yymsp[-2].minor.yy242 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy242,yymsp[0].minor.yy202);}
+      case 228: /* nexprlist ::= nexprlist COMMA expr */
+{yymsp[-2].minor.yy70 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy70,yymsp[0].minor.yy404);}
        break;
-      case 222: /* nexprlist ::= expr */
-{yymsp[0].minor.yy242 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy202); /*A-overwrites-Y*/}
+      case 229: /* nexprlist ::= expr */
+{yymsp[0].minor.yy70 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy404); /*A-overwrites-Y*/}
        break;
-      case 224: /* paren_exprlist ::= LP exprlist RP */
-      case 229: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==229);
-{yymsp[-2].minor.yy242 = yymsp[-1].minor.yy242;}
+      case 231: /* paren_exprlist ::= LP exprlist RP */
+      case 236: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==236);
+{yymsp[-2].minor.yy70 = yymsp[-1].minor.yy70;}
        break;
-      case 225: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+      case 232: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
 {
   sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0,
-                     sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy242, yymsp[-10].minor.yy192,
-                     &yymsp[-11].minor.yy0, yymsp[0].minor.yy202, SQLITE_SO_ASC, yymsp[-8].minor.yy192, SQLITE_IDXTYPE_APPDEF);
+                     sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy70, yymsp[-10].minor.yy376,
+                     &yymsp[-11].minor.yy0, yymsp[0].minor.yy404, SQLITE_SO_ASC, yymsp[-8].minor.yy376, SQLITE_IDXTYPE_APPDEF);
   if( IN_RENAME_OBJECT && pParse->pNewIndex ){
     sqlite3RenameTokenMap(pParse, pParse->pNewIndex->zName, &yymsp[-4].minor.yy0);
   }
 }
        break;
-      case 226: /* uniqueflag ::= UNIQUE */
-      case 268: /* raisetype ::= ABORT */ yytestcase(yyruleno==268);
-{yymsp[0].minor.yy192 = OE_Abort;}
+      case 233: /* uniqueflag ::= UNIQUE */
+      case 275: /* raisetype ::= ABORT */ yytestcase(yyruleno==275);
+{yymsp[0].minor.yy376 = OE_Abort;}
        break;
-      case 227: /* uniqueflag ::= */
-{yymsp[1].minor.yy192 = OE_None;}
+      case 234: /* uniqueflag ::= */
+{yymsp[1].minor.yy376 = OE_None;}
        break;
-      case 230: /* eidlist ::= eidlist COMMA nm collate sortorder */
+      case 237: /* eidlist ::= eidlist COMMA nm collate sortorder */
 {
-  yymsp[-4].minor.yy242 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy242, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy192, yymsp[0].minor.yy192);
+  yymsp[-4].minor.yy70 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy70, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy376, yymsp[0].minor.yy376);
 }
        break;
-      case 231: /* eidlist ::= nm collate sortorder */
+      case 238: /* eidlist ::= nm collate sortorder */
 {
-  yymsp[-2].minor.yy242 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy192, yymsp[0].minor.yy192); /*A-overwrites-Y*/
+  yymsp[-2].minor.yy70 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy376, yymsp[0].minor.yy376); /*A-overwrites-Y*/
 }
        break;
-      case 234: /* cmd ::= DROP INDEX ifexists fullname */
-{sqlite3DropIndex(pParse, yymsp[0].minor.yy47, yymsp[-1].minor.yy192);}
+      case 241: /* cmd ::= DROP INDEX ifexists fullname */
+{sqlite3DropIndex(pParse, yymsp[0].minor.yy153, yymsp[-1].minor.yy376);}
        break;
-      case 235: /* cmd ::= VACUUM vinto */
-{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy202);}
+      case 242: /* cmd ::= VACUUM vinto */
+{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy404);}
        break;
-      case 236: /* cmd ::= VACUUM nm vinto */
-{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy202);}
+      case 243: /* cmd ::= VACUUM nm vinto */
+{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy404);}
        break;
-      case 239: /* cmd ::= PRAGMA nm dbnm */
+      case 246: /* cmd ::= PRAGMA nm dbnm */
 {sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);}
        break;
-      case 240: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
+      case 247: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
 {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);}
        break;
-      case 241: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
+      case 248: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
 {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);}
        break;
-      case 242: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
+      case 249: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
 {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);}
        break;
-      case 243: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
+      case 250: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
 {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);}
        break;
-      case 246: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+      case 253: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
 {
   Token all;
   all.z = yymsp[-3].minor.yy0.z;
   all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n;
-  sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy447, &all);
+  sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy157, &all);
 }
        break;
-      case 247: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+      case 254: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
 {
-  sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy192, yymsp[-4].minor.yy230.a, yymsp[-4].minor.yy230.b, yymsp[-2].minor.yy47, yymsp[0].minor.yy202, yymsp[-10].minor.yy192, yymsp[-8].minor.yy192);
+  sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy376, yymsp[-4].minor.yy262.a, yymsp[-4].minor.yy262.b, yymsp[-2].minor.yy153, yymsp[0].minor.yy404, yymsp[-10].minor.yy376, yymsp[-8].minor.yy376);
   yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/
 }
        break;
-      case 248: /* trigger_time ::= BEFORE|AFTER */
-{ yymsp[0].minor.yy192 = yymsp[0].major; /*A-overwrites-X*/ }
+      case 255: /* trigger_time ::= BEFORE|AFTER */
+{ yymsp[0].minor.yy376 = yymsp[0].major; /*A-overwrites-X*/ }
        break;
-      case 249: /* trigger_time ::= INSTEAD OF */
-{ yymsp[-1].minor.yy192 = TK_INSTEAD;}
+      case 256: /* trigger_time ::= INSTEAD OF */
+{ yymsp[-1].minor.yy376 = TK_INSTEAD;}
        break;
-      case 250: /* trigger_time ::= */
-{ yymsp[1].minor.yy192 = TK_BEFORE; }
+      case 257: /* trigger_time ::= */
+{ yymsp[1].minor.yy376 = TK_BEFORE; }
        break;
-      case 251: /* trigger_event ::= DELETE|INSERT */
-      case 252: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==252);
-{yymsp[0].minor.yy230.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy230.b = 0;}
+      case 258: /* trigger_event ::= DELETE|INSERT */
+      case 259: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==259);
+{yymsp[0].minor.yy262.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy262.b = 0;}
        break;
-      case 253: /* trigger_event ::= UPDATE OF idlist */
-{yymsp[-2].minor.yy230.a = TK_UPDATE; yymsp[-2].minor.yy230.b = yymsp[0].minor.yy600;}
+      case 260: /* trigger_event ::= UPDATE OF idlist */
+{yymsp[-2].minor.yy262.a = TK_UPDATE; yymsp[-2].minor.yy262.b = yymsp[0].minor.yy436;}
        break;
-      case 254: /* when_clause ::= */
-      case 273: /* key_opt ::= */ yytestcase(yyruleno==273);
-{ yymsp[1].minor.yy202 = 0; }
+      case 261: /* when_clause ::= */
+      case 280: /* key_opt ::= */ yytestcase(yyruleno==280);
+{ yymsp[1].minor.yy404 = 0; }
        break;
-      case 255: /* when_clause ::= WHEN expr */
-      case 274: /* key_opt ::= KEY expr */ yytestcase(yyruleno==274);
-{ yymsp[-1].minor.yy202 = yymsp[0].minor.yy202; }
+      case 262: /* when_clause ::= WHEN expr */
+      case 281: /* key_opt ::= KEY expr */ yytestcase(yyruleno==281);
+{ yymsp[-1].minor.yy404 = yymsp[0].minor.yy404; }
        break;
-      case 256: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+      case 263: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
 {
-  assert( yymsp[-2].minor.yy447!=0 );
-  yymsp[-2].minor.yy447->pLast->pNext = yymsp[-1].minor.yy447;
-  yymsp[-2].minor.yy447->pLast = yymsp[-1].minor.yy447;
+  assert( yymsp[-2].minor.yy157!=0 );
+  yymsp[-2].minor.yy157->pLast->pNext = yymsp[-1].minor.yy157;
+  yymsp[-2].minor.yy157->pLast = yymsp[-1].minor.yy157;
 }
        break;
-      case 257: /* trigger_cmd_list ::= trigger_cmd SEMI */
+      case 264: /* trigger_cmd_list ::= trigger_cmd SEMI */
 {
-  assert( yymsp[-1].minor.yy447!=0 );
-  yymsp[-1].minor.yy447->pLast = yymsp[-1].minor.yy447;
+  assert( yymsp[-1].minor.yy157!=0 );
+  yymsp[-1].minor.yy157->pLast = yymsp[-1].minor.yy157;
 }
        break;
-      case 258: /* trnm ::= nm DOT nm */
+      case 265: /* trnm ::= nm DOT nm */
 {
   yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;
   sqlite3ErrorMsg(pParse,
@@ -158640,344 +162649,368 @@ static YYACTIONTYPE yy_reduce(
        "statements within triggers");
 }
        break;
-      case 259: /* tridxby ::= INDEXED BY nm */
+      case 266: /* tridxby ::= INDEXED BY nm */
 {
   sqlite3ErrorMsg(pParse,
        "the INDEXED BY clause is not allowed on UPDATE or DELETE statements "
        "within triggers");
 }
        break;
-      case 260: /* tridxby ::= NOT INDEXED */
+      case 267: /* tridxby ::= NOT INDEXED */
 {
   sqlite3ErrorMsg(pParse,
        "the NOT INDEXED clause is not allowed on UPDATE or DELETE statements "
        "within triggers");
 }
        break;
-      case 261: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
-{yylhsminor.yy447 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy47, yymsp[-3].minor.yy242, yymsp[-1].minor.yy202, yymsp[-7].minor.yy192, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy436);}
-  yymsp[-8].minor.yy447 = yylhsminor.yy447;
+      case 268: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+{yylhsminor.yy157 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy153, yymsp[-3].minor.yy70, yymsp[-1].minor.yy404, yymsp[-7].minor.yy376, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy504);}
+  yymsp[-8].minor.yy157 = yylhsminor.yy157;
        break;
-      case 262: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+      case 269: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
 {
-  yylhsminor.yy447 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy600,yymsp[-2].minor.yy539,yymsp[-6].minor.yy192,yymsp[-1].minor.yy318,yymsp[-7].minor.yy436,yymsp[0].minor.yy436);/*yylhsminor.yy447-overwrites-yymsp[-6].minor.yy192*/
+  yylhsminor.yy157 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy436,yymsp[-2].minor.yy81,yymsp[-6].minor.yy376,yymsp[-1].minor.yy190,yymsp[-7].minor.yy504,yymsp[0].minor.yy504);/*yylhsminor.yy157-overwrites-yymsp[-6].minor.yy376*/
 }
-  yymsp[-7].minor.yy447 = yylhsminor.yy447;
+  yymsp[-7].minor.yy157 = yylhsminor.yy157;
        break;
-      case 263: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
-{yylhsminor.yy447 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy202, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy436);}
-  yymsp[-5].minor.yy447 = yylhsminor.yy447;
+      case 270: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+{yylhsminor.yy157 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy404, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy504);}
+  yymsp[-5].minor.yy157 = yylhsminor.yy157;
        break;
-      case 264: /* trigger_cmd ::= scanpt select scanpt */
-{yylhsminor.yy447 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy539, yymsp[-2].minor.yy436, yymsp[0].minor.yy436); /*yylhsminor.yy447-overwrites-yymsp[-1].minor.yy539*/}
-  yymsp[-2].minor.yy447 = yylhsminor.yy447;
+      case 271: /* trigger_cmd ::= scanpt select scanpt */
+{yylhsminor.yy157 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy81, yymsp[-2].minor.yy504, yymsp[0].minor.yy504); /*yylhsminor.yy157-overwrites-yymsp[-1].minor.yy81*/}
+  yymsp[-2].minor.yy157 = yylhsminor.yy157;
        break;
-      case 265: /* expr ::= RAISE LP IGNORE RP */
+      case 272: /* expr ::= RAISE LP IGNORE RP */
 {
-  yymsp[-3].minor.yy202 = sqlite3PExpr(pParse, TK_RAISE, 0, 0);
-  if( yymsp[-3].minor.yy202 ){
-    yymsp[-3].minor.yy202->affExpr = OE_Ignore;
+  yymsp[-3].minor.yy404 = sqlite3PExpr(pParse, TK_RAISE, 0, 0);
+  if( yymsp[-3].minor.yy404 ){
+    yymsp[-3].minor.yy404->affExpr = OE_Ignore;
   }
 }
        break;
-      case 266: /* expr ::= RAISE LP raisetype COMMA nm RP */
+      case 273: /* expr ::= RAISE LP raisetype COMMA nm RP */
 {
-  yymsp[-5].minor.yy202 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1);
-  if( yymsp[-5].minor.yy202 ) {
-    yymsp[-5].minor.yy202->affExpr = (char)yymsp[-3].minor.yy192;
+  yymsp[-5].minor.yy404 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1);
+  if( yymsp[-5].minor.yy404 ) {
+    yymsp[-5].minor.yy404->affExpr = (char)yymsp[-3].minor.yy376;
   }
 }
        break;
-      case 267: /* raisetype ::= ROLLBACK */
-{yymsp[0].minor.yy192 = OE_Rollback;}
+      case 274: /* raisetype ::= ROLLBACK */
+{yymsp[0].minor.yy376 = OE_Rollback;}
        break;
-      case 269: /* raisetype ::= FAIL */
-{yymsp[0].minor.yy192 = OE_Fail;}
+      case 276: /* raisetype ::= FAIL */
+{yymsp[0].minor.yy376 = OE_Fail;}
        break;
-      case 270: /* cmd ::= DROP TRIGGER ifexists fullname */
+      case 277: /* cmd ::= DROP TRIGGER ifexists fullname */
 {
-  sqlite3DropTrigger(pParse,yymsp[0].minor.yy47,yymsp[-1].minor.yy192);
+  sqlite3DropTrigger(pParse,yymsp[0].minor.yy153,yymsp[-1].minor.yy376);
 }
        break;
-      case 271: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+      case 278: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
 {
-  sqlite3Attach(pParse, yymsp[-3].minor.yy202, yymsp[-1].minor.yy202, yymsp[0].minor.yy202);
+  sqlite3Attach(pParse, yymsp[-3].minor.yy404, yymsp[-1].minor.yy404, yymsp[0].minor.yy404);
 }
        break;
-      case 272: /* cmd ::= DETACH database_kw_opt expr */
+      case 279: /* cmd ::= DETACH database_kw_opt expr */
 {
-  sqlite3Detach(pParse, yymsp[0].minor.yy202);
+  sqlite3Detach(pParse, yymsp[0].minor.yy404);
 }
        break;
-      case 275: /* cmd ::= REINDEX */
+      case 282: /* cmd ::= REINDEX */
 {sqlite3Reindex(pParse, 0, 0);}
        break;
-      case 276: /* cmd ::= REINDEX nm dbnm */
+      case 283: /* cmd ::= REINDEX nm dbnm */
 {sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
        break;
-      case 277: /* cmd ::= ANALYZE */
+      case 284: /* cmd ::= ANALYZE */
 {sqlite3Analyze(pParse, 0, 0);}
        break;
-      case 278: /* cmd ::= ANALYZE nm dbnm */
+      case 285: /* cmd ::= ANALYZE nm dbnm */
 {sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
        break;
-      case 279: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
+      case 286: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
 {
-  sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy47,&yymsp[0].minor.yy0);
+  sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy153,&yymsp[0].minor.yy0);
 }
        break;
-      case 280: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+      case 287: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
 {
   yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n;
   sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0);
 }
        break;
-      case 281: /* add_column_fullname ::= fullname */
+      case 288: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
+{
+  sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy153, &yymsp[0].minor.yy0);
+}
+        break;
+      case 289: /* add_column_fullname ::= fullname */
 {
   disableLookaside(pParse);
-  sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy47);
+  sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy153);
 }
        break;
-      case 282: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
+      case 290: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
 {
-  sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy47, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
+  sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy153, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
 }
        break;
-      case 283: /* cmd ::= create_vtab */
+      case 291: /* cmd ::= create_vtab */
 {sqlite3VtabFinishParse(pParse,0);}
        break;
-      case 284: /* cmd ::= create_vtab LP vtabarglist RP */
+      case 292: /* cmd ::= create_vtab LP vtabarglist RP */
 {sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);}
        break;
-      case 285: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+      case 293: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
 {
-  sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy192);
+  sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy376);
 }
        break;
-      case 286: /* vtabarg ::= */
+      case 294: /* vtabarg ::= */
 {sqlite3VtabArgInit(pParse);}
        break;
-      case 287: /* vtabargtoken ::= ANY */
-      case 288: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==288);
-      case 289: /* lp ::= LP */ yytestcase(yyruleno==289);
+      case 295: /* vtabargtoken ::= ANY */
+      case 296: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==296);
+      case 297: /* lp ::= LP */ yytestcase(yyruleno==297);
 {sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);}
        break;
-      case 290: /* with ::= WITH wqlist */
-      case 291: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==291);
-{ sqlite3WithPush(pParse, yymsp[0].minor.yy131, 1); }
+      case 298: /* with ::= WITH wqlist */
+      case 299: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==299);
+{ sqlite3WithPush(pParse, yymsp[0].minor.yy103, 1); }
+        break;
+      case 300: /* wqas ::= AS */
+{yymsp[0].minor.yy552 = M10d_Any;}
+        break;
+      case 301: /* wqas ::= AS MATERIALIZED */
+{yymsp[-1].minor.yy552 = M10d_Yes;}
+        break;
+      case 302: /* wqas ::= AS NOT MATERIALIZED */
+{yymsp[-2].minor.yy552 = M10d_No;}
+        break;
+      case 303: /* wqitem ::= nm eidlist_opt wqas LP select RP */
+{
+  yymsp[-5].minor.yy329 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy70, yymsp[-1].minor.yy81, yymsp[-3].minor.yy552); /*A-overwrites-X*/
+}
        break;
-      case 292: /* wqlist ::= nm eidlist_opt AS LP select RP */
+      case 304: /* wqlist ::= wqitem */
 {
-  yymsp[-5].minor.yy131 = sqlite3WithAdd(pParse, 0, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy242, yymsp[-1].minor.yy539); /*A-overwrites-X*/
+  yymsp[0].minor.yy103 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy329); /*A-overwrites-X*/
 }
        break;
-      case 293: /* wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */
+      case 305: /* wqlist ::= wqlist COMMA wqitem */
 {
-  yymsp[-7].minor.yy131 = sqlite3WithAdd(pParse, yymsp[-7].minor.yy131, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy242, yymsp[-1].minor.yy539);
+  yymsp[-2].minor.yy103 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy103, yymsp[0].minor.yy329);
 }
        break;
-      case 294: /* windowdefn_list ::= windowdefn */
-{ yylhsminor.yy303 = yymsp[0].minor.yy303; }
-  yymsp[0].minor.yy303 = yylhsminor.yy303;
+      case 306: /* windowdefn_list ::= windowdefn */
+{ yylhsminor.yy49 = yymsp[0].minor.yy49; }
+  yymsp[0].minor.yy49 = yylhsminor.yy49;
        break;
-      case 295: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */
+      case 307: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */
 {
-  assert( yymsp[0].minor.yy303!=0 );
-  sqlite3WindowChain(pParse, yymsp[0].minor.yy303, yymsp[-2].minor.yy303);
-  yymsp[0].minor.yy303->pNextWin = yymsp[-2].minor.yy303;
-  yylhsminor.yy303 = yymsp[0].minor.yy303;
+  assert( yymsp[0].minor.yy49!=0 );
+  sqlite3WindowChain(pParse, yymsp[0].minor.yy49, yymsp[-2].minor.yy49);
+  yymsp[0].minor.yy49->pNextWin = yymsp[-2].minor.yy49;
+  yylhsminor.yy49 = yymsp[0].minor.yy49;
 }
-  yymsp[-2].minor.yy303 = yylhsminor.yy303;
+  yymsp[-2].minor.yy49 = yylhsminor.yy49;
        break;
-      case 296: /* windowdefn ::= nm AS LP window RP */
+      case 308: /* windowdefn ::= nm AS LP window RP */
 {
-  if( ALWAYS(yymsp[-1].minor.yy303) ){
-    yymsp[-1].minor.yy303->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n);
+  if( ALWAYS(yymsp[-1].minor.yy49) ){
+    yymsp[-1].minor.yy49->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n);
   }
-  yylhsminor.yy303 = yymsp[-1].minor.yy303;
+  yylhsminor.yy49 = yymsp[-1].minor.yy49;
 }
-  yymsp[-4].minor.yy303 = yylhsminor.yy303;
+  yymsp[-4].minor.yy49 = yylhsminor.yy49;
        break;
-      case 297: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */
+      case 309: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */
 {
-  yymsp[-4].minor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, yymsp[-2].minor.yy242, yymsp[-1].minor.yy242, 0);
+  yymsp[-4].minor.yy49 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy49, yymsp[-2].minor.yy70, yymsp[-1].minor.yy70, 0);
 }
        break;
-      case 298: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
+      case 310: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
 {
-  yylhsminor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, yymsp[-2].minor.yy242, yymsp[-1].minor.yy242, &yymsp[-5].minor.yy0);
+  yylhsminor.yy49 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy49, yymsp[-2].minor.yy70, yymsp[-1].minor.yy70, &yymsp[-5].minor.yy0);
 }
-  yymsp[-5].minor.yy303 = yylhsminor.yy303;
+  yymsp[-5].minor.yy49 = yylhsminor.yy49;
        break;
-      case 299: /* window ::= ORDER BY sortlist frame_opt */
+      case 311: /* window ::= ORDER BY sortlist frame_opt */
 {
-  yymsp[-3].minor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, 0, yymsp[-1].minor.yy242, 0);
+  yymsp[-3].minor.yy49 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy49, 0, yymsp[-1].minor.yy70, 0);
 }
        break;
-      case 300: /* window ::= nm ORDER BY sortlist frame_opt */
+      case 312: /* window ::= nm ORDER BY sortlist frame_opt */
 {
-  yylhsminor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, 0, yymsp[-1].minor.yy242, &yymsp[-4].minor.yy0);
+  yylhsminor.yy49 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy49, 0, yymsp[-1].minor.yy70, &yymsp[-4].minor.yy0);
 }
-  yymsp[-4].minor.yy303 = yylhsminor.yy303;
+  yymsp[-4].minor.yy49 = yylhsminor.yy49;
        break;
-      case 301: /* window ::= frame_opt */
-      case 320: /* filter_over ::= over_clause */ yytestcase(yyruleno==320);
+      case 313: /* window ::= frame_opt */
+      case 332: /* filter_over ::= over_clause */ yytestcase(yyruleno==332);
 {
-  yylhsminor.yy303 = yymsp[0].minor.yy303;
+  yylhsminor.yy49 = yymsp[0].minor.yy49;
 }
-  yymsp[0].minor.yy303 = yylhsminor.yy303;
+  yymsp[0].minor.yy49 = yylhsminor.yy49;
        break;
-      case 302: /* window ::= nm frame_opt */
+      case 314: /* window ::= nm frame_opt */
 {
-  yylhsminor.yy303 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy303, 0, 0, &yymsp[-1].minor.yy0);
+  yylhsminor.yy49 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy49, 0, 0, &yymsp[-1].minor.yy0);
 }
-  yymsp[-1].minor.yy303 = yylhsminor.yy303;
+  yymsp[-1].minor.yy49 = yylhsminor.yy49;
        break;
-      case 303: /* frame_opt ::= */
+      case 315: /* frame_opt ::= */
 {
-  yymsp[1].minor.yy303 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0);
+  yymsp[1].minor.yy49 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0);
 }
        break;
-      case 304: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
+      case 316: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
 {
-  yylhsminor.yy303 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy192, yymsp[-1].minor.yy77.eType, yymsp[-1].minor.yy77.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy58);
+  yylhsminor.yy49 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy376, yymsp[-1].minor.yy117.eType, yymsp[-1].minor.yy117.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy552);
 }
-  yymsp[-2].minor.yy303 = yylhsminor.yy303;
+  yymsp[-2].minor.yy49 = yylhsminor.yy49;
        break;
-      case 305: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */
+      case 317: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */
 {
-  yylhsminor.yy303 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy192, yymsp[-3].minor.yy77.eType, yymsp[-3].minor.yy77.pExpr, yymsp[-1].minor.yy77.eType, yymsp[-1].minor.yy77.pExpr, yymsp[0].minor.yy58);
+  yylhsminor.yy49 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy376, yymsp[-3].minor.yy117.eType, yymsp[-3].minor.yy117.pExpr, yymsp[-1].minor.yy117.eType, yymsp[-1].minor.yy117.pExpr, yymsp[0].minor.yy552);
 }
-  yymsp[-5].minor.yy303 = yylhsminor.yy303;
+  yymsp[-5].minor.yy49 = yylhsminor.yy49;
        break;
-      case 307: /* frame_bound_s ::= frame_bound */
-      case 309: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==309);
-{yylhsminor.yy77 = yymsp[0].minor.yy77;}
-  yymsp[0].minor.yy77 = yylhsminor.yy77;
+      case 319: /* frame_bound_s ::= frame_bound */
+      case 321: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==321);
+{yylhsminor.yy117 = yymsp[0].minor.yy117;}
+  yymsp[0].minor.yy117 = yylhsminor.yy117;
        break;
-      case 308: /* frame_bound_s ::= UNBOUNDED PRECEDING */
-      case 310: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==310);
-      case 312: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==312);
-{yylhsminor.yy77.eType = yymsp[-1].major; yylhsminor.yy77.pExpr = 0;}
-  yymsp[-1].minor.yy77 = yylhsminor.yy77;
+      case 320: /* frame_bound_s ::= UNBOUNDED PRECEDING */
+      case 322: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==322);
+      case 324: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==324);
+{yylhsminor.yy117.eType = yymsp[-1].major; yylhsminor.yy117.pExpr = 0;}
+  yymsp[-1].minor.yy117 = yylhsminor.yy117;
        break;
-      case 311: /* frame_bound ::= expr PRECEDING|FOLLOWING */
-{yylhsminor.yy77.eType = yymsp[0].major; yylhsminor.yy77.pExpr = yymsp[-1].minor.yy202;}
-  yymsp[-1].minor.yy77 = yylhsminor.yy77;
+      case 323: /* frame_bound ::= expr PRECEDING|FOLLOWING */
+{yylhsminor.yy117.eType = yymsp[0].major; yylhsminor.yy117.pExpr = yymsp[-1].minor.yy404;}
+  yymsp[-1].minor.yy117 = yylhsminor.yy117;
        break;
-      case 313: /* frame_exclude_opt ::= */
-{yymsp[1].minor.yy58 = 0;}
+      case 325: /* frame_exclude_opt ::= */
+{yymsp[1].minor.yy552 = 0;}
        break;
-      case 314: /* frame_exclude_opt ::= EXCLUDE frame_exclude */
-{yymsp[-1].minor.yy58 = yymsp[0].minor.yy58;}
+      case 326: /* frame_exclude_opt ::= EXCLUDE frame_exclude */
+{yymsp[-1].minor.yy552 = yymsp[0].minor.yy552;}
        break;
-      case 315: /* frame_exclude ::= NO OTHERS */
-      case 316: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==316);
-{yymsp[-1].minor.yy58 = yymsp[-1].major; /*A-overwrites-X*/}
+      case 327: /* frame_exclude ::= NO OTHERS */
+      case 328: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==328);
+{yymsp[-1].minor.yy552 = yymsp[-1].major; /*A-overwrites-X*/}
        break;
-      case 317: /* frame_exclude ::= GROUP|TIES */
-{yymsp[0].minor.yy58 = yymsp[0].major; /*A-overwrites-X*/}
+      case 329: /* frame_exclude ::= GROUP|TIES */
+{yymsp[0].minor.yy552 = yymsp[0].major; /*A-overwrites-X*/}
        break;
-      case 318: /* window_clause ::= WINDOW windowdefn_list */
-{ yymsp[-1].minor.yy303 = yymsp[0].minor.yy303; }
+      case 330: /* window_clause ::= WINDOW windowdefn_list */
+{ yymsp[-1].minor.yy49 = yymsp[0].minor.yy49; }
        break;
-      case 319: /* filter_over ::= filter_clause over_clause */
+      case 331: /* filter_over ::= filter_clause over_clause */
 {
-  yymsp[0].minor.yy303->pFilter = yymsp[-1].minor.yy202;
-  yylhsminor.yy303 = yymsp[0].minor.yy303;
+  if( yymsp[0].minor.yy49 ){
+    yymsp[0].minor.yy49->pFilter = yymsp[-1].minor.yy404;
+  }else{
+    sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy404);
+  }
+  yylhsminor.yy49 = yymsp[0].minor.yy49;
 }
-  yymsp[-1].minor.yy303 = yylhsminor.yy303;
+  yymsp[-1].minor.yy49 = yylhsminor.yy49;
        break;
-      case 321: /* filter_over ::= filter_clause */
+      case 333: /* filter_over ::= filter_clause */
 {
-  yylhsminor.yy303 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window));
-  if( yylhsminor.yy303 ){
-    yylhsminor.yy303->eFrmType = TK_FILTER;
-    yylhsminor.yy303->pFilter = yymsp[0].minor.yy202;
+  yylhsminor.yy49 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window));
+  if( yylhsminor.yy49 ){
+    yylhsminor.yy49->eFrmType = TK_FILTER;
+    yylhsminor.yy49->pFilter = yymsp[0].minor.yy404;
   }else{
-    sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy202);
+    sqlite3ExprDelete(pParse->db, yymsp[0].minor.yy404);
   }
 }
-  yymsp[0].minor.yy303 = yylhsminor.yy303;
+  yymsp[0].minor.yy49 = yylhsminor.yy49;
        break;
-      case 322: /* over_clause ::= OVER LP window RP */
+      case 334: /* over_clause ::= OVER LP window RP */
 {
-  yymsp[-3].minor.yy303 = yymsp[-1].minor.yy303;
-  assert( yymsp[-3].minor.yy303!=0 );
+  yymsp[-3].minor.yy49 = yymsp[-1].minor.yy49;
+  assert( yymsp[-3].minor.yy49!=0 );
 }
        break;
-      case 323: /* over_clause ::= OVER nm */
+      case 335: /* over_clause ::= OVER nm */
 {
-  yymsp[-1].minor.yy303 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window));
-  if( yymsp[-1].minor.yy303 ){
-    yymsp[-1].minor.yy303->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n);
+  yymsp[-1].minor.yy49 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window));
+  if( yymsp[-1].minor.yy49 ){
+    yymsp[-1].minor.yy49->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n);
   }
 }
        break;
-      case 324: /* filter_clause ::= FILTER LP WHERE expr RP */
-{ yymsp[-4].minor.yy202 = yymsp[-1].minor.yy202; }
+      case 336: /* filter_clause ::= FILTER LP WHERE expr RP */
+{ yymsp[-4].minor.yy404 = yymsp[-1].minor.yy404; }
        break;
       default:
-      /* (325) input ::= cmdlist */ yytestcase(yyruleno==325);
-      /* (326) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==326);
-      /* (327) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=327);
-      /* (328) ecmd ::= SEMI */ yytestcase(yyruleno==328);
-      /* (329) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==329);
-      /* (330) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=330);
-      /* (331) trans_opt ::= */ yytestcase(yyruleno==331);
-      /* (332) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==332);
-      /* (333) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==333);
-      /* (334) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==334);
-      /* (335) savepoint_opt ::= */ yytestcase(yyruleno==335);
-      /* (336) cmd ::= create_table create_table_args */ yytestcase(yyruleno==336);
-      /* (337) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==337);
-      /* (338) columnlist ::= columnname carglist */ yytestcase(yyruleno==338);
-      /* (339) nm ::= ID|INDEXED */ yytestcase(yyruleno==339);
-      /* (340) nm ::= STRING */ yytestcase(yyruleno==340);
-      /* (341) nm ::= JOIN_KW */ yytestcase(yyruleno==341);
-      /* (342) typetoken ::= typename */ yytestcase(yyruleno==342);
-      /* (343) typename ::= ID|STRING */ yytestcase(yyruleno==343);
-      /* (344) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=344);
-      /* (345) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=345);
-      /* (346) carglist ::= carglist ccons */ yytestcase(yyruleno==346);
-      /* (347) carglist ::= */ yytestcase(yyruleno==347);
-      /* (348) ccons ::= NULL onconf */ yytestcase(yyruleno==348);
-      /* (349) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==349);
-      /* (350) ccons ::= AS generated */ yytestcase(yyruleno==350);
-      /* (351) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==351);
-      /* (352) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==352);
-      /* (353) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=353);
-      /* (354) tconscomma ::= */ yytestcase(yyruleno==354);
-      /* (355) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=355);
-      /* (356) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=356);
-      /* (357) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=357);
-      /* (358) oneselect ::= values */ yytestcase(yyruleno==358);
-      /* (359) sclp ::= selcollist COMMA */ yytestcase(yyruleno==359);
-      /* (360) as ::= ID|STRING */ yytestcase(yyruleno==360);
-      /* (361) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=361);
-      /* (362) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==362);
-      /* (363) exprlist ::= nexprlist */ yytestcase(yyruleno==363);
-      /* (364) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=364);
-      /* (365) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=365);
-      /* (366) nmnum ::= ON */ yytestcase(yyruleno==366);
-      /* (367) nmnum ::= DELETE */ yytestcase(yyruleno==367);
-      /* (368) nmnum ::= DEFAULT */ yytestcase(yyruleno==368);
-      /* (369) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==369);
-      /* (370) foreach_clause ::= */ yytestcase(yyruleno==370);
-      /* (371) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==371);
-      /* (372) trnm ::= nm */ yytestcase(yyruleno==372);
+      /* (325) input ::= cmdlist */ yytestcase(yyruleno==325);
+      /* (326) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==326);
+      /* (327) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=327);
+      /* (328) ecmd ::= SEMI */ yytestcase(yyruleno==328);
+      /* (329) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==329);
+      /* (330) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=330);
+      /* (331) trans_opt ::= */ yytestcase(yyruleno==331);
+      /* (332) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==332);
+      /* (333) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==333);
+      /* (334) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==334);
+      /* (335) savepoint_opt ::= */ yytestcase(yyruleno==335);
+      /* (336) cmd ::= create_table create_table_args */ yytestcase(yyruleno==336);
+      /* (337) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==337);
+      /* (338) columnlist ::= columnname carglist */ yytestcase(yyruleno==338);
+      /* (339) nm ::= ID|INDEXED */ yytestcase(yyruleno==339);
+      /* (340) nm ::= STRING */ yytestcase(yyruleno==340);
+      /* (341) nm ::= JOIN_KW */ yytestcase(yyruleno==341);
+      /* (342) typetoken ::= typename */ yytestcase(yyruleno==342);
+      /* (343) typename ::= ID|STRING */ yytestcase(yyruleno==343);
+      /* (344) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=344);
+      /* (345) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=345);
+      /* (346) carglist ::= carglist ccons */ yytestcase(yyruleno==346);
+      /* (347) carglist ::= */ yytestcase(yyruleno==347);
+      /* (348) ccons ::= NULL onconf */ yytestcase(yyruleno==348);
+      /* (349) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==349);
+      /* (350) ccons ::= AS generated */ yytestcase(yyruleno==350);
+      /* (351) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==351);
+      /* (352) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==352);
+      /* (353) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=353);
+      /* (354) tconscomma ::= */ yytestcase(yyruleno==354);
+      /* (355) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=355);
+      /* (356) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=356);
+      /* (357) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=357);
+      /* (358) oneselect ::= values */ yytestcase(yyruleno==358);
+      /* (359) sclp ::= selcollist COMMA */ yytestcase(yyruleno==359);
+      /* (360) as ::= ID|STRING */ yytestcase(yyruleno==360);
+      /* (361) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=361);
+      /* (362) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==362);
+      /* (363) exprlist ::= nexprlist */ yytestcase(yyruleno==363);
+      /* (364) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=364);
+      /* (365) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=365);
+      /* (366) nmnum ::= ON */ yytestcase(yyruleno==366);
+      /* (367) nmnum ::= DELETE */ yytestcase(yyruleno==367);
+      /* (368) nmnum ::= DEFAULT */ yytestcase(yyruleno==368);
+      /* (369) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==369);
+      /* (370) foreach_clause ::= */ yytestcase(yyruleno==370);
+      /* (371) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==371);
+      /* (372) trnm ::= nm */ yytestcase(yyruleno==372);
-      /* (373) tridxby ::= */ yytestcase(yyruleno==373);
-      /* (374) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==374);
-      /* (375) database_kw_opt ::= */ yytestcase(yyruleno==375);
-      /* (376) kwcolumn_opt ::= */ yytestcase(yyruleno==376);
-      /* (377) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==377);
-      /* (378) vtabarglist ::= vtabarg */ yytestcase(yyruleno==378);
-      /* (379) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==379);
-      /* (380) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==380);
-      /* (381) anylist ::= */ yytestcase(yyruleno==381);
-      /* (382) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==382);
-      /* (383) anylist ::= anylist ANY */ yytestcase(yyruleno==383);
-      /* (384) with ::= */ yytestcase(yyruleno==384);
+      /* (337) input ::= cmdlist */ yytestcase(yyruleno==337);
+      /* (338) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==338);
+      /* (339) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=339);
+      /* (340) ecmd ::= SEMI */ yytestcase(yyruleno==340);
+      /* (341) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==341);
+      /* (342) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=342);
+      /* (343) trans_opt ::= */ yytestcase(yyruleno==343);
+      /* (344) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==344);
+      /* (345) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==345);
+      /* (346) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==346);
+      /* (347) savepoint_opt ::= */ yytestcase(yyruleno==347);
+      /* (348) cmd ::= create_table create_table_args */ yytestcase(yyruleno==348);
+      /* (349) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==349);
+      /* (350) columnlist ::= columnname carglist */ yytestcase(yyruleno==350);
+      /* (351) nm ::= ID|INDEXED */ yytestcase(yyruleno==351);
+      /* (352) nm ::= STRING */ yytestcase(yyruleno==352);
+      /* (353) nm ::= JOIN_KW */ yytestcase(yyruleno==353);
+      /* (354) typetoken ::= typename */ yytestcase(yyruleno==354);
+      /* (355) typename ::= ID|STRING */ yytestcase(yyruleno==355);
+      /* (356) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=356);
+      /* (357) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=357);
+      /* (358) carglist ::= carglist ccons */ yytestcase(yyruleno==358);
+      /* (359) carglist ::= */ yytestcase(yyruleno==359);
+      /* (360) ccons ::= NULL onconf */ yytestcase(yyruleno==360);
+      /* (361) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==361);
+      /* (362) ccons ::= AS generated */ yytestcase(yyruleno==362);
+      /* (363) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==363);
+      /* (364) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==364);
+      /* (365) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=365);
+      /* (366) tconscomma ::= */ yytestcase(yyruleno==366);
+      /* (367) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=367);
+      /* (368) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=368);
+      /* (369) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=369);
+      /* (370) oneselect ::= values */ yytestcase(yyruleno==370);
+      /* (371) sclp ::= selcollist COMMA */ yytestcase(yyruleno==371);
+      /* (372) as ::= ID|STRING */ yytestcase(yyruleno==372);
+      /* (373) returning ::= */ yytestcase(yyruleno==373);
+      /* (374) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=374);
+      /* (375) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==375);
+      /* (376) exprlist ::= nexprlist */ yytestcase(yyruleno==376);
+      /* (377) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=377);
+      /* (378) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=378);
+      /* (379) nmnum ::= ON */ yytestcase(yyruleno==379);
+      /* (380) nmnum ::= DELETE */ yytestcase(yyruleno==380);
+      /* (381) nmnum ::= DEFAULT */ yytestcase(yyruleno==381);
+      /* (382) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==382);
+      /* (383) foreach_clause ::= */ yytestcase(yyruleno==383);
+      /* (384) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==384);
+      /* (385) trnm ::= nm */ yytestcase(yyruleno==385);
+      /* (386) tridxby ::= */ yytestcase(yyruleno==386);
+      /* (387) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==387);
+      /* (388) database_kw_opt ::= */ yytestcase(yyruleno==388);
+      /* (389) kwcolumn_opt ::= */ yytestcase(yyruleno==389);
+      /* (390) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==390);
+      /* (391) vtabarglist ::= vtabarg */ yytestcase(yyruleno==391);
+      /* (392) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==392);
+      /* (393) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==393);
+      /* (394) anylist ::= */ yytestcase(yyruleno==394);
+      /* (395) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==395);
+      /* (396) anylist ::= anylist ANY */ yytestcase(yyruleno==396);
+      /* (397) with ::= */ yytestcase(yyruleno==397);
        break;
 /********** End reduce actions ************************************************/
   };
@@ -159129,12 +163162,56 @@ SQLITE_PRIVATE void sqlite3Parser(
   }
 #endif

-  do{
+  while(1){ /* Exit by "break" */
+    assert( yypParser->yytos>=yypParser->yystack );
     assert( yyact==yypParser->yytos->stateno );
     yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact);
     if( yyact >= YY_MIN_REDUCE ){
-      yyact = yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor,
-                        yyminor sqlite3ParserCTX_PARAM);
+      unsigned int yyruleno = yyact - YY_MIN_REDUCE; /* Reduce by this rule */
+      assert( yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) );
+#ifndef NDEBUG
+      if( yyTraceFILE ){
+        int yysize = yyRuleInfoNRhs[yyruleno];
+        if( yysize ){
+          fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n",
+            yyTracePrompt,
+            yyruleno, yyRuleName[yyruleno],
+            yyruleno<YYNRULE ? "" : " /* without external action */",
+            yypParser->yytos[yysize].stateno);
+        }else{
+          fprintf(yyTraceFILE, "%sReduce %d [%s]%s.\n",
+            yyTracePrompt, yyruleno, yyRuleName[yyruleno],
+            yyruleno<YYNRULE ? "" : " /* without external action */");
+        }
+      }
+#endif /* NDEBUG */
+
+      /* Check that the stack is large enough to grow by a single entry
+      ** if the RHS of the rule is empty.  This ensures that there is room
+      ** enough on the stack to push the LHS value */
+      if( yyRuleInfoNRhs[yyruleno]==0 ){
+#ifdef YYTRACKMAXSTACKDEPTH
+        if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){
+          yypParser->yyhwm++;
+          assert( yypParser->yyhwm ==
+                  (int)(yypParser->yytos - yypParser->yystack));
+        }
+#endif
+#if YYSTACKDEPTH>0
+        if( yypParser->yytos>=yypParser->yystackEnd ){
+          yyStackOverflow(yypParser);
+          break;
+        }
+#else
+        if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){
+          if( yyGrowStack(yypParser) ){
+            yyStackOverflow(yypParser);
+            break;
+          }
+        }
+#endif
+      }
+      yyact = yy_reduce(yypParser,yyruleno,yymajor,yyminor sqlite3ParserCTX_PARAM);
     }else if( yyact <= YY_MAX_SHIFTREDUCE ){
       yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor);
 #ifndef YYNOERRORRECOVERY
@@ -159247,7 +163324,7 @@ SQLITE_PRIVATE void sqlite3Parser(
       break;
 #endif
     }
-  }while( yypParser->yytos>yypParser->yystack );
+  }
 #ifndef NDEBUG
   if( yyTraceFILE ){
     yyStackEntry *i;
@@ -159308,8 +163385,8 @@ SQLITE_PRIVATE int sqlite3ParserFallback(int iToken){
 ** all of them need to be used within the switch.
 */
 #define CC_X          0    /* The letter 'x', or start of BLOB literal */
-#define CC_KYWD       1    /* Alphabetics or '_'.  Usable in a keyword */
-#define CC_ID         2    /* unicode characters usable in IDs */
+#define CC_KYWD0      1    /* First letter of a keyword */
+#define CC_KYWD       2    /* Alphabetics or '_'.  Usable in a keyword */
 #define CC_DIGIT      3    /* Digits */
 #define CC_DOLLAR     4    /* '$' */
 #define CC_VARALPHA   5    /* '@', '#', ':'.  Alphabetic SQL variables */
@@ -159334,47 +163411,49 @@ SQLITE_PRIVATE int sqlite3ParserFallback(int iToken){
 #define CC_AND       24    /* '&' */
 #define CC_TILDA     25    /* '~' */
 #define CC_DOT       26    /* '.' */
-#define CC_ILLEGAL   27    /* Illegal character */
-#define CC_NUL       28    /* 0x00 */
+#define CC_ID        27    /* unicode characters usable in IDs */
+#define CC_ILLEGAL   28    /* Illegal character */
+#define CC_NUL       29    /* 0x00 */
+#define CC_BOM       30    /* First byte of UTF8 BOM: 0xEF 0xBB 0xBF */

 static const unsigned char aiClass[] = {
 #ifdef SQLITE_ASCII
 /*         x0  x1  x2  x3  x4  x5  x6  x7  x8  x9  xa  xb  xc  xd  xe  xf */
-/* 0x */   28, 27, 27, 27, 27, 27, 27, 27, 27,  7,  7, 27,  7,  7, 27, 27,
-/* 1x */   27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+/* 0x */   29, 28, 28, 28, 28, 28, 28, 28, 28,  7,  7, 28,  7,  7, 28, 28,
+/* 1x */   28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
 /* 2x */    7, 15,  8,  5,  4, 22, 24,  8, 17, 18, 21, 20, 23, 11, 26, 16,
 /* 3x */    3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  5, 19, 12, 14, 13,  6,
 /* 4x */    5,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
-/* 5x */    1,  1,  1,  1,  1,  1,  1,  1,  0,  1,  1,  9, 27, 27, 27,  1,
+/* 5x */    1,  1,  1,  1,  1,  1,  1,  1,  0,  2,  2,  9, 28, 28, 28,  2,
 /* 6x */    8,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
-/* 7x */    1,  1,  1,  1,  1,  1,  1,  1,  0,  1,  1, 27, 10, 27, 25, 27,
-/* 8x */    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
-/* 9x */    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
-/* Ax */    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
-/* Bx */    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
-/* Cx */    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
-/* Dx */    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
-/* Ex */    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
-/* Fx */    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2
+/* 7x */    1,  1,  1,  1,  1,  1,  1,  1,  0,  2,  2, 28, 10, 28, 25, 28,
+/* 8x */   27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+/* 9x */   27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+/* Ax */   27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+/* Bx */   27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+/* Cx */   27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+/* Dx */   27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+/* Ex */   27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 30,
+/* Fx */   27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27
 #endif
 #ifdef SQLITE_EBCDIC
 /*         x0  x1  x2  x3  x4  x5  x6  x7  x8  x9  xa  xb  xc  xd  xe  xf */
-/* 0x */   27, 27, 27, 27, 27,  7, 27, 27, 27, 27, 27, 27,  7,  7, 27, 27,
-/* 1x */   27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
-/* 2x */   27, 27, 27, 27, 27,  7, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
-/* 3x */   27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
-/* 4x */    7, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 26, 12, 17, 20, 10,
-/* 5x */   24, 27, 27, 27, 27, 27, 27, 27, 27, 27, 15,  4, 21, 18, 19, 27,
-/* 6x */   11, 16, 27, 27, 27, 27, 27, 27, 27, 27, 27, 23, 22,  1, 13,  6,
-/* 7x */   27, 27, 27, 27, 27, 27, 27, 27, 27,  8,  5,  5,  5,  8, 14,  8,
-/* 8x */   27,  1,  1,  1,  1,  1,  1,  1,  1,  1, 27, 27, 27, 27, 27, 27,
-/* 9x */   27,  1,  1,  1,  1,  1,  1,  1,  1,  1, 27, 27, 27, 27, 27, 27,
-/* Ax */   27, 25,  1,  1,  1,  1,  1,  0,  1,  1, 27, 27, 27, 27, 27, 27,
-/* Bx */   27, 27, 27, 27, 27, 27, 27, 27, 27, 27,  9, 27, 27, 27, 27, 27,
-/* Cx */   27,  1,  1,  1,  1,  1,  1,  1,  1,  1, 27, 27, 27, 27, 27, 27,
-/* Dx */   27,  1,  1,  1,  1,  1,  1,  1,  1,  1, 27, 27, 27, 27, 27, 27,
-/* Ex */   27, 27,  1,  1,  1,  1,  1,  0,  1,  1, 27, 27, 27, 27, 27, 27,
-/* Fx */    3,  3,  3,  3,  3,  3,  3,  3,  3,  3, 27, 27, 27, 27, 27, 27,
+/* 0x */   29, 28, 28, 28, 28,  7, 28, 28, 28, 28, 28, 28,  7,  7, 28, 28,
+/* 1x */   28, 28, 28, 28, 28,  7, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+/* 2x */   28, 28, 28, 28, 28,  7, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+/* 3x */   28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+/* 4x */    7, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 26, 12, 17, 20, 10,
+/* 5x */   24, 28, 28, 28, 28, 28, 28, 28, 28, 28, 15,  4, 21, 18, 19, 28,
+/* 6x */   11, 16, 28, 28, 28, 28, 28, 28, 28, 28, 28, 23, 22,  2, 13,  6,
+/* 7x */   28, 28, 28, 28, 28, 28, 28, 28, 28,  8,  5,  5,  5,  8, 14,  8,
+/* 8x */   28,  1,  1,  1,  1,  1,  1,  1,  1,  1, 28, 28, 28, 28, 28, 28,
+/* 9x */   28,  1,  1,  1,  1,  1,  1,  1,  1,  1, 28, 28, 28, 28, 28, 28,
+/* Ax */   28, 25,  1,  1,  1,  1,  1,  0,  2,  2, 28, 28, 28, 28, 28, 28,
+/* Bx */   28, 28, 28, 28, 28, 28, 28, 28, 28, 28,  9, 28, 28, 28, 28, 28,
+/* Cx */   28,  1,  1,  1,  1,  1,  1,  1,  1,  1, 28, 28, 28, 28, 28, 28,
+/* Dx */   28,  1,  1,  1,  1,  1,  1,  1,  1,  1, 28, 28, 28, 28, 28, 28,
+/* Ex */   28, 28,  1,  1,  1,  1,  1,  0,  2,  2, 28, 28, 28, 28, 28, 28,
+/* Fx */    3,  3,  3,  3,  3,  3,  3,  3,  3,  3, 28, 28, 28, 28, 28, 28,
 #endif
 };
@@ -159439,20 +163518,21 @@ const unsigned char ebcdicToAscii[] = {
 ** is substantially reduced.  This is important for embedded applications
 ** on platforms with limited memory.
 */
-/* Hash score: 227 */
-/* zKWText[] encodes 984 bytes of keyword text in 648 bytes */
+/* Hash score: 231 */
+/* zKWText[] encodes 1007 bytes of keyword text in 667 bytes */
 /*   REINDEXEDESCAPEACHECKEYBEFOREIGNOREGEXPLAINSTEADDATABASELECT       */
 /*   ABLEFTHENDEFERRABLELSEXCLUDELETEMPORARYISNULLSAVEPOINTERSECT       */
 /*   IESNOTNULLIKEXCEPTRANSACTIONATURALTERAISEXCLUSIVEXISTS             */
 /*   CONSTRAINTOFFSETRIGGERANGENERATEDETACHAVINGLOBEGINNEREFERENCES     */
 /*   UNIQUERYWITHOUTERELEASEATTACHBETWEENOTHINGROUPSCASCADEFAULT        */
 /*   CASECOLLATECREATECURRENT_DATEIMMEDIATEJOINSERTMATCHPLANALYZE       */
-/*   PRAGMABORTUPDATEVALUESVIRTUALWAYSWHENWHERECURSIVEAFTERENAMEAND     */
-/*   EFERREDISTINCTAUTOINCREMENTCASTCOLUMNCOMMITCONFLICTCROSS           */
-/*   CURRENT_TIMESTAMPARTITIONDROPRECEDINGFAILASTFILTEREPLACEFIRST      */
-/*   FOLLOWINGFROMFULLIMITIFORDERESTRICTOTHERSOVERIGHTROLLBACKROWS      */
-/*   UNBOUNDEDUNIONUSINGVACUUMVIEWINDOWBYINITIALLYPRIMARY               */
-static const char zKWText[647] = {
+/*   PRAGMATERIALIZEDEFERREDISTINCTUPDATEVALUESVIRTUALWAYSWHENWHERE     */
+/*   CURSIVEABORTAFTERENAMEANDROPARTITIONAUTOINCREMENTCASTCOLUMN        */
+/*   COMMITCONFLICTCROSSCURRENT_TIMESTAMPRECEDINGFAILASTFILTER          */
+/*   EPLACEFIRSTFOLLOWINGFROMFULLIMITIFORDERESTRICTOTHERSOVER           */
+/*   ETURNINGRIGHTROLLBACKROWSUNBOUNDEDUNIONUSINGVACUUMVIEWINDOWBY      */
+/*   INITIALLYPRIMARY                                                   */
+static const char zKWText[666] = {
   'R','E','I','N','D','E','X','E','D','E','S','C','A','P','E','A','C','H',
   'E','C','K','E','Y','B','E','F','O','R','E','I','G','N','O','R','E','G',
   'E','X','P','L','A','I','N','S','T','E','A','D','D','A','T','A','B','A',
@@ -159473,86 +163553,87 @@ static const char zKWText[647] = {
   'C','R','E','A','T','E','C','U','R','R','E','N','T','_','D','A','T','E',
   'I','M','M','E','D','I','A','T','E','J','O','I','N','S','E','R','T','M',
   'A','T','C','H','P','L','A','N','A','L','Y','Z','E','P','R','A','G','M',
-  'A','B','O','R','T','U','P','D','A','T','E','V','A','L','U','E','S','V',
-  'I','R','T','U','A','L','W','A','Y','S','W','H','E','N','W','H','E','R',
-  'E','C','U','R','S','I','V','E','A','F','T','E','R','E','N','A','M','E',
-  'A','N','D','E','F','E','R','R','E','D','I','S','T','I','N','C','T','A',
'A','N','D','E','F','E','R','R','E','D','I','S','T','I','N','C','T','A', - 'U','T','O','I','N','C','R','E','M','E','N','T','C','A','S','T','C','O', - 'L','U','M','N','C','O','M','M','I','T','C','O','N','F','L','I','C','T', - 'C','R','O','S','S','C','U','R','R','E','N','T','_','T','I','M','E','S', - 'T','A','M','P','A','R','T','I','T','I','O','N','D','R','O','P','R','E', - 'C','E','D','I','N','G','F','A','I','L','A','S','T','F','I','L','T','E', - 'R','E','P','L','A','C','E','F','I','R','S','T','F','O','L','L','O','W', - 'I','N','G','F','R','O','M','F','U','L','L','I','M','I','T','I','F','O', - 'R','D','E','R','E','S','T','R','I','C','T','O','T','H','E','R','S','O', - 'V','E','R','I','G','H','T','R','O','L','L','B','A','C','K','R','O','W', - 'S','U','N','B','O','U','N','D','E','D','U','N','I','O','N','U','S','I', - 'N','G','V','A','C','U','U','M','V','I','E','W','I','N','D','O','W','B', - 'Y','I','N','I','T','I','A','L','L','Y','P','R','I','M','A','R','Y', + 'A','T','E','R','I','A','L','I','Z','E','D','E','F','E','R','R','E','D', + 'I','S','T','I','N','C','T','U','P','D','A','T','E','V','A','L','U','E', + 'S','V','I','R','T','U','A','L','W','A','Y','S','W','H','E','N','W','H', + 'E','R','E','C','U','R','S','I','V','E','A','B','O','R','T','A','F','T', + 'E','R','E','N','A','M','E','A','N','D','R','O','P','A','R','T','I','T', + 'I','O','N','A','U','T','O','I','N','C','R','E','M','E','N','T','C','A', + 'S','T','C','O','L','U','M','N','C','O','M','M','I','T','C','O','N','F', + 'L','I','C','T','C','R','O','S','S','C','U','R','R','E','N','T','_','T', + 'I','M','E','S','T','A','M','P','R','E','C','E','D','I','N','G','F','A', + 'I','L','A','S','T','F','I','L','T','E','R','E','P','L','A','C','E','F', + 'I','R','S','T','F','O','L','L','O','W','I','N','G','F','R','O','M','F', + 'U','L','L','I','M','I','T','I','F','O','R','D','E','R','E','S','T','R', + 'I','C','T','O','T','H','E','R','S','O','V','E','R','E','T','U','R','N', + 'I','N','G','R','I','G','H','T','R','O','L','L','B','A','C','K','R','O', + 'W','S','U','N','B','O','U','N','D','E','D','U','N','I','O','N','U','S', + 'I','N','G','V','A','C','U','U','M','V','I','E','W','I','N','D','O','W', + 'B','Y','I','N','I','T','I','A','L','L','Y','P','R','I','M','A','R','Y', }; /* aKWHash[i] is the hash value for the i-th keyword */ static const unsigned char aKWHash[127] = { - 84, 102, 132, 82, 114, 29, 0, 0, 91, 0, 85, 72, 0, - 53, 35, 86, 15, 0, 42, 94, 54, 126, 133, 19, 0, 0, - 138, 0, 40, 128, 0, 22, 104, 0, 9, 0, 0, 122, 80, - 0, 78, 6, 0, 65, 99, 145, 0, 134, 112, 0, 0, 48, - 0, 100, 24, 0, 17, 0, 27, 70, 23, 26, 5, 60, 140, - 107, 121, 0, 73, 101, 71, 143, 61, 119, 74, 0, 49, 0, - 11, 41, 0, 110, 0, 0, 0, 106, 10, 108, 113, 124, 14, - 50, 123, 0, 89, 0, 18, 120, 142, 56, 129, 137, 88, 83, - 37, 30, 125, 0, 0, 105, 51, 130, 127, 0, 34, 0, 0, - 44, 0, 95, 38, 39, 0, 20, 45, 116, 90, + 84, 92, 134, 82, 105, 29, 0, 0, 94, 0, 85, 72, 0, + 53, 35, 86, 15, 0, 42, 97, 54, 89, 135, 19, 0, 0, + 140, 0, 40, 129, 0, 22, 107, 0, 9, 0, 0, 123, 80, + 0, 78, 6, 0, 65, 103, 147, 0, 136, 115, 0, 0, 48, + 0, 90, 24, 0, 17, 0, 27, 70, 23, 26, 5, 60, 142, + 110, 122, 0, 73, 91, 71, 145, 61, 120, 74, 0, 49, 0, + 11, 41, 0, 113, 0, 0, 0, 109, 10, 111, 116, 125, 14, + 50, 124, 0, 100, 0, 18, 121, 144, 56, 130, 139, 88, 83, + 37, 30, 126, 0, 0, 108, 51, 131, 128, 0, 34, 0, 0, + 132, 0, 98, 38, 39, 0, 20, 45, 117, 93, }; /* aKWNext[] forms the hash collision chain. If aKWHash[i]==0 ** then the i-th keyword has no more hash collisions. 
Otherwise, ** the next keyword with the same hash is aKWHash[i]-1. */ -static const unsigned char aKWNext[145] = { - 0, 0, 0, 0, 4, 0, 43, 0, 0, 103, 111, 0, 0, - 0, 2, 0, 0, 141, 0, 0, 0, 13, 0, 0, 0, 0, - 139, 0, 0, 118, 52, 0, 0, 135, 12, 0, 0, 62, 0, - 136, 0, 131, 0, 0, 36, 0, 0, 28, 77, 0, 0, 0, +static const unsigned char aKWNext[147] = { + 0, 0, 0, 0, 4, 0, 43, 0, 0, 106, 114, 0, 0, + 0, 2, 0, 0, 143, 0, 0, 0, 13, 0, 0, 0, 0, + 141, 0, 0, 119, 52, 0, 0, 137, 12, 0, 0, 62, 0, + 138, 0, 133, 0, 0, 36, 0, 0, 28, 77, 0, 0, 0, 0, 59, 0, 47, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 69, 0, 0, 0, 0, 0, 144, 3, 0, 58, 0, 1, - 75, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 64, 66, - 63, 0, 0, 0, 0, 46, 0, 16, 0, 115, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 81, 97, 0, 8, 0, 109, - 21, 7, 67, 0, 79, 93, 117, 0, 0, 68, 0, 0, 96, - 0, 55, 0, 76, 0, 92, 32, 33, 57, 25, 0, 98, 0, - 0, 87, + 0, 69, 0, 0, 0, 0, 0, 146, 3, 0, 58, 0, 1, + 75, 0, 0, 0, 31, 0, 0, 0, 0, 0, 127, 0, 104, + 0, 64, 66, 63, 0, 0, 0, 0, 0, 46, 0, 16, 8, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 81, 101, 0, + 112, 21, 7, 67, 0, 79, 96, 118, 0, 0, 68, 0, 0, + 99, 44, 0, 55, 0, 76, 0, 95, 32, 33, 57, 25, 0, + 102, 0, 0, 87, }; /* aKWLen[i] is the length (in bytes) of the i-th keyword */ -static const unsigned char aKWLen[145] = { +static const unsigned char aKWLen[147] = { 7, 7, 5, 4, 6, 4, 5, 3, 6, 7, 3, 6, 6, 7, 7, 3, 8, 2, 6, 5, 4, 4, 3, 10, 4, 7, 6, 9, 4, 2, 6, 5, 9, 9, 4, 7, 3, 2, 4, 4, 6, 11, 6, 2, 7, 5, 5, 9, 6, 10, 4, 6, 2, 3, 7, 5, 9, 6, 6, 4, 5, 5, 10, 6, 5, 7, 4, 5, 7, 6, 7, 7, 6, 5, 7, 3, 7, 4, - 7, 6, 12, 9, 4, 6, 5, 4, 7, 6, 5, 6, 6, - 7, 6, 4, 5, 9, 5, 6, 3, 8, 8, 2, 13, 2, - 2, 4, 6, 6, 8, 5, 17, 12, 7, 9, 4, 9, 4, - 4, 6, 7, 5, 9, 4, 4, 5, 2, 5, 8, 6, 4, - 5, 8, 4, 3, 9, 5, 5, 6, 4, 6, 2, 2, 9, - 3, 7, + 7, 6, 12, 9, 4, 6, 5, 4, 7, 6, 12, 8, 8, + 2, 6, 6, 7, 6, 4, 5, 9, 5, 5, 6, 3, 4, + 9, 13, 2, 2, 4, 6, 6, 8, 5, 17, 12, 7, 9, + 4, 4, 6, 7, 5, 9, 4, 4, 5, 2, 5, 8, 6, + 4, 9, 5, 8, 4, 3, 9, 5, 5, 6, 4, 6, 2, + 2, 9, 3, 7, }; /* aKWOffset[i] is the index into zKWText[] of the start of ** the text for the i-th keyword. 
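** As a worked example drawn from the tables in this file: aKWOffset[0]==0
** and aKWLen[0]==7, so keyword 0 is the first seven bytes of zKWText,
** "REINDEX", and its parser symbol is aKWCode[0]==TK_REINDEX.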
*/ -static const unsigned short int aKWOffset[145] = { +static const unsigned short int aKWOffset[147] = { 0, 2, 2, 8, 9, 14, 16, 20, 23, 25, 25, 29, 33, 36, 41, 46, 48, 53, 54, 59, 62, 65, 67, 69, 78, 81, 86, 90, 90, 94, 99, 101, 105, 111, 119, 123, 123, 123, 126, 129, 132, 137, 142, 146, 147, 152, 156, 160, 168, 174, 181, 184, 184, 187, 189, 195, 198, 206, 211, 216, 219, 222, 226, 236, 239, 244, 244, 248, 252, 259, 265, 271, 277, 277, 283, 284, 288, 295, - 299, 306, 312, 324, 333, 335, 341, 346, 348, 355, 360, 365, 371, - 377, 382, 388, 392, 395, 404, 408, 414, 416, 423, 424, 431, 433, - 435, 444, 448, 454, 460, 468, 473, 473, 473, 489, 498, 501, 510, - 513, 517, 522, 529, 534, 543, 547, 550, 555, 557, 561, 569, 575, - 578, 583, 591, 591, 595, 604, 609, 614, 620, 623, 626, 629, 631, - 636, 640, + 299, 306, 312, 324, 333, 335, 341, 346, 348, 355, 359, 370, 377, + 378, 385, 391, 397, 402, 408, 412, 415, 424, 429, 433, 439, 441, + 444, 453, 455, 457, 466, 470, 476, 482, 490, 495, 495, 495, 511, + 520, 523, 527, 532, 539, 544, 553, 557, 560, 565, 567, 571, 579, + 585, 588, 597, 602, 610, 610, 614, 623, 628, 633, 639, 642, 645, + 648, 650, 655, 659, }; /* aKWCode[i] is the parser symbol code for the i-th keyword */ -static const unsigned char aKWCode[145] = { +static const unsigned char aKWCode[147] = { TK_REINDEX, TK_INDEXED, TK_INDEX, TK_DESC, TK_ESCAPE, TK_EACH, TK_CHECK, TK_KEY, TK_BEFORE, TK_FOREIGN, TK_FOR, TK_IGNORE, TK_LIKE_KW, TK_EXPLAIN, TK_INSTEAD, @@ -159570,18 +163651,19 @@ static const unsigned char aKWCode[145] = { TK_BETWEEN, TK_NOTHING, TK_GROUPS, TK_GROUP, TK_CASCADE, TK_ASC, TK_DEFAULT, TK_CASE, TK_COLLATE, TK_CREATE, TK_CTIME_KW, TK_IMMEDIATE, TK_JOIN, TK_INSERT, TK_MATCH, - TK_PLAN, TK_ANALYZE, TK_PRAGMA, TK_ABORT, TK_UPDATE, - TK_VALUES, TK_VIRTUAL, TK_ALWAYS, TK_WHEN, TK_WHERE, - TK_RECURSIVE, TK_AFTER, TK_RENAME, TK_AND, TK_DEFERRED, - TK_DISTINCT, TK_IS, TK_AUTOINCR, TK_TO, TK_IN, - TK_CAST, TK_COLUMNKW, TK_COMMIT, TK_CONFLICT, TK_JOIN_KW, - TK_CTIME_KW, TK_CTIME_KW, TK_CURRENT, TK_PARTITION, TK_DROP, - TK_PRECEDING, TK_FAIL, TK_LAST, TK_FILTER, TK_REPLACE, - TK_FIRST, TK_FOLLOWING, TK_FROM, TK_JOIN_KW, TK_LIMIT, - TK_IF, TK_ORDER, TK_RESTRICT, TK_OTHERS, TK_OVER, - TK_JOIN_KW, TK_ROLLBACK, TK_ROWS, TK_ROW, TK_UNBOUNDED, - TK_UNION, TK_USING, TK_VACUUM, TK_VIEW, TK_WINDOW, - TK_DO, TK_BY, TK_INITIALLY, TK_ALL, TK_PRIMARY, + TK_PLAN, TK_ANALYZE, TK_PRAGMA, TK_MATERIALIZED, TK_DEFERRED, + TK_DISTINCT, TK_IS, TK_UPDATE, TK_VALUES, TK_VIRTUAL, + TK_ALWAYS, TK_WHEN, TK_WHERE, TK_RECURSIVE, TK_ABORT, + TK_AFTER, TK_RENAME, TK_AND, TK_DROP, TK_PARTITION, + TK_AUTOINCR, TK_TO, TK_IN, TK_CAST, TK_COLUMNKW, + TK_COMMIT, TK_CONFLICT, TK_JOIN_KW, TK_CTIME_KW, TK_CTIME_KW, + TK_CURRENT, TK_PRECEDING, TK_FAIL, TK_LAST, TK_FILTER, + TK_REPLACE, TK_FIRST, TK_FOLLOWING, TK_FROM, TK_JOIN_KW, + TK_LIMIT, TK_IF, TK_ORDER, TK_RESTRICT, TK_OTHERS, + TK_OVER, TK_RETURNING, TK_JOIN_KW, TK_ROLLBACK, TK_ROWS, + TK_ROW, TK_UNBOUNDED, TK_UNION, TK_USING, TK_VACUUM, + TK_VIEW, TK_WINDOW, TK_DO, TK_BY, TK_INITIALLY, + TK_ALL, TK_PRIMARY, }; /* Hash table decoded: ** 0: INSERT @@ -159605,7 +163687,7 @@ static const unsigned char aKWCode[145] = { ** 18: TRANSACTION RIGHT ** 19: WHEN ** 20: SET HAVING -** 21: IF +** 21: MATERIALIZED IF ** 22: ROWS ** 23: SELECT ** 24: @@ -159701,7 +163783,7 @@ static const unsigned char aKWCode[145] = { ** 114: INTERSECT UNBOUNDED ** 115: ** 116: -** 117: ON +** 117: RETURNING ON ** 118: ** 119: WHERE ** 120: NO INNER @@ -159719,7 +163801,7 @@ static int 
keywordCode(const char *z, int n, int *pType){ int i, j; const char *zKW; if( n>=2 ){ - i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n) % 127; + i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n*1) % 127; for(i=((int)aKWHash[i])-1; i>=0; i=((int)aKWNext[i])-1){ if( aKWLen[i]!=n ) continue; zKW = &zKWText[aKWOffset[i]]; @@ -159824,63 +163906,65 @@ static int keywordCode(const char *z, int n, int *pType){ testcase( i==85 ); /* PLAN */ testcase( i==86 ); /* ANALYZE */ testcase( i==87 ); /* PRAGMA */ - testcase( i==88 ); /* ABORT */ - testcase( i==89 ); /* UPDATE */ - testcase( i==90 ); /* VALUES */ - testcase( i==91 ); /* VIRTUAL */ - testcase( i==92 ); /* ALWAYS */ - testcase( i==93 ); /* WHEN */ - testcase( i==94 ); /* WHERE */ - testcase( i==95 ); /* RECURSIVE */ - testcase( i==96 ); /* AFTER */ - testcase( i==97 ); /* RENAME */ - testcase( i==98 ); /* AND */ - testcase( i==99 ); /* DEFERRED */ - testcase( i==100 ); /* DISTINCT */ - testcase( i==101 ); /* IS */ - testcase( i==102 ); /* AUTOINCREMENT */ - testcase( i==103 ); /* TO */ - testcase( i==104 ); /* IN */ - testcase( i==105 ); /* CAST */ - testcase( i==106 ); /* COLUMN */ - testcase( i==107 ); /* COMMIT */ - testcase( i==108 ); /* CONFLICT */ - testcase( i==109 ); /* CROSS */ - testcase( i==110 ); /* CURRENT_TIMESTAMP */ - testcase( i==111 ); /* CURRENT_TIME */ - testcase( i==112 ); /* CURRENT */ - testcase( i==113 ); /* PARTITION */ - testcase( i==114 ); /* DROP */ - testcase( i==115 ); /* PRECEDING */ - testcase( i==116 ); /* FAIL */ - testcase( i==117 ); /* LAST */ - testcase( i==118 ); /* FILTER */ - testcase( i==119 ); /* REPLACE */ - testcase( i==120 ); /* FIRST */ - testcase( i==121 ); /* FOLLOWING */ - testcase( i==122 ); /* FROM */ - testcase( i==123 ); /* FULL */ - testcase( i==124 ); /* LIMIT */ - testcase( i==125 ); /* IF */ - testcase( i==126 ); /* ORDER */ - testcase( i==127 ); /* RESTRICT */ - testcase( i==128 ); /* OTHERS */ - testcase( i==129 ); /* OVER */ - testcase( i==130 ); /* RIGHT */ - testcase( i==131 ); /* ROLLBACK */ - testcase( i==132 ); /* ROWS */ - testcase( i==133 ); /* ROW */ - testcase( i==134 ); /* UNBOUNDED */ - testcase( i==135 ); /* UNION */ - testcase( i==136 ); /* USING */ - testcase( i==137 ); /* VACUUM */ - testcase( i==138 ); /* VIEW */ - testcase( i==139 ); /* WINDOW */ - testcase( i==140 ); /* DO */ - testcase( i==141 ); /* BY */ - testcase( i==142 ); /* INITIALLY */ - testcase( i==143 ); /* ALL */ - testcase( i==144 ); /* PRIMARY */ + testcase( i==88 ); /* MATERIALIZED */ + testcase( i==89 ); /* DEFERRED */ + testcase( i==90 ); /* DISTINCT */ + testcase( i==91 ); /* IS */ + testcase( i==92 ); /* UPDATE */ + testcase( i==93 ); /* VALUES */ + testcase( i==94 ); /* VIRTUAL */ + testcase( i==95 ); /* ALWAYS */ + testcase( i==96 ); /* WHEN */ + testcase( i==97 ); /* WHERE */ + testcase( i==98 ); /* RECURSIVE */ + testcase( i==99 ); /* ABORT */ + testcase( i==100 ); /* AFTER */ + testcase( i==101 ); /* RENAME */ + testcase( i==102 ); /* AND */ + testcase( i==103 ); /* DROP */ + testcase( i==104 ); /* PARTITION */ + testcase( i==105 ); /* AUTOINCREMENT */ + testcase( i==106 ); /* TO */ + testcase( i==107 ); /* IN */ + testcase( i==108 ); /* CAST */ + testcase( i==109 ); /* COLUMN */ + testcase( i==110 ); /* COMMIT */ + testcase( i==111 ); /* CONFLICT */ + testcase( i==112 ); /* CROSS */ + testcase( i==113 ); /* CURRENT_TIMESTAMP */ + testcase( i==114 ); /* CURRENT_TIME */ + testcase( i==115 ); /* CURRENT */ + testcase( i==116 ); /* PRECEDING */ + testcase( i==117 ); /* FAIL */ + testcase( 
i==118 ); /* LAST */ + testcase( i==119 ); /* FILTER */ + testcase( i==120 ); /* REPLACE */ + testcase( i==121 ); /* FIRST */ + testcase( i==122 ); /* FOLLOWING */ + testcase( i==123 ); /* FROM */ + testcase( i==124 ); /* FULL */ + testcase( i==125 ); /* LIMIT */ + testcase( i==126 ); /* IF */ + testcase( i==127 ); /* ORDER */ + testcase( i==128 ); /* RESTRICT */ + testcase( i==129 ); /* OTHERS */ + testcase( i==130 ); /* OVER */ + testcase( i==131 ); /* RETURNING */ + testcase( i==132 ); /* RIGHT */ + testcase( i==133 ); /* ROLLBACK */ + testcase( i==134 ); /* ROWS */ + testcase( i==135 ); /* ROW */ + testcase( i==136 ); /* UNBOUNDED */ + testcase( i==137 ); /* UNION */ + testcase( i==138 ); /* USING */ + testcase( i==139 ); /* VACUUM */ + testcase( i==140 ); /* VIEW */ + testcase( i==141 ); /* WINDOW */ + testcase( i==142 ); /* DO */ + testcase( i==143 ); /* BY */ + testcase( i==144 ); /* INITIALLY */ + testcase( i==145 ); /* ALL */ + testcase( i==146 ); /* PRIMARY */ *pType = aKWCode[i]; break; } @@ -159892,7 +163976,7 @@ SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char *z, int n){ keywordCode((char*)z, n, &id); return id; } -#define SQLITE_N_KEYWORD 145 +#define SQLITE_N_KEYWORD 147 SQLITE_API int sqlite3_keyword_name(int i,const char **pzName,int *pnName){ if( i<0 || i>=SQLITE_N_KEYWORD ) return SQLITE_ERROR; *pzName = zKWText + aKWOffset[i]; @@ -160261,7 +164345,7 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ if( n==0 ) *tokenType = TK_ILLEGAL; return i; } - case CC_KYWD: { + case CC_KYWD0: { for(i=1; aiClass[z[i]]<=CC_KYWD; i++){} if( IdChar(z[i]) ){ /* This token started out using characters that can appear in keywords, @@ -160291,10 +164375,19 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ ** SQL keywords start with the letter 'x'. 
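** (For example, x'53514C' is consumed as a blob literal, while an
** identifier such as x1 falls through here and is tokenized as an
** ordinary ID.)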
Fall through */ /* no break */ deliberate_fall_through } + case CC_KYWD: case CC_ID: { i = 1; break; } + case CC_BOM: { + if( z[1]==0xbb && z[2]==0xbf ){ + *tokenType = TK_SPACE; + return 3; + } + i = 1; + break; + } case CC_NUL: { *tokenType = TK_ILLEGAL; return 0; @@ -160473,19 +164566,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr if( !IN_RENAME_OBJECT ){ sqlite3DeleteTrigger(db, pParse->pNewTrigger); } - - if( pParse->pWithToFree ) sqlite3WithDelete(db, pParse->pWithToFree); sqlite3DbFree(db, pParse->pVList); - while( pParse->pAinc ){ - AutoincInfo *p = pParse->pAinc; - pParse->pAinc = p->pNext; - sqlite3DbFreeNN(db, p); - } - while( pParse->pZombieTab ){ - Table *p = pParse->pZombieTab; - pParse->pZombieTab = p->pNextZombie; - sqlite3DeleteTable(db, p); - } db->pParse = pParse->pParentParse; pParse->pParentParse = 0; assert( nErr==0 || pParse->rc!=SQLITE_OK ); @@ -161323,7 +165404,7 @@ SQLITE_API int sqlite3_initialize(void){ sqlite3GlobalConfig.isPCacheInit = 1; rc = sqlite3OsInit(); } -#ifdef SQLITE_ENABLE_DESERIALIZE +#ifndef SQLITE_OMIT_DESERIALIZE if( rc==SQLITE_OK ){ rc = sqlite3MemdbInit(); } @@ -161738,12 +165819,12 @@ SQLITE_API int sqlite3_config(int op, ...){ } #endif /* SQLITE_ENABLE_SORTER_REFERENCES */ -#ifdef SQLITE_ENABLE_DESERIALIZE +#ifndef SQLITE_OMIT_DESERIALIZE case SQLITE_CONFIG_MEMDB_MAXSIZE: { sqlite3GlobalConfig.mxMemdbSize = va_arg(ap, sqlite3_int64); break; } -#endif /* SQLITE_ENABLE_DESERIALIZE */ +#endif /* SQLITE_OMIT_DESERIALIZE */ default: { rc = SQLITE_ERROR; @@ -161915,7 +165996,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3 *db){ sqlite3BtreeEnterAll(db); for(i=0; rc==SQLITE_OK && i<db->nDb; i++){ Btree *pBt = db->aDb[i].pBt; - if( pBt && sqlite3BtreeIsInTrans(pBt) ){ + if( pBt && sqlite3BtreeTxnState(pBt)==SQLITE_TXN_WRITE ){ Pager *pPager = sqlite3BtreePager(pBt); rc = sqlite3PagerFlush(pPager); if( rc==SQLITE_BUSY ){ @@ -162259,10 +166340,40 @@ static int sqlite3Close(sqlite3 *db, int forceZombie){ return SQLITE_OK; } +/* +** Return the transaction state for a single database, or the maximum +** transaction state over all attached databases if zSchema is null. +*/ +SQLITE_API int sqlite3_txn_state(sqlite3 *db, const char *zSchema){ + int iDb, nDb; + int iTxn = -1; +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + (void)SQLITE_MISUSE_BKPT; + return -1; + } +#endif + sqlite3_mutex_enter(db->mutex); + if( zSchema ){ + nDb = iDb = sqlite3FindDbName(db, zSchema); + if( iDb<0 ) nDb--; + }else{ + iDb = 0; + nDb = db->nDb-1; + } + for(; iDb<=nDb; iDb++){ + Btree *pBt = db->aDb[iDb].pBt; + int x = pBt!=0 ? sqlite3BtreeTxnState(pBt) : SQLITE_TXN_NONE; + if( x>iTxn ) iTxn = x; + } + sqlite3_mutex_leave(db->mutex); + return iTxn; +} + /* ** Two variations on the public interface for closing a database ** connection. The sqlite3_close() version returns SQLITE_BUSY and -** leaves the connection option if there are unfinalized prepared +** leaves the connection open if there are unfinalized prepared
The sqlite3_close_v2() ** version forces the connection to become a zombie if there are ** unclosed resources, and arranges for deallocation when the last @@ -162419,7 +166530,7 @@ SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3 *db, int tripCode){ for(i=0; inDb; i++){ Btree *p = db->aDb[i].pBt; if( p ){ - if( sqlite3BtreeIsInTrans(p) ){ + if( sqlite3BtreeTxnState(p)==SQLITE_TXN_WRITE ){ inTrans = 1; } sqlite3BtreeRollback(p, tripCode, !schemaChange); @@ -162872,6 +166983,10 @@ SQLITE_PRIVATE int sqlite3CreateFunc( }else{ sqlite3ExpirePreparedStatements(db, 0); } + }else if( xSFunc==0 && xFinal==0 ){ + /* Trying to delete a function that does not exist. This is a no-op. + ** https://sqlite.org/forum/forumpost/726219164b */ + return SQLITE_OK; } p = sqlite3FindFunction(db, zFunctionName, nArg, (u8)enc, 1); @@ -163362,7 +167477,7 @@ SQLITE_API int sqlite3_wal_checkpoint_v2( return SQLITE_OK; #else int rc; /* Return code */ - int iDb = SQLITE_MAX_ATTACHED; /* sqlite3.aDb[] index of db to checkpoint */ + int iDb; /* Schema to checkpoint */ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; @@ -163385,6 +167500,8 @@ SQLITE_API int sqlite3_wal_checkpoint_v2( sqlite3_mutex_enter(db->mutex); if( zDb && zDb[0] ){ iDb = sqlite3FindDbName(db, zDb); + }else{ + iDb = SQLITE_MAX_DB; /* This means process all schemas */ } if( iDb<0 ){ rc = SQLITE_ERROR; @@ -163433,7 +167550,7 @@ SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb){ ** associated with the specific b-tree being checkpointed is taken by ** this function while the checkpoint is running. ** -** If iDb is passed SQLITE_MAX_ATTACHED, then all attached databases are +** If iDb is passed SQLITE_MAX_DB then all attached databases are ** checkpointed. If an error is encountered it is returned immediately - ** no attempt is made to checkpoint any remaining databases. ** @@ -163448,9 +167565,11 @@ SQLITE_PRIVATE int sqlite3Checkpoint(sqlite3 *db, int iDb, int eMode, int *pnLog assert( sqlite3_mutex_held(db->mutex) ); assert( !pnLog || *pnLog==-1 ); assert( !pnCkpt || *pnCkpt==-1 ); + testcase( iDb==SQLITE_MAX_ATTACHED ); /* See forum post a006d86f72 */ + testcase( iDb==SQLITE_MAX_DB ); for(i=0; inDb && rc==SQLITE_OK; i++){ - if( i==iDb || iDb==SQLITE_MAX_ATTACHED ){ + if( i==iDb || iDb==SQLITE_MAX_DB ){ rc = sqlite3BtreeCheckpoint(db->aDb[i].pBt, eMode, pnLog, pnCkpt); pnLog = 0; pnCkpt = 0; @@ -164840,7 +168959,9 @@ SQLITE_API int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, vo } rc = SQLITE_OK; }else{ + int nSave = db->busyHandler.nBusy; rc = sqlite3OsFileControl(fd, op, pArg); + db->busyHandler.nBusy = nSave; } sqlite3BtreeLeave(pBtree); } @@ -165066,7 +169187,7 @@ SQLITE_API int sqlite3_test_control(int op, ...){ */ case SQLITE_TESTCTRL_OPTIMIZATIONS: { sqlite3 *db = va_arg(ap, sqlite3*); - db->dbOptFlags = (u16)(va_arg(ap, int) & 0xffff); + db->dbOptFlags = va_arg(ap, u32); break; } @@ -165223,6 +169344,74 @@ SQLITE_API int sqlite3_test_control(int op, ...){ sqlite3ResultIntReal(pCtx); break; } + + /* sqlite3_test_control(SQLITE_TESTCTRL_SEEK_COUNT, + ** sqlite3 *db, // Database connection + ** u64 *pnSeek // Write seek count here + ** ); + ** + ** This test-control queries the seek-counter on the "main" database + ** file. The seek-counter is written into *pnSeek and is then reset. + ** The seek-count is only available if compiled with SQLITE_DEBUG. 
+ */ + case SQLITE_TESTCTRL_SEEK_COUNT: { + sqlite3 *db = va_arg(ap, sqlite3*); + u64 *pn = va_arg(ap, sqlite3_uint64*); + *pn = sqlite3BtreeSeekCount(db->aDb->pBt); + (void)db; /* Silence harmless unused variable warning */ + break; + } + + /* sqlite3_test_control(SQLITE_TESTCTRL_TRACEFLAGS, op, ptr) + ** + ** "ptr" is a pointer to a u32. + ** + ** op==0 Store the current sqlite3SelectTrace in *ptr + ** op==1 Set sqlite3SelectTrace to the value *ptr + ** op==2 Store the current sqlite3WhereTrace in *ptr + ** op==3 Set sqlite3WhereTrace to the value *ptr + */ + case SQLITE_TESTCTRL_TRACEFLAGS: { + int opTrace = va_arg(ap, int); + u32 *ptr = va_arg(ap, u32*); + switch( opTrace ){ + case 0: *ptr = sqlite3SelectTrace; break; + case 1: sqlite3SelectTrace = *ptr; break; + case 2: *ptr = sqlite3WhereTrace; break; + case 3: sqlite3WhereTrace = *ptr; break; + } + break; + } + +#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD) + /* sqlite3_test_control(SQLITE_TESTCTRL_TUNE, id, *piValue) + ** + ** If "id" is an integer between 1 and SQLITE_NTUNE then set the value + ** of the id-th tuning parameter to *piValue. If "id" is between -1 + ** and -SQLITE_NTUNE, then write the current value of the (-id)-th + ** tuning parameter into *piValue. + ** + ** Tuning parameters are for use during transient development builds, + ** to help find the best values for constants in the query planner. + ** Access tuning parameters using the Tuning(ID) macro. Set the + ** parameters in the CLI using ".testctrl tune ID VALUE". + ** + ** Transient use only. Tuning parameters should not be used in + ** checked-in code. + */ + case SQLITE_TESTCTRL_TUNE: { + int id = va_arg(ap, int); + int *piValue = va_arg(ap, int*); + if( id>0 && id<=SQLITE_NTUNE ){ + Tuning(id) = *piValue; + }else if( id<0 && id>=-SQLITE_NTUNE ){ + *piValue = Tuning(-id); + }else{ + rc = SQLITE_NOTFOUND; + } + break; + } +#endif } va_end(ap); #endif /* SQLITE_UNTESTABLE */ @@ -165458,7 +169647,7 @@ SQLITE_API int sqlite3_snapshot_get( int iDb = sqlite3FindDbName(db, zDb); if( iDb==0 || iDb>1 ){ Btree *pBt = db->aDb[iDb].pBt; - if( 0==sqlite3BtreeIsInTrans(pBt) ){ + if( SQLITE_TXN_WRITE!=sqlite3BtreeTxnState(pBt) ){ rc = sqlite3BtreeBeginTrans(pBt, 0, 0); if( rc==SQLITE_OK ){ rc = sqlite3PagerSnapshotGet(sqlite3BtreePager(pBt), ppSnapshot); @@ -165494,10 +169683,10 @@ SQLITE_API int sqlite3_snapshot_open( iDb = sqlite3FindDbName(db, zDb); if( iDb==0 || iDb>1 ){ Btree *pBt = db->aDb[iDb].pBt; - if( sqlite3BtreeIsInTrans(pBt)==0 ){ + if( sqlite3BtreeTxnState(pBt)!=SQLITE_TXN_WRITE ){ Pager *pPager = sqlite3BtreePager(pBt); int bUnlock = 0; - if( sqlite3BtreeIsInReadTrans(pBt) ){ + if( sqlite3BtreeTxnState(pBt)!=SQLITE_TXN_NONE ){ if( db->nVdbeActive==0 ){ rc = sqlite3PagerSnapshotCheck(pPager, pSnapshot); if( rc==SQLITE_OK ){ @@ -165546,7 +169735,7 @@ SQLITE_API int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb){ iDb = sqlite3FindDbName(db, zDb); if( iDb==0 || iDb>1 ){ Btree *pBt = db->aDb[iDb].pBt; - if( 0==sqlite3BtreeIsInReadTrans(pBt) ){ + if( SQLITE_TXN_NONE==sqlite3BtreeTxnState(pBt) ){ rc = sqlite3BtreeBeginTrans(pBt, 0, 0); if( rc==SQLITE_OK ){ rc = sqlite3PagerSnapshotRecover(sqlite3BtreePager(pBt)); @@ -166665,7 +170854,7 @@ SQLITE_PRIVATE Fts3HashElem *sqlite3Fts3HashFindElem(const Fts3Hash *, const voi ** is used for assert() conditions that are true only if it can be ** guaranteed that the database is not corrupt.
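** For example, assert_fts3_nc(nByte>=0) compiles to assert(nByte>=0)
** while sqlite3_fts3_may_be_corrupt is zero, but is disarmed when the
** flag is set so that deliberately corrupt test inputs do not fire it.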
*/ -#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) +#ifdef SQLITE_DEBUG SQLITE_API extern int sqlite3_fts3_may_be_corrupt; # define assert_fts3_nc(x) assert(sqlite3_fts3_may_be_corrupt || (x)) #else @@ -167221,7 +171410,9 @@ SQLITE_PRIVATE int sqlite3Fts3Never(int b) { assert( !b ); return b; } ** assert() conditions in the fts3 code are activated - conditions that are ** only true if it is guaranteed that the fts3 database is not corrupt. */ +#ifdef SQLITE_DEBUG SQLITE_API int sqlite3_fts3_may_be_corrupt = 1; +#endif /* ** Write a 64-bit variable-length integer to memory starting at p[0]. @@ -168792,7 +172983,7 @@ static int fts3ScanInteriorNode( char *zBuffer = 0; /* Buffer to load terms into */ i64 nAlloc = 0; /* Size of allocated buffer */ int isFirstTerm = 1; /* True when processing first term on page */ - sqlite3_int64 iChild; /* Block id of child node to descend to */ + u64 iChild; /* Block id of child node to descend to */ int nBuffer = 0; /* Total term size */ /* Skip over the 'height' varint that occurs at the start of every @@ -168808,8 +172999,8 @@ static int fts3ScanInteriorNode( ** table, then there are always 20 bytes of zeroed padding following the ** nNode bytes of content (see sqlite3Fts3ReadBlock() for details). */ - zCsr += sqlite3Fts3GetVarint(zCsr, &iChild); - zCsr += sqlite3Fts3GetVarint(zCsr, &iChild); + zCsr += sqlite3Fts3GetVarintU(zCsr, &iChild); + zCsr += sqlite3Fts3GetVarintU(zCsr, &iChild); if( zCsr>zEnd ){ return FTS_CORRUPT_VTAB; } @@ -168862,20 +173053,20 @@ static int fts3ScanInteriorNode( */ cmp = memcmp(zTerm, zBuffer, (nBuffer>nTerm ? nTerm : nBuffer)); if( piFirst && (cmp<0 || (cmp==0 && nBuffer>nTerm)) ){ - *piFirst = iChild; + *piFirst = (i64)iChild; piFirst = 0; } if( piLast && cmp<0 ){ - *piLast = iChild; + *piLast = (i64)iChild; piLast = 0; } iChild++; }; - if( piFirst ) *piFirst = iChild; - if( piLast ) *piLast = iChild; + if( piFirst ) *piFirst = (i64)iChild; + if( piLast ) *piLast = (i64)iChild; finish_scan: sqlite3_free(zBuffer); @@ -170481,14 +174672,20 @@ static int fts3SetHasStat(Fts3Table *p){ */ static int fts3BeginMethod(sqlite3_vtab *pVtab){ Fts3Table *p = (Fts3Table*)pVtab; + int rc; UNUSED_PARAMETER(pVtab); assert( p->pSegments==0 ); assert( p->nPendingData==0 ); assert( p->inTransaction!=1 ); - TESTONLY( p->inTransaction = 1 ); - TESTONLY( p->mxSavepoint = -1; ); p->nLeafAdd = 0; - return fts3SetHasStat(p); + rc = fts3SetHasStat(p); +#ifdef SQLITE_DEBUG + if( rc==SQLITE_OK ){ + p->inTransaction = 1; + p->mxSavepoint = -1; + } +#endif + return rc; } /* @@ -172017,16 +176214,15 @@ static int fts3EvalStart(Fts3Cursor *pCsr){ #ifndef SQLITE_DISABLE_FTS4_DEFERRED if( rc==SQLITE_OK && nToken>1 && pTab->bFts4 ){ Fts3TokenAndCost *aTC; - Fts3Expr **apOr; aTC = (Fts3TokenAndCost *)sqlite3_malloc64( sizeof(Fts3TokenAndCost) * nToken + sizeof(Fts3Expr *) * nOr * 2 ); - apOr = (Fts3Expr **)&aTC[nToken]; if( !aTC ){ rc = SQLITE_NOMEM; }else{ + Fts3Expr **apOr = (Fts3Expr **)&aTC[nToken]; int ii; Fts3TokenAndCost *pTC = aTC; Fts3Expr **ppOr = apOr; @@ -172107,9 +176303,9 @@ static int fts3EvalNearTrim( ); if( res ){ nNew = (int)(pOut - pPhrase->doclist.pList) - 1; - if( nNew>=0 ){ + assert_fts3_nc( nNew<=pPhrase->doclist.nList && nNew>0 ); + if( nNew>=0 && nNew<=pPhrase->doclist.nList ){ assert( pPhrase->doclist.pList[nNew]=='\0' ); - assert( nNew<=pPhrase->doclist.nList && nNew>0 ); memset(&pPhrase->doclist.pList[nNew], 0, pPhrase->doclist.nList - nNew); pPhrase->doclist.nList = nNew; } @@ -173402,6 +177598,7 @@ static int fts3auxFilterMethod( 
sqlite3Fts3SegReaderFinish(&pCsr->csr); sqlite3_free((void *)pCsr->filter.zTerm); sqlite3_free(pCsr->aStat); + sqlite3_free(pCsr->zStop); memset(&pCsr->csr, 0, ((u8*)&pCsr[1]) - (u8*)&pCsr->csr); pCsr->filter.flags = FTS3_SEGMENT_REQUIRE_POS|FTS3_SEGMENT_IGNORE_EMPTY; @@ -174043,6 +178240,11 @@ static int getNextNode( if( *zInput=='(' ){ int nConsumed = 0; pParse->nNest++; +#if !defined(SQLITE_MAX_EXPR_DEPTH) + if( pParse->nNest>1000 ) return SQLITE_ERROR; +#elif SQLITE_MAX_EXPR_DEPTH>0 + if( pParse->nNest>SQLITE_MAX_EXPR_DEPTH ) return SQLITE_ERROR; +#endif rc = fts3ExprParse(pParse, zInput+1, nInput-1, ppExpr, &nConsumed); *pnConsumed = (int)(zInput - z) + 1 + nConsumed; return rc; @@ -178918,7 +183120,7 @@ static int fts3SegReaderCmp(Fts3SegReader *pLhs, Fts3SegReader *pRhs){ if( rc==0 ){ rc = pRhs->iIdx - pLhs->iIdx; } - assert( rc!=0 ); + assert_fts3_nc( rc!=0 ); return rc; } @@ -179114,8 +183316,8 @@ static int fts3PrefixCompress( int nNext /* Size of buffer zNext in bytes */ ){ int n; - UNUSED_PARAMETER(nNext); - for(n=0; nkey, reader.term.n, &rc); if( rc==SQLITE_OK ){ - memcpy(pNode->key.a, reader.term.a, reader.term.n); + assert_fts3_nc( reader.term.n>0 || reader.aNode==0 ); + if( reader.term.n>0 ){ + memcpy(pNode->key.a, reader.term.a, reader.term.n); + } pNode->key.n = reader.term.n; if( i>0 ){ char *aBlock = 0; int nBlock = 0; pNode = &pWriter->aNodeWriter[i-1]; pNode->iBlock = reader.iChild; - rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock, 0); + rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock,0); blobGrowBuffer(&pNode->block, MAX(nBlock, p->nNodeSize)+FTS3_NODE_PADDING, &rc - ); + ); if( rc==SQLITE_OK ){ memcpy(pNode->block.a, aBlock, nBlock); pNode->block.n = nBlock; @@ -182925,6 +187130,10 @@ SQLITE_PRIVATE int sqlite3Fts3Optimize(Fts3Table *p){ /* #include */ /* #include */ +#ifndef SQLITE_AMALGAMATION +typedef sqlite3_int64 i64; +#endif + /* ** Characters that may appear in the second argument to matchinfo(). */ @@ -182975,9 +187184,9 @@ struct SnippetIter { struct SnippetPhrase { int nToken; /* Number of tokens in phrase */ char *pList; /* Pointer to start of phrase position list */ - int iHead; /* Next value in position list */ + i64 iHead; /* Next value in position list */ char *pHead; /* Position list data following iHead */ - int iTail; /* Next value in trailing position list */ + i64 iTail; /* Next value in trailing position list */ char *pTail; /* Position list data following iTail */ }; @@ -183142,7 +187351,7 @@ SQLITE_PRIVATE void sqlite3Fts3MIBufferFree(MatchinfoBuffer *p){ ** After it returns, *piPos contains the value of the next element of the ** list and *pp is advanced to the following varint. */ -static void fts3GetDeltaPosition(char **pp, int *piPos){ +static void fts3GetDeltaPosition(char **pp, i64 *piPos){ int iVal; *pp += fts3GetVarint32(*pp, &iVal); *piPos += (iVal-2); @@ -183251,10 +187460,10 @@ static int fts3ExprPhraseCount(Fts3Expr *pExpr){ ** arguments so that it points to the first element with a value greater ** than or equal to parameter iNext. 
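** (Position lists are delta-encoded with a bias of two, as in
** fts3GetDeltaPosition() above: starting from position 7, a stored
** varint of 5 advances the iterator to 7 + (5-2) == 10.)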
*/ -static void fts3SnippetAdvance(char **ppIter, int *piIter, int iNext){ +static void fts3SnippetAdvance(char **ppIter, i64 *piIter, int iNext){ char *pIter = *ppIter; if( pIter ){ - int iIter = *piIter; + i64 iIter = *piIter; while( iIteraPhrase[i]; if( pPhrase->pTail ){ char *pCsr = pPhrase->pTail; - int iCsr = pPhrase->iTail; + i64 iCsr = pPhrase->iTail; while( iCsr<(iStart+pIter->nSnippet) && iCsr>=iStart ){ int j; @@ -183383,7 +187592,7 @@ static int fts3SnippetFindPositions(Fts3Expr *pExpr, int iPhrase, void *ctx){ rc = sqlite3Fts3EvalPhrasePoslist(p->pCsr, pExpr, p->iCol, &pCsr); assert( rc==SQLITE_OK || pCsr==0 ); if( pCsr ){ - int iFirst = 0; + i64 iFirst = 0; pPhrase->pList = pCsr; fts3GetDeltaPosition(&pCsr, &iFirst); if( iFirst<0 ){ @@ -184447,8 +188656,8 @@ typedef struct TermOffsetCtx TermOffsetCtx; struct TermOffset { char *pList; /* Position-list */ - int iPos; /* Position just read from pList */ - int iOff; /* Offset of this term from read positions */ + i64 iPos; /* Position just read from pList */ + i64 iOff; /* Offset of this term from read positions */ }; struct TermOffsetCtx { @@ -184467,7 +188676,7 @@ static int fts3ExprTermOffsetInit(Fts3Expr *pExpr, int iPhrase, void *ctx){ int nTerm; /* Number of tokens in phrase */ int iTerm; /* For looping through nTerm phrase terms */ char *pList; /* Pointer to position list for phrase */ - int iPos = 0; /* First position in position-list */ + i64 iPos = 0; /* First position in position-list */ int rc; UNUSED_PARAMETER(iPhrase); @@ -184944,6 +189153,7 @@ static int unicodeOpen( pCsr->aInput = (const unsigned char *)aInput; if( aInput==0 ){ pCsr->nInput = 0; + pCsr->aInput = (const unsigned char*)""; }else if( nInput<0 ){ pCsr->nInput = (int)strlen(aInput); }else{ @@ -185743,7 +189953,7 @@ static void jsonAppendSeparator(JsonString *p){ */ static void jsonAppendString(JsonString *p, const char *zIn, u32 N){ u32 i; - if( (N+p->nUsed+2 >= p->nAlloc) && jsonGrow(p,N+2)!=0 ) return; + if( zIn==0 || ((N+p->nUsed+2 >= p->nAlloc) && jsonGrow(p,N+2)!=0) ) return; p->zBuf[p->nUsed++] = '"'; for(i=0; inUsed>1 ){ jsonAppendChar(pStr, ','); - pStr->pCtx = ctx; } + pStr->pCtx = ctx; jsonAppendValue(pStr, argv[0]); } } @@ -187403,11 +191613,7 @@ static void jsonGroupInverse( if( NEVER(!pStr) ) return; #endif z = pStr->zBuf; - for(i=1; (c = z[i])!=',' || inStr || nNest; i++){ - if( i>=pStr->nUsed ){ - pStr->nUsed = 1; - return; - } + for(i=1; inUsed && ((c = z[i])!=',' || inStr || nNest); i++){ if( c=='"' ){ inStr = !inStr; }else if( c=='\\' ){ @@ -187417,8 +191623,13 @@ static void jsonGroupInverse( if( c=='}' || c==']' ) nNest--; } } - pStr->nUsed -= i; - memmove(&z[1], &z[i+1], (size_t)pStr->nUsed-1); + if( inUsed ){ + pStr->nUsed -= i; + memmove(&z[1], &z[i+1], (size_t)pStr->nUsed-1); + z[pStr->nUsed] = 0; + }else{ + pStr->nUsed = 1; + } } #else # define jsonGroupInverse 0 @@ -187446,8 +191657,8 @@ static void jsonObjectStep( jsonAppendChar(pStr, '{'); }else if( pStr->nUsed>1 ){ jsonAppendChar(pStr, ','); - pStr->pCtx = ctx; } + pStr->pCtx = ctx; z = (const char*)sqlite3_value_text(argv[0]); n = (u32)sqlite3_value_bytes(argv[0]); jsonAppendString(pStr, z, n); @@ -188837,7 +193048,7 @@ static int nodeAcquire( ** are the leaves, and so on. If the depth as specified on the root node ** is greater than RTREE_MAX_DEPTH, the r-tree structure must be corrupt. 
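** (The depth is read via readInt16() from the first two bytes of the
** root node, so a root whose leading two bytes decode to 2 describes a
** tree whose leaves lie two levels below the root.)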
*/ - if( pNode && iNode==1 ){ + if( pNode && rc==SQLITE_OK && iNode==1 ){ pRtree->iDepth = readInt16(pNode->zData); if( pRtree->iDepth>RTREE_MAX_DEPTH ){ rc = SQLITE_CORRUPT_VTAB; @@ -191968,11 +196179,16 @@ static void rtreedepth(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){ UNUSED_PARAMETER(nArg); if( sqlite3_value_type(apArg[0])!=SQLITE_BLOB || sqlite3_value_bytes(apArg[0])<2 + ){ sqlite3_result_error(ctx, "Invalid argument to rtreedepth()", -1); }else{ u8 *zBlob = (u8 *)sqlite3_value_blob(apArg[0]); - sqlite3_result_int(ctx, readInt16(zBlob)); + if( zBlob ){ + sqlite3_result_int(ctx, readInt16(zBlob)); + }else{ + sqlite3_result_error_nomem(ctx); + } } } @@ -192758,6 +196974,10 @@ static GeoPoly *geopolyFuncParam( ){ const unsigned char *a = sqlite3_value_blob(pVal); int nVertex; + if( a==0 ){ + sqlite3_result_error_nomem(pCtx); + return 0; + } nVertex = (a[1]<<16) + (a[2]<<8) + a[3]; if( (a[0]==0 || a[0]==1) && (nVertex*2*sizeof(GeoCoord) + 4)==(unsigned int)nByte @@ -193131,7 +197351,7 @@ static GeoPoly *geopolyBBox( aCoord[2].f = mnY; aCoord[3].f = mxY; } - }else{ + }else if( aCoord ){ memset(aCoord, 0, sizeof(RtreeCoord)*4); } return pOut; @@ -193523,7 +197743,7 @@ static int geopolyOverlap(GeoPoly *p1, GeoPoly *p2){ geopolyAddSegments(p, p1, 1); geopolyAddSegments(p, p2, 2); pThisEvent = geopolySortEventsByX(p->aEvent, p->nEvent); - rX = pThisEvent->x==0.0 ? -1.0 : 0.0; + rX = pThisEvent && pThisEvent->x==0.0 ? -1.0 : 0.0; memset(aOverlap, 0, sizeof(aOverlap)); while( pThisEvent ){ if( pThisEvent->x!=rX ){ @@ -197491,7 +201711,9 @@ char *rbuVacuumIndexStart( zSep = ""; for(iCol=0; iColnCol; iCol++){ const char *zQuoted = (const char*)sqlite3_column_text(pSel, iCol); - if( zQuoted[0]=='N' ){ + if( zQuoted==0 ){ + p->rc = SQLITE_NOMEM; + }else if( zQuoted[0]=='N' ){ bFailed = 1; break; } @@ -200699,22 +204921,24 @@ static int rbuVfsShmLock(sqlite3_file *pFile, int ofst, int n, int flags){ #endif assert( p->openFlags & (SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_TEMP_DB) ); - if( pRbu && (pRbu->eStage==RBU_STAGE_OAL || pRbu->eStage==RBU_STAGE_MOVE) ){ - /* Magic number 1 is the WAL_CKPT_LOCK lock. Preventing SQLite from - ** taking this lock also prevents any checkpoints from occurring. - ** todo: really, it's not clear why this might occur, as - ** wal_autocheckpoint ought to be turned off. */ + if( pRbu && ( + pRbu->eStage==RBU_STAGE_OAL + || pRbu->eStage==RBU_STAGE_MOVE + || pRbu->eStage==RBU_STAGE_DONE + )){ + /* Prevent SQLite from taking a shm-lock on the target file when it + ** is supplying heap memory to the upper layer in place of *-shm + ** segments. */ if( ofst==WAL_LOCK_CKPT && n==1 ) rc = SQLITE_BUSY; }else{ int bCapture = 0; if( pRbu && pRbu->eStage==RBU_STAGE_CAPTURE ){ bCapture = 1; } - if( bCapture==0 || 0==(flags & SQLITE_SHM_UNLOCK) ){ rc = p->pReal->pMethods->xShmLock(p->pReal, ofst, n, flags); if( bCapture && rc==SQLITE_OK ){ - pRbu->mLock |= (1 << ofst); + pRbu->mLock |= ((1<pRbu && pDb->pRbu->eStage==RBU_STAGE_OAL ){ - /* This call is to open a *-wal file. Intead, open the *-oal. This - ** code ensures that the string passed to xOpen() is terminated by a - ** pair of '\0' bytes in case the VFS attempts to extract a URI - ** parameter from it. */ - const char *zBase = zName; - size_t nCopy; - char *zCopy; + /* This call is to open a *-wal file. Intead, open the *-oal. 
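+    ** For example, an xOpen() of "test.db-wal" is redirected to
+    ** "test.db-oal" by overwriting the 'w' three bytes from the end.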
*/ + size_t nOpen; if( rbuIsVacuum(pDb->pRbu) ){ - zBase = sqlite3_db_filename(pDb->pRbu->dbRbu, "main"); - zBase = sqlite3_filename_wal(zBase); - } - nCopy = strlen(zBase); - zCopy = sqlite3_malloc64(nCopy+2); - if( zCopy ){ - memcpy(zCopy, zBase, nCopy); - zCopy[nCopy-3] = 'o'; - zCopy[nCopy] = '\0'; - zCopy[nCopy+1] = '\0'; - zOpen = (const char*)(pFd->zDel = zCopy); - }else{ - rc = SQLITE_NOMEM; + zOpen = sqlite3_db_filename(pDb->pRbu->dbRbu, "main"); + zOpen = sqlite3_filename_wal(zOpen); } + nOpen = strlen(zOpen); + ((char*)zOpen)[nOpen-3] = 'o'; pFd->pRbu = pDb->pRbu; } pDb->pWalFd = pFd; @@ -202495,12 +206705,15 @@ struct SessionHook { struct sqlite3_session { sqlite3 *db; /* Database handle session is attached to */ char *zDb; /* Name of database session is attached to */ + int bEnableSize; /* True if changeset_size() enabled */ int bEnable; /* True if currently recording */ int bIndirect; /* True if all changes are indirect */ int bAutoAttach; /* True to auto-attach tables */ int rc; /* Non-zero if an error has occurred */ void *pFilterCtx; /* First argument to pass to xTableFilter */ int (*xTableFilter)(void *pCtx, const char *zTab); + i64 nMalloc; /* Number of bytes of data allocated */ + i64 nMaxChangesetSize; sqlite3_value *pZeroBlob; /* Value containing X'' */ sqlite3_session *pNext; /* Next session object on same db. */ SessionTable *pTable; /* List of attached tables */ @@ -202543,6 +206756,7 @@ struct sqlite3_changeset_iter { SessionBuffer tblhdr; /* Buffer to hold apValue/zTab/abPK/ */ int bPatchset; /* True if this is a patchset */ int bInvert; /* True to invert changeset */ + int bSkipEmpty; /* Skip noop UPDATE changes */ int rc; /* Iterator error code */ sqlite3_stmt *pConflict; /* Points to conflicting row, if any */ char *zTab; /* Current table */ @@ -202742,8 +206956,9 @@ struct SessionTable { ** this structure stored in a SessionTable.aChange[] hash table. */ struct SessionChange { - int op; /* One of UPDATE, DELETE, INSERT */ - int bIndirect; /* True if this change is "indirect" */ + u8 op; /* One of UPDATE, DELETE, INSERT */ + u8 bIndirect; /* True if this change is "indirect" */ + int nMaxSize; /* Max size of eventual changeset record */ int nRecord; /* Number of bytes in buffer aRecord[] */ u8 *aRecord; /* Buffer containing old.* record */ SessionChange *pNext; /* For hash-table collisions */ @@ -202884,6 +207099,26 @@ static int sessionSerializeValue( return SQLITE_OK; } +/* +** Allocate and return a pointer to a buffer nByte bytes in size. If +** pSession is not NULL, increase the sqlite3_session.nMalloc variable +** by the number of bytes allocated. +*/ +static void *sessionMalloc64(sqlite3_session *pSession, i64 nByte){ + void *pRet = sqlite3_malloc64(nByte); + if( pSession ) pSession->nMalloc += sqlite3_msize(pRet); + return pRet; +} + +/* +** Free buffer pFree, which must have been allocated by an earlier +** call to sessionMalloc64(). If pSession is not NULL, decrease the +** sqlite3_session.nMalloc counter by the number of bytes freed. +*/ +static void sessionFree(sqlite3_session *pSession, void *pFree){ + if( pSession ) pSession->nMalloc -= sqlite3_msize(pFree); + sqlite3_free(pFree); +} /* ** This macro is used to calculate hash key values for data structures. In @@ -203351,13 +207586,19 @@ static int sessionPreupdateEqual( ** Growing the hash table in this case is a performance optimization only, ** it is not required for correct operation. 
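** (Concretely: the first change recorded against a table allocates
** 2*128==256 hash buckets, and the table doubles thereafter whenever
** the entry count reaches half the bucket count.)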
*/ -static int sessionGrowHash(int bPatchset, SessionTable *pTab){ +static int sessionGrowHash( + sqlite3_session *pSession, /* For memory accounting. May be NULL */ + int bPatchset, + SessionTable *pTab +){ if( pTab->nChange==0 || pTab->nEntry>=(pTab->nChange/2) ){ int i; SessionChange **apNew; sqlite3_int64 nNew = 2*(sqlite3_int64)(pTab->nChange ? pTab->nChange : 128); - apNew = (SessionChange **)sqlite3_malloc64(sizeof(SessionChange *) * nNew); + apNew = (SessionChange**)sessionMalloc64( + pSession, sizeof(SessionChange*) * nNew + ); if( apNew==0 ){ if( pTab->nChange==0 ){ return SQLITE_ERROR; @@ -203378,7 +207619,7 @@ static int sessionGrowHash(int bPatchset, SessionTable *pTab){ } } - sqlite3_free(pTab->apChange); + sessionFree(pSession, pTab->apChange); pTab->nChange = nNew; pTab->apChange = apNew; } @@ -203412,6 +207653,7 @@ static int sessionGrowHash(int bPatchset, SessionTable *pTab){ ** be freed using sqlite3_free() by the caller */ static int sessionTableInfo( + sqlite3_session *pSession, /* For memory accounting. May be NULL */ sqlite3 *db, /* Database connection */ const char *zDb, /* Name of attached database (e.g. "main") */ const char *zThis, /* Table name */ @@ -203466,7 +207708,7 @@ static int sessionTableInfo( if( rc==SQLITE_OK ){ nByte += nDbCol * (sizeof(const char *) + sizeof(u8) + 1); - pAlloc = sqlite3_malloc64(nByte); + pAlloc = sessionMalloc64(pSession, nByte); if( pAlloc==0 ){ rc = SQLITE_NOMEM; } @@ -203509,7 +207751,7 @@ static int sessionTableInfo( *pabPK = 0; *pnCol = 0; if( pzTab ) *pzTab = 0; - sqlite3_free(azCol); + sessionFree(pSession, azCol); } sqlite3_finalize(pStmt); return rc; @@ -203531,7 +207773,7 @@ static int sessionInitTable(sqlite3_session *pSession, SessionTable *pTab){ if( pTab->nCol==0 ){ u8 *abPK; assert( pTab->azCol==0 || pTab->abPK==0 ); - pSession->rc = sessionTableInfo(pSession->db, pSession->zDb, + pSession->rc = sessionTableInfo(pSession, pSession->db, pSession->zDb, pTab->zName, &pTab->nCol, 0, &pTab->azCol, &abPK ); if( pSession->rc==SQLITE_OK ){ @@ -203545,6 +207787,12 @@ static int sessionInitTable(sqlite3_session *pSession, SessionTable *pTab){ if( 0==sqlite3_stricmp("sqlite_stat1", pTab->zName) ){ pTab->bStat1 = 1; } + + if( pSession->bEnableSize ){ + pSession->nMaxChangesetSize += ( + 1 + sessionVarintLen(pTab->nCol) + pTab->nCol + strlen(pTab->zName)+1 + ); + } } } return (pSession->rc || pTab->abPK==0); @@ -203590,6 +207838,103 @@ static int sessionStat1Depth(void *pCtx){ return p->hook.xDepth(p->hook.pCtx); } +static int sessionUpdateMaxSize( + int op, + sqlite3_session *pSession, /* Session object pTab is attached to */ + SessionTable *pTab, /* Table that change applies to */ + SessionChange *pC /* Update pC->nMaxSize */ +){ + i64 nNew = 2; + if( pC->op==SQLITE_INSERT ){ + if( op!=SQLITE_DELETE ){ + int ii; + for(ii=0; iinCol; ii++){ + sqlite3_value *p = 0; + pSession->hook.xNew(pSession->hook.pCtx, ii, &p); + sessionSerializeValue(0, p, &nNew); + } + } + }else if( op==SQLITE_DELETE ){ + nNew += pC->nRecord; + if( sqlite3_preupdate_blobwrite(pSession->db)>=0 ){ + nNew += pC->nRecord; + } + }else{ + int ii; + u8 *pCsr = pC->aRecord; + for(ii=0; iinCol; ii++){ + int bChanged = 1; + int nOld = 0; + int eType; + sqlite3_value *p = 0; + pSession->hook.xNew(pSession->hook.pCtx, ii, &p); + if( p==0 ){ + return SQLITE_NOMEM; + } + + eType = *pCsr++; + switch( eType ){ + case SQLITE_NULL: + bChanged = sqlite3_value_type(p)!=SQLITE_NULL; + break; + + case SQLITE_FLOAT: + case SQLITE_INTEGER: { + if( eType==sqlite3_value_type(p) ){ + 
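+          /* Old INTEGER and FLOAT values are both serialized as 8 bytes
+          ** of data in the change record; decode the old value and
+          ** compare it with the new one to see if the column changed. */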
sqlite3_int64 iVal = sessionGetI64(pCsr); + if( eType==SQLITE_INTEGER ){ + bChanged = (iVal!=sqlite3_value_int64(p)); + }else{ + double dVal; + memcpy(&dVal, &iVal, 8); + bChanged = (dVal!=sqlite3_value_double(p)); + } + } + nOld = 8; + pCsr += 8; + break; + } + + default: { + int nByte; + nOld = sessionVarintGet(pCsr, &nByte); + pCsr += nOld; + nOld += nByte; + assert( eType==SQLITE_TEXT || eType==SQLITE_BLOB ); + if( eType==sqlite3_value_type(p) + && nByte==sqlite3_value_bytes(p) + && (nByte==0 || 0==memcmp(pCsr, sqlite3_value_blob(p), nByte)) + ){ + bChanged = 0; + } + pCsr += nByte; + break; + } + } + + if( bChanged && pTab->abPK[ii] ){ + nNew = pC->nRecord + 2; + break; + } + + if( bChanged ){ + nNew += 1 + nOld; + sessionSerializeValue(0, p, &nNew); + }else if( pTab->abPK[ii] ){ + nNew += 2 + nOld; + }else{ + nNew += 2; + } + } + } + + if( nNew>pC->nMaxSize ){ + int nIncr = nNew - pC->nMaxSize; + pC->nMaxSize = nNew; + pSession->nMaxChangesetSize += nIncr; + } + return SQLITE_OK; +} /* ** This function is only called from with a pre-update-hook reporting a @@ -203622,7 +207967,7 @@ static void sessionPreupdateOneChange( } /* Grow the hash table if required */ - if( sessionGrowHash(0, pTab) ){ + if( sessionGrowHash(pSession, 0, pTab) ){ pSession->rc = SQLITE_NOMEM; return; } @@ -203663,7 +208008,6 @@ static void sessionPreupdateOneChange( /* Create a new change object containing all the old values (if ** this is an SQLITE_UPDATE or SQLITE_DELETE), or just the PK ** values (if this is an INSERT). */ - SessionChange *pChange; /* New change object */ sqlite3_int64 nByte; /* Number of bytes to allocate */ int i; /* Used to iterate through columns */ @@ -203689,13 +208033,13 @@ static void sessionPreupdateOneChange( } /* Allocate the change object */ - pChange = (SessionChange *)sqlite3_malloc64(nByte); - if( !pChange ){ + pC = (SessionChange *)sessionMalloc64(pSession, nByte); + if( !pC ){ rc = SQLITE_NOMEM; goto error_out; }else{ - memset(pChange, 0, sizeof(SessionChange)); - pChange->aRecord = (u8 *)&pChange[1]; + memset(pC, 0, sizeof(SessionChange)); + pC->aRecord = (u8 *)&pC[1]; } /* Populate the change object. None of the preupdate_old(), @@ -203710,17 +208054,17 @@ static void sessionPreupdateOneChange( }else if( pTab->abPK[i] ){ pSession->hook.xNew(pSession->hook.pCtx, i, &p); } - sessionSerializeValue(&pChange->aRecord[nByte], p, &nByte); + sessionSerializeValue(&pC->aRecord[nByte], p, &nByte); } /* Add the change to the hash-table */ if( pSession->bIndirect || pSession->hook.xDepth(pSession->hook.pCtx) ){ - pChange->bIndirect = 1; + pC->bIndirect = 1; } - pChange->nRecord = nByte; - pChange->op = op; - pChange->pNext = pTab->apChange[iHash]; - pTab->apChange[iHash] = pChange; + pC->nRecord = nByte; + pC->op = op; + pC->pNext = pTab->apChange[iHash]; + pTab->apChange[iHash] = pC; }else if( pC->bIndirect ){ /* If the existing change is considered "indirect", but this current @@ -203731,8 +208075,14 @@ static void sessionPreupdateOneChange( pC->bIndirect = 0; } } + + assert( rc==SQLITE_OK ); + if( pSession->bEnableSize ){ + rc = sessionUpdateMaxSize(op, pSession, pTab, pC); + } } + /* If an error has occurred, mark the session object as failed. 
*/ error_out: if( pTab->bStat1 ){ @@ -204062,7 +208412,7 @@ SQLITE_API int sqlite3session_diff( int nCol; /* Columns in zFrom.zTbl */ u8 *abPK; const char **azCol = 0; - rc = sessionTableInfo(db, zFrom, zTbl, &nCol, 0, &azCol, &abPK); + rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, &abPK); if( rc==SQLITE_OK ){ if( pTo->nCol!=nCol ){ bMismatch = 1; @@ -204160,7 +208510,7 @@ SQLITE_API int sqlite3session_create( ** Free the list of table objects passed as the first argument. The contents ** of the changed-rows hash tables are also deleted. */ -static void sessionDeleteTable(SessionTable *pList){ +static void sessionDeleteTable(sqlite3_session *pSession, SessionTable *pList){ SessionTable *pNext; SessionTable *pTab; @@ -204172,12 +208522,12 @@ static void sessionDeleteTable(SessionTable *pList){ SessionChange *pNextChange; for(p=pTab->apChange[i]; p; p=pNextChange){ pNextChange = p->pNext; - sqlite3_free(p); + sessionFree(pSession, p); } } - sqlite3_free((char*)pTab->azCol); /* cast works around VC++ bug */ - sqlite3_free(pTab->apChange); - sqlite3_free(pTab); + sessionFree(pSession, (char*)pTab->azCol); /* cast works around VC++ bug */ + sessionFree(pSession, pTab->apChange); + sessionFree(pSession, pTab); } } @@ -204205,9 +208555,11 @@ SQLITE_API void sqlite3session_delete(sqlite3_session *pSession){ /* Delete all attached table objects. And the contents of their ** associated hash-tables. */ - sessionDeleteTable(pSession->pTable); + sessionDeleteTable(pSession, pSession->pTable); - /* Free the session object itself. */ + /* Assert that all allocations have been freed and then free the + ** session object itself. */ + assert( pSession->nMalloc==0 ); sqlite3_free(pSession); } @@ -204254,7 +208606,8 @@ SQLITE_API int sqlite3session_attach( if( !pTab ){ /* Allocate new SessionTable object. */ - pTab = (SessionTable *)sqlite3_malloc64(sizeof(SessionTable) + nName + 1); + int nByte = sizeof(SessionTable) + nName + 1; + pTab = (SessionTable*)sessionMalloc64(pSession, nByte); if( !pTab ){ rc = SQLITE_NOMEM; }else{ @@ -204284,13 +208637,29 @@ SQLITE_API int sqlite3session_attach( ** If successful, return zero. Otherwise, if an OOM condition is encountered, ** set *pRc to SQLITE_NOMEM and return non-zero. */ -static int sessionBufferGrow(SessionBuffer *p, size_t nByte, int *pRc){ - if( *pRc==SQLITE_OK && (size_t)(p->nAlloc-p->nBuf)nBuf + nByte; + if( *pRc==SQLITE_OK && nReq>p->nAlloc ){ u8 *aNew; i64 nNew = p->nAlloc ? p->nAlloc : 128; + do { nNew = nNew*2; - }while( (size_t)(nNew-p->nBuf)SESSION_MAX_BUFFER_SZ ){ + nNew = SESSION_MAX_BUFFER_SZ; + if( nNewaBuf, nNew); if( 0==aNew ){ @@ -204851,7 +209220,7 @@ static int sessionGenerateChangeset( int nNoop; /* Size of buffer after writing tbl header */ /* Check the table schema is still Ok. 
*/ - rc = sessionTableInfo(db, pSession->zDb, zName, &nCol, 0, &azCol, &abPK); + rc = sessionTableInfo(0, db, pSession->zDb, zName, &nCol, 0,&azCol,&abPK); if( !rc && (pTab->nCol!=nCol || memcmp(abPK, pTab->abPK, nCol)) ){ rc = SQLITE_SCHEMA; } @@ -204941,7 +209310,11 @@ SQLITE_API int sqlite3session_changeset( int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */ void **ppChangeset /* OUT: Buffer containing changeset */ ){ - return sessionGenerateChangeset(pSession, 0, 0, 0, pnChangeset, ppChangeset); + int rc = sessionGenerateChangeset(pSession, 0, 0, 0, pnChangeset,ppChangeset); + assert( rc || pnChangeset==0 + || pSession->bEnableSize==0 || *pnChangeset<=pSession->nMaxChangesetSize + ); + return rc; } /* @@ -205026,6 +209399,46 @@ SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession){ return (ret==0); } +/* +** Return the amount of heap memory in use. +*/ +SQLITE_API sqlite3_int64 sqlite3session_memory_used(sqlite3_session *pSession){ + return pSession->nMalloc; +} + +/* +** Configure the session object passed as the first argument. +*/ +SQLITE_API int sqlite3session_object_config(sqlite3_session *pSession, int op, void *pArg){ + int rc = SQLITE_OK; + switch( op ){ + case SQLITE_SESSION_OBJCONFIG_SIZE: { + int iArg = *(int*)pArg; + if( iArg>=0 ){ + if( pSession->pTable ){ + rc = SQLITE_MISUSE; + }else{ + pSession->bEnableSize = (iArg!=0); + } + } + *(int*)pArg = pSession->bEnableSize; + break; + } + + default: + rc = SQLITE_MISUSE; + } + + return rc; +} + +/* +** Return the maximum size of sqlite3session_changeset() output. +*/ +SQLITE_API sqlite3_int64 sqlite3session_changeset_size(sqlite3_session *pSession){ + return pSession->nMaxChangesetSize; +} + /* ** Do the work for either sqlite3changeset_start() or start_strm(). */ @@ -205035,7 +209448,8 @@ static int sessionChangesetStart( void *pIn, int nChangeset, /* Size of buffer pChangeset in bytes */ void *pChangeset, /* Pointer to buffer containing changeset */ - int bInvert /* True to invert changeset */ + int bInvert, /* True to invert changeset */ + int bSkipEmpty /* True to skip empty UPDATE changes */ ){ sqlite3_changeset_iter *pRet; /* Iterator to return */ int nByte; /* Number of bytes to allocate for iterator */ @@ -205056,6 +209470,7 @@ static int sessionChangesetStart( pRet->in.pIn = pIn; pRet->in.bEof = (xInput ? 0 : 1); pRet->bInvert = bInvert; + pRet->bSkipEmpty = bSkipEmpty; /* Populate the output variable and return success. 
*/ *pp = pRet; @@ -205070,7 +209485,7 @@ SQLITE_API int sqlite3changeset_start( int nChangeset, /* Size of buffer pChangeset in bytes */ void *pChangeset /* Pointer to buffer containing changeset */ ){ - return sessionChangesetStart(pp, 0, 0, nChangeset, pChangeset, 0); + return sessionChangesetStart(pp, 0, 0, nChangeset, pChangeset, 0, 0); } SQLITE_API int sqlite3changeset_start_v2( sqlite3_changeset_iter **pp, /* OUT: Changeset iterator handle */ @@ -205079,7 +209494,7 @@ SQLITE_API int sqlite3changeset_start_v2( int flags ){ int bInvert = !!(flags & SQLITE_CHANGESETSTART_INVERT); - return sessionChangesetStart(pp, 0, 0, nChangeset, pChangeset, bInvert); + return sessionChangesetStart(pp, 0, 0, nChangeset, pChangeset, bInvert, 0); } /* @@ -205090,7 +209505,7 @@ SQLITE_API int sqlite3changeset_start_strm( int (*xInput)(void *pIn, void *pData, int *pnData), void *pIn ){ - return sessionChangesetStart(pp, xInput, pIn, 0, 0, 0); + return sessionChangesetStart(pp, xInput, pIn, 0, 0, 0, 0); } SQLITE_API int sqlite3changeset_start_v2_strm( sqlite3_changeset_iter **pp, /* OUT: Changeset iterator handle */ @@ -205099,7 +209514,7 @@ SQLITE_API int sqlite3changeset_start_v2_strm( int flags ){ int bInvert = !!(flags & SQLITE_CHANGESETSTART_INVERT); - return sessionChangesetStart(pp, xInput, pIn, 0, 0, bInvert); + return sessionChangesetStart(pp, xInput, pIn, 0, 0, bInvert, 0); } /* @@ -205225,11 +209640,14 @@ static int sessionReadRecord( SessionInput *pIn, /* Input data */ int nCol, /* Number of values in record */ u8 *abPK, /* Array of primary key flags, or NULL */ - sqlite3_value **apOut /* Write values to this array */ + sqlite3_value **apOut, /* Write values to this array */ + int *pbEmpty ){ int i; /* Used to iterate through columns */ int rc = SQLITE_OK; + assert( pbEmpty==0 || *pbEmpty==0 ); + if( pbEmpty ) *pbEmpty = 1; for(i=0; iaData[pIn->iNext++]; assert( apOut[i]==0 ); if( eType ){ + if( pbEmpty ) *pbEmpty = 0; apOut[i] = sqlite3ValueNew(0); if( !apOut[i] ) rc = SQLITE_NOMEM; } @@ -205420,31 +209839,27 @@ static int sessionChangesetReadTblhdr(sqlite3_changeset_iter *p){ } /* -** Advance the changeset iterator to the next change. +** Advance the changeset iterator to the next change. The differences between +** this function and sessionChangesetNext() are that ** -** If both paRec and pnRec are NULL, then this function works like the public -** API sqlite3changeset_next(). If SQLITE_ROW is returned, then the -** sqlite3changeset_new() and old() APIs may be used to query for values. -** -** Otherwise, if paRec and pnRec are not NULL, then a pointer to the change -** record is written to *paRec before returning and the number of bytes in -** the record to *pnRec. +** * If pbEmpty is not NULL and the change is a no-op UPDATE (an UPDATE +** that modifies no columns), this function sets (*pbEmpty) to 1. ** -** Either way, this function returns SQLITE_ROW if the iterator is -** successfully advanced to the next change in the changeset, an SQLite -** error code if an error occurs, or SQLITE_DONE if there are no further -** changes in the changeset. +** * If the iterator is configured to skip no-op UPDATEs, +** sessionChangesetNext() does that. This function does not. 
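** (An UPDATE is "empty" when its new.* record carries no values at all,
** i.e. sessionReadRecord() sees a zero type byte for every column and
** therefore leaves *pbEmpty set.)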
*/ -static int sessionChangesetNext( +static int sessionChangesetNextOne( sqlite3_changeset_iter *p, /* Changeset iterator */ u8 **paRec, /* If non-NULL, store record pointer here */ int *pnRec, /* If non-NULL, store size of record here */ - int *pbNew /* If non-NULL, true if new table */ + int *pbNew, /* If non-NULL, true if new table */ + int *pbEmpty ){ int i; u8 op; assert( (paRec==0 && pnRec==0) || (paRec && pnRec) ); + assert( pbEmpty==0 || *pbEmpty==0 ); /* If the iterator is in the error-state, return immediately. */ if( p->rc!=SQLITE_OK ) return p->rc; @@ -205517,13 +209932,13 @@ static int sessionChangesetNext( /* If this is an UPDATE or DELETE, read the old.* record. */ if( p->op!=SQLITE_INSERT && (p->bPatchset==0 || p->op==SQLITE_DELETE) ){ u8 *abPK = p->bPatchset ? p->abPK : 0; - p->rc = sessionReadRecord(&p->in, p->nCol, abPK, apOld); + p->rc = sessionReadRecord(&p->in, p->nCol, abPK, apOld, 0); if( p->rc!=SQLITE_OK ) return p->rc; } /* If this is an INSERT or UPDATE, read the new.* record. */ if( p->op!=SQLITE_DELETE ){ - p->rc = sessionReadRecord(&p->in, p->nCol, 0, apNew); + p->rc = sessionReadRecord(&p->in, p->nCol, 0, apNew, pbEmpty); if( p->rc!=SQLITE_OK ) return p->rc; } @@ -205550,6 +209965,37 @@ static int sessionChangesetNext( return SQLITE_ROW; } +/* +** Advance the changeset iterator to the next change. +** +** If both paRec and pnRec are NULL, then this function works like the public +** API sqlite3changeset_next(). If SQLITE_ROW is returned, then the +** sqlite3changeset_new() and old() APIs may be used to query for values. +** +** Otherwise, if paRec and pnRec are not NULL, then a pointer to the change +** record is written to *paRec before returning and the number of bytes in +** the record to *pnRec. +** +** Either way, this function returns SQLITE_ROW if the iterator is +** successfully advanced to the next change in the changeset, an SQLite +** error code if an error occurs, or SQLITE_DONE if there are no further +** changes in the changeset. +*/ +static int sessionChangesetNext( + sqlite3_changeset_iter *p, /* Changeset iterator */ + u8 **paRec, /* If non-NULL, store record pointer here */ + int *pnRec, /* If non-NULL, store size of record here */ + int *pbNew /* If non-NULL, true if new table */ +){ + int bEmpty; + int rc; + do { + bEmpty = 0; + rc = sessionChangesetNextOne(p, paRec, pnRec, pbNew, &bEmpty); + }while( rc==SQLITE_ROW && p->bSkipEmpty && bEmpty); + return rc; +} + /* ** Advance an iterator created by sqlite3changeset_start() to the next ** change in the changeset. This function may return SQLITE_ROW, SQLITE_DONE @@ -205822,9 +210268,9 @@ static int sessionChangesetInvert( /* Read the old.* and new.* records for the update change. */ pInput->iNext += 2; - rc = sessionReadRecord(pInput, nCol, 0, &apVal[0]); + rc = sessionReadRecord(pInput, nCol, 0, &apVal[0], 0); if( rc==SQLITE_OK ){ - rc = sessionReadRecord(pInput, nCol, 0, &apVal[nCol]); + rc = sessionReadRecord(pInput, nCol, 0, &apVal[nCol], 0); } /* Write the new old.* record. 
Consists of the PK columns from the @@ -205925,16 +210371,25 @@ SQLITE_API int sqlite3changeset_invert_strm( return rc; } + +typedef struct SessionUpdate SessionUpdate; +struct SessionUpdate { + sqlite3_stmt *pStmt; + u32 *aMask; + SessionUpdate *pNext; +}; + typedef struct SessionApplyCtx SessionApplyCtx; struct SessionApplyCtx { sqlite3 *db; sqlite3_stmt *pDelete; /* DELETE statement */ - sqlite3_stmt *pUpdate; /* UPDATE statement */ sqlite3_stmt *pInsert; /* INSERT statement */ sqlite3_stmt *pSelect; /* SELECT statement */ int nCol; /* Size of azCol[] and abPK[] arrays */ const char **azCol; /* Array of column names */ u8 *abPK; /* Boolean array - true if column is in PK */ + u32 *aUpdateMask; /* Used by sessionUpdateFind */ + SessionUpdate *pUp; int bStat1; /* True if table is sqlite_stat1 */ int bDeferConstraints; /* True to defer constraints */ int bInvertConstraints; /* Invert when iterating constraints buffer */ @@ -205944,6 +210399,167 @@ struct SessionApplyCtx { u8 bRebase; /* True to collect rebase information */ }; +/* Number of prepared UPDATE statements to cache. */ +#define SESSION_UPDATE_CACHE_SZ 12 + +/* +** Find a prepared UPDATE statement suitable for the UPDATE step currently +** being visited by the iterator. The UPDATE is of the form: +** +** UPDATE tbl SET col = ?, col2 = ? WHERE pk1 IS ? AND pk2 IS ? +*/ +static int sessionUpdateFind( + sqlite3_changeset_iter *pIter, + SessionApplyCtx *p, + int bPatchset, + sqlite3_stmt **ppStmt +){ + int rc = SQLITE_OK; + SessionUpdate *pUp = 0; + int nCol = pIter->nCol; + int nU32 = (pIter->nCol+33)/32; + int ii; + + if( p->aUpdateMask==0 ){ + p->aUpdateMask = sqlite3_malloc(nU32*sizeof(u32)); + if( p->aUpdateMask==0 ){ + rc = SQLITE_NOMEM; + } + } + + if( rc==SQLITE_OK ){ + memset(p->aUpdateMask, 0, nU32*sizeof(u32)); + rc = SQLITE_CORRUPT; + for(ii=0; iinCol; ii++){ + if( sessionChangesetNew(pIter, ii) ){ + p->aUpdateMask[ii/32] |= (1<<(ii%32)); + rc = SQLITE_OK; + } + } + } + + if( rc==SQLITE_OK ){ + if( bPatchset ) p->aUpdateMask[nCol/32] |= (1<<(nCol%32)); + + if( p->pUp ){ + int nUp = 0; + SessionUpdate **pp = &p->pUp; + while( 1 ){ + nUp++; + if( 0==memcmp(p->aUpdateMask, (*pp)->aMask, nU32*sizeof(u32)) ){ + pUp = *pp; + *pp = pUp->pNext; + pUp->pNext = p->pUp; + p->pUp = pUp; + break; + } + + if( (*pp)->pNext ){ + pp = &(*pp)->pNext; + }else{ + if( nUp>=SESSION_UPDATE_CACHE_SZ ){ + sqlite3_finalize((*pp)->pStmt); + sqlite3_free(*pp); + *pp = 0; + } + break; + } + } + } + + if( pUp==0 ){ + int nByte = sizeof(SessionUpdate) * nU32*sizeof(u32); + int bStat1 = (sqlite3_stricmp(pIter->zTab, "sqlite_stat1")==0); + pUp = (SessionUpdate*)sqlite3_malloc(nByte); + if( pUp==0 ){ + rc = SQLITE_NOMEM; + }else{ + const char *zSep = ""; + SessionBuffer buf; + + memset(&buf, 0, sizeof(buf)); + pUp->aMask = (u32*)&pUp[1]; + memcpy(pUp->aMask, p->aUpdateMask, nU32*sizeof(u32)); + + sessionAppendStr(&buf, "UPDATE main.", &rc); + sessionAppendIdent(&buf, pIter->zTab, &rc); + sessionAppendStr(&buf, " SET ", &rc); + + /* Create the assignments part of the UPDATE */ + for(ii=0; iinCol; ii++){ + if( p->abPK[ii]==0 && sessionChangesetNew(pIter, ii) ){ + sessionAppendStr(&buf, zSep, &rc); + sessionAppendIdent(&buf, p->azCol[ii], &rc); + sessionAppendStr(&buf, " = ?", &rc); + sessionAppendInteger(&buf, ii*2+1, &rc); + zSep = ", "; + } + } + + /* Create the WHERE clause part of the UPDATE */ + zSep = ""; + sessionAppendStr(&buf, " WHERE ", &rc); + for(ii=0; iinCol; ii++){ + if( p->abPK[ii] || (bPatchset==0 && sessionChangesetOld(pIter, ii)) ){ + 
sessionAppendStr(&buf, zSep, &rc); + if( bStat1 && ii==1 ){ + assert( sqlite3_stricmp(p->azCol[ii], "idx")==0 ); + sessionAppendStr(&buf, + "idx IS CASE " + "WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL " + "ELSE ?4 END ", &rc + ); + }else{ + sessionAppendIdent(&buf, p->azCol[ii], &rc); + sessionAppendStr(&buf, " IS ?", &rc); + sessionAppendInteger(&buf, ii*2+2, &rc); + } + zSep = " AND "; + } + } + + if( rc==SQLITE_OK ){ + char *zSql = (char*)buf.aBuf; + rc = sqlite3_prepare_v2(p->db, zSql, buf.nBuf, &pUp->pStmt, 0); + } + + if( rc!=SQLITE_OK ){ + sqlite3_free(pUp); + pUp = 0; + }else{ + pUp->pNext = p->pUp; + p->pUp = pUp; + } + sqlite3_free(buf.aBuf); + } + } + } + + assert( (rc==SQLITE_OK)==(pUp!=0) ); + if( pUp ){ + *ppStmt = pUp->pStmt; + }else{ + *ppStmt = 0; + } + return rc; +} + +/* +** Free all cached UPDATE statements. +*/ +static void sessionUpdateFree(SessionApplyCtx *p){ + SessionUpdate *pUp; + SessionUpdate *pNext; + for(pUp=p->pUp; pUp; pUp=pNext){ + pNext = pUp->pNext; + sqlite3_finalize(pUp->pStmt); + sqlite3_free(pUp); + } + p->pUp = 0; + sqlite3_free(p->aUpdateMask); + p->aUpdateMask = 0; +} + /* ** Formulate a statement to DELETE a row from database db. Assuming a table ** structure like this: @@ -206013,103 +210629,6 @@ static int sessionDeleteRow( return rc; } -/* -** Formulate and prepare a statement to UPDATE a row from database db. -** Assuming a table structure like this: -** -** CREATE TABLE x(a, b, c, d, PRIMARY KEY(a, c)); -** -** The UPDATE statement looks like this: -** -** UPDATE x SET -** a = CASE WHEN ?2 THEN ?3 ELSE a END, -** b = CASE WHEN ?5 THEN ?6 ELSE b END, -** c = CASE WHEN ?8 THEN ?9 ELSE c END, -** d = CASE WHEN ?11 THEN ?12 ELSE d END -** WHERE a = ?1 AND c = ?7 AND (?13 OR -** (?5==0 OR b IS ?4) AND (?11==0 OR d IS ?10) AND -** ) -** -** For each column in the table, there are three variables to bind: -** -** ?(i*3+1) The old.* value of the column, if any. -** ?(i*3+2) A boolean flag indicating that the value is being modified. -** ?(i*3+3) The new.* value of the column, if any. -** -** Also, a boolean flag that, if set to true, causes the statement to update -** a row even if the non-PK values do not match. This is required if the -** conflict-handler is invoked with CHANGESET_DATA and returns -** CHANGESET_REPLACE. This is variable "?(nCol*3+1)". -** -** If successful, SQLITE_OK is returned and SessionApplyCtx.pUpdate is left -** pointing to the prepared version of the SQL statement. 
-*/ -static int sessionUpdateRow( - sqlite3 *db, /* Database handle */ - const char *zTab, /* Table name */ - SessionApplyCtx *p /* Session changeset-apply context */ -){ - int rc = SQLITE_OK; - int i; - const char *zSep = ""; - SessionBuffer buf = {0, 0, 0}; - - /* Append "UPDATE tbl SET " */ - sessionAppendStr(&buf, "UPDATE main.", &rc); - sessionAppendIdent(&buf, zTab, &rc); - sessionAppendStr(&buf, " SET ", &rc); - - /* Append the assignments */ - for(i=0; inCol; i++){ - sessionAppendStr(&buf, zSep, &rc); - sessionAppendIdent(&buf, p->azCol[i], &rc); - sessionAppendStr(&buf, " = CASE WHEN ?", &rc); - sessionAppendInteger(&buf, i*3+2, &rc); - sessionAppendStr(&buf, " THEN ?", &rc); - sessionAppendInteger(&buf, i*3+3, &rc); - sessionAppendStr(&buf, " ELSE ", &rc); - sessionAppendIdent(&buf, p->azCol[i], &rc); - sessionAppendStr(&buf, " END", &rc); - zSep = ", "; - } - - /* Append the PK part of the WHERE clause */ - sessionAppendStr(&buf, " WHERE ", &rc); - for(i=0; inCol; i++){ - if( p->abPK[i] ){ - sessionAppendIdent(&buf, p->azCol[i], &rc); - sessionAppendStr(&buf, " = ?", &rc); - sessionAppendInteger(&buf, i*3+1, &rc); - sessionAppendStr(&buf, " AND ", &rc); - } - } - - /* Append the non-PK part of the WHERE clause */ - sessionAppendStr(&buf, " (?", &rc); - sessionAppendInteger(&buf, p->nCol*3+1, &rc); - sessionAppendStr(&buf, " OR 1", &rc); - for(i=0; inCol; i++){ - if( !p->abPK[i] ){ - sessionAppendStr(&buf, " AND (?", &rc); - sessionAppendInteger(&buf, i*3+2, &rc); - sessionAppendStr(&buf, "=0 OR ", &rc); - sessionAppendIdent(&buf, p->azCol[i], &rc); - sessionAppendStr(&buf, " IS ?", &rc); - sessionAppendInteger(&buf, i*3+1, &rc); - sessionAppendStr(&buf, ")", &rc); - } - } - sessionAppendStr(&buf, ")", &rc); - - if( rc==SQLITE_OK ){ - rc = sqlite3_prepare_v2(db, (char *)buf.aBuf, buf.nBuf, &p->pUpdate, 0); - } - sqlite3_free(buf.aBuf); - - return rc; -} - - /* ** Formulate and prepare an SQL statement to query table zTab by primary ** key. Assuming the following table structure: @@ -206190,17 +210709,6 @@ static int sessionStat1Sql(sqlite3 *db, SessionApplyCtx *p){ "?3)" ); } - if( rc==SQLITE_OK ){ - rc = sessionPrepare(db, &p->pUpdate, - "UPDATE main.sqlite_stat1 SET " - "tbl = CASE WHEN ?2 THEN ?3 ELSE tbl END, " - "idx = CASE WHEN ?5 THEN ?6 ELSE idx END, " - "stat = CASE WHEN ?8 THEN ?9 ELSE stat END " - "WHERE tbl=?1 AND idx IS " - "CASE WHEN length(?4)=0 AND typeof(?4)='blob' THEN NULL ELSE ?4 END " - "AND (?10 OR ?8=0 OR stat IS ?7)" - ); - } if( rc==SQLITE_OK ){ rc = sessionPrepare(db, &p->pDelete, "DELETE FROM main.sqlite_stat1 WHERE tbl=?1 AND idx IS " @@ -206517,7 +211025,7 @@ static int sessionApplyOneOp( int nCol; int rc = SQLITE_OK; - assert( p->pDelete && p->pUpdate && p->pInsert && p->pSelect ); + assert( p->pDelete && p->pInsert && p->pSelect ); assert( p->azCol && p->abPK ); assert( !pbReplace || *pbReplace==0 ); @@ -206557,29 +211065,28 @@ static int sessionApplyOneOp( }else if( op==SQLITE_UPDATE ){ int i; + sqlite3_stmt *pUp = 0; + int bPatchset = (pbRetry==0 || pIter->bPatchset); + + rc = sessionUpdateFind(pIter, p, bPatchset, &pUp); /* Bind values to the UPDATE statement. 
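    ** (A note on the scheme used by sessionUpdateFind(): each table column
    ** i uses at most two bound variables - ?(i*2+1) carries the new.* value
    ** assigned in the SET list, and ?(i*2+2) carries the old.* or PK value
    ** tested by the WHERE clause. As a sketch only, for a hypothetical
    ** table t1(a PRIMARY KEY, b, c), a patchset UPDATE that modifies just
    ** column b compiles to:
    **
    **     UPDATE main.t1 SET b = ?3 WHERE a IS ?2
    **
    ** while the same change from a full changeset also appends "AND b IS ?4"
    ** so the original value is verified. Columns that are neither modified
    ** nor part of the PK appear in neither clause, which is what makes the
    ** statements cacheable per update-mask.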
    */
    for(i=0; rc==SQLITE_OK && i<nCol; i++){
      sqlite3_value *pOld = sessionChangesetOld(pIter, i);
      sqlite3_value *pNew = sessionChangesetNew(pIter, i);
-      sqlite3_bind_int(p->pUpdate, i*3+2, !!pNew);
-      if( pOld ){
-        rc = sessionBindValue(p->pUpdate, i*3+1, pOld);
+      if( p->abPK[i] || (bPatchset==0 && pOld) ){
+        rc = sessionBindValue(pUp, i*2+2, pOld);
      }
      if( rc==SQLITE_OK && pNew ){
-        rc = sessionBindValue(p->pUpdate, i*3+3, pNew);
+        rc = sessionBindValue(pUp, i*2+1, pNew);
      }
    }
-    if( rc==SQLITE_OK ){
-      sqlite3_bind_int(p->pUpdate, nCol*3+1, pbRetry==0 || pIter->bPatchset);
-    }
    if( rc!=SQLITE_OK ) return rc;

    /* Attempt the UPDATE. In the case of a NOTFOUND or DATA conflict,
    ** the result will be SQLITE_OK with 0 rows modified. */
-    sqlite3_step(p->pUpdate);
-    rc = sqlite3_reset(p->pUpdate);
+    sqlite3_step(pUp);
+    rc = sqlite3_reset(pUp);

    if( rc==SQLITE_OK && sqlite3_changes(p->db)==0 ){
      /* A NOTFOUND or DATA error. Search the table to see if it contains
@@ -206711,7 +211218,7 @@ static int sessionRetryConstraints(
    memset(&pApply->constraints, 0, sizeof(SessionBuffer));

    rc = sessionChangesetStart(
-        &pIter2, 0, 0, cons.nBuf, cons.aBuf, pApply->bInvertConstraints
+        &pIter2, 0, 0, cons.nBuf, cons.aBuf, pApply->bInvertConstraints, 1
    );
    if( rc==SQLITE_OK ){
      size_t nByte = 2*pApply->nCol*sizeof(sqlite3_value*);
@@ -206802,14 +211309,13 @@ static int sessionChangesetApply(
      );
      if( rc!=SQLITE_OK ) break;

+      sessionUpdateFree(&sApply);
      sqlite3_free((char*)sApply.azCol);  /* cast works around VC++ bug */
      sqlite3_finalize(sApply.pDelete);
-      sqlite3_finalize(sApply.pUpdate);
      sqlite3_finalize(sApply.pInsert);
      sqlite3_finalize(sApply.pSelect);
      sApply.db = db;
      sApply.pDelete = 0;
-      sApply.pUpdate = 0;
      sApply.pInsert = 0;
      sApply.pSelect = 0;
      sApply.nCol = 0;
@@ -206837,7 +211343,7 @@ static int sessionChangesetApply(
        int i;

        sqlite3changeset_pk(pIter, &abPK, 0);
-        rc = sessionTableInfo(
+        rc = sessionTableInfo(0,
            db, "main", zNew, &sApply.nCol, &zTab, &sApply.azCol, &sApply.abPK
        );
        if( rc!=SQLITE_OK ) break;
@@ -206873,11 +211379,10 @@
          }
          sApply.bStat1 = 1;
        }else{
-          if((rc = sessionSelectRow(db, zTab, &sApply))
-           || (rc = sessionUpdateRow(db, zTab, &sApply))
-           || (rc = sessionDeleteRow(db, zTab, &sApply))
-           || (rc = sessionInsertRow(db, zTab, &sApply))
-          ){
+          if( (rc = sessionSelectRow(db, zTab, &sApply))
+           || (rc = sessionDeleteRow(db, zTab, &sApply))
+           || (rc = sessionInsertRow(db, zTab, &sApply))
+          ){
            break;
          }
          sApply.bStat1 = 0;
@@ -206936,9 +211441,9 @@
    *pnRebase = sApply.rebase.nBuf;
    sApply.rebase.aBuf = 0;
  }
+  sessionUpdateFree(&sApply);
  sqlite3_finalize(sApply.pInsert);
  sqlite3_finalize(sApply.pDelete);
-  sqlite3_finalize(sApply.pUpdate);
  sqlite3_finalize(sApply.pSelect);
  sqlite3_free((char*)sApply.azCol);  /* cast works around VC++ bug */
  sqlite3_free((char*)sApply.constraints.aBuf);
@@ -206969,8 +211474,8 @@ SQLITE_API int sqlite3changeset_apply_v2(
  int flags
){
  sqlite3_changeset_iter *pIter;  /* Iterator to skip through changeset */
-  int bInverse = !!(flags & SQLITE_CHANGESETAPPLY_INVERT);
-  int rc = sessionChangesetStart(&pIter, 0, 0, nChangeset, pChangeset,bInverse);
+  int bInv = !!(flags & SQLITE_CHANGESETAPPLY_INVERT);
+  int rc = sessionChangesetStart(&pIter, 0, 0, nChangeset, pChangeset, bInv, 1);
  if( rc==SQLITE_OK ){
    rc = sessionChangesetApply(
        db, pIter, xFilter, xConflict, pCtx, ppRebase, pnRebase, flags
@@ -207028,7 +211533,7 @@ SQLITE_API int sqlite3changeset_apply_v2_strm(
){
  sqlite3_changeset_iter *pIter;  /* Iterator to skip through changeset */
  int bInverse = !!(flags & SQLITE_CHANGESETAPPLY_INVERT);
-  int rc = sessionChangesetStart(&pIter, xInput, pIn, 0, 0,
bInverse); + int rc = sessionChangesetStart(&pIter, xInput, pIn, 0, 0, bInverse, 1); if( rc==SQLITE_OK ){ rc = sessionChangesetApply( db, pIter, xFilter, xConflict, pCtx, ppRebase, pnRebase, flags @@ -207316,7 +211821,7 @@ static int sessionChangesetToHash( } } - if( sessionGrowHash(pIter->bPatchset, pTab) ){ + if( sessionGrowHash(0, pIter->bPatchset, pTab) ){ rc = SQLITE_NOMEM; break; } @@ -207502,7 +212007,7 @@ SQLITE_API int sqlite3changegroup_output_strm( */ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup *pGrp){ if( pGrp ){ - sessionDeleteTable(pGrp->pList); + sessionDeleteTable(0, pGrp->pList); sqlite3_free(pGrp); } } @@ -207648,7 +212153,7 @@ static void sessionAppendPartialUpdate( int n1 = sessionSerialLen(a1); int n2 = sessionSerialLen(a2); if( pIter->abPK[i] || a2[0]==0 ){ - if( !pIter->abPK[i] ) bData = 1; + if( !pIter->abPK[i] && a1[0] ) bData = 1; memcpy(pOut, a1, n1); pOut += n1; }else if( a2[0]!=0xFF ){ @@ -207903,7 +212408,7 @@ SQLITE_API int sqlite3rebaser_rebase_strm( */ SQLITE_API void sqlite3rebaser_delete(sqlite3_rebaser *p){ if( p ){ - sessionDeleteTable(p->grp.pList); + sessionDeleteTable(0, p->grp.pList); sqlite3_free(p); } } @@ -208706,6 +213211,7 @@ struct Fts5Config { Fts5Tokenizer *pTok; fts5_tokenizer *pTokApi; int bLock; /* True when table is preparing statement */ + int ePattern; /* FTS_PATTERN_XXX constant */ /* Values loaded from the %_config table */ int iCookie; /* Incremented when %_config is modified */ @@ -208726,17 +213232,19 @@ struct Fts5Config { }; /* Current expected value of %_config table 'version' field */ -#define FTS5_CURRENT_VERSION 4 +#define FTS5_CURRENT_VERSION 4 #define FTS5_CONTENT_NORMAL 0 #define FTS5_CONTENT_NONE 1 #define FTS5_CONTENT_EXTERNAL 2 -#define FTS5_DETAIL_FULL 0 -#define FTS5_DETAIL_NONE 1 -#define FTS5_DETAIL_COLUMNS 2 - +#define FTS5_DETAIL_FULL 0 +#define FTS5_DETAIL_NONE 1 +#define FTS5_DETAIL_COLUMNS 2 +#define FTS5_PATTERN_NONE 0 +#define FTS5_PATTERN_LIKE 65 /* matches SQLITE_INDEX_CONSTRAINT_LIKE */ +#define FTS5_PATTERN_GLOB 66 /* matches SQLITE_INDEX_CONSTRAINT_GLOB */ static int sqlite3Fts5ConfigParse( Fts5Global*, sqlite3*, int, const char **, Fts5Config**, char** @@ -209006,7 +213514,7 @@ static int sqlite3Fts5IndexSetAverages(Fts5Index *p, const u8*, int); /* ** Functions called by the storage module as part of integrity-check. */ -static int sqlite3Fts5IndexIntegrityCheck(Fts5Index*, u64 cksum); +static int sqlite3Fts5IndexIntegrityCheck(Fts5Index*, u64 cksum, int bUseCksum); /* ** Called during virtual module initialization to register UDF @@ -209076,8 +213584,7 @@ static int sqlite3Fts5GetTokenizer( Fts5Global*, const char **azArg, int nArg, - Fts5Tokenizer**, - fts5_tokenizer**, + Fts5Config*, char **pzErr ); @@ -209161,7 +213668,7 @@ static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64, sqlite3_value**); static int sqlite3Fts5StorageContentInsert(Fts5Storage *p, sqlite3_value**, i64*); static int sqlite3Fts5StorageIndexInsert(Fts5Storage *p, sqlite3_value**, i64); -static int sqlite3Fts5StorageIntegrity(Fts5Storage *p); +static int sqlite3Fts5StorageIntegrity(Fts5Storage *p, int iArg); static int sqlite3Fts5StorageStmt(Fts5Storage *p, int eStmt, sqlite3_stmt**, char**); static void sqlite3Fts5StorageStmtRelease(Fts5Storage *p, int eStmt, sqlite3_stmt*); @@ -209206,11 +213713,19 @@ struct Fts5Token { /* Parse a MATCH expression. 
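**
** (The bPhraseToAnd argument and sqlite3Fts5ExprPattern() below support
** the trigram tokenizer's LIKE/GLOB optimization. As a hedged sketch,
** assuming a trigram-tokenized table t(x), a constraint such as:
**
**     ... WHERE x LIKE '%sqlite%'
**
** is first narrowed using the equivalent of:
**
**     ... WHERE t MATCH '"sqlite"'
**
** after which the original pattern is re-checked against each candidate
** row, since the MATCH expression matches a superset of the LIKE.)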
*/ static int sqlite3Fts5ExprNew( Fts5Config *pConfig, + int bPhraseToAnd, int iCol, /* Column on LHS of MATCH operator */ const char *zExpr, Fts5Expr **ppNew, char **pzErr ); +static int sqlite3Fts5ExprPattern( + Fts5Config *pConfig, + int bGlob, + int iCol, + const char *zText, + Fts5Expr **pp +); /* ** for(rc = sqlite3Fts5ExprFirst(pExpr, pIdx, bDesc); @@ -209319,6 +213834,10 @@ static int sqlite3Fts5AuxInit(fts5_api*); */ static int sqlite3Fts5TokenizerInit(fts5_api*); +static int sqlite3Fts5TokenizerPattern( + int (*xCreate)(void*, const char**, int, Fts5Tokenizer**), + Fts5Tokenizer *pTok +); /* ** End of interface to code in fts5_tokenizer.c. **************************************************************************/ @@ -209365,6 +213884,8 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*); #define FTS5_PLUS 14 #define FTS5_STAR 15 +/* This file is automatically generated by Lemon from input grammar +** source file "fts5parse.y". */ /* ** 2000-05-29 ** @@ -209389,8 +213910,6 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*); ** The following is the concatenation of all %include directives from the ** input grammar file: */ -/* #include */ -/* #include */ /************ Begin %include sections from the grammar ************************/ /* #include "fts5Int.h" */ @@ -209420,11 +213939,26 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*); #define fts5YYMALLOCARGTYPE u64 /**************** End of %include directives **********************************/ -/* These constants specify the various numeric values for terminal symbols -** in a format understandable to "makeheaders". This section is blank unless -** "lemon" is run with the "-m" command-line option. -***************** Begin makeheaders token definitions *************************/ -/**************** End makeheaders token definitions ***************************/ +/* These constants specify the various numeric values for terminal symbols. +***************** Begin token definitions *************************************/ +#ifndef FTS5_OR +#define FTS5_OR 1 +#define FTS5_AND 2 +#define FTS5_NOT 3 +#define FTS5_TERM 4 +#define FTS5_COLON 5 +#define FTS5_MINUS 6 +#define FTS5_LCP 7 +#define FTS5_RCP 8 +#define FTS5_STRING 9 +#define FTS5_LP 10 +#define FTS5_RP 11 +#define FTS5_CARET 12 +#define FTS5_COMMA 13 +#define FTS5_PLUS 14 +#define FTS5_STAR 15 +#endif +/**************** End token definitions ***************************************/ /* The next sections is a series of control #defines. ** various aspects of the generated parser. 
@@ -209707,6 +214241,7 @@ typedef struct fts5yyParser fts5yyParser; #ifndef NDEBUG /* #include */ +/* #include */ static FILE *fts5yyTraceFILE = 0; static char *fts5yyTracePrompt = 0; #endif /* NDEBUG */ @@ -210121,7 +214656,7 @@ static fts5YYACTIONTYPE fts5yy_find_shift_action( #endif /* fts5YYWILDCARD */ return fts5yy_default[stateno]; }else{ - assert( i>=0 && i=0 && i<(int)(sizeof(fts5yy_action)/sizeof(fts5yy_action[0])) ); return fts5yy_action[i]; } }while(1); @@ -210335,54 +214870,6 @@ static fts5YYACTIONTYPE fts5yy_reduce( (void)fts5yyLookahead; (void)fts5yyLookaheadToken; fts5yymsp = fts5yypParser->fts5yytos; -#ifndef NDEBUG - if( fts5yyTraceFILE && fts5yyruleno<(int)(sizeof(fts5yyRuleName)/sizeof(fts5yyRuleName[0])) ){ - fts5yysize = fts5yyRuleInfoNRhs[fts5yyruleno]; - if( fts5yysize ){ - fprintf(fts5yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", - fts5yyTracePrompt, - fts5yyruleno, fts5yyRuleName[fts5yyruleno], - fts5yyrulenofts5yytos - fts5yypParser->fts5yystack)>fts5yypParser->fts5yyhwm ){ - fts5yypParser->fts5yyhwm++; - assert( fts5yypParser->fts5yyhwm == (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack)); - } -#endif -#if fts5YYSTACKDEPTH>0 - if( fts5yypParser->fts5yytos>=fts5yypParser->fts5yystackEnd ){ - fts5yyStackOverflow(fts5yypParser); - /* The call to fts5yyStackOverflow() above pops the stack until it is - ** empty, causing the main parser loop to exit. So the return value - ** is never used and does not matter. */ - return 0; - } -#else - if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz-1] ){ - if( fts5yyGrowStack(fts5yypParser) ){ - fts5yyStackOverflow(fts5yypParser); - /* The call to fts5yyStackOverflow() above pops the stack until it is - ** empty, causing the main parser loop to exit. So the return value - ** is never used and does not matter. */ - return 0; - } - fts5yymsp = fts5yypParser->fts5yytos; - } -#endif - } switch( fts5yyruleno ){ /* Beginning here are the reduction cases. 
A typical example @@ -210685,12 +215172,56 @@ static void sqlite3Fts5Parser( } #endif - do{ + while(1){ /* Exit by "break" */ + assert( fts5yypParser->fts5yytos>=fts5yypParser->fts5yystack ); assert( fts5yyact==fts5yypParser->fts5yytos->stateno ); fts5yyact = fts5yy_find_shift_action((fts5YYCODETYPE)fts5yymajor,fts5yyact); if( fts5yyact >= fts5YY_MIN_REDUCE ){ - fts5yyact = fts5yy_reduce(fts5yypParser,fts5yyact-fts5YY_MIN_REDUCE,fts5yymajor, - fts5yyminor sqlite3Fts5ParserCTX_PARAM); + unsigned int fts5yyruleno = fts5yyact - fts5YY_MIN_REDUCE; /* Reduce by this rule */ + assert( fts5yyruleno<(int)(sizeof(fts5yyRuleName)/sizeof(fts5yyRuleName[0])) ); +#ifndef NDEBUG + if( fts5yyTraceFILE ){ + int fts5yysize = fts5yyRuleInfoNRhs[fts5yyruleno]; + if( fts5yysize ){ + fprintf(fts5yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", + fts5yyTracePrompt, + fts5yyruleno, fts5yyRuleName[fts5yyruleno], + fts5yyrulenofts5yytos[fts5yysize].stateno); + }else{ + fprintf(fts5yyTraceFILE, "%sReduce %d [%s]%s.\n", + fts5yyTracePrompt, fts5yyruleno, fts5yyRuleName[fts5yyruleno], + fts5yyrulenofts5yytos - fts5yypParser->fts5yystack)>fts5yypParser->fts5yyhwm ){ + fts5yypParser->fts5yyhwm++; + assert( fts5yypParser->fts5yyhwm == + (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack)); + } +#endif +#if fts5YYSTACKDEPTH>0 + if( fts5yypParser->fts5yytos>=fts5yypParser->fts5yystackEnd ){ + fts5yyStackOverflow(fts5yypParser); + break; + } +#else + if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz-1] ){ + if( fts5yyGrowStack(fts5yypParser) ){ + fts5yyStackOverflow(fts5yypParser); + break; + } + } +#endif + } + fts5yyact = fts5yy_reduce(fts5yypParser,fts5yyruleno,fts5yymajor,fts5yyminor sqlite3Fts5ParserCTX_PARAM); }else if( fts5yyact <= fts5YY_MAX_SHIFTREDUCE ){ fts5yy_shift(fts5yypParser,fts5yyact,(fts5YYCODETYPE)fts5yymajor,fts5yyminor); #ifndef fts5YYNOERRORRECOVERY @@ -210803,7 +215334,7 @@ static void sqlite3Fts5Parser( break; #endif } - }while( fts5yypParser->fts5yytos>fts5yypParser->fts5yystack ); + } #ifndef NDEBUG if( fts5yyTraceFILE ){ fts5yyStackEntry *i; @@ -211401,7 +215932,7 @@ static int fts5Bm25GetData( int rc = SQLITE_OK; /* Return code */ Fts5Bm25Data *p; /* Object to return */ - p = pApi->xGetAuxdata(pFts, 0); + p = (Fts5Bm25Data*)pApi->xGetAuxdata(pFts, 0); if( p==0 ){ int nPhrase; /* Number of phrases in query */ sqlite3_int64 nRow = 0; /* Number of rows in table */ @@ -211475,7 +216006,7 @@ static void fts5Bm25Function( ){ const double k1 = 1.2; /* Constant "k1" from BM25 formula */ const double b = 0.75; /* Constant "b" from BM25 formula */ - int rc = SQLITE_OK; /* Error code */ + int rc; /* Error code */ double score = 0.0; /* SQL function return value */ Fts5Bm25Data *pData; /* Values allocated/calculated once only */ int i; /* Iterator variable */ @@ -211507,17 +216038,15 @@ static void fts5Bm25Function( D = (double)nTok; } - /* Determine the BM25 score for the current row. */ - for(i=0; rc==SQLITE_OK && inPhrase; i++){ - score += pData->aIDF[i] * ( - ( aFreq[i] * (k1 + 1.0) ) / - ( aFreq[i] + k1 * (1 - b + b * D / pData->avgdl) ) - ); - } - - /* If no error has occurred, return the calculated score. Otherwise, - ** throw an SQL exception. */ + /* Determine and return the BM25 score for the current row. Or, if an + ** error has occurred, throw an exception. 
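+  **
+  ** The loop below implements the usual BM25 shape. For each phrase i,
+  ** with tf = aFreq[i], the contribution to the score is:
+  **
+  **     aIDF[i] * (tf * (k1+1)) / (tf + k1*(1 - b + b*D/avgdl))
+  **
+  ** where D is the token count of the current row (or column subset) and
+  ** avgdl is the average row length in tokens. The sum is negated before
+  ** being returned so that more relevant rows sort first with an
+  ** ascending "ORDER BY rank".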
  */
  if( rc==SQLITE_OK ){
+    for(i=0; i<pData->nPhrase; i++){
+      score += pData->aIDF[i] * (
+        ( aFreq[i] * (k1 + 1.0) ) /
+        ( aFreq[i] + k1 * (1 - b + b * D / pData->avgdl) )
+      );
+    }
    sqlite3_result_double(pCtx, -1.0 * score);
  }else{
    sqlite3_result_error_code(pCtx, rc);
@@ -211730,6 +216259,7 @@ static int sqlite3Fts5PoslistNext64(
    i64 iOff = *piOff;
    int iVal;
    fts5FastGetVarint32(a, i, iVal);
+    assert( iVal>=0 );
    if( iVal<=1 ){
      if( iVal==0 ){
        *pi = i;
@@ -211743,9 +216273,12 @@
        *piOff = -1;
        return 1;
      }
+      *piOff = iOff + ((iVal-2) & 0x7FFFFFFF);
+    }else{
+      *piOff = (iOff & (i64)0x7FFFFFFF<<32)+((iOff + (iVal-2)) & 0x7FFFFFFF);
    }
-    *piOff = iOff + ((iVal-2) & 0x7FFFFFFF);
    *pi = i;
+    assert( *piOff>=iOff );
    return 0;
  }
}
@@ -211784,14 +216317,16 @@ static void sqlite3Fts5PoslistSafeAppend(
  i64 *piPrev,
  i64 iPos
){
-  static const i64 colmask = ((i64)(0x7FFFFFFF)) << 32;
-  if( (iPos & colmask) != (*piPrev & colmask) ){
-    pBuf->p[pBuf->n++] = 1;
-    pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], (iPos>>32));
-    *piPrev = (iPos & colmask);
+  if( iPos>=*piPrev ){
+    static const i64 colmask = ((i64)(0x7FFFFFFF)) << 32;
+    if( (iPos & colmask) != (*piPrev & colmask) ){
+      pBuf->p[pBuf->n++] = 1;
+      pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], (iPos>>32));
+      *piPrev = (iPos & colmask);
+    }
+    pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], (iPos-*piPrev)+2);
+    *piPrev = iPos;
  }
-  pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], (iPos-*piPrev)+2);
-  *piPrev = iPos;
}

static int sqlite3Fts5PoslistWriterAppend(
@@ -212281,7 +216816,7 @@ static int fts5ConfigParseSpecial(
      rc = SQLITE_ERROR;
    }else{
      rc = sqlite3Fts5GetTokenizer(pGlobal,
-          (const char**)azArg, (int)nArg, &pConfig->pTok, &pConfig->pTokApi,
+          (const char**)azArg, (int)nArg, pConfig,
          pzErr
      );
    }
@@ -212353,9 +216888,7 @@
*/
static int fts5ConfigDefaultTokenizer(Fts5Global *pGlobal, Fts5Config *pConfig){
  assert( pConfig->pTok==0 && pConfig->pTokApi==0 );
-  return sqlite3Fts5GetTokenizer(
-      pGlobal, 0, 0, &pConfig->pTok, &pConfig->pTokApi, 0
-  );
+  return sqlite3Fts5GetTokenizer(pGlobal, 0, 0, pConfig, 0);
}

/*
@@ -212495,7 +217028,7 @@ static int sqlite3Fts5ConfigParse(
    nByte = nArg * (sizeof(char*) + sizeof(u8));
    pRet->azCol = (char**)sqlite3Fts5MallocZero(&rc, nByte);
-    pRet->abUnindexed = (u8*)&pRet->azCol[nArg];
+    pRet->abUnindexed = pRet->azCol ? (u8*)&pRet->azCol[nArg] : 0;
    pRet->zDb = sqlite3Fts5Strndup(&rc, azArg[1], -1);
    pRet->zName = sqlite3Fts5Strndup(&rc, azArg[2], -1);
    pRet->bColumnsize = 1;
@@ -213047,6 +217580,7 @@ struct Fts5Parse {
  int nPhrase;                    /* Size of apPhrase array */
  Fts5ExprPhrase **apPhrase;      /* Array of all phrases */
  Fts5ExprNode *pExpr;            /* Result of a successful parse */
+  int bPhraseToAnd;               /* Convert "a+b" to "a AND b" */
};

static void sqlite3Fts5ParseError(Fts5Parse *pParse, const char *zFmt, ...){
@@ -213135,6 +217669,7 @@ static void fts5ParseFree(void *p){ sqlite3_free(p); }

static int sqlite3Fts5ExprNew(
  Fts5Config *pConfig,            /* FTS5 Configuration */
+  int bPhraseToAnd,
  int iCol,
  const char *zExpr,              /* Expression text */
  Fts5Expr **ppNew,
@@ -213150,6 +217685,7 @@ static int sqlite3Fts5ExprNew(
  *ppNew = 0;
  *pzErr = 0;
  memset(&sParse, 0, sizeof(sParse));
+  sParse.bPhraseToAnd = bPhraseToAnd;
  pEngine = sqlite3Fts5ParserAlloc(fts5ParseAlloc);
  if( pEngine==0 ){ return SQLITE_NOMEM; }
  sParse.pConfig = pConfig;
@@ -213192,6 +217728,7 @@ static int sqlite3Fts5ExprNew(
      pNew->pConfig = pConfig;
      pNew->apExprPhrase = sParse.apPhrase;
      pNew->nPhrase = sParse.nPhrase;
+      pNew->bDesc = 0;
      sParse.apPhrase = 0;
    }
  }else{
@@ -213203,6 +217740,81 @@ static int sqlite3Fts5ExprNew(
  return sParse.rc;
}

+/*
+** This function is only called when using the special 'trigram' tokenizer.
+** Argument zText contains the text of a LIKE or GLOB pattern matched
+** against column iCol. This function creates and compiles an FTS5 MATCH
+** expression that will match a superset of the rows matched by the LIKE or
+** GLOB. If successful, SQLITE_OK is returned. Otherwise, an SQLite error
+** code.
+*/
+static int sqlite3Fts5ExprPattern(
+  Fts5Config *pConfig, int bGlob, int iCol, const char *zText, Fts5Expr **pp
+){
+  i64 nText = strlen(zText);
+  char *zExpr = (char*)sqlite3_malloc64(nText*4 + 1);
+  int rc = SQLITE_OK;
+
+  if( zExpr==0 ){
+    rc = SQLITE_NOMEM;
+  }else{
+    char aSpec[3];
+    int iOut = 0;
+    int i = 0;
+    int iFirst = 0;
+
+    if( bGlob==0 ){
+      aSpec[0] = '_';
+      aSpec[1] = '%';
+      aSpec[2] = 0;
+    }else{
+      aSpec[0] = '*';
+      aSpec[1] = '?';
+      aSpec[2] = '[';
+    }
+
+    while( i<=nText ){
+      if( i==nText
+       || zText[i]==aSpec[0] || zText[i]==aSpec[1] || zText[i]==aSpec[2]
+      ){
+        if( i-iFirst>=3 ){
+          int jj;
+          zExpr[iOut++] = '"';
+          for(jj=iFirst; jj<i; jj++){
+            zExpr[iOut++] = zText[jj];
+            if( zText[jj]=='"' ) zExpr[iOut++] = '"';
+          }
+          zExpr[iOut++] = '"';
+          zExpr[iOut++] = ' ';
+        }
+        if( zText[i]==aSpec[2] ){
+          i += 2;
+          if( zText[i-1]=='^' ) i++;
+          while( i<nText && zText[i]!=']' ) i++;
+        }
+        iFirst = i+1;
+      }
+      i++;
+    }
+    if( iOut>0 ){
+      int bAnd = 0;
+      if( pConfig->eDetail!=FTS5_DETAIL_FULL ){
+        bAnd = 1;
+        if( pConfig->eDetail==FTS5_DETAIL_NONE ){
+          iCol = pConfig->nCol;
+        }
+      }
+      zExpr[iOut] = '\0';
+      rc = sqlite3Fts5ExprNew(pConfig, bAnd, iCol, zExpr, pp,pConfig->pzErrmsg);
+    }else{
+      *pp = 0;
+    }
+    sqlite3_free(zExpr);
+  }
+
+  return rc;
+}
+
/*
** Free the expression node object passed as the only argument.
*/
@@ -214340,8 +218952,8 @@ static int sqlite3Fts5ExprFirst(Fts5Expr *p, Fts5Index *pIdx, i64 iFirst, int bD
  }

  /* If the iterator is not at a real match, skip forward until it is.
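  ** Testing rc in the loop condition below matters: if fts5ExprNodeNext()
  ** fails, pRoot->bNomatch may still be set, and iterating again without
  ** checking the error code could continue looping on a corrupt index
  ** instead of returning the error to the caller.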
*/ - while( pRoot->bNomatch ){ - assert( pRoot->bEof==0 && rc==SQLITE_OK ); + while( pRoot->bNomatch && rc==SQLITE_OK ){ + assert( pRoot->bEof==0 ); rc = fts5ExprNodeNext(p, pRoot, 0, 0); } return rc; @@ -214580,6 +219192,20 @@ static void sqlite3Fts5ParseFinished(Fts5Parse *pParse, Fts5ExprNode *p){ pParse->pExpr = p; } +static int parseGrowPhraseArray(Fts5Parse *pParse){ + if( (pParse->nPhrase % 8)==0 ){ + sqlite3_int64 nByte = sizeof(Fts5ExprPhrase*) * (pParse->nPhrase + 8); + Fts5ExprPhrase **apNew; + apNew = (Fts5ExprPhrase**)sqlite3_realloc64(pParse->apPhrase, nByte); + if( apNew==0 ){ + pParse->rc = SQLITE_NOMEM; + return SQLITE_NOMEM; + } + pParse->apPhrase = apNew; + } + return SQLITE_OK; +} + /* ** This function is called by the parser to process a string token. The ** string may or may not be quoted. In any case it is tokenized and a @@ -214615,16 +219241,9 @@ static Fts5ExprPhrase *sqlite3Fts5ParseTerm( }else{ if( pAppend==0 ){ - if( (pParse->nPhrase % 8)==0 ){ - sqlite3_int64 nByte = sizeof(Fts5ExprPhrase*) * (pParse->nPhrase + 8); - Fts5ExprPhrase **apNew; - apNew = (Fts5ExprPhrase**)sqlite3_realloc64(pParse->apPhrase, nByte); - if( apNew==0 ){ - pParse->rc = SQLITE_NOMEM; - fts5ExprPhraseFree(sCtx.pPhrase); - return 0; - } - pParse->apPhrase = apNew; + if( parseGrowPhraseArray(pParse) ){ + fts5ExprPhraseFree(sCtx.pPhrase); + return 0; } pParse->nPhrase++; } @@ -215031,6 +219650,67 @@ static void fts5ExprAddChildren(Fts5ExprNode *p, Fts5ExprNode *pSub){ } } +/* +** This function is used when parsing LIKE or GLOB patterns against +** trigram indexes that specify either detail=column or detail=none. +** It converts a phrase: +** +** abc + def + ghi +** +** into an AND tree: +** +** abc AND def AND ghi +*/ +static Fts5ExprNode *fts5ParsePhraseToAnd( + Fts5Parse *pParse, + Fts5ExprNearset *pNear +){ + int nTerm = pNear->apPhrase[0]->nTerm; + int ii; + int nByte; + Fts5ExprNode *pRet; + + assert( pNear->nPhrase==1 ); + assert( pParse->bPhraseToAnd ); + + nByte = sizeof(Fts5ExprNode) + nTerm*sizeof(Fts5ExprNode*); + pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte); + if( pRet ){ + pRet->eType = FTS5_AND; + pRet->nChild = nTerm; + fts5ExprAssignXNext(pRet); + pParse->nPhrase--; + for(ii=0; iirc, sizeof(Fts5ExprPhrase) + ); + if( pPhrase ){ + if( parseGrowPhraseArray(pParse) ){ + fts5ExprPhraseFree(pPhrase); + }else{ + pParse->apPhrase[pParse->nPhrase++] = pPhrase; + pPhrase->nTerm = 1; + pPhrase->aTerm[0].zTerm = sqlite3Fts5Strndup( + &pParse->rc, pNear->apPhrase[0]->aTerm[ii].zTerm, -1 + ); + pRet->apChild[ii] = sqlite3Fts5ParseNode(pParse, FTS5_STRING, + 0, 0, sqlite3Fts5ParseNearset(pParse, 0, pPhrase) + ); + } + } + } + + if( pParse->rc ){ + sqlite3Fts5ParseNodeFree(pRet); + pRet = 0; + }else{ + sqlite3Fts5ParseNearsetFree(pNear); + } + } + + return pRet; +} + /* ** Allocate and return a new expression object. If anything goes wrong (i.e. ** OOM error), leave an error code in pParse and return NULL. 
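**
** (For the bPhraseToAnd case handled below: a multi-term pattern phrase
** produced by sqlite3Fts5ExprPattern() on a detail=none or detail=column
** trigram index is rewritten so that, as a sketch:
**
**     "abc + def + ghi"   becomes   abc AND def AND ghi
**
** Term order cannot be verified without full position data, so the
** rewritten query matches a superset of the rows and the original
** LIKE/GLOB pattern is re-applied afterwards.)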
@@ -215055,51 +219735,58 @@ static Fts5ExprNode *sqlite3Fts5ParseNode( if( eType!=FTS5_STRING && pLeft==0 ) return pRight; if( eType!=FTS5_STRING && pRight==0 ) return pLeft; - if( eType==FTS5_NOT ){ - nChild = 2; - }else if( eType==FTS5_AND || eType==FTS5_OR ){ - nChild = 2; - if( pLeft->eType==eType ) nChild += pLeft->nChild-1; - if( pRight->eType==eType ) nChild += pRight->nChild-1; - } + if( eType==FTS5_STRING + && pParse->bPhraseToAnd + && pNear->apPhrase[0]->nTerm>1 + ){ + pRet = fts5ParsePhraseToAnd(pParse, pNear); + }else{ + if( eType==FTS5_NOT ){ + nChild = 2; + }else if( eType==FTS5_AND || eType==FTS5_OR ){ + nChild = 2; + if( pLeft->eType==eType ) nChild += pLeft->nChild-1; + if( pRight->eType==eType ) nChild += pRight->nChild-1; + } - nByte = sizeof(Fts5ExprNode) + sizeof(Fts5ExprNode*)*(nChild-1); - pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte); + nByte = sizeof(Fts5ExprNode) + sizeof(Fts5ExprNode*)*(nChild-1); + pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte); - if( pRet ){ - pRet->eType = eType; - pRet->pNear = pNear; - fts5ExprAssignXNext(pRet); - if( eType==FTS5_STRING ){ - int iPhrase; - for(iPhrase=0; iPhrasenPhrase; iPhrase++){ - pNear->apPhrase[iPhrase]->pNode = pRet; - if( pNear->apPhrase[iPhrase]->nTerm==0 ){ - pRet->xNext = 0; - pRet->eType = FTS5_EOF; + if( pRet ){ + pRet->eType = eType; + pRet->pNear = pNear; + fts5ExprAssignXNext(pRet); + if( eType==FTS5_STRING ){ + int iPhrase; + for(iPhrase=0; iPhrasenPhrase; iPhrase++){ + pNear->apPhrase[iPhrase]->pNode = pRet; + if( pNear->apPhrase[iPhrase]->nTerm==0 ){ + pRet->xNext = 0; + pRet->eType = FTS5_EOF; + } } - } - if( pParse->pConfig->eDetail!=FTS5_DETAIL_FULL ){ - Fts5ExprPhrase *pPhrase = pNear->apPhrase[0]; - if( pNear->nPhrase!=1 - || pPhrase->nTerm>1 - || (pPhrase->nTerm>0 && pPhrase->aTerm[0].bFirst) - ){ - assert( pParse->rc==SQLITE_OK ); - pParse->rc = SQLITE_ERROR; - assert( pParse->zErr==0 ); - pParse->zErr = sqlite3_mprintf( - "fts5: %s queries are not supported (detail!=full)", - pNear->nPhrase==1 ? "phrase": "NEAR" - ); - sqlite3_free(pRet); - pRet = 0; + if( pParse->pConfig->eDetail!=FTS5_DETAIL_FULL ){ + Fts5ExprPhrase *pPhrase = pNear->apPhrase[0]; + if( pNear->nPhrase!=1 + || pPhrase->nTerm>1 + || (pPhrase->nTerm>0 && pPhrase->aTerm[0].bFirst) + ){ + assert( pParse->rc==SQLITE_OK ); + pParse->rc = SQLITE_ERROR; + assert( pParse->zErr==0 ); + pParse->zErr = sqlite3_mprintf( + "fts5: %s queries are not supported (detail!=full)", + pNear->nPhrase==1 ? "phrase": "NEAR" + ); + sqlite3_free(pRet); + pRet = 0; + } } + }else{ + fts5ExprAddChildren(pRet, pLeft); + fts5ExprAddChildren(pRet, pRight); } - }else{ - fts5ExprAddChildren(pRet, pLeft); - fts5ExprAddChildren(pRet, pRight); } } } @@ -215177,6 +219864,7 @@ static Fts5ExprNode *sqlite3Fts5ParseImplicitAnd( return pRet; } +#ifdef SQLITE_TEST static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){ sqlite3_int64 nByte = 0; Fts5ExprTerm *p; @@ -215320,8 +220008,17 @@ static char *fts5ExprPrint(Fts5Config *pConfig, Fts5ExprNode *pExpr){ int iTerm; if( pNear->pColset ){ - int iCol = pNear->pColset->aiCol[0]; - zRet = fts5PrintfAppend(zRet, "%s : ", pConfig->azCol[iCol]); + int ii; + Fts5Colset *pColset = pNear->pColset; + if( pColset->nCol>1 ) zRet = fts5PrintfAppend(zRet, "{"); + for(ii=0; iinCol; ii++){ + zRet = fts5PrintfAppend(zRet, "%s%s", + pConfig->azCol[pColset->aiCol[ii]], ii==pColset->nCol-1 ? "" : " " + ); + } + if( zRet ){ + zRet = fts5PrintfAppend(zRet, "%s : ", pColset->nCol>1 ? 
"}" : ""); + } if( zRet==0 ) return 0; } @@ -215444,7 +220141,7 @@ static void fts5ExprFunction( rc = sqlite3Fts5ConfigParse(pGlobal, db, nConfig, azConfig, &pConfig, &zErr); if( rc==SQLITE_OK ){ - rc = sqlite3Fts5ExprNew(pConfig, pConfig->nCol, zExpr, &pExpr, &zErr); + rc = sqlite3Fts5ExprNew(pConfig, 0, pConfig->nCol, zExpr, &pExpr, &zErr); } if( rc==SQLITE_OK ){ char *zText; @@ -215534,12 +220231,14 @@ static void fts5ExprFold( sqlite3_result_int(pCtx, sqlite3Fts5UnicodeFold(iCode, bRemoveDiacritics)); } } +#endif /* ifdef SQLITE_TEST */ /* ** This is called during initialization to register the fts5_expr() scalar ** UDF with the SQLite handle passed as the only argument. */ static int sqlite3Fts5ExprInit(Fts5Global *pGlobal, sqlite3 *db){ +#ifdef SQLITE_TEST struct Fts5ExprFunc { const char *z; void (*x)(sqlite3_context*,int,sqlite3_value**); @@ -215557,6 +220256,10 @@ static int sqlite3Fts5ExprInit(Fts5Global *pGlobal, sqlite3 *db){ struct Fts5ExprFunc *p = &aFunc[i]; rc = sqlite3_create_function(db, p->z, -1, SQLITE_UTF8, pCtx, p->x, 0, 0); } +#else + int rc = SQLITE_OK; + UNUSED_PARAM2(pGlobal,db); +#endif /* Avoid warnings indicating that sqlite3Fts5ParserTrace() and ** sqlite3Fts5ParserFallback() are unused */ @@ -216117,7 +220820,6 @@ static int sqlite3Fts5HashWrite( p->iCol = (pHash->eDetail==FTS5_DETAIL_FULL ? 0 : -1); } - nIncr += p->nData; }else{ /* Appending to an existing hash-entry. Check that there is enough @@ -216150,8 +220852,9 @@ static int sqlite3Fts5HashWrite( /* If this is a new rowid, append the 4-byte size field for the previous ** entry, and the new rowid for this entry. */ if( iRowid!=p->iRowid ){ + u64 iDiff = (u64)iRowid - (u64)p->iRowid; fts5HashAddPoslistSize(pHash, p, 0); - p->nData += sqlite3Fts5PutVarint(&pPtr[p->nData], iRowid - p->iRowid); + p->nData += sqlite3Fts5PutVarint(&pPtr[p->nData], iDiff); p->iRowid = iRowid; bNew = 1; p->iSzPoslist = p->nData; @@ -216803,7 +221506,7 @@ struct Fts5SegIter { int iLeafPgno; /* Current leaf page number */ Fts5Data *pLeaf; /* Current leaf data */ Fts5Data *pNextLeaf; /* Leaf page (iLeafPgno+1) */ - int iLeafOffset; /* Byte offset within current leaf */ + i64 iLeafOffset; /* Byte offset within current leaf */ /* Next method */ void (*xNext)(Fts5Index*, Fts5SegIter*, int*); @@ -217983,7 +222686,7 @@ static void fts5SegIterLoadNPos(Fts5Index *p, Fts5SegIter *pIter){ static void fts5SegIterLoadRowid(Fts5Index *p, Fts5SegIter *pIter){ u8 *a = pIter->pLeaf->p; /* Buffer to read data from */ - int iOff = pIter->iLeafOffset; + i64 iOff = pIter->iLeafOffset; ASSERT_SZLEAF_OK(pIter->pLeaf); if( iOff>=pIter->pLeaf->szLeaf ){ @@ -218016,7 +222719,7 @@ static void fts5SegIterLoadRowid(Fts5Index *p, Fts5SegIter *pIter){ */ static void fts5SegIterLoadTerm(Fts5Index *p, Fts5SegIter *pIter, int nKeep){ u8 *a = pIter->pLeaf->p; /* Buffer to read data from */ - int iOff = pIter->iLeafOffset; /* Offset to read at */ + i64 iOff = pIter->iLeafOffset; /* Offset to read at */ int nNew; /* Bytes of new data */ iOff += fts5GetVarint32(&a[iOff], nNew); @@ -218126,7 +222829,7 @@ static void fts5SegIterReverseInitPage(Fts5Index *p, Fts5SegIter *pIter){ ASSERT_SZLEAF_OK(pIter->pLeaf); while( 1 ){ - i64 iDelta = 0; + u64 iDelta = 0; if( eDetail==FTS5_DETAIL_NONE ){ /* todo */ @@ -218141,7 +222844,7 @@ static void fts5SegIterReverseInitPage(Fts5Index *p, Fts5SegIter *pIter){ i += nPos; } if( i>=n ) break; - i += fts5GetVarint(&a[i], (u64*)&iDelta); + i += fts5GetVarint(&a[i], &iDelta); pIter->iRowid += iDelta; /* If necessary, grow the 
pIter->aRowidOffset[] array. */ @@ -218240,7 +222943,7 @@ static void fts5SegIterNext_Reverse( if( pIter->iRowidOffset>0 ){ u8 *a = pIter->pLeaf->p; int iOff; - i64 iDelta; + u64 iDelta; pIter->iRowidOffset--; pIter->iLeafOffset = pIter->aRowidOffset[pIter->iRowidOffset]; @@ -218249,7 +222952,7 @@ static void fts5SegIterNext_Reverse( if( p->pConfig->eDetail!=FTS5_DETAIL_NONE ){ iOff += pIter->nPos; } - fts5GetVarint(&a[iOff], (u64*)&iDelta); + fts5GetVarint(&a[iOff], &iDelta); pIter->iRowid -= iDelta; }else{ fts5SegIterReverseNewPage(p, pIter); @@ -218442,14 +223145,9 @@ static void fts5SegIterNext( }else{ /* The following could be done by calling fts5SegIterLoadNPos(). But ** this block is particularly performance critical, so equivalent - ** code is inlined. - ** - ** Later: Switched back to fts5SegIterLoadNPos() because it supports - ** detail=none mode. Not ideal. - */ + ** code is inlined. */ int nSz; - assert( p->rc==SQLITE_OK ); - assert( pIter->iLeafOffset<=pIter->pLeaf->nn ); + assert_nc( pIter->iLeafOffset<=pIter->pLeaf->nn ); fts5FastGetVarint32(pIter->pLeaf->p, pIter->iLeafOffset, nSz); pIter->bDel = (nSz & 0x0001); pIter->nPos = nSz>>1; @@ -219441,7 +224139,7 @@ static void fts5ChunkIterate( int pgno = pSeg->iLeafPgno; int pgnoSave = 0; - /* This function does notmwork with detail=none databases. */ + /* This function does not work with detail=none databases. */ assert( p->pConfig->eDetail!=FTS5_DETAIL_NONE ); if( (pSeg->flags & FTS5_SEGITER_REVERSE)==0 ){ @@ -219454,6 +224152,9 @@ static void fts5ChunkIterate( fts5DataRelease(pData); if( nRem<=0 ){ break; + }else if( pSeg->pSeg==0 ){ + p->rc = FTS5_CORRUPT; + return; }else{ pgno++; pData = fts5LeafRead(p, FTS5_SEGMENT_ROWID(pSeg->pSeg->iSegid, pgno)); @@ -219505,66 +224206,72 @@ static void fts5SegiterPoslist( } /* -** IN/OUT parameter (*pa) points to a position list n bytes in size. If -** the position list contains entries for column iCol, then (*pa) is set -** to point to the sub-position-list for that column and the number of -** bytes in it returned. Or, if the argument position list does not -** contain any entries for column iCol, return 0. +** Parameter pPos points to a buffer containing a position list, size nPos. +** This function filters it according to pColset (which must be non-NULL) +** and sets pIter->base.pData/nData to point to the new position list. +** If memory is required for the new position list, use buffer pIter->poslist. +** Or, if the new position list is a contiguous subset of the input, set +** pIter->base.pData/nData to point directly to it. +** +** This function is a no-op if *pRc is other than SQLITE_OK when it is +** called. If an OOM error is encountered, *pRc is set to SQLITE_NOMEM +** before returning. */ -static int fts5IndexExtractCol( - const u8 **pa, /* IN/OUT: Pointer to poslist */ - int n, /* IN: Size of poslist in bytes */ - int iCol /* Column to extract from poslist */ -){ - int iCurrent = 0; /* Anything before the first 0x01 is col 0 */ - const u8 *p = *pa; - const u8 *pEnd = &p[n]; /* One byte past end of position list */ - - while( iCol>iCurrent ){ - /* Advance pointer p until it points to pEnd or an 0x01 byte that is - ** not part of a varint. Note that it is not possible for a negative - ** or extremely large varint to occur within an uncorrupted position - ** list. So the last byte of each varint may be assumed to have a clear - ** 0x80 bit. 
*/
-    while( *p!=0x01 ){
-      while( *p++ & 0x80 );
-      if( p>=pEnd ) return 0;
-    }
-    *pa = p++;
-    iCurrent = *p++;
-    if( iCurrent & 0x80 ){
-      p--;
-      p += fts5GetVarint32(p, iCurrent);
-    }
-  }
-  if( iCol!=iCurrent ) return 0;
-
-  /* Advance pointer p until it points to pEnd or an 0x01 byte that is
-  ** not part of a varint */
-  while( p<pEnd && *p!=0x01 ){
-    while( *p++ & 0x80 );
-  }
-
-  return p - (*pa);
-}
-
-static void fts5IndexExtractColset(
-  int *pRc,
-  Fts5Colset *pColset,            /* Colset to filter on */
-  const u8 *pPos, int nPos,       /* Position list */
-  Fts5Buffer *pBuf                /* Output buffer */
-){
-  if( *pRc==SQLITE_OK ){
-    int i;
-    fts5BufferZero(pBuf);
-    for(i=0; i<pColset->nCol; i++){
-      const u8 *pSub = pPos;
-      int nSub = fts5IndexExtractCol(&pSub, nPos, pColset->aiCol[i]);
-      if( nSub ){
-        fts5BufferAppendBlob(pRc, pBuf, nSub, pSub);
+static void fts5IndexExtractColset(
+  int *pRc,
+  Fts5Colset *pColset,            /* Colset to filter on */
+  const u8 *pPos, int nPos,       /* Position list */
+  Fts5Iter *pIter                 /* Iterator to populate */
+){
+  if( *pRc==SQLITE_OK ){
+    const u8 *p = pPos;
+    const u8 *aCopy = p;
+    const u8 *pEnd = &p[nPos];    /* One byte past end of position list */
+    int i = 0;
+    int iCurrent = 0;
+
+    if( pColset->nCol>1 && sqlite3Fts5BufferSize(pRc, &pIter->poslist, nPos) ){
+      return;
+    }
+
+    while( 1 ){
+      while( pColset->aiCol[i]<iCurrent ){
+        i++;
+        if( i==pColset->nCol ){
+          pIter->base.pData = pIter->poslist.p;
+          pIter->base.nData = pIter->poslist.n;
+          return;
+        }
+      }
+
+      /* Advance pointer p until it points to pEnd or an 0x01 byte that is
+      ** not part of a varint */
+      while( p<pEnd && *p!=0x01 ){
+        while( *p++ & 0x80 );
+      }
+
+      if( pColset->aiCol[i]==iCurrent ){
+        if( pColset->nCol==1 ){
+          pIter->base.pData = aCopy;
+          pIter->base.nData = p-aCopy;
+          return;
+        }
+        fts5BufferSafeAppendBlob(&pIter->poslist, aCopy, p-aCopy);
+      }
+      if( p>=pEnd ){
+        pIter->base.pData = pIter->poslist.p;
+        pIter->base.nData = pIter->poslist.n;
+        return;
+      }
+      aCopy = p++;
+      iCurrent = *p++;
+      if( iCurrent & 0x80 ){
+        p--;
+        p += fts5GetVarint32(p, iCurrent);
+      }
+    }
+  }
+}

/*
@@ -219684,16 +224391,9 @@ static void fts5IterSetOutputs_Full(Fts5Iter *pIter, Fts5SegIter *pSeg){
    /* All data is stored on the current page. Populate the output
    ** variables to point into the body of the page object. */
    const u8 *a = &pSeg->pLeaf->p[pSeg->iLeafOffset];
-    if( pColset->nCol==1 ){
-      pIter->base.nData = fts5IndexExtractCol(&a, pSeg->nPos,pColset->aiCol[0]);
-      pIter->base.pData = a;
-    }else{
-      int *pRc = &pIter->pIndex->rc;
-      fts5BufferZero(&pIter->poslist);
-      fts5IndexExtractColset(pRc, pColset, a, pSeg->nPos, &pIter->poslist);
-      pIter->base.pData = pIter->poslist.p;
-      pIter->base.nData = pIter->poslist.n;
-    }
+    int *pRc = &pIter->pIndex->rc;
+    fts5BufferZero(&pIter->poslist);
+    fts5IndexExtractColset(pRc, pColset, a, pSeg->nPos, pIter);
  }else{
    /* The data is distributed over two or more pages. Copy it into the
    ** Fts5Iter.poslist buffer and then set the output pointer to point
@@ -220915,14 +225615,14 @@ static void fts5FlushOneHash(Fts5Index *p){
          fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist);
        }else{
          i64 iRowid = 0;
-          i64 iDelta = 0;
+          u64 iDelta = 0;
          int iOff = 0;

          /* The entire doclist will not fit on this leaf. The following
          ** loop iterates through the poslists that make up the current
          ** doclist.
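          **
          ** Each entry in the doclist being flushed here is a rowid delta
          ** (a varint) followed by a size field and the position list
          ** itself. The size field stores (nPos*2 + bDel), keeping the
          ** delete flag in the low bit: a 7-byte position list for a live
          ** row is written as the varint 14, or as 15 for a delete marker.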
*/ while( p->rc==SQLITE_OK && iOffaPoslist + pIter->nSize + pIter->nPoslist; - assert( pIter->aPoslist ); + assert( pIter->aPoslist || (p==0 && pIter->aPoslist==0) ); if( p>=pIter->aEof ){ pIter->aPoslist = 0; }else{ @@ -221196,6 +225896,9 @@ static void fts5DoclistIterNext(Fts5DoclistIter *pIter){ } pIter->aPoslist = p; + if( &pIter->aPoslist[pIter->nPoslist]>pIter->aEof ){ + pIter->aPoslist = 0; + } } } @@ -221204,9 +225907,11 @@ static void fts5DoclistIterInit( Fts5DoclistIter *pIter ){ memset(pIter, 0, sizeof(*pIter)); - pIter->aPoslist = pBuf->p; - pIter->aEof = &pBuf->p[pBuf->n]; - fts5DoclistIterNext(pIter); + if( pBuf->n>0 ){ + pIter->aPoslist = pBuf->p; + pIter->aEof = &pBuf->p[pBuf->n]; + fts5DoclistIterNext(pIter); + } } #if 0 @@ -221260,16 +225965,20 @@ static void fts5NextRowid(Fts5Buffer *pBuf, int *piOff, i64 *piRowid){ static void fts5MergeRowidLists( Fts5Index *p, /* FTS5 backend object */ Fts5Buffer *p1, /* First list to merge */ - Fts5Buffer *p2 /* Second list to merge */ + int nBuf, /* Number of entries in apBuf[] */ + Fts5Buffer *aBuf /* Array of other lists to merge into p1 */ ){ int i1 = 0; int i2 = 0; i64 iRowid1 = 0; i64 iRowid2 = 0; i64 iOut = 0; - + Fts5Buffer *p2 = &aBuf[0]; Fts5Buffer out; + + (void)nBuf; memset(&out, 0, sizeof(out)); + assert( nBuf==1 ); sqlite3Fts5BufferSize(&p->rc, &out, p1->n + p2->n); if( p->rc ) return; @@ -221296,177 +226005,214 @@ static void fts5MergeRowidLists( fts5BufferFree(&out); } +typedef struct PrefixMerger PrefixMerger; +struct PrefixMerger { + Fts5DoclistIter iter; /* Doclist iterator */ + i64 iPos; /* For iterating through a position list */ + int iOff; + u8 *aPos; + PrefixMerger *pNext; /* Next in docid/poslist order */ +}; + +static void fts5PrefixMergerInsertByRowid( + PrefixMerger **ppHead, + PrefixMerger *p +){ + if( p->iter.aPoslist ){ + PrefixMerger **pp = ppHead; + while( *pp && p->iter.iRowid>(*pp)->iter.iRowid ){ + pp = &(*pp)->pNext; + } + p->pNext = *pp; + *pp = p; + } +} + +static void fts5PrefixMergerInsertByPosition( + PrefixMerger **ppHead, + PrefixMerger *p +){ + if( p->iPos>=0 ){ + PrefixMerger **pp = ppHead; + while( *pp && p->iPos>(*pp)->iPos ){ + pp = &(*pp)->pNext; + } + p->pNext = *pp; + *pp = p; + } +} + + /* -** Buffers p1 and p2 contain doclists. This function merges the content -** of the two doclists together and sets buffer p1 to the result before -** returning. -** -** If an error occurs, an error code is left in p->rc. If an error has -** already occurred, this function is a no-op. +** Array aBuf[] contains nBuf doclists. These are all merged in with the +** doclist in buffer p1. */ static void fts5MergePrefixLists( Fts5Index *p, /* FTS5 backend object */ Fts5Buffer *p1, /* First list to merge */ - Fts5Buffer *p2 /* Second list to merge */ -){ - if( p2->n ){ - i64 iLastRowid = 0; - Fts5DoclistIter i1; - Fts5DoclistIter i2; - Fts5Buffer out = {0, 0, 0}; - Fts5Buffer tmp = {0, 0, 0}; - - /* The maximum size of the output is equal to the sum of the two - ** input sizes + 1 varint (9 bytes). The extra varint is because if the - ** first rowid in one input is a large negative number, and the first in - ** the other a non-negative number, the delta for the non-negative - ** number will be larger on disk than the literal integer value - ** was. - ** - ** Or, if the input position-lists are corrupt, then the output might - ** include up to 2 extra 10-byte positions created by interpreting -1 - ** (the value PoslistNext64() uses for EOF) as a position and appending - ** it to the output. 
This can happen at most once for each input - ** position-list, hence two 10 byte paddings. */ - if( sqlite3Fts5BufferSize(&p->rc, &out, p1->n + p2->n + 9+10+10) ) return; - fts5DoclistIterInit(p1, &i1); - fts5DoclistIterInit(p2, &i2); + int nBuf, /* Number of buffers in array aBuf[] */ + Fts5Buffer *aBuf /* Other lists to merge in */ +){ +#define fts5PrefixMergerNextPosition(p) \ + sqlite3Fts5PoslistNext64((p)->aPos,(p)->iter.nPoslist,&(p)->iOff,&(p)->iPos) +#define FTS5_MERGE_NLIST 16 + PrefixMerger aMerger[FTS5_MERGE_NLIST]; + PrefixMerger *pHead = 0; + int i; + int nOut = 0; + Fts5Buffer out = {0, 0, 0}; + Fts5Buffer tmp = {0, 0, 0}; + i64 iLastRowid = 0; + + /* Initialize a doclist-iterator for each input buffer. Arrange them in + ** a linked-list starting at pHead in ascending order of rowid. Avoid + ** linking any iterators already at EOF into the linked list at all. */ + assert( nBuf+1<=sizeof(aMerger)/sizeof(aMerger[0]) ); + memset(aMerger, 0, sizeof(PrefixMerger)*(nBuf+1)); + pHead = &aMerger[nBuf]; + fts5DoclistIterInit(p1, &pHead->iter); + for(i=0; in + 9 + 10*nBuf; + + /* The maximum size of the output is equal to the sum of the + ** input sizes + 1 varint (9 bytes). The extra varint is because if the + ** first rowid in one input is a large negative number, and the first in + ** the other a non-negative number, the delta for the non-negative + ** number will be larger on disk than the literal integer value + ** was. + ** + ** Or, if the input position-lists are corrupt, then the output might + ** include up to (nBuf+1) extra 10-byte positions created by interpreting -1 + ** (the value PoslistNext64() uses for EOF) as a position and appending + ** it to the output. This can happen at most once for each input + ** position-list, hence (nBuf+1) 10 byte paddings. */ + if( sqlite3Fts5BufferSize(&p->rc, &out, nOut) ) return; + + while( pHead ){ + fts5MergeAppendDocid(&out, iLastRowid, pHead->iter.iRowid); + + if( pHead->pNext && iLastRowid==pHead->pNext->iter.iRowid ){ + /* Merge data from two or more poslists */ + i64 iPrev = 0; + int nTmp = FTS5_DATA_ZERO_PADDING; + int nMerge = 0; + PrefixMerger *pSave = pHead; + PrefixMerger *pThis = 0; + int nTail = 0; + + pHead = 0; + while( pSave && pSave->iter.iRowid==iLastRowid ){ + PrefixMerger *pNext = pSave->pNext; + pSave->iOff = 0; + pSave->iPos = 0; + pSave->aPos = &pSave->iter.aPoslist[pSave->iter.nSize]; + fts5PrefixMergerNextPosition(pSave); + nTmp += pSave->iter.nPoslist + 10; + nMerge++; + fts5PrefixMergerInsertByPosition(&pHead, pSave); + pSave = pNext; + } + + if( pHead==0 || pHead->pNext==0 ){ + p->rc = FTS5_CORRUPT; + break; + } - while( 1 ){ - if( i1.iRowidp) + (i2.aPoslist-p2->p)+9+10+10) ); - } - else if( i2.iRowid!=i1.iRowid ){ - /* Copy entry from i2 */ - fts5MergeAppendDocid(&out, iLastRowid, i2.iRowid); - fts5BufferSafeAppendBlob(&out, i2.aPoslist, i2.nPoslist+i2.nSize); - fts5DoclistIterNext(&i2); - if( i2.aPoslist==0 ) break; - assert( out.n<=((i1.aPoslist-p1->p) + (i2.aPoslist-p2->p)+9+10+10) ); + /* See the earlier comment in this function for an explanation of why + ** corrupt input position lists might cause the output to consume + ** at most nMerge*10 bytes of unexpected space. */ + if( sqlite3Fts5BufferSize(&p->rc, &tmp, nTmp+nMerge*10) ){ + break; } - else{ - /* Merge the two position lists. 
*/ - i64 iPos1 = 0; - i64 iPos2 = 0; - int iOff1 = 0; - int iOff2 = 0; - u8 *a1 = &i1.aPoslist[i1.nSize]; - u8 *a2 = &i2.aPoslist[i2.nSize]; - int nCopy; - u8 *aCopy; - - i64 iPrev = 0; - Fts5PoslistWriter writer; - memset(&writer, 0, sizeof(writer)); - - /* See the earlier comment in this function for an explanation of why - ** corrupt input position lists might cause the output to consume - ** at most 20 bytes of unexpected space. */ - fts5MergeAppendDocid(&out, iLastRowid, i2.iRowid); - fts5BufferZero(&tmp); - sqlite3Fts5BufferSize(&p->rc, &tmp, i1.nPoslist + i2.nPoslist + 10 + 10); - if( p->rc ) break; - - sqlite3Fts5PoslistNext64(a1, i1.nPoslist, &iOff1, &iPos1); - sqlite3Fts5PoslistNext64(a2, i2.nPoslist, &iOff2, &iPos2); - assert_nc( iPos1>=0 && iPos2>=0 ); - - if( iPos1=0 && iPos2>=0 ){ - while( 1 ){ - if( iPos1=0 ){ - if( iPos1!=iPrev ){ - sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, iPos1); - } - aCopy = &a1[iOff1]; - nCopy = i1.nPoslist - iOff1; - }else{ - assert_nc( iPos2>=0 && iPos2!=iPrev ); - sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, iPos2); - aCopy = &a2[iOff2]; - nCopy = i2.nPoslist - iOff2; - } - if( nCopy>0 ){ - fts5BufferSafeAppendBlob(&tmp, aCopy, nCopy); + pThis = pHead; + pHead = pThis->pNext; + sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, pThis->iPos); + fts5PrefixMergerNextPosition(pThis); + fts5PrefixMergerInsertByPosition(&pHead, pThis); + + while( pHead->pNext ){ + pThis = pHead; + if( pThis->iPos!=iPrev ){ + sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, pThis->iPos); } + fts5PrefixMergerNextPosition(pThis); + pHead = pThis->pNext; + fts5PrefixMergerInsertByPosition(&pHead, pThis); + } - /* WRITEPOSLISTSIZE */ - assert_nc( tmp.n<=i1.nPoslist+i2.nPoslist ); - assert( tmp.n<=i1.nPoslist+i2.nPoslist+10+10 ); - if( tmp.n>i1.nPoslist+i2.nPoslist ){ - if( p->rc==SQLITE_OK ) p->rc = FTS5_CORRUPT; - break; + if( pHead->iPos!=iPrev ){ + sqlite3Fts5PoslistSafeAppend(&tmp, &iPrev, pHead->iPos); + } + nTail = pHead->iter.nPoslist - pHead->iOff; + + /* WRITEPOSLISTSIZE */ + assert_nc( tmp.n+nTail<=nTmp ); + assert( tmp.n+nTail<=nTmp+nMerge*10 ); + if( tmp.n+nTail>nTmp-FTS5_DATA_ZERO_PADDING ){ + if( p->rc==SQLITE_OK ) p->rc = FTS5_CORRUPT; + break; + } + fts5BufferSafeAppendVarint(&out, (tmp.n+nTail) * 2); + fts5BufferSafeAppendBlob(&out, tmp.p, tmp.n); + if( nTail>0 ){ + fts5BufferSafeAppendBlob(&out, &pHead->aPos[pHead->iOff], nTail); + } + + pHead = pSave; + for(i=0; iiter.aPoslist && pX->iter.iRowid==iLastRowid ){ + fts5DoclistIterNext(&pX->iter); + fts5PrefixMergerInsertByRowid(&pHead, pX); } - fts5BufferSafeAppendVarint(&out, tmp.n * 2); - fts5BufferSafeAppendBlob(&out, tmp.p, tmp.n); - fts5DoclistIterNext(&i1); - fts5DoclistIterNext(&i2); - assert_nc( out.n<=(p1->n+p2->n+9) ); - if( i1.aPoslist==0 || i2.aPoslist==0 ) break; - assert( out.n<=((i1.aPoslist-p1->p) + (i2.aPoslist-p2->p)+9+10+10) ); } - } - if( i1.aPoslist ){ - fts5MergeAppendDocid(&out, iLastRowid, i1.iRowid); - fts5BufferSafeAppendBlob(&out, i1.aPoslist, i1.aEof - i1.aPoslist); - } - else if( i2.aPoslist ){ - fts5MergeAppendDocid(&out, iLastRowid, i2.iRowid); - fts5BufferSafeAppendBlob(&out, i2.aPoslist, i2.aEof - i2.aPoslist); + }else{ + /* Copy poslist from pHead to output */ + PrefixMerger *pThis = pHead; + Fts5DoclistIter *pI = &pThis->iter; + fts5BufferSafeAppendBlob(&out, pI->aPoslist, pI->nPoslist+pI->nSize); + fts5DoclistIterNext(pI); + pHead = pThis->pNext; + fts5PrefixMergerInsertByRowid(&pHead, pThis); } - assert_nc( out.n<=(p1->n+p2->n+9) ); - - fts5BufferSet(&p->rc, p1, out.n, out.p); - 
fts5BufferFree(&tmp); - fts5BufferFree(&out); } + + fts5BufferFree(p1); + fts5BufferFree(&tmp); + memset(&out.p[out.n], 0, FTS5_DATA_ZERO_PADDING); + *p1 = out; } static void fts5SetupPrefixIter( Fts5Index *p, /* Index to read from */ int bDesc, /* True for "ORDER BY rowid DESC" */ - const u8 *pToken, /* Buffer containing prefix to match */ + int iIdx, /* Index to scan for data */ + u8 *pToken, /* Buffer containing prefix to match */ int nToken, /* Size of buffer pToken in bytes */ Fts5Colset *pColset, /* Restrict matches to these columns */ Fts5Iter **ppIter /* OUT: New iterator */ ){ Fts5Structure *pStruct; Fts5Buffer *aBuf; - const int nBuf = 32; + int nBuf = 32; + int nMerge = 1; - void (*xMerge)(Fts5Index*, Fts5Buffer*, Fts5Buffer*); + void (*xMerge)(Fts5Index*, Fts5Buffer*, int, Fts5Buffer*); void (*xAppend)(Fts5Index*, i64, Fts5Iter*, Fts5Buffer*); if( p->pConfig->eDetail==FTS5_DETAIL_NONE ){ xMerge = fts5MergeRowidLists; xAppend = fts5AppendRowid; }else{ + nMerge = FTS5_MERGE_NLIST-1; + nBuf = nMerge*8; /* Sufficient to merge (16^8)==(2^32) lists */ xMerge = fts5MergePrefixLists; xAppend = fts5AppendPoslist; } @@ -221486,6 +226232,27 @@ static void fts5SetupPrefixIter( int bNewTerm = 1; memset(&doclist, 0, sizeof(doclist)); + if( iIdx!=0 ){ + int dummy = 0; + const int f2 = FTS5INDEX_QUERY_SKIPEMPTY|FTS5INDEX_QUERY_NOOUTPUT; + pToken[0] = FTS5_MAIN_PREFIX; + fts5MultiIterNew(p, pStruct, f2, pColset, pToken, nToken, -1, 0, &p1); + fts5IterSetOutputCb(&p->rc, p1); + for(; + fts5MultiIterEof(p, p1)==0; + fts5MultiIterNext2(p, p1, &dummy) + ){ + Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ]; + p1->xSetOutputs(p1, pSeg); + if( p1->base.nData ){ + xAppend(p, p1->base.iRowid-iLastRowid, p1, &doclist); + iLastRowid = p1->base.iRowid; + } + } + fts5MultiIterFree(p1); + } + + pToken[0] = FTS5_MAIN_PREFIX + iIdx; fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1); fts5IterSetOutputCb(&p->rc, p1); for( /* no-op */ ; @@ -221506,13 +226273,21 @@ static void fts5SetupPrefixIter( if( p1->base.iRowid<=iLastRowid && doclist.n>0 ){ for(i=0; p->rc==SQLITE_OK && doclist.n; i++){ - assert( ibase.iRowid; } - for(i=0; irc==SQLITE_OK ){ - xMerge(p, &doclist, &aBuf[i]); + xMerge(p, &doclist, nMerge, &aBuf[i]); + } + for(iFree=i; iFreerc, &buf, nToken+1)==0 ){ int iIdx = 0; /* Index to search */ + int iPrefixIdx = 0; /* +1 prefix index */ if( nToken ) memcpy(&buf.p[1], pToken, nToken); /* Figure out which index to search and set iIdx accordingly. 
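  ** (iPrefixIdx records a small optimization: when no prefix index of
  ** exactly nChar characters exists but one of nChar+1 characters does,
  ** that index already aggregates the doclists of every matching term
  ** longer than the prefix itself, so the scan only needs to merge in
  ** the exact-length term from the main index rather than visiting each
  ** matching term individually.)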
If this @@ -221802,7 +226582,9 @@ static int sqlite3Fts5IndexQuery( if( flags & FTS5INDEX_QUERY_PREFIX ){ int nChar = fts5IndexCharlen(pToken, nToken); for(iIdx=1; iIdx<=pConfig->nPrefix; iIdx++){ - if( pConfig->aPrefix[iIdx-1]==nChar ) break; + int nIdxChar = pConfig->aPrefix[iIdx-1]; + if( nIdxChar==nChar ) break; + if( nIdxChar==nChar+1 ) iPrefixIdx = iIdx; } } @@ -221819,8 +226601,7 @@ static int sqlite3Fts5IndexQuery( }else{ /* Scan multiple terms in the main index */ int bDesc = (flags & FTS5INDEX_QUERY_DESC)!=0; - buf.p[0] = FTS5_MAIN_PREFIX; - fts5SetupPrefixIter(p, bDesc, buf.p, nToken+1, pColset, &pRet); + fts5SetupPrefixIter(p, bDesc, iPrefixIdx, buf.p, nToken+1, pColset,&pRet); assert( p->rc!=SQLITE_OK || pRet->pColset==0 ); fts5IterSetOutputCb(&p->rc, pRet); if( p->rc==SQLITE_OK ){ @@ -221893,8 +226674,9 @@ static int sqlite3Fts5IterNextFrom(Fts5IndexIter *pIndexIter, i64 iMatch){ static const char *sqlite3Fts5IterTerm(Fts5IndexIter *pIndexIter, int *pn){ int n; const char *z = (const char*)fts5MultiIterTerm((Fts5Iter*)pIndexIter, &n); + assert_nc( z || n<=1 ); *pn = n-1; - return &z[1]; + return (z ? &z[1] : 0); } /* @@ -222431,7 +227213,7 @@ static void fts5IndexIntegrityCheckSegment( ** error, or some other SQLite error code if another error (e.g. OOM) ** occurs. */ -static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum){ +static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum, int bUseCksum){ int eDetail = p->pConfig->eDetail; u64 cksum2 = 0; /* Checksum based on contents of indexes */ Fts5Buffer poslist = {0,0,0}; /* Buffer used to hold a poslist */ @@ -222492,6 +227274,7 @@ static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum){ }else{ poslist.n = 0; fts5SegiterPoslist(p, &pIter->aSeg[pIter->aFirst[1].iFirst], 0, &poslist); + fts5BufferAppendBlob(&p->rc, &poslist, 4, (const u8*)"\0\0\0\0"); while( 0==sqlite3Fts5PoslistNext64(poslist.p, poslist.n, &iOff, &iPos) ){ int iCol = FTS5_POS2COLUMN(iPos); int iTokOff = FTS5_POS2OFFSET(iPos); @@ -222502,7 +227285,7 @@ static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum){ fts5TestTerm(p, &term, 0, 0, cksum2, &cksum3); fts5MultiIterFree(pIter); - if( p->rc==SQLITE_OK && cksum!=cksum2 ) p->rc = FTS5_CORRUPT; + if( p->rc==SQLITE_OK && bUseCksum && cksum!=cksum2 ) p->rc = FTS5_CORRUPT; fts5StructureRelease(pStruct); #ifdef SQLITE_DEBUG @@ -222518,6 +227301,7 @@ static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum){ ** function only. */ +#ifdef SQLITE_TEST /* ** Decode a segment-data rowid from the %_data table. This function is ** the opposite of macro FTS5_SEGMENT_ROWID(). @@ -222540,7 +227324,9 @@ static void fts5DecodeRowid( *piSegid = (int)(iRowid & (((i64)1 << FTS5_DATA_ID_B) - 1)); } +#endif /* SQLITE_TEST */ +#ifdef SQLITE_TEST static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){ int iSegid, iHeight, iPgno, bDlidx; /* Rowid compenents */ fts5DecodeRowid(iKey, &iSegid, &bDlidx, &iHeight, &iPgno); @@ -222558,7 +227344,9 @@ static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){ ); } } +#endif /* SQLITE_TEST */ +#ifdef SQLITE_TEST static void fts5DebugStructure( int *pRc, /* IN/OUT: error code */ Fts5Buffer *pBuf, @@ -222580,7 +227368,9 @@ static void fts5DebugStructure( sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "}"); } } +#endif /* SQLITE_TEST */ +#ifdef SQLITE_TEST /* ** This is part of the fts5_decode() debugging aid. 
** @@ -222605,7 +227395,9 @@ static void fts5DecodeStructure( fts5DebugStructure(pRc, pBuf, p); fts5StructureRelease(p); } +#endif /* SQLITE_TEST */ +#ifdef SQLITE_TEST /* ** This is part of the fts5_decode() debugging aid. ** @@ -222628,7 +227420,9 @@ static void fts5DecodeAverages( zSpace = " "; } } +#endif /* SQLITE_TEST */ +#ifdef SQLITE_TEST /* ** Buffer (a/n) is assumed to contain a list of serialized varints. Read ** each varint and append its string representation to buffer pBuf. Return @@ -222645,7 +227439,9 @@ static int fts5DecodePoslist(int *pRc, Fts5Buffer *pBuf, const u8 *a, int n){ } return iOff; } +#endif /* SQLITE_TEST */ +#ifdef SQLITE_TEST /* ** The start of buffer (a/n) contains the start of a doclist. The doclist ** may or may not finish within the buffer. This function appends a text @@ -222678,7 +227474,9 @@ static int fts5DecodeDoclist(int *pRc, Fts5Buffer *pBuf, const u8 *a, int n){ return iOff; } +#endif /* SQLITE_TEST */ +#ifdef SQLITE_TEST /* ** This function is part of the fts5_decode() debugging function. It is ** only ever used with detail=none tables. @@ -222719,7 +227517,9 @@ static void fts5DecodeRowidList( sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " %lld%s", iRowid, zApp); } } +#endif /* SQLITE_TEST */ +#ifdef SQLITE_TEST /* ** The implementation of user-defined scalar function fts5_decode(). */ @@ -222928,7 +227728,9 @@ static void fts5DecodeFunction( } fts5BufferFree(&s); } +#endif /* SQLITE_TEST */ +#ifdef SQLITE_TEST /* ** The implementation of user-defined scalar function fts5_rowid(). */ @@ -222962,6 +227764,7 @@ static void fts5RowidFunction( } } } +#endif /* SQLITE_TEST */ /* ** This is called as part of registering the FTS5 module with database @@ -222972,6 +227775,7 @@ static void fts5RowidFunction( ** SQLite error code is returned instead. */ static int sqlite3Fts5IndexInit(sqlite3 *db){ +#ifdef SQLITE_TEST int rc = sqlite3_create_function( db, "fts5_decode", 2, SQLITE_UTF8, 0, fts5DecodeFunction, 0, 0 ); @@ -222989,6 +227793,10 @@ static int sqlite3Fts5IndexInit(sqlite3 *db){ ); } return rc; +#else + return SQLITE_OK; + UNUSED_PARAM(db); +#endif } @@ -223024,7 +227832,9 @@ static int sqlite3Fts5IndexReset(Fts5Index *p){ ** assert() conditions in the fts5 code are activated - conditions that are ** only true if it is guaranteed that the fts5 database is not corrupt. */ +#ifdef SQLITE_DEBUG SQLITE_API int sqlite3_fts5_may_be_corrupt = 1; +#endif typedef struct Fts5Auxdata Fts5Auxdata; @@ -223466,6 +228276,23 @@ static void fts5SetUniqueFlag(sqlite3_index_info *pIdxInfo){ #endif } +static int fts5UsePatternMatch( + Fts5Config *pConfig, + struct sqlite3_index_constraint *p +){ + assert( FTS5_PATTERN_GLOB==SQLITE_INDEX_CONSTRAINT_GLOB ); + assert( FTS5_PATTERN_LIKE==SQLITE_INDEX_CONSTRAINT_LIKE ); + if( pConfig->ePattern==FTS5_PATTERN_GLOB && p->op==FTS5_PATTERN_GLOB ){ + return 1; + } + if( pConfig->ePattern==FTS5_PATTERN_LIKE + && (p->op==FTS5_PATTERN_LIKE || p->op==FTS5_PATTERN_GLOB) + ){ + return 1; + } + return 0; +} + /* ** Implementation of the xBestIndex method for FTS5 tables. 
Within the ** WHERE constraint, it searches for the following: @@ -223495,7 +228322,9 @@ static void fts5SetUniqueFlag(sqlite3_index_info *pIdxInfo){ ** ** Match against table column: "m" ** Match against rank column: "r" -** Match against other column: "" +** Match against other column: "M" +** LIKE against other column: "L" +** GLOB against other column: "G" ** Equality constraint against the rowid: "=" ** A < or <= against the rowid: "<" ** A > or >= against the rowid: ">" @@ -223556,7 +228385,7 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ return SQLITE_ERROR; } - idxStr = (char*)sqlite3_malloc(pInfo->nConstraint * 6 + 1); + idxStr = (char*)sqlite3_malloc(pInfo->nConstraint * 8 + 1); if( idxStr==0 ) return SQLITE_NOMEM; pInfo->idxStr = idxStr; pInfo->needToFreeIdxStr = 1; @@ -223580,25 +228409,29 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ if( bSeenRank ) continue; idxStr[iIdxStr++] = 'r'; bSeenRank = 1; - }else{ + }else if( iCol>=0 ){ bSeenMatch = 1; - idxStr[iIdxStr++] = 'm'; - if( iColaConstraintUsage[i].argvIndex = ++iCons; pInfo->aConstraintUsage[i].omit = 1; } - } - else if( p->usable && bSeenEq==0 - && p->op==SQLITE_INDEX_CONSTRAINT_EQ && iCol<0 - ){ - idxStr[iIdxStr++] = '='; - bSeenEq = 1; - pInfo->aConstraintUsage[i].argvIndex = ++iCons; + }else if( p->usable ){ + if( iCol>=0 && iColop==FTS5_PATTERN_LIKE || p->op==FTS5_PATTERN_GLOB ); + idxStr[iIdxStr++] = p->op==FTS5_PATTERN_LIKE ? 'L' : 'G'; + sqlite3_snprintf(6, &idxStr[iIdxStr], "%d", iCol); + idxStr += strlen(&idxStr[iIdxStr]); + pInfo->aConstraintUsage[i].argvIndex = ++iCons; + assert( idxStr[iIdxStr]=='\0' ); + }else if( bSeenEq==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ && iCol<0 ){ + idxStr[iIdxStr++] = '='; + bSeenEq = 1; + pInfo->aConstraintUsage[i].argvIndex = ++iCons; + } } } @@ -224231,19 +229064,14 @@ static int fts5FilterMethod( case 'r': pRank = apVal[i]; break; - case 'm': { + case 'M': { const char *zText = (const char*)sqlite3_value_text(apVal[i]); if( zText==0 ) zText = ""; - - if( idxStr[iIdxStr]>='0' && idxStr[iIdxStr]<='9' ){ - iCol = 0; - do{ - iCol = iCol*10 + (idxStr[iIdxStr]-'0'); - iIdxStr++; - }while( idxStr[iIdxStr]>='0' && idxStr[iIdxStr]<='9' ); - }else{ - iCol = pConfig->nCol; - } + iCol = 0; + do{ + iCol = iCol*10 + (idxStr[iIdxStr]-'0'); + iIdxStr++; + }while( idxStr[iIdxStr]>='0' && idxStr[iIdxStr]<='9' ); if( zText[0]=='*' ){ /* The user has issued a query of the form "MATCH '*...'". 
This @@ -224253,7 +229081,7 @@ static int fts5FilterMethod( goto filter_out; }else{ char **pzErr = &pTab->p.base.zErrMsg; - rc = sqlite3Fts5ExprNew(pConfig, iCol, zText, &pExpr, pzErr); + rc = sqlite3Fts5ExprNew(pConfig, 0, iCol, zText, &pExpr, pzErr); if( rc==SQLITE_OK ){ rc = sqlite3Fts5ExprAnd(&pCsr->pExpr, pExpr); pExpr = 0; @@ -224263,6 +229091,25 @@ static int fts5FilterMethod( break; } + case 'L': + case 'G': { + int bGlob = (idxStr[iIdxStr-1]=='G'); + const char *zText = (const char*)sqlite3_value_text(apVal[i]); + iCol = 0; + do{ + iCol = iCol*10 + (idxStr[iIdxStr]-'0'); + iIdxStr++; + }while( idxStr[iIdxStr]>='0' && idxStr[iIdxStr]<='9' ); + if( zText ){ + rc = sqlite3Fts5ExprPattern(pConfig, bGlob, iCol, zText, &pExpr); + } + if( rc==SQLITE_OK ){ + rc = sqlite3Fts5ExprAnd(&pCsr->pExpr, pExpr); + pExpr = 0; + } + if( rc!=SQLITE_OK ) goto filter_out; + break; + } case '=': pRowidEq = apVal[i]; break; @@ -224510,7 +229357,8 @@ static int fts5SpecialInsert( int nMerge = sqlite3_value_int(pVal); rc = sqlite3Fts5StorageMerge(pTab->pStorage, nMerge); }else if( 0==sqlite3_stricmp("integrity-check", zCmd) ){ - rc = sqlite3Fts5StorageIntegrity(pTab->pStorage); + int iArg = sqlite3_value_int(pVal); + rc = sqlite3Fts5StorageIntegrity(pTab->pStorage, iArg); #ifdef SQLITE_DEBUG }else if( 0==sqlite3_stricmp("prefix-index", zCmd) ){ pConfig->bPrefixIndex = sqlite3_value_int(pVal); @@ -224911,13 +229759,15 @@ static int fts5CacheInstArray(Fts5Cursor *pCsr){ nInst++; if( nInst>=pCsr->nInstAlloc ){ - pCsr->nInstAlloc = pCsr->nInstAlloc ? pCsr->nInstAlloc*2 : 32; + int nNewSize = pCsr->nInstAlloc ? pCsr->nInstAlloc*2 : 32; aInst = (int*)sqlite3_realloc64( - pCsr->aInst, pCsr->nInstAlloc*sizeof(int)*3 + pCsr->aInst, nNewSize*sizeof(int)*3 ); if( aInst ){ pCsr->aInst = aInst; + pCsr->nInstAlloc = nNewSize; }else{ + nInst--; rc = SQLITE_NOMEM; break; } @@ -225141,7 +229991,8 @@ static int fts5ApiPhraseFirst( int n; int rc = fts5CsrPoslist(pCsr, iPhrase, &pIter->a, &n); if( rc==SQLITE_OK ){ - pIter->b = &pIter->a[n]; + assert( pIter->a || n==0 ); + pIter->b = (pIter->a ? &pIter->a[n] : 0); *piCol = 0; *piOff = 0; fts5ApiPhraseNext(pCtx, pIter, piCol, piOff); @@ -225200,7 +230051,8 @@ static int fts5ApiPhraseFirstColumn( rc = sqlite3Fts5ExprPhraseCollist(pCsr->pExpr, iPhrase, &pIter->a, &n); } if( rc==SQLITE_OK ){ - pIter->b = &pIter->a[n]; + assert( pIter->a || n==0 ); + pIter->b = (pIter->a ? &pIter->a[n] : 0); *piCol = 0; fts5ApiPhraseNextColumn(pCtx, pIter, piCol); } @@ -225208,7 +230060,8 @@ static int fts5ApiPhraseFirstColumn( int n; rc = fts5CsrPoslist(pCsr, iPhrase, &pIter->a, &n); if( rc==SQLITE_OK ){ - pIter->b = &pIter->a[n]; + assert( pIter->a || n==0 ); + pIter->b = (pIter->a ? 
&pIter->a[n] : 0); if( n<=0 ){ *piCol = -1; }else if( pIter->a[0]==0x01 ){ @@ -225673,8 +230526,7 @@ static int sqlite3Fts5GetTokenizer( Fts5Global *pGlobal, const char **azArg, int nArg, - Fts5Tokenizer **ppTok, - fts5_tokenizer **ppTokApi, + Fts5Config *pConfig, char **pzErr ){ Fts5TokenizerModule *pMod; @@ -225686,16 +230538,22 @@ static int sqlite3Fts5GetTokenizer( rc = SQLITE_ERROR; *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]); }else{ - rc = pMod->x.xCreate(pMod->pUserData, &azArg[1], (nArg?nArg-1:0), ppTok); - *ppTokApi = &pMod->x; - if( rc!=SQLITE_OK && pzErr ){ - *pzErr = sqlite3_mprintf("error in tokenizer constructor"); + rc = pMod->x.xCreate( + pMod->pUserData, (azArg?&azArg[1]:0), (nArg?nArg-1:0), &pConfig->pTok + ); + pConfig->pTokApi = &pMod->x; + if( rc!=SQLITE_OK ){ + if( pzErr ) *pzErr = sqlite3_mprintf("error in tokenizer constructor"); + }else{ + pConfig->ePattern = sqlite3Fts5TokenizerPattern( + pMod->x.xCreate, pConfig->pTok + ); } } if( rc!=SQLITE_OK ){ - *ppTokApi = 0; - *ppTok = 0; + pConfig->pTokApi = 0; + pConfig->pTok = 0; } return rc; @@ -225744,7 +230602,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2020-08-14 13:23:32 fca8dc8b578f215a969cd899336378966156154710873e68b3d9ac5881b0ff3f", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2021-06-18 18:36:39 5c9a6c06871cb9fe42814af9c039eb6da5427a6ec28f187af7ebfb62eafa66e5", -1, SQLITE_TRANSIENT); } /* @@ -226307,9 +231165,16 @@ static int fts5StorageDeleteFromIndex( zText, nText, (void*)&ctx, fts5StorageInsertCallback ); p->aTotalSize[iCol-1] -= (i64)ctx.szCol; + if( p->aTotalSize[iCol-1]<0 ){ + rc = FTS5_CORRUPT; + } } } - p->nTotalRow--; + if( rc==SQLITE_OK && p->nTotalRow<1 ){ + rc = FTS5_CORRUPT; + }else{ + p->nTotalRow--; + } rc2 = sqlite3_reset(pSeek); if( rc==SQLITE_OK ) rc = rc2; @@ -226752,13 +231617,14 @@ static int fts5StorageIntegrityCallback( ** some other SQLite error code if an error occurs while attempting to ** determine this. */ -static int sqlite3Fts5StorageIntegrity(Fts5Storage *p){ +static int sqlite3Fts5StorageIntegrity(Fts5Storage *p, int iArg){ Fts5Config *pConfig = p->pConfig; - int rc; /* Return code */ + int rc = SQLITE_OK; /* Return code */ int *aColSize; /* Array of size pConfig->nCol */ i64 *aTotalSize; /* Array of size pConfig->nCol */ Fts5IntegrityCtx ctx; sqlite3_stmt *pScan; + int bUseCksum; memset(&ctx, 0, sizeof(Fts5IntegrityCtx)); ctx.pConfig = p->pConfig; @@ -226767,83 +231633,88 @@ static int sqlite3Fts5StorageIntegrity(Fts5Storage *p){ aColSize = (int*)&aTotalSize[pConfig->nCol]; memset(aTotalSize, 0, sizeof(i64) * pConfig->nCol); - /* Generate the expected index checksum based on the contents of the - ** %_content table. This block stores the checksum in ctx.cksum. 
*/ - rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, 0); - if( rc==SQLITE_OK ){ - int rc2; - while( SQLITE_ROW==sqlite3_step(pScan) ){ - int i; - ctx.iRowid = sqlite3_column_int64(pScan, 0); - ctx.szCol = 0; - if( pConfig->bColumnsize ){ - rc = sqlite3Fts5StorageDocsize(p, ctx.iRowid, aColSize); - } - if( rc==SQLITE_OK && pConfig->eDetail==FTS5_DETAIL_NONE ){ - rc = sqlite3Fts5TermsetNew(&ctx.pTermset); - } - for(i=0; rc==SQLITE_OK && inCol; i++){ - if( pConfig->abUnindexed[i] ) continue; - ctx.iCol = i; + bUseCksum = (pConfig->eContent==FTS5_CONTENT_NORMAL + || (pConfig->eContent==FTS5_CONTENT_EXTERNAL && iArg) + ); + if( bUseCksum ){ + /* Generate the expected index checksum based on the contents of the + ** %_content table. This block stores the checksum in ctx.cksum. */ + rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, 0); + if( rc==SQLITE_OK ){ + int rc2; + while( SQLITE_ROW==sqlite3_step(pScan) ){ + int i; + ctx.iRowid = sqlite3_column_int64(pScan, 0); ctx.szCol = 0; - if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ - rc = sqlite3Fts5TermsetNew(&ctx.pTermset); - } - if( rc==SQLITE_OK ){ - const char *zText = (const char*)sqlite3_column_text(pScan, i+1); - int nText = sqlite3_column_bytes(pScan, i+1); - rc = sqlite3Fts5Tokenize(pConfig, - FTS5_TOKENIZE_DOCUMENT, - zText, nText, - (void*)&ctx, - fts5StorageIntegrityCallback - ); + if( pConfig->bColumnsize ){ + rc = sqlite3Fts5StorageDocsize(p, ctx.iRowid, aColSize); } - if( rc==SQLITE_OK && pConfig->bColumnsize && ctx.szCol!=aColSize[i] ){ - rc = FTS5_CORRUPT; + if( rc==SQLITE_OK && pConfig->eDetail==FTS5_DETAIL_NONE ){ + rc = sqlite3Fts5TermsetNew(&ctx.pTermset); } - aTotalSize[i] += ctx.szCol; - if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ - sqlite3Fts5TermsetFree(ctx.pTermset); - ctx.pTermset = 0; + for(i=0; rc==SQLITE_OK && inCol; i++){ + if( pConfig->abUnindexed[i] ) continue; + ctx.iCol = i; + ctx.szCol = 0; + if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ + rc = sqlite3Fts5TermsetNew(&ctx.pTermset); + } + if( rc==SQLITE_OK ){ + const char *zText = (const char*)sqlite3_column_text(pScan, i+1); + int nText = sqlite3_column_bytes(pScan, i+1); + rc = sqlite3Fts5Tokenize(pConfig, + FTS5_TOKENIZE_DOCUMENT, + zText, nText, + (void*)&ctx, + fts5StorageIntegrityCallback + ); + } + if( rc==SQLITE_OK && pConfig->bColumnsize && ctx.szCol!=aColSize[i] ){ + rc = FTS5_CORRUPT; + } + aTotalSize[i] += ctx.szCol; + if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ + sqlite3Fts5TermsetFree(ctx.pTermset); + ctx.pTermset = 0; + } } - } - sqlite3Fts5TermsetFree(ctx.pTermset); - ctx.pTermset = 0; + sqlite3Fts5TermsetFree(ctx.pTermset); + ctx.pTermset = 0; - if( rc!=SQLITE_OK ) break; + if( rc!=SQLITE_OK ) break; + } + rc2 = sqlite3_reset(pScan); + if( rc==SQLITE_OK ) rc = rc2; } - rc2 = sqlite3_reset(pScan); - if( rc==SQLITE_OK ) rc = rc2; - } - /* Test that the "totals" (sometimes called "averages") record looks Ok */ - if( rc==SQLITE_OK ){ - int i; - rc = fts5StorageLoadTotals(p, 0); - for(i=0; rc==SQLITE_OK && inCol; i++){ - if( p->aTotalSize[i]!=aTotalSize[i] ) rc = FTS5_CORRUPT; + /* Test that the "totals" (sometimes called "averages") record looks Ok */ + if( rc==SQLITE_OK ){ + int i; + rc = fts5StorageLoadTotals(p, 0); + for(i=0; rc==SQLITE_OK && inCol; i++){ + if( p->aTotalSize[i]!=aTotalSize[i] ) rc = FTS5_CORRUPT; + } } - } - /* Check that the %_docsize and %_content tables contain the expected - ** number of rows. 
*/ - if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_NORMAL ){ - i64 nRow = 0; - rc = fts5StorageCount(p, "content", &nRow); - if( rc==SQLITE_OK && nRow!=p->nTotalRow ) rc = FTS5_CORRUPT; - } - if( rc==SQLITE_OK && pConfig->bColumnsize ){ - i64 nRow = 0; - rc = fts5StorageCount(p, "docsize", &nRow); - if( rc==SQLITE_OK && nRow!=p->nTotalRow ) rc = FTS5_CORRUPT; + /* Check that the %_docsize and %_content tables contain the expected + ** number of rows. */ + if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_NORMAL ){ + i64 nRow = 0; + rc = fts5StorageCount(p, "content", &nRow); + if( rc==SQLITE_OK && nRow!=p->nTotalRow ) rc = FTS5_CORRUPT; + } + if( rc==SQLITE_OK && pConfig->bColumnsize ){ + i64 nRow = 0; + rc = fts5StorageCount(p, "docsize", &nRow); + if( rc==SQLITE_OK && nRow!=p->nTotalRow ) rc = FTS5_CORRUPT; + } } /* Pass the expected checksum down to the FTS index module. It will ** verify, amongst other things, that it matches the checksum generated by ** inspecting the index itself. */ if( rc==SQLITE_OK ){ - rc = sqlite3Fts5IndexIntegrityCheck(p->pIndex, ctx.cksum); + rc = sqlite3Fts5IndexIntegrityCheck(p->pIndex, ctx.cksum, bUseCksum); } sqlite3_free(aTotalSize); @@ -228285,6 +233156,133 @@ static int fts5PorterTokenize( ); } +/************************************************************************** +** Start of trigram implementation. +*/ +typedef struct TrigramTokenizer TrigramTokenizer; +struct TrigramTokenizer { + int bFold; /* True to fold to lower-case */ +}; + +/* +** Free a trigram tokenizer. +*/ +static void fts5TriDelete(Fts5Tokenizer *p){ + sqlite3_free(p); +} + +/* +** Allocate a trigram tokenizer. +*/ +static int fts5TriCreate( + void *pUnused, + const char **azArg, + int nArg, + Fts5Tokenizer **ppOut +){ + int rc = SQLITE_OK; + TrigramTokenizer *pNew = (TrigramTokenizer*)sqlite3_malloc(sizeof(*pNew)); + UNUSED_PARAM(pUnused); + if( pNew==0 ){ + rc = SQLITE_NOMEM; + }else{ + int i; + pNew->bFold = 1; + for(i=0; rc==SQLITE_OK && ibFold = (zArg[0]=='0'); + } + }else{ + rc = SQLITE_ERROR; + } + } + if( rc!=SQLITE_OK ){ + fts5TriDelete((Fts5Tokenizer*)pNew); + pNew = 0; + } + } + *ppOut = (Fts5Tokenizer*)pNew; + return rc; +} + +/* +** Trigram tokenizer tokenize routine. +*/ +static int fts5TriTokenize( + Fts5Tokenizer *pTok, + void *pCtx, + int unusedFlags, + const char *pText, int nText, + int (*xToken)(void*, int, const char*, int, int, int) +){ + TrigramTokenizer *p = (TrigramTokenizer*)pTok; + int rc = SQLITE_OK; + char aBuf[32]; + const unsigned char *zIn = (const unsigned char*)pText; + const unsigned char *zEof = &zIn[nText]; + u32 iCode; + + UNUSED_PARAM(unusedFlags); + while( 1 ){ + char *zOut = aBuf; + int iStart = zIn - (const unsigned char*)pText; + const unsigned char *zNext; + + READ_UTF8(zIn, zEof, iCode); + if( iCode==0 ) break; + zNext = zIn; + if( zInbFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); + WRITE_UTF8(zOut, iCode); + READ_UTF8(zIn, zEof, iCode); + if( iCode==0 ) break; + }else{ + break; + } + if( zInbFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); + WRITE_UTF8(zOut, iCode); + READ_UTF8(zIn, zEof, iCode); + if( iCode==0 ) break; + if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); + WRITE_UTF8(zOut, iCode); + }else{ + break; + } + rc = xToken(pCtx, 0, aBuf, zOut-aBuf, iStart, iStart + zOut-aBuf); + if( rc!=SQLITE_OK ) break; + zIn = zNext; + } + + return rc; +} + +/* +** Argument xCreate is a pointer to a constructor function for a tokenizer. +** pTok is a tokenizer previously created using the same method. 
This function +** returns one of FTS5_PATTERN_NONE, FTS5_PATTERN_LIKE or FTS5_PATTERN_GLOB +** indicating the style of pattern matching that the tokenizer can support. +** In practice, this is: +** +** "trigram" tokenizer, case_sensitive=1 - FTS5_PATTERN_GLOB +** "trigram" tokenizer, case_sensitive=0 (the default) - FTS5_PATTERN_LIKE +** all other tokenizers - FTS5_PATTERN_NONE +*/ +static int sqlite3Fts5TokenizerPattern( + int (*xCreate)(void*, const char**, int, Fts5Tokenizer**), + Fts5Tokenizer *pTok +){ + if( xCreate==fts5TriCreate ){ + TrigramTokenizer *p = (TrigramTokenizer*)pTok; + return p->bFold ? FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB; + } + return FTS5_PATTERN_NONE; +} + /* ** Register all built-in tokenizers with FTS5. */ @@ -228296,6 +233294,7 @@ static int sqlite3Fts5TokenizerInit(fts5_api *pApi){ { "unicode61", {fts5UnicodeCreate, fts5UnicodeDelete, fts5UnicodeTokenize}}, { "ascii", {fts5AsciiCreate, fts5AsciiDelete, fts5AsciiTokenize }}, { "porter", {fts5PorterCreate, fts5PorterDelete, fts5PorterTokenize }}, + { "trigram", {fts5TriCreate, fts5TriDelete, fts5TriTokenize}}, }; int rc = SQLITE_OK; /* Return code */ @@ -229088,8 +234087,10 @@ static void sqlite3Fts5UnicodeAscii(u8 *aArray, u8 *aAscii){ } iTbl++; } + aAscii[0] = 0; /* 0x00 is never a token character */ } + /* ** 2015 May 30 ** @@ -230527,9 +235528,9 @@ SQLITE_API int sqlite3_stmt_init( #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_STMTVTAB) */ /************** End of stmt.c ************************************************/ -#if __LINE__!=230527 +#if __LINE__!=235528 #undef SQLITE_SOURCE_ID -#define SQLITE_SOURCE_ID "2020-08-14 13:23:32 fca8dc8b578f215a969cd899336378966156154710873e68b3d9ac5881b0alt2" +#define SQLITE_SOURCE_ID "2021-06-18 18:36:39 5c9a6c06871cb9fe42814af9c039eb6da5427a6ec28f187af7ebfb62eafaalt2" #endif /* Return the source-id for this library */ SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; } diff --git a/database/sqlite/sqlite3.h b/database/sqlite/sqlite3.h index d200c7a2b..3274bbe07 100644 --- a/database/sqlite/sqlite3.h +++ b/database/sqlite/sqlite3.h @@ -123,9 +123,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.33.0" -#define SQLITE_VERSION_NUMBER 3033000 -#define SQLITE_SOURCE_ID "2020-08-14 13:23:32 fca8dc8b578f215a969cd899336378966156154710873e68b3d9ac5881b0alt1" +#define SQLITE_VERSION "3.36.0" +#define SQLITE_VERSION_NUMBER 3036000 +#define SQLITE_SOURCE_ID "2021-06-18 18:36:39 5c9a6c06871cb9fe42814af9c039eb6da5427a6ec28f187af7ebfb62eafa66e5" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -504,6 +504,7 @@ SQLITE_API int sqlite3_exec( #define SQLITE_IOERR_COMMIT_ATOMIC (SQLITE_IOERR | (30<<8)) #define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8)) #define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8)) +#define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8)) #define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) #define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8)) #define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) @@ -1127,6 +1128,23 @@ struct sqlite3_io_methods { ** file to the database file, but before the *-shm file is updated to ** record the fact that the pages have been checkpointed. ** +** +**
• [[SQLITE_FCNTL_EXTERNAL_READER]] +** The EXPERIMENTAL [SQLITE_FCNTL_EXTERNAL_READER] opcode is used to detect +** whether there is a database client in another process with a wal-mode +** transaction open on the database. It is only available on unix. The +** (void*) argument passed with this file-control should be a pointer to a +** value of type (int). The integer value is set to 1 if the database is a wal +** mode database and there exists at least one client in another process that +** currently has an SQL transaction open on the database. It is set to 0 if +** the database is not a wal-mode db, or if there is no such connection in any +** other process. This opcode cannot be used to detect transactions opened +** by clients within the current process, only within other processes. +** +** +**
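As a usage illustration (an editorial sketch, not part of this patch; it assumes an already-open connection db and the standard sqlite3_file_control() interface):

    int bReader = 0;
    /* Ask the "main" database whether a client in another process currently
    ** holds a wal-mode transaction on it. bReader stays 0 for non-wal files
    ** or when no such external transaction exists. */
    int rc = sqlite3_file_control(db, "main", SQLITE_FCNTL_EXTERNAL_READER, &bReader);
    if( rc==SQLITE_OK && bReader ){
      /* at least one external reader holds an open transaction */
    }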
  • [[SQLITE_FCNTL_CKSM_FILE]] +** Used by the cksmvfs VFS module only. +** */ #define SQLITE_FCNTL_LOCKSTATE 1 #define SQLITE_FCNTL_GET_LOCKPROXYFILE 2 @@ -1166,6 +1184,8 @@ struct sqlite3_io_methods { #define SQLITE_FCNTL_CKPT_DONE 37 #define SQLITE_FCNTL_RESERVE_BYTES 38 #define SQLITE_FCNTL_CKPT_START 39 +#define SQLITE_FCNTL_EXTERNAL_READER 40 +#define SQLITE_FCNTL_CKSM_FILE 41 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE @@ -2114,7 +2134,13 @@ struct sqlite3_mem_methods { ** The second parameter is a pointer to an integer into which ** is written 0 or 1 to indicate whether triggers are disabled or enabled ** following this call. The second parameter may be a NULL pointer, in -** which case the trigger setting is not reported back. +** which case the trigger setting is not reported back. +** +**
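For example (an editorial sketch, not part of this diff; db is assumed to be an open connection), the trigger setting is changed and read back in one sqlite3_db_config() call:

    int bEnabled = -1;
    /* Third argument: 1 to enable triggers, 0 to disable, negative to leave
    ** the setting unchanged. bEnabled receives the setting in force after
    ** the call. */
    sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_TRIGGER, -1, &bEnabled);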

    Originally this option disabled all triggers. ^(However, since +** SQLite version 3.35.0, TEMP triggers are still allowed even if +** this option is off. So, in other words, this option now only disables +** triggers in the main database schema or in the schemas of ATTACH-ed +** databases.)^ ** ** [[SQLITE_DBCONFIG_ENABLE_VIEW]] **

    SQLITE_DBCONFIG_ENABLE_VIEW
    @@ -2125,7 +2151,13 @@ struct sqlite3_mem_methods { ** The second parameter is a pointer to an integer into which ** is written 0 or 1 to indicate whether views are disabled or enabled ** following this call. The second parameter may be a NULL pointer, in -** which case the view setting is not reported back. +** which case the view setting is not reported back. +** +**

    Originally this option disabled all views. ^(However, since +** SQLite version 3.35.0, TEMP views are still allowed even if +** this option is off. So, in other words, this option now only disables +** views in the main database schema or in the schemas of ATTACH-ed +** databases.)^ ** ** [[SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER]] **

    SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER
    @@ -3498,6 +3530,7 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** that uses dot-files in place of posix advisory locking. **
  • file:data.db?mode=readonly ** An error. "readonly" is not a valid option for the "mode" parameter. +** Use "ro" instead: "file:data.db?mode=ro". **
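A brief sketch of the corrected form (editorial example, not from this patch; error handling elided):

    sqlite3 *db = 0;
    /* "mode=ro" is the valid spelling; SQLITE_OPEN_URI enables URI
    ** filename parsing for this call. */
    int rc = sqlite3_open_v2("file:data.db?mode=ro", &db,
                             SQLITE_OPEN_READONLY | SQLITE_OPEN_URI, 0);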
** ** ^URI hexadecimal escape sequences (%HH) are supported within the path and @@ -3696,7 +3729,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*); ** If the Y parameter to sqlite3_free_filename(Y) is anything other ** than a NULL pointer or a pointer previously acquired from ** sqlite3_create_filename(), then bad things such as heap -** corruption or segfaults may occur. The value Y should be +** corruption or segfaults may occur. The value Y should not be ** used again after sqlite3_free_filename(Y) has been called. This means ** that if the [sqlite3_vfs.xOpen()] method of a VFS has been called using Y, ** then the corresponding [sqlite3_module.xClose()] method should also be @@ -4165,6 +4198,15 @@ SQLITE_API const char *sqlite3_normalized_sql(sqlite3_stmt *pStmt); ** [BEGIN] merely sets internal flags, but the [BEGIN|BEGIN IMMEDIATE] and ** [BEGIN|BEGIN EXCLUSIVE] commands do touch the database and so ** sqlite3_stmt_readonly() returns false for those commands. +** +** ^This routine returns false if there is any possibility that the +** statement might change the database file. ^A false return does +** not guarantee that the statement will change the database file. +** ^For example, an UPDATE statement might have a WHERE clause that +** makes it a no-op, but the sqlite3_stmt_readonly() result would still +** be false. ^Similarly, a CREATE TABLE IF NOT EXISTS statement is a +** read-only no-op if the table already exists, but +** sqlite3_stmt_readonly() still returns false for such a statement. */ SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); @@ -4334,18 +4376,22 @@ typedef struct sqlite3_context sqlite3_context; ** contain embedded NULs. The result of expressions involving strings ** with embedded NULs is undefined. ** -** ^The fifth argument to the BLOB and string binding interfaces -** is a destructor used to dispose of the BLOB or -** string after SQLite has finished with it. ^The destructor is called -** to dispose of the BLOB or string even if the call to the bind API fails, -** except the destructor is not called if the third parameter is a NULL -** pointer or the fourth parameter is negative. -** ^If the fifth argument is -** the special value [SQLITE_STATIC], then SQLite assumes that the -** information is in static, unmanaged space and does not need to be freed. -** ^If the fifth argument has the value [SQLITE_TRANSIENT], then -** SQLite makes its own private copy of the data immediately, before -** the sqlite3_bind_*() routine returns. +** ^The fifth argument to the BLOB and string binding interfaces controls +** or indicates the lifetime of the object referenced by the third parameter. +** These three options exist: +** ^ (1) A destructor to dispose of the BLOB or string after SQLite has finished +** with it may be passed. ^It is called to dispose of the BLOB or string even +** if the call to the bind API fails, except the destructor is not called if +** the third parameter is a NULL pointer or the fourth parameter is negative. +** ^ (2) The special constant, [SQLITE_STATIC], may be passed to indicate that +** the application remains responsible for disposing of the object. ^In this +** case, the object and the provided pointer to it must remain valid until +** either the prepared statement is finalized or the same SQL parameter is +** bound to something else, whichever occurs sooner. +** ^ (3) The constant, [SQLITE_TRANSIENT], may be passed to indicate that the +** object is to be copied prior to the return from sqlite3_bind_*(). 
^The +** object and pointer to it must remain valid until then. ^SQLite will then +** manage the lifetime of its private copy. ** ** ^The sixth argument to sqlite3_bind_text64() must be one of ** [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE] @@ -5087,7 +5133,6 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); ** within VIEWs, TRIGGERs, CHECK constraints, generated column expressions, ** index expressions, or the WHERE clause of partial indexes. ** -** ** For best security, the [SQLITE_DIRECTONLY] flag is recommended for ** all application-defined SQL functions that do not need to be ** used inside of triggers, view, CHECK constraints, or other elements of @@ -5097,7 +5142,6 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); ** a database file to include invocations of the function with parameters ** chosen by the attacker, which the application will then execute when ** the database file is opened and read. -** ** ** ^(The fifth parameter is an arbitrary pointer. The implementation of the ** function can gain access to this pointer using [sqlite3_user_data()].)^ @@ -6186,6 +6230,57 @@ SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName); */ SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName); +/* +** CAPI3REF: Determine the transaction state of a database +** METHOD: sqlite3 +** +** ^The sqlite3_txn_state(D,S) interface returns the current +** [transaction state] of schema S in database connection D. ^If S is NULL, +** then the highest transaction state of any schema on database connection D +** is returned. Transaction states are (in order of lowest to highest): +**
      +**
    1. SQLITE_TXN_NONE +**
    2. SQLITE_TXN_READ +**
    3. SQLITE_TXN_WRITE +**
    +** ^If the S argument to sqlite3_txn_state(D,S) is not the name of +** a valid schema, then -1 is returned. +*/ +SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); + +/* +** CAPI3REF: Allowed return values from [sqlite3_txn_state()] +** KEYWORDS: {transaction state} +** +** These constants define the current transaction state of a database file. +** ^The [sqlite3_txn_state(D,S)] interface returns one of these +** constants in order to describe the transaction state of schema S +** in [database connection] D. +** +**
    +** [[SQLITE_TXN_NONE]]
    SQLITE_TXN_NONE
    +**
    The SQLITE_TXN_NONE state means that no transaction is currently +** pending.
    +** +** [[SQLITE_TXN_READ]]
    SQLITE_TXN_READ
    +**
The SQLITE_TXN_READ state means that the database is currently +** in a read transaction. Content has been read from the database file +** but nothing in the database file has changed. The transaction state +** will advance to SQLITE_TXN_WRITE if any changes occur and there are +** no other conflicting concurrent write transactions. The transaction +** state will revert to SQLITE_TXN_NONE following a [ROLLBACK] or +** [COMMIT].
    +** +** [[SQLITE_TXN_WRITE]]
    SQLITE_TXN_WRITE
    +**
The SQLITE_TXN_WRITE state means that the database is currently +** in a write transaction. Content has been written to the database file +** but has not yet been committed. The transaction state will change +** to SQLITE_TXN_NONE at the next [ROLLBACK] or [COMMIT].
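Read together, the interface and its constants are typically consumed as in the following editorial sketch (not part of this patch; db is assumed to be an open connection):

    /* Inspect the transaction state of the "main" schema. */
    switch( sqlite3_txn_state(db, "main") ){
      case SQLITE_TXN_NONE:  /* no transaction pending on "main" */  break;
      case SQLITE_TXN_READ:  /* read transaction open, no writes */  break;
      case SQLITE_TXN_WRITE: /* uncommitted writes in progress */    break;
      default:               /* -1: "main" is not a valid schema */  break;
    }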
+*/ +#define SQLITE_TXN_NONE 0 +#define SQLITE_TXN_READ 1 +#define SQLITE_TXN_WRITE 2 + /* ** CAPI3REF: Find the next prepared statement ** METHOD: sqlite3 @@ -7712,7 +7807,10 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_RESULT_INTREAL 27 #define SQLITE_TESTCTRL_PRNG_SEED 28 #define SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS 29 -#define SQLITE_TESTCTRL_LAST 29 /* Largest TESTCTRL */ +#define SQLITE_TESTCTRL_SEEK_COUNT 30 +#define SQLITE_TESTCTRL_TRACEFLAGS 31 +#define SQLITE_TESTCTRL_TUNE 32 +#define SQLITE_TESTCTRL_LAST 32 /* Largest TESTCTRL */ /* ** CAPI3REF: SQL Keyword Checking @@ -9192,10 +9290,11 @@ SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *); ** CAPI3REF: Determine If Virtual Table Column Access Is For UPDATE ** ** If the sqlite3_vtab_nochange(X) routine is called within the [xColumn] -** method of a [virtual table], then it returns true if and only if the +** method of a [virtual table], then it might return true if the ** column is being fetched as part of an UPDATE operation during which the -** column value will not change. Applications might use this to substitute -** a return value that is less expensive to compute and that the corresponding +** column value will not change. The virtual table implementation can use +** this hint as permission to substitute a return value that is less +** expensive to compute and that the corresponding ** [xUpdate] method understands as a "no-change" value. ** ** If the [xColumn] method calls sqlite3_vtab_nochange() and finds that @@ -9204,6 +9303,12 @@ SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *); ** any of the [sqlite3_result_int|sqlite3_result_xxxxx() interfaces]. ** In that case, [sqlite3_value_nochange(X)] will return true for the ** same column in the [xUpdate] method. +** +** The sqlite3_vtab_nochange() routine is an optimization. Virtual table +** implementations should continue to give a correct answer even if the +** sqlite3_vtab_nochange() interface were to always return false. In the +** current implementation, the sqlite3_vtab_nochange() interface always +** returns false for the enhanced [UPDATE FROM] statement. */ SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*); @@ -9345,6 +9450,7 @@ SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt*); /* ** CAPI3REF: Flush caches to disk mid-transaction +** METHOD: sqlite3 ** ** ^If a write-transaction is open on [database connection] D when the ** [sqlite3_db_cacheflush(D)] interface is invoked, any dirty @@ -9377,6 +9483,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); /* ** CAPI3REF: The pre-update hook. +** METHOD: sqlite3 ** ** ^These interfaces are only available if SQLite is compiled using the ** [SQLITE_ENABLE_PREUPDATE_HOOK] compile-time option. @@ -9417,7 +9524,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** seventh parameter is the final rowid value of the row being inserted ** or updated. The value of the seventh parameter passed to the callback ** function is not defined for operations on WITHOUT ROWID tables, or for -** INSERT operations on rowid tables. +** DELETE operations on rowid tables. ** ** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()], ** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces @@ -9455,6 +9562,15 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** triggers; or 2 for changes resulting from triggers called by top-level ** triggers; and so forth. 
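A minimal registration sketch for this hook (editorial, not part of this patch; it assumes a build with SQLITE_ENABLE_PREUPDATE_HOOK and an open connection db), before the blob-write special case described next:

    static void xPreUpdate(
      void *pCtx, sqlite3 *db, int op,
      const char *zDb, const char *zName,
      sqlite3_int64 iKey1, sqlite3_int64 iKey2
    ){
      /* op is SQLITE_INSERT, SQLITE_UPDATE or SQLITE_DELETE; depth 0
      ** means a direct change rather than one made by a trigger. */
      if( sqlite3_preupdate_depth(db)==0 ){
        /* inspect the pending change here */
      }
      (void)pCtx; (void)op; (void)zDb; (void)zName; (void)iKey1; (void)iKey2;
    }

    /* later, after opening db: */
    sqlite3_preupdate_hook(db, xPreUpdate, 0);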
** +** When the [sqlite3_blob_write()] API is used to update a blob column, +** the pre-update hook is invoked with SQLITE_DELETE, because in this +** case the new values are not available. When a callback made with +** op==SQLITE_DELETE is actually a write using the sqlite3_blob_write() +** API, the [sqlite3_preupdate_blobwrite()] interface returns +** the index of the column being written. In other cases, where the +** pre-update hook is being invoked for some other reason, including a +** regular DELETE, sqlite3_preupdate_blobwrite() returns -1. +** ** See also: [sqlite3_update_hook()] */ #if defined(SQLITE_ENABLE_PREUPDATE_HOOK) @@ -9475,10 +9591,12 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **); SQLITE_API int sqlite3_preupdate_count(sqlite3 *); SQLITE_API int sqlite3_preupdate_depth(sqlite3 *); SQLITE_API int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **); +SQLITE_API int sqlite3_preupdate_blobwrite(sqlite3 *); #endif /* ** CAPI3REF: Low-level system error code +** METHOD: sqlite3 ** ** ^Attempt to return the underlying operating system error code or error ** number that caused the most recent I/O error or failure to open a file. @@ -9712,8 +9830,8 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c ** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory ** allocation error occurs. ** -** This interface is only available if SQLite is compiled with the -** [SQLITE_ENABLE_DESERIALIZE] option. +** This interface is omitted if SQLite is compiled with the +** [SQLITE_OMIT_DESERIALIZE] option. */ SQLITE_API unsigned char *sqlite3_serialize( sqlite3 *db, /* The database connection */ @@ -9764,8 +9882,8 @@ SQLITE_API unsigned char *sqlite3_serialize( ** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then ** [sqlite3_free()] is invoked on argument P prior to returning. ** -** This interface is only available if SQLite is compiled with the -** [SQLITE_ENABLE_DESERIALIZE] option. +** This interface is omitted if SQLite is compiled with the +** [SQLITE_OMIT_DESERIALIZE] option. */ SQLITE_API int sqlite3_deserialize( sqlite3 *db, /* The database connection */ @@ -10014,6 +10132,38 @@ SQLITE_API int sqlite3session_create( */ SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); +/* +** CAPI3REF: Configure a Session Object +** METHOD: sqlite3_session +** +** This method is used to configure a session object after it has been +** created. At present the only valid value for the second parameter is +** [SQLITE_SESSION_OBJCONFIG_SIZE]. +** +** Arguments for sqlite3session_object_config() +** +** The following values may be passed as the second parameter to +** sqlite3session_object_config(). +** +**
    SQLITE_SESSION_OBJCONFIG_SIZE
    +** This option is used to set, clear or query the flag that enables +** the [sqlite3session_changeset_size()] API. Because it imposes some +** computational overhead, this API is disabled by default. Argument +** pArg must point to a value of type (int). If the value is initially +** 0, then the sqlite3session_changeset_size() API is disabled. If it +** is greater than 0, then the same API is enabled. Or, if the initial +** value is less than zero, no change is made. In all cases the (int) +** variable is set to 1 if the sqlite3session_changeset_size() API is +** enabled following the current call, or 0 otherwise. +** +** It is an error (SQLITE_MISUSE) to attempt to modify this setting after +** the first table has been attached to the session object. +*/ +SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); + +/* +*/ +#define SQLITE_SESSION_OBJCONFIG_SIZE 1 /* ** CAPI3REF: Enable Or Disable A Session Object @@ -10258,6 +10408,22 @@ SQLITE_API int sqlite3session_changeset( void **ppChangeset /* OUT: Buffer containing changeset */ ); +/* +** CAPI3REF: Return An Upper-limit For The Size Of The Changeset +** METHOD: sqlite3_session +** +** By default, this function always returns 0. For it to return +** a useful result, the sqlite3_session object must have been configured +** to enable this API using sqlite3session_object_config() with the +** SQLITE_SESSION_OBJCONFIG_SIZE verb. +** +** When enabled, this function returns an upper limit, in bytes, for the size +** of the changeset that might be produced if sqlite3session_changeset() were +** called. The final changeset size might be equal to or smaller than the +** size in bytes returned by this function. +*/ +SQLITE_API sqlite3_int64 sqlite3session_changeset_size(sqlite3_session *pSession); + /* ** CAPI3REF: Load The Difference Between Tables Into A Session ** METHOD: sqlite3_session @@ -10375,6 +10541,14 @@ SQLITE_API int sqlite3session_patchset( */ SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession); +/* +** CAPI3REF: Query for the amount of heap memory used by a session object. +** +** This API returns the total amount of heap memory in bytes currently +** used by the session object passed as the only argument. +*/ +SQLITE_API sqlite3_int64 sqlite3session_memory_used(sqlite3_session *pSession); + /* ** CAPI3REF: Create An Iterator To Traverse A Changeset ** CONSTRUCTOR: sqlite3_changeset_iter @@ -10477,18 +10651,23 @@ SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter); ** call to [sqlite3changeset_next()] must have returned [SQLITE_ROW]. If this ** is not the case, this function returns [SQLITE_MISUSE]. ** -** If argument pzTab is not NULL, then *pzTab is set to point to a -** nul-terminated utf-8 encoded string containing the name of the table -** affected by the current change. The buffer remains valid until either -** sqlite3changeset_next() is called on the iterator or until the -** conflict-handler function returns. If pnCol is not NULL, then *pnCol is -** set to the number of columns in the table affected by the change. If -** pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change +** Arguments pOp, pnCol and pzTab may not be NULL. 
Upon return, three +** outputs are set through these pointers: +** +** *pOp is set to one of [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], +** depending on the type of change that the iterator currently points to; +** +** *pnCol is set to the number of columns in the table affected by the change; and +** +** *pzTab is set to point to a nul-terminated utf-8 encoded string containing +** the name of the table affected by the current change. The buffer remains +** valid until either sqlite3changeset_next() is called on the iterator +** or until the conflict-handler function returns. +** +** If pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change ** is an indirect change, or false (0) otherwise. See the documentation for ** [sqlite3session_indirect()] for a description of direct and indirect -** changes. Finally, if pOp is not NULL, then *pOp is set to one of -** [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], depending on the -** type of change that the iterator currently points to. +** changes. ** ** If no error occurs, SQLITE_OK is returned. If an error does occur, an ** SQLite error code is returned. The values of the output variables may not diff --git a/database/sqlite/sqlite_aclk.c b/database/sqlite/sqlite_aclk.c new file mode 100644 index 000000000..6803092f2 --- /dev/null +++ b/database/sqlite/sqlite_aclk.c @@ -0,0 +1,820 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "sqlite_functions.h" +#include "sqlite_aclk.h" + +#include "sqlite_aclk_chart.h" +#include "sqlite_aclk_node.h" + +#ifdef ENABLE_NEW_CLOUD_PROTOCOL +#include "../../aclk/aclk.h" +#endif + +const char *aclk_sync_config[] = { + "CREATE TABLE IF NOT EXISTS dimension_delete (dimension_id blob, dimension_name text, chart_type_id text, " + "dim_id blob, chart_id blob, host_id blob, date_created);", + + "CREATE INDEX IF NOT EXISTS ind_h1 ON dimension_delete (host_id);", + + "CREATE TRIGGER IF NOT EXISTS tr_dim_del AFTER DELETE ON dimension BEGIN INSERT INTO dimension_delete " + "(dimension_id, dimension_name, chart_type_id, dim_id, chart_id, host_id, date_created)" + " select old.id, old.name, c.type||\".\"||c.id, old.dim_id, old.chart_id, c.host_id, strftime('%s') FROM" + " chart c WHERE c.chart_id = old.chart_id; END;", + + "DELETE FROM dimension_delete WHERE host_id NOT IN" + " (SELECT host_id FROM host) OR strftime('%s') - date_created > 604800;", + + NULL, +}; + +uv_mutex_t aclk_async_lock; +struct aclk_database_worker_config *aclk_thread_head = NULL; + +int claimed() +{ + int rc; + rrdhost_aclk_state_lock(localhost); + rc = (localhost->aclk_state.claimed_id != NULL); + rrdhost_aclk_state_unlock(localhost); + return rc; +} + +void aclk_add_worker_thread(struct aclk_database_worker_config *wc) +{ + if (unlikely(!wc)) + return; + + uv_mutex_lock(&aclk_async_lock); + if (unlikely(!wc->host)) { + wc->next = aclk_thread_head; + aclk_thread_head = wc; + } + uv_mutex_unlock(&aclk_async_lock); + return; +} + +void aclk_del_worker_thread(struct aclk_database_worker_config *wc) +{ + if (unlikely(!wc)) + return; + + uv_mutex_lock(&aclk_async_lock); + struct aclk_database_worker_config **tmp = &aclk_thread_head; + while (*tmp && (*tmp) != wc) + tmp = &(*tmp)->next; + if (*tmp) + *tmp = wc->next; + uv_mutex_unlock(&aclk_async_lock); + return; +} + +int aclk_worker_thread_exists(char *guid) +{ + int rc = 0; + uv_mutex_lock(&aclk_async_lock); + + struct aclk_database_worker_config *tmp = aclk_thread_head; + + while (tmp && !rc) { + rc = strcmp(tmp->uuid_str, guid) == 0; + tmp = tmp->next; + 
} + uv_mutex_unlock(&aclk_async_lock); + return rc; +} + +void aclk_database_init_cmd_queue(struct aclk_database_worker_config *wc) +{ + wc->cmd_queue.head = wc->cmd_queue.tail = 0; + wc->queue_size = 0; + fatal_assert(0 == uv_cond_init(&wc->cmd_cond)); + fatal_assert(0 == uv_mutex_init(&wc->cmd_mutex)); +} + +int aclk_database_enq_cmd_noblock(struct aclk_database_worker_config *wc, struct aclk_database_cmd *cmd) +{ + unsigned queue_size; + + /* wait for free space in queue */ + uv_mutex_lock(&wc->cmd_mutex); + if ((queue_size = wc->queue_size) == ACLK_DATABASE_CMD_Q_MAX_SIZE || wc->is_shutting_down) { + uv_mutex_unlock(&wc->cmd_mutex); + return 1; + } + + fatal_assert(queue_size < ACLK_DATABASE_CMD_Q_MAX_SIZE); + /* enqueue command */ + wc->cmd_queue.cmd_array[wc->cmd_queue.tail] = *cmd; + wc->cmd_queue.tail = wc->cmd_queue.tail != ACLK_DATABASE_CMD_Q_MAX_SIZE - 1 ? + wc->cmd_queue.tail + 1 : 0; + wc->queue_size = queue_size + 1; + uv_mutex_unlock(&wc->cmd_mutex); + return 0; +} + +void aclk_database_enq_cmd(struct aclk_database_worker_config *wc, struct aclk_database_cmd *cmd) +{ + unsigned queue_size; + + /* wait for free space in queue */ + uv_mutex_lock(&wc->cmd_mutex); + if (wc->is_shutting_down) { + uv_mutex_unlock(&wc->cmd_mutex); + return; + } + while ((queue_size = wc->queue_size) == ACLK_DATABASE_CMD_Q_MAX_SIZE) { + uv_cond_wait(&wc->cmd_cond, &wc->cmd_mutex); + } + fatal_assert(queue_size < ACLK_DATABASE_CMD_Q_MAX_SIZE); + /* enqueue command */ + wc->cmd_queue.cmd_array[wc->cmd_queue.tail] = *cmd; + wc->cmd_queue.tail = wc->cmd_queue.tail != ACLK_DATABASE_CMD_Q_MAX_SIZE - 1 ? + wc->cmd_queue.tail + 1 : 0; + wc->queue_size = queue_size + 1; + uv_mutex_unlock(&wc->cmd_mutex); + + /* wake up event loop */ + int rc = uv_async_send(&wc->async); + if (unlikely(rc)) + debug(D_ACLK_SYNC, "Failed to wake up event loop"); +} + +struct aclk_database_cmd aclk_database_deq_cmd(struct aclk_database_worker_config* wc) +{ + struct aclk_database_cmd ret; + unsigned queue_size; + + uv_mutex_lock(&wc->cmd_mutex); + queue_size = wc->queue_size; + if (queue_size == 0 || wc->is_shutting_down) { + memset(&ret, 0, sizeof(ret)); + ret.opcode = ACLK_DATABASE_NOOP; + ret.completion = NULL; + if (wc->is_shutting_down) + uv_cond_signal(&wc->cmd_cond); + } else { + /* dequeue command */ + ret = wc->cmd_queue.cmd_array[wc->cmd_queue.head]; + if (queue_size == 1) { + wc->cmd_queue.head = wc->cmd_queue.tail = 0; + } else { + wc->cmd_queue.head = wc->cmd_queue.head != ACLK_DATABASE_CMD_Q_MAX_SIZE - 1 ? 
+ wc->cmd_queue.head + 1 : 0; + } + wc->queue_size = queue_size - 1; + /* wake up producers */ + uv_cond_signal(&wc->cmd_cond); + } + uv_mutex_unlock(&wc->cmd_mutex); + + return ret; +} + +int aclk_worker_enq_cmd(char *node_id, struct aclk_database_cmd *cmd) +{ + if (unlikely(!node_id || !cmd)) + return 0; + + uv_mutex_lock(&aclk_async_lock); + struct aclk_database_worker_config *wc = aclk_thread_head; + + while (wc) { + if (!strcmp(wc->node_id, node_id)) + break; + wc = wc->next; + } + uv_mutex_unlock(&aclk_async_lock); + if (wc) + aclk_database_enq_cmd(wc, cmd); + return (wc == NULL); +} + +void aclk_sync_exit_all() +{ + rrd_wrlock(); + RRDHOST *host = localhost; + while(host) { + struct aclk_database_worker_config *wc = host->dbsync_worker; + if (wc) { + wc->is_shutting_down = 1; + (void) aclk_database_deq_cmd(wc); + uv_cond_signal(&wc->cmd_cond); + } + host = host->next; + } + rrd_unlock(); + + uv_mutex_lock(&aclk_async_lock); + struct aclk_database_worker_config *wc = aclk_thread_head; + while (wc) { + wc->is_shutting_down = 1; + wc = wc->next; + } + uv_mutex_unlock(&aclk_async_lock); +} + +int aclk_start_sync_thread(void *data, int argc, char **argv, char **column) +{ + char uuid_str[GUID_LEN + 1]; + UNUSED(data); + UNUSED(argc); + UNUSED(column); + + uuid_unparse_lower(*((uuid_t *) argv[0]), uuid_str); + + if (rrdhost_find_by_guid(uuid_str, 0) == localhost) + return 0; + + sql_create_aclk_table(NULL, (uuid_t *) argv[0], (uuid_t *) argv[1]); + return 0; +} + +void sql_aclk_sync_init(void) +{ +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + char *err_msg = NULL; + int rc; + + if (unlikely(!db_meta)) { + if (default_rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) { + return; + } + error_report("Database has not been initialized"); + return; + } + + info("SQLite aclk sync initialization"); + + for (int i = 0; aclk_sync_config[i]; i++) { + debug(D_ACLK_SYNC, "Executing %s", aclk_sync_config[i]); + rc = sqlite3_exec(db_meta, aclk_sync_config[i], 0, 0, &err_msg); + if (rc != SQLITE_OK) { + error_report("SQLite error aclk sync initialization setup, rc = %d (%s)", rc, err_msg); + error_report("SQLite failed statement %s", aclk_sync_config[i]); + sqlite3_free(err_msg); + return; + } + } + info("SQLite aclk sync initialization completed"); + fatal_assert(0 == uv_mutex_init(&aclk_async_lock)); + + rc = sqlite3_exec(db_meta, "SELECT ni.host_id, ni.node_id FROM host h, node_instance ni WHERE " + "h.host_id = ni.host_id AND ni.node_id IS NOT NULL;", aclk_start_sync_thread, NULL, NULL); +#endif + return; +} + +static void async_cb(uv_async_t *handle) +{ + uv_stop(handle->loop); + uv_update_time(handle->loop); + debug(D_ACLK_SYNC, "%s called, active=%d.", __func__, uv_is_active((uv_handle_t *)handle)); +} + +#define TIMER_PERIOD_MS (1000) + +static void timer_cb(uv_timer_t* handle) +{ + uv_stop(handle->loop); + uv_update_time(handle->loop); + +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + struct aclk_database_worker_config *wc = handle->data; + struct aclk_database_cmd cmd; + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = ACLK_DATABASE_TIMER; + aclk_database_enq_cmd_noblock(wc, &cmd); + + time_t now = now_realtime_sec(); + + if (wc->cleanup_after && wc->cleanup_after < now) { + cmd.opcode = ACLK_DATABASE_CLEANUP; + if (!aclk_database_enq_cmd_noblock(wc, &cmd)) + wc->cleanup_after += ACLK_DATABASE_CLEANUP_INTERVAL; + } + + if (aclk_use_new_cloud_arch && aclk_connected) { + if (wc->rotation_after && wc->rotation_after < now) { + cmd.opcode = ACLK_DATABASE_NODE_INFO; + aclk_database_enq_cmd_noblock(wc, &cmd); + + cmd.opcode = 
ACLK_DATABASE_UPD_RETENTION; + if (!aclk_database_enq_cmd_noblock(wc, &cmd)) + wc->rotation_after += ACLK_DATABASE_ROTATION_INTERVAL; + } + + if (wc->chart_updates && !wc->chart_pending && wc->chart_payload_count) { + cmd.opcode = ACLK_DATABASE_PUSH_CHART; + cmd.count = ACLK_MAX_CHART_BATCH; + cmd.param1 = ACLK_MAX_CHART_BATCH_COUNT; + if (!aclk_database_enq_cmd_noblock(wc, &cmd)) { + if (wc->retry_count) + info("Queued chart/dimension payload command %s, retry count = %u", wc->host_guid, wc->retry_count); + wc->chart_pending = 1; + wc->retry_count = 0; + } else { + wc->retry_count++; + if (wc->retry_count % 100 == 0) + error_report("Failed to queue chart/dimension payload command %s, retry count = %u", + wc->host_guid, + wc->retry_count); + } + } + + if (wc->alert_updates) { + cmd.opcode = ACLK_DATABASE_PUSH_ALERT; + cmd.count = ACLK_MAX_ALERT_UPDATES; + aclk_database_enq_cmd_noblock(wc, &cmd); + } + } +#endif +} + +#define MAX_CMD_BATCH_SIZE (256) + +void aclk_database_worker(void *arg) +{ + struct aclk_database_worker_config *wc = arg; + uv_loop_t *loop; + int ret; + enum aclk_database_opcode opcode; + uv_timer_t timer_req; + struct aclk_database_cmd cmd; + unsigned cmd_batch_size; + + //aclk_database_init_cmd_queue(wc); + + char threadname[NETDATA_THREAD_NAME_MAX+1]; + if (wc->host) + snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "AS_%s", wc->host->hostname); + else { + snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "AS_%s", wc->uuid_str); + threadname[11] = '\0'; + } + uv_thread_set_name_np(wc->thread, threadname); + + loop = wc->loop = mallocz(sizeof(uv_loop_t)); + ret = uv_loop_init(loop); + if (ret) { + error("uv_loop_init(): %s", uv_strerror(ret)); + goto error_after_loop_init; + } + loop->data = wc; + + ret = uv_async_init(wc->loop, &wc->async, async_cb); + if (ret) { + error("uv_async_init(): %s", uv_strerror(ret)); + goto error_after_async_init; + } + wc->async.data = wc; + + ret = uv_timer_init(loop, &timer_req); + if (ret) { + error("uv_timer_init(): %s", uv_strerror(ret)); + goto error_after_timer_init; + } + timer_req.data = wc; + fatal_assert(0 == uv_timer_start(&timer_req, timer_cb, TIMER_PERIOD_MS, TIMER_PERIOD_MS)); + +// wc->retry_count = 0; + wc->node_info_send = (wc->host && !localhost); +// aclk_add_worker_thread(wc); + info("Starting ACLK sync thread for host %s -- scratch area %lu bytes", wc->host_guid, sizeof(*wc)); + + memset(&cmd, 0, sizeof(cmd)); +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + sql_get_last_chart_sequence(wc); + wc->chart_payload_count = sql_get_pending_count(wc); + if (!wc->chart_payload_count) + info("%s: No pending charts and dimensions detected during startup", wc->host_guid); +#endif + + wc->startup_time = now_realtime_sec(); + wc->cleanup_after = wc->startup_time + ACLK_DATABASE_CLEANUP_FIRST; + wc->rotation_after = wc->startup_time + ACLK_DATABASE_ROTATION_DELAY; + + debug(D_ACLK_SYNC,"Node %s reports pending message count = %u", wc->node_id, wc->chart_payload_count); + while (likely(!netdata_exit)) { + uv_run(loop, UV_RUN_DEFAULT); + + /* wait for commands */ + cmd_batch_size = 0; + do { + if (unlikely(cmd_batch_size >= MAX_CMD_BATCH_SIZE)) + break; + cmd = aclk_database_deq_cmd(wc); + + if (netdata_exit) + break; + + opcode = cmd.opcode; + ++cmd_batch_size; + switch (opcode) { + case ACLK_DATABASE_NOOP: + /* the command queue was empty, do nothing */ + break; + +// MAINTENANCE + case ACLK_DATABASE_CLEANUP: + debug(D_ACLK_SYNC, "Database cleanup for %s", wc->host_guid); + sql_maint_aclk_sync_database(wc, cmd); + if (wc->host == localhost) + 
sql_check_aclk_table_list(wc); + break; + case ACLK_DATABASE_DELETE_HOST: + debug(D_ACLK_SYNC,"Cleaning ACLK tables for %s", (char *) cmd.data); + sql_delete_aclk_table_list(wc, cmd); + break; + +// CHART / DIMENSION OPERATIONS +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + case ACLK_DATABASE_ADD_CHART: + debug(D_ACLK_SYNC, "Adding chart event for %s", wc->host_guid); + aclk_add_chart_event(wc, cmd); + break; + case ACLK_DATABASE_ADD_DIMENSION: + debug(D_ACLK_SYNC, "Adding dimension event for %s", wc->host_guid); + aclk_add_dimension_event(wc, cmd); + break; + case ACLK_DATABASE_PUSH_CHART: + debug(D_ACLK_SYNC, "Pushing chart info to the cloud for node %s", wc->host_guid); + aclk_send_chart_event(wc, cmd); + break; + case ACLK_DATABASE_PUSH_CHART_CONFIG: + debug(D_ACLK_SYNC, "Pushing chart config info to the cloud for node %s", wc->host_guid); + aclk_send_chart_config(wc, cmd); + break; + case ACLK_DATABASE_CHART_ACK: + debug(D_ACLK_SYNC, "ACK chart SEQ for %s to %"PRIu64, wc->uuid_str, (uint64_t) cmd.param1); + aclk_receive_chart_ack(wc, cmd); + break; + case ACLK_DATABASE_RESET_CHART: + debug(D_ACLK_SYNC, "RESET chart SEQ for %s to %"PRIu64, wc->uuid_str, (uint64_t) cmd.param1); + aclk_receive_chart_reset(wc, cmd); + break; +#endif +// ALERTS + case ACLK_DATABASE_PUSH_ALERT_CONFIG: + debug(D_ACLK_SYNC,"Pushing chart config info to the cloud for %s", wc->host_guid); + aclk_push_alert_config_event(wc, cmd); + break; + case ACLK_DATABASE_PUSH_ALERT: + debug(D_ACLK_SYNC, "Pushing alert info to the cloud for %s", wc->host_guid); + aclk_push_alert_event(wc, cmd); + break; + case ACLK_DATABASE_ALARM_HEALTH_LOG: + debug(D_ACLK_SYNC, "Pushing alarm health log to the cloud for %s", wc->host_guid); + aclk_push_alarm_health_log(wc, cmd); + break; + case ACLK_DATABASE_PUSH_ALERT_SNAPSHOT: + debug(D_ACLK_SYNC, "Pushing alert snapshot to the cloud for node %s", wc->host_guid); + aclk_push_alert_snapshot_event(wc, cmd); + break; + case ACLK_DATABASE_QUEUE_REMOVED_ALERTS: + debug(D_ACLK_SYNC, "Queueing removed alerts for node %s", wc->host_guid); + sql_process_queue_removed_alerts_to_aclk(wc, cmd); + break; + +// NODE OPERATIONS + case ACLK_DATABASE_NODE_INFO: + debug(D_ACLK_SYNC,"Sending node info for %s", wc->uuid_str); + sql_build_node_info(wc, cmd); + break; +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + case ACLK_DATABASE_DIM_DELETION: + debug(D_ACLK_SYNC,"Sending dimension deletion information %s", wc->uuid_str); + aclk_process_dimension_deletion(wc, cmd); + break; + case ACLK_DATABASE_UPD_RETENTION: + debug(D_ACLK_SYNC,"Sending retention info for %s", wc->uuid_str); + aclk_update_retention(wc, cmd); + aclk_process_dimension_deletion(wc, cmd); + break; +#endif + +// NODE_INSTANCE DETECTION + case ACLK_DATABASE_TIMER: + if (unlikely(localhost && !wc->host)) { + if (claimed()) { + wc->host = rrdhost_find_by_guid(wc->host_guid, 0); + if (wc->host) { + info("HOST %s (%s) detected as active", wc->host->hostname, wc->host_guid); + snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "AS_%s", wc->host->hostname); + uv_thread_set_name_np(wc->thread, threadname); + wc->host->dbsync_worker = wc; + aclk_del_worker_thread(wc); + wc->node_info_send = 1; + } + } + } + if (wc->node_info_send && wc->host && localhost && claimed() && aclk_connected) { + cmd.opcode = ACLK_DATABASE_NODE_INFO; + cmd.completion = NULL; + wc->node_info_send = aclk_database_enq_cmd_noblock(wc, &cmd); + } + break; + default: + debug(D_ACLK_SYNC, "%s: default.", __func__); + break; + } + if (cmd.completion) + aclk_complete(cmd.completion); + } while (opcode != 
ACLK_DATABASE_NOOP); + } + + if (!uv_timer_stop(&timer_req)) + uv_close((uv_handle_t *)&timer_req, NULL); + + /* cleanup operations of the event loop */ + //info("Shutting down ACLK sync event loop for %s", wc->host_guid); + + /* + * uv_async_send after uv_close does not seem to crash in linux at the moment, + * it is however undocumented behaviour we need to be aware if this becomes + * an issue in the future. + */ + uv_close((uv_handle_t *)&wc->async, NULL); + uv_run(loop, UV_RUN_DEFAULT); + + info("Shutting down ACLK sync event loop complete for host %s", wc->host_guid); + /* TODO: don't let the API block by waiting to enqueue commands */ + uv_cond_destroy(&wc->cmd_cond); +/* uv_mutex_destroy(&wc->cmd_mutex); */ + //fatal_assert(0 == uv_loop_close(loop)); + int rc; + + do { + rc = uv_loop_close(loop); + } while (rc != UV_EBUSY); + + freez(loop); + + rrd_wrlock(); + if (likely(wc->host)) + wc->host->dbsync_worker = NULL; + freez(wc); + rrd_unlock(); + return; + +error_after_timer_init: + uv_close((uv_handle_t *)&wc->async, NULL); +error_after_async_init: + fatal_assert(0 == uv_loop_close(loop)); +error_after_loop_init: + freez(loop); +} + +// ------------------------------------------------------------- + +void sql_create_aclk_table(RRDHOST *host, uuid_t *host_uuid, uuid_t *node_id) +{ +#ifdef ENABLE_ACLK + char uuid_str[GUID_LEN + 1]; + char host_guid[GUID_LEN + 1]; + + uuid_unparse_lower_fix(host_uuid, uuid_str); + + if (aclk_worker_thread_exists(uuid_str)) + return; + + uuid_unparse_lower(*host_uuid, host_guid); + + BUFFER *sql = buffer_create(ACLK_SYNC_QUERY_SIZE); + + buffer_sprintf(sql, TABLE_ACLK_CHART, uuid_str); + db_execute(buffer_tostring(sql)); + buffer_flush(sql); + + buffer_sprintf(sql, TABLE_ACLK_CHART_PAYLOAD, uuid_str); + db_execute(buffer_tostring(sql)); + buffer_flush(sql); + + buffer_sprintf(sql, TABLE_ACLK_CHART_LATEST, uuid_str); + db_execute(buffer_tostring(sql)); + buffer_flush(sql); + + buffer_sprintf(sql, INDEX_ACLK_CHART, uuid_str, uuid_str); + db_execute(buffer_tostring(sql)); + buffer_flush(sql); + + buffer_sprintf(sql, INDEX_ACLK_CHART_LATEST, uuid_str, uuid_str); + db_execute(buffer_tostring(sql)); + buffer_flush(sql); + + buffer_sprintf(sql, TRIGGER_ACLK_CHART_PAYLOAD, uuid_str, uuid_str, uuid_str); + db_execute(buffer_tostring(sql)); + buffer_flush(sql); + + buffer_sprintf(sql, TABLE_ACLK_ALERT, uuid_str, uuid_str, uuid_str); + db_execute(buffer_tostring(sql)); + buffer_flush(sql); + + buffer_sprintf(sql, INDEX_ACLK_ALERT, uuid_str, uuid_str); + db_execute(buffer_tostring(sql)); + + buffer_free(sql); + + if (likely(host) && unlikely(host->dbsync_worker)) + return; + + struct aclk_database_worker_config *wc = callocz(1, sizeof(struct aclk_database_worker_config)); + if (likely(host)) + host->dbsync_worker = (void *) wc; + wc->host = host; + strcpy(wc->uuid_str, uuid_str); + strcpy(wc->host_guid, host_guid); + if (node_id && !uuid_is_null(*node_id)) + uuid_unparse_lower(*node_id, wc->node_id); + wc->chart_updates = 0; + wc->alert_updates = 0; + wc->retry_count = 0; + aclk_database_init_cmd_queue(wc); + aclk_add_worker_thread(wc); + fatal_assert(0 == uv_thread_create(&(wc->thread), aclk_database_worker, wc)); +#else + UNUSED(host); + UNUSED(host_uuid); + UNUSED(node_id); +#endif + return; +} + +void sql_maint_aclk_sync_database(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd) +{ + UNUSED(cmd); + + debug(D_ACLK, "Checking database for %s", wc->host_guid); + + BUFFER *sql = buffer_create(ACLK_SYNC_QUERY_SIZE); + + 
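+    /*
+     * The DELETE statements below age out rows that the cloud has already
+     * acknowledged. As an illustration only, for a worker whose uuid_str were
+     * "aabbccdd_0123_4567_89ab_cdef01234567" the first statement would expand
+     * to roughly:
+     *
+     *   DELETE FROM aclk_chart_aabbccdd_0123_4567_89ab_cdef01234567
+     *   WHERE date_submitted IS NOT NULL
+     *     AND date_updated < strftime('%s','now','-600 seconds');
+     *
+     * with the 600 seconds coming from ACLK_DELETE_ACK_INTERNAL. Orphaned
+     * chart payloads and acknowledged alerts are expired the same way.
+     */
+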
buffer_sprintf(sql,"DELETE FROM aclk_chart_%s WHERE date_submitted IS NOT NULL AND " + "date_updated < strftime('%%s','now','-%d seconds');", wc->uuid_str, ACLK_DELETE_ACK_INTERNAL); + db_execute(buffer_tostring(sql)); + buffer_flush(sql); + + buffer_sprintf(sql,"DELETE FROM aclk_chart_payload_%s WHERE unique_id NOT IN " + "(SELECT unique_id FROM aclk_chart_%s) AND unique_id NOT IN (SELECT unique_id FROM aclk_chart_latest_%s);", + wc->uuid_str, wc->uuid_str, wc->uuid_str); + db_execute(buffer_tostring(sql)); + buffer_flush(sql); + + buffer_sprintf(sql,"DELETE FROM aclk_alert_%s WHERE date_submitted IS NOT NULL AND " + "date_cloud_ack < strftime('%%s','now','-%d seconds');", wc->uuid_str, ACLK_DELETE_ACK_ALERTS_INTERNAL); + db_execute(buffer_tostring(sql)); + + buffer_free(sql); + return; +} + +#define SQL_SELECT_HOST_BY_UUID "SELECT host_id FROM host WHERE host_id = @host_id;" + +static int is_host_available(uuid_t *host_id) +{ + sqlite3_stmt *res = NULL; + int rc; + + if (unlikely(!db_meta)) { + if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) + error_report("Database has not been initialized"); + return 1; + } + + rc = sqlite3_prepare_v2(db_meta, SQL_SELECT_HOST_BY_UUID, -1, &res, 0); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement to select node instance information for a node"); + return 1; + } + + rc = sqlite3_bind_blob(res, 1, host_id, sizeof(*host_id), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind host_id parameter to select node instance information"); + goto failed; + } + rc = sqlite3_step(res); + + failed: + if (unlikely(sqlite3_finalize(res) != SQLITE_OK)) + error_report("Failed to finalize the prepared statement when checking host existence"); + + return (rc == SQLITE_ROW); +} + +// OPCODE: ACLK_DATABASE_DELETE_HOST +void sql_delete_aclk_table_list(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd) +{ + UNUSED(wc); + char uuid_str[GUID_LEN + 1]; + char host_str[GUID_LEN + 1]; + + int rc; + uuid_t host_uuid; + char *host_guid = (char *)cmd.data; + + if (unlikely(!host_guid)) + return; + + rc = uuid_parse(host_guid, host_uuid); + freez(host_guid); + if (rc) + return; + + uuid_unparse_lower(host_uuid, host_str); + uuid_unparse_lower_fix(&host_uuid, uuid_str); + + debug(D_ACLK_SYNC, "Checking if I should delete aclk tables for node %s", host_str); + + if (is_host_available(&host_uuid)) { + debug(D_ACLK_SYNC, "Host %s exists, not deleting aclk sync tables", host_str); + return; + } + + debug(D_ACLK_SYNC, "Host %s does NOT exist, can delete aclk sync tables", host_str); + + sqlite3_stmt *res = NULL; + BUFFER *sql = buffer_create(ACLK_SYNC_QUERY_SIZE); + + buffer_sprintf(sql,"SELECT 'drop '||type||' IF EXISTS '||name||';' FROM sqlite_schema " \ + "WHERE name LIKE 'aclk_%%_%s' AND type IN ('table', 'trigger', 'index');", uuid_str); + + rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0); + if (rc != SQLITE_OK) { + error_report("Failed to prepare statement to clean up aclk tables"); + goto fail; + } + buffer_flush(sql); + + while (sqlite3_step(res) == SQLITE_ROW) + buffer_strcat(sql, (char *) sqlite3_column_text(res, 0)); + + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize statement to clean up aclk tables, rc = %d", rc); + + db_execute(buffer_tostring(sql)); + +fail: + buffer_free(sql); + return; +} + +static int sql_check_aclk_table(void *data, int argc, char **argv, char **column) +{ + struct aclk_database_worker_config *wc = 
data; + UNUSED(argc); + UNUSED(column); + + debug(D_ACLK_SYNC,"Scheduling aclk sync table check for node %s", (char *) argv[0]); + struct aclk_database_cmd cmd; + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = ACLK_DATABASE_DELETE_HOST; + cmd.data = strdupz((char *) argv[0]); + aclk_database_enq_cmd_noblock(wc, &cmd); + return 0; +} + +#define SQL_SELECT_ACLK_ACTIVE_LIST "SELECT REPLACE(SUBSTR(name,19),'_','-') FROM sqlite_schema " \ + "WHERE name LIKE 'aclk_chart_latest_%' AND type IN ('table');" + +void sql_check_aclk_table_list(struct aclk_database_worker_config *wc) +{ + char *err_msg = NULL; + debug(D_ACLK_SYNC,"Cleaning tables for nodes that do not exist"); + int rc = sqlite3_exec(db_meta, SQL_SELECT_ACLK_ACTIVE_LIST, sql_check_aclk_table, (void *) wc, &err_msg); + if (rc != SQLITE_OK) { + error_report("Query failed when trying to check for obsolete ACLK sync tables, %s", err_msg); + sqlite3_free(err_msg); + } + db_execute("DELETE FROM dimension_delete WHERE host_id NOT IN (SELECT host_id FROM host) " + " OR strftime('%s') - date_created > 604800;"); + return; +} + +void aclk_data_rotated(void) +{ +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + + if (!aclk_use_new_cloud_arch || !aclk_connected) + return; + + time_t next_rotation_time = now_realtime_sec()+ACLK_DATABASE_ROTATION_DELAY; + rrd_wrlock(); + RRDHOST *this_host = localhost; + while (this_host) { + struct aclk_database_worker_config *wc = this_host->dbsync_worker; + if (wc) + wc->rotation_after = next_rotation_time; + this_host = this_host->next; + } + rrd_unlock(); + + struct aclk_database_worker_config *tmp = aclk_thread_head; + + uv_mutex_lock(&aclk_async_lock); + while (tmp) { + tmp->rotation_after = next_rotation_time; + tmp = tmp->next; + } + uv_mutex_unlock(&aclk_async_lock); +#endif + return; +} diff --git a/database/sqlite/sqlite_aclk.h b/database/sqlite/sqlite_aclk.h new file mode 100644 index 000000000..d554e1069 --- /dev/null +++ b/database/sqlite/sqlite_aclk.h @@ -0,0 +1,232 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SQLITE_ACLK_H +#define NETDATA_SQLITE_ACLK_H + +#include "sqlite3.h" + +// TODO: To be added +#include "../../aclk/schema-wrappers/chart_stream.h" + +#ifndef ACLK_MAX_CHART_BATCH +#define ACLK_MAX_CHART_BATCH (200) +#endif +#ifndef ACLK_MAX_CHART_BATCH_COUNT +#define ACLK_MAX_CHART_BATCH_COUNT (10) +#endif +#define ACLK_MAX_ALERT_UPDATES (5) +#define ACLK_DATABASE_CLEANUP_FIRST (60) +#define ACLK_DATABASE_ROTATION_DELAY (60) +#define ACLK_DATABASE_CLEANUP_INTERVAL (3600) +#define ACLK_DATABASE_ROTATION_INTERVAL (3600) +#define ACLK_DELETE_ACK_INTERNAL (600) +#define ACLK_DELETE_ACK_ALERTS_INTERNAL (86400) +#define ACLK_SYNC_QUERY_SIZE 512 + +struct aclk_completion { + uv_mutex_t mutex; + uv_cond_t cond; + volatile unsigned completed; +}; + +static inline void init_aclk_completion(struct aclk_completion *p) +{ + p->completed = 0; + fatal_assert(0 == uv_cond_init(&p->cond)); + fatal_assert(0 == uv_mutex_init(&p->mutex)); +} + +static inline void destroy_aclk_completion(struct aclk_completion *p) +{ + uv_cond_destroy(&p->cond); + uv_mutex_destroy(&p->mutex); +} + +static inline void wait_for_aclk_completion(struct aclk_completion *p) +{ + uv_mutex_lock(&p->mutex); + while (0 == p->completed) { + uv_cond_wait(&p->cond, &p->mutex); + } + fatal_assert(1 == p->completed); + uv_mutex_unlock(&p->mutex); +} + +static inline void aclk_complete(struct aclk_completion *p) +{ + uv_mutex_lock(&p->mutex); + p->completed = 1; + uv_mutex_unlock(&p->mutex); + uv_cond_broadcast(&p->cond); +} + +extern 
uv_mutex_t aclk_async_lock; + +static inline void uuid_unparse_lower_fix(uuid_t *uuid, char *out) +{ + uuid_unparse_lower(*uuid, out); + out[8] = '_'; + out[13] = '_'; + out[18] = '_'; + out[23] = '_'; +} + +static inline char *get_str_from_uuid(uuid_t *uuid) +{ + char uuid_str[GUID_LEN + 1]; + if (unlikely(!uuid)) { + uuid_t zero_uuid; + uuid_clear(zero_uuid); + uuid_unparse_lower(zero_uuid, uuid_str); + } + else + uuid_unparse_lower(*uuid, uuid_str); + return strdupz(uuid_str); +} + +#define TABLE_ACLK_CHART "CREATE TABLE IF NOT EXISTS aclk_chart_%s (sequence_id INTEGER PRIMARY KEY, " \ + "date_created, date_updated, date_submitted, status, uuid, type, unique_id, " \ + "update_count default 1, unique(uuid, status));" + +#define TABLE_ACLK_CHART_PAYLOAD "CREATE TABLE IF NOT EXISTS aclk_chart_payload_%s (unique_id BLOB PRIMARY KEY, " \ + "uuid, claim_id, type, date_created, payload);" + +#define TABLE_ACLK_CHART_LATEST "CREATE TABLE IF NOT EXISTS aclk_chart_latest_%s (uuid BLOB PRIMARY KEY, " \ + "unique_id, date_submitted);" + +#define TRIGGER_ACLK_CHART_PAYLOAD "CREATE TRIGGER IF NOT EXISTS aclk_tr_chart_payload_%s " \ + "after insert on aclk_chart_payload_%s " \ + "begin insert into aclk_chart_%s (uuid, unique_id, type, status, date_created) values " \ + " (new.uuid, new.unique_id, new.type, 'pending', strftime('%%s')) on conflict(uuid, status) " \ + " do update set unique_id = new.unique_id, update_count = update_count + 1; " \ + "end;" + +#define TABLE_ACLK_ALERT "CREATE TABLE IF NOT EXISTS aclk_alert_%s (sequence_id INTEGER PRIMARY KEY, " \ + "alert_unique_id, date_created, date_submitted, date_cloud_ack, " \ + "unique(alert_unique_id)); " \ + "insert into aclk_alert_%s (alert_unique_id, date_created) " \ + "select unique_id alert_unique_id, strftime('%%s') date_created from health_log_%s where new_status <> 0 and new_status <> -2 order by unique_id asc on conflict (alert_unique_id) do nothing;" + +#define INDEX_ACLK_CHART "CREATE INDEX IF NOT EXISTS aclk_chart_index_%s ON aclk_chart_%s (unique_id);" + +#define INDEX_ACLK_CHART_LATEST "CREATE INDEX IF NOT EXISTS aclk_chart_latest_index_%s ON aclk_chart_latest_%s (unique_id);" + +#define INDEX_ACLK_ALERT "CREATE INDEX IF NOT EXISTS aclk_alert_index_%s ON aclk_alert_%s (alert_unique_id);" + +enum aclk_database_opcode { + ACLK_DATABASE_NOOP = 0, + +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + ACLK_DATABASE_ADD_CHART, + ACLK_DATABASE_ADD_DIMENSION, + ACLK_DATABASE_PUSH_CHART, + ACLK_DATABASE_PUSH_CHART_CONFIG, + ACLK_DATABASE_RESET_CHART, + ACLK_DATABASE_CHART_ACK, + ACLK_DATABASE_UPD_RETENTION, + ACLK_DATABASE_DIM_DELETION, +#endif + ACLK_DATABASE_ALARM_HEALTH_LOG, + ACLK_DATABASE_CLEANUP, + ACLK_DATABASE_DELETE_HOST, + ACLK_DATABASE_NODE_INFO, + ACLK_DATABASE_PUSH_ALERT, + ACLK_DATABASE_PUSH_ALERT_CONFIG, + ACLK_DATABASE_PUSH_ALERT_SNAPSHOT, + ACLK_DATABASE_QUEUE_REMOVED_ALERTS, + ACLK_DATABASE_TIMER +}; + +struct aclk_chart_payload_t { + long sequence_id; + long last_sequence_id; + char *payload; + struct aclk_chart_payload_t *next; +}; + + +struct aclk_database_cmd { + enum aclk_database_opcode opcode; + void *data; + void *data_param; + int count; + uint64_t param1; + struct aclk_completion *completion; +}; + +#define ACLK_DATABASE_CMD_Q_MAX_SIZE (16384) + +struct aclk_database_cmdqueue { + unsigned head, tail; + struct aclk_database_cmd cmd_array[ACLK_DATABASE_CMD_Q_MAX_SIZE]; +}; + +struct aclk_database_worker_config { + uv_thread_t thread; + char uuid_str[GUID_LEN + 1]; + char node_id[GUID_LEN + 1]; + char host_guid[GUID_LEN + 1]; + 
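+    /*
+     * A worker thread serializes all ACLK database work for one host. Producers
+     * hand work over by filling in a struct aclk_database_cmd and queueing it,
+     * as callers such as aclk_send_alarm_health_log() do. A minimal sketch:
+     *
+     *   struct aclk_database_cmd cmd;
+     *   memset(&cmd, 0, sizeof(cmd));
+     *   cmd.opcode = ACLK_DATABASE_CLEANUP;   // any enum aclk_database_opcode
+     *   cmd.completion = NULL;                // or an initialized aclk_completion to wait on
+     *   aclk_database_enq_cmd(wc, &cmd);      // blocking; aclk_database_enq_cmd_noblock() fails fast
+     *
+     * The fields below track per-host streaming progress for charts and alerts.
+     */
+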
uint64_t chart_sequence_id; // last chart_sequence_id + time_t chart_timestamp; // last chart timestamp + time_t cleanup_after; // Start a cleanup after this timestamp + time_t startup_time; // When the sync thread started + time_t rotation_after; + uint64_t batch_id; // batch id to use + uint64_t alerts_batch_id; // batch id for alerts to use + uint64_t alerts_start_seq_id; // cloud has asked to start streaming from + uint64_t alert_sequence_id; // last alert sequence_id + uint32_t chart_payload_count; + uint64_t alerts_snapshot_id; //will contain the snapshot_id value if snapshot was requested + uint64_t alerts_ack_sequence_id; //last sequence_id ack'ed from cloud via sendsnapshot message + uv_loop_t *loop; + RRDHOST *host; + uv_async_t async; + /* FIFO command queue */ + uv_mutex_t cmd_mutex; + uv_cond_t cmd_cond; + volatile unsigned queue_size; + struct aclk_database_cmdqueue cmd_queue; + uint32_t retry_count; + int chart_updates; + int alert_updates; + time_t batch_created; + int node_info_send; + int chart_pending; + int chart_reset_count; + volatile unsigned is_shutting_down; + struct aclk_database_worker_config *next; +}; + +static inline RRDHOST *find_host_by_node_id(char *node_id) +{ + uuid_t node_uuid; + if (unlikely(!node_id)) + return NULL; + + if (uuid_parse(node_id, node_uuid)) + return NULL; + + RRDHOST *host = localhost; + while(host) { + if (host->node_id && !(uuid_compare(*host->node_id, node_uuid))) + return host; + host = host->next; + } + return NULL; +} + + +extern sqlite3 *db_meta; + +extern int aclk_database_enq_cmd_noblock(struct aclk_database_worker_config *wc, struct aclk_database_cmd *cmd); +extern void aclk_database_enq_cmd(struct aclk_database_worker_config *wc, struct aclk_database_cmd *cmd); +extern void sql_create_aclk_table(RRDHOST *host, uuid_t *host_uuid, uuid_t *node_id); +int aclk_worker_enq_cmd(char *node_id, struct aclk_database_cmd *cmd); +void aclk_data_rotated(void); +void sql_aclk_sync_init(void); +void sql_check_aclk_table_list(struct aclk_database_worker_config *wc); +void sql_delete_aclk_table_list(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +void sql_maint_aclk_sync_database(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +int claimed(); +void aclk_sync_exit_all(); +#endif //NETDATA_SQLITE_ACLK_H diff --git a/database/sqlite/sqlite_aclk_alert.c b/database/sqlite/sqlite_aclk_alert.c new file mode 100644 index 000000000..af2797a51 --- /dev/null +++ b/database/sqlite/sqlite_aclk_alert.c @@ -0,0 +1,885 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "sqlite_functions.h" +#include "sqlite_aclk_alert.h" + +#ifdef ENABLE_NEW_CLOUD_PROTOCOL +#include "../../aclk/aclk_alarm_api.h" +#include "../../aclk/aclk.h" +#endif + +// will replace call to aclk_update_alarm in health/health_log.c +// and handle both cases +int sql_queue_alarm_to_aclk(RRDHOST *host, ALARM_ENTRY *ae) +{ + //check aclk architecture and handle old json alarm update to cloud + //include also the valid statuses for this case +#ifdef ENABLE_ACLK +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + if (!aclk_use_new_cloud_arch && aclk_connected) { +#endif + + if ((ae->new_status == RRDCALC_STATUS_WARNING || ae->new_status == RRDCALC_STATUS_CRITICAL) || + ((ae->old_status == RRDCALC_STATUS_WARNING || ae->old_status == RRDCALC_STATUS_CRITICAL))) { + aclk_update_alarm(host, ae); + } +#endif +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + } + + if (!claimed()) + return 0; + + if (ae->flags & HEALTH_ENTRY_FLAG_ACLK_QUEUED) + return 0; + + if (ae->new_status 
== RRDCALC_STATUS_REMOVED || ae->new_status == RRDCALC_STATUS_UNINITIALIZED)
+        return 0;
+
+    if (unlikely(!host->dbsync_worker))
+        return 1;
+
+    if (unlikely(uuid_is_null(ae->config_hash_id)))
+        return 0;
+
+    int rc = 0;
+
+    CHECK_SQLITE_CONNECTION(db_meta);
+
+    sqlite3_stmt *res_alert = NULL;
+    char uuid_str[GUID_LEN + 1];
+    uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
+
+    BUFFER *sql = buffer_create(1024);
+
+    buffer_sprintf(
+        sql,
+        "INSERT INTO aclk_alert_%s (alert_unique_id, date_created) "
+        "VALUES (@alert_unique_id, strftime('%%s')) on conflict (alert_unique_id) do nothing; ",
+        uuid_str);
+
+    rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res_alert, 0);
+    if (unlikely(rc != SQLITE_OK)) {
+        error_report("Failed to prepare statement to store alert event");
+        buffer_free(sql);
+        return 1;
+    }
+
+    rc = sqlite3_bind_int(res_alert, 1, ae->unique_id);
+    if (unlikely(rc != SQLITE_OK))
+        goto bind_fail;
+
+    rc = execute_insert(res_alert);
+    if (unlikely(rc != SQLITE_DONE)) {
+        error_report("Failed to store alert event %u, rc = %d", ae->unique_id, rc);
+        goto bind_fail;
+    }
+
+    ae->flags |= HEALTH_ENTRY_FLAG_ACLK_QUEUED;
+
+bind_fail:
+    if (unlikely(sqlite3_finalize(res_alert) != SQLITE_OK))
+        error_report("Failed to finalize statement in store alert event, rc = %d", rc);
+
+    buffer_free(sql);
+    return 0;
+#else
+    UNUSED(host);
+    UNUSED(ae);
+#endif
+    return 0;
+}
+
+int rrdcalc_status_to_proto_enum(RRDCALC_STATUS status)
+{
+#ifdef ENABLE_NEW_CLOUD_PROTOCOL
+    switch(status) {
+        case RRDCALC_STATUS_REMOVED:
+            return ALARM_STATUS_REMOVED;
+
+        case RRDCALC_STATUS_UNDEFINED:
+            return ALARM_STATUS_NOT_A_NUMBER;
+
+        case RRDCALC_STATUS_CLEAR:
+            return ALARM_STATUS_CLEAR;
+
+        case RRDCALC_STATUS_WARNING:
+            return ALARM_STATUS_WARNING;
+
+        case RRDCALC_STATUS_CRITICAL:
+            return ALARM_STATUS_CRITICAL;
+
+        default:
+            return ALARM_STATUS_UNKNOWN;
+    }
+#else
+    UNUSED(status);
+    return 1;
+#endif
+}
+
+void aclk_push_alert_event(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd)
+{
+#ifndef ENABLE_NEW_CLOUD_PROTOCOL
+    UNUSED(wc);
+    UNUSED(cmd);
+#else
+    int rc;
+
+    if (unlikely(!wc->alert_updates)) {
+        log_access("AC [%s (%s)]: Ignoring alert push event, updates have been turned off for this node.", wc->node_id, wc->host ? wc->host->hostname : "N/A");
+        return;
+    }
+
+    char *claim_id = is_agent_claimed();
+    if (unlikely(!claim_id))
+        return;
+
+    BUFFER *sql = buffer_create(1024);
+
+    if (wc->alerts_start_seq_id != 0) {
+        buffer_sprintf(
+            sql,
+            "UPDATE aclk_alert_%s SET date_submitted = NULL, date_cloud_ack = NULL WHERE sequence_id >= %"PRIu64
+            "; UPDATE aclk_alert_%s SET date_cloud_ack = strftime('%%s','now') WHERE sequence_id < %"PRIu64
+            " and date_cloud_ack is null "
+            "; UPDATE aclk_alert_%s SET date_submitted = strftime('%%s','now') WHERE sequence_id < %"PRIu64
+            " and date_submitted is null",
+            wc->uuid_str,
+            wc->alerts_start_seq_id,
+            wc->uuid_str,
+            wc->alerts_start_seq_id,
+            wc->uuid_str,
+            wc->alerts_start_seq_id);
+        db_execute(buffer_tostring(sql));
+        buffer_reset(sql);
+        wc->alerts_start_seq_id = 0;
+    }
+
+    int limit = cmd.count > 0 ?
cmd.count : 1; + + sqlite3_stmt *res = NULL; + + buffer_sprintf(sql, "select aa.sequence_id, hl.unique_id, hl.alarm_id, hl.config_hash_id, hl.updated_by_id, hl.when_key, \ + hl.duration, hl.non_clear_duration, hl.flags, hl.exec_run_timestamp, hl.delay_up_to_timestamp, hl.name, \ + hl.chart, hl.family, hl.exec, hl.recipient, hl.source, hl.units, hl.info, hl.exec_code, hl.new_status, \ + hl.old_status, hl.delay, hl.new_value, hl.old_value, hl.last_repeat \ + from health_log_%s hl, aclk_alert_%s aa \ + where hl.unique_id = aa.alert_unique_id and aa.date_submitted is null \ + order by aa.sequence_id asc limit %d;", wc->uuid_str, wc->uuid_str, limit); + + rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0); + if (rc != SQLITE_OK) { + error_report("Failed to prepare statement when trying to send an alert update via ACLK"); + buffer_free(sql); + freez(claim_id); + return; + } + + char uuid_str[GUID_LEN + 1]; + uint64_t first_sequence_id = 0; + uint64_t last_sequence_id = 0; + static __thread uint64_t log_first_sequence_id = 0; + static __thread uint64_t log_last_sequence_id = 0; + + while (sqlite3_step(res) == SQLITE_ROW) { + struct alarm_log_entry alarm_log; + char old_value_string[100 + 1]; + char new_value_string[100 + 1]; + + alarm_log.node_id = wc->node_id; + alarm_log.claim_id = claim_id; + + alarm_log.chart = strdupz((char *)sqlite3_column_text(res, 12)); + alarm_log.name = strdupz((char *)sqlite3_column_text(res, 11)); + alarm_log.family = sqlite3_column_bytes(res, 13) > 0 ? strdupz((char *)sqlite3_column_text(res, 13)) : NULL; + + alarm_log.batch_id = wc->alerts_batch_id; + alarm_log.sequence_id = (uint64_t) sqlite3_column_int64(res, 0); + alarm_log.when = (time_t) sqlite3_column_int64(res, 5); + + uuid_unparse_lower(*((uuid_t *) sqlite3_column_blob(res, 3)), uuid_str); + alarm_log.config_hash = strdupz((char *)uuid_str); + + alarm_log.utc_offset = wc->host->utc_offset; + alarm_log.timezone = strdupz((char *)wc->host->abbrev_timezone); + alarm_log.exec_path = sqlite3_column_bytes(res, 14) > 0 ? strdupz((char *)sqlite3_column_text(res, 14)) : + strdupz((char *)wc->host->health_default_exec); + alarm_log.conf_source = strdupz((char *)sqlite3_column_text(res, 16)); + + char *edit_command = sqlite3_column_bytes(res, 16) > 0 ? + health_edit_command_from_source((char *)sqlite3_column_text(res, 16)) : + strdupz("UNKNOWN=0"); + alarm_log.command = strdupz(edit_command); + + alarm_log.duration = (time_t) sqlite3_column_int64(res, 6); + alarm_log.non_clear_duration = (time_t) sqlite3_column_int64(res, 7); + alarm_log.status = rrdcalc_status_to_proto_enum((RRDCALC_STATUS) sqlite3_column_int(res, 20)); + alarm_log.old_status = rrdcalc_status_to_proto_enum((RRDCALC_STATUS) sqlite3_column_int(res, 21)); + alarm_log.delay = (int) sqlite3_column_int(res, 22); + alarm_log.delay_up_to_timestamp = (time_t) sqlite3_column_int64(res, 10); + alarm_log.last_repeat = (time_t) sqlite3_column_int64(res, 25); + + alarm_log.silenced = ((sqlite3_column_int64(res, 8) & HEALTH_ENTRY_FLAG_SILENCED) || + (sqlite3_column_type(res, 15) != SQLITE_NULL && + !strncmp((char *)sqlite3_column_text(res, 15), "silent", 6))) ? + 1 : + 0; + + alarm_log.value_string = + sqlite3_column_type(res, 23) == SQLITE_NULL ? + strdupz((char *)"-") : + strdupz((char *)format_value_and_unit( + new_value_string, 100, sqlite3_column_double(res, 23), (char *)sqlite3_column_text(res, 17), -1)); + + alarm_log.old_value_string = + sqlite3_column_type(res, 24) == SQLITE_NULL ? 
+ strdupz((char *)"-") : + strdupz((char *)format_value_and_unit( + old_value_string, 100, sqlite3_column_double(res, 24), (char *)sqlite3_column_text(res, 17), -1)); + + alarm_log.value = (calculated_number) sqlite3_column_double(res, 23); + alarm_log.old_value = (calculated_number) sqlite3_column_double(res, 24); + + alarm_log.updated = (sqlite3_column_int64(res, 8) & HEALTH_ENTRY_FLAG_UPDATED) ? 1 : 0; + alarm_log.rendered_info = strdupz((char *)sqlite3_column_text(res, 18)); + + aclk_send_alarm_log_entry(&alarm_log); + + if (first_sequence_id == 0) + first_sequence_id = (uint64_t) sqlite3_column_int64(res, 0); + + if (log_first_sequence_id == 0) + log_first_sequence_id = (uint64_t) sqlite3_column_int64(res, 0); + + last_sequence_id = (uint64_t) sqlite3_column_int64(res, 0); + log_last_sequence_id = (uint64_t) sqlite3_column_int64(res, 0); + + destroy_alarm_log_entry(&alarm_log); + freez(edit_command); + } + + if (first_sequence_id) { + buffer_flush(sql); + buffer_sprintf(sql, "UPDATE aclk_alert_%s SET date_submitted=strftime('%%s') " + "WHERE date_submitted IS NULL AND sequence_id BETWEEN %" PRIu64 " AND %" PRIu64 ";", + wc->uuid_str, first_sequence_id, last_sequence_id); + db_execute(buffer_tostring(sql)); + } else { + if (log_first_sequence_id) + log_access("OG [%s (%s)]: Sent alert events, first sequence_id %"PRIu64", last sequence_id %"PRIu64, wc->node_id, wc->host ? wc->host->hostname : "N/A", log_first_sequence_id, log_last_sequence_id); + log_first_sequence_id = 0; + log_last_sequence_id = 0; + } + + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize statement to send alert entries from the database, rc = %d", rc); + + freez(claim_id); + buffer_free(sql); +#endif + + return; +} + +void aclk_send_alarm_health_log(char *node_id) +{ + if (unlikely(!node_id)) + return; + + log_access("IN [%s (N/A)]: Request to send alarm health log.", node_id); + + struct aclk_database_worker_config *wc = NULL; + struct aclk_database_cmd cmd; + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = ACLK_DATABASE_ALARM_HEALTH_LOG; + + rrd_wrlock(); + RRDHOST *host = find_host_by_node_id(node_id); + if (likely(host)) + wc = (struct aclk_database_worker_config *)host->dbsync_worker; + rrd_unlock(); + if (wc) + aclk_database_enq_cmd(wc, &cmd); + else { + if (aclk_worker_enq_cmd(node_id, &cmd)) + log_access("AC [%s (N/A)]: ACLK synchronization thread is not active.", node_id); + } + return; +} + +void aclk_push_alarm_health_log(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd) +{ + UNUSED(cmd); +#ifndef ENABLE_NEW_CLOUD_PROTOCOL + UNUSED(wc); +#else + int rc; + + char *claim_id = is_agent_claimed(); + if (unlikely(!claim_id)) + return; + + uint64_t first_sequence = 0; + uint64_t last_sequence = 0; + struct timeval first_timestamp; + struct timeval last_timestamp; + + BUFFER *sql = buffer_create(1024); + + sqlite3_stmt *res = NULL; + + //TODO: make this better: include info from health log too + buffer_sprintf(sql, "SELECT MIN(sequence_id), MIN(date_created), MAX(sequence_id), MAX(date_created) " \ + "FROM aclk_alert_%s;", wc->uuid_str); + + rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0); + if (rc != SQLITE_OK) { + error_report("Failed to prepare statement to get health log statistics from the database"); + buffer_free(sql); + freez(claim_id); + return; + } + + first_timestamp.tv_sec = 0; + first_timestamp.tv_usec = 0; + last_timestamp.tv_sec = 0; + last_timestamp.tv_usec = 0; + + while (sqlite3_step(res) == SQLITE_ROW) { + 
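+        /*
+         * The aggregate query returns exactly one row; MIN()/MAX() over an
+         * empty aclk_alert_%s table come back as SQL NULL, and a NULL column
+         * reports zero bytes, so the sqlite3_column_bytes() tests double as
+         * NULL checks and leave the zero defaults set above in place.
+         */
+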
first_sequence = sqlite3_column_bytes(res, 0) > 0 ? (uint64_t) sqlite3_column_int64(res, 0) : 0;
+        if (sqlite3_column_bytes(res, 1) > 0) {
+            first_timestamp.tv_sec = sqlite3_column_int64(res, 1);
+        }
+
+        last_sequence = sqlite3_column_bytes(res, 2) > 0 ? (uint64_t) sqlite3_column_int64(res, 2) : 0;
+        if (sqlite3_column_bytes(res, 3) > 0) {
+            last_timestamp.tv_sec = sqlite3_column_int64(res, 3);
+        }
+    }
+
+    struct alarm_log_entries log_entries;
+    log_entries.first_seq_id = first_sequence;
+    log_entries.first_when = first_timestamp;
+    log_entries.last_seq_id = last_sequence;
+    log_entries.last_when = last_timestamp;
+
+    struct alarm_log_health alarm_log;
+    alarm_log.claim_id = claim_id;
+    alarm_log.node_id = wc->node_id;
+    alarm_log.log_entries = log_entries;
+    alarm_log.status = wc->alert_updates == 0 ? 2 : 1;
+
+    wc->alert_sequence_id = last_sequence;
+
+    aclk_send_alarm_log_health(&alarm_log);
+    log_access("OG [%s (%s)]: Alarm health log sent, first sequence id %"PRIu64", last sequence id %"PRIu64, wc->node_id, wc->host ? wc->host->hostname : "N/A", first_sequence, last_sequence);
+
+    rc = sqlite3_finalize(res);
+    if (unlikely(rc != SQLITE_OK))
+        error_report("Failed to finalize statement to get health log statistics from the database, rc = %d", rc);
+
+    freez(claim_id);
+    buffer_free(sql);
+#endif
+
+    return;
+}
+
+void aclk_send_alarm_configuration(char *config_hash)
+{
+    if (unlikely(!config_hash))
+        return;
+
+    struct aclk_database_worker_config *wc = (struct aclk_database_worker_config *) localhost->dbsync_worker;
+
+    if (unlikely(!wc)) {
+        return;
+    }
+
+    log_access("IN [%s (%s)]: Request to send alert config %s.", wc->node_id, wc->host ? wc->host->hostname : "N/A", config_hash);
+
+    struct aclk_database_cmd cmd;
+    memset(&cmd, 0, sizeof(cmd));
+    cmd.opcode = ACLK_DATABASE_PUSH_ALERT_CONFIG;
+    cmd.data_param = (void *) strdupz(config_hash);
+    cmd.completion = NULL;
+    aclk_database_enq_cmd(wc, &cmd);
+
+    return;
+}
+
+#define SQL_SELECT_ALERT_CONFIG "SELECT alarm, template, on_key, class, type, component, os, hosts, plugin," \
+    "module, charts, families, lookup, every, units, green, red, calc, warn, crit, to_key, exec, delay, repeat, info," \
+    "options, host_labels, p_db_lookup_dimensions, p_db_lookup_method, p_db_lookup_options, p_db_lookup_after," \
+    "p_db_lookup_before, p_update_every FROM alert_hash WHERE hash_id = @hash_id;"
+
+int aclk_push_alert_config_event(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd)
+{
+    UNUSED(wc);
+#ifndef ENABLE_NEW_CLOUD_PROTOCOL
+    UNUSED(cmd);
+#else
+    int rc = 0;
+
+    CHECK_SQLITE_CONNECTION(db_meta);
+
+    sqlite3_stmt *res = NULL;
+
+    char *config_hash = (char *) cmd.data_param;
+
+    rc = sqlite3_prepare_v2(db_meta, SQL_SELECT_ALERT_CONFIG, -1, &res, 0);
+    if (rc != SQLITE_OK) {
+        error_report("Failed to prepare statement when trying to fetch an alarm hash configuration");
+        return 1;
+    }
+
+    uuid_t hash_uuid;
+    if (uuid_parse(config_hash, hash_uuid))
+        return 1;
+
+    rc = sqlite3_bind_blob(res, 1, &hash_uuid , sizeof(hash_uuid), SQLITE_STATIC);
+    if (unlikely(rc != SQLITE_OK))
+        goto bind_fail;
+
+    struct aclk_alarm_configuration alarm_config;
+    struct provide_alarm_configuration p_alarm_config;
+    p_alarm_config.cfg_hash = NULL;
+
+    if (sqlite3_step(res) == SQLITE_ROW) {
+
+        alarm_config.alarm = sqlite3_column_bytes(res, 0) > 0 ? strdupz((char *)sqlite3_column_text(res, 0)) : NULL;
+        alarm_config.tmpl = sqlite3_column_bytes(res, 1) > 0 ?
strdupz((char *)sqlite3_column_text(res, 1)) : NULL; + alarm_config.on_chart = sqlite3_column_bytes(res, 2) > 0 ? strdupz((char *)sqlite3_column_text(res, 2)) : NULL; + alarm_config.classification = sqlite3_column_bytes(res, 3) > 0 ? strdupz((char *)sqlite3_column_text(res, 3)) : NULL; + alarm_config.type = sqlite3_column_bytes(res, 4) > 0 ? strdupz((char *)sqlite3_column_text(res, 4)) : NULL; + alarm_config.component = sqlite3_column_bytes(res, 5) > 0 ? strdupz((char *)sqlite3_column_text(res, 5)) : NULL; + + alarm_config.os = sqlite3_column_bytes(res, 6) > 0 ? strdupz((char *)sqlite3_column_text(res, 6)) : NULL; + alarm_config.hosts = sqlite3_column_bytes(res, 7) > 0 ? strdupz((char *)sqlite3_column_text(res, 7)) : NULL; + alarm_config.plugin = sqlite3_column_bytes(res, 8) > 0 ? strdupz((char *)sqlite3_column_text(res, 8)) : NULL; + alarm_config.module = sqlite3_column_bytes(res, 9) > 0 ? strdupz((char *)sqlite3_column_text(res, 9)) : NULL; + alarm_config.charts = sqlite3_column_bytes(res, 10) > 0 ? strdupz((char *)sqlite3_column_text(res, 10)) : NULL; + alarm_config.families = sqlite3_column_bytes(res, 11) > 0 ? strdupz((char *)sqlite3_column_text(res, 11)) : NULL; + alarm_config.lookup = sqlite3_column_bytes(res, 12) > 0 ? strdupz((char *)sqlite3_column_text(res, 12)) : NULL; + alarm_config.every = sqlite3_column_bytes(res, 13) > 0 ? strdupz((char *)sqlite3_column_text(res, 13)) : NULL; + alarm_config.units = sqlite3_column_bytes(res, 14) > 0 ? strdupz((char *)sqlite3_column_text(res, 14)) : NULL; + + alarm_config.green = sqlite3_column_bytes(res, 15) > 0 ? strdupz((char *)sqlite3_column_text(res, 15)) : NULL; + alarm_config.red = sqlite3_column_bytes(res, 16) > 0 ? strdupz((char *)sqlite3_column_text(res, 16)) : NULL; + + alarm_config.calculation_expr = sqlite3_column_bytes(res, 17) > 0 ? strdupz((char *)sqlite3_column_text(res, 17)) : NULL; + alarm_config.warning_expr = sqlite3_column_bytes(res, 18) > 0 ? strdupz((char *)sqlite3_column_text(res, 18)) : NULL; + alarm_config.critical_expr = sqlite3_column_bytes(res, 19) > 0 ? strdupz((char *)sqlite3_column_text(res, 19)) : NULL; + + alarm_config.recipient = sqlite3_column_bytes(res, 20) > 0 ? strdupz((char *)sqlite3_column_text(res, 20)) : NULL; + alarm_config.exec = sqlite3_column_bytes(res, 21) > 0 ? strdupz((char *)sqlite3_column_text(res, 21)) : NULL; + alarm_config.delay = sqlite3_column_bytes(res, 22) > 0 ? strdupz((char *)sqlite3_column_text(res, 22)) : NULL; + alarm_config.repeat = sqlite3_column_bytes(res, 23) > 0 ? strdupz((char *)sqlite3_column_text(res, 23)) : NULL; + alarm_config.info = sqlite3_column_bytes(res, 24) > 0 ? strdupz((char *)sqlite3_column_text(res, 24)) : NULL; + alarm_config.options = sqlite3_column_bytes(res, 25) > 0 ? strdupz((char *)sqlite3_column_text(res, 25)) : NULL; + alarm_config.host_labels = sqlite3_column_bytes(res, 26) > 0 ? strdupz((char *)sqlite3_column_text(res, 26)) : NULL; + + alarm_config.p_db_lookup_dimensions = NULL; + alarm_config.p_db_lookup_method = NULL; + alarm_config.p_db_lookup_options = NULL; + alarm_config.p_db_lookup_after = 0; + alarm_config.p_db_lookup_before = 0; + + if (sqlite3_column_bytes(res, 30) > 0) { + + alarm_config.p_db_lookup_dimensions = sqlite3_column_bytes(res, 27) > 0 ? strdupz((char *)sqlite3_column_text(res, 27)) : NULL; + alarm_config.p_db_lookup_method = sqlite3_column_bytes(res, 28) > 0 ? 
strdupz((char *)sqlite3_column_text(res, 28)) : NULL;
+
+            BUFFER *tmp_buf = buffer_create(1024);
+            buffer_data_options2string(tmp_buf, sqlite3_column_int(res, 29));
+            alarm_config.p_db_lookup_options = strdupz((char *)buffer_tostring(tmp_buf));
+            buffer_free(tmp_buf);
+
+            alarm_config.p_db_lookup_after = sqlite3_column_int(res, 30);
+            alarm_config.p_db_lookup_before = sqlite3_column_int(res, 31);
+        }
+
+        alarm_config.p_update_every = sqlite3_column_int(res, 32);
+
+        p_alarm_config.cfg_hash = strdupz((char *) config_hash);
+        p_alarm_config.cfg = alarm_config;
+    }
+
+    if (likely(p_alarm_config.cfg_hash)) {
+        log_access("OG [%s (%s)]: Sent alert config %s.", wc->node_id, wc->host ? wc->host->hostname : "N/A", config_hash);
+        aclk_send_provide_alarm_cfg(&p_alarm_config);
+        freez((char *) cmd.data_param);
+        freez(p_alarm_config.cfg_hash);
+        destroy_aclk_alarm_configuration(&alarm_config);
+    }
+    else
+        log_access("AC [%s (%s)]: Alert config for %s not found.", wc->node_id, wc->host ? wc->host->hostname : "N/A", config_hash);
+
+bind_fail:
+    rc = sqlite3_finalize(res);
+    if (unlikely(rc != SQLITE_OK))
+        error_report("Failed to finalize statement when pushing alarm config hash, rc = %d", rc);
+
+    return rc;
+#endif
+    return 0;
+}
+
+
+// Start streaming alerts
+void aclk_start_alert_streaming(char *node_id, uint64_t batch_id, uint64_t start_seq_id)
+{
+#ifdef ENABLE_NEW_CLOUD_PROTOCOL
+    if (unlikely(!node_id))
+        return;
+
+    log_access("IN [%s (N/A)]: Start streaming alerts with batch_id %"PRIu64" and start_seq_id %"PRIu64".", node_id, batch_id, start_seq_id);
+
+    uuid_t node_uuid;
+    if (uuid_parse(node_id, node_uuid))
+        return;
+
+    struct aclk_database_worker_config *wc = NULL;
+    rrd_wrlock();
+    RRDHOST *host = find_host_by_node_id(node_id);
+    if (likely(host))
+        wc = (struct aclk_database_worker_config *)host->dbsync_worker;
+    rrd_unlock();
+
+    if (unlikely(host && !host->health_enabled)) {
+        log_access("AC [%s (N/A)]: Ignoring request to stream alert state changes, health is disabled.", node_id);
+        return;
+    }
+
+    if (likely(wc)) {
+        log_access("AC [%s (%s)]: Start streaming alerts enabled with batch_id %"PRIu64" and start_seq_id %"PRIu64".", node_id, wc->host ?
wc->host->hostname : "N/A", batch_id, start_seq_id); + __sync_synchronize(); + wc->alerts_batch_id = batch_id; + wc->alerts_start_seq_id = start_seq_id; + wc->alert_updates = 1; + __sync_synchronize(); + } + else + log_access("AC [%s (N/A)]: ACLK synchronization thread is not active.", node_id); + +#else + UNUSED(node_id); + UNUSED(start_seq_id); + UNUSED(batch_id); +#endif + return; +} + +void sql_process_queue_removed_alerts_to_aclk(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd) +{ + UNUSED(cmd); +#ifndef ENABLE_NEW_CLOUD_PROTOCOL + UNUSED(wc); +#else + BUFFER *sql = buffer_create(1024); + + buffer_sprintf(sql,"insert into aclk_alert_%s (alert_unique_id, date_created) " \ + "select unique_id alert_unique_id, strftime('%%s') date_created from health_log_%s " \ + "where new_status = -2 and updated_by_id = 0 and unique_id not in " \ + "(select alert_unique_id from aclk_alert_%s) order by unique_id asc " \ + "on conflict (alert_unique_id) do nothing;", wc->uuid_str, wc->uuid_str, wc->uuid_str); + + db_execute(buffer_tostring(sql)); + + buffer_free(sql); +#endif + return; +} + +void sql_queue_removed_alerts_to_aclk(RRDHOST *host) +{ +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + if (unlikely(!host->dbsync_worker)) + return; + + struct aclk_database_cmd cmd; + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = ACLK_DATABASE_QUEUE_REMOVED_ALERTS; + cmd.data = NULL; + cmd.data_param = NULL; + cmd.completion = NULL; + aclk_database_enq_cmd((struct aclk_database_worker_config *) host->dbsync_worker, &cmd); +#else + UNUSED(host); +#endif +} + +void aclk_process_send_alarm_snapshot(char *node_id, char *claim_id, uint64_t snapshot_id, uint64_t sequence_id) +{ + UNUSED(claim_id); +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + if (unlikely(!node_id)) + return; + + uuid_t node_uuid; + if (uuid_parse(node_id, node_uuid)) + return; + + struct aclk_database_worker_config *wc = NULL; + rrd_wrlock(); + RRDHOST *host = find_host_by_node_id(node_id); + if (likely(host)) + wc = (struct aclk_database_worker_config *)host->dbsync_worker; + rrd_unlock(); + + if (likely(wc)) { + log_access( + "IN [%s (%s)]: Request to send alerts snapshot, snapshot_id %" PRIu64 " and ack_sequence_id %" PRIu64, + wc->node_id, + wc->host ? wc->host->hostname : "N/A", + snapshot_id, + sequence_id); + __sync_synchronize(); + wc->alerts_snapshot_id = snapshot_id; + wc->alerts_ack_sequence_id = sequence_id; + __sync_synchronize(); + + struct aclk_database_cmd cmd; + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = ACLK_DATABASE_PUSH_ALERT_SNAPSHOT; + cmd.data_param = NULL; + cmd.completion = NULL; + aclk_database_enq_cmd(wc, &cmd); + } else + log_access("AC [%s (N/A)]: ACLK synchronization thread is not active.", node_id); +#else + UNUSED(node_id); + UNUSED(snapshot_id); + UNUSED(sequence_id); +#endif + return; +} + +void aclk_mark_alert_cloud_ack(char *uuid_str, uint64_t alerts_ack_sequence_id) +{ + BUFFER *sql = buffer_create(1024); + + if (alerts_ack_sequence_id != 0) { + buffer_sprintf( + sql, + "UPDATE aclk_alert_%s SET date_cloud_ack = strftime('%%s','now') WHERE sequence_id <= %" PRIu64 "", + uuid_str, + alerts_ack_sequence_id); + db_execute(buffer_tostring(sql)); + } + + buffer_free(sql); +} + +#ifdef ENABLE_NEW_CLOUD_PROTOCOL +void health_alarm_entry2proto_nolock(struct alarm_log_entry *alarm_log, ALARM_ENTRY *ae, RRDHOST *host) +{ + char *edit_command = ae->source ? 
health_edit_command_from_source(ae->source) : strdupz("UNKNOWN=0");
+    char config_hash_id[GUID_LEN + 1];
+    uuid_unparse_lower(ae->config_hash_id, config_hash_id);
+
+    alarm_log->chart = strdupz((char *)ae->chart);
+    alarm_log->name = strdupz((char *)ae->name);
+    alarm_log->family = strdupz((char *)ae->family);
+
+    alarm_log->batch_id = 0;
+    alarm_log->sequence_id = 0;
+    alarm_log->when = (time_t)ae->when;
+
+    alarm_log->config_hash = strdupz((char *)config_hash_id);
+
+    alarm_log->utc_offset = host->utc_offset;
+    alarm_log->timezone = strdupz((char *)host->abbrev_timezone);
+    alarm_log->exec_path = ae->exec ? strdupz((char *)ae->exec) : strdupz((char *)host->health_default_exec);
+    alarm_log->conf_source = ae->source ? strdupz((char *)ae->source) : strdupz((char *)"");
+
+    alarm_log->command = strdupz((char *)edit_command);
+
+    alarm_log->duration = (time_t)ae->duration;
+    alarm_log->non_clear_duration = (time_t)ae->non_clear_duration;
+    alarm_log->status = rrdcalc_status_to_proto_enum((RRDCALC_STATUS)ae->new_status);
+    alarm_log->old_status = rrdcalc_status_to_proto_enum((RRDCALC_STATUS)ae->old_status);
+    alarm_log->delay = (int)ae->delay;
+    alarm_log->delay_up_to_timestamp = (time_t)ae->delay_up_to_timestamp;
+    alarm_log->last_repeat = (time_t)ae->last_repeat;
+
+    alarm_log->silenced =
+        ((ae->flags & HEALTH_ENTRY_FLAG_SILENCED) || (ae->recipient && !strncmp((char *)ae->recipient, "silent", 6))) ?
+            1 :
+            0;
+
+    alarm_log->value_string = strdupz(ae->new_value_string);
+    alarm_log->old_value_string = strdupz(ae->old_value_string);
+
+    alarm_log->value = (!isnan(ae->new_value)) ? (calculated_number)ae->new_value : 0;
+    alarm_log->old_value = (!isnan(ae->old_value)) ? (calculated_number)ae->old_value : 0;
+
+    alarm_log->updated = (ae->flags & HEALTH_ENTRY_FLAG_UPDATED) ? 1 : 0;
+    alarm_log->rendered_info = strdupz(ae->info);
+
+    freez(edit_command);
+}
+#endif
+
+static int have_recent_alarm(RRDHOST *host, uint32_t alarm_id, time_t mark)
+{
+    ALARM_ENTRY *ae = host->health_log.alarms;
+
+    while (ae) {
+        if (ae->alarm_id == alarm_id && ae->unique_id > mark &&
+            (ae->new_status != RRDCALC_STATUS_WARNING && ae->new_status != RRDCALC_STATUS_CRITICAL))
+            return 1;
+        ae = ae->next;
+    }
+
+    return 0;
+}
+
+#define ALARM_EVENTS_PER_CHUNK 10
+void aclk_push_alert_snapshot_event(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd)
+{
+#ifndef ENABLE_NEW_CLOUD_PROTOCOL
+    UNUSED(wc);
+    UNUSED(cmd);
+#else
+    UNUSED(cmd);
+    // perhaps we don't need this for snapshots
+    if (unlikely(!wc->alert_updates)) {
+        log_access("AC [%s (%s)]: Ignoring alert snapshot event, updates have been turned off for this node.", wc->node_id, wc->host ? wc->host->hostname : "N/A");
+        return;
+    }
+
+    if (unlikely(!wc->host)) {
+        error_report("ACLK synchronization thread for %s is not linked to HOST", wc->host_guid);
+        return;
+    }
+
+    char *claim_id = is_agent_claimed();
+    if (unlikely(!claim_id))
+        return;
+
+    log_access("OG [%s (%s)]: Sending alerts snapshot, snapshot_id %" PRIu64, wc->node_id, wc->host ?
wc->host->hostname : "N/A", wc->alerts_snapshot_id);
+
+    aclk_mark_alert_cloud_ack(wc->uuid_str, wc->alerts_ack_sequence_id);
+
+    RRDHOST *host = wc->host;
+    uint32_t cnt = 0;
+
+    netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
+
+    ALARM_ENTRY *ae = host->health_log.alarms;
+
+    for (; ae; ae = ae->next) {
+        if (likely(ae->updated_by_id))
+            continue;
+
+        if (unlikely(ae->new_status == RRDCALC_STATUS_UNINITIALIZED))
+            continue;
+
+        if (have_recent_alarm(host, ae->alarm_id, ae->unique_id))
+            continue;
+
+        cnt++;
+    }
+
+    if (cnt) {
+        uint32_t chunk = 1, chunks = 0;
+
+        chunks = (cnt / ALARM_EVENTS_PER_CHUNK) + (cnt % ALARM_EVENTS_PER_CHUNK != 0);
+        ae = host->health_log.alarms;
+
+        cnt = 0;
+        struct alarm_snapshot alarm_snap;
+        alarm_snap.node_id = wc->node_id;
+        alarm_snap.claim_id = claim_id;
+        alarm_snap.snapshot_id = wc->alerts_snapshot_id;
+        alarm_snap.chunks = chunks;
+        alarm_snap.chunk = chunk;
+
+        alarm_snapshot_proto_ptr_t snapshot_proto = NULL;
+
+        for (; ae; ae = ae->next) {
+            if (likely(ae->updated_by_id))
+                continue;
+
+            if (unlikely(ae->new_status == RRDCALC_STATUS_UNINITIALIZED))
+                continue;
+
+            if (have_recent_alarm(host, ae->alarm_id, ae->unique_id))
+                continue;
+
+            cnt++;
+
+            struct alarm_log_entry alarm_log;
+            alarm_log.node_id = wc->node_id;
+            alarm_log.claim_id = claim_id;
+
+            if (!snapshot_proto)
+                snapshot_proto = generate_alarm_snapshot_proto(&alarm_snap);
+
+            health_alarm_entry2proto_nolock(&alarm_log, ae, host);
+            add_alarm_log_entry2snapshot(snapshot_proto, &alarm_log);
+
+            if (cnt == ALARM_EVENTS_PER_CHUNK) {
+                aclk_send_alarm_snapshot(snapshot_proto);
+
+                cnt = 0;
+
+                if (chunk < chunks) {
+                    chunk++;
+
+                    struct alarm_snapshot alarm_snap;
+                    alarm_snap.node_id = wc->node_id;
+                    alarm_snap.claim_id = claim_id;
+                    alarm_snap.snapshot_id = wc->alerts_snapshot_id;
+                    alarm_snap.chunks = chunks;
+                    alarm_snap.chunk = chunk;
+
+                    snapshot_proto = generate_alarm_snapshot_proto(&alarm_snap);
+                }
+            }
+            destroy_alarm_log_entry(&alarm_log);
+        }
+        if (cnt)
+            aclk_send_alarm_snapshot(snapshot_proto);
+    }
+
+    netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
+    wc->alerts_snapshot_id = 0;
+
+    freez(claim_id);
+#endif
+    return;
+}
+
+void sql_aclk_alert_clean_dead_entries(RRDHOST *host)
+{
+#ifdef ENABLE_NEW_CLOUD_PROTOCOL
+    if (!claimed())
+        return;
+
+    if (unlikely(!host->dbsync_worker))
+        return;
+
+    char uuid_str[GUID_LEN + 1];
+    uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
+
+    BUFFER *sql = buffer_create(1024);
+
+    buffer_sprintf(sql,"delete from aclk_alert_%s where alert_unique_id not in "
+            " (select unique_id from health_log_%s); ", uuid_str, uuid_str);
+
+    char *err_msg = NULL;
+    int rc = sqlite3_exec(db_meta, buffer_tostring(sql), NULL, NULL, &err_msg);
+    if (rc != SQLITE_OK) {
+        error_report("Failed when trying to clean stale ACLK alert entries from aclk_alert_%s, error message \"%s\"",
+                     uuid_str, err_msg);
+        sqlite3_free(err_msg);
+    }
+    buffer_free(sql);
+#else
+    UNUSED(host);
+#endif
+}
diff --git a/database/sqlite/sqlite_aclk_alert.h b/database/sqlite/sqlite_aclk_alert.h
new file mode 100644
index 000000000..1aaaa5d23
--- /dev/null
+++ b/database/sqlite/sqlite_aclk_alert.h
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SQLITE_ACLK_ALERT_H
+#define NETDATA_SQLITE_ACLK_ALERT_H
+
+extern sqlite3 *db_meta;
+
+int aclk_add_alert_event(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd);
+void aclk_push_alert_event(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd);
+void
aclk_send_alarm_health_log(char *node_id); +void aclk_push_alarm_health_log(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +void aclk_send_alarm_configuration (char *config_hash); +int aclk_push_alert_config_event(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +void aclk_start_alert_streaming(char *node_id, uint64_t batch_id, uint64_t start_seq_id); +void sql_queue_removed_alerts_to_aclk(RRDHOST *host); +void sql_process_queue_removed_alerts_to_aclk(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +void aclk_push_alert_snapshot_event(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +void aclk_process_send_alarm_snapshot(char *node_id, char *claim_id, uint64_t snapshot_id, uint64_t sequence_id); + +#endif //NETDATA_SQLITE_ACLK_ALERT_H diff --git a/database/sqlite/sqlite_aclk_chart.c b/database/sqlite/sqlite_aclk_chart.c new file mode 100644 index 000000000..4b887abaa --- /dev/null +++ b/database/sqlite/sqlite_aclk_chart.c @@ -0,0 +1,993 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "sqlite_functions.h" +#include "sqlite_aclk_chart.h" + +#ifdef ENABLE_NEW_CLOUD_PROTOCOL +#include "../../aclk/aclk_charts_api.h" +#include "../../aclk/aclk.h" + +static inline int sql_queue_chart_payload(struct aclk_database_worker_config *wc, + void *data, enum aclk_database_opcode opcode) +{ + int rc; + if (unlikely(!wc)) + return 1; + + struct aclk_database_cmd cmd; + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = opcode; + cmd.data = data; + rc = aclk_database_enq_cmd_noblock(wc, &cmd); + return rc; +} + +static int payload_sent(char *uuid_str, uuid_t *uuid, void *payload, size_t payload_size) +{ + static __thread sqlite3_stmt *res = NULL; + int rc; + int send_status = 0; + + if (unlikely(!res)) { + BUFFER *sql = buffer_create(1024); + buffer_sprintf(sql,"SELECT 1 FROM aclk_chart_latest_%s acl, aclk_chart_payload_%s acp " + "WHERE acl.unique_id = acp.unique_id AND acl.uuid = @uuid AND acp.payload = @payload;", + uuid_str, uuid_str); + rc = prepare_statement(db_meta, (char *) buffer_tostring(sql), &res); + buffer_free(sql); + if (rc != SQLITE_OK) { + error_report("Failed to prepare statement to check payload data"); + return 0; + } + } + + rc = sqlite3_bind_blob(res, 1, uuid , sizeof(*uuid), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + rc = sqlite3_bind_blob(res, 2, payload , payload_size, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + while (sqlite3_step(res) == SQLITE_ROW) { + send_status = sqlite3_column_int(res, 0); + } + +bind_fail: + if (unlikely(sqlite3_reset(res) != SQLITE_OK)) + error_report("Failed to reset statement in check payload, rc = %d", rc); + return send_status; +} + +static int aclk_add_chart_payload(struct aclk_database_worker_config *wc, uuid_t *uuid, char *claim_id, + ACLK_PAYLOAD_TYPE payload_type, void *payload, size_t payload_size) +{ + static __thread sqlite3_stmt *res_chart = NULL; + int rc; + + rc = payload_sent(wc->uuid_str, uuid, payload, payload_size); + if (rc == 1) + return 0; + + if (unlikely(!res_chart)) { + BUFFER *sql = buffer_create(1024); + + buffer_sprintf(sql,"INSERT INTO aclk_chart_payload_%s (unique_id, uuid, claim_id, date_created, type, payload) " \ + "VALUES (@unique_id, @uuid, @claim_id, strftime('%%s','now'), @type, @payload);", wc->uuid_str); + + rc = prepare_statement(db_meta, (char *) buffer_tostring(sql), &res_chart); + buffer_free(sql); + + if (rc != SQLITE_OK) { + error_report("Failed to prepare 
statement to store chart payload data");
+            return 1;
+        }
+    }
+
+    uuid_t unique_uuid;
+    uuid_generate(unique_uuid);
+
+    uuid_t claim_uuid;
+    if (uuid_parse(claim_id, claim_uuid))
+        return 1;
+
+    rc = sqlite3_bind_blob(res_chart, 1, &unique_uuid , sizeof(unique_uuid), SQLITE_STATIC);
+    if (unlikely(rc != SQLITE_OK))
+        goto bind_fail;
+
+    rc = sqlite3_bind_blob(res_chart, 2, uuid , sizeof(*uuid), SQLITE_STATIC);
+    if (unlikely(rc != SQLITE_OK))
+        goto bind_fail;
+
+    rc = sqlite3_bind_blob(res_chart, 3, &claim_uuid , sizeof(claim_uuid), SQLITE_STATIC);
+    if (unlikely(rc != SQLITE_OK))
+        goto bind_fail;
+
+    rc = sqlite3_bind_int(res_chart, 4, payload_type);
+    if (unlikely(rc != SQLITE_OK))
+        goto bind_fail;
+
+    rc = sqlite3_bind_blob(res_chart, 5, payload, payload_size, SQLITE_STATIC);
+    if (unlikely(rc != SQLITE_OK))
+        goto bind_fail;
+
+    rc = execute_insert(res_chart);
+    if (unlikely(rc != SQLITE_DONE))
+        error_report("Failed to store chart payload event, rc = %d", rc);
+    else {
+        wc->chart_payload_count++;
+        time_t now = now_realtime_sec();
+        if (wc->rotation_after > now && wc->rotation_after < now + ACLK_DATABASE_ROTATION_DELAY)
+            wc->rotation_after = now + ACLK_DATABASE_ROTATION_DELAY;
+    }
+
+bind_fail:
+    if (unlikely(sqlite3_reset(res_chart) != SQLITE_OK))
+        error_report("Failed to reset statement in store chart payload, rc = %d", rc);
+    return (rc != SQLITE_DONE);
+}
+
+
+int aclk_add_chart_event(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd)
+{
+    int rc = 0;
+    CHECK_SQLITE_CONNECTION(db_meta);
+
+    char *claim_id = is_agent_claimed();
+
+    RRDSET *st = cmd.data;
+
+    if (likely(claim_id)) {
+        struct chart_instance_updated chart_payload;
+        memset(&chart_payload, 0, sizeof(chart_payload));
+        chart_payload.config_hash = get_str_from_uuid(&st->state->hash_id);
+        chart_payload.update_every = st->update_every;
+        chart_payload.memory_mode = st->rrd_memory_mode;
+        chart_payload.name = (char *)st->name;
+        chart_payload.node_id = wc->node_id;
+        chart_payload.claim_id = claim_id;
+        chart_payload.id = strdupz(st->id);
+
+        struct label_index *labels = &st->state->labels;
+        netdata_rwlock_wrlock(&labels->labels_rwlock);
+        struct label *label_list = labels->head;
+        struct label *chart_label = NULL;
+        while (label_list) {
+            chart_label = add_label_to_list(chart_label, label_list->key, label_list->value, label_list->label_source);
+            label_list = label_list->next;
+        }
+        netdata_rwlock_unlock(&labels->labels_rwlock);
+        chart_payload.label_head = chart_label;
+
+        size_t size;
+        char *payload = generate_chart_instance_updated(&size, &chart_payload);
+        if (likely(payload))
+            rc = aclk_add_chart_payload(wc, st->chart_uuid, claim_id, ACLK_PAYLOAD_CHART, (void *) payload, size);
+        freez(payload);
+        chart_instance_updated_destroy(&chart_payload);
+    }
+    return rc;
+}
+
+static inline int aclk_upd_dimension_event(struct aclk_database_worker_config *wc, char *claim_id, uuid_t *dim_uuid,
+                                           const char *dim_id, const char *dim_name, const char *chart_type_id, time_t first_time, time_t last_time)
+{
+    int rc = 0;
+    size_t size;
+
+    if (unlikely(!dim_uuid || !dim_id || !dim_name || !chart_type_id))
+        return 0;
+
+    struct chart_dimension_updated dim_payload;
+    memset(&dim_payload, 0, sizeof(dim_payload));
+
+#ifdef NETDATA_INTERNAL_CHECKS
+    if (!first_time)
+        info("Host %s (node %s) deleting dimension id=[%s] name=[%s] chart=[%s]",
+             wc->host_guid, wc->node_id, dim_id, dim_name, chart_type_id);
+#endif
+
+    dim_payload.node_id = wc->node_id;
+    dim_payload.claim_id = claim_id;
+    dim_payload.name =
dim_name; + dim_payload.id = dim_id; + dim_payload.chart_id = chart_type_id; + dim_payload.created_at.tv_sec = first_time; + dim_payload.last_timestamp.tv_sec = last_time; + char *payload = generate_chart_dimension_updated(&size, &dim_payload); + if (likely(payload)) + rc = aclk_add_chart_payload(wc, dim_uuid, claim_id, ACLK_PAYLOAD_DIMENSION, (void *)payload, size); + freez(payload); + return rc; +} + +void aclk_process_dimension_deletion(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd) +{ + int rc = 0; + sqlite3_stmt *res = NULL; + + if (!aclk_use_new_cloud_arch || !aclk_connected) + return; + + if (unlikely(!db_meta)) + return; + + uuid_t host_id; + if (uuid_parse(wc->host_guid, host_id)) + return; + + char *claim_id = is_agent_claimed(); + if (!claim_id) + return; + + rc = sqlite3_prepare_v2(db_meta, "DELETE FROM dimension_delete where host_id = @host_id " \ + "RETURNING dimension_id, dimension_name, chart_type_id, dim_id LIMIT 10;", -1, &res, 0); + + if (rc != SQLITE_OK) { + error_report("Failed to prepare statement when trying to delete dimension deletes"); + freez(claim_id); + return; + } + + rc = sqlite3_bind_blob(res, 1, &host_id , sizeof(host_id), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + unsigned count = 0; + while (sqlite3_step(res) == SQLITE_ROW) { + (void) aclk_upd_dimension_event( + wc, + claim_id, + (uuid_t *)sqlite3_column_text(res, 3), + (const char *)sqlite3_column_text(res, 0), + (const char *)sqlite3_column_text(res, 1), + (const char *)sqlite3_column_text(res, 2), + 0, + 0); + count++; + } + + if (count) { + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = ACLK_DATABASE_DIM_DELETION; + if (aclk_database_enq_cmd_noblock(wc, &cmd)) + info("Failed to queue a dimension deletion message"); + } + +bind_fail: + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize statement when adding dimension deletion events, rc = %d", rc); + freez(claim_id); + return; +} + +int aclk_add_dimension_event(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd) +{ + int rc = 0; + CHECK_SQLITE_CONNECTION(db_meta); + + char *claim_id = is_agent_claimed(); + + RRDDIM *rd = cmd.data; + + if (likely(claim_id)) { + time_t now = now_realtime_sec(); + + time_t first_t = rd->state->query_ops.oldest_time(rd); + time_t last_t = rd->state->query_ops.latest_time(rd); + + int live = ((now - last_t) < (RRDSET_MINIMUM_LIVE_COUNT * rd->update_every)); + + rc = aclk_upd_dimension_event( + wc, + claim_id, + &rd->state->metric_uuid, + rd->id, + rd->name, + rd->rrdset->id, + first_t, + live ? 0 : last_t); + + freez(claim_id); + } + rrddim_flag_clear(rd, RRDDIM_FLAG_ACLK); + return rc; +} + + +void aclk_send_chart_event(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd) +{ + int rc; + + wc->chart_pending = 0; + if (unlikely(!wc->chart_updates)) { + log_access("AC [%s (%s)]: Ignoring chart push event, updates have been turned off for this node.", wc->node_id, wc->host ? wc->host->hostname : "N/A"); + return; + } + + char *claim_id = is_agent_claimed(); + if (unlikely(!claim_id)) + return; + + uuid_t claim_uuid; + if (uuid_parse(claim_id, claim_uuid)) + return; + + int limit = cmd.count > 0 ? 
cmd.count : 1; + + uint64_t first_sequence; + uint64_t last_sequence; + time_t last_timestamp; + + BUFFER *sql = buffer_create(1024); + + sqlite3_stmt *res = NULL; + + buffer_sprintf(sql, "SELECT ac.sequence_id, acp.payload, ac.date_created, ac.type, ac.uuid " \ + "FROM aclk_chart_%s ac, aclk_chart_payload_%s acp " \ + "WHERE ac.date_submitted IS NULL AND ac.unique_id = acp.unique_id AND ac.update_count > 0 " \ + "AND acp.claim_id = @claim_id ORDER BY ac.sequence_id ASC LIMIT %d;", wc->uuid_str, wc->uuid_str, limit); + + rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0); + if (rc != SQLITE_OK) { + error_report("Failed to prepare statement when trying to send a chart update via ACLK"); + buffer_free(sql); + freez(claim_id); + return; + } + + rc = sqlite3_bind_blob(res, 1, claim_uuid , sizeof(claim_uuid), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + char **payload_list = callocz(limit+1, sizeof(char *)); + size_t *payload_list_size = callocz(limit+1, sizeof(size_t)); + size_t *payload_list_max_size = callocz(limit+1, sizeof(size_t)); + struct aclk_message_position *position_list = callocz(limit+1, sizeof(*position_list)); + int *is_dim = callocz(limit+1, sizeof(*is_dim)); + + int loop = cmd.param1; + + uint64_t start_sequence_id = wc->chart_sequence_id; + + while (loop > 0) { + uint64_t previous_sequence_id = wc->chart_sequence_id; + int count = 0; + first_sequence = 0; + last_sequence = 0; + while (count < limit && sqlite3_step(res) == SQLITE_ROW) { + size_t payload_size = sqlite3_column_bytes(res, 1); + if (payload_list_max_size[count] < payload_size) { + freez(payload_list[count]); + payload_list_max_size[count] = payload_size; + payload_list[count] = mallocz(payload_size); + } + payload_list_size[count] = payload_size; + memcpy(payload_list[count], sqlite3_column_blob(res, 1), payload_size); + position_list[count].sequence_id = (uint64_t)sqlite3_column_int64(res, 0); + position_list[count].previous_sequence_id = previous_sequence_id; + position_list[count].seq_id_creation_time.tv_sec = sqlite3_column_int64(res, 2); + position_list[count].seq_id_creation_time.tv_usec = 0; + if (!first_sequence) + first_sequence = position_list[count].sequence_id; + last_sequence = position_list[count].sequence_id; + last_timestamp = position_list[count].seq_id_creation_time.tv_sec; + previous_sequence_id = last_sequence; + is_dim[count] = sqlite3_column_int(res, 3) > 0; + count++; + if (wc->chart_payload_count) + wc->chart_payload_count--; + } + freez(payload_list[count]); + payload_list_max_size[count] = 0; + payload_list[count] = NULL; + + rc = sqlite3_reset(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to reset statement when pushing chart events, rc = %d", rc); + + if (likely(first_sequence)) { + buffer_flush(sql); + + db_lock(); + buffer_sprintf(sql, "UPDATE aclk_chart_%s SET status = NULL, date_submitted=strftime('%%s','now') " + "WHERE date_submitted IS NULL AND sequence_id BETWEEN %" PRIu64 " AND %" PRIu64 ";", + wc->uuid_str, first_sequence, last_sequence); + db_execute(buffer_tostring(sql)); + + buffer_flush(sql); + buffer_sprintf(sql, "INSERT OR REPLACE INTO aclk_chart_latest_%s (uuid, unique_id, date_submitted) " + " SELECT uuid, unique_id, date_submitted FROM aclk_chart_%s s " + " WHERE date_submitted IS NOT NULL AND sequence_id BETWEEN %" PRIu64 " AND %" PRIu64 + " ;", + wc->uuid_str, wc->uuid_str, first_sequence, last_sequence); + db_execute(buffer_tostring(sql)); + db_unlock(); + + aclk_chart_inst_and_dim_update(payload_list, 
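Note: aclk_send_chart_event recycles one set of payload slots across every pass of the outer loop: a slot is reallocated only when it is too small for the next payload, and the entry after the last filled slot is reset to NULL as a terminator. The grow-only slot idiom, assuming the agent's mallocz/freez wrappers from libnetdata:

#include <string.h>

// Grow-only slot: reallocate only when the incoming payload is larger
// than anything this slot has held before, then copy the blob in.
static void put_payload(char **list, size_t *max_size, int slot, const void *blob, size_t size)
{
    if (max_size[slot] < size) {
        freez(list[slot]);
        max_size[slot] = size;
        list[slot] = mallocz(size);
    }
    memcpy(list[slot], blob, size);
}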
payload_list_size, is_dim, position_list, wc->batch_id); + log_access("OG [%s (%s)]: Sending charts and dimensions update, batch_id %"PRIu64", first sequence %"PRIu64", last sequence %"PRIu64, wc->node_id, wc->host ? wc->host->hostname : "N/A", wc->batch_id, first_sequence, last_sequence); + wc->chart_sequence_id = last_sequence; + wc->chart_timestamp = last_timestamp; + } + else + break; + --loop; + } + + if (start_sequence_id != wc->chart_sequence_id) { + time_t now = now_realtime_sec(); + if (wc->rotation_after > now && wc->rotation_after < now + ACLK_DATABASE_ROTATION_DELAY) + wc->rotation_after = now + ACLK_DATABASE_ROTATION_DELAY; + } + else { + wc->chart_payload_count = sql_get_pending_count(wc); + if (!wc->chart_payload_count) + log_access("AC [%s (%s)]: Sync of charts and dimensions done in %ld seconds.", wc->node_id, wc->host ? wc->host->hostname : "N/A", now_realtime_sec() - wc->startup_time); + } + + for (int i = 0; i <= limit; ++i) + freez(payload_list[i]); + + freez(payload_list); + freez(payload_list_size); + freez(payload_list_max_size); + freez(position_list); + freez(is_dim); + +bind_fail: + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize statement when pushing chart events, rc = %d", rc); + + buffer_free(sql); + freez(claim_id); + return; +} + +// Push one chart config to the cloud +int aclk_send_chart_config(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd) +{ + UNUSED(wc); + + CHECK_SQLITE_CONNECTION(db_meta); + + sqlite3_stmt *res = NULL; + int rc = 0; + + char *hash_id = (char *) cmd.data_param; + + uuid_t hash_uuid; + rc = uuid_parse(hash_id, hash_uuid); + + if (unlikely(rc)) { + freez((char *) cmd.data_param); + return 1; + } + + BUFFER *sql = buffer_create(1024); + buffer_sprintf(sql, "SELECT type, family, context, title, priority, plugin, module, unit, chart_type " \ + "FROM chart_hash WHERE hash_id = @hash_id;"); + + rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0); + if (rc != SQLITE_OK) { + error_report("Failed to prepare statement when trying to fetch a chart hash configuration"); + goto fail; + } + + rc = sqlite3_bind_blob(res, 1, &hash_uuid , sizeof(hash_uuid), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + struct chart_config_updated chart_config; + chart_config.config_hash = NULL; + + while (sqlite3_step(res) == SQLITE_ROW) { + chart_config.type = strdupz((char *)sqlite3_column_text(res, 0)); + chart_config.family = strdupz((char *)sqlite3_column_text(res, 1)); + chart_config.context = strdupz((char *)sqlite3_column_text(res, 2)); + chart_config.title = strdupz((char *)sqlite3_column_text(res, 3)); + chart_config.priority = sqlite3_column_int64(res, 4); + chart_config.plugin = strdupz((char *)sqlite3_column_text(res, 5)); + chart_config.module = sqlite3_column_bytes(res, 6) > 0 ? strdupz((char *)sqlite3_column_text(res, 6)) : NULL; + chart_config.chart_type = (RRDSET_TYPE) sqlite3_column_int(res,8); + chart_config.units = strdupz((char *)sqlite3_column_text(res, 7)); + chart_config.config_hash = strdupz(hash_id); + } + + if (likely(chart_config.config_hash)) { + log_access("OG [%s (%s)]: Sending chart config for %s.", wc->node_id, wc->host ? wc->host->hostname : "N/A", hash_id); + aclk_chart_config_updated(&chart_config, 1); + destroy_chart_config_updated(&chart_config); + } + else + log_access("AC [%s (%s)]: Chart config for %s not found.", wc->node_id, wc->host ? 
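Note: in aclk_send_chart_config above, chart_config.config_hash doubles as the found flag: it starts NULL and is set only when the SELECT returns a row, which is what gates the send-versus-not-found branch. Reduced to its essentials:

#include <sqlite3.h>
#include <stdlib.h>
#include <string.h>

// NULL result means "no row matched"; a non-NULL result is both the
// fetched value and the signal that a config should be pushed.
static char *fetch_config_hash(sqlite3_stmt *res)
{
    char *config_hash = NULL;
    while (sqlite3_step(res) == SQLITE_ROW) {
        const char *text = (const char *) sqlite3_column_text(res, 0);
        if (text) {
            free(config_hash);             // keep only the last row, leak-free
            config_hash = strdup(text);
        }
    }
    return config_hash;                    // caller frees when non-NULL
}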
wc->host->hostname : "N/A", hash_id); + + bind_fail: + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize statement when pushing chart config hash, rc = %d", rc); + fail: + freez((char *) cmd.data_param); + buffer_free(sql); + return rc; +} + + +void aclk_receive_chart_ack(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd) +{ + int rc; + sqlite3_stmt *res = NULL; + + log_access("IN [%s (%s)]: Received ack chart sequence id %"PRIu64, wc->node_id, wc->host ? wc->host->hostname : "N/A", cmd.param1); + + BUFFER *sql = buffer_create(1024); + + buffer_sprintf(sql, "UPDATE aclk_chart_%s SET date_updated=strftime('%%s','now') WHERE sequence_id <= @sequence_id " + "AND date_submitted IS NOT NULL AND date_updated IS NULL;", wc->uuid_str); + + rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0); + if (rc != SQLITE_OK) { + error_report("Failed to prepare statement to count sequence ids in the database"); + goto prepare_fail; + } + + rc = sqlite3_bind_int64(res, 1, (uint64_t) cmd.param1); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + rc = execute_insert(res); + if (rc != SQLITE_DONE) + error_report("Failed to ACK sequence id, rc = %d", rc); + + bind_fail: + if (unlikely(sqlite3_finalize(res) != SQLITE_OK)) + error_report("Failed to finalize statement to ACK older sequence ids, rc = %d", rc); + + prepare_fail: + buffer_free(sql); + return; +} + +void aclk_receive_chart_reset(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd) +{ + BUFFER *sql = buffer_create(1024); + buffer_sprintf(sql, "UPDATE aclk_chart_%s SET status = NULL, date_submitted = NULL WHERE sequence_id >= %"PRIu64";", + wc->uuid_str, cmd.param1); + db_execute(buffer_tostring(sql)); + if (cmd.param1 == 1) { + db_lock(); + buffer_flush(sql); + log_access("IN [%s (%s)]: Received chart full resync.", wc->node_id, wc->host ? wc->host->hostname : "N/A"); + buffer_sprintf(sql, "DELETE FROM aclk_chart_payload_%s; DELETE FROM aclk_chart_%s; " \ "DELETE FROM aclk_chart_latest_%s;", wc->uuid_str, wc->uuid_str, wc->uuid_str); + + db_execute("BEGIN TRANSACTION;"); + db_execute(buffer_tostring(sql)); + db_execute("COMMIT TRANSACTION;"); + + db_unlock(); + wc->chart_sequence_id = 0; + wc->chart_timestamp = 0; + wc->chart_payload_count = 0; + + RRDHOST *host = wc->host; + if (likely(host)) { + rrdhost_rdlock(host); + RRDSET *st; + rrdset_foreach_read(st, host) + { + rrdset_rdlock(st); + rrdset_flag_clear(st, RRDSET_FLAG_ACLK); + RRDDIM *rd; + rrddim_foreach_read(rd, st) + { + rd->state->aclk_live_status = (rd->state->aclk_live_status == 0); + } + rrdset_unlock(st); + } + rrdhost_unlock(host); + } + else + error_report("ACLK synchronization thread for %s is not linked to HOST", wc->host_guid); + } + else { + log_access("AC [%s (%s)]: Restarting chart sync from sequence %"PRIu64, wc->node_id, wc->host ?
wc->host->hostname : "N/A", cmd.param1); + wc->chart_payload_count = sql_get_pending_count(wc); + sql_get_last_chart_sequence(wc); + } + buffer_free(sql); + wc->chart_updates = 1; + return; +} + + +// +// Functions called directly from ACLK threads and will queue commands +// +void aclk_get_chart_config(char **hash_id) +{ + struct aclk_database_worker_config *wc = (struct aclk_database_worker_config *)localhost->dbsync_worker; + + if (unlikely(!wc || !hash_id)) + return; + + struct aclk_database_cmd cmd; + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = ACLK_DATABASE_PUSH_CHART_CONFIG; + for (int i = 0; hash_id[i]; ++i) { + // TODO: Verify that we have a valid hash_id + log_access("IN [%s (%s)]: Request %d for chart config with hash %s received.", wc->node_id, wc->host ? wc->host->hostname : "N/A", i, hash_id[i]); + cmd.data_param = (void *)strdupz(hash_id[i]); + aclk_database_enq_cmd(wc, &cmd); + } + return; +} + +// Send a command to a node_id +// Need to discover the thread that will handle the request +// if thread not in active hosts, then try to find in the queue +static void aclk_submit_param_command(char *node_id, enum aclk_database_opcode aclk_command, uint64_t param) +{ + if (unlikely(!node_id)) + return; + + struct aclk_database_worker_config *wc = NULL; + struct aclk_database_cmd cmd; + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = aclk_command; + cmd.param1 = param; + + rrd_wrlock(); + RRDHOST *host = find_host_by_node_id(node_id); + if (likely(host)) + wc = (struct aclk_database_worker_config *)host->dbsync_worker; + rrd_unlock(); + if (wc) + aclk_database_enq_cmd(wc, &cmd); + else { + if (aclk_worker_enq_cmd(node_id, &cmd)) + log_access("AC [%s (N/A)]: ACLK synchronization thread is not active.", node_id); + } + return; +} + +void aclk_ack_chart_sequence_id(char *node_id, uint64_t last_sequence_id) +{ + if (unlikely(!node_id)) + return; + + log_access("AC [%s (N/A)]: Node reports last sequence id received %"PRIu64, node_id, last_sequence_id); + aclk_submit_param_command(node_id, ACLK_DATABASE_CHART_ACK, last_sequence_id); + return; +} + +// Start streaming charts / dimensions for node_id +void aclk_start_streaming(char *node_id, uint64_t sequence_id, time_t created_at, uint64_t batch_id) +{ + UNUSED(created_at); + if (unlikely(!node_id)) + return; + + log_access("IN [%s (N/A)]: Start streaming charts from sequence %"PRIu64" t=%ld, batch=%"PRIu64, node_id, + sequence_id, created_at, batch_id); + + uuid_t node_uuid; + if (uuid_parse(node_id, node_uuid)) + return; + + struct aclk_database_worker_config *wc = NULL; + rrd_wrlock(); + RRDHOST *host = localhost; + while(host) { + if (host->node_id && !(uuid_compare(*host->node_id, node_uuid))) { + rrd_unlock(); + wc = (struct aclk_database_worker_config *)host->dbsync_worker; + if (likely(wc)) { + wc->chart_reset_count++; + __sync_synchronize(); + wc->chart_updates = 0; + wc->batch_id = batch_id; + __sync_synchronize(); + wc->batch_created = now_realtime_sec(); + if (sequence_id > wc->chart_sequence_id || wc->chart_reset_count > 10) { + log_access("AC [%s (%s)]: Requesting full resync from the cloud " + "(reset=%d, remote_seq=%"PRIu64", local_seq=%"PRIu64")" + , wc->node_id, wc->host ? 
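Note: the handshake in aclk_start_streaming above boils down to a three-way comparison between the sequence id the cloud reports and the one stored locally, with a reset counter to break handshake loops. A condensed sketch of that decision:

#include <stdint.h>

// Condensed from aclk_start_streaming: the cloud being ahead (or too
// many resets in a row) forces a full resync; the cloud being behind
// (or at zero) replays from remote_seq + 1; equality simply resumes.
typedef enum { FULL_RESYNC, RESET_AND_REPLAY, RESUME } stream_action_t;

static stream_action_t decide_stream_action(uint64_t remote_seq, uint64_t local_seq, int reset_count)
{
    if (remote_seq > local_seq || reset_count > 10)
        return FULL_RESYNC;
    if (remote_seq < local_seq || !remote_seq)
        return RESET_AND_REPLAY;
    return RESUME;
}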
wc->host->hostname : "N/A", wc->chart_reset_count, sequence_id, wc->chart_sequence_id); + chart_reset_t chart_reset; + chart_reset.claim_id = is_agent_claimed(); + if (chart_reset.claim_id) { + chart_reset.node_id = node_id; + chart_reset.reason = SEQ_ID_NOT_EXISTS; + aclk_chart_reset(chart_reset); + freez(chart_reset.claim_id); + wc->chart_reset_count = -1; + } + return; + } else { + struct aclk_database_cmd cmd; + memset(&cmd, 0, sizeof(cmd)); + // TODO: handle timestamp + if (sequence_id < wc->chart_sequence_id || !sequence_id) { // || created_at != wc->chart_timestamp) { + log_access("AC [%s (%s)]: Reset streaming charts from sequence %"PRIu64 \ + " t=%ld (reset count=%d)", wc->node_id, wc->host ? wc->host->hostname : "N/A", wc->chart_sequence_id, + wc->chart_timestamp, wc->chart_reset_count); + cmd.opcode = ACLK_DATABASE_RESET_CHART; + cmd.param1 = sequence_id + 1; + cmd.completion = NULL; + aclk_database_enq_cmd(wc, &cmd); + } + else { + log_access("AC [%s (%s)]: Start streaming charts enabled -- last streamed sequence %"PRIu64 \ + " t=%ld (reset count=%d)", wc->node_id, wc->host ? wc->host->hostname : "N/A", wc->chart_sequence_id, + wc->chart_timestamp, wc->chart_reset_count); + wc->chart_reset_count = 0; + wc->chart_updates = 1; + } + } + } + else + log_access("AC [%s (N/A)]: ACLK synchronization thread is not active.", node_id); + return; + } + host = host->next; + } + rrd_unlock(); + return; +} + +#define SQL_SELECT_HOST_MEMORY_MODE "SELECT memory_mode FROM chart WHERE host_id = @host_id LIMIT 1;" + +static RRD_MEMORY_MODE sql_get_host_memory_mode(uuid_t *host_id) +{ + int rc; + + RRD_MEMORY_MODE memory_mode = RRD_MEMORY_MODE_RAM; + sqlite3_stmt *res = NULL; + + rc = sqlite3_prepare_v2(db_meta, SQL_SELECT_HOST_MEMORY_MODE, -1, &res, 0); + + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement to read host memory mode"); + return memory_mode; + } + + rc = sqlite3_bind_blob(res, 1, host_id, sizeof(*host_id), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind host parameter to fetch host memory mode"); + goto failed; + } + + while (sqlite3_step(res) == SQLITE_ROW) { + memory_mode = (RRD_MEMORY_MODE) sqlite3_column_int(res, 0); + } + +failed: + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize the prepared statement when reading host memory mode"); + return memory_mode; +} + +#define SELECT_HOST_DIMENSION_LIST "SELECT d.dim_id, c.update_every, c.type||'.'||c.id, d.id, d.name FROM chart c, dimension d " \ + "WHERE d.chart_id = c.chart_id AND c.host_id = @host_id ORDER BY c.update_every ASC;" + +#define SELECT_HOST_CHART_LIST "SELECT distinct h.host_id, c.update_every, c.type||'.'||c.id FROM chart c, host h " \ + "WHERE c.host_id = h.host_id AND c.host_id = @host_id ORDER BY c.update_every ASC;" + +void aclk_update_retention(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd) +{ + UNUSED(cmd); + int rc; + + if (!aclk_use_new_cloud_arch || !aclk_connected) + return; + + char *claim_id = is_agent_claimed(); + if (unlikely(!claim_id)) + return; + + sqlite3_stmt *res = NULL; + RRD_MEMORY_MODE memory_mode; + + uuid_t host_uuid; + rc = uuid_parse(wc->host_guid, host_uuid); + if (unlikely(rc)) { + freez(claim_id); + return; + } + + if (wc->host) + memory_mode = wc->host->rrd_memory_mode; + else + memory_mode = sql_get_host_memory_mode(&host_uuid); + + if (memory_mode == RRD_MEMORY_MODE_DBENGINE) + rc = sqlite3_prepare_v2(db_meta, SELECT_HOST_DIMENSION_LIST, -1, &res, 0); + 
else + rc = sqlite3_prepare_v2(db_meta, SELECT_HOST_CHART_LIST, -1, &res, 0); + + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement to fetch host dimensions"); + freez(claim_id); + return; + } + + rc = sqlite3_bind_blob(res, 1, &host_uuid, sizeof(host_uuid), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind host parameter to fetch host dimensions"); + goto failed; + } + + time_t start_time = LONG_MAX; + time_t first_entry_t; + time_t last_entry_t; + uint32_t update_every = 0; + + struct retention_updated rotate_data; + + memset(&rotate_data, 0, sizeof(rotate_data)); + + int max_intervals = 32; + + rotate_data.interval_duration_count = 0; + rotate_data.interval_durations = callocz(max_intervals, sizeof(*rotate_data.interval_durations)); + + now_realtime_timeval(&rotate_data.rotation_timestamp); + rotate_data.memory_mode = memory_mode; + rotate_data.claim_id = claim_id; + rotate_data.node_id = strdupz(wc->node_id); + + // time_t now = now_realtime_sec(); + while (sqlite3_step(res) == SQLITE_ROW) { + if (!update_every || update_every != (uint32_t) sqlite3_column_int(res, 1)) { + if (update_every) { + debug(D_ACLK_SYNC,"Update %s for %u oldest time = %ld", wc->host_guid, update_every, start_time); + if (start_time == LONG_MAX) + rotate_data.interval_durations[rotate_data.interval_duration_count].retention = 0; + else + rotate_data.interval_durations[rotate_data.interval_duration_count].retention = + rotate_data.rotation_timestamp.tv_sec - start_time; + rotate_data.interval_duration_count++; + } + update_every = (uint32_t) sqlite3_column_int(res, 1); + rotate_data.interval_durations[rotate_data.interval_duration_count].update_every = update_every; + start_time = LONG_MAX; + } +#ifdef ENABLE_DBENGINE + if (memory_mode == RRD_MEMORY_MODE_DBENGINE) + rc = rrdeng_metric_latest_time_by_uuid((uuid_t *)sqlite3_column_blob(res, 0), &first_entry_t, &last_entry_t); + else +#endif + { + if (wc->host) { + RRDSET *st = NULL; + rc = (st = rrdset_find(wc->host, (const char *)sqlite3_column_text(res, 2))) ? 
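Note: the retention loop above relies on the SELECT being ordered by update_every: a bucket is flushed each time the tier changes, and each bucket's retention is simply the rotation timestamp minus the oldest first-entry time observed in that tier (LONG_MAX meaning no data was seen). The per-bucket arithmetic in isolation:

#include <limits.h>
#include <time.h>

// Retention for one update_every tier: how far back its oldest metric
// reaches from the rotation timestamp; 0 when the tier holds no data.
static time_t bucket_retention(time_t rotation_ts, time_t oldest_first_entry)
{
    if (oldest_first_entry == LONG_MAX)
        return 0;
    return rotation_ts - oldest_first_entry;
}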
0 : 1; + if (!rc) { + first_entry_t = rrdset_first_entry_t(st); + last_entry_t = rrdset_last_entry_t(st); + } + } + else { + rc = 0; + first_entry_t = rotate_data.rotation_timestamp.tv_sec; + } + } + + if (likely(!rc && first_entry_t)) + start_time = MIN(start_time, first_entry_t); + } + if (update_every) { + debug(D_ACLK_SYNC, "Update %s for %u oldest time = %ld", wc->host_guid, update_every, start_time); + if (start_time == LONG_MAX) + rotate_data.interval_durations[rotate_data.interval_duration_count].retention = 0; + else + rotate_data.interval_durations[rotate_data.interval_duration_count].retention = + rotate_data.rotation_timestamp.tv_sec - start_time; + rotate_data.interval_duration_count++; + } + +#ifdef NETDATA_INTERNAL_CHECKS + for (int i = 0; i < rotate_data.interval_duration_count; ++i) + info("Update for host %s (node %s) for %u Retention = %u", wc->host_guid, wc->node_id, + rotate_data.interval_durations[i].update_every, rotate_data.interval_durations[i].retention); +#endif + aclk_retention_updated(&rotate_data); + freez(rotate_data.node_id); + freez(rotate_data.interval_durations); + +failed: + freez(claim_id); + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize the prepared statement when reading host dimensions"); + return; +} + + +uint32_t sql_get_pending_count(struct aclk_database_worker_config *wc) +{ + BUFFER *sql = buffer_create(1024); + sqlite3_stmt *res = NULL; + + buffer_sprintf(sql,"SELECT count(1) FROM aclk_chart_%s ac WHERE ac.date_submitted IS NULL;", wc->uuid_str); + + int rc; + uint32_t chart_payload_count = 0; + rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0); + if (rc != SQLITE_OK) { + error_report("Failed to prepare statement to count pending messages"); + goto fail; + } + while (sqlite3_step(res) == SQLITE_ROW) + chart_payload_count = (uint32_t) sqlite3_column_int(res, 0); + + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to reset statement when fetching pending messages, rc = %d", rc); + +fail: + buffer_free(sql); + return chart_payload_count; +} + +void sql_get_last_chart_sequence(struct aclk_database_worker_config *wc) +{ + BUFFER *sql = buffer_create(1024); + + buffer_sprintf(sql,"SELECT ac.sequence_id, ac.date_created FROM aclk_chart_%s ac " \ + "WHERE ac.date_submitted IS NOT NULL ORDER BY ac.sequence_id DESC LIMIT 1;", wc->uuid_str); + + int rc; + sqlite3_stmt *res = NULL; + rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0); + if (rc != SQLITE_OK) { + error_report("Failed to prepare statement to find last chart sequence id"); + goto fail; + } + + wc->chart_sequence_id = 0; + wc->chart_timestamp = 0; + while (sqlite3_step(res) == SQLITE_ROW) { + wc->chart_sequence_id = (uint64_t) sqlite3_column_int64(res, 0); + wc->chart_timestamp = (time_t) sqlite3_column_int64(res, 1); + } + + debug(D_ACLK_SYNC,"Node %s reports last sequence_id=%"PRIu64, wc->node_id, wc->chart_sequence_id); + + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to reset statement when fetching chart sequence info, rc = %d", rc); + +fail: + buffer_free(sql); + return; +} + +int queue_dimension_to_aclk(RRDDIM *rd) +{ + int rc = sql_queue_chart_payload((struct aclk_database_worker_config *) rd->rrdset->rrdhost->dbsync_worker, + rd, ACLK_DATABASE_ADD_DIMENSION); + return rc; +} + +#endif //ENABLE_NEW_CLOUD_PROTOCOL + +// ST is read locked +int queue_chart_to_aclk(RRDSET *st) +{ +#ifndef ENABLE_NEW_CLOUD_PROTOCOL +#ifdef ENABLE_ACLK 
+ aclk_update_chart(st->rrdhost, st->id, 1); +#endif + return 0; +#else + if (!aclk_use_new_cloud_arch && aclk_connected) { + aclk_update_chart(st->rrdhost, st->id, 1); + return 0; + } + return sql_queue_chart_payload((struct aclk_database_worker_config *) st->rrdhost->dbsync_worker, + st, ACLK_DATABASE_ADD_CHART); +#endif +} + diff --git a/database/sqlite/sqlite_aclk_chart.h b/database/sqlite/sqlite_aclk_chart.h new file mode 100644 index 000000000..67d81a534 --- /dev/null +++ b/database/sqlite/sqlite_aclk_chart.h @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SQLITE_ACLK_CHART_H +#define NETDATA_SQLITE_ACLK_CHART_H + + +typedef enum payload_type { + ACLK_PAYLOAD_CHART, + ACLK_PAYLOAD_DIMENSION, + ACLK_PAYLOAD_DIMENSION_ROTATED +} ACLK_PAYLOAD_TYPE; + +extern sqlite3 *db_meta; + +#ifndef RRDSET_MINIMUM_LIVE_COUNT +#define RRDSET_MINIMUM_LIVE_COUNT 3 +#endif + +extern int queue_chart_to_aclk(RRDSET *st); +extern int queue_dimension_to_aclk(RRDDIM *rd); +extern void sql_create_aclk_table(RRDHOST *host, uuid_t *host_uuid, uuid_t *node_id); +extern int sql_queue_alarm_to_aclk(RRDHOST *host, ALARM_ENTRY *ae); +int aclk_add_chart_event(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +int aclk_add_dimension_event(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +int aclk_send_chart_config(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +void aclk_ack_chart_sequence_id(char *node_id, uint64_t last_sequence_id); +void aclk_get_chart_config(char **hash_id_list); +void aclk_send_chart_event(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +void aclk_start_streaming(char *node_id, uint64_t seq_id, time_t created_at, uint64_t batch_id); +void sql_chart_deduplicate(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +void sql_check_rotation_state(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +void sql_get_last_chart_sequence(struct aclk_database_worker_config *wc); +void aclk_receive_chart_reset(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +void aclk_receive_chart_ack(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +void aclk_process_dimension_deletion(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +uint32_t sql_get_pending_count(struct aclk_database_worker_config *wc); +#endif //NETDATA_SQLITE_ACLK_CHART_H diff --git a/database/sqlite/sqlite_aclk_node.c b/database/sqlite/sqlite_aclk_node.c new file mode 100644 index 000000000..ba498c2a7 --- /dev/null +++ b/database/sqlite/sqlite_aclk_node.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "sqlite_functions.h" +#include "sqlite_aclk_node.h" + +#ifdef ENABLE_NEW_CLOUD_PROTOCOL +#include "../../aclk/aclk_charts_api.h" +#endif + +void sql_build_node_info(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd) +{ + UNUSED(cmd); + +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + struct update_node_info node_info; + + if (!wc->host) + return; + + rrd_wrlock(); + node_info.node_id = wc->node_id; + node_info.claim_id = is_agent_claimed(); + node_info.machine_guid = wc->host_guid; + node_info.child = (wc->host != localhost); + now_realtime_timeval(&node_info.updated_at); + + RRDHOST *host = wc->host; + + node_info.data.name = host->hostname; + node_info.data.os = (char *) host->os; + node_info.data.os_name = host->system_info->host_os_name; + node_info.data.os_version = 
host->system_info->host_os_version; + node_info.data.kernel_name = host->system_info->kernel_name; + node_info.data.kernel_version = host->system_info->kernel_version; + node_info.data.architecture = host->system_info->architecture; + node_info.data.cpus = str2uint32_t(host->system_info->host_cores); + node_info.data.cpu_frequency = host->system_info->host_cpu_freq; + node_info.data.memory = host->system_info->host_ram_total; + node_info.data.disk_space = host->system_info->host_disk_space; + node_info.data.version = VERSION; + node_info.data.release_channel = "nightly"; + node_info.data.timezone = (char *) host->abbrev_timezone; + node_info.data.virtualization_type = host->system_info->virtualization; + node_info.data.container_type = host->system_info->container; + node_info.data.custom_info = config_get(CONFIG_SECTION_WEB, "custom dashboard_info.js", ""); + node_info.data.services = NULL; // char ** + node_info.data.service_count = 0; + node_info.data.machine_guid = wc->host_guid; + + struct label_index *labels = &host->labels; + netdata_rwlock_wrlock(&labels->labels_rwlock); + node_info.data.host_labels_head = labels->head; + + aclk_update_node_info(&node_info); + log_access("OG [%s (%s)]: Sending node info for guid [%s] (%s).", wc->node_id, wc->host->hostname, wc->host_guid, wc->host == localhost ? "parent" : "child"); + + netdata_rwlock_unlock(&labels->labels_rwlock); + rrd_unlock(); + freez(node_info.claim_id); +#else + UNUSED(wc); +#endif + + return; +} diff --git a/database/sqlite/sqlite_aclk_node.h b/database/sqlite/sqlite_aclk_node.h new file mode 100644 index 000000000..9cb411586 --- /dev/null +++ b/database/sqlite/sqlite_aclk_node.h @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SQLITE_ACLK_NODE_H +#define NETDATA_SQLITE_ACLK_NODE_H + +void sql_build_node_info(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +void aclk_update_retention(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd); +#endif //NETDATA_SQLITE_ACLK_NODE_H diff --git a/database/sqlite/sqlite_functions.c b/database/sqlite/sqlite_functions.c index 382ed8b02..a0b8ac019 100644 --- a/database/sqlite/sqlite_functions.c +++ b/database/sqlite/sqlite_functions.c @@ -12,6 +12,10 @@ const char *database_config[] = { "chart_type int, memory_mode int, history_entries);", "CREATE TABLE IF NOT EXISTS dimension(dim_id blob PRIMARY KEY, chart_id blob, id text, name text, " "multiplier int, divisor int , algorithm int, options text);", + + "DROP TABLE IF EXISTS chart_active;", + "DROP TABLE IF EXISTS dimension_active;", + "CREATE TABLE IF NOT EXISTS chart_active(chart_id blob PRIMARY KEY, date_created int);", "CREATE TABLE IF NOT EXISTS dimension_active(dim_id blob primary key, date_created int);", "CREATE TABLE IF NOT EXISTS metadata_migration(filename text, file_size, date_created int);", @@ -20,8 +24,35 @@ const char *database_config[] = { "CREATE TABLE IF NOT EXISTS chart_label(chart_id blob, source_type int, label_key text, " "label_value text, date_created int, PRIMARY KEY (chart_id, label_key));", "CREATE TABLE IF NOT EXISTS node_instance (host_id blob PRIMARY KEY, claim_id, node_id, date_created);", - "delete from chart_active;", - "delete from dimension_active;", + "CREATE TABLE IF NOT EXISTS alert_hash(hash_id blob PRIMARY KEY, date_updated int, alarm text, template text, " + "on_key text, class text, component text, type text, os text, hosts text, lookup text, " + "every text, units text, calc text, families text, plugin text, module text, 
charts text, green text, " + "red text, warn text, crit text, exec text, to_key text, info text, delay text, options text, " + "repeat text, host_labels text, p_db_lookup_dimensions text, p_db_lookup_method text, p_db_lookup_options int, " + "p_db_lookup_after int, p_db_lookup_before int, p_update_every int);", + + "CREATE TABLE IF NOT EXISTS chart_hash_map(chart_id blob , hash_id blob, UNIQUE (chart_id, hash_id));", + + "CREATE TABLE IF NOT EXISTS chart_hash(hash_id blob PRIMARY KEY,type text, id text, name text, " + "family text, context text, title text, unit text, plugin text, " + "module text, priority integer, chart_type, last_used);", + + "CREATE VIEW IF NOT EXISTS v_chart_hash as SELECT ch.*, chm.chart_id FROM chart_hash ch, chart_hash_map chm " + "WHERE ch.hash_id = chm.hash_id;", + + "CREATE TRIGGER IF NOT EXISTS tr_v_chart_hash INSTEAD OF INSERT on v_chart_hash BEGIN " + "INSERT INTO chart_hash (hash_id, type, id, name, family, context, title, unit, plugin, " + "module, priority, chart_type, last_used) " + "values (new.hash_id, new.type, new.id, new.name, new.family, new.context, new.title, new.unit, new.plugin, " + "new.module, new.priority, new.chart_type, strftime('%s')) " + "ON CONFLICT (hash_id) DO UPDATE SET last_used = strftime('%s'); " + "INSERT INTO chart_hash_map (chart_id, hash_id) values (new.chart_id, new.hash_id) " + "on conflict (chart_id, hash_id) do nothing; END; ", + + NULL +}; + +const char *database_cleanup[] = { "delete from chart where chart_id not in (select chart_id from dimension);", "delete from host where host_id not in (select host_id from chart);", "delete from chart_label where chart_id not in (select chart_id from chart);", @@ -32,7 +63,7 @@ sqlite3 *db_meta = NULL; static uv_mutex_t sqlite_transaction_lock; -static int execute_insert(sqlite3_stmt *res) +int execute_insert(sqlite3_stmt *res) { int rc; @@ -66,7 +97,7 @@ static void add_stmt_to_list(sqlite3_stmt *res) statements[idx++] = res; } -static int prepare_statement(sqlite3 *database, char *query, sqlite3_stmt **statement) { +int prepare_statement(sqlite3 *database, char *query, sqlite3_stmt **statement) { int rc = sqlite3_prepare_v2(database, query, -1, statement, 0); if (likely(rc == SQLITE_OK)) add_stmt_to_list(*statement); @@ -155,18 +186,153 @@ void store_active_dimension(uuid_t *dimension_uuid) return; } +static int check_table_integrity_cb(void *data, int argc, char **argv, char **column) +{ + int *status = data; + UNUSED(argc); + UNUSED(column); + info("---> %s", argv[0]); + *status = (strcmp(argv[0], "ok") != 0); + return 0; +} + + +static int check_table_integrity(char *table) +{ + int status = 0; + char *err_msg = NULL; + char wstr[255]; + + if (table) { + info("Checking table %s", table); + snprintfz(wstr, 254, "PRAGMA integrity_check(%s);", table); + } + else { + info("Checking entire database"); + strcpy(wstr,"PRAGMA integrity_check;"); + } + + int rc = sqlite3_exec(db_meta, wstr, check_table_integrity_cb, (void *) &status, &err_msg); + if (rc != SQLITE_OK) { + error_report("SQLite error during database integrity check for %s, rc = %d (%s)", + table ? 
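Note: the v_chart_hash view plus its INSTEAD OF trigger above exist so a single INSERT can maintain two tables atomically: the trigger upserts the chart_hash row (refreshing last_used on conflict) and insert-or-ignores the (chart_id, hash_id) mapping. A sketch of driving it through the C API; the hex literals are illustrative 16-byte blobs:

#include <sqlite3.h>
#include <stdio.h>

// One logical INSERT against the view; tr_v_chart_hash fans it out to
// chart_hash (upsert) and chart_hash_map (insert or ignore).
static int store_hash_mapping(sqlite3 *db)
{
    char *err = NULL;
    int rc = sqlite3_exec(db,
        "INSERT INTO v_chart_hash (hash_id, type, id, name, family, context, title, unit, "
        "plugin, module, priority, chart_type, chart_id) VALUES "
        "(x'00112233445566778899aabbccddeeff', 'system', 'system.cpu', NULL, 'cpu', "
        "'system.cpu', 'Total CPU', 'percentage', 'proc', NULL, 100, 0, "
        "x'ffeeddccbbaa99887766554433221100');",
        NULL, NULL, &err);
    if (rc != SQLITE_OK) {
        fprintf(stderr, "insert failed: %s\n", err);
        sqlite3_free(err);
    }
    return rc;
}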
table : "the entire database", rc, err_msg); + sqlite3_free(err_msg); + } + + return status; +} + +const char *rebuild_chart_commands[] = { + "BEGIN TRANSACTION; ", + "DROP INDEX IF EXISTS ind_c1;" , + "DROP TABLE IF EXISTS chart_backup; " , + "CREATE TABLE chart_backup AS SELECT * FROM chart; " , + "DROP TABLE chart; ", + "CREATE TABLE IF NOT EXISTS chart(chart_id blob PRIMARY KEY, host_id blob, type text, id text, " + "name text, family text, context text, title text, unit text, plugin text, " + "module text, priority int, update_every int, chart_type int, memory_mode int, history_entries); ", + "INSERT INTO chart SELECT DISTINCT * FROM chart_backup; ", + "DROP TABLE chart_backup; " , + "CREATE INDEX IF NOT EXISTS ind_c1 on chart (host_id, id, type, name);", + "COMMIT TRANSACTION;", + NULL +}; + +static void rebuild_chart() +{ + int rc; + char *err_msg = NULL; + info("Rebuilding chart table"); + for (int i = 0; rebuild_chart_commands[i]; i++) { + info("Executing %s", rebuild_chart_commands[i]); + rc = sqlite3_exec(db_meta, rebuild_chart_commands[i], 0, 0, &err_msg); + if (rc != SQLITE_OK) { + error_report("SQLite error during database setup, rc = %d (%s)", rc, err_msg); + error_report("SQLite failed statement %s", rebuild_chart_commands[i]); + sqlite3_free(err_msg); + } + } + return; +} + +const char *rebuild_dimension_commands[] = { + "BEGIN TRANSACTION; ", + "DROP INDEX IF EXISTS ind_d1;" , + "DROP TABLE IF EXISTS dimension_backup; " , + "CREATE TABLE dimension_backup AS SELECT * FROM dimension; " , + "DROP TABLE dimension; " , + "CREATE TABLE IF NOT EXISTS dimension(dim_id blob PRIMARY KEY, chart_id blob, id text, name text, " + "multiplier int, divisor int , algorithm int, options text);" , + "INSERT INTO dimension SELECT distinct * FROM dimension_backup; " , + "DROP TABLE dimension_backup; " , + "CREATE INDEX IF NOT EXISTS ind_d1 on dimension (chart_id, id, name);", + "COMMIT TRANSACTION;", + NULL +}; + +void rebuild_dimension() +{ + int rc; + char *err_msg = NULL; + + info("Rebuilding dimension table"); + for (int i = 0; rebuild_dimension_commands[i]; i++) { + info("Executing %s", rebuild_dimension_commands[i]); + rc = sqlite3_exec(db_meta, rebuild_dimension_commands[i], 0, 0, &err_msg); + if (rc != SQLITE_OK) { + error_report("SQLite error during database setup, rc = %d (%s)", rc, err_msg); + error_report("SQLite failed statement %s", rebuild_dimension_commands[i]); + sqlite3_free(err_msg); + } + } + return; +} + +static int attempt_database_fix() +{ + info("Closing database and attempting to fix it"); + int rc = sqlite3_close(db_meta); + if (rc != SQLITE_OK) + error_report("Failed to close database, rc = %d", rc); + info("Attempting to fix database"); + db_meta = NULL; + return sql_init_database(DB_CHECK_FIX_DB | DB_CHECK_CONT); +} + +static int init_database_batch(int rebuild, int init_type, const char *batch[]) +{ + int rc; + char *err_msg = NULL; + for (int i = 0; batch[i]; i++) { + debug(D_METADATALOG, "Executing %s", batch[i]); + rc = sqlite3_exec(db_meta, batch[i], 0, 0, &err_msg); + if (rc != SQLITE_OK) { + error_report("SQLite error during database %s, rc = %d (%s)", init_type ? 
"cleanup" : "setup", rc, err_msg); + error_report("SQLite failed statement %s", batch[i]); + sqlite3_free(err_msg); + if (SQLITE_CORRUPT == rc) { + if (!rebuild) + return attempt_database_fix(); + rc = check_table_integrity(NULL); + if (rc) + error_report("Databse integrity errors reported"); + } + return 1; + } + } + return 0; +} + /* * Initialize the SQLite database * Return 0 on success */ -int sql_init_database(void) +int sql_init_database(db_check_action_type_t rebuild) { char *err_msg = NULL; char sqlite_database[FILENAME_MAX + 1]; int rc; - fatal_assert(0 == uv_mutex_init(&sqlite_transaction_lock)); - snprintfz(sqlite_database, FILENAME_MAX, "%s/netdata-meta.db", netdata_configured_cache_dir); rc = sqlite3_open(sqlite_database, &db_meta); if (rc != SQLITE_OK) { @@ -176,18 +342,55 @@ int sql_init_database(void) return 1; } - info("SQLite database %s initialization", sqlite_database); + if (rebuild & (DB_CHECK_INTEGRITY | DB_CHECK_FIX_DB)) { + int errors_detected = 0; + if (!(rebuild & DB_CHECK_CONT)) + info("Running database check on %s", sqlite_database); + + if (check_table_integrity("chart")) { + errors_detected++; + if (rebuild & DB_CHECK_FIX_DB) + rebuild_chart(); + else + error_report("Errors reported -- run with -W sqlite-fix"); + } + + if (check_table_integrity("dimension")) { + errors_detected++; + if (rebuild & DB_CHECK_FIX_DB) + rebuild_dimension(); + else + error_report("Errors reported -- run with -W sqlite-fix"); + } - for (int i = 0; database_config[i]; i++) { - debug(D_METADATALOG, "Executing %s", database_config[i]); - rc = sqlite3_exec(db_meta, database_config[i], 0, 0, &err_msg); + if (!errors_detected) { + if (check_table_integrity(NULL)) + error_report("Errors reported"); + } + } + + if (rebuild & DB_CHECK_RECLAIM_SPACE) { + if (!(rebuild & DB_CHECK_CONT)) + info("Reclaiming space of %s", sqlite_database); + rc = sqlite3_exec(db_meta, "VACUUM;", 0, 0, &err_msg); if (rc != SQLITE_OK) { - error_report("SQLite error during database setup, rc = %d (%s)", rc, err_msg); - error_report("SQLite failed statement %s", database_config[i]); + error_report("Failed to execute VACUUM rc = %d (%s)", rc, err_msg); sqlite3_free(err_msg); - return 1; } } + + if (rebuild && !(rebuild & DB_CHECK_CONT)) + return 1; + + info("SQLite database %s initialization", sqlite_database); + + if (init_database_batch(rebuild, 0, &database_config[0])) + return 1; + + if (init_database_batch(rebuild, 0, &database_cleanup[0])) + return 1; + + fatal_assert(0 == uv_mutex_init(&sqlite_transaction_lock)); info("SQLite database initialization completed"); return 0; } @@ -246,20 +449,20 @@ bind_fail: return 0; } -uuid_t *find_dimension_uuid(RRDSET *st, RRDDIM *rd) +int find_dimension_uuid(RRDSET *st, RRDDIM *rd, uuid_t *store_uuid) { static __thread sqlite3_stmt *res = NULL; - uuid_t *uuid = NULL; int rc; + int status = 1; if (unlikely(!db_meta) && default_rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) - return NULL; + return 1; if (unlikely(!res)) { rc = prepare_statement(db_meta, SQL_FIND_DIMENSION_UUID, &res); if (rc != SQLITE_OK) { error_report("Failed to bind prepare statement to lookup dimension UUID in the database"); - return NULL; + return 1; } } @@ -277,49 +480,24 @@ uuid_t *find_dimension_uuid(RRDSET *st, RRDDIM *rd) rc = sqlite3_step(res); if (likely(rc == SQLITE_ROW)) { - uuid = mallocz(sizeof(uuid_t)); - uuid_copy(*uuid, sqlite3_column_blob(res, 0)); + uuid_copy(*store_uuid, *((uuid_t *) sqlite3_column_blob(res, 0))); + status = 0; + } + else { + uuid_generate(*store_uuid); + status = 
sql_store_dimension(store_uuid, st->chart_uuid, rd->id, rd->name, rd->multiplier, rd->divisor, rd->algorithm); + if (unlikely(status)) + error_report("Failed to store dimension metadata in the database"); } rc = sqlite3_reset(res); if (unlikely(rc != SQLITE_OK)) error_report("Failed to reset statement find dimension uuid, rc = %d", rc); - -#ifdef NETDATA_INTERNAL_CHECKS - char uuid_str[GUID_LEN + 1]; - if (likely(uuid)) { - uuid_unparse_lower(*uuid, uuid_str); - debug(D_METADATALOG, "Found UUID %s for dimension %s", uuid_str, rd->name); - } - else - debug(D_METADATALOG, "UUID not found for dimension %s", rd->name); -#endif - return uuid; + return status; bind_fail: error_report("Failed to bind input parameter to perform dimension UUID database lookup, rc = %d", rc); - return NULL; -} - -uuid_t *create_dimension_uuid(RRDSET *st, RRDDIM *rd) -{ - uuid_t *uuid = NULL; - int rc; - - uuid = mallocz(sizeof(uuid_t)); - uuid_generate(*uuid); - -#ifdef NETDATA_INTERNAL_CHECKS - char uuid_str[GUID_LEN + 1]; - uuid_unparse_lower(*uuid, uuid_str); - debug(D_METADATALOG,"Generating uuid [%s] for dimension %s under chart %s", uuid_str, rd->name, st->id); -#endif - - rc = sql_store_dimension(uuid, st->chart_uuid, rd->id, rd->name, rd->multiplier, rd->divisor, rd->algorithm); - if (unlikely(rc)) - error_report("Failed to store dimension metadata in the database"); - - return uuid; + return 1; } #define DELETE_DIMENSION_UUID "delete from dimension where dim_id = @uuid;" @@ -984,7 +1162,7 @@ RRDHOST *sql_create_host_by_uuid(char *hostname) set_host_properties(host, sqlite3_column_int(res, 2), RRD_MEMORY_MODE_DBENGINE, hostname, (char *) sqlite3_column_text(res, 1), (const char *) uuid_str, (char *) sqlite3_column_text(res, 3), (char *) sqlite3_column_text(res, 5), - (char *) sqlite3_column_text(res, 4), NULL, NULL); + (char *) sqlite3_column_text(res, 4), NULL, 0, NULL, NULL); uuid_copy(host->host_uuid, *((uuid_t *) sqlite3_column_blob(res, 0))); @@ -1002,16 +1180,27 @@ failed: return host; } -void db_execute(char *cmd) +#define SQL_MAX_RETRY 100 + +void db_execute(const char *cmd) { int rc; - char *err_msg; - rc = sqlite3_exec(db_meta, cmd, 0, 0, &err_msg); - if (rc != SQLITE_OK) { - error_report("Failed to execute '%s', rc = %d (%s)", cmd, rc, err_msg); - sqlite3_free(err_msg); + int cnt = 0; + while (cnt < SQL_MAX_RETRY) { + char *err_msg; + rc = sqlite3_exec(db_meta, cmd, 0, 0, &err_msg); + if (rc != SQLITE_OK) { + error_report("Failed to execute '%s', rc = %d (%s) -- attempt %d", cmd, rc, err_msg, cnt); + sqlite3_free(err_msg); + if (likely(rc == SQLITE_BUSY || rc == SQLITE_LOCKED)) { + usleep(SQLITE_INSERT_DELAY * USEC_PER_MS); + } + else break; + } + else + break; + ++cnt; } - return; } @@ -1206,7 +1395,7 @@ static RRDDIM *create_rrdim_entry(RRDSET *st, char *id, char *name, uuid_t *metr rd->state->query_ops.oldest_time = rrdeng_metric_oldest_time; rd->state->rrdeng_uuid = mallocz(sizeof(uuid_t)); uuid_copy(*rd->state->rrdeng_uuid, *metric_uuid); - rd->state->metric_uuid = rd->state->rrdeng_uuid; + uuid_copy(rd->state->metric_uuid, *metric_uuid); rd->id = strdupz(id); rd->name = strdupz(name); return rd; @@ -1339,6 +1528,174 @@ failed: return; } + +/* + * Store a chart hash in the database + */ + +#define SQL_STORE_CHART_HASH "insert into v_chart_hash (hash_id, type, id, " \ + "name, family, context, title, unit, plugin, module, priority, chart_type, last_used, chart_id) " \ + "values (?1,?2,?3,?4,?5,?6,?7,?8,?9,?10,?11, ?12, strftime('%s'), ?13);" + +int sql_store_chart_hash( + uuid_t *hash_id, 
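Note: db_execute above now retries commands that fail with SQLITE_BUSY or SQLITE_LOCKED instead of giving up on the first attempt, sleeping SQLITE_INSERT_DELAY milliseconds between tries up to SQL_MAX_RETRY times. The distilled retry idiom:

#include <sqlite3.h>
#include <unistd.h>

#define SQL_MAX_RETRY 100
#define SQLITE_INSERT_DELAY_MS 50   // mirrors SQLITE_INSERT_DELAY in the header

// Retry only transient lock conditions; any other error is final.
static int exec_with_retry(sqlite3 *db, const char *cmd)
{
    int rc = SQLITE_ERROR;
    for (int i = 0; i < SQL_MAX_RETRY; i++) {
        rc = sqlite3_exec(db, cmd, NULL, NULL, NULL);
        if (rc != SQLITE_BUSY && rc != SQLITE_LOCKED)
            break;
        usleep(SQLITE_INSERT_DELAY_MS * 1000);
    }
    return rc;
}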
uuid_t *chart_id, const char *type, const char *id, const char *name, const char *family, + const char *context, const char *title, const char *units, const char *plugin, const char *module, long priority, + RRDSET_TYPE chart_type) +{ + static __thread sqlite3_stmt *res = NULL; + int rc, param = 0; + + if (unlikely(!db_meta)) { + if (default_rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) + return 0; + error_report("Database has not been initialized"); + return 1; + } + + if (unlikely(!res)) { + rc = prepare_statement(db_meta, SQL_STORE_CHART_HASH, &res); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement to store chart, rc = %d", rc); + return 1; + } + } + + param++; + rc = sqlite3_bind_blob(res, 1, hash_id, sizeof(*hash_id), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 2, type, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 3, id, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + if (name && *name) + rc = sqlite3_bind_text(res, 4, name, -1, SQLITE_STATIC); + else + rc = sqlite3_bind_null(res, 4); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 5, family, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 6, context, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 7, title, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 8, units, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 9, plugin, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 10, module, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_int(res, 11, (int) priority); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_int(res, 12, chart_type); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_blob(res, 13, chart_id, sizeof(*chart_id), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + rc = execute_insert(res); + if (unlikely(rc != SQLITE_DONE)) + error_report("Failed to store chart hash_id, rc = %d", rc); + + rc = sqlite3_reset(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to reset statement in chart hash_id store function, rc = %d", rc); + + return 0; + + bind_fail: + error_report("Failed to bind parameter %d to store chart hash_id, rc = %d", param, rc); + rc = sqlite3_reset(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to reset statement in chart hash_id store function, rc = %d", rc); + return 1; +} + +void compute_chart_hash(RRDSET *st) +{ + EVP_MD_CTX *evpctx; + unsigned char hash_value[EVP_MAX_MD_SIZE]; + unsigned int hash_len; + char priority_str[32]; + + sprintf(priority_str, "%ld", st->priority); + + evpctx = EVP_MD_CTX_create(); + EVP_DigestInit_ex(evpctx, EVP_sha256(), NULL); + //EVP_DigestUpdate(evpctx, st->type, strlen(st->type)); + EVP_DigestUpdate(evpctx, st->id, strlen(st->id)); + EVP_DigestUpdate(evpctx, st->name, strlen(st->name)); + EVP_DigestUpdate(evpctx, st->family, strlen(st->family)); + EVP_DigestUpdate(evpctx, st->context, strlen(st->context)); + EVP_DigestUpdate(evpctx, st->title, 
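Note: compute_chart_hash below derives a stable identifier from the chart's metadata by hashing it with SHA-256 and keeping the first 16 bytes as a uuid_t (which the fatal_assert on the digest length guards). The digest-to-uuid step in a standalone form, using the same OpenSSL EVP calls:

#include <openssl/evp.h>
#include <string.h>
#include <uuid/uuid.h>

// SHA-256 produces 32 bytes; a uuid_t holds 16, so the first half of
// the digest becomes the identifier, exactly as in the code below.
static void hash_to_uuid(const char *id, const char *name, uuid_t out)
{
    unsigned char digest[EVP_MAX_MD_SIZE];
    unsigned int len = 0;

    EVP_MD_CTX *ctx = EVP_MD_CTX_create();
    EVP_DigestInit_ex(ctx, EVP_sha256(), NULL);
    EVP_DigestUpdate(ctx, id, strlen(id));
    EVP_DigestUpdate(ctx, name, strlen(name));
    EVP_DigestFinal_ex(ctx, digest, &len);
    EVP_MD_CTX_destroy(ctx);

    memcpy(out, digest, sizeof(uuid_t));
}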
strlen(st->title)); + EVP_DigestUpdate(evpctx, st->units, strlen(st->units)); + EVP_DigestUpdate(evpctx, st->plugin_name, strlen(st->plugin_name)); + if (st->module_name) + EVP_DigestUpdate(evpctx, st->module_name, strlen(st->module_name)); +// EVP_DigestUpdate(evpctx, priority_str, strlen(priority_str)); + EVP_DigestUpdate(evpctx, &st->priority, sizeof(st->priority)); + EVP_DigestUpdate(evpctx, &st->chart_type, sizeof(st->chart_type)); + EVP_DigestFinal_ex(evpctx, hash_value, &hash_len); + EVP_MD_CTX_destroy(evpctx); + fatal_assert(hash_len > sizeof(uuid_t)); + + char uuid_str[GUID_LEN + 1]; + uuid_unparse_lower(*((uuid_t *) &hash_value), uuid_str); + //info("Calculating HASH %s for chart %s", uuid_str, st->name); + uuid_copy(st->state->hash_id, *((uuid_t *) &hash_value)); + + (void)sql_store_chart_hash( + (uuid_t *)&hash_value, + st->chart_uuid, + st->type, + st->id, + st->name, + st->family, + st->context, + st->title, + st->units, + st->plugin_name, + st->module_name, + st->priority, + st->chart_type); + return; +} + #define SQL_STORE_CLAIM_ID "insert into node_instance " \ "(host_id, claim_id, date_created) values (@host_id, @claim_id, strftime('%s')) " \ "on conflict(host_id) do update set claim_id = excluded.claim_id;" @@ -1397,9 +1754,16 @@ static inline void set_host_node_id(RRDHOST *host, uuid_t *node_id) return; } + struct aclk_database_worker_config *wc = host->dbsync_worker; + if (unlikely(!host->node_id)) host->node_id = mallocz(sizeof(*host->node_id)); uuid_copy(*(host->node_id), *node_id); + + if (unlikely(!wc)) + sql_create_aclk_table(host, &host->host_uuid, node_id); + else + uuid_unparse_lower(*node_id, wc->node_id); return; } @@ -1455,6 +1819,42 @@ failed: return rc - 1; } +#define SQL_SELECT_HOST_BY_NODE_ID "select host_id from node_instance where node_id = @node_id;" + +int get_host_id(uuid_t *node_id, uuid_t *host_id) +{ + sqlite3_stmt *res = NULL; + int rc; + + if (unlikely(!db_meta)) { + if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) + error_report("Database has not been initialized"); + return 1; + } + + rc = sqlite3_prepare_v2(db_meta, SQL_SELECT_HOST_BY_NODE_ID, -1, &res, 0); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement to select node instance information for a node"); + return 1; + } + + rc = sqlite3_bind_blob(res, 1, node_id, sizeof(*node_id), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind host_id parameter to select node instance information"); + goto failed; + } + + rc = sqlite3_step(res); + if (likely(rc == SQLITE_ROW && host_id)) + uuid_copy(*host_id, *((uuid_t *) sqlite3_column_blob(res, 0))); + +failed: + if (unlikely(sqlite3_finalize(res) != SQLITE_OK)) + error_report("Failed to finalize the prepared statement when selecting node instance information"); + + return (rc == SQLITE_ROW) ? 
0 : -1; +} + #define SQL_SELECT_NODE_ID "select node_id from node_instance where host_id = @host_id and node_id not null;" int get_node_id(uuid_t *host_id, uuid_t *node_id) @@ -1553,7 +1953,7 @@ struct node_instance_list *get_node_list(void) rc = sqlite3_prepare_v2(db_meta, SQL_GET_NODE_INSTANCE_LIST, -1, &res, 0); if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to prepare statement store chart labels"); + error_report("Failed to prepare statement to get node instance information"); return NULL; }; @@ -1563,22 +1963,25 @@ struct node_instance_list *get_node_list(void) row++; if (sqlite3_reset(res) != SQLITE_OK) { - error_report("Failed to reset the prepared statement fetching storing node instance information"); + error_report("Failed to reset the prepared statement while fetching node instance information"); goto failed; } node_list = callocz(row + 1, sizeof(*node_list)); int max_rows = row; row = 0; + rrd_wrlock(); while (sqlite3_step(res) == SQLITE_ROW) { if (sqlite3_column_bytes(res, 0) == sizeof(uuid_t)) uuid_copy(node_list[row].node_id, *((uuid_t *)sqlite3_column_blob(res, 0))); if (sqlite3_column_bytes(res, 1) == sizeof(uuid_t)) { uuid_t *host_id = (uuid_t *)sqlite3_column_blob(res, 1); uuid_copy(node_list[row].host_id, *host_id); - node_list[row].querable = 1; + node_list[row].queryable = 1; uuid_unparse_lower(*host_id, host_guid); - node_list[row].live = rrdhost_find_by_guid(host_guid, 0) ? 1 : 0; - node_list[row].hops = uuid_compare(*host_id, localhost->host_uuid) ? 1 : 0; + RRDHOST *host = rrdhost_find_by_guid(host_guid, 0); + node_list[row].live = host && (host == localhost || host->receiver) ? 1 : 0; + node_list[row].hops = (host && host->system_info) ? host->system_info->hops : + uuid_compare(*host_id, localhost->host_uuid) ? 1 : 0; node_list[row].hostname = sqlite3_column_bytes(res, 2) ? 
strdupz((char *)sqlite3_column_text(res, 2)) : NULL; } @@ -1586,10 +1989,11 @@ struct node_instance_list *get_node_list(void) if (row == max_rows) break; } + rrd_unlock(); failed: if (unlikely(sqlite3_finalize(res) != SQLITE_OK)) - error_report("Failed to finalize the prepared statement when storing node instance information"); + error_report("Failed to finalize the prepared statement when fetching node instance information"); return node_list; }; @@ -1609,13 +2013,13 @@ void sql_load_node_id(RRDHOST *host) rc = sqlite3_prepare_v2(db_meta, SQL_GET_HOST_NODE_ID, -1, &res, 0); if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to prepare statement store chart labels"); + error_report("Failed to prepare statement to fetch node id"); return; }; rc = sqlite3_bind_blob(res, 1, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC); if (unlikely(rc != SQLITE_OK)) { - error_report("Failed to bind host_id parameter to store node instance information"); + error_report("Failed to bind host_id parameter to load node instance information"); goto failed; } @@ -1629,7 +2033,7 @@ void sql_load_node_id(RRDHOST *host) failed: if (unlikely(sqlite3_finalize(res) != SQLITE_OK)) - error_report("Failed to finalize the prepared statement when storing node instance information"); + error_report("Failed to finalize the prepared statement when loading node instance information"); return; }; diff --git a/database/sqlite/sqlite_functions.h b/database/sqlite/sqlite_functions.h index 30a52bf73..3e41f6aaa 100644 --- a/database/sqlite/sqlite_functions.h +++ b/database/sqlite/sqlite_functions.h @@ -3,7 +3,7 @@ #ifndef NETDATA_SQLITE_FUNCTIONS_H #define NETDATA_SQLITE_FUNCTIONS_H -#include "../../daemon/common.h" +#include "daemon/common.h" #include "sqlite3.h" // return a node list @@ -12,10 +12,17 @@ struct node_instance_list { uuid_t host_id; char *hostname; int live; - int querable; + int queryable; int hops; }; +typedef enum db_check_action_type { + DB_CHECK_NONE = 0x0000, + DB_CHECK_INTEGRITY = 0x0001, + DB_CHECK_FIX_DB = 0x0002, + DB_CHECK_RECLAIM_SPACE = 0x0004, + DB_CHECK_CONT = 0x00008 +} db_check_action_type_t; #define SQLITE_INSERT_DELAY (50) // Insert delay in case of lock @@ -34,11 +41,22 @@ struct node_instance_list { #define SQL_STORE_DIMENSION \ "INSERT OR REPLACE into dimension (dim_id, chart_id, id, name, multiplier, divisor , algorithm) values (?0001,?0002,?0003,?0004,?0005,?0006,?0007);" -#define SQL_FIND_DIMENSION_UUID "select dim_id from dimension where chart_id=@chart and id=@id and name=@name;" +#define SQL_FIND_DIMENSION_UUID \ + "select dim_id from dimension where chart_id=@chart and id=@id and name=@name and length(dim_id)=16;" -#define SQL_STORE_ACTIVE_DIMENSION \ +#define SQL_STORE_ACTIVE_DIMENSION \ "insert or replace into dimension_active (dim_id, date_created) values (@id, strftime('%s'));" -extern int sql_init_database(void); + +#define CHECK_SQLITE_CONNECTION(db_meta) \ + if (unlikely(!db_meta)) { \ + if (default_rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) { \ + return 1; \ + } \ + error_report("Database has not been initialized"); \ + return 1; \ + } + +extern int sql_init_database(db_check_action_type_t rebuild); extern void sql_close_database(void); extern int sql_store_host(uuid_t *guid, const char *hostname, const char *registry_hostname, int update_every, const char *os, const char *timezone, const char *tags); @@ -49,8 +67,7 @@ extern int sql_store_chart( extern int sql_store_dimension(uuid_t *dim_uuid, uuid_t *chart_uuid, const char *id, const char *name, collected_number 
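Note: sql_init_database now takes the db_check_action_type_t bitmask declared in this header; without DB_CHECK_CONT the function deliberately returns non-zero right after the maintenance work instead of going on to initialize the schema. A hypothetical maintenance invocation (the "-W sqlite-fix" hint in the error messages suggests this is wired to a command-line switch):

// Hypothetical maintenance pass: verify the chart and dimension tables,
// rebuild them on corruption, then VACUUM. The non-zero return here is
// expected, since DB_CHECK_CONT was not set.
(void) sql_init_database(DB_CHECK_INTEGRITY | DB_CHECK_FIX_DB | DB_CHECK_RECLAIM_SPACE);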
multiplier, collected_number divisor, int algorithm); -extern uuid_t *find_dimension_uuid(RRDSET *st, RRDDIM *rd); -extern uuid_t *create_dimension_uuid(RRDSET *st, RRDDIM *rd); +extern int find_dimension_uuid(RRDSET *st, RRDDIM *rd, uuid_t *store_uuid); extern void store_active_dimension(uuid_t *dimension_uuid); extern uuid_t *find_chart_uuid(RRDHOST *host, const char *type, const char *id, const char *name); @@ -63,7 +80,9 @@ extern int find_uuid_type(uuid_t *uuid); extern void sql_rrdset2json(RRDHOST *host, BUFFER *wb); extern RRDHOST *sql_create_host_by_uuid(char *guid); -extern void db_execute(char *cmd); +extern int prepare_statement(sqlite3 *database, char *query, sqlite3_stmt **statement); +extern int execute_insert(sqlite3_stmt *res); +extern void db_execute(const char *cmd); extern int file_is_migrated(char *path); extern void add_migrated_file(char *path, uint64_t file_size); extern void db_unlock(void); @@ -74,7 +93,9 @@ extern void sql_build_context_param_list(struct context_param **param_list, RRDH extern void store_claim_id(uuid_t *host_id, uuid_t *claim_id); extern int update_node_id(uuid_t *host_id, uuid_t *node_id); extern int get_node_id(uuid_t *host_id, uuid_t *node_id); +extern int get_host_id(uuid_t *node_id, uuid_t *host_id); extern void invalidate_node_instances(uuid_t *host_id, uuid_t *claim_id); extern struct node_instance_list *get_node_list(void); extern void sql_load_node_id(RRDHOST *host); +extern void compute_chart_hash(RRDSET *st); #endif //NETDATA_SQLITE_FUNCTIONS_H diff --git a/database/sqlite/sqlite_health.c b/database/sqlite/sqlite_health.c new file mode 100644 index 000000000..116cb4f3e --- /dev/null +++ b/database/sqlite/sqlite_health.c @@ -0,0 +1,944 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "sqlite_health.h" +#include "sqlite_functions.h" + +#define MAX_HEALTH_SQL_SIZE 2048 + +/* Health related SQL queries + Creates a health log table in sqlite, one per host guid +*/ +#define SQL_CREATE_HEALTH_LOG_TABLE(guid) "CREATE TABLE IF NOT EXISTS health_log_%s(hostname text, unique_id int, alarm_id int, alarm_event_id int, config_hash_id blob, updated_by_id int, updates_id int, when_key int, duration int, non_clear_duration int, flags int, exec_run_timestamp int, delay_up_to_timestamp int, name text, chart text, family text, exec text, recipient text, source text, units text, info text, exec_code int, new_status real, old_status real, delay int, new_value double, old_value double, last_repeat int, class text, component text, type text);", guid +int sql_create_health_log_table(RRDHOST *host) { + int rc; + char *err_msg = NULL, command[MAX_HEALTH_SQL_SIZE + 1]; + + if (unlikely(!db_meta)) { + if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) + error_report("HEALTH [%s]: Database has not been initialized", host->hostname); + return 1; + } + + char uuid_str[GUID_LEN + 1]; + uuid_unparse_lower_fix(&host->host_uuid, uuid_str); + + snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_CREATE_HEALTH_LOG_TABLE(uuid_str)); + + rc = sqlite3_exec(db_meta, command, 0, 0, &err_msg); + if (rc != SQLITE_OK) { + error_report("HEALTH [%s]: SQLite error during creation of health log table, rc = %d (%s)", host->hostname, rc, err_msg); + sqlite3_free(err_msg); + return 1; + } + + snprintfz(command, MAX_HEALTH_SQL_SIZE, "CREATE INDEX IF NOT EXISTS " + "health_log_index_%s ON health_log_%s (unique_id); ", uuid_str, uuid_str); + db_execute(command); + + return 0; +} + +/* Health related SQL queries + Updates an entry in the table +*/ +#define 
SQL_UPDATE_HEALTH_LOG(guid) "UPDATE health_log_%s set updated_by_id = ?, flags = ?, exec_run_timestamp = ?, exec_code = ? where unique_id = ?;", guid +void sql_health_alarm_log_update(RRDHOST *host, ALARM_ENTRY *ae) { + sqlite3_stmt *res = NULL; + int rc; + char command[MAX_HEALTH_SQL_SIZE + 1]; + + if (unlikely(!db_meta)) { + if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) + error_report("HEALTH [%s]: Database has not been initialized", host->hostname); + return; + } + + char uuid_str[GUID_LEN + 1]; + uuid_unparse_lower_fix(&host->host_uuid, uuid_str); + + snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_UPDATE_HEALTH_LOG(uuid_str)); + + rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0); + if (unlikely(rc != SQLITE_OK)) { + error_report("HEALTH [%s]: Failed to prepare statement for SQL_UPDATE_HEALTH_LOG", host->hostname); + return; + } + + rc = sqlite3_bind_int64(res, 1, (sqlite3_int64) ae->updated_by_id); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind updated_by_id parameter for SQL_UPDATE_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 2, (sqlite3_int64) ae->flags); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind flags parameter for SQL_UPDATE_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 3, (sqlite3_int64) ae->exec_run_timestamp); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind exec_run_timestamp parameter for SQL_UPDATE_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int(res, 4, ae->exec_code); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind exec_code parameter for SQL_UPDATE_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 5, (sqlite3_int64) ae->unique_id); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind unique_id parameter for SQL_UPDATE_HEALTH_LOG"); + goto failed; + } + + rc = execute_insert(res); + if (unlikely(rc != SQLITE_DONE)) { + error_report("HEALTH [%s]: Failed to update health log, rc = %d", host->hostname, rc); + } + + failed: + if (unlikely(sqlite3_finalize(res) != SQLITE_OK)) + error_report("HEALTH [%s]: Failed to finalize the prepared statement for updating health log.", host->hostname); + + return; +} + +/* Health related SQL queries + Inserts an entry in the table +*/ +#define SQL_INSERT_HEALTH_LOG(guid) "INSERT INTO health_log_%s(hostname, unique_id, alarm_id, alarm_event_id, " \ + "config_hash_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, " \ + "exec_run_timestamp, delay_up_to_timestamp, name, chart, family, exec, recipient, source, " \ + "units, info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, " \ + "class, component, type) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);", guid + +void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae) { + sqlite3_stmt *res = NULL; + int rc; + char command[MAX_HEALTH_SQL_SIZE + 1]; + + if (unlikely(!db_meta)) { + if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) + error_report("HEALTH [%s]: Database has not been initialized", host->hostname); + return; + } + + char uuid_str[GUID_LEN + 1]; + uuid_unparse_lower_fix(&host->host_uuid, uuid_str); + + snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_INSERT_HEALTH_LOG(uuid_str)); + + rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0); + if (unlikely(rc != SQLITE_OK)) { + error_report("HEALTH [%s]: Failed to prepare statement for SQL_INSERT_HEALTH_LOG", host->hostname); + return; + } + + rc = sqlite3_bind_text(res, 1, 
host->hostname, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind hostname parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 2, (sqlite3_int64) ae->unique_id); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind unique_id parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 3, (sqlite3_int64) ae->alarm_id); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind alarm_id parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 4, (sqlite3_int64) ae->alarm_event_id); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind alarm_event_id parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_blob(res, 5, &ae->config_hash_id, sizeof(ae->config_hash_id), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind config_hash_id parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 6, (sqlite3_int64) ae->updated_by_id); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind updated_by_id parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 7, (sqlite3_int64) ae->updates_id); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind updates_id parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 8, (sqlite3_int64) ae->when); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind when parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 9, (sqlite3_int64) ae->duration); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind duration parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 10, (sqlite3_int64) ae->non_clear_duration); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind non_clear_duration parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 11, (sqlite3_int64) ae->flags); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind flags parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 12, (sqlite3_int64) ae->exec_run_timestamp); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind exec_run_timestamp parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 13, (sqlite3_int64) ae->delay_up_to_timestamp); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind delay_up_to_timestamp parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_text(res, 14, ae->name, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind name parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_text(res, 15, ae->chart, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind chart parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_text(res, 16, ae->family, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind family parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_text(res, 17, ae->exec, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind exec parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_text(res, 18, ae->recipient, -1, SQLITE_STATIC); + if 
(unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind recipient parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_text(res, 19, ae->source, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind source parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_text(res, 20, ae->units, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind units parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_text(res, 21, ae->info, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind info parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int(res, 22, ae->exec_code); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind exec_code parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int(res, 23, ae->new_status); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind new_status parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int(res, 24, ae->old_status); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind old_status parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int(res, 25, ae->delay); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind delay parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_double(res, 26, ae->new_value); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind new_value parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_double(res, 27, ae->old_value); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind old_value parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_int64(res, 28, (sqlite3_int64) ae->last_repeat); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind last_repeat parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_text(res, 29, ae->classification, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind classification parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_text(res, 30, ae->component, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind component parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = sqlite3_bind_text(res, 31, ae->type, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to bind type parameter for SQL_INSERT_HEALTH_LOG"); + goto failed; + } + + rc = execute_insert(res); + if (unlikely(rc != SQLITE_DONE)) { + error_report("HEALTH [%s]: Failed to execute SQL_INSERT_HEALTH_LOG, rc = %d", host->hostname, rc); + goto failed; + } + + ae->flags |= HEALTH_ENTRY_FLAG_SAVED; + host->health_log_entries_written++; + + failed: + if (unlikely(sqlite3_finalize(res) != SQLITE_OK)) + error_report("HEALTH [%s]: Failed to finalize the prepared statement for inserting to health log.", host->hostname); + + return; +} + +void sql_health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae) +{ + if (ae->flags & HEALTH_ENTRY_FLAG_SAVED) + sql_health_alarm_log_update(host, ae); + else + sql_health_alarm_log_insert(host, ae); +} +
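For reference, every write path in this new file shares the shape shown above: build the per-host SQL, prepare it with `sqlite3_prepare_v2()`, bind each column in order, execute through `execute_insert()`, and release the statement through a single `failed:` label so every error path finalizes it. A minimal standalone sketch of that shape; the `event` table and the plain `sqlite3_step()` call standing in for netdata's `execute_insert()` retry wrapper are illustrative assumptions, not code from this patch:

```c
/* Minimal sketch of the statement lifecycle used throughout sqlite_health.c.
 * The `event` table is illustrative only; sqlite3_step() stands in for
 * netdata's execute_insert(), which additionally retries on SQLITE_BUSY. */
#include <sqlite3.h>
#include <stdio.h>

int store_event(sqlite3 *db, long long unique_id, const char *name)
{
    sqlite3_stmt *res = NULL;

    int rc = sqlite3_prepare_v2(db, "INSERT INTO event (unique_id, name) VALUES (?, ?);", -1, &res, 0);
    if (rc != SQLITE_OK) {
        fprintf(stderr, "prepare failed: %s\n", sqlite3_errmsg(db));
        return 1;
    }

    rc = sqlite3_bind_int64(res, 1, (sqlite3_int64) unique_id);
    if (rc != SQLITE_OK)
        goto failed;               /* one label releases the statement */

    rc = sqlite3_bind_text(res, 2, name, -1, SQLITE_STATIC);
    if (rc != SQLITE_OK)
        goto failed;

    rc = sqlite3_step(res);        /* SQLITE_DONE means the row was written */
    if (rc != SQLITE_DONE)
        fprintf(stderr, "insert failed, rc = %d\n", rc);

failed:
    if (sqlite3_finalize(res) != SQLITE_OK)
        fprintf(stderr, "failed to finalize the prepared statement\n");

    return (rc == SQLITE_DONE) ? 0 : 1;
}
```

+/* Health related SQL queries + Cleans up the health_log table.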
+*/ +#define SQL_CLEANUP_HEALTH_LOG(guid,guid2,limit) "DELETE from health_log_%s where unique_id in (SELECT unique_id from health_log_%s order by unique_id asc LIMIT %lu);", guid, guid2, limit +void sql_health_alarm_log_cleanup(RRDHOST *host) { + sqlite3_stmt *res = NULL; + static size_t rotate_every = 0; + int rc; + char command[MAX_HEALTH_SQL_SIZE + 1]; + + if(unlikely(rotate_every == 0)) { + rotate_every = (size_t)config_get_number(CONFIG_SECTION_HEALTH, "rotate log every lines", 2000); + if(rotate_every < 100) rotate_every = 100; + } + + if(likely(host->health_log_entries_written < rotate_every)) { + return; + } + + if (unlikely(!db_meta)) { + if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) + error_report("Database has not been initialized"); + return; + } + + char uuid_str[GUID_LEN + 1]; + uuid_unparse_lower_fix(&host->host_uuid, uuid_str); + + snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_CLEANUP_HEALTH_LOG(uuid_str, uuid_str, host->health_log_entries_written - rotate_every)); + + rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement to cleanup health log table"); + return; + } + + rc = sqlite3_step(res); + if (unlikely(rc != SQLITE_DONE)) + error_report("Failed to cleanup health log table, rc = %d", rc); + + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize the prepared statement to cleanup health log table"); + + host->health_log_entries_written = rotate_every; + + sql_aclk_alert_clean_dead_entries(host); +} + +/* Health related SQL queries + Get a count of rows from health log table +*/ +#define SQL_COUNT_HEALTH_LOG(guid) "SELECT count(1) FROM health_log_%s;", guid +void sql_health_alarm_log_count(RRDHOST *host) { + sqlite3_stmt *res = NULL; + int rc; + char command[MAX_HEALTH_SQL_SIZE + 1]; + + if (unlikely(!db_meta)) { + if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) + error_report("Database has not been initialized"); + return; + } + + char uuid_str[GUID_LEN + 1]; + uuid_unparse_lower_fix(&host->host_uuid, uuid_str); + + snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_COUNT_HEALTH_LOG(uuid_str)); + + rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement to count health log entries from db"); + return; + } + + rc = sqlite3_step(res); + if (likely(rc == SQLITE_ROW)) + host->health_log_entries_written = (size_t) sqlite3_column_int64(res, 0); + + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize the prepared statement to count health log entries from db"); + + info("HEALTH [%s]: Table health_log_%s, contains %lu entries.", host->hostname, uuid_str, host->health_log_entries_written); +} + +/* Health related SQL queries + Load from the health log table +*/ +#define SQL_LOAD_HEALTH_LOG(guid,limit) "SELECT hostname, unique_id, alarm_id, alarm_event_id, config_hash_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, delay_up_to_timestamp, name, chart, family, exec, recipient, source, units, info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, class, component, type FROM (SELECT hostname, unique_id, alarm_id, alarm_event_id, config_hash_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, delay_up_to_timestamp, name, chart, family, exec, recipient, source, units, info, exec_code, new_status, old_status, 
delay, new_value, old_value, last_repeat, class, component, type FROM health_log_%s order by unique_id desc limit %u) order by unique_id asc;", guid, limit +void sql_health_alarm_log_load(RRDHOST *host) { + sqlite3_stmt *res = NULL; + int rc; + ssize_t errored = 0, loaded = 0; + char command[MAX_HEALTH_SQL_SIZE + 1]; + + host->health_log_entries_written = 0; + + if (unlikely(!db_meta)) { + if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) + error_report("HEALTH [%s]: Database has not been initialized", host->hostname); + return; + } + + char uuid_str[GUID_LEN + 1]; + uuid_unparse_lower_fix(&host->host_uuid, uuid_str); + + snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_LOAD_HEALTH_LOG(uuid_str, host->health_log.max)); + + rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0); + if (unlikely(rc != SQLITE_OK)) { + error_report("HEALTH [%s]: Failed to prepare sql statement to load health log.", host->hostname); + return; + } + + netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock); + + while (sqlite3_step(res) == SQLITE_ROW) { + ALARM_ENTRY *ae = NULL; + + // check that we have valid ids + uint32_t unique_id = (uint32_t) sqlite3_column_int64(res, 1); + if(!unique_id) { + error_report("HEALTH [%s]: Got invalid unique id. Ignoring it.", host->hostname); + errored++; + continue; + } + + uint32_t alarm_id = (uint32_t) sqlite3_column_int64(res, 2); + if(!alarm_id) { + error_report("HEALTH [%s]: Got invalid alarm id. Ignoring it.", host->hostname); + errored++; + continue; + } + + //need name, chart and family + if (sqlite3_column_type(res, 13) == SQLITE_NULL) { + error_report("HEALTH [%s]: Got null name field. Ignoring it.", host->hostname); + errored++; + continue; + } + + if (sqlite3_column_type(res, 14) == SQLITE_NULL) { + error_report("HEALTH [%s]: Got null chart field. Ignoring it.", host->hostname); + errored++; + continue; + } + + if (sqlite3_column_type(res, 15) == SQLITE_NULL) { + error_report("HEALTH [%s]: Got null family field. Ignoring it.", host->hostname); + errored++; + continue; + } + + // Check if we got last_repeat field + time_t last_repeat = 0; + last_repeat = (time_t)sqlite3_column_int64(res, 27); + + RRDCALC *rc = alarm_max_last_repeat(host, (char *) sqlite3_column_text(res, 14), simple_hash((char *) sqlite3_column_text(res, 14))); + if (!rc) { + for(rc = host->alarms; rc ; rc = rc->next) { + RRDCALC *rdcmp = (RRDCALC *) avl_insert_lock(&(host)->alarms_idx_name, (avl_t *)rc); + if(rdcmp != rc) { + error("Cannot insert the alarm index ID using log %s", rc->name); + } + } + + rc = alarm_max_last_repeat(host, (char *) sqlite3_column_text(res, 14), simple_hash((char *) sqlite3_column_text(res, 14))); + } + + if(unlikely(rc)) { + if (rrdcalc_isrepeating(rc)) { + rc->last_repeat = last_repeat; + // We iterate through repeating alarm entries only to + // find the latest last_repeat timestamp. Otherwise, + // there is no need to keep them in memory. 
+ continue; + } + } + + ae = callocz(1, sizeof(ALARM_ENTRY)); + + ae->unique_id = unique_id; + ae->alarm_id = alarm_id; + + if (sqlite3_column_type(res, 4) != SQLITE_NULL) + uuid_copy(ae->config_hash_id, *((uuid_t *) sqlite3_column_blob(res, 4))); + + ae->alarm_event_id = (uint32_t) sqlite3_column_int64(res, 3); + ae->updated_by_id = (uint32_t) sqlite3_column_int64(res, 5); + ae->updates_id = (uint32_t) sqlite3_column_int64(res, 6); + + ae->when = (time_t) sqlite3_column_int64(res, 7); + ae->duration = (time_t) sqlite3_column_int64(res, 8); + ae->non_clear_duration = (time_t) sqlite3_column_int64(res, 9); + + ae->flags = (uint32_t) sqlite3_column_int64(res, 10); + ae->flags |= HEALTH_ENTRY_FLAG_SAVED; + + ae->exec_run_timestamp = (time_t) sqlite3_column_int64(res, 11); + ae->delay_up_to_timestamp = (time_t) sqlite3_column_int64(res, 12); + + ae->name = strdupz((char *) sqlite3_column_text(res, 13)); + ae->hash_name = simple_hash(ae->name); + + ae->chart = strdupz((char *) sqlite3_column_text(res, 14)); + ae->hash_chart = simple_hash(ae->chart); + + ae->family = strdupz((char *) sqlite3_column_text(res, 15)); + + if (sqlite3_column_type(res, 16) != SQLITE_NULL) + ae->exec = strdupz((char *) sqlite3_column_text(res, 16)); + else + ae->exec = NULL; + + if (sqlite3_column_type(res, 17) != SQLITE_NULL) + ae->recipient = strdupz((char *) sqlite3_column_text(res, 17)); + else + ae->recipient = NULL; + + if (sqlite3_column_type(res, 18) != SQLITE_NULL) + ae->source = strdupz((char *) sqlite3_column_text(res, 18)); + else + ae->source = NULL; + + if (sqlite3_column_type(res, 19) != SQLITE_NULL) + ae->units = strdupz((char *) sqlite3_column_text(res, 19)); + else + ae->units = NULL; + + if (sqlite3_column_type(res, 20) != SQLITE_NULL) + ae->info = strdupz((char *) sqlite3_column_text(res, 20)); + else + ae->info = NULL; + + ae->exec_code = (int) sqlite3_column_int(res, 21); + ae->new_status = (RRDCALC_STATUS) sqlite3_column_int(res, 22); + ae->old_status = (RRDCALC_STATUS)sqlite3_column_int(res, 23); + ae->delay = (int) sqlite3_column_int(res, 24); + + ae->new_value = (calculated_number) sqlite3_column_double(res, 25); + ae->old_value = (calculated_number) sqlite3_column_double(res, 26); + + ae->last_repeat = last_repeat; + + if (sqlite3_column_type(res, 28) != SQLITE_NULL) + ae->classification = strdupz((char *) sqlite3_column_text(res, 28)); + else + ae->classification = NULL; + + if (sqlite3_column_type(res, 29) != SQLITE_NULL) + ae->component = strdupz((char *) sqlite3_column_text(res, 29)); + else + ae->component = NULL; + + if (sqlite3_column_type(res, 30) != SQLITE_NULL) + ae->type = strdupz((char *) sqlite3_column_text(res, 30)); + else + ae->type = NULL; + + char value_string[100 + 1]; + freez(ae->old_value_string); + freez(ae->new_value_string); + ae->old_value_string = strdupz(format_value_and_unit(value_string, 100, ae->old_value, ae->units, -1)); + ae->new_value_string = strdupz(format_value_and_unit(value_string, 100, ae->new_value, ae->units, -1)); + + ae->next = host->health_log.alarms; + host->health_log.alarms = ae; + + if(unlikely(ae->unique_id > host->health_max_unique_id)) + host->health_max_unique_id = ae->unique_id; + + if(unlikely(ae->alarm_id >= host->health_max_alarm_id)) + host->health_max_alarm_id = ae->alarm_id; + + loaded++; + } + + netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock); + + if(!host->health_max_unique_id) host->health_max_unique_id = (uint32_t)now_realtime_sec(); + if(!host->health_max_alarm_id) host->health_max_alarm_id = 
(uint32_t)now_realtime_sec(); + + host->health_log.next_log_id = host->health_max_unique_id + 1; + if (unlikely(!host->health_log.next_alarm_id || host->health_log.next_alarm_id <= host->health_max_alarm_id)) + host->health_log.next_alarm_id = host->health_max_alarm_id + 1; + + info("HEALTH [%s]: Table health_log_%s, loaded %zd alarm entries, errors in %zd entries.", host->hostname, uuid_str, loaded, errored); + + rc = sqlite3_finalize(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to finalize the health log read statement"); + + sql_health_alarm_log_count(host); +} + +/* + * Store an alert config hash in the database + */ +#define SQL_STORE_ALERT_CONFIG_HASH "insert or replace into alert_hash (hash_id, date_updated, alarm, template, " \ + "on_key, class, component, type, os, hosts, lookup, every, units, calc, families, plugin, module, " \ + "charts, green, red, warn, crit, exec, to_key, info, delay, options, repeat, host_labels, " \ + "p_db_lookup_dimensions, p_db_lookup_method, p_db_lookup_options, p_db_lookup_after, " \ + "p_db_lookup_before, p_update_every) values (?1,strftime('%s'),?2,?3,?4,?5,?6,?7,?8,?9,?10,?11,?12," \ + "?13,?14,?15,?16,?17,?18,?19,?20,?21,?22,?23,?24,?25,?26,?27,?28,?29,?30,?31,?32,?33,?34);" + +int sql_store_alert_config_hash(uuid_t *hash_id, struct alert_config *cfg) +{ + static __thread sqlite3_stmt *res = NULL; + int rc, param = 0; + + if (unlikely(!db_meta)) { + if (default_rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) + return 0; + error_report("Database has not been initialized"); + return 1; + } + + if (unlikely(!res)) { + rc = prepare_statement(db_meta, SQL_STORE_ALERT_CONFIG_HASH, &res); + if (unlikely(rc != SQLITE_OK)) { + error_report("Failed to prepare statement to store alert configuration, rc = %d", rc); + return 1; + } + } + + param++; + rc = sqlite3_bind_blob(res, 1, hash_id, sizeof(*hash_id), SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + if (cfg->alarm && *cfg->alarm) + rc = sqlite3_bind_text(res, 2, cfg->alarm, -1, SQLITE_STATIC); + else + rc = sqlite3_bind_null(res, 2); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + if (cfg->template_key && *cfg->template_key) + rc = sqlite3_bind_text(res, 3, cfg->template_key, -1, SQLITE_STATIC); + else + rc = sqlite3_bind_null(res, 3); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 4, cfg->on, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 5, cfg->classification, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 6, cfg->component, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 7, cfg->type, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 8, cfg->os, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 9, cfg->host, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 10, cfg->lookup, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 11, cfg->every, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 12, cfg->units, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + 
param++; + rc = sqlite3_bind_text(res, 13, cfg->calc, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 14, cfg->families, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 15, cfg->plugin, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 16, cfg->module, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 17, cfg->charts, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 18, cfg->green, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 19, cfg->red, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 20, cfg->warn, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 21, cfg->crit, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 22, cfg->exec, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 23, cfg->to, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 24, cfg->info, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 25, cfg->delay, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 26, cfg->options, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 27, cfg->repeat, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 28, cfg->host_labels, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + if (cfg->p_db_lookup_after) { + param++; + rc = sqlite3_bind_text(res, 29, cfg->p_db_lookup_dimensions, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_text(res, 30, cfg->p_db_lookup_method, -1, SQLITE_STATIC); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_int(res, 31, cfg->p_db_lookup_options); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_int(res, 32, cfg->p_db_lookup_after); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + param++; + rc = sqlite3_bind_int(res, 33, cfg->p_db_lookup_before); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + } else { + param++; + rc = sqlite3_bind_null(res, 29); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + param++; + rc = sqlite3_bind_null(res, 30); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + param++; + rc = sqlite3_bind_null(res, 31); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + param++; + rc = sqlite3_bind_null(res, 32); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + param++; + rc = sqlite3_bind_null(res, 33); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + } + + param++; + rc = sqlite3_bind_int(res, 34, cfg->p_update_every); + if (unlikely(rc != SQLITE_OK)) + goto bind_fail; + + rc = execute_insert(res); + if (unlikely(rc != SQLITE_DONE)) + error_report("Failed to store alert config, rc = %d", rc); + + rc = sqlite3_reset(res); + if 
(unlikely(rc != SQLITE_OK)) + error_report("Failed to reset statement in alert hash_id store function, rc = %d", rc); + + return 0; + + bind_fail: + error_report("Failed to bind parameter %d to store alert hash_id, rc = %d", param, rc); + rc = sqlite3_reset(res); + if (unlikely(rc != SQLITE_OK)) + error_report("Failed to reset statement in alert hash_id store function, rc = %d", rc); + return 1; +} + +#define DIGEST_ALERT_CONFIG_VAL(v) ((v) ? EVP_DigestUpdate(evpctx, (v), strlen((v))) : EVP_DigestUpdate(evpctx, "", 1)) +int alert_hash_and_store_config( + uuid_t hash_id, + struct alert_config *cfg) +{ + EVP_MD_CTX *evpctx; + unsigned char hash_value[EVP_MAX_MD_SIZE]; + unsigned int hash_len; + evpctx = EVP_MD_CTX_create(); + EVP_DigestInit_ex(evpctx, EVP_sha256(), NULL); + + DIGEST_ALERT_CONFIG_VAL(cfg->alarm); + DIGEST_ALERT_CONFIG_VAL(cfg->template_key); + DIGEST_ALERT_CONFIG_VAL(cfg->os); + DIGEST_ALERT_CONFIG_VAL(cfg->host); + DIGEST_ALERT_CONFIG_VAL(cfg->on); + DIGEST_ALERT_CONFIG_VAL(cfg->families); + DIGEST_ALERT_CONFIG_VAL(cfg->plugin); + DIGEST_ALERT_CONFIG_VAL(cfg->module); + DIGEST_ALERT_CONFIG_VAL(cfg->charts); + DIGEST_ALERT_CONFIG_VAL(cfg->lookup); + DIGEST_ALERT_CONFIG_VAL(cfg->calc); + DIGEST_ALERT_CONFIG_VAL(cfg->every); + DIGEST_ALERT_CONFIG_VAL(cfg->green); + DIGEST_ALERT_CONFIG_VAL(cfg->red); + DIGEST_ALERT_CONFIG_VAL(cfg->warn); + DIGEST_ALERT_CONFIG_VAL(cfg->crit); + DIGEST_ALERT_CONFIG_VAL(cfg->exec); + DIGEST_ALERT_CONFIG_VAL(cfg->to); + DIGEST_ALERT_CONFIG_VAL(cfg->units); + DIGEST_ALERT_CONFIG_VAL(cfg->info); + DIGEST_ALERT_CONFIG_VAL(cfg->classification); + DIGEST_ALERT_CONFIG_VAL(cfg->component); + DIGEST_ALERT_CONFIG_VAL(cfg->type); + DIGEST_ALERT_CONFIG_VAL(cfg->delay); + DIGEST_ALERT_CONFIG_VAL(cfg->options); + DIGEST_ALERT_CONFIG_VAL(cfg->repeat); + DIGEST_ALERT_CONFIG_VAL(cfg->host_labels); + + EVP_DigestFinal_ex(evpctx, hash_value, &hash_len); + EVP_MD_CTX_destroy(evpctx); + fatal_assert(hash_len > sizeof(uuid_t)); + + char uuid_str[GUID_LEN + 1]; + uuid_unparse_lower(*((uuid_t *)&hash_value), uuid_str); + uuid_copy(hash_id, *((uuid_t *)&hash_value)); + + /* store everything, so it can be recreated when not in memory or just a subset ? 
*/ + (void)sql_store_alert_config_hash( (uuid_t *)&hash_value, cfg); + + return 1; +} diff --git a/database/sqlite/sqlite_health.h b/database/sqlite/sqlite_health.h new file mode 100644 index 000000000..3b9460897 --- /dev/null +++ b/database/sqlite/sqlite_health.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_SQLITE_HEALTH_H +#define NETDATA_SQLITE_HEALTH_H +#include "../../daemon/common.h" +#include "sqlite3.h" + +extern sqlite3 *db_meta; +extern void sql_health_alarm_log_load(RRDHOST *host); +extern int sql_create_health_log_table(RRDHOST *host); +extern void sql_health_alarm_log_update(RRDHOST *host, ALARM_ENTRY *ae); +extern void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae); +extern void sql_health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae); +extern void sql_health_alarm_log_cleanup(RRDHOST *host); +extern int alert_hash_and_store_config(uuid_t hash_id, struct alert_config *cfg); +extern void sql_aclk_alert_clean_dead_entries(RRDHOST *host); +#endif //NETDATA_SQLITE_HEALTH_H diff --git a/docs/Demo-Sites.md b/docs/Demo-Sites.md index 8af1282ba..5c0369c15 100644 --- a/docs/Demo-Sites.md +++ b/docs/Demo-Sites.md @@ -10,7 +10,7 @@ You can also view live demos of Netdata at **[https://www.netdata.cloud](https:/ | Location | Netdata demo URL | 60 mins reqs | VM donated by | | :------------------ | :-------------------------------------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| :------------------------------------------------- | -| London (UK) | **[london.my-netdata.io](https://london.my-netdata.io)**
    (this is the global Netdata **registry** and has **named** and **mysql** charts) | [![Requests Per Second](https://london.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://london.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | +| London (UK) | **[london3.my-netdata.io](https://london3.my-netdata.io)**
    (this is the global Netdata **registry** and has **named** and **mysql** charts) | [![Requests Per Second](https://london3.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://london3.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | | Atlanta (USA) | **[cdn77.my-netdata.io](https://cdn77.my-netdata.io)**
    (with **named** and **mysql** charts) | [![Requests Per Second](https://cdn77.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://cdn77.my-netdata.io) | [CDN77.com](https://www.cdn77.com/) | | Israel | **[octopuscs.my-netdata.io](https://octopuscs.my-netdata.io)** | [![Requests Per Second](https://octopuscs.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://octopuscs.my-netdata.io) | [OctopusCS.com](https://www.octopuscs.com) | | Bangalore (India) | **[bangalore.my-netdata.io](https://bangalore.my-netdata.io)** | [![Requests Per Second](https://bangalore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://bangalore.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) | diff --git a/docs/Running-behind-lighttpd.md b/docs/Running-behind-lighttpd.md index 864915800..1e86f334f 100644 --- a/docs/Running-behind-lighttpd.md +++ b/docs/Running-behind-lighttpd.md @@ -14,7 +14,7 @@ $HTTP["url"] =~ "^/netdata/" { } ``` -If you have older lighttpd you have to use a chain (such as bellow), as explained [at this stackoverflow answer](http://stackoverflow.com/questions/14536554/lighttpd-configuration-to-proxy-rewrite-from-one-domain-to-another). +If you have older lighttpd you have to use a chain (such as below), as explained [at this stackoverflow answer](http://stackoverflow.com/questions/14536554/lighttpd-configuration-to-proxy-rewrite-from-one-domain-to-another). ```txt $HTTP["url"] =~ "^/netdata/" { diff --git a/docs/agent-cloud.md b/docs/agent-cloud.md index 061b8472d..fcec10af8 100644 --- a/docs/agent-cloud.md +++ b/docs/agent-cloud.md @@ -28,7 +28,7 @@ Cloud](https://user-images.githubusercontent.com/1153921/80828986-1ebb3b00-8b9b- [Read more about Netdata Cloud](https://learn.netdata.cloud/docs/cloud/) to better understand how it gives you real-time visibility into your entire infrastructure, and why you might consider using it. -Next, [get started in 5 minutes](https://learn.netdata.cloud/docs/cloud/get-started/), or read our [claiming +Next, [get started in 5 minutes](https://learn.netdata.cloud/docs/cloud/get-started/), or read our [connection to Cloud reference](/claim/README.md) for a complete investigation of Cloud's security and encryption features, plus instructions for Docker containers. @@ -72,8 +72,8 @@ about how you might want to use or configure Cloud, we recommend the following: - Get an overview of Cloud's features by reading [Cloud documentation](https://learn.netdata.cloud/docs/cloud/). - Follow the 5-minute [get started with Cloud](https://learn.netdata.cloud/docs/cloud/get-started/) guide to finish - onboarding and claim your first nodes. -- Better understand how agents connect securely to the Cloud with [claiming](/claim/README.md) and [Agent-Cloud + onboarding and connect your first nodes. +- Better understand how agents connect securely to the Cloud with [connect agent to Cloud](/claim/README.md) and [Agent-Cloud link](/aclk/README.md) documentation. 
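The requests-per-second badges embedded in the demo-sites table above come from Netdata's `/api/v1/badge.svg` endpoint, which every agent exposes. A minimal libcurl sketch for fetching one badge outside the browser; the program itself is illustrative and not part of this patch (URL parameters are trimmed from the table; build with `-lcurl`):

```c
/* Fetch one of the demo badges shown in the table above; the SVG body is
 * written to stdout by libcurl's default write handler. Illustrative only. */
#include <curl/curl.h>

int main(void)
{
    curl_global_init(CURL_GLOBAL_DEFAULT);
    CURL *curl = curl_easy_init();
    if (!curl)
        return 1;

    /* Same badge endpoint the table embeds as an image */
    curl_easy_setopt(curl, CURLOPT_URL,
        "https://london3.my-netdata.io/api/v1/badge.svg"
        "?chart=netdata.requests&dimensions=requests&after=-3600"
        "&group=sum&label=reqs&units=empty&precision=0");
    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);

    CURLcode rc = curl_easy_perform(curl);
    curl_easy_cleanup(curl);
    curl_global_cleanup();
    return (rc == CURLE_OK) ? 0 : 1;
}
```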
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fagent-cloud&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]() diff --git a/docs/anonymous-statistics.md b/docs/anonymous-statistics.md index 27b48e13c..75e586bd4 100644 --- a/docs/anonymous-statistics.md +++ b/docs/anonymous-statistics.md @@ -7,7 +7,13 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/anonymous-s # Anonymous statistics -Netdata collects anonymous usage information by default using the open-source product analytics platform [PostHog](https://github.com/PostHog/posthog). We self-host our PostHog instance, which means your data is never sent or processed by any third parties outside of the Netdata infrastructure. We use the statistics gathered from this information for two purposes: +By default, Netdata collects anonymous usage information from the open-source monitoring agent using the open-source +product analytics platform [PostHog](https://github.com/PostHog/posthog). We self-host our PostHog instance, which means +your data is never sent or processed by any third parties outside of the Netdata infrastructure. + +We are strongly committed to your [data privacy](https://netdata.cloud/data-privacy/). + +We use the statistics gathered from this information for two purposes: 1. **Quality assurance**, to help us understand if Netdata behaves as expected, and to help us classify repeated issues with certain distributions or environments. @@ -62,7 +68,7 @@ Starting with v1.21, we additionally collect information about: - Failures to build the dependencies required to use Cloud features. - Unavailability of Cloud features in an agent. -- Failures to connect to the Cloud in case the agent has been [claimed](/claim/README.md). This includes error codes +- Failures to connect to the Cloud in case the [connection process](/claim/README.md) has been completed. This includes error codes to inform the Netdata team about the reason why the connection failed. To see exactly what and how is collected, you can review the script template `daemon/anonymous-statistics.sh.in`. The diff --git a/docs/configure/nodes.md b/docs/configure/nodes.md index a721c73c4..8399e89d9 100644 --- a/docs/configure/nodes.md +++ b/docs/configure/nodes.md @@ -18,9 +18,10 @@ anomaly, or change in infrastructure affects how their Agents should perform. ## The Netdata config directory -On most Linux systems, using our [recommended one-line installation](/docs/get/README.md#install-the-netdata-agent), the -**Netdata config directory** is `/etc/netdata/`. The config directory contains several configuration files with the -`.conf` extension, a few directories, and a shell script named `edit-config`. +On most Linux systems, using our [recommended one-line +installation](/docs/get-started.mdx#install-on-linux-with-one-line-installer-recommended), the **Netdata config +directory** is `/etc/netdata/`. The config directory contains several configuration files with the `.conf` extension, a +few directories, and a shell script named `edit-config`. > Some operating systems will use `/opt/netdata/etc/netdata/` as the config directory. 
If you're not sure where yours > is, navigate to `http://NODE:19999/netdata.conf` in your browser, replacing `NODE` with the IP address or hostname of diff --git a/docs/configure/secure-nodes.md b/docs/configure/secure-nodes.md index 704db35a3..180ffe357 100644 --- a/docs/configure/secure-nodes.md +++ b/docs/configure/secure-nodes.md @@ -34,7 +34,7 @@ that align with your goals and your organization's standards. ## Disable the local dashboard -This is the _recommended method for those who have claimed their nodes to Netdata Cloud_ and prefer viewing real-time +This is the _recommended method for those who have connected their nodes to Netdata Cloud_ and prefer viewing real-time metrics using the War Room Overview, Nodes view, and Cloud dashboards. You can disable the local dashboard (and API) but retain the encrypted Agent-Cloud link ([ACLK](/aclk/README.md)) that diff --git a/docs/contributing/style-guide.md b/docs/contributing/style-guide.md index faa6fc62b..625237bc0 100644 --- a/docs/contributing/style-guide.md +++ b/docs/contributing/style-guide.md @@ -469,7 +469,7 @@ The following tables describe the standard spelling, capitalization, and usage o | Term | Definition | |-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| **claimed node** | A node that you've proved ownership of by completing the [claiming process](/claim/README.md). The claimed node will then appear in your Space and any War Rooms you added it to. | +| **claimed node** | A node that you've proved ownership of by completing the [connecting to Cloud process](/claim/README.md). The claimed node will then appear in your Space and any War Rooms you added it to. | | **Netdata** | The company behind the open-source Netdata Agent and the Netdata Cloud web application. Never use _netdata_ or _NetData_.

    In general, focus on the user's goals, actions, and solutions rather than what the company provides. For example, write _Learn more about enabling alarm notifications on your preferred platforms_ instead of _Netdata sends alarm notifications to your preferred platforms_. | | **Netdata Agent** | The free and open source [monitoring agent](https://github.com/netdata/netdata) that you can install on all of your distributed systems, whether they're physical, virtual, containerized, ephemeral, and more. The Agent monitors systems running Linux, Docker, Kubernetes, macOS, FreeBSD, and more, and collects metrics from hundreds of popular services and applications. | | **Netdata Cloud** | The web application hosted at [https://app.netdata.cloud](https://app.netdata.cloud) that helps you monitor an entire infrastructure of distributed systems in real time.

    Never use _Cloud_ without the preceding _Netdata_ to avoid ambiguity. | @@ -477,7 +477,7 @@ The following tables describe the standard spelling, capitalization, and usage o | **Netdata community forum** | The Discourse-powered forum for feature requests, Netdata Cloud technical support, and conversations about Netdata's monitoring and troubleshooting products. | | **node** | A system on which the Netdata Agent is installed. The system can be physical, virtual, in a Docker container, and more. Depending on your infrastructure, you may have one, dozens, or hundreds of nodes. Some nodes are _ephemeral_, in that they're created/destroyed automatically by an orchestrator service. | | **Space** | The highest level container within Netdata Cloud for a user to organize their team members and nodes within their infrastructure. A Space likely represents an entire organization or a large team.

    _Space_ is always capitalized. | -| **unreachable node** | A claimed node with a disrupted [Agent-Cloud link](/aclk/README.md). Unreachable could mean the node no longer exists or is experiencing network connectivity issues with Cloud. | +| **unreachable node** | A connected node with a disrupted [Agent-Cloud link](/aclk/README.md). Unreachable could mean the node no longer exists or is experiencing network connectivity issues with Cloud. | | **visited node** | A node which has had its Agent dashboard directly visited by a user. A list of these is maintained on a per-user basis. | | **War Room** | A smaller grouping of nodes where users can view key metrics in real-time and monitor the health of many nodes with their alarm status. War Rooms can be used to organize nodes in any way that makes sense for your infrastructure, such as by a service, purpose, physical location, and more.

    _War Room_ is always capitalized. | diff --git a/docs/dashboard/customize.mdx b/docs/dashboard/customize.mdx index f3a8f805a..2c82a1c50 100644 --- a/docs/dashboard/customize.mdx +++ b/docs/dashboard/customize.mdx @@ -25,7 +25,7 @@ Here are a few popular settings: ### Change chart legend position Find this setting under the **Visual** tab. By default, Netdata places the [legend of -dimensions](/docs/dashboards/charts-dimensions-contexts-families.mdx#dimensions) _below_ charts. Click this toggle to +dimensions](/docs/dashboard/dimensions-contexts-families.mdx#dimensions) _below_ charts. Click this toggle to move the legend to the _right_ of charts. ### Change theme @@ -88,6 +88,6 @@ dashboards](/web/gui/custom/README.md) with HTML, CSS, and JavaScript. - [How the dashboard works](/docs/dashboard/how-dashboard-works.mdx) - [Interact with charts](/docs/dashboard/interact-charts.mdx) - [Chart dimensions, contexts, and families](/docs/dashboard/dimensions-contexts-families.mdx) - - [Select timeframes to visualize](/docs/dashboard/select-timeframes.mdx) + - [Select timeframes to visualize](/docs/dashboard/visualization-date-and-time-controls.mdx) - [Import, export, and print a snapshot](/docs/dashboard/import-export-print-snapshot.mdx) - **[Customize the standard dashboard](/docs/dashboard/customize.mdx)** diff --git a/docs/dashboard/dimensions-contexts-families.mdx b/docs/dashboard/dimensions-contexts-families.mdx index 49438bf2d..da9aad78f 100644 --- a/docs/dashboard/dimensions-contexts-families.mdx +++ b/docs/dashboard/dimensions-contexts-families.mdx @@ -79,7 +79,7 @@ names: ## What's next? With an understanding of a chart's dimensions, context, and family, you're now ready to dig even deeper into Netdata's -dashboard. We recommend looking into [using the timeframe selector](/docs/dashboard/select-timeframes.mdx). +dashboard. We recommend looking into [using the timeframe selector](/docs/dashboard/visualization-date-and-time-controls.mdx). If you feel comfortable with the [dashboard](/docs/dashboard/how-dashboard-works.mdx) and interacting with charts, we recommend learning about [configuration](/docs/configure/nodes.md). 
While Netdata doesn't _require_ a complicated setup @@ -91,6 +91,6 @@ process or a query language to create charts, there are a lot of ways to tweak t - [How the dashboard works](/docs/dashboard/how-dashboard-works.mdx) - [Interact with charts](/docs/dashboard/interact-charts.mdx) - **[Chart dimensions, contexts, and families](/docs/dashboard/dimensions-contexts-families.mdx)** - - [Select timeframes to visualize](/docs/dashboard/select-timeframes.mdx) + - [Select timeframes to visualize](/docs/dashboard/visualization-date-and-time-controls.mdx) - [Import, export, and print a snapshot](/docs/dashboard/import-export-print-snapshot.mdx) - [Customize the standard dashboard](/docs/dashboard/customize.mdx) diff --git a/docs/dashboard/how-dashboard-works.mdx b/docs/dashboard/how-dashboard-works.mdx index 00c5df33b..315614d69 100644 --- a/docs/dashboard/how-dashboard-works.mdx +++ b/docs/dashboard/how-dashboard-works.mdx @@ -105,7 +105,7 @@ organizes its dashboard and operates [alarms](/docs/monitor/configure-alarms.md) - **[How the dashboard works](/docs/dashboard/how-dashboard-works.mdx)** - [Interact with charts](/docs/dashboard/interact-charts.mdx) - [Chart dimensions, contexts, and families](/docs/dashboard/dimensions-contexts-families.mdx) - - [Select timeframes to visualize](/docs/dashboard/select-timeframes.mdx) + - [Select timeframes to visualize](/docs/dashboard/visualization-date-and-time-controls.mdx) - [Import, export, and print a snapshot](/docs/dashboard/import-export-print-snapshot.mdx) - [Customize the standard dashboard](/docs/dashboard/customize.mdx) - [HTTP API](/web/api/README.md) diff --git a/docs/dashboard/import-export-print-snapshot.mdx b/docs/dashboard/import-export-print-snapshot.mdx index b5488914a..7e94a52c8 100644 --- a/docs/dashboard/import-export-print-snapshot.mdx +++ b/docs/dashboard/import-export-print-snapshot.mdx @@ -13,8 +13,8 @@ paper. Snapshots can be incredibly useful for diagnosing anomalies after they've already happened. Let's say Netdata triggered a warning alarm while you were asleep. In the morning, you can [pick the -timeframe](/docs/dashboards/pick-timeframes.mdx) when the alarm triggered, export a snapshot, and send it to a colleague -for further analysis. +timeframe](/docs/dashboard/visualization-date-and-time-controls.mdx) when the alarm triggered, export a snapshot, and send it to a +colleague for further analysis. Or, send the Netdata team a snapshot of your dashboard when [filing a bug report](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2C+needs+triage&template=bug_report.md) on @@ -39,7 +39,7 @@ Some caveats and tips to keep in mind: - Only metrics in the export timeframe are available to you. If you zoom out or pan through time, you'll see the beginning and end of the snapshot. -- Charts won't update with new inforamtion, as you're looking at a static replica, not the live dashboard. +- Charts won't update with new information, as you're looking at a static replica, not the live dashboard. - The import is only temporary. Reload your browser tab to return to your node's real-time dashboard. ## Export a snapshot @@ -78,6 +78,6 @@ dashboards](/web/gui/custom/README.md). 
- [How the dashboard works](/docs/dashboard/how-dashboard-works.mdx) - [Interact with charts](/docs/dashboard/interact-charts.mdx) - [Chart dimensions, contexts, and families](/docs/dashboard/dimensions-contexts-families.mdx) - - [Select timeframes to visualize](/docs/dashboard/select-timeframes.mdx) + - [Select timeframes to visualize](/docs/dashboard/visualization-date-and-time-controls.mdx) - **[Import, export, and print a snapshot](/docs/dashboard/import-export-print-snapshot.mdx)** - [Customize the standard dashboard](/docs/dashboard/customize.mdx) \ No newline at end of file diff --git a/docs/dashboard/interact-charts.mdx b/docs/dashboard/interact-charts.mdx index 2266e101e..5633bd080 100644 --- a/docs/dashboard/interact-charts.mdx +++ b/docs/dashboard/interact-charts.mdx @@ -7,6 +7,9 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/dashboard/i # Interact with charts +> ⚠️ There is a new version of charts that is currently **only** available on [Netdata Cloud](https://learn.netdata.cloud/docs/cloud/visualize/interact-new-charts). We didn't +> want to keep this valuable feature from you, so after we get this into your hands on the Cloud, we will collect and implement your feedback to make sure we are providing the best possible version of the feature on the Netdata Agent dashboard as quickly as possible. + While charts that update every second with new metrics are helpful for understanding the immediate state of a node, deep troubleshooting and root cause analysis begins by manipulating the default charts. To help you troubleshoot, Netdata synchronizes every chart every time you interact with one of them. @@ -119,7 +122,7 @@ are, respectively, `Pan Left`, `Reset`, `Pan Right`, `Zoom In`, and `Zoom Out`. We recommend you read up on the differences between [chart dimensions, contexts, and families](/docs/dashboard/dimensions-contexts-families.mdx) to complete your understanding of how Netdata organizes its dashboards. Another valuable way to interact with charts is to use the [timeframe -selector](/docs/dashboard/select-timeframes.mdx), which helps you visualize specific moments of historical metrics. +selector](/docs/dashboard/visualization-date-and-time-controls.mdx), which helps you visualize specific moments of historical metrics. If you feel comfortable with the [dashboard](/docs/dashboard/how-dashboard-works.mdx) and interacting with charts, we recommend moving on to learning about [configuration](/docs/configure/nodes.md). While Netdata doesn't _require_ a @@ -130,8 +133,8 @@ your needs. 
- Dashboard - [How the dashboard works](/docs/dashboard/how-dashboard-works.mdx) - - **[Interact with charts](/docs/dashboard/interact-charts.mdx)** + - [Netdata Cloud · Interact with new charts](https://learn.netdata.cloud/docs/cloud/visualize/interact-new-charts) - [Chart dimensions, contexts, and families](/docs/dashboard/dimensions-contexts-families.mdx) - - [Select timeframes to visualize](/docs/dashboard/select-timeframes.mdx) + - [Select timeframes to visualize](/docs/dashboard/visualization-date-and-time-controls.mdx) - [Import, export, and print a snapshot](/docs/dashboard/import-export-print-snapshot.mdx) - [Customize the standard dashboard](/docs/dashboard/customize.mdx) diff --git a/docs/dashboard/select-timeframes.mdx b/docs/dashboard/select-timeframes.mdx deleted file mode 100644 index ac1b3f7c7..000000000 --- a/docs/dashboard/select-timeframes.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: "Select timeframes to visualize" -description: "Netdata's dashboard features a rich timeframe selector, with useful defaults and rich customization, to help you narrow your focus when troubleshooting issues or anomalies." -type: how-to -custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/dashboard/select-timeframes.mdx ---- - -# Pick timeframes to visualize - -While [panning through time and zooming in/out](/docs/dashboard/interact-charts.mdx) from charts is helpful when -you're looking a recent history, or want to do granular troubleshooting, what if you want to see metrics from 6 hours -ago? Or 6 days? - -Netdata's dashboard features a **timeframe selector** to help you visualize specific timeframes in a few helpful ways. -By default, it shows a certain number of minutes of historical metrics based on the your browser's viewport to ensure -it's always showing per-second granularity. - -## Open the timeframe selector - -To visualize a new timeframe, you need to open the picker, which appears just above the menu, near the top-right cover -of the dashboard. - -![The timeframe selector in the local Agent -dashboard](https://user-images.githubusercontent.com/1153921/101507784-2c585080-3934-11eb-9d6e-eff30b8553e4.png) - -The **Clear** button resets the dashboard back to its default state based on your browser viewport, and **Apply** closes -the picker and shifts all charts to the selected timeframe. - -## Use the Quick Selector - -Click any of the following options in the **Quick Selector** to choose a commonly-used timeframe. - -- Last 5 minutes -- Last 15 minutes -- Last 2 hours -- Last 6 hours -- Last 12 hours - -Click **Apply** to see metrics from your selected timeframe. - -## Choose a specific interval - -Beneath the Quick Selector is an input field and dropdown you use in combination to select a specific timeframe of -minutes, hours, days, or months. Enter a number and choose the appropriate unit of time, then click **Apply**. - -## Choose multiple days - -Use the calendar to select multiple days. Click on a date to begin the timeframe selection, then an ending date. The -timeframe begins at noon on the beginning and end dates. Click **Apply** to see your selected multi-day timeframe. - -## Caveats and considerations - -**Longer timeframes will decrease metrics granularity**. At the default timeframe, based on your browser viewport, each -"tick" on charts represents one second. If you select a timeframe of 6 hours, each tick represents the _average_ value -across a larger period of time. 
- -**You can only see metrics as far back in history as your metrics retention policy allows**. Netdata uses an internal -time-series database (TSDB) to store as many metrics as it can within a specific amount of disk space. The default -storage is 256 MiB, which should be enough for 1-3 days of historical metrics. If you navigate back to a timeframe -beyond stored historical metrics, you'll see this message: - -![Screenshot of reaching the end of historical metrics -storage](https://user-images.githubusercontent.com/1153921/114207597-63a23280-9911-11eb-863d-4d2f75b030b4.png) - -At any time, [configure the internal TSDB's storage capacity](/docs/store/change-metrics-storage.md) to expand your -depth of historical metrics. - -## What's next? - -One useful next step after selecting a timeframe is [exporting the -metrics](/docs/dashboard/import-export-print-snapshot.mdx) into a snapshot file, which can then be shared and imported -into any other Netdata dashboard. - -There are also many ways to [customize](/docs/dashboard/customize.mdx) the standard dashboard experience, from changing -the theme to editing the text that accompanies every section of charts. - -### Further reading & related information - -- Dashboard - - [How the dashboard works](/docs/dashboard/how-dashboard-works.mdx) - - [Interact with charts](/docs/dashboard/interact-charts.mdx) - - [Chart dimensions, contexts, and families](/docs/dashboard/dimensions-contexts-families.mdx) - - **[Select timeframes to visualize](/docs/dashboard/select-timeframes.mdx)** - - [Import, export, and print a snapshot](/docs/dashboard/import-export-print-snapshot.mdx) - - [Customize the standard dashboard](/docs/dashboard/customize.mdx) diff --git a/docs/dashboard/visualization-date-and-time-controls.mdx b/docs/dashboard/visualization-date-and-time-controls.mdx new file mode 100644 index 000000000..fe3e51091 --- /dev/null +++ b/docs/dashboard/visualization-date-and-time-controls.mdx @@ -0,0 +1,121 @@ +--- +title: "Visualization date and time controls" +description: "Netdata's dashboard features powerful date visualization controls that include a time control (play, pause, force play), a timezone selector and a rich date and timeframe selector, with useful defaults and rich customization, to help you narrow your focus when troubleshooting issues or anomalies." +type: how-to +custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/dashboard/visualization-date-and-time-controls.mdx +--- + +# Visualization date and time controls + +## Date and time selector + +### Pick timeframes to visualize + +While [panning through time and zooming in/out](/docs/dashboard/interact-charts.mdx) from charts is helpful when +you're looking at recent history, or want to do granular troubleshooting, what if you want to see metrics from 6 hours +ago? Or 6 days? + +Netdata's dashboard features a **timeframe selector** to help you visualize specific timeframes in a few helpful ways. +By default, it shows a certain number of minutes of historical metrics based on your browser's viewport to ensure +it's always showing per-second granularity. + +#### Open the timeframe selector + +To visualize a new timeframe, you need to open the picker, which appears just above the menu, near the top-right corner +of the dashboard. 
+
+![The timeframe selector in the local Agent
+dashboard](https://user-images.githubusercontent.com/1153921/101507784-2c585080-3934-11eb-9d6e-eff30b8553e4.png)
+
+The **Clear** button resets the dashboard back to its default state based on your browser viewport, and **Apply** closes
+the picker and shifts all charts to the selected timeframe.
+
+#### Use the Quick Selector
+
+Click any of the following options in the **Quick Selector** to choose a commonly-used timeframe.
+
+- Last 5 minutes
+- Last 15 minutes
+- Last 2 hours
+- Last 6 hours
+- Last 12 hours
+
+Click **Apply** to see metrics from your selected timeframe.
+
+#### Choose a specific interval
+
+Beneath the Quick Selector is an input field and dropdown you use in combination to select a specific timeframe of
+minutes, hours, days, or months. Enter a number and choose the appropriate unit of time, then click **Apply**.
+
+#### Choose multiple days
+
+Use the calendar to select multiple days. Click on a date to begin the timeframe selection, then an ending date. The
+timeframe begins at noon on the beginning and end dates. Click **Apply** to see your selected multi-day timeframe.
+
+## Time controls
+
+The time control provides the following options: **Play**, **Pause** and **Force Play**.
+* **Play** - the content of the page is automatically refreshed while the tab is in the foreground
+* **Pause** - the content of the page isn't refreshed, either because you manually paused it or, for example, because you're investigating data on a
+chart (cursor is on top of a chart)
+* **Force Play** - the content of the page is automatically refreshed even while the tab is in the background
+
+With this, we aim to make it clear whether the content you are looking at is live or historical, and to let you keep
+refreshing the content of the page even when its tab is in the background.
+
+Main use cases for **Force Play**:
+* You use a terminal or deployment tools to make changes in your infrastructure and want to see the impact immediately, with Netdata in the background displaying the effect
+of these changes
+* You want to keep Netdata in the background, for example displayed on a TV, to constantly see metrics through dashboards or to watch alert
+status
+
+![The time control with Play, Pause and
+Force Play](https://user-images.githubusercontent.com/82235632/129206460-03c47d0d-1a5b-428a-b972-473718b74bdb.png)
+
+## Timezone selector
+
+With the timezone selector, you can change the timezone used across Netdata Cloud. More often than not, teams are
+distributed across timezones and need to collaborate.
+
+Our goal is to make it easier for you and your teams to troubleshoot in your preferred timezone and to communicate
+about timeframes without having to worry about timezone conversions.
+
+![Timezone selector](https://user-images.githubusercontent.com/82235632/129209528-bc1d572d-4582-4142-aace-918287849499.png)
+
+When you change the timezone, all date and time fields are updated to display according to the specified timezone, from
+charts to alert information and across Netdata Cloud.
+
+## Caveats and considerations
+
+**Longer timeframes will decrease metrics granularity**. At the default timeframe, based on your browser viewport, each
+"tick" on charts represents one second. If you select a timeframe of 6 hours, each tick represents the _average_ value
+across a larger period of time.
+
+**You can only see metrics as far back in history as your metrics retention policy allows**. Netdata uses an internal
+time-series database (TSDB) to store as many metrics as it can within a specific amount of disk space. The default
+storage is 256 MiB, which should be enough for 1-3 days of historical metrics. If you navigate back to a timeframe
+beyond stored historical metrics, you'll see this message:
+
+![Screenshot of reaching the end of historical metrics
+storage](https://user-images.githubusercontent.com/1153921/114207597-63a23280-9911-11eb-863d-4d2f75b030b4.png)
+
+At any time, [configure the internal TSDB's storage capacity](/docs/store/change-metrics-storage.md) to expand your
+depth of historical metrics.
+
+## What's next?
+
+One useful next step after selecting a timeframe is [exporting the
+metrics](/docs/dashboard/import-export-print-snapshot.mdx) into a snapshot file, which can then be shared and imported
+into any other Netdata dashboard.
+
+There are also many ways to [customize](/docs/dashboard/customize.mdx) the standard dashboard experience, from changing
+the theme to editing the text that accompanies every section of charts.
+
+## Further reading & related information
+
+- Dashboard
+  - [How the dashboard works](/docs/dashboard/how-dashboard-works.mdx)
+  - [Interact with charts](/docs/dashboard/interact-charts.mdx)
+  - [Chart dimensions, contexts, and families](/docs/dashboard/dimensions-contexts-families.mdx)
+  - [Import, export, and print a snapshot](/docs/dashboard/import-export-print-snapshot.mdx)
+  - [Customize the standard dashboard](/docs/dashboard/customize.mdx)
diff --git a/docs/get-started.mdx b/docs/get-started.mdx
index ef1d3f9a6..562c587ef 100644
--- a/docs/get-started.mdx
+++ b/docs/get-started.mdx
@@ -11,7 +11,7 @@ import { Install, InstallBox } from '../src/components/Install/'
 
 # Get started with Netdata
 
-Netdata is an free and open-source (FOSS) monitoring agent that collects thousands of hardware and software metrics from
+Netdata is a free and open-source (FOSS) monitoring agent that collects thousands of hardware and software metrics from
 any physical or virtual system (we call them _nodes_) and organizes them in an easy-to-use and -navigate interface.
 
 Together with [Netdata Cloud](https://learn.netdata.cloud/docs/cloud), you can monitor your entire infrastructure in
diff --git a/docs/getting-started.md b/docs/getting-started.md
index e80b80eed..2d1f3de6d 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -203,7 +203,7 @@ You can use these features together or separately—the decision is up to yo
 
 - Sign up for [Netdata Cloud](https://app.netdata.cloud).
 - Read the [infrastructure monitoring quickstart](/docs/quickstart/infrastructure.md).
-- Better understand how the Netdata Agent connects securely to Netdata Cloud with [claiming](/claim/README.md) and
+- Better understand how the Netdata Agent connects securely to Netdata Cloud with the [connection process](/claim/README.md) and
  [Agent-Cloud link](/aclk/README.md) documentation.
 
 ## Start, stop, and restart Netdata
@@ -221,7 +221,7 @@ details.
 ## What's next?
 
 Even after you've configured `netdata.conf`, tweaked alarms, learned the basics of performance troubleshooting, and
-claimed all your systems in Netdata Cloud or added them to the Visited nodes menu, you've just gotten started with
+connected all your systems in Netdata Cloud or added them to the Visited nodes menu, you've just gotten started with
 Netdata.
 Take a look at some more advanced features and configurations:
diff --git a/docs/guides/deploy/ansible.md b/docs/guides/deploy/ansible.md
index 8298fd00c..f7bf514e2 100644
--- a/docs/guides/deploy/ansible.md
+++ b/docs/guides/deploy/ansible.md
@@ -7,11 +7,11 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/depl
 
 # Deploy Netdata with Ansible
 
-Netdata's [one-line kickstart](https://learn.netdata.cloud/docs/get) is zero-configuration, highly adaptable, and
-compatible with tons of different operating systems and Linux distributions. You can use it on bare metal, VMs,
-containers, and everything in-between.
+Netdata's [one-line kickstart](/docs/get-started.mdx) is zero-configuration, highly adaptable, and compatible with tons
+of different operating systems and Linux distributions. You can use it on bare metal, VMs, containers, and everything
+in-between.
 
-But what if you're trying to bootstrap an infrastructure monitoring solution as quickly as possible. What if you need to
+But what if you're trying to bootstrap an infrastructure monitoring solution as quickly as possible? What if you need to
 deploy Netdata across an entire infrastructure with many nodes? What if you want to make this deployment reliable,
 repeatable, and idempotent? What if you want to write and deploy your infrastructure or cloud monitoring system like
 code?
@@ -22,7 +22,7 @@ those operations over standard and secure SSH connections. There's no agent to i
 have to worry about is your application and your monitoring software.
 
 Ansible has some competition from the likes of [Puppet](https://puppet.com/) or [Chef](https://www.chef.io/), but the
-most valuable feature about Ansible is that every is **idempotent**. From the [Ansible
+most valuable feature of Ansible is that it is **idempotent**. From the [Ansible
 glossary](https://docs.ansible.com/ansible/latest/reference_appendices/glossary.html)
 
 > An operation is idempotent if the result of performing it once is exactly the same as the result of performing it
@@ -33,7 +33,7 @@ operate. When you deploy Netdata with Ansible, you're also deploying _monitoring
 
 In this guide, we'll walk through the process of using an [Ansible
 playbook](https://github.com/netdata/community/tree/main/netdata-agent-deployment/ansible-quickstart) to automatically
-deploy the Netdata Agent to any number of distributed nodes, manage the configuration of each node, and claim them to
+deploy the Netdata Agent to any number of distributed nodes, manage the configuration of each node, and connect them to
 your Netdata Cloud account. You'll go from some unmonitored nodes to a infrastructure monitoring solution in a matter
 of minutes.
@@ -98,7 +98,7 @@ two different SSH keys supplied by AWS.
 
 ### Edit the `vars/main.yml` file
 
-In order to claim your node(s) to your Space in Netdata Cloud, and see all their metrics in real-time in [composite
+In order to connect your node(s) to your Space in Netdata Cloud, and see all their metrics in real-time in [composite
 charts](/docs/visualize/overview-infrastructure.md) or perform [Metric
 Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations), you need to set the `claim_token`
 and `claim_room` variables.
@@ -120,7 +120,7 @@ claim_rooms: XXXXX
 
 Change the `dbengine_multihost_disk_space` if you want to change the metrics retention policy by allocating more or
 less disk space for storing metrics. The default is 2048 Mib, or 2 GiB.
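+
+For reference, a minimal sketch of the relevant `vars/main.yml` entries could look like the following. The variable
+names follow the playbook's stock file, and every value here is a placeholder you must replace with your own:
+
+```yaml
+# Hypothetical excerpt of vars/main.yml -- values are placeholders, not real credentials.
+claim_token: XXXXX                    # claim token for your Space in Netdata Cloud
+claim_rooms: XXXXX                    # War Room ID(s) to add your nodes to
+dbengine_multihost_disk_space: 2048   # metrics retention, in MiB (the default is 2 GiB)
+```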
-Because we're claiming this node to Netdata Cloud, and will view its dashboards there instead of via the IP address or
+Because we're connecting this node to Netdata Cloud, and will view its dashboards there instead of via the IP address or
 hostname of the node, the playbook disables that local dashboard by setting `web_mode` to `none`. This gives a small
 security boost by not allowing any unwanted access to the local dashboard.
 
@@ -147,7 +147,7 @@ Next, Ansible makes changes to each node according to the `tasks` defined in the
 [returns](https://docs.ansible.com/ansible/latest/reference_appendices/common_return_values.html#changed) whether each
 task results in a changed, failure, or was skipped entirely.
 
-The task to install Netdata will take a few minutes per node, so be patient! Once the playbook reaches the claiming
+The task to install Netdata will take a few minutes per node, so be patient! Once the playbook reaches the Cloud connection
 task, your nodes start populating your Space in Netdata Cloud.
 
 ## What's next?
diff --git a/docs/guides/monitor-cockroachdb.md b/docs/guides/monitor-cockroachdb.md
index 0ff9f3c77..0307381e3 100644
--- a/docs/guides/monitor-cockroachdb.md
+++ b/docs/guides/monitor-cockroachdb.md
@@ -13,7 +13,7 @@ maximum granularity using Netdata. Collect more than 50 unique metrics and put t
 designed for better visual anomaly detection.
 
 Netdata itself uses CockroachDB as part of its Netdata Cloud infrastructure, so we're happy to introduce this new
-collector and help others get started with it straightaway.
+collector and help others get started with it straight away.
 
 Let's dive in and walk through the process of monitoring CockroachDB metrics with Netdata.
diff --git a/docs/guides/monitor/anomaly-detection.md b/docs/guides/monitor/anomaly-detection.md
index f680f5f2e..2d8b6d1d6 100644
--- a/docs/guides/monitor/anomaly-detection.md
+++ b/docs/guides/monitor/anomaly-detection.md
@@ -23,7 +23,7 @@ library](https://github.com/yzhao062/pyod/tree/master), which periodically runs
 quantify how anomalous certain charts are.
 
 All these metrics and alarms are available for centralized monitoring in [Netdata Cloud](https://app.netdata.cloud). If
-you choose to sign up for Netdata Cloud and [claim your nodes](/claim/README.md), you will have the ability to run
+you choose to sign up for Netdata Cloud and [connect your nodes](/claim/README.md), you will have the ability to run
 tailored anomaly detection on every node in your infrastructure, regardless of its purpose or workload.
 
 In this guide, you'll learn how to set up the anomalies collector to instantly detect anomalies in an Nginx web server
@@ -123,7 +123,7 @@ configure the collector to monitor charts from the
 log](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog) collectors.
 
 `charts_regex` allows for some basic regex, such as wildcards (`*`) to match all contexts with a certain pattern. For
-example, `system\..*` matches with any chart wit ha context that begins with `system.`, and ends in any number of other
+example, `system\..*` matches with any chart with a context that begins with `system.`, and ends in any number of other
 characters (`.*`). Note the escape character (`\`) around the first period to capture a period character exactly, and
 not any character.
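+
+As a rough sketch, a job in the collector's `python.d/anomalies.conf` that scopes training to the system charts could
+look like the following. Only `charts_regex` comes from this guide; treat the job name and layout as assumptions based
+on the stock configuration file:
+
+```yaml
+# Hypothetical anomalies.conf job -- only charts_regex is taken from this guide.
+anomalies:
+    name: 'anomalies'
+    charts_regex: 'system\..*'
+```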
diff --git a/docs/guides/monitor/kubernetes-k8s-netdata.md b/docs/guides/monitor/kubernetes-k8s-netdata.md
index c5cb2c1bc..5d4886e68 100644
--- a/docs/guides/monitor/kubernetes-k8s-netdata.md
+++ b/docs/guides/monitor/kubernetes-k8s-netdata.md
@@ -45,9 +45,9 @@ To follow this tutorial, you need:
 
 - A free Netdata Cloud account. [Sign up](https://app.netdata.cloud/sign-up?cloudRoute=/spaces) if you don't have one
   already.
-- A working cluster running Kubernetes v1.9 or newer, with a Netdata deployment and claimed parent/child nodes. See
+- A working cluster running Kubernetes v1.9 or newer, with a Netdata deployment and connected parent/child nodes. See
   our [Kubernetes deployment process](/packaging/installer/methods/kubernetes.md) for details on deployment and
-  claiming.
+  connecting to Cloud.
- The [`kubectl`](https://kubernetes.io/docs/reference/kubectl/overview/) command line tool, within [one minor version
   difference](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin) of your cluster, on an
   administrative system.
@@ -98,10 +98,10 @@ robot-shop     web-8bb887476-lkcjx             1/1     Running   0          14m
 
 ## Explore Netdata's Kubernetes monitoring charts
 
 The Netdata Helm chart deploys and enables everything you need for monitoring Kubernetes on every layer. Once you deploy
-Netdata and claim your cluster's nodes, you're ready to check out the visualizations **with zero configuration**.
+Netdata and connect your cluster's nodes, you're ready to check out the visualizations **with zero configuration**.
 
 To get started, [sign in](https://app.netdata.cloud/sign-in?cloudRoute=/spaces) to your Netdata Cloud account. Head over
-to the War Room you claimed your cluster to, if not **General**.
+to the War Room you connected your cluster to (**General** by default).
 
 Netdata Cloud is already visualizing your Kubernetes metrics, streamed in real-time from each node, in the
 [Overview](https://learn.netdata.cloud/docs/cloud/visualize/overview):
diff --git a/docs/guides/monitor/lamp-stack.md b/docs/guides/monitor/lamp-stack.md
index 95aa03f0b..38b9d0bef 100644
--- a/docs/guides/monitor/lamp-stack.md
+++ b/docs/guides/monitor/lamp-stack.md
@@ -167,7 +167,7 @@ If the Netdata Agent isn't already open in your browser, open a new tab and navi
 
 > If you [signed up](https://app.netdata.cloud/sign-up?cloudRoute=/spaces) for Netdata Cloud earlier, you can also view
 > the exact same LAMP stack metrics there, plus additional features, like drag-and-drop custom dashboards. Be sure to
-> [claim your node](/claim/README.md) to start streaming metrics to your browser through Netdata Cloud.
+> [connect your node](/claim/README.md) to start streaming metrics to your browser through Netdata Cloud.
 
 Netdata automatically organizes all metrics and charts onto a single page for easy navigation. Peek at gauges to see
 overall system performance, then scroll down to see more. Click-and-drag with your mouse to pan _all_ charts back and
diff --git a/docs/guides/monitor/statsd.md b/docs/guides/monitor/statsd.md
index 120715b19..e4f04c575 100644
--- a/docs/guides/monitor/statsd.md
+++ b/docs/guides/monitor/statsd.md
@@ -22,14 +22,15 @@ In general, the process for creating a StatsD collector can be summarized in 2 s
 
 - Run an experiment by sending StatsD metrics to Netdata, without any prior configuration. This will create
   a chart per metric (called private charts) and will help you verify that everything works as expected from the application side of things.
  - Make sure to reload the dashboard tab **after** you start sending data to Netdata.
-- Create a configuration file for your app using [edit-config](https://learn.netdata.cloud/guides/step-by-step/step-04): `sudo ./edit-config statsd.d/myapp.conf`
+- Create a configuration file for your app using [edit-config](/docs/configure/nodes.md): `sudo ./edit-config
+  statsd.d/myapp.conf`
   - Each app will have it's own section in the right-hand menu.
 
 Now, let's see the above process in detail.
 
 ## Prerequisites
 
-- A node with the [Netdata Agent](https://learn.netdata.cloud/docs/get#install-the-netdata-agent) installed.
+- A node with [Netdata](/docs/get-started.mdx) installed.
- An application to instrument. For this guide, that will be [k6](https://k6.io/docs/getting-started/installation).
 
 ## Understanding the metrics
@@ -110,7 +111,7 @@ Find more details about family and context in our [documentation](/web/README.md
 Now, having decided on how we are going to group the charts, we need to define how we are going to group metrics into
 different charts. This is particularly important, since we decide:
 
 - What metrics **not** to show, since they are not useful for our use-case.
-- What metrics to consolidate into the same charts, so as to reduce noice and increase visual correlation.
+- What metrics to consolidate into the same charts, so as to reduce noise and increase visual correlation.
 
 The dimension option has this syntax: `dimension = [pattern] METRIC NAME TYPE MULTIPLIER DIVIDER OPTIONS`
diff --git a/docs/guides/python-collector.md b/docs/guides/python-collector.md
index 0478bffe0..b8facd9f0 100644
--- a/docs/guides/python-collector.md
+++ b/docs/guides/python-collector.md
@@ -24,7 +24,7 @@ prebuilt method for collecting your required metric data.
 
 In this tutorial, you'll learn how to leverage the [Python programming language](https://www.python.org/) to build a
 custom data collector for the Netdata Agent. Follow along with your own dataset, using the techniques and best practices
-covered here, or use the included examples for collecting and organizing eithre random or weather data.
+covered here, or use the included examples for collecting and organizing either random or weather data.
 
 ## What you need to get started
@@ -48,7 +48,7 @@ The basic elements of a Netdata collector are:
 
 - `ORDER[]`: A list containing the charts to be displayed.
 - `CHARTS{}`: A dictionary containing the details for the charts to be displayed.
 - `data{}`: A dictionary containing the values to be displayed.
-- `get_data()`: The basic function of the plugin which will retrun to Netdata the correct values.
+- `get_data()`: The basic function of the plugin which will return to Netdata the correct values.
 
 Let's walk through these jobs and elements as independent elements first, then apply them to example Python code.
@@ -138,7 +138,7 @@ correct values.
 
 The `python.d` plugin has a number of framework classes that can be used to speed up the development of your python
 collector. Your class can inherit one of these framework classes, which have preconfigured methods.
 
-For example, the snippet bellow is from the [RabbitMQ
+For example, the snippet below is from the [RabbitMQ
 collector](https://github.com/netdata/netdata/blob/91f3268e9615edd393bd43de4ad8068111024cc9/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py#L273).
 This collector uses an HTTP endpoint and uses the `UrlService` framework class, which only needs to define an HTTP
 endpoint for data collection.
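+
+To make the pattern concrete, here is a minimal, hypothetical `UrlService`-based collector. The endpoint URL, chart
+definitions, and parsing logic are illustrative assumptions, not the actual RabbitMQ module:
+
+```python
+# Sketch of a python.d collector built on the UrlService framework class.
+from bases.FrameworkServices.UrlService import UrlService
+
+ORDER = ['requests']
+CHARTS = {
+    'requests': {
+        'options': [None, 'Requests served', 'requests/s', 'example', 'example.requests', 'line'],
+        'lines': [['requests']],
+    }
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.url = 'http://127.0.0.1:8080/stats'  # assumed endpoint exposing a single counter
+
+    def _get_data(self):
+        # UrlService performs the HTTP request; we only parse the raw response.
+        raw = self._get_raw_data()
+        if raw is None:
+            return None
+        return {'requests': int(raw.strip())}
+```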
@@ -298,7 +298,7 @@ class Service(SimpleService):
     def get_data(self):
         #The data dict is basically all the values to be represented
         # The entries are in the format: { "dimension": value}
-        #And each "dimension" shoudl belong to a chart.
+        #And each "dimension" should belong to a chart.
         data = dict()
 
         self.populate_data()
@@ -356,7 +356,7 @@ chart:
 
 Next, time to add one more chart that visualizes the average, minimum, and maximum temperature values.
 Add a new entry in the `CHARTS` dictionary with the definition for the new chart. Since you want three values
-represented in this this chart, add three dimensions. You shoudl also use the same `FAMILY` value in the charts (`TEMP`)
+represented in this chart, add three dimensions. You should also use the same `FAMILY` value in the charts (`TEMP`)
 so that those two charts are grouped together.
 
 ```python
@@ -418,7 +418,7 @@ configuration in [YAML](https://www.tutorialspoint.com/yaml/yaml_basics.htm) for
 
 - Create a configuration file in the same directory as the `.chart.py`. Name it `.conf`.
 - Define a `job`, which is an instance of the collector. It is useful when you want to collect data from different
   sources with different attributes. For example, we could gather data from 2 different weather stations, which use
-  different temperature measures: Fahrenheit and Celcius.
+  different temperature measures: Fahrenheit and Celsius.
 - You can define many different jobs with the same name, but with different attributes. Netdata will try each job
   serially and will stop at the first job that returns data. If multiple jobs have the same name, only one of them can
   run. This enables you to define different "ways" to fetch data from a particular data source so that the collector has
diff --git a/docs/guides/step-by-step/step-00.md b/docs/guides/step-by-step/step-00.md
index 794366645..106571915 100644
--- a/docs/guides/step-by-step/step-00.md
+++ b/docs/guides/step-by-step/step-00.md
@@ -32,7 +32,6 @@ Click on the **issues** tab to see all the conversations we're having with Netda
 previously-written advice for your specific problem, and if you don't see any results, hit the **New issue** button to
 send us a question.
 
-Or, if that's too complicated, feel free to send this guide's author [an email](mailto:joel@netdata.cloud).
 
 ## Before we get started
diff --git a/docs/guides/step-by-step/step-03.md b/docs/guides/step-by-step/step-03.md
index 2319adb44..a2f37beeb 100644
--- a/docs/guides/step-by-step/step-03.md
+++ b/docs/guides/step-by-step/step-03.md
@@ -43,7 +43,7 @@ features, new collectors for more applications, and improved UI, so will Cloud.
 
 ## Get started with Netdata Cloud
 
-Signing in, onboarding, and claiming your first nodes only takes a few minutes, and we have a [Get started with
+Signing in, onboarding, and connecting your first nodes only takes a few minutes, and we have a [Get started with
 Cloud](https://learn.netdata.cloud/docs/cloud/get-started) guide to help you walk through every step.
 
 Or, if you're feeling confident, dive right in.
@@ -82,7 +82,7 @@ nodes](https://user-images.githubusercontent.com/1153921/80831018-e158ac80-8b9e-
 
 ## What's next?
 
-Now that you have a Netdata Cloud account with a claimed node (or a few!) and can navigate between your dashboards with
+Now that you have a Netdata Cloud account with a connected node (or a few!) and can navigate between your dashboards with
 Visited nodes, it's time to learn more about how you can configure Netdata to your liking.
 From there, you'll be able to customize your Netdata experience to your exact infrastructure and the information you
 need.
diff --git a/docs/guides/step-by-step/step-05.md b/docs/guides/step-by-step/step-05.md
index 30ab329cd..8a4d084e4 100644
--- a/docs/guides/step-by-step/step-05.md
+++ b/docs/guides/step-by-step/step-05.md
@@ -110,6 +110,13 @@ bother you with notifications.
 The best way to understand how health entities work is building your own and experimenting with the options. To start,
 let's build a health entity that triggers an alarm when system RAM usage goes above 80%.
 
+We will first create a new file inside the `health.d/` directory. We'll name our file
+`example.conf` for now.
+
+```bash
+./edit-config health.d/example.conf
+```
+
 The first line in a health entity will be `alarm:`. This is how you name your entity. You can give it any name you
 choose, but the only symbols allowed are `.` and `_`. Let's call the alarm `ram_usage`.
diff --git a/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md b/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md
index d6c4b0697..688e7d296 100644
--- a/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md
+++ b/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md
@@ -236,8 +236,8 @@ same application on multiple systems and want to correlate how it performs on ea
 findings with someone else on your team.
 
 If you don't already have a Netdata Cloud account, go [sign in](https://app.netdata.cloud) and get started for free.
-Read the [get started with Cloud guide](https://learn.netdata.cloud/docs/cloud/get-started) for a walkthrough of node
-claiming and other fundamentals.
+Read the [get started with Cloud guide](https://learn.netdata.cloud/docs/cloud/get-started) for a walkthrough of
+connecting nodes and other fundamentals.
 
 Once you've added one or more nodes to a Space in Netdata Cloud, you can see aggregated eBPF metrics in the [Overview
 dashboard](/docs/visualize/overview-infrastructure.md) under the same **Applications** or **eBPF** sections that you
diff --git a/docs/guides/using-host-labels.md b/docs/guides/using-host-labels.md
index 6d4af2e5d..79558dd16 100644
--- a/docs/guides/using-host-labels.md
+++ b/docs/guides/using-host-labels.md
@@ -27,7 +27,7 @@ sudo ./edit-config netdata.conf
 ```
 
 Create a new `[host labels]` section defining a new host label and its value for the system in question. Make sure not
-to violate any of the [host label naming rules](/docs/configuration-guide.md#netdata-labels).
+to violate any of the [host label naming rules](/docs/configure/common-changes.md#organize-nodes-with-host-labels).
 
 ```conf
 [host labels]
diff --git a/docs/metrics-storage-management/enable-streaming.mdx b/docs/metrics-storage-management/enable-streaming.mdx
index 65acdb14f..d97a8a769 100644
--- a/docs/metrics-storage-management/enable-streaming.mdx
+++ b/docs/metrics-storage-management/enable-streaming.mdx
@@ -85,7 +85,7 @@ by the `netdata` user.
 
 ```bash
 sudo openssl req -newkey rsa:2048 -nodes -sha512 -x509 -days 365 -keyout /etc/netdata/ssl/key.pem -out /etc/netdata/ssl/cert.pem
-sudo chown netdata:netdata /etc/netdata/ssl/cert.pem k/etc/netdata/ssl/ey.pem
+sudo chown netdata:netdata /etc/netdata/ssl/cert.pem /etc/netdata/ssl/key.pem
 ```
 
 Next, enforce TLS/SSL on the web server. Open `netdata.conf`, scroll down to the `[web]` section, and look for the `bind
- Streaming - [How Netdata streams metrics](/docs/metrics-storage-management/how-streaming-works.mdx) - **[Enable streaming between nodes](/docs/metrics-storage-management/enable-streaming.mdx)** - - [Streaming reference](/docs/metrics-storage-management/reference-streaming.mdx) \ No newline at end of file + - [Streaming reference](/docs/metrics-storage-management/reference-streaming.mdx) diff --git a/docs/monitor/enable-notifications.md b/docs/monitor/enable-notifications.md index 68beba53e..e5b5a6f26 100644 --- a/docs/monitor/enable-notifications.md +++ b/docs/monitor/enable-notifications.md @@ -14,7 +14,7 @@ alarms](/docs/monitor/configure-alarms.md) to change the preconfigured threshold infrastructure. Netdata Cloud offers [centralized alarm notifications](#netdata-cloud) via email, which leverages the health status -information already streamed to Netdata Cloud from claimed nodes to send notifications to those who have enabled them. +information already streamed to Netdata Cloud from connected nodes to send notifications to those who have enabled them. The Netdata Agent has a [notification system](#netdata-agent) that supports more than a dozen services, such as email, Slack, PagerDuty, Twilio, Amazon SNS, Discord, and much more. @@ -25,10 +25,11 @@ response process. ## Netdata Cloud -Netdata Cloud's [centralized alarm notifications](https://learn.netdata.cloud/docs/cloud/monitoring/notifications/) is a -zero-configuration way to get notified when an anomaly or incident strikes any node or application in your -infrastructure. The advantage of using centralized alarm notifications from Netdata Cloud is that you don't have to -worry about configuring each node in your infrastructure. +Netdata Cloud's [centralized alarm +notifications](https://learn.netdata.cloud/docs/cloud/alerts-notifications/notifications) is a zero-configuration way to +get notified when an anomaly or incident strikes any node or application in your infrastructure. The advantage of using +centralized alarm notifications from Netdata Cloud is that you don't have to worry about configuring each node in your +infrastructure. To enable centralized alarm notifications for a Space, click on **Manage Space** in the left-hand menu, then click on the **Notifications** tab. Click the toggle switch next to **E-mail** to enable this notification method. @@ -40,8 +41,9 @@ choose what types of notifications to receive from each War Room. ![Enabling and configuring alarm notifications in Netdata Cloud](https://user-images.githubusercontent.com/1153921/101936280-93c50900-3b9d-11eb-9ba0-d6927fa872b7.gif) -See the [centralized alarm notifications](https://learn.netdata.cloud/docs/cloud/monitoring/notifications/) reference -doc for further details about what information is conveyed in an email notification, flood protection, and more. +See the [centralized alarm notifications](https://learn.netdata.cloud/docs/cloud/alerts-notifications/notifications) +reference doc for further details about what information is conveyed in an email notification, flood protection, and +more. ## Netdata Agent @@ -69,6 +71,7 @@ notification platform. 
- [**Kavenegar**](/health/notifications/kavenegar/README.md) - [**Matrix**](/health/notifications/matrix/README.md) - [**Messagebird**](/health/notifications/messagebird/README.md) +- [**Microsoft Teams**](/health/notifications/msteams/README.md) - [**Netdata Agent dashboard**](/health/notifications/web/README.md) - [**Opsgenie**](/health/notifications/opsgenie/README.md) - [**PagerDuty**](/health/notifications/pagerduty/README.md) @@ -138,7 +141,7 @@ architecture](/docs/store/distributed-data-architecture.md) for the best-in-clas ### Related reference documentation -- [Netdata Cloud · Alarm notifications](https://learn.netdata.cloud/docs/cloud/monitoring/notifications/) +- [Netdata Cloud · Alarm notifications](https://learn.netdata.cloud/docs/cloud/alerts-notifications/notifications) - [Netdata Agent · Notifications](/health/notifications/README.md) [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fmonitor%2Fenable-notifications&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>) diff --git a/docs/monitor/view-active-alarms.md b/docs/monitor/view-active-alarms.md index b23e2d721..99169c2c8 100644 --- a/docs/monitor/view-active-alarms.md +++ b/docs/monitor/view-active-alarms.md @@ -11,9 +11,10 @@ performance issue affects your node or the applications it runs. ## Netdata Cloud -A War Room's [alarms indicator](https://learn.netdata.cloud/docs/cloud/war-rooms#indicators) displays the number of active `critical` (red) and -`warning` (yellow) alerts for the nodes in this War Room. Click on either the critical or warning badges to open a -pre-filtered modal displaying only those types of [active alarms](https://learn.netdata.cloud/docs/cloud/monitoring/alarms). +A War Room's [alarms indicator](https://learn.netdata.cloud/docs/cloud/war-rooms#indicators) displays the number of +active `critical` (red) and `warning` (yellow) alerts for the nodes in this War Room. Click on either the critical or +warning badges to open a pre-filtered modal displaying only those types of [active +alarms](https://learn.netdata.cloud/docs/cloud/alerts-notifications/view-active-alerts). ![The Alarms panel in Netdata Cloud](https://user-images.githubusercontent.com/1153921/108564747-d2bfbb00-72c0-11eb-97b9-5863ad3324eb.png) diff --git a/docs/netdata-security.md b/docs/netdata-security.md index 8c0fc6d63..42dd0415f 100644 --- a/docs/netdata-security.md +++ b/docs/netdata-security.md @@ -9,7 +9,7 @@ We have given special attention to all aspects of Netdata, ensuring that everyth **Table of Contents** -1. [Your data are safe with Netdata](#your-data-are-safe-with-netdata) +1. [Your data is safe with Netdata](#your-data-is-safe-with-netdata) 2. [Your systems are safe with Netdata](#your-systems-are-safe-with-netdata) 3. [Netdata is read-only](#netdata-is-read-only) 4. [Netdata viewers authentication](#netdata-viewers-authentication) @@ -20,13 +20,13 @@ We have given special attention to all aspects of Netdata, ensuring that everyth \- [Other methods](#other-methods) 5. [Registry or how to not send any information to a third party server](#registry-or-how-to-not-send-any-information-to-a-third-party-server) -## Your data are safe with Netdata +## Your data is safe with Netdata Netdata collects raw data from many sources. 
For each source, Netdata uses a plugin that connects to the source (or reads the relative files produced by the source), receives raw data and processes them to calculate the metrics shown on Netdata dashboards.
 
 Even if Netdata plugins connect to your database server, or read your application log file to collect raw data, the product of this data collection process is always a number of **chart metadata and metric values** (summarized data for dashboard visualization). All Netdata plugins (internal to the Netdata daemon, and external ones written in any computer language), convert raw data collected into metrics, and only these metrics are stored in Netdata databases, sent to upstream Netdata servers, or archived to external time-series databases.
 
-> The **raw data** collected by Netdata, do not leave the host they are collected. **The only data Netdata exposes are chart metadata and metric values.**
+> The **raw data** collected by Netdata does not leave the host where it is collected. **The only data Netdata exposes are chart metadata and metric values.**
 
 This means that Netdata can safely be used in environments that require the highest level of data isolation (like PCI Level 1).
@@ -133,7 +133,7 @@ to IP addresses within the `160.1.x.x` range and that reverse DNS is setup for t
 
 Use one web server to provide authentication in front of **all your Netdata servers**. So, you will be accessing all your Netdata with URLs like `http://{HOST}/netdata/{NETDATA_HOSTNAME}/` and authentication will be shared among all of them (you will sign-in once for all your servers). Instructions are provided on how to set the proxy configuration to have Netdata run behind [nginx](Running-behind-nginx.md), [Apache](Running-behind-apache.md), [lighttpd](Running-behind-lighttpd.md) and [Caddy](Running-behind-caddy.md).
 
-To use this method, you should firewall protect all your Netdata servers, so that only the web server IP will allowed to directly access Netdata. To do this, run this on each of your servers (or use your firewall manager):
+To use this method, you should firewall protect all your Netdata servers, so that only the web server IP will be allowed to directly access Netdata. To do this, run this on each of your servers (or use your firewall manager):
 
 ```sh
 PROXY_IP="1.2.3.4"
diff --git a/docs/overview/what-is-netdata.md b/docs/overview/what-is-netdata.md
index 0a600234c..37f974ad8 100644
--- a/docs/overview/what-is-netdata.md
+++ b/docs/overview/what-is-netdata.md
@@ -32,10 +32,8 @@ Cloud, you can view key metrics, insightful charts, and active alarms from all y
 When an anomaly strikes, seamlessly navigate to any node to troubleshoot and discover the root cause with the familiar
 Netdata dashboard.
 
-**[Netdata Cloud is
-free](https://learn.netdata.cloud/docs/cloud/faq-glossary#how-much-does-netdata-cost-how-and-why-is-it-free)**! You can
-add an entire infrastructure of nodes, invite all your colleagues, and visualize any number of metrics, charts, and
-alarms entirely for free.
+**[Netdata Cloud is free](https://www.netdata.cloud/blog/why-netdata-is-free/)**! You can add an entire infrastructure
+of nodes, invite all your colleagues, and visualize any number of metrics, charts, and alarms entirely for free.
 
 While Netdata Cloud offers a centralized method of monitoring your Agents, your metrics data is not stored or
 centralized in any way.
Metrics data remains with your nodes and is only streamed to your browser, through Cloud, when
diff --git a/docs/quickstart/infrastructure.md b/docs/quickstart/infrastructure.md
index 71e70b94b..ed136fe12 100644
--- a/docs/quickstart/infrastructure.md
+++ b/docs/quickstart/infrastructure.md
@@ -25,10 +25,10 @@ In this quickstart guide, you'll learn the basics of using Netdata Cloud to moni
 composite charts, and alarm viewing. You'll then learn about the most critical ways to configure the Agent on each of
 your nodes to maximize the value you get from Netdata.
 
-This quickstart assumes you've installed the Netdata Agent on more than one node in your infrastructure, and claimed
+This quickstart assumes you've installed the Netdata Agent on more than one node in your infrastructure, and connected
 those nodes to your Space in Netdata Cloud. If you haven't yet, see the [Netdata
 Cloud](https://learn.netdata.cloud/docs/cloud) docs for details on signing up for Netdata Cloud, installation, and
-claiming.
+the connection process.
 
 > If you want to monitor a Kubernetes cluster with Netdata, see our [k8s installation
 > doc](/packaging/installer/methods/kubernetes.md) for setup details, and then read our guide, [_Monitor a Kubernetes
diff --git a/docs/visualize/interact-dashboards-charts.md b/docs/visualize/interact-dashboards-charts.md
index 30503c220..c5b4c1b62 100644
--- a/docs/visualize/interact-dashboards-charts.md
+++ b/docs/visualize/interact-dashboards-charts.md
@@ -6,6 +6,9 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/visualize/i
 
 # Interact with dashboards and charts
 
+> ⚠️ There is a new version of charts that is currently **only** available on [Netdata Cloud](https://learn.netdata.cloud/docs/cloud/visualize/interact-new-charts). We didn't
+> want to keep this valuable feature from you, so after we get this into your hands on the Cloud, we will collect and implement your feedback to make sure we are providing the best possible version of the feature on the Netdata Agent dashboard as quickly as possible.
+
 You can find Netdata's dashboards in two places: locally served at `http://NODE:19999` by the Netdata Agent, and in
 Netdata Cloud. While you access these dashboards differently, they have similar interfaces, identical charts and
 metrics, and you interact with both of them the same way.
@@ -119,6 +122,7 @@ to get informed when something goes wrong in your infrastructure.
### Related reference documentation
 
 - [Netdata Agent · Web dashboards overview](/web/README.md)
+- [Netdata Cloud · Interact with new charts](https://learn.netdata.cloud/docs/cloud/visualize/interact-new-charts)
 - [Netdata Cloud · War Rooms](https://learn.netdata.cloud/docs/cloud/war-rooms)
 - [Netdata Cloud · Overview](https://learn.netdata.cloud/docs/cloud/visualize/overview)
 - [Netdata Cloud · Nodes](https://learn.netdata.cloud/docs/cloud/visualize/nodes)
diff --git a/exporting/README.md b/exporting/README.md
index 933de0e07..ef485bb18 100644
--- a/exporting/README.md
+++ b/exporting/README.md
@@ -164,6 +164,8 @@ You can configure each connector individually using the available [options](#opt
 [opentsdb:http:my_opentsdb_http_instance]
     enabled = yes
     destination = localhost:4242
+    username = my_username
+    password = my_password
 
 [opentsdb:https:my_opentsdb_https_instance]
     enabled = yes
diff --git a/exporting/WALKTHROUGH.md b/exporting/WALKTHROUGH.md
index ac1712916..24afd2097 100644
--- a/exporting/WALKTHROUGH.md
+++ b/exporting/WALKTHROUGH.md
@@ -178,14 +178,14 @@ Prometheus's homepage and begin to type `netdata\_` Prometheus should auto compl
 
 ![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.13.43%20PM.png)
 
-Let's now start exploring how we can graph some metrics. Back in our NetData container lets get the CPU spinning with a
+Let's now start exploring how we can graph some metrics. Back in our Netdata container let's get the CPU spinning with a
 pointless busy loop. On the shell do the following:
 
 ```sh
 [root@netdata /]# while true; do echo "HOT HOT HOT CPU"; done
 ```
 
-Our NetData cpu graph should be showing some activity. Let's represent this in Prometheus. In order to do this let's
+Our Netdata cpu graph should be showing some activity. Let's represent this in Prometheus. In order to do this let's
 keep our metrics page open for reference: . We are setting out to graph the data in the CPU chart so let's search for
 `system.cpu` in the metrics page above. We come across a section of metrics with the first comments `# COMMENT
 homogeneous chart "system.cpu", context "system.cpu",
@@ -211,18 +211,18 @@ query the dimension also. Place this into our query text box.
 
 ![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.54.40%20PM.png)
 
-Awesome, this is exactly what we wanted. If you haven't caught on yet we can emulate entire charts from NetData by using
+Awesome, this is exactly what we wanted. If you haven't caught on yet we can emulate entire charts from Netdata by using
 the `chart` dimension. If you'd like you can combine the `chart` and `instance` dimension to create per-instance charts.
 
 Let's give this a try: `netdata_system_cpu_percentage_average{chart="system.cpu", instance="netdata:19999"}`
 
-This is the basics of using Prometheus to query NetData. I'd advise everyone at this point to read [this
-page](/exporting/prometheus/#using-netdata-with-prometheus). The key point here is that NetData can export metrics from
+These are the basics of using Prometheus to query Netdata. I'd advise everyone at this point to read [this
+page](/exporting/prometheus/#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from
 its internal DB or can send metrics _as-collected_ by specifying the `source=as-collected` URL parameter like so.
If you choose to use this method you will need to use Prometheus's set of functions here: to obtain useful metrics as you are now dealing with raw counters from the system. For example you will have to use the `irate()` function over a counter to get that metric's rate per second. If your graphing needs are met by using the -metrics returned by NetData's internal database (not specifying any source= URL parameter) then use that. If you find +metrics returned by Netdata's internal database (not specifying any source= URL parameter) then use that. If you find limitations then consider re-writing your queries using the raw data and using Prometheus functions to get the desired chart. diff --git a/exporting/check_filters.c b/exporting/check_filters.c index 64ced7238..d2d7d870f 100644 --- a/exporting/check_filters.c +++ b/exporting/check_filters.c @@ -43,7 +43,9 @@ int rrdhost_is_exportable(struct instance *instance, RRDHOST *host) */ int rrdset_is_exportable(struct instance *instance, RRDSET *st) { +#ifdef NETDATA_INTERNAL_CHECKS RRDHOST *host = st->rrdhost; +#endif if (st->exporting_flags == NULL) st->exporting_flags = callocz(instance->engine->instance_num, sizeof(size_t)); diff --git a/exporting/clean_connectors.c b/exporting/clean_connectors.c index 890e8daac..4af1219a6 100644 --- a/exporting/clean_connectors.c +++ b/exporting/clean_connectors.c @@ -15,6 +15,8 @@ static void clean_instance_config(struct instance_config *config) freez((void *)config->type_name); freez((void *)config->name); freez((void *)config->destination); + freez((void *)config->username); + freez((void *)config->password); freez((void *)config->prefix); freez((void *)config->hostname); @@ -49,6 +51,8 @@ void simple_connector_cleanup(struct instance *instance) struct simple_connector_data *simple_connector_data = (struct simple_connector_data *)instance->connector_specific_data; + freez(simple_connector_data->auth_string); + buffer_free(instance->buffer); buffer_free(simple_connector_data->buffer); buffer_free(simple_connector_data->header); diff --git a/exporting/exporting.conf b/exporting/exporting.conf index c2e902c05..314e1541e 100644 --- a/exporting/exporting.conf +++ b/exporting/exporting.conf @@ -17,6 +17,9 @@ # [graphite:my_graphite_instance] # enabled = no # destination = localhost + # Credentials for basic HTTP authentication + # username = my_username + # password = my_password # data source = average # prefix = netdata # hostname = my_hostname @@ -31,6 +34,8 @@ # enabled = no # destination = localhost # remote write URL path = /receive + # username = my_username + # password = my_password # data source = average # prefix = netdata # hostname = my_hostname diff --git a/exporting/exporting_engine.c b/exporting/exporting_engine.c index 70aceea8c..faace86d9 100644 --- a/exporting/exporting_engine.c +++ b/exporting/exporting_engine.c @@ -4,6 +4,22 @@ static struct engine *engine = NULL; +void analytics_exporting_connectors_ssl(BUFFER *b) +{ +#ifdef ENABLE_HTTPS + if (netdata_exporting_ctx) { + for (struct instance *instance = engine->instance_root; instance; instance = instance->next) { + struct simple_connector_data *connector_specific_data = instance->connector_specific_data; + if (connector_specific_data->flags == NETDATA_SSL_HANDSHAKE_COMPLETE) { + buffer_strcat(b, "exporting"); + break; + } + } + } +#endif + buffer_strcat(b, "|"); +} + void analytics_exporting_connectors(BUFFER *b) { if (!engine) diff --git a/exporting/exporting_engine.h b/exporting/exporting_engine.h index 1ad6e6856..f08583fb5 100644 --- 
a/exporting/exporting_engine.h +++ b/exporting/exporting_engine.h @@ -66,6 +66,8 @@ struct instance_config { const char *name; const char *destination; + const char *username; + const char *password; const char *prefix; const char *hostname; @@ -104,6 +106,8 @@ struct simple_connector_data { void *connector_specific_data; char connected_to[CONNECTED_TO_MAX]; + + char *auth_string; size_t total_buffered_metrics; diff --git a/exporting/graphite/README.md b/exporting/graphite/README.md index a6a25ef7a..d755e0934 100644 --- a/exporting/graphite/README.md +++ b/exporting/graphite/README.md @@ -22,7 +22,12 @@ directory and set the following options: ``` Add `:http` or `:https` modifiers to the connector type if you need to use other than a plaintext protocol. For example: `graphite:http:my_graphite_instance`, -`graphite:https:my_graphite_instance`. +`graphite:https:my_graphite_instance`. You can set basic HTTP authentication credentials using + +```conf + username = my_username + password = my_password +``` The Graphite connector is further configurable using additional settings. See the [exporting reference doc](/exporting/README.md#options) for details. diff --git a/exporting/graphite/graphite.c b/exporting/graphite/graphite.c index 722db0fff..84d4febf1 100644 --- a/exporting/graphite/graphite.c +++ b/exporting/graphite/graphite.c @@ -218,10 +218,12 @@ void graphite_http_prepare_header(struct instance *instance) simple_connector_data->last_buffer->header, "POST /api/put HTTP/1.1\r\n" "Host: %s\r\n" + "%s" "Content-Type: application/graphite\r\n" "Content-Length: %lu\r\n" "\r\n", instance->config.destination, + simple_connector_data->auth_string ? simple_connector_data->auth_string : "", buffer_strlen(simple_connector_data->last_buffer->buffer)); return; diff --git a/exporting/init_connectors.c b/exporting/init_connectors.c index 6aff26354..69ea0685c 100644 --- a/exporting/init_connectors.c +++ b/exporting/init_connectors.c @@ -92,7 +92,7 @@ int init_connectors(struct engine *engine) // dispatch the instance worker thread int error = uv_thread_create(&instance->thread, instance->worker, instance); if (error) { - error("EXPORTING: cannot create tread worker. uv_thread_create(): %s", uv_strerror(error)); + error("EXPORTING: cannot create thread worker. 
uv_thread_create(): %s", uv_strerror(error));
         return 1;
     }
     char threadname[NETDATA_THREAD_NAME_MAX + 1];
@@ -105,8 +105,57 @@ int init_connectors(struct engine *engine)
     return 0;
 }
 
+// TODO: use a base64 encoder from a library
+static size_t base64_encode(unsigned char *input, size_t input_size, char *output, size_t output_size)
+{
+    uint32_t value;
+    static char lookup[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+                           "abcdefghijklmnopqrstuvwxyz"
+                           "0123456789+/";
+    if ((input_size / 3 + 1) * 4 >= output_size) {
+        error("Output buffer for encoding size=%zu is not large enough for %zu-bytes input", output_size, input_size);
+        return 0;
+    }
+    size_t count = 0;
+    while (input_size >= 3) {
+        value = ((input[0] << 16) + (input[1] << 8) + input[2]) & 0xffffff;
+        output[0] = lookup[value >> 18];
+        output[1] = lookup[(value >> 12) & 0x3f];
+        output[2] = lookup[(value >> 6) & 0x3f];
+        output[3] = lookup[value & 0x3f];
+        //error("Base-64 encode (%04x) -> %c %c %c %c\n", value, output[0], output[1], output[2], output[3]);
+        output += 4;
+        input += 3;
+        input_size -= 3;
+        count += 4;
+    }
+    switch (input_size) {
+        case 2:
+            value = (input[0] << 10) + (input[1] << 2);
+            output[0] = lookup[(value >> 12) & 0x3f];
+            output[1] = lookup[(value >> 6) & 0x3f];
+            output[2] = lookup[value & 0x3f];
+            output[3] = '=';
+            //error("Base-64 encode (%06x) -> %c %c %c %c\n", (value>>2)&0xffff, output[0], output[1], output[2], output[3]);
+            count += 4;
+            break;
+        case 1:
+            value = input[0] << 4;
+            output[0] = lookup[(value >> 6) & 0x3f];
+            output[1] = lookup[value & 0x3f];
+            output[2] = '=';
+            output[3] = '=';
+            //error("Base-64 encode (%06x) -> %c %c %c %c\n", value, output[0], output[1], output[2], output[3]);
+            count += 4;
+            break;
+        case 0:
+            break;
+    }
+    return count;
+}
+
 /**
- * Initialize a ring buffer for a simple connector
+ * Initialize a ring buffer and credentials for a simple connector
  *
  * @param instance an instance data structure.
  */
@@ -141,5 +190,25 @@ void simple_connector_init(struct instance *instance)
         first_buffer->next = connector_specific_data->first_buffer;
     connector_specific_data->last_buffer = connector_specific_data->first_buffer;
 
+    if (*instance->config.username || *instance->config.password) {
+        BUFFER *auth_string = buffer_create(0);
+
+        buffer_sprintf(auth_string, "%s:%s", instance->config.username, instance->config.password);
+
+        size_t encoded_size = (buffer_strlen(auth_string) / 3 + 1) * 4 + 1;
+        char *encoded_credentials = callocz(1, encoded_size);
+
+        base64_encode((unsigned char*)buffer_tostring(auth_string), buffer_strlen(auth_string), encoded_credentials, encoded_size);
+
+        buffer_flush(auth_string);
+        buffer_sprintf(auth_string, "Authorization: Basic %s\n", encoded_credentials);
+
+        freez(encoded_credentials);
+
+        connector_specific_data->auth_string = strdupz(buffer_tostring(auth_string));
+
+        buffer_free(auth_string);
+    }
+
     return;
 }
diff --git a/exporting/json/README.md b/exporting/json/README.md
index a0f8472a0..7cce463e2 100644
--- a/exporting/json/README.md
+++ b/exporting/json/README.md
@@ -22,7 +22,12 @@ directory and set the following options:
 ```
 
 Add `:http` or `:https` modifiers to the connector type if you need to use other than a plaintext protocol. For
 example: `json:http:my_json_instance`,
-`json:https:my_json_instance`.
+`json:https:my_json_instance`. You can set basic HTTP authentication credentials using
+
+```conf
+    username = my_username
+    password = my_password
+```
 
 The JSON connector is further configurable using additional settings.
See the [exporting reference doc](/exporting/README.md#options) for details. diff --git a/exporting/json/json.c b/exporting/json/json.c index f2396bafa..50278c5b8 100644 --- a/exporting/json/json.c +++ b/exporting/json/json.c @@ -352,10 +352,12 @@ void json_http_prepare_header(struct instance *instance) simple_connector_data->last_buffer->header, "POST /api/put HTTP/1.1\r\n" "Host: %s\r\n" + "%s" "Content-Type: application/json\r\n" "Content-Length: %lu\r\n" "\r\n", instance->config.destination, + simple_connector_data->auth_string ? simple_connector_data->auth_string : "", buffer_strlen(simple_connector_data->last_buffer->buffer)); return; diff --git a/exporting/mongodb/mongodb.c b/exporting/mongodb/mongodb.c index 44922a242..49ce95269 100644 --- a/exporting/mongodb/mongodb.c +++ b/exporting/mongodb/mongodb.c @@ -276,7 +276,9 @@ void mongodb_cleanup(struct instance *instance) void mongodb_connector_worker(void *instance_p) { struct instance *instance = (struct instance *)instance_p; +#ifdef NETDATA_INTERNAL_CHECKS struct mongodb_specific_config *connector_specific_config = instance->config.connector_specific_config; +#endif struct mongodb_specific_data *connector_specific_data = (struct mongodb_specific_data *)instance->connector_specific_data; diff --git a/exporting/opentsdb/README.md b/exporting/opentsdb/README.md index 3765ad271..0ca6d2449 100644 --- a/exporting/opentsdb/README.md +++ b/exporting/opentsdb/README.md @@ -22,7 +22,12 @@ directory and set the following options: ``` Add `:http` or `:https` modifiers to the connector type if you need to use other than a plaintext protocol. For example: `opentsdb:http:my_opentsdb_instance`, -`opentsdb:https:my_opentsdb_instance`. +`opentsdb:https:my_opentsdb_instance`. You can set basic HTTP authentication credentials using + +```conf + username = my_username + password = my_password +``` The OpenTSDB connector is further configurable using additional settings. See the [exporting reference doc](/exporting/README.md#options) for details. diff --git a/exporting/opentsdb/opentsdb.c b/exporting/opentsdb/opentsdb.c index 1310c150e..7ed88fd6d 100644 --- a/exporting/opentsdb/opentsdb.c +++ b/exporting/opentsdb/opentsdb.c @@ -269,10 +269,12 @@ void opentsdb_http_prepare_header(struct instance *instance) simple_connector_data->last_buffer->header, "POST /api/put HTTP/1.1\r\n" "Host: %s\r\n" + "%s" "Content-Type: application/json\r\n" "Content-Length: %lu\r\n" "\r\n", instance->config.destination, + simple_connector_data->auth_string ? simple_connector_data->auth_string : "", buffer_strlen(simple_connector_data->last_buffer->buffer)); return; diff --git a/exporting/process_data.c b/exporting/process_data.c index 5e11b3948..2c0c2d17c 100644 --- a/exporting/process_data.c +++ b/exporting/process_data.c @@ -70,7 +70,9 @@ calculated_number exporting_calculate_value_from_stored_data( time_t *last_timestamp) { RRDSET *st = rd->rrdset; +#ifdef NETDATA_INTERNAL_CHECKS RRDHOST *host = st->rrdhost; +#endif time_t after = instance->after; time_t before = instance->before; diff --git a/exporting/prometheus/README.md b/exporting/prometheus/README.md index d718a366e..ef6f61358 100644 --- a/exporting/prometheus/README.md +++ b/exporting/prometheus/README.md @@ -128,46 +128,46 @@ scrape_configs: #### Install nodes.yml -The following is completely optional, it will enable Prometheus to generate alerts from some NetData sources. Tweak the +The following is completely optional, it will enable Prometheus to generate alerts from some Netdata sources. 
Tweak the values to your own needs. We will use the following `nodes.yml` file below. Save it at `/opt/prometheus/nodes.yml`, and add a _- "nodes.yml"_ entry under the _rule_files:_ section in the example prometheus.yml file above. ```yaml groups: -- name: nodes - - rules: - - alert: node_high_cpu_usage_70 - expr: avg(rate(netdata_cpu_cpu_percentage_average{dimension="idle"}[1m])) by (job) > 70 - for: 1m - annotations: - description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.' - summary: CPU alert for container node '{{ $labels.job }}' - - - alert: node_high_memory_usage_70 - expr: 100 / sum(netdata_system_ram_MB_average) by (job) - * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30 - for: 1m - annotations: - description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.' - summary: Memory alert for container node '{{ $labels.job }}' - - - alert: node_low_root_filesystem_space_20 - expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job) - * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20 - for: 1m - annotations: - description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.' - summary: Root filesystem alert for container node '{{ $labels.job }}' - - - alert: node_root_filesystem_fill_rate_6h - expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0 - for: 1h - labels: - severity: critical - annotations: - description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h. - summary: Disk fill alert for Swarm node '{{ $labels.job }}' + - name: nodes + + rules: + - alert: node_high_cpu_usage_70 + expr: sum(sum_over_time(netdata_system_cpu_percentage_average{dimension=~"(user|system|softirq|irq|guest)"}[10m])) by (job) / sum(count_over_time(netdata_system_cpu_percentage_average{dimension="idle"}[10m])) by (job) > 70 + for: 1m + annotations: + description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.' + summary: CPU alert for container node '{{ $labels.job }}' + + - alert: node_high_memory_usage_70 + expr: 100 / sum(netdata_system_ram_MB_average) by (job) + * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30 + for: 1m + annotations: + description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.' + summary: Memory alert for container node '{{ $labels.job }}' + + - alert: node_low_root_filesystem_space_20 + expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job) + * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20 + for: 1m + annotations: + description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.' + summary: Root filesystem alert for container node '{{ $labels.job }}' + + - alert: node_root_filesystem_fill_rate_6h + expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0 + for: 1h + labels: + severity: critical + annotations: + description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h. 
+ summary: Disk fill alert for Swarm node '{{ $labels.job }}' ``` #### Install prometheus.service diff --git a/exporting/prometheus/prometheus.c b/exporting/prometheus/prometheus.c index 6759313c3..0a3190074 100644 --- a/exporting/prometheus/prometheus.c +++ b/exporting/prometheus/prometheus.c @@ -16,7 +16,9 @@ */ inline int can_send_rrdset(struct instance *instance, RRDSET *st) { +#ifdef NETDATA_INTERNAL_CHECKS RRDHOST *host = st->rrdhost; +#endif if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_EXPORTING_IGNORE))) return 0; @@ -136,7 +138,7 @@ static inline time_t prometheus_server_last_access(const char *server, RRDHOST * * Copy and sanitize name. * * @param d a destination string. - * @param s a source sting. + * @param s a source string. * @param usable the number of characters to copy. * @return Returns the length of the copied string. */ @@ -161,7 +163,7 @@ inline size_t prometheus_name_copy(char *d, const char *s, size_t usable) * Copy and sanitize label. * * @param d a destination string. - * @param s a source sting. + * @param s a source string. * @param usable the number of characters to copy. * @return Returns the length of the copied string. */ @@ -190,7 +192,7 @@ inline size_t prometheus_label_copy(char *d, const char *s, size_t usable) * Copy and sanitize units. * * @param d a destination string. - * @param s a source sting. + * @param s a source string. * @param usable the number of characters to copy. * @param showoldunits set this flag to 1 to show old (before v1.12) units. * @return Returns the destination string. diff --git a/exporting/prometheus/remote_write/README.md b/exporting/prometheus/remote_write/README.md index fe901024b..ce379063e 100644 --- a/exporting/prometheus/remote_write/README.md +++ b/exporting/prometheus/remote_write/README.md @@ -41,6 +41,13 @@ For example, if your endpoint is `http://example.domain:example_port/storage/rea remote write URL path = /storage/read ``` +You can set basic HTTP authentication credentials using + +```conf + username = my_username + password = my_password +``` + `buffered` and `lost` dimensions in the Netdata Exporting Connector Data Size operation monitoring chart estimate uncompressed buffer size on failures. diff --git a/exporting/prometheus/remote_write/remote_write.c b/exporting/prometheus/remote_write/remote_write.c index 986ad9f0e..8339712eb 100644 --- a/exporting/prometheus/remote_write/remote_write.c +++ b/exporting/prometheus/remote_write/remote_write.c @@ -25,6 +25,7 @@ void prometheus_remote_write_prepare_header(struct instance *instance) "POST %s HTTP/1.1\r\n" "Host: %s\r\n" "Accept: */*\r\n" + "%s" "Content-Encoding: snappy\r\n" "Content-Type: application/x-protobuf\r\n" "X-Prometheus-Remote-Write-Version: 0.1.0\r\n" @@ -32,6 +33,7 @@ void prometheus_remote_write_prepare_header(struct instance *instance) "\r\n", connector_specific_config->remote_write_path, simple_connector_data->connected_to, + simple_connector_data->auth_string ? 
simple_connector_data->auth_string : "", buffer_strlen(simple_connector_data->last_buffer->buffer)); } diff --git a/exporting/read_config.c b/exporting/read_config.c index ea50fa0f6..77687d845 100644 --- a/exporting/read_config.c +++ b/exporting/read_config.c @@ -456,6 +456,10 @@ struct engine *read_exporting_config() tmp_instance->config.destination = strdupz(exporter_get(instance_name, "destination", default_destination)); + tmp_instance->config.username = strdupz(exporter_get(instance_name, "username", "")); + + tmp_instance->config.password = strdupz(exporter_get(instance_name, "password", "")); + tmp_instance->config.prefix = strdupz(exporter_get(instance_name, "prefix", "netdata")); tmp_instance->config.hostname = strdupz(exporter_get(instance_name, "hostname", engine->config.hostname)); diff --git a/exporting/tests/exporting_doubles.c b/exporting/tests/exporting_doubles.c index 3c73e0327..b8c9f3756 100644 --- a/exporting/tests/exporting_doubles.c +++ b/exporting/tests/exporting_doubles.c @@ -22,6 +22,8 @@ struct engine *__mock_read_exporting_config() instance->config.type = EXPORTING_CONNECTOR_TYPE_GRAPHITE; instance->config.name = strdupz("instance_name"); instance->config.destination = strdupz("localhost"); + instance->config.username = strdupz(""); + instance->config.password = strdupz(""); instance->config.prefix = strdupz("netdata"); instance->config.hostname = strdupz("test-host"); instance->config.update_every = 1; diff --git a/exporting/tests/exporting_fixtures.c b/exporting/tests/exporting_fixtures.c index b5b0ce816..b632761e7 100644 --- a/exporting/tests/exporting_fixtures.c +++ b/exporting/tests/exporting_fixtures.c @@ -18,6 +18,8 @@ int teardown_configured_engine(void **state) struct instance *instance = engine->instance_root; free((void *)instance->config.destination); + free((void *)instance->config.username); + free((void *)instance->config.password); free((void *)instance->config.name); free((void *)instance->config.prefix); free((void *)instance->config.hostname); diff --git a/exporting/tests/test_exporting_engine.c b/exporting/tests/test_exporting_engine.c index 73fd3ca66..7188c6eee 100644 --- a/exporting/tests/test_exporting_engine.c +++ b/exporting/tests/test_exporting_engine.c @@ -312,12 +312,12 @@ static void test_exporting_calculate_value_from_stored_data(void **state) expect_function_call(__mock_rrddim_query_is_finished); will_return(__mock_rrddim_query_is_finished, 0); expect_function_call(__mock_rrddim_query_next_metric); - will_return(__mock_rrddim_query_next_metric, pack_storage_number(27, SN_EXISTS)); + will_return(__mock_rrddim_query_next_metric, pack_storage_number(27, SN_DEFAULT_FLAGS)); expect_function_call(__mock_rrddim_query_is_finished); will_return(__mock_rrddim_query_is_finished, 0); expect_function_call(__mock_rrddim_query_next_metric); - will_return(__mock_rrddim_query_next_metric, pack_storage_number(45, SN_EXISTS)); + will_return(__mock_rrddim_query_next_metric, pack_storage_number(45, SN_DEFAULT_FLAGS)); expect_function_call(__mock_rrddim_query_is_finished); will_return(__mock_rrddim_query_is_finished, 1); @@ -431,7 +431,7 @@ static void test_format_dimension_stored_graphite_plaintext(void **state) struct engine *engine = *state; expect_function_call(__wrap_exporting_calculate_value_from_stored_data); - will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS)); + will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS)); RRDDIM *rd = 
localhost->rrdset_root->dimensions; assert_int_equal(format_dimension_stored_graphite_plaintext(engine->instance_root, rd), 0); @@ -459,7 +459,7 @@ static void test_format_dimension_stored_json_plaintext(void **state) struct engine *engine = *state; expect_function_call(__wrap_exporting_calculate_value_from_stored_data); - will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS)); + will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS)); RRDDIM *rd = localhost->rrdset_root->dimensions; assert_int_equal(format_dimension_stored_json_plaintext(engine->instance_root, rd), 0); @@ -487,7 +487,7 @@ static void test_format_dimension_stored_opentsdb_telnet(void **state) struct engine *engine = *state; expect_function_call(__wrap_exporting_calculate_value_from_stored_data); - will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS)); + will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS)); RRDDIM *rd = localhost->rrdset_root->dimensions; assert_int_equal(format_dimension_stored_opentsdb_telnet(engine->instance_root, rd), 0); @@ -515,7 +515,7 @@ static void test_format_dimension_stored_opentsdb_http(void **state) struct engine *engine = *state; expect_function_call(__wrap_exporting_calculate_value_from_stored_data); - will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS)); + will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS)); RRDDIM *rd = localhost->rrdset_root->dimensions; assert_int_equal(format_dimension_stored_opentsdb_http(engine->instance_root, rd), 0); @@ -1053,7 +1053,7 @@ static void test_format_host_labels_prometheus(void **state) instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS; format_host_labels_prometheus(instance, localhost); - assert_string_equal(buffer_tostring(instance->labels), "key1=\"netdata\",key2=\"value2\""); + assert_string_equal(buffer_tostring(instance->labels), "key1=\"value1\",key2=\"value2\""); } static void rrd_stats_api_v1_charts_allmetrics_prometheus(void **state) @@ -1070,7 +1070,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(void **state) will_return(__wrap_now_realtime_sec, 2); expect_function_call(__wrap_exporting_calculate_value_from_stored_data); - will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS)); + will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS)); rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(localhost, buffer, "test_server", "test_prefix", 0, 0); @@ -1087,7 +1087,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(void **state) will_return(__wrap_now_realtime_sec, 2); expect_function_call(__wrap_exporting_calculate_value_from_stored_data); - will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS)); + will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS)); rrd_stats_api_v1_charts_allmetrics_prometheus_single_host( localhost, buffer, "test_server", "test_prefix", 0, PROMETHEUS_OUTPUT_NAMES | PROMETHEUS_OUTPUT_TYPES); @@ -1106,7 +1106,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(void **state) will_return(__wrap_now_realtime_sec, 2); expect_function_call(__wrap_exporting_calculate_value_from_stored_data); - 
will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS)); + will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS)); rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(localhost, buffer, "test_server", "test_prefix", 0, 0); @@ -1265,7 +1265,7 @@ static void test_format_dimension_prometheus_remote_write(void **state) RRDDIM *rd = localhost->rrdset_root->dimensions; expect_function_call(__wrap_exporting_calculate_value_from_stored_data); - will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS)); + will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS)); expect_function_call(__wrap_add_metric); expect_value(__wrap_add_metric, write_request_p, 0xff); @@ -1877,7 +1877,7 @@ int main(void) cmocka_unit_test_setup_teardown(test_prometheus_label_copy, setup_prometheus, teardown_prometheus), cmocka_unit_test_setup_teardown(test_prometheus_units_copy, setup_prometheus, teardown_prometheus), cmocka_unit_test_setup_teardown( - test_format_host_labels_prometheus, setup_configured_engine, teardown_configured_engine), + test_format_host_labels_prometheus, setup_initialized_engine, teardown_initialized_engine), cmocka_unit_test_setup_teardown( rrd_stats_api_v1_charts_allmetrics_prometheus, setup_prometheus, teardown_prometheus), }; diff --git a/health/Makefile.am b/health/Makefile.am index b963ea0cd..349b86d61 100644 --- a/health/Makefile.am +++ b/health/Makefile.am @@ -25,9 +25,7 @@ install-exec-local: healthconfigdir=$(libconfigdir)/health.d dist_healthconfig_DATA = \ health.d/adaptec_raid.conf \ - health.d/am2320.conf \ health.d/anomalies.conf \ - health.d/apache.conf \ health.d/apcupsd.conf \ health.d/backend.conf \ health.d/bcache.conf \ @@ -39,18 +37,18 @@ dist_healthconfig_DATA = \ health.d/cgroups.conf \ health.d/cpu.conf \ health.d/cockroachdb.conf \ - health.d/couchdb.conf \ health.d/disks.conf \ health.d/dnsmasq_dhcp.conf \ health.d/dns_query.conf \ health.d/dockerd.conf \ - health.d/elasticsearch.conf \ health.d/entropy.conf \ health.d/exporting.conf \ health.d/fping.conf \ + health.d/geth.conf \ health.d/ioping.conf \ health.d/fronius.conf \ health.d/gearman.conf \ + health.d/go.d.plugin.conf \ health.d/haproxy.conf \ health.d/hdfs.conf \ health.d/httpcheck.conf \ @@ -59,26 +57,19 @@ dist_healthconfig_DATA = \ health.d/ipmi.conf \ health.d/isc_dhcpd.conf \ health.d/kubelet.conf \ - health.d/lighttpd.conf \ health.d/linux_power_supply.conf \ health.d/load.conf \ health.d/mdstat.conf \ health.d/megacli.conf \ health.d/memcached.conf \ health.d/memory.conf \ - health.d/mongodb.conf \ health.d/mysql.conf \ - health.d/named.conf \ health.d/net.conf \ health.d/netfilter.conf \ - health.d/nginx.conf \ - health.d/nginx_plus.conf \ health.d/pihole.conf \ - health.d/phpfpm.conf \ health.d/portcheck.conf \ - health.d/postgres.conf \ health.d/processes.conf \ - health.d/pulsar.conf \ + health.d/python.d.plugin.conf \ health.d/qos.conf \ health.d/ram.conf \ health.d/redis.conf \ @@ -86,11 +77,11 @@ dist_healthconfig_DATA = \ health.d/riakkv.conf \ health.d/scaleio.conf \ health.d/softnet.conf \ - health.d/squid.conf \ health.d/stiebeleltron.conf \ health.d/synchronization.conf \ health.d/swap.conf \ health.d/systemdunits.conf \ + health.d/timex.conf \ health.d/tcp_conn.conf \ health.d/tcp_listen.conf \ health.d/tcp_mem.conf \ @@ -98,7 +89,6 @@ dist_healthconfig_DATA = \ health.d/tcp_resets.conf \ health.d/udp_errors.conf 
\ health.d/unbound.conf \ - health.d/varnish.conf \ health.d/vcsa.conf \ health.d/vernemq.conf \ health.d/vsphere.conf \ @@ -107,6 +97,5 @@ dist_healthconfig_DATA = \ health.d/wmi.conf \ health.d/x509check.conf \ health.d/zfs.conf \ - health.d/zookeeper.conf \ health.d/dbengine.conf \ $(NULL) diff --git a/health/REFERENCE.md b/health/REFERENCE.md index 5ea6b7c5d..f1bb5557d 100644 --- a/health/REFERENCE.md +++ b/health/REFERENCE.md @@ -54,14 +54,17 @@ Netdata parses the following lines. Beneath the table is an in-depth explanation - A few lines use space-separated lists to define how the entity behaves. You can use `*` as a wildcard or prefix with `!` for a negative match. Order is important, too! See our [simple patterns docs](../libnetdata/simple_pattern/) for more examples. +- Lines terminated by a `\` are spliced together with the next line. The backslash is removed and the following line is + joined with the current one. No space is inserted, so you may split a line anywhere, even in the middle of a word. + This comes in handy if your `info` line consists of several sentences. | line | required | functionality | | --------------------------------------------------- | --------------- | ------------------------------------------------------------------------------------- | | [`alarm`/`template`](#alarm-line-alarm-or-template) | yes | Name of the alarm/template. | | [`on`](#alarm-line-on) | yes | The chart this alarm should attach to. | -| [`class`](#alarm-line-class) | no | The general classification of the alarm. | -| [`component`](#alarm-line-component) | no | Specify the component of the class of the alarm. | -| [`type`](#alarm-line-type) | no | The type of error the alarm monitors. | +| [`class`](#alarm-line-class) | no | The general alarm classification. | +| [`type`](#alarm-line-type) | no | What area of the system the alarm monitors. | +| [`component`](#alarm-line-component) | no | Specific component of the type of the alarm. | | [`os`](#alarm-line-os) | no | Which operating systems to run this chart. | | [`hosts`](#alarm-line-hosts) | no | Which hostnames will run this alarm. | | [`plugin`](#alarm-line-plugin) | no | Restrict an alarm or template to only a certain plugin. | @@ -136,24 +139,45 @@ If you create a template using the `disk.io` context, it will apply an alarm to #### Alarm line `class` -Specify the classification of the alarm or template. +This indicates the type of error (or general problem area) that the alarm or template applies to. For example, `Latency` can be used for alarms that trigger on latency issues on network interfaces, web servers, or database systems. Example: -Class can be used to indicate the broader area of the system that the alarm applies to. For example, under the general `Database` class, you can group together alarms that operate on various database systems, like `MySQL`, `CockroachDB`, `CouchDB` etc. Example: +```yaml +class: Latency +``` + +
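+Like every other line in an entity, long values can be split using the `\` splicing rule described at the top of this section; this is mostly useful for wordy `info` lines. A minimal sketch (the alarm text is hypothetical; note that the space before the backslash is deliberate, since the join inserts none):
+
+```conf
+    info: ratio of requests that failed against the monitored endpoint, \
+averaged over the last 10 minutes
+```
+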
    +Netdata's stock alarms use the following `class` attributes by default: + +| Class | +| ----------------| +| Errors | +| Latency | +| Utilization | +| Workload | + + +
    + +`class` will default to `Unknown` if the line is missing from the alarm configuration. + +#### Alarm line `type` + +Type can be used to indicate the broader area of the system that the alarm applies to. For example, under the general `Database` type, you can group together alarms that operate on various database systems, like `MySQL`, `CockroachDB`, `CouchDB` etc. Example: ```yaml -class: Database +type: Database ```
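+Taken together, `class`, `type`, and the `component` line described below classify an alarm from the broadest to the narrowest axis. A minimal sketch of how the three might appear in a single template header (the template and chart names are hypothetical):
+
+```conf
+ template: mysql_10s_slow_queries
+       on: mysql.queries
+    class: Latency
+     type: Database
+component: MySQL
+```
+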
    -Netdata's stock alarms use the following `class` attributes by default, but feel free to adjust for your own requirements. +Netdata's stock alarms use the following `type` attributes by default, but feel free to adjust for your own requirements. -| Class | Description | +| Type | Description | | ------------------------ | ------------------------------------------------------------------------------------------------ | | Ad Filtering | Services related to Ad Filtering (like pi-hole) | | Certificates | Certificates monitoring related | | Cgroups | Alerts for cpu and memory usage of control groups | | Computing | Alerts for shared computing applications (e.g. boinc) | | Containers | Container related alerts (e.g. docker instances) | -| Database | Database systems (e.g. MySQL, Postgress, etc) | +| Database | Database systems (e.g. MySQL, PostgreSQL, etc) | | Data Sharing | Used to group together alerts for data sharing applications | | DHCP | Alerts for dhcp related services | | DNS | Alerts for dns related services | @@ -162,7 +186,7 @@ class: Database | Linux | Services specific to Linux (e.g. systemd) | | Messaging | Alerts for message passing services (e.g. vernemq) | | Netdata | Internal Netdata components monitoring | -| Other | Use as a general class of alerts | +| Other | When an alert doesn't fit in any other type | | Power Supply | Alerts from power supply related services (e.g. apcupsd) | | Search engine | Alerts for search services (e.g. elasticsearch) | | Storage | Class for alerts dealing with storage services (storage devices typically live under `System`) | @@ -174,26 +198,16 @@ class: Database 
    -If an alarm configuration is missing the `class` line, its value will default to `Unknown`. +If an alarm configuration is missing the `type` line, its value will default to `Unknown`. #### Alarm line `component` -Component can be used to narrow down what the previous `class` value specifies for each alarm or template. Continuing from the previous example, `component` might include `MySQL`, `CockroachDB`, `MongoDB`, all under the same `Database` classification. Example: +Component can be used to narrow down what the previous `type` value specifies for each alarm or template. Continuing from the previous example, `component` might include `MySQL`, `CockroachDB`, `MongoDB`, all under the same `Database` type. Example: ```yaml component: MySQL ``` -As with the `class` line, if `component` is missing from the configuration, its value will default to `Unknown`. - -#### Alarm line `type` - -This indicates the type of error (or general problem area) that the alarm or template applies to. For example, `Latency` can be used for alarms that trigger on latency issues in network interfaces, web servers, or database systems. Example: - -```yaml -type: Latency -``` - -`type` will also (as with `class` and `component`) default to `Unknown` if the line is missing from the alarm configuration. +As with the `class` and `type` line, if `component` is missing from the configuration, its value will default to `Unknown`. #### Alarm line `os` diff --git a/health/health.c b/health/health.c index 85d2a2458..d8e1d4b77 100644 --- a/health/health.c +++ b/health/health.c @@ -230,6 +230,9 @@ void health_reload(void) { if (netdata_cloud_setting) { aclk_single_update_enable(); aclk_alarm_reload(); +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + aclk_alert_reloaded = 1; +#endif } #endif } @@ -308,26 +311,44 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) { int n_warn=0, n_crit=0; RRDCALC *rc; EVAL_EXPRESSION *expr=NULL; + BUFFER *warn_alarms, *crit_alarms; + + warn_alarms = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE); + crit_alarms = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE); for(rc = host->alarms; rc ; rc = rc->next) { if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec)) continue; - if(unlikely(rc->status == RRDCALC_STATUS_WARNING)) { - n_warn++; - if (ae->alarm_id == rc->id) - expr=rc->warning; + if (unlikely(rc->status == RRDCALC_STATUS_WARNING)) { + if (likely(ae->alarm_id != rc->id) || likely(ae->alarm_event_id != rc->next_event_id - 1)) { + if (n_warn) + buffer_strcat(warn_alarms, ","); + buffer_strcat(warn_alarms, rc->name); + buffer_strcat(warn_alarms, "="); + buffer_snprintf(warn_alarms, 11, "%ld", rc->last_status_change); + n_warn++; + } else if (ae->alarm_id == rc->id) + expr = rc->warning; } else if (unlikely(rc->status == RRDCALC_STATUS_CRITICAL)) { - n_crit++; - if (ae->alarm_id == rc->id) - expr=rc->critical; + if (likely(ae->alarm_id != rc->id) || likely(ae->alarm_event_id != rc->next_event_id - 1)) { + if (n_crit) + buffer_strcat(crit_alarms, ","); + buffer_strcat(crit_alarms, rc->name); + buffer_strcat(crit_alarms, "="); + buffer_snprintf(crit_alarms, 11, "%ld", rc->last_status_change); + n_crit++; + } else if (ae->alarm_id == rc->id) + expr = rc->critical; } else if (unlikely(rc->status == RRDCALC_STATUS_CLEAR)) { if (ae->alarm_id == rc->id) - expr=rc->warning; + expr = rc->warning; } } - snprintfz(command_to_run, ALARM_EXEC_COMMAND_LENGTH, "exec %s '%s' '%s' '%u' '%u' '%u' '%lu' '%s' '%s' '%s' '%s' '%s' '" CALCULATED_NUMBER_FORMAT_ZERO "' '" 
CALCULATED_NUMBER_FORMAT_ZERO "' '%s' '%u' '%u' '%s' '%s' '%s' '%s' '%s' '%s' '%d' '%d'", + char *edit_command = ae->source ? health_edit_command_from_source(ae->source) : strdupz("UNKNOWN=0"); + + snprintfz(command_to_run, ALARM_EXEC_COMMAND_LENGTH, "exec %s '%s' '%s' '%u' '%u' '%u' '%lu' '%s' '%s' '%s' '%s' '%s' '" CALCULATED_NUMBER_FORMAT_ZERO "' '" CALCULATED_NUMBER_FORMAT_ZERO "' '%s' '%u' '%u' '%s' '%s' '%s' '%s' '%s' '%s' '%d' '%d' '%s' '%s' '%s' '%s' '%s'", exec, recipient, host->registry_hostname, @@ -352,7 +373,12 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) { (expr && expr->source)?expr->source:"NOSOURCE", (expr && expr->error_msg)?buffer_tostring(expr->error_msg):"NOERRMSG", n_warn, - n_crit + n_crit, + buffer_tostring(warn_alarms), + buffer_tostring(crit_alarms), + ae->classification?ae->classification:"Unknown", + edit_command, + localhost->registry_hostname ); ae->flags |= HEALTH_ENTRY_FLAG_EXEC_RUN; @@ -363,6 +389,10 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) { ae->exec_spawn_serial = spawn_enq_cmd(command_to_run); enqueue_alarm_notify_in_progress(ae); + freez(edit_command); + buffer_free(warn_alarms); + buffer_free(crit_alarms); + return; //health_alarm_wait_for_execution done: health_alarm_log_save(host, ae); @@ -635,6 +665,8 @@ void *health_main(void *ptr) { int min_run_every = (int)config_get_number(CONFIG_SECTION_HEALTH, "run at least every seconds", 10); if(min_run_every < 1) min_run_every = 1; + int cleanup_sql_every_loop = 7200 / min_run_every; + time_t now = now_realtime_sec(); time_t hibernation_delay = config_get_number(CONFIG_SECTION_HEALTH, "postpone alarms during hibernation for seconds", 60); @@ -689,6 +721,9 @@ void *health_main(void *ptr) { host->health_delay_up_to = 0; } + if(likely(!host->health_log_fp) && (loop == 1 || loop % cleanup_sql_every_loop == 0)) + sql_health_alarm_log_cleanup(host); + rrdhost_rdlock(host); // the first loop is to lookup values from the db @@ -929,7 +964,7 @@ void *health_main(void *ptr) { if(likely(!rrdcalc_isrepeating(rc))) { ALARM_ENTRY *ae = health_create_alarm_entry( - host, rc->id, rc->next_event_id++, now, rc->name, rc->rrdset->id, + host, rc->id, rc->next_event_id++, rc->config_hash_id, now, rc->name, rc->rrdset->id, rc->rrdset->family, rc->classification, rc->component, rc->type, rc->exec, rc->recipient, now - rc->last_status_change, rc->old_value, rc->value, rc->status, status, rc->source, rc->units, rc->info, rc->delay_last, @@ -979,7 +1014,7 @@ void *health_main(void *ptr) { if(unlikely(repeat_every > 0 && (rc->last_repeat + repeat_every) <= now)) { rc->last_repeat = now; ALARM_ENTRY *ae = health_create_alarm_entry( - host, rc->id, rc->next_event_id++, now, rc->name, rc->rrdset->id, + host, rc->id, rc->next_event_id++, rc->config_hash_id, now, rc->name, rc->rrdset->id, rc->rrdset->family, rc->classification, rc->component, rc->type, rc->exec, rc->recipient, now - rc->last_status_change, rc->old_value, rc->value, rc->old_status, rc->status, rc->source, rc->units, rc->info, rc->delay_last, @@ -1003,6 +1038,14 @@ void *health_main(void *ptr) { rrdhost_unlock(host); } +#ifdef ENABLE_ACLK +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + if (netdata_cloud_setting && unlikely(aclk_alert_reloaded) && loop > 2) { + sql_queue_removed_alerts_to_aclk(host); + } +#endif +#endif + if (unlikely(netdata_exit)) break; @@ -1027,8 +1070,12 @@ void *health_main(void *ptr) { health_alarm_wait_for_execution(ae); } - rrd_unlock(); +#ifdef ENABLE_NEW_CLOUD_PROTOCOL + if (netdata_cloud_setting 
&& unlikely(aclk_alert_reloaded)) + aclk_alert_reloaded = 0; +#endif + rrd_unlock(); if(unlikely(netdata_exit)) break; diff --git a/health/health.d/adaptec_raid.conf b/health/health.d/adaptec_raid.conf index b067e1840..1d823addd 100644 --- a/health/health.d/adaptec_raid.conf +++ b/health/health.d/adaptec_raid.conf @@ -3,9 +3,9 @@ template: adaptec_raid_ld_status on: adaptec_raid.ld_status - class: System + class: Errors + type: System component: RAID - type: Errors lookup: max -10s foreach * units: bool every: 10s @@ -18,9 +18,9 @@ component: RAID template: adaptec_raid_pd_state on: adaptec_raid.pd_state - class: System + class: Errors + type: System component: RAID - type: Errors lookup: max -10s foreach * units: bool every: 10s diff --git a/health/health.d/am2320.conf b/health/health.d/am2320.conf deleted file mode 100644 index 4bac98fbb..000000000 --- a/health/health.d/am2320.conf +++ /dev/null @@ -1,15 +0,0 @@ -# make sure am2320 is sending stats - - template: am2320_last_collected_secs - on: am2320.temperature - class: Other -component: Sensors - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster diff --git a/health/health.d/anomalies.conf b/health/health.d/anomalies.conf index f27e39fc1..269ae544b 100644 --- a/health/health.d/anomalies.conf +++ b/health/health.d/anomalies.conf @@ -2,9 +2,9 @@ template: anomalies_anomaly_probabilities on: anomalies.probability - class: Netdata + class: Errors + type: Netdata component: ML - type: Errors lookup: average -2m foreach * every: 1m warn: $this > 50 @@ -14,9 +14,9 @@ component: ML template: anomalies_anomaly_flags on: anomalies.anomaly - class: Netdata + class: Errors + type: Netdata component: ML - type: Errors lookup: sum -2m foreach * every: 1m warn: $this > 10 diff --git a/health/health.d/apache.conf b/health/health.d/apache.conf deleted file mode 100644 index c623fb880..000000000 --- a/health/health.d/apache.conf +++ /dev/null @@ -1,17 +0,0 @@ - -# make sure apache is running - - template: apache_last_collected_secs - on: apache.requests - class: Web Server -component: Apache - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - diff --git a/health/health.d/apcupsd.conf b/health/health.d/apcupsd.conf index 07b5c28c9..65f1a69ab 100644 --- a/health/health.d/apcupsd.conf +++ b/health/health.d/apcupsd.conf @@ -2,9 +2,9 @@ template: apcupsd_10min_ups_load on: apcupsd.load - class: Power Supply + class: Utilization + type: Power Supply component: UPS - type: Utilization os: * hosts: * lookup: average -10m unaligned of percentage @@ -20,9 +20,9 @@ component: UPS # Fire the alarm as soon as it's going on battery (99% charge) and clear only when full. 
template: apcupsd_ups_charge on: apcupsd.charge - class: Power Supply + class: Errors + type: Power Supply component: UPS - type: Errors os: * hosts: * lookup: average -60s unaligned of charge @@ -36,9 +36,9 @@ component: UPS template: apcupsd_last_collected_secs on: apcupsd.load - class: Power Supply + class: Latency + type: Power Supply component: UPS device - type: Latency calc: $now - $last_collected_t every: 10s units: seconds ago diff --git a/health/health.d/backend.conf b/health/health.d/backend.conf index 948ea551a..91d469395 100644 --- a/health/health.d/backend.conf +++ b/health/health.d/backend.conf @@ -1,9 +1,9 @@ # Alert that backends subsystem will be disabled soon alarm: backend_metrics_eol on: netdata.backend_metrics - class: Netdata + class: Errors + type: Netdata component: Exporting engine - type: Errors units: boolean calc: $now - $last_collected_t every: 1m @@ -16,9 +16,9 @@ component: Exporting engine alarm: backend_last_buffering on: netdata.backend_metrics - class: Netdata + class: Latency + type: Netdata component: Exporting engine - type: Latency calc: $now - $last_collected_t units: seconds ago every: 10s @@ -30,9 +30,9 @@ component: Exporting engine alarm: backend_metrics_sent on: netdata.backend_metrics - class: Netdata + class: Workload + type: Netdata component: Exporting engine - type: Workload units: % calc: abs($sent) * 100 / abs($buffered) every: 10s diff --git a/health/health.d/bcache.conf b/health/health.d/bcache.conf index d75d8e19b..49cb5ad0f 100644 --- a/health/health.d/bcache.conf +++ b/health/health.d/bcache.conf @@ -1,9 +1,9 @@ template: bcache_cache_errors on: disk.bcache_cache_read_races - class: System + class: Errors + type: System component: Disk - type: Errors lookup: sum -1m unaligned absolute units: errors every: 1m @@ -16,9 +16,9 @@ component: Disk template: bcache_cache_dirty on: disk.bcache_cache_alloc - class: System + class: Utilization + type: System component: Disk - type: Utilization calc: $dirty + $metadata + $undefined units: % every: 1m diff --git a/health/health.d/beanstalkd.conf b/health/health.d/beanstalkd.conf index 99c754571..13ac8c182 100644 --- a/health/health.d/beanstalkd.conf +++ b/health/health.d/beanstalkd.conf @@ -2,9 +2,9 @@ template: beanstalk_server_buried_jobs on: beanstalk.current_jobs - class: Messaging + class: Workload + type: Messaging component: Beanstalk - type: Workload calc: $buried units: jobs every: 10s diff --git a/health/health.d/bind_rndc.conf b/health/health.d/bind_rndc.conf index e88f87a4f..7c09225ff 100644 --- a/health/health.d/bind_rndc.conf +++ b/health/health.d/bind_rndc.conf @@ -1,8 +1,8 @@ template: bind_rndc_stats_file_size on: bind_rndc.stats_size - class: DNS + class: Utilization + type: DNS component: BIND - type: Utilization units: megabytes every: 60 calc: $stats_size diff --git a/health/health.d/boinc.conf b/health/health.d/boinc.conf index 8604abee9..7d7a4fdae 100644 --- a/health/health.d/boinc.conf +++ b/health/health.d/boinc.conf @@ -3,9 +3,9 @@ # Warn on any compute errors encountered. 
template: boinc_compute_errors on: boinc.states - class: Computing + class: Errors + type: Computing component: BOINC - type: Errors os: * hosts: * families: * @@ -21,9 +21,9 @@ component: BOINC # Warn on lots of upload errors template: boinc_upload_errors on: boinc.states - class: Computing + class: Errors + type: Computing component: BOINC - type: Errors os: * hosts: * families: * @@ -39,9 +39,9 @@ component: BOINC # Warn on the task queue being empty template: boinc_total_tasks on: boinc.tasks - class: Computing + class: Utilization + type: Computing component: BOINC - type: Utilization os: * hosts: * families: * @@ -57,9 +57,9 @@ component: BOINC # Warn on no active tasks with a non-empty queue template: boinc_active_tasks on: boinc.tasks - class: Computing + class: Utilization + type: Computing component: BOINC - type: Utilization os: * hosts: * families: * diff --git a/health/health.d/btrfs.conf b/health/health.d/btrfs.conf index d3200a7ee..8d197aa8d 100644 --- a/health/health.d/btrfs.conf +++ b/health/health.d/btrfs.conf @@ -1,9 +1,9 @@ template: btrfs_allocated on: btrfs.disk - class: System + class: Utilization + type: System component: File system - type: Utilization os: * hosts: * families: * @@ -18,9 +18,9 @@ component: File system template: btrfs_data on: btrfs.data - class: System + class: Utilization + type: System component: File system - type: Utilization os: * hosts: * families: * @@ -35,9 +35,9 @@ component: File system template: btrfs_metadata on: btrfs.metadata - class: System + class: Utilization + type: System component: File system - type: Utilization os: * hosts: * families: * @@ -52,9 +52,9 @@ component: File system template: btrfs_system on: btrfs.system - class: System + class: Utilization + type: System component: File system - type: Utilization os: * hosts: * families: * diff --git a/health/health.d/ceph.conf b/health/health.d/ceph.conf index ed8f9b4b9..1f9da25c7 100644 --- a/health/health.d/ceph.conf +++ b/health/health.d/ceph.conf @@ -2,9 +2,9 @@ template: ceph_cluster_space_usage on: ceph.general_usage - class: Storage + class: Utilization + type: Storage component: Ceph - type: Utilization calc: $used * 100 / ($used + $avail) units: % every: 1m diff --git a/health/health.d/cgroups.conf b/health/health.d/cgroups.conf index 068533f10..45b34806c 100644 --- a/health/health.d/cgroups.conf +++ b/health/health.d/cgroups.conf @@ -3,9 +3,9 @@ template: cgroup_10min_cpu_usage on: cgroup.cpu_limit - class: Cgroups + class: Utilization + type: Cgroups component: CPU - type: Utilization os: linux hosts: * lookup: average -10m unaligned @@ -19,9 +19,9 @@ component: CPU template: cgroup_ram_in_use on: cgroup.mem_usage - class: Cgroups + class: Utilization + type: Cgroups component: Memory - type: Utilization os: linux hosts: * calc: ($ram) * 100 / $memory_limit diff --git a/health/health.d/cockroachdb.conf b/health/health.d/cockroachdb.conf index dccd2b064..1f227841e 100644 --- a/health/health.d/cockroachdb.conf +++ b/health/health.d/cockroachdb.conf @@ -1,27 +1,11 @@ -# Availability - - template: cockroachdb_last_collected_secs - on: cockroachdb.live_nodes - class: Database -component: CockroachDB - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: dba - # Capacity template: cockroachdb_used_storage_capacity on: cockroachdb.storage_used_capacity_percentage - class: Database + class: Utilization + type: Database component: CockroachDB - type: Utilization calc: $capacity_used_percent units: % every: 10s @@ -33,9 +17,9 @@ component: CockroachDB template: cockroachdb_used_usable_storage_capacity on: cockroachdb.storage_used_capacity_percentage - class: Database + class: Utilization + type: Database component: CockroachDB - type: Utilization calc: $capacity_usable_used_percent units: % every: 10s @@ -49,37 +33,37 @@ component: CockroachDB template: cockroachdb_unavailable_ranges on: cockroachdb.ranges_replication_problem - class: Database + class: Errors + type: Database component: CockroachDB - type: Utilization calc: $ranges_unavailable units: num every: 10s warn: $this > 0 delay: down 15m multiplier 1.5 max 1h - info: number of ranges with fewer live replicas than the replication target + info: number of ranges with fewer live replicas than needed for quorum to: dba - template: cockroachdb_replicas_leaders_not_leaseholders - on: cockroachdb.replicas_leaders - class: Database + template: cockroachdb_underreplicated_ranges + on: cockroachdb.ranges_replication_problem + class: Errors + type: Database component: CockroachDB - type: Utilization - calc: $replicas_leaders_not_leaseholders + calc: $ranges_underreplicated units: num every: 10s warn: $this > 0 delay: down 15m multiplier 1.5 max 1h - info: number of replicas that are Raft leaders whose range lease is held by another store + info: number of ranges with fewer live replicas than the replication target to: dba # FD template: cockroachdb_open_file_descriptors_limit on: cockroachdb.process_file_descriptors - class: Database + class: Utilization + type: Database component: CockroachDB - type: Utilization calc: $sys_fd_open/$sys_fd_softlimit * 100 units: % every: 10s @@ -87,29 +71,3 @@ component: CockroachDB delay: down 15m multiplier 1.5 max 1h info: open file descriptors utilization (against softlimit) to: dba - -# SQL - - template: cockroachdb_sql_active_connections - on: cockroachdb.sql_connections - class: Database -component: CockroachDB - type: Utilization - calc: $sql_conns - units: active connections - every: 10s - info: number of active SQL connections - to: dba - - template: cockroachdb_sql_executed_statements_total_last_5m - on: cockroachdb.sql_statements_total - class: Database -component: CockroachDB - type: Workload - lookup: sum -5m absolute of sql_query_count - units: statements - every: 10s - warn: $this == 0 AND $cockroachdb_sql_active_connections != 0 - delay: down 15m up 30s multiplier 1.5 max 1h - info: number of executed SQL statements in the last 5 minutes - to: dba diff --git a/health/health.d/couchdb.conf b/health/health.d/couchdb.conf deleted file mode 100644 index c86c6b988..000000000 --- a/health/health.d/couchdb.conf +++ /dev/null @@ -1,16 +0,0 @@ - -# make sure couchdb is running - - template: couchdb_last_collected_secs - on: couchdb.request_methods - class: Database -component: CouchDB - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: dba diff --git a/health/health.d/cpu.conf b/health/health.d/cpu.conf index d11215768..ad6952825 100644 --- a/health/health.d/cpu.conf +++ b/health/health.d/cpu.conf @@ -3,9 +3,9 @@ template: 10min_cpu_usage on: system.cpu - class: System + class: Utilization + type: System component: CPU - type: Utilization os: linux hosts: * lookup: average -10m unaligned of user,system,softirq,irq,guest @@ -19,9 +19,9 @@ component: CPU template: 10min_cpu_iowait on: system.cpu - class: System + class: Utilization + type: System component: CPU - type: Utilization os: linux hosts: * lookup: average -10m unaligned of iowait @@ -35,9 +35,9 @@ component: CPU template: 20min_steal_cpu on: system.cpu - class: System + class: Latency + type: System component: CPU - type: Latency os: linux hosts: * lookup: average -20m unaligned of steal @@ -52,9 +52,9 @@ component: CPU ## FreeBSD template: 10min_cpu_usage on: system.cpu - class: System + class: Utilization + type: System component: CPU - type: Utilization os: freebsd hosts: * lookup: average -10m unaligned of user,system,interrupt diff --git a/health/health.d/dbengine.conf b/health/health.d/dbengine.conf index 79c156ab8..65c41b846 100644 --- a/health/health.d/dbengine.conf +++ b/health/health.d/dbengine.conf @@ -3,9 +3,9 @@ alarm: 10min_dbengine_global_fs_errors on: netdata.dbengine_global_errors - class: Netdata + class: Errors + type: Netdata component: DB engine - type: Errors os: linux freebsd macos hosts: * lookup: sum -10m unaligned of fs_errors @@ -18,9 +18,9 @@ component: DB engine alarm: 10min_dbengine_global_io_errors on: netdata.dbengine_global_errors - class: Netdata + class: Errors + type: Netdata component: DB engine - type: Errors os: linux freebsd macos hosts: * lookup: sum -10m unaligned of io_errors @@ -33,9 +33,9 @@ component: DB engine alarm: 10min_dbengine_global_flushing_warnings on: netdata.dbengine_global_errors - class: Netdata + class: Errors + type: Netdata component: DB engine - type: Errors os: linux freebsd macos hosts: * lookup: sum -10m unaligned of pg_cache_over_half_dirty_events @@ -49,9 +49,9 @@ component: DB engine alarm: 10min_dbengine_global_flushing_errors on: netdata.dbengine_long_term_page_stats - class: Netdata + class: Errors + type: Netdata component: DB engine - type: Errors os: linux freebsd macos hosts: * lookup: sum -10m unaligned of flushing_pressure_deletions diff --git a/health/health.d/disks.conf b/health/health.d/disks.conf index 60f8faed9..5daff61a1 100644 --- a/health/health.d/disks.conf +++ b/health/health.d/disks.conf @@ -11,9 +11,9 @@ template: disk_space_usage on: disk.space - class: System + class: Utilization + type: System component: Disk - type: Utilization os: linux freebsd hosts: * families: !/dev !/dev/* !/run !/run/* * @@ -28,9 +28,9 @@ component: Disk template: disk_inode_usage on: disk.inodes - class: System + class: Utilization + type: System component: Disk - type: Utilization os: linux freebsd hosts: * families: !/dev !/dev/* !/run !/run/* * @@ -136,19 +136,16 @@ component: Disk template: 10min_disk_utilization on: disk.util - class: System + class: Utilization + type: System component: Disk - type: Utilization os: linux freebsd hosts: * families: * lookup: average -10m unaligned units: % every: 1m - green: 90 - red: 98 - warn: $this > $green * (($status >= $WARNING) ? (0.7) : (1)) - crit: $this > $red * (($status == $CRITICAL) ? 
(0.7) : (1)) + warn: $this > 98 * (($status >= $WARNING) ? (0.7) : (1)) delay: down 15m multiplier 1.2 max 1h info: average percentage of time $family disk was busy over the last 10 minutes to: silent @@ -161,19 +158,16 @@ component: Disk template: 10min_disk_backlog on: disk.backlog - class: System + class: Latency + type: System component: Disk - type: Latency os: linux hosts: * families: * lookup: average -10m unaligned units: ms every: 1m - green: 2000 - red: 5000 - warn: $this > $green * (($status >= $WARNING) ? (0.7) : (1)) - crit: $this > $red * (($status == $CRITICAL) ? (0.7) : (1)) + warn: $this > 5000 * (($status >= $WARNING) ? (0.7) : (1)) delay: down 15m multiplier 1.2 max 1h info: average backlog size of the $family disk over the last 10 minutes to: silent diff --git a/health/health.d/dns_query.conf b/health/health.d/dns_query.conf index 1fbb2c598..ec4937c0a 100644 --- a/health/health.d/dns_query.conf +++ b/health/health.d/dns_query.conf @@ -3,9 +3,9 @@ template: dns_query_time_query_time on: dns_query_time.query_time - class: DNS + class: Latency + type: DNS component: DNS - type: Latency lookup: average -10s unaligned foreach * units: ms every: 10s diff --git a/health/health.d/dnsmasq_dhcp.conf b/health/health.d/dnsmasq_dhcp.conf index 10d139f77..010b94599 100644 --- a/health/health.d/dnsmasq_dhcp.conf +++ b/health/health.d/dnsmasq_dhcp.conf @@ -2,9 +2,9 @@ template: dnsmasq_dhcp_dhcp_range_utilization on: dnsmasq_dhcp.dhcp_range_utilization - class: DHCP + class: Utilization + type: DHCP component: Dnsmasq - type: Utilization every: 10s units: % calc: $used diff --git a/health/health.d/dockerd.conf b/health/health.d/dockerd.conf index ba866f81b..220ddd664 100644 --- a/health/health.d/dockerd.conf +++ b/health/health.d/dockerd.conf @@ -1,8 +1,8 @@ template: docker_unhealthy_containers on: docker.unhealthy_containers - class: Containers + class: Errors + type: Containers component: Docker - type: Errors units: unhealthy containers every: 10s lookup: average -10s diff --git a/health/health.d/elasticsearch.conf b/health/health.d/elasticsearch.conf deleted file mode 100644 index 05d576c39..000000000 --- a/health/health.d/elasticsearch.conf +++ /dev/null @@ -1,15 +0,0 @@ - -# make sure elasticsearch is running - - template: elasticsearch_last_collected - on: elasticsearch.cluster_health_status - class: Search engine -component: Elasticsearch - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - info: number of seconds since the last successful data collection - to: sysadmin diff --git a/health/health.d/entropy.conf b/health/health.d/entropy.conf index 0478fa0be..13b0fcde4 100644 --- a/health/health.d/entropy.conf +++ b/health/health.d/entropy.conf @@ -5,9 +5,9 @@ alarm: lowest_entropy on: system.entropy - class: System + class: Utilization + type: System component: Cryptography - type: Utilization os: linux hosts: * lookup: min -5m unaligned diff --git a/health/health.d/exporting.conf b/health/health.d/exporting.conf index 4430f3fd8..06f398c6e 100644 --- a/health/health.d/exporting.conf +++ b/health/health.d/exporting.conf @@ -1,22 +1,25 @@ -template: exporting_last_buffering -families: * - on: exporting_data_size - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? 
($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful buffering of exporting data - to: dba + template: exporting_last_buffering + families: * + on: exporting_data_size + class: Latency + type: Netdata +component: Exporting engine + calc: $now - $last_collected_t + units: seconds ago + every: 10s + warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) + crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) + delay: down 5m multiplier 1.5 max 1h + info: number of seconds since the last successful buffering of exporting data + to: dba template: exporting_metrics_sent families: * on: exporting_data_size - class: Netdata + class: Workload + type: Netdata component: Exporting engine - type: Workload units: % calc: abs($sent) * 100 / abs($buffered) every: 10s diff --git a/health/health.d/fping.conf b/health/health.d/fping.conf index 120fe8f28..bb22419fa 100644 --- a/health/health.d/fping.conf +++ b/health/health.d/fping.conf @@ -2,9 +2,9 @@ template: fping_last_collected_secs families: * on: fping.latency - class: Other + class: Latency + type: Other component: Network - type: Latency calc: $now - $last_collected_t units: seconds ago every: 10s @@ -17,9 +17,9 @@ component: Network template: fping_host_reachable families: * on: fping.latency - class: Other + class: Errors + type: Other component: Network - type: Errors calc: $average != nan units: up/down every: 10s @@ -31,9 +31,9 @@ component: Network template: fping_host_latency families: * on: fping.latency - class: Other + class: Latency + type: Other component: Network - type: Latency lookup: average -10s unaligned of average units: ms every: 10s @@ -48,9 +48,9 @@ component: Network template: fping_packet_loss families: * on: fping.quality - class: System + class: Errors + type: System component: Network - type: Errors lookup: average -10m unaligned of returned calc: 100 - $this green: 1 diff --git a/health/health.d/fronius.conf b/health/health.d/fronius.conf index 81aafaa60..853bd7fbc 100644 --- a/health/health.d/fronius.conf +++ b/health/health.d/fronius.conf @@ -1,9 +1,9 @@ template: fronius_last_collected_secs families: * on: fronius.power - class: Power Supply + class: Latency + type: Power Supply component: Solar - type: Latency calc: $now - $last_collected_t every: 10s units: seconds ago diff --git a/health/health.d/gearman.conf b/health/health.d/gearman.conf index e2031bf2b..14010d445 100644 --- a/health/health.d/gearman.conf +++ b/health/health.d/gearman.conf @@ -1,24 +1,10 @@ -# make sure Gearman is running - template: gearman_last_collected_secs - on: gearman.total_jobs - class: Computing -component: Gearman - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin template: gearman_workers_queued on: gearman.single_job - class: Computing + class: Latency + type: Computing component: Gearman - type: Latency - lookup: average -10m unaligned match-names of Queued + lookup: average -10m unaligned match-names of Pending units: workers every: 10s warn: $this > 30000 diff --git a/health/health.d/geth.conf b/health/health.d/geth.conf new file mode 100644 index 000000000..dd1eb4701 --- /dev/null +++ b/health/health.d/geth.conf @@ -0,0 +1,12 @@ +# chainhead_header is expected to be momentarily ahead. If it is considerably ahead (e.g. more than 5 blocks), then the node is definitely out of sync. + template: geth_chainhead_diff_between_header_block + on: geth.chainhead + class: Workload + type: ethereum_node +component: geth + every: 10s + calc: $chain_head_block - $chain_head_header + units: blocks + warn: $this != 0 + crit: $this > 5 + delay: down 1m multiplier 1.5 max 1h diff --git a/health/health.d/go.d.plugin.conf b/health/health.d/go.d.plugin.conf new file mode 100644 index 000000000..8bf84a976 --- /dev/null +++ b/health/health.d/go.d.plugin.conf @@ -0,0 +1,17 @@ + +# make sure go.d.plugin data collection job is running + + template: go.d_job_last_collected_secs + on: netdata.go_plugin_execution_time + class: Errors + type: Netdata +component: go.d.plugin + module: * + calc: $now - $last_collected_t + units: seconds ago + every: 10s + warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) + crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) + delay: down 5m multiplier 1.5 max 1h + info: number of seconds since the last successful data collection + to: webmaster diff --git a/health/health.d/haproxy.conf b/health/health.d/haproxy.conf index 9f6b1c577..a0ab52bca 100644 --- a/health/health.d/haproxy.conf +++ b/health/health.d/haproxy.conf @@ -1,8 +1,8 @@ template: haproxy_backend_server_status on: haproxy_hs.down - class: Web Proxy + class: Errors + type: Web Proxy component: HAProxy - type: Errors units: failed servers every: 10s lookup: average -10s @@ -12,25 +12,12 @@ component: HAProxy template: haproxy_backend_status on: haproxy_hb.down - class: Web Proxy + class: Errors + type: Web Proxy component: HAProxy - type: Errors units: failed backend every: 10s lookup: average -10s crit: $this > 0 info: average number of failed haproxy backends over the last 10 seconds to: sysadmin - - template: haproxy_last_collected - on: haproxy_hb.down - class: Web Proxy -component: HAProxy - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - info: number of seconds since the last successful data collection - to: sysadmin diff --git a/health/health.d/hdfs.conf b/health/health.d/hdfs.conf index bd8308bed..ca8df31b9 100644 --- a/health/health.d/hdfs.conf +++ b/health/health.d/hdfs.conf @@ -1,28 +1,11 @@ -# make sure hdfs is running - - template: hdfs_last_collected_secs - on: hdfs.heap_memory - class: Storage -component: HDFS - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - - # Common template: hdfs_capacity_usage on: hdfs.capacity - class: Storage + class: Utilization + type: Storage component: HDFS - type: Utilization calc: ($used) * 100 / ($used + $remaining) units: % every: 10s @@ -37,9 +20,9 @@ component: HDFS template: hdfs_missing_blocks on: hdfs.blocks - class: Storage + class: Errors + type: Storage component: HDFS - type: Errors calc: $missing units: missing blocks every: 10s @@ -51,9 +34,9 @@ component: HDFS template: hdfs_stale_nodes on: hdfs.data_nodes - class: Storage + class: Errors + type: Storage component: HDFS - type: Errors calc: $stale units: dead nodes every: 10s @@ -65,9 +48,9 @@ component: HDFS template: hdfs_dead_nodes on: hdfs.data_nodes - class: Storage + class: Errors + type: Storage component: HDFS - type: Errors calc: $dead units: dead nodes every: 10s @@ -81,9 +64,9 @@ component: HDFS template: hdfs_num_failed_volumes on: hdfs.num_failed_volumes - class: Storage + class: Errors + type: Storage component: HDFS - type: Errors calc: $fsds_num_failed_volumes units: failed volumes every: 10s diff --git a/health/health.d/httpcheck.conf b/health/health.d/httpcheck.conf index d4d6376a3..599c47acc 100644 --- a/health/health.d/httpcheck.conf +++ b/health/health.d/httpcheck.conf @@ -1,25 +1,11 @@ - template: httpcheck_last_collected_secs - families: * - on: httpcheck.status - class: Other -component: HTTP endpoint - type: Latency - calc: $now - $last_collected_t - every: 10s - units: seconds ago - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin # This is a fast-reacting no-notification alarm ideal for custom dashboards or badges template: httpcheck_web_service_up families: * on: httpcheck.status - class: Web Server + class: Utilization + type: Web Server component: HTTP endpoint - type: Utilization lookup: average -1m unaligned percentage of success calc: ($this < 75) ? 
(0) : ($this) every: 5s @@ -30,9 +16,9 @@ component: HTTP endpoint template: httpcheck_web_service_bad_content families: * on: httpcheck.status - class: Web Server + class: Workload + type: Web Server component: HTTP endpoint - type: Workload lookup: average -5m unaligned percentage of bad_content every: 10s units: % @@ -46,9 +32,9 @@ component: HTTP endpoint template: httpcheck_web_service_bad_status families: * on: httpcheck.status - class: Web Server + class: Workload + type: Web Server component: HTTP endpoint - type: Workload lookup: average -5m unaligned percentage of bad_status every: 10s units: % @@ -62,9 +48,9 @@ component: HTTP endpoint template: httpcheck_web_service_timeouts families: * on: httpcheck.status - class: Web Server + class: Latency + type: Web Server component: HTTP endpoint - type: Latency lookup: average -5m unaligned percentage of timeout every: 10s units: % @@ -73,9 +59,9 @@ component: HTTP endpoint template: httpcheck_no_web_service_connections families: * on: httpcheck.status - class: Other + class: Errors + type: Other component: HTTP endpoint - type: Errors lookup: average -5m unaligned percentage of no_connection every: 10s units: % @@ -85,9 +71,9 @@ component: HTTP endpoint template: httpcheck_web_service_unreachable families: * on: httpcheck.status - class: Web Server + class: Errors + type: Web Server component: HTTP endpoint - type: Errors calc: ($httpcheck_no_web_service_connections >= $httpcheck_web_service_timeouts) ? ($httpcheck_no_web_service_connections) : ($httpcheck_web_service_timeouts) units: % every: 10s @@ -101,9 +87,9 @@ component: HTTP endpoint template: httpcheck_1h_web_service_response_time families: * on: httpcheck.responsetime - class: Other + class: Latency + type: Other component: HTTP endpoint - type: Latency lookup: average -1h unaligned of time every: 30s units: ms @@ -112,9 +98,9 @@ component: HTTP endpoint template: httpcheck_web_service_slow families: * on: httpcheck.responsetime - class: Web Server + class: Latency + type: Web Server component: HTTP endpoint - type: Latency lookup: average -3m unaligned of time units: ms every: 10s diff --git a/health/health.d/ioping.conf b/health/health.d/ioping.conf index 57ce4e866..ee4befbea 100644 --- a/health/health.d/ioping.conf +++ b/health/health.d/ioping.conf @@ -1,9 +1,9 @@ template: ioping_disk_latency families: * on: ioping.latency - class: System + class: Latency + type: System component: Disk - type: Latency lookup: average -10s unaligned of average units: ms every: 10s diff --git a/health/health.d/ipc.conf b/health/health.d/ipc.conf index 6eaf7abe9..c178a410a 100644 --- a/health/health.d/ipc.conf +++ b/health/health.d/ipc.conf @@ -3,9 +3,9 @@ alarm: semaphores_used on: system.ipc_semaphores - class: System + class: Utilization + type: System component: IPC - type: Utilization os: linux hosts: * calc: $semaphores * 100 / $ipc_semaphores_max @@ -19,9 +19,9 @@ component: IPC alarm: semaphore_arrays_used on: system.ipc_semaphore_arrays - class: System + class: Utilization + type: System component: IPC - type: Utilization os: linux hosts: * calc: $arrays * 100 / $ipc_semaphores_arrays_max diff --git a/health/health.d/ipfs.conf b/health/health.d/ipfs.conf index 6268f4092..a514ddfd0 100644 --- a/health/health.d/ipfs.conf +++ b/health/health.d/ipfs.conf @@ -1,9 +1,9 @@ template: ipfs_datastore_usage on: ipfs.repo_size - class: Data Sharing + class: Utilization + type: Data Sharing component: IPFS - type: Utilization calc: $size * 100 / $avail units: % every: 10s diff --git 
a/health/health.d/ipmi.conf b/health/health.d/ipmi.conf index d4fdc6c79..feadba1b7 100644 --- a/health/health.d/ipmi.conf +++ b/health/health.d/ipmi.conf @@ -1,8 +1,8 @@ alarm: ipmi_sensors_states on: ipmi.sensors_states - class: System + class: Errors + type: System component: IPMI - type: Errors calc: $warning + $critical units: sensors every: 10s @@ -14,9 +14,9 @@ component: IPMI alarm: ipmi_events on: ipmi.events - class: System + class: Utilization + type: System component: IPMI - type: Utilization calc: $events units: events every: 10s diff --git a/health/health.d/kubelet.conf b/health/health.d/kubelet.conf index 4d3c45f97..c2778cc5e 100644 --- a/health/health.d/kubelet.conf +++ b/health/health.d/kubelet.conf @@ -6,9 +6,9 @@ template: kubelet_node_config_error on: k8s_kubelet.kubelet_node_config_error - class: Kubernetes + class: Errors + type: Kubernetes component: Kubelet - type: Errors calc: $kubelet_node_config_error units: bool every: 10s @@ -22,9 +22,9 @@ component: Kubelet template: kubelet_token_requests lookup: sum -10s of token_fail_count on: k8s_kubelet.kubelet_token_requests - class: Kubernetes + class: Errors + type: Kubernetes component: Kubelet - type: Errors units: failed requests every: 10s warn: $this > 0 @@ -37,9 +37,9 @@ component: Kubelet template: kubelet_operations_error lookup: sum -1m on: k8s_kubelet.kubelet_operations_errors - class: Kubernetes + class: Errors + type: Kubernetes component: Kubelet - type: Errors units: errors every: 10s warn: $this > (($status >= $WARNING) ? (0) : (20)) @@ -64,9 +64,9 @@ component: Kubelet template: kubelet_1m_pleg_relist_latency_quantile_05 on: k8s_kubelet.kubelet_pleg_relist_latency_microseconds - class: Kubernetes + class: Latency + type: Kubernetes component: Kubelet - type: Latency lookup: average -1m unaligned of kubelet_pleg_relist_latency_05 units: microseconds every: 10s @@ -74,9 +74,9 @@ component: Kubelet template: kubelet_10s_pleg_relist_latency_quantile_05 on: k8s_kubelet.kubelet_pleg_relist_latency_microseconds - class: Kubernetes + class: Latency + type: Kubernetes component: Kubelet - type: Latency lookup: average -10s unaligned of kubelet_pleg_relist_latency_05 calc: $this * 100 / (($kubelet_1m_pleg_relist_latency_quantile_05 < 1000)?(1000):($kubelet_1m_pleg_relist_latency_quantile_05)) every: 10s @@ -92,9 +92,9 @@ component: Kubelet template: kubelet_1m_pleg_relist_latency_quantile_09 on: k8s_kubelet.kubelet_pleg_relist_latency_microseconds - class: Kubernetes + class: Latency + type: Kubernetes component: Kubelet - type: Latency lookup: average -1m unaligned of kubelet_pleg_relist_latency_09 units: microseconds every: 10s @@ -102,9 +102,9 @@ component: Kubelet template: kubelet_10s_pleg_relist_latency_quantile_09 on: k8s_kubelet.kubelet_pleg_relist_latency_microseconds - class: Kubernetes + class: Latency + type: Kubernetes component: Kubelet - type: Latency lookup: average -10s unaligned of kubelet_pleg_relist_latency_09 calc: $this * 100 / (($kubelet_1m_pleg_relist_latency_quantile_09 < 1000)?(1000):($kubelet_1m_pleg_relist_latency_quantile_09)) every: 10s @@ -120,9 +120,9 @@ component: Kubelet template: kubelet_1m_pleg_relist_latency_quantile_099 on: k8s_kubelet.kubelet_pleg_relist_latency_microseconds - class: Kubernetes + class: Latency + type: Kubernetes component: Kubelet - type: Latency lookup: average -1m unaligned of kubelet_pleg_relist_latency_099 units: microseconds every: 10s @@ -130,9 +130,9 @@ component: Kubelet template: kubelet_10s_pleg_relist_latency_quantile_099 on: 
k8s_kubelet.kubelet_pleg_relist_latency_microseconds - class: Kubernetes + class: Latency + type: Kubernetes component: Kubelet - type: Latency lookup: average -10s unaligned of kubelet_pleg_relist_latency_099 calc: $this * 100 / (($kubelet_1m_pleg_relist_latency_quantile_099 < 1000)?(1000):($kubelet_1m_pleg_relist_latency_quantile_099)) every: 10s diff --git a/health/health.d/lighttpd.conf b/health/health.d/lighttpd.conf deleted file mode 100644 index 0f067549e..000000000 --- a/health/health.d/lighttpd.conf +++ /dev/null @@ -1,17 +0,0 @@ - -# make sure lighttpd is running - - template: lighttpd_last_collected_secs - on: lighttpd.requests - class: Web Server -component: Lighttpd - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - diff --git a/health/health.d/linux_power_supply.conf b/health/health.d/linux_power_supply.conf index e28c246a3..c0bc6de8a 100644 --- a/health/health.d/linux_power_supply.conf +++ b/health/health.d/linux_power_supply.conf @@ -2,9 +2,9 @@ template: linux_power_supply_capacity on: powersupply.capacity - class: Power Supply + class: Utilization + type: Power Supply component: Battery - type: Utilization calc: $capacity units: % every: 10s diff --git a/health/health.d/load.conf b/health/health.d/load.conf index e811f6ee2..0bd872f85 100644 --- a/health/health.d/load.conf +++ b/health/health.d/load.conf @@ -6,9 +6,9 @@ # minute, with a special case for a single CPU of setting the trigger at 2. alarm: load_cpu_number on: system.load - class: System + class: Utilization + type: System component: Load - type: Utilization os: linux hosts: * calc: ($active_processors == nan or $active_processors == inf or $active_processors < 2) ? 
( 2 ) : ( $active_processors ) @@ -22,9 +22,9 @@ component: Load alarm: load_average_15 on: system.load - class: System + class: Utilization + type: System component: Load - type: Utilization os: linux hosts: * lookup: max -1m unaligned of load15 @@ -37,9 +37,9 @@ component: Load alarm: load_average_5 on: system.load - class: System + class: Utilization + type: System component: Load - type: Utilization os: linux hosts: * lookup: max -1m unaligned of load5 @@ -52,9 +52,9 @@ component: Load alarm: load_average_1 on: system.load - class: System + class: Utilization + type: System component: Load - type: Utilization os: linux hosts: * lookup: max -1m unaligned of load1 diff --git a/health/health.d/mdstat.conf b/health/health.d/mdstat.conf index 67483b201..cedaa000e 100644 --- a/health/health.d/mdstat.conf +++ b/health/health.d/mdstat.conf @@ -1,8 +1,8 @@ template: mdstat_last_collected on: md.disks - class: System + class: Latency + type: System component: RAID - type: Latency calc: $now - $last_collected_t units: seconds ago every: 10s @@ -13,9 +13,9 @@ component: RAID template: mdstat_disks on: md.disks - class: System + class: Errors + type: System component: RAID - type: Errors units: failed devices every: 10s calc: $down @@ -26,9 +26,9 @@ component: RAID template: mdstat_mismatch_cnt on: md.mismatch_cnt - class: System + class: Errors + type: System component: RAID - type: Errors families: !*(raid1) !*(raid10) * units: unsynchronized blocks calc: $count @@ -40,9 +40,9 @@ component: RAID template: mdstat_nonredundant_last_collected on: md.nonredundant - class: System + class: Latency + type: System component: RAID - type: Latency calc: $now - $last_collected_t units: seconds ago every: 10s diff --git a/health/health.d/megacli.conf b/health/health.d/megacli.conf index 1b6502f62..9fbcfdb92 100644 --- a/health/health.d/megacli.conf +++ b/health/health.d/megacli.conf @@ -3,9 +3,9 @@ template: megacli_adapter_state on: megacli.adapter_degraded - class: System + class: Errors + type: System component: RAID - type: Errors lookup: max -10s foreach * units: boolean every: 10s @@ -18,9 +18,9 @@ component: RAID template: megacli_pd_predictive_failures on: megacli.pd_predictive_failure - class: System + class: Errors + type: System component: RAID - type: Errors lookup: sum -10s foreach * units: predictive failures every: 10s @@ -31,9 +31,9 @@ component: RAID template: megacli_pd_media_errors on: megacli.pd_media_error - class: System + class: Errors + type: System component: RAID - type: Errors lookup: sum -10s foreach * units: media errors every: 10s @@ -46,9 +46,9 @@ component: RAID template: megacli_bbu_relative_charge on: megacli.bbu_relative_charge - class: System + class: Workload + type: System component: RAID - type: Workload lookup: average -10s units: percent every: 10s @@ -59,9 +59,9 @@ component: RAID template: megacli_bbu_cycle_count on: megacli.bbu_cycle_count - class: System + class: Workload + type: System component: RAID - type: Workload lookup: average -10s units: cycles every: 10s diff --git a/health/health.d/memcached.conf b/health/health.d/memcached.conf index f4b734c38..2a2fe4b82 100644 --- a/health/health.d/memcached.conf +++ b/health/health.d/memcached.conf @@ -1,28 +1,11 @@ -# make sure memcached is running - - template: memcached_last_collected_secs - on: memcached.cache - class: KV Storage -component: Memcached - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? 
($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: dba - - # detect if memcached cache is full template: memcached_cache_memory_usage on: memcached.cache - class: KV Storage + class: Utilization + type: KV Storage component: Memcached - type: Utilization calc: $used * 100 / ($used + $available) units: % every: 10s @@ -37,9 +20,9 @@ component: Memcached template: memcached_cache_fill_rate on: memcached.cache - class: KV Storage + class: Utilization + type: KV Storage component: Memcached - type: Utilization lookup: min -10m at -50m unaligned of available calc: ($this - $available) / (($now - $after) / 3600) units: KB/hour @@ -51,9 +34,9 @@ component: Memcached template: memcached_out_of_cache_space_time on: memcached.cache - class: KV Storage + class: Utilization + type: KV Storage component: Memcached - type: Utilization calc: ($memcached_cache_fill_rate > 0) ? ($available / $memcached_cache_fill_rate) : (inf) units: hours every: 10s diff --git a/health/health.d/memory.conf b/health/health.d/memory.conf index ab651315f..010cbbd7b 100644 --- a/health/health.d/memory.conf +++ b/health/health.d/memory.conf @@ -3,9 +3,9 @@ alarm: 1hour_ecc_memory_correctable on: mem.ecc_ce - class: System + class: Errors + type: System component: Memory - type: Errors os: linux hosts: * lookup: sum -10m unaligned @@ -18,9 +18,9 @@ component: Memory alarm: 1hour_ecc_memory_uncorrectable on: mem.ecc_ue - class: System + class: Errors + type: System component: Memory - type: Errors os: linux hosts: * lookup: sum -10m unaligned @@ -33,9 +33,9 @@ component: Memory alarm: 1hour_memory_hw_corrupted on: mem.hwcorrupt - class: System + class: Errors + type: System component: Memory - type: Errors os: linux hosts: * calc: $HardwareCorrupted diff --git a/health/health.d/mongodb.conf b/health/health.d/mongodb.conf deleted file mode 100644 index 8c9bdeb6f..000000000 --- a/health/health.d/mongodb.conf +++ /dev/null @@ -1,16 +0,0 @@ - -# make sure mongodb is running - - template: mongodb_last_collected_secs - on: mongodb.read_operations - class: Database -component: MongoDB - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: dba diff --git a/health/health.d/mysql.conf b/health/health.d/mysql.conf index 91860c4a7..34452d983 100644 --- a/health/health.d/mysql.conf +++ b/health/health.d/mysql.conf @@ -1,29 +1,11 @@ -# make sure mysql is running - - template: mysql_last_collected_secs - on: mysql.queries - class: Database -component: MySQL - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: dba - - -# ----------------------------------------------------------------------------- # slow queries template: mysql_10s_slow_queries on: mysql.queries - class: Database + class: Latency + type: Database component: MySQL - type: Latency lookup: sum -10s of slow_queries units: slow queries every: 10s @@ -39,9 +21,9 @@ component: MySQL template: mysql_10s_table_locks_immediate on: mysql.table_locks - class: Database + class: Utilization + type: Database component: MySQL - type: Utilization lookup: sum -10s absolute of immediate units: immediate locks every: 10s @@ -50,9 +32,9 @@ component: MySQL template: mysql_10s_table_locks_waited on: mysql.table_locks - class: Database + class: Latency + type: Database component: MySQL - type: Latency lookup: sum -10s absolute of waited units: waited locks every: 10s @@ -61,9 +43,9 @@ component: MySQL template: mysql_10s_waited_locks_ratio on: mysql.table_locks - class: Database + class: Latency + type: Database component: MySQL - type: Latency calc: ( ($mysql_10s_table_locks_waited + $mysql_10s_table_locks_immediate) > 0 ) ? (($mysql_10s_table_locks_waited * 100) / ($mysql_10s_table_locks_waited + $mysql_10s_table_locks_immediate)) : 0 units: % every: 10s @@ -79,9 +61,9 @@ component: MySQL template: mysql_connections on: mysql.connections_active - class: Database + class: Utilization + type: Database component: MySQL - type: Utilization calc: $active * 100 / $limit units: % every: 10s @@ -97,9 +79,9 @@ component: MySQL template: mysql_replication on: mysql.slave_status - class: Database + class: Errors + type: Database component: MySQL - type: Errors calc: ($sql_running <= 0 OR $io_running <= 0)?0:1 units: ok/failed every: 10s @@ -110,9 +92,9 @@ component: MySQL template: mysql_replication_lag on: mysql.slave_behind - class: Database + class: Latency + type: Database component: MySQL - type: Errors calc: $seconds units: seconds every: 10s @@ -129,9 +111,9 @@ component: MySQL template: mysql_galera_cluster_size_max_2m on: mysql.galera_cluster_size - class: Database + class: Utilization + type: Database component: MySQL - type: Utilization lookup: max -2m absolute units: nodes every: 10s @@ -140,9 +122,9 @@ component: MySQL template: mysql_galera_cluster_size on: mysql.galera_cluster_size - class: Database + class: Utilization + type: Database component: MySQL - type: Utilization calc: $nodes units: nodes every: 10s @@ -156,9 +138,9 @@ component: MySQL template: mysql_galera_cluster_state on: mysql.galera_cluster_state - class: Database + class: Errors + type: Database component: MySQL - type: Errors calc: $state every: 10s warn: $this == 2 OR $this == 3 @@ -173,9 +155,9 @@ component: MySQL template: mysql_galera_cluster_status on: mysql.galera_cluster_status - class: Database + class: Errors + type: Database component: MySQL - type: Errors calc: $wsrep_cluster_status every: 10s crit: $mysql_galera_cluster_state != nan AND $this != 0 diff --git a/health/health.d/named.conf b/health/health.d/named.conf deleted file mode 100644 index 90266df16..000000000 --- a/health/health.d/named.conf +++ /dev/null @@ -1,17 +0,0 @@ - -# make sure named is running - - template: named_last_collected_secs - on: named.global_queries - class: DNS -component: BIND - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? 
($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: domainadmin - diff --git a/health/health.d/net.conf b/health/health.d/net.conf index 04219e163..028ca7b81 100644 --- a/health/health.d/net.conf +++ b/health/health.d/net.conf @@ -6,9 +6,9 @@ template: interface_speed on: net.net - class: System + class: Latency + type: System component: Network - type: Latency os: * hosts: * families: * @@ -19,9 +19,9 @@ component: Network template: 1m_received_traffic_overflow on: net.net - class: System + class: Workload + type: System component: Network - type: Workload os: linux hosts: * families: * @@ -36,9 +36,9 @@ component: Network template: 1m_sent_traffic_overflow on: net.net - class: System + class: Workload + type: System component: Network - type: Workload os: linux hosts: * families: * @@ -63,9 +63,9 @@ component: Network template: inbound_packets_dropped on: net.drops - class: System + class: Errors + type: System component: Network - type: Errors os: linux hosts: * families: !net* * @@ -76,9 +76,9 @@ component: Network template: outbound_packets_dropped on: net.drops - class: System + class: Errors + type: System component: Network - type: Errors os: linux hosts: * families: !net* * @@ -89,14 +89,14 @@ component: Network template: inbound_packets_dropped_ratio on: net.packets - class: System + class: Errors + type: System component: Network - type: Errors os: linux hosts: * families: !net* !wl* * lookup: sum -10m unaligned absolute of received - calc: (($inbound_packets_dropped != nan AND $this > 1000) ? ($inbound_packets_dropped * 100 / $this) : (0)) + calc: (($inbound_packets_dropped != nan AND $this > 10000) ? ($inbound_packets_dropped * 100 / $this) : (0)) units: % every: 1m warn: $this >= 2 @@ -106,9 +106,9 @@ component: Network template: outbound_packets_dropped_ratio on: net.packets - class: System + class: Errors + type: System component: Network - type: Errors os: linux hosts: * families: !net* !wl* * @@ -123,14 +123,14 @@ component: Network template: wifi_inbound_packets_dropped_ratio on: net.packets - class: System + class: Errors + type: System component: Network - type: Errors os: linux hosts: * families: wl* lookup: sum -10m unaligned absolute of received - calc: (($inbound_packets_dropped != nan AND $this > 1000) ? ($inbound_packets_dropped * 100 / $this) : (0)) + calc: (($inbound_packets_dropped != nan AND $this > 10000) ? 
($inbound_packets_dropped * 100 / $this) : (0)) units: % every: 1m warn: $this >= 10 @@ -140,9 +140,9 @@ component: Network template: wifi_outbound_packets_dropped_ratio on: net.packets - class: System + class: Errors + type: System component: Network - type: Errors os: linux hosts: * families: wl* @@ -160,9 +160,9 @@ component: Network template: interface_inbound_errors on: net.errors - class: System + class: Errors + type: System component: Network - type: Errors os: freebsd hosts: * families: * @@ -176,9 +176,9 @@ component: Network template: interface_outbound_errors on: net.errors - class: System + class: Errors + type: System component: Network - type: Errors os: freebsd hosts: * families: * @@ -200,9 +200,9 @@ component: Network template: 10min_fifo_errors on: net.fifo - class: System + class: Errors + type: System component: Network - type: Errors os: linux hosts: * families: * @@ -225,9 +225,9 @@ component: Network template: 1m_received_packets_rate on: net.packets - class: System + class: Workload + type: System component: Network - type: Workload os: linux freebsd hosts: * families: * @@ -238,9 +238,9 @@ component: Network template: 10s_received_packets_storm on: net.packets - class: System + class: Workload + type: System component: Network - type: Workload os: linux freebsd hosts: * families: * diff --git a/health/health.d/netfilter.conf b/health/health.d/netfilter.conf index 35c89caf7..7de383fa2 100644 --- a/health/health.d/netfilter.conf +++ b/health/health.d/netfilter.conf @@ -3,9 +3,9 @@ alarm: netfilter_conntrack_full on: netfilter.conntrack_sockets - class: System + class: Workload + type: System component: Network - type: Workload os: linux hosts: * lookup: max -10s unaligned of connections diff --git a/health/health.d/nginx.conf b/health/health.d/nginx.conf deleted file mode 100644 index 30c738f47..000000000 --- a/health/health.d/nginx.conf +++ /dev/null @@ -1,17 +0,0 @@ - -# make sure nginx is running - - template: nginx_last_collected_secs - on: nginx.requests - class: Web Server -component: NGINX - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - diff --git a/health/health.d/nginx_plus.conf b/health/health.d/nginx_plus.conf deleted file mode 100644 index 5849a9e7e..000000000 --- a/health/health.d/nginx_plus.conf +++ /dev/null @@ -1,17 +0,0 @@ - -# make sure nginx_plus is running - - template: nginx_plus_last_collected_secs - on: nginx_plus.requests_total - class: Web Server -component: NGINX Plus - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every))
-    delay: down 5m multiplier 1.5 max 1h
-     info: number of seconds since the last successful data collection
-       to: webmaster
-
diff --git a/health/health.d/phpfpm.conf b/health/health.d/phpfpm.conf
deleted file mode 100644
index fc073a944..000000000
--- a/health/health.d/phpfpm.conf
+++ /dev/null
@@ -1,17 +0,0 @@
-
-# make sure phpfpm is running
-
- template: phpfpm_last_collected_secs
-       on: phpfpm.requests
-    class: Web Server
-component: PHP-FPM
-     type: Latency
-     calc: $now - $last_collected_t
-    units: seconds ago
-    every: 10s
-     warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-     crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-    delay: down 5m multiplier 1.5 max 1h
-     info: number of seconds since the last successful data collection
-       to: webmaster
-
diff --git a/health/health.d/pihole.conf b/health/health.d/pihole.conf
index 72622caed..2e5c1cbfd 100644
--- a/health/health.d/pihole.conf
+++ b/health/health.d/pihole.conf
@@ -1,45 +1,12 @@
-# Make sure Pi-hole is responding.
-
- template: pihole_last_collected_secs
-       on: pihole.dns_queries_total
-    class: Ad Filtering
-component: Pi-hole
-     type: Latency
-     calc: $now - $last_collected_t
-    units: seconds ago
-    every: 10s
-     warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-     crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-    delay: down 5m multiplier 1.5 max 1h
-     info: number of seconds since the last successful data collection
-       to: webmaster
-
-# Blocked DNS queries.
-
- template: pihole_blocked_queries
-       on: pihole.dns_queries_percentage
-    class: Ad Filtering
-component: Pi-hole
-     type: Errors
-    every: 10s
-    units: %
-     calc: $blocked
-     warn: $this > ( ($status >= $WARNING ) ? ( 45 ) : ( 55 ) )
-     crit: $this > ( ($status == $CRITICAL) ? ( 55 ) : ( 75 ) )
-    delay: up 2m down 5m
-     info: percentage of blocked dns queries over the last 24 hour
-       to: sysadmin
-
-
# Blocklist last update time.
# Default update interval is a week.

  template: pihole_blocklist_last_update
        on: pihole.blocklist_last_update
-    class: Ad Filtering
+    class: Errors
+     type: Ad Filtering
component: Pi-hole
-     type: Errors
     every: 10s
     units: seconds
      calc: $ago
@@ -52,15 +19,15 @@ component: Pi-hole

  template: pihole_blocklist_gravity_file
        on: pihole.blocklist_last_update
-    class: Ad Filtering
+    class: Errors
+     type: Ad Filtering
component: Pi-hole
-     type: Errors
     every: 10s
     units: boolean
      calc: $file_exists
      crit: $this != 1
     delay: up 2m down 5m
-     info: gravity.list (blocklist) file existence state (0: exists, 1: not-exists)
+     info: gravity.list (blocklist) file existence state (0: not-exists, 1: exists)
        to: sysadmin

# Pi-hole's ability to block unwanted domains.
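Both info rewrites in this file (gravity.list above, pihole_status in the hunk that follows) encode a 0/1 flag, and the legend must enumerate the values in the order the flag actually takes them, 0 first. A minimal sketch of the pattern these fixes restore, using hypothetical chart and dimension names rather than an alarm shipped in this patch:

  template: example_feature_enabled
        on: example.feature_status    # hypothetical chart
      calc: $enabled                  # assumed 0/1 dimension: 1 means enabled
     units: boolean
     every: 10s
      warn: $this != 1                # alert while the flag is 0
      info: feature state (0: disabled, 1: enabled)
        to: sysadmin

The legend in info lists 0 before 1, matching the values $this can take, which is exactly what the two corrected lines do for the real Pi-hole flags.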
@@ -68,13 +35,13 @@ component: Pi-hole template: pihole_status on: pihole.unwanted_domains_blocking_status - class: Ad Filtering + class: Errors + type: Ad Filtering component: Pi-hole - type: Errors every: 10s units: boolean calc: $enabled warn: $this != 1 delay: up 2m down 5m - info: unwanted domains blocking status (0: enabled, 1: disabled) + info: unwanted domains blocking status (0: disabled, 1: enabled) to: sysadmin diff --git a/health/health.d/portcheck.conf b/health/health.d/portcheck.conf index b977dbb31..8cbd7729c 100644 --- a/health/health.d/portcheck.conf +++ b/health/health.d/portcheck.conf @@ -1,25 +1,11 @@ - template: portcheck_last_collected_secs - families: * - on: portcheck.status - class: Other -component: TCP endpoint - type: Latency - calc: $now - $last_collected_t - every: 10s - units: seconds ago - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin # This is a fast-reacting no-notification alarm ideal for custom dashboards or badges template: portcheck_service_reachable families: * on: portcheck.status - class: Other + class: Workload + type: Other component: TCP endpoint - type: Workload lookup: average -1m unaligned percentage of success calc: ($this < 75) ? (0) : ($this) every: 5s @@ -30,9 +16,9 @@ component: TCP endpoint template: portcheck_connection_timeouts families: * on: portcheck.status - class: Other + class: Errors + type: Other component: TCP endpoint - type: Errors lookup: average -5m unaligned percentage of timeout every: 10s units: % @@ -45,9 +31,9 @@ component: TCP endpoint template: portcheck_connection_fails families: * on: portcheck.status - class: Other + class: Errors + type: Other component: TCP endpoint - type: Errors lookup: average -5m unaligned percentage of no_connection,failed every: 10s units: % diff --git a/health/health.d/postgres.conf b/health/health.d/postgres.conf deleted file mode 100644 index f908a802a..000000000 --- a/health/health.d/postgres.conf +++ /dev/null @@ -1,16 +0,0 @@ - -# make sure postgres is running - - template: postgres_last_collected_secs - on: postgres.db_stat_transactions - class: Database -component: PostgreSQL - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: dba diff --git a/health/health.d/processes.conf b/health/health.d/processes.conf index b44a24c0b..2929ee3d4 100644 --- a/health/health.d/processes.conf +++ b/health/health.d/processes.conf @@ -2,9 +2,9 @@ alarm: active_processes on: system.active_processes - class: System + class: Workload + type: System component: Processes - type: Workload hosts: * calc: $active * 100 / $pidmax units: % diff --git a/health/health.d/pulsar.conf b/health/health.d/pulsar.conf deleted file mode 100644 index 9903d4e38..000000000 --- a/health/health.d/pulsar.conf +++ /dev/null @@ -1,16 +0,0 @@ - -# Availability - - template: pulsar_last_collected_secs - on: pulsar.broker_components - class: Messaging -component: Pulsar - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? 
($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin diff --git a/health/health.d/python.d.plugin.conf b/health/health.d/python.d.plugin.conf new file mode 100644 index 000000000..f3abc588f --- /dev/null +++ b/health/health.d/python.d.plugin.conf @@ -0,0 +1,17 @@ + +# make sure python.d.plugin data collection job is running + + template: python.d_job_last_collected_secs + on: netdata.pythond_runtime + class: Error + type: Netdata +component: python.d.plugin + module: * + calc: $now - $last_collected_t + units: seconds ago + every: 10s + warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) + crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) + delay: down 5m multiplier 1.5 max 1h + info: number of seconds since the last successful data collection + to: webmaster diff --git a/health/health.d/ram.conf b/health/health.d/ram.conf index 0e3cc29fa..6e6e3b400 100644 --- a/health/health.d/ram.conf +++ b/health/health.d/ram.conf @@ -3,9 +3,9 @@ alarm: used_ram_to_ignore on: system.ram - class: System + class: Utilization + type: System component: Memory - type: Utilization os: linux freebsd hosts: * calc: ($zfs.arc_size.arcsz = nan)?(0):($zfs.arc_size.arcsz - $zfs.arc_size.min) @@ -15,13 +15,12 @@ component: Memory alarm: ram_in_use on: system.ram - class: System + class: Utilization + type: System component: Memory - type: Utilization os: linux hosts: * -# calc: $used * 100 / ($used + $cached + $free) - calc: ($used - $used_ram_to_ignore) * 100 / ($used + $cached + $free) + calc: ($used - $used_ram_to_ignore) * 100 / ($used + $cached + $free + $buffers) units: % every: 10s warn: $this > (($status >= $WARNING) ? (80) : (90)) @@ -32,12 +31,12 @@ component: Memory alarm: ram_available on: mem.available - class: System + class: Utilization + type: System component: Memory - type: Utilization os: linux hosts: * - calc: ($avail + $system.ram.used_ram_to_ignore) * 100 / ($system.ram.used + $system.ram.cached + $system.ram.free + $system.ram.buffers) + calc: $avail * 100 / ($system.ram.used + $system.ram.cached + $system.ram.free + $system.ram.buffers) units: % every: 10s warn: $this < (($status >= $WARNING) ? 
(15) : (10)) @@ -46,24 +45,25 @@ component: Memory info: percentage of estimated amount of RAM available for userspace processes, without causing swapping to: sysadmin - alarm: oom_kill - on: mem.oom_kill - os: linux - hosts: * - lookup: sum -1m unaligned - units: kills - every: 10s - warn: $this > 0 - delay: down 5m - info: number of out of memory kills in the last minute - to: sysadmin + alarm: oom_kill + on: mem.oom_kill + os: linux + hosts: * + lookup: sum -30m unaligned + units: kills + every: 5m + warn: $this > 0 + delay: down 10m +host labels: _is_k8s_node = false + info: number of out of memory kills in the last 30 minutes + to: sysadmin ## FreeBSD alarm: ram_in_use on: system.ram - class: System + class: Utilization + type: System component: Memory - type: Utilization os: freebsd hosts: * calc: ($active + $wired + $laundry + $buffers - $used_ram_to_ignore) * 100 / ($active + $wired + $laundry + $buffers - $used_ram_to_ignore + $cache + $free + $inactive) @@ -77,9 +77,9 @@ component: Memory alarm: ram_available on: system.ram - class: System + class: Utilization + type: System component: Memory - type: Utilization os: freebsd hosts: * calc: ($free + $inactive + $used_ram_to_ignore) * 100 / ($free + $active + $inactive + $wired + $cache + $laundry + $buffers) diff --git a/health/health.d/redis.conf b/health/health.d/redis.conf index e8b289942..dfb771e8c 100644 --- a/health/health.d/redis.conf +++ b/health/health.d/redis.conf @@ -1,26 +1,10 @@ -# make sure redis is running - - template: redis_last_collected_secs - on: redis.operations - class: KV Storage -component: Redis - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: dba - template: redis_bgsave_broken families: * on: redis.bgsave_health - class: KV Storage + class: Errors + type: KV Storage component: Redis - type: Errors every: 10s crit: $rdb_last_bgsave_status != 0 units: ok/failed @@ -31,9 +15,9 @@ component: Redis template: redis_bgsave_slow families: * on: redis.bgsave_now - class: KV Storage + class: Latency + type: KV Storage component: Redis - type: Latency every: 10s warn: $rdb_bgsave_in_progress > 600 crit: $rdb_bgsave_in_progress > 1200 diff --git a/health/health.d/retroshare.conf b/health/health.d/retroshare.conf index ca22e60de..14aa76b4c 100644 --- a/health/health.d/retroshare.conf +++ b/health/health.d/retroshare.conf @@ -1,26 +1,11 @@ -# make sure RetroShare is running - - template: retroshare_last_collected_secs - on: retroshare.peers - class: Data Sharing -component: Retroshare - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin # make sure the DHT is fine when active template: retroshare_dht_working on: retroshare.dht - class: Data Sharing + class: Utilization + type: Data Sharing component: Retroshare - type: Utilization calc: $dht_size_all units: peers every: 1m diff --git a/health/health.d/riakkv.conf b/health/health.d/riakkv.conf index b2c0e8d9c..261fd48c6 100644 --- a/health/health.d/riakkv.conf +++ b/health/health.d/riakkv.conf @@ -1,24 +1,10 @@ -# Ensure that Riak is running. template: riak_last_collected_secs - template: riakkv_last_collected_secs - on: riak.kv.throughput - class: Database -component: Riak KV - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: dba # Warn if a list keys operation is running. template: riakkv_list_keys_active on: riak.core.fsm_active - class: Database + class: Utilization + type: Database component: Riak KV - type: Utilization calc: $list_fsm_active units: state machines every: 10s @@ -31,9 +17,9 @@ component: Riak KV # KV GET template: riakkv_1h_kv_get_mean_latency on: riak.kv.latency.get - class: Database + class: Latency + type: Database component: Riak KV - type: Latency calc: $node_get_fsm_time_mean lookup: average -1h unaligned of time every: 30s @@ -43,9 +29,9 @@ component: Riak KV template: riakkv_kv_get_slow on: riak.kv.latency.get - class: Database + class: Latency + type: Database component: Riak KV - type: Latency calc: $mean lookup: average -3m unaligned of time units: ms @@ -61,9 +47,9 @@ component: Riak KV # KV PUT template: riakkv_1h_kv_put_mean_latency on: riak.kv.latency.put - class: Database + class: Latency + type: Database component: Riak KV - type: Latency calc: $node_put_fsm_time_mean lookup: average -1h unaligned of time every: 30s @@ -73,9 +59,9 @@ component: Riak KV template: riakkv_kv_put_slow on: riak.kv.latency.put - class: Database + class: Latency + type: Database component: Riak KV - type: Latency calc: $mean lookup: average -3m unaligned of time units: ms @@ -95,9 +81,9 @@ component: Riak KV # On systems observed, this is < 2000, but may grow depending on load. template: riakkv_vm_high_process_count on: riak.vm - class: Database + class: Utilization + type: Database component: Riak KV - type: Utilization calc: $sys_process_count units: processes every: 10s diff --git a/health/health.d/scaleio.conf b/health/health.d/scaleio.conf index 3c0dc1168..ab110bf07 100644 --- a/health/health.d/scaleio.conf +++ b/health/health.d/scaleio.conf @@ -1,27 +1,11 @@ -# make sure scaleio is running - - template: scaleio_last_collected_secs - on: scaleio.system_capacity_total - class: Storage -component: ScaleIO - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin - # make sure Storage Pool capacity utilization is under limit template: scaleio_storage_pool_capacity_utilization on: scaleio.storage_pool_capacity_utilization - class: Storage + class: Utilization + type: Storage component: ScaleIO - type: Utilization calc: $used units: % every: 10s @@ -36,9 +20,9 @@ component: ScaleIO template: scaleio_sdc_mdm_connection_state on: scaleio.sdc_mdm_connection_state - class: Storage + class: Utilization + type: Storage component: ScaleIO - type: Utilization calc: $connected every: 10s warn: $this != 1 diff --git a/health/health.d/softnet.conf b/health/health.d/softnet.conf index d8b01caff..345f87505 100644 --- a/health/health.d/softnet.conf +++ b/health/health.d/softnet.conf @@ -5,9 +5,9 @@ alarm: 1min_netdev_backlog_exceeded on: system.softnet_stat - class: System + class: Errors + type: System component: Network - type: Errors os: linux hosts: * lookup: average -1m unaligned absolute of dropped @@ -21,9 +21,9 @@ component: Network alarm: 1min_netdev_budget_ran_outs on: system.softnet_stat - class: System + class: Errors + type: System component: Network - type: Errors os: linux hosts: * lookup: average -1m unaligned absolute of squeezed @@ -38,9 +38,9 @@ component: Network alarm: 10min_netisr_backlog_exceeded on: system.softnet_stat - class: System + class: Errors + type: System component: Network - type: Errors os: freebsd hosts: * lookup: average -1m unaligned absolute of qdrops diff --git a/health/health.d/squid.conf b/health/health.d/squid.conf deleted file mode 100644 index 5c3d17629..000000000 --- a/health/health.d/squid.conf +++ /dev/null @@ -1,17 +0,0 @@ - -# make sure squid is running - - template: squid_last_collected_secs - on: squid.clients_requests - class: Web Proxy -component: Squid - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: proxyadmin - diff --git a/health/health.d/stiebeleltron.conf b/health/health.d/stiebeleltron.conf index f793b5ed1..493c8b73a 100644 --- a/health/health.d/stiebeleltron.conf +++ b/health/health.d/stiebeleltron.conf @@ -1,9 +1,9 @@ template: stiebeleltron_last_collected_secs families: * on: stiebeleltron.heating.hc1 - class: Other + class: Latency + type: Other component: Sensors - type: Latency calc: $now - $last_collected_t every: 10s units: seconds ago diff --git a/health/health.d/swap.conf b/health/health.d/swap.conf index 5b3f89a97..03c319320 100644 --- a/health/health.d/swap.conf +++ b/health/health.d/swap.conf @@ -3,9 +3,9 @@ alarm: 30min_ram_swapped_out on: system.swapio - class: System + class: Workload + type: System component: Memory - type: Workload os: linux freebsd hosts: * lookup: sum -30m unaligned absolute of out @@ -20,12 +20,12 @@ component: Memory alarm: used_swap on: system.swap - class: System + class: Utilization + type: System component: Memory - type: Utilization os: linux freebsd hosts: * - calc: $used * 100 / ( $used + $free ) + calc: ($used + $free) > 0 ? ($used * 100 / ($used + $free)) : 0 units: % every: 10s warn: $this > (($status >= $WARNING) ? 
(80) : (90)) diff --git a/health/health.d/systemdunits.conf b/health/health.d/systemdunits.conf index cc1a8698d..38213a8db 100644 --- a/health/health.d/systemdunits.conf +++ b/health/health.d/systemdunits.conf @@ -4,9 +4,9 @@ ## Service units template: systemd_service_units_state on: systemd.service_units_state - class: Linux + class: Errors + type: Linux component: Systemd units - type: Errors lookup: max -1s min2max units: ok/failed every: 10s @@ -18,9 +18,9 @@ component: Systemd units ## Socket units template: systemd_socket_units_state on: systemd.socket_unit_state - class: Linux + class: Errors + type: Linux component: Systemd units - type: Errors lookup: max -1s min2max units: ok/failed every: 10s @@ -32,9 +32,9 @@ component: Systemd units ## Target units template: systemd_target_units_state on: systemd.target_unit_state - class: Linux + class: Errors + type: Linux component: Systemd units - type: Errors lookup: max -1s min2max units: ok/failed every: 10s @@ -46,9 +46,9 @@ component: Systemd units ## Path units template: systemd_path_units_state on: systemd.path_unit_state - class: Linux + class: Errors + type: Linux component: Systemd units - type: Errors lookup: max -1s min2max units: ok/failed every: 10s @@ -60,9 +60,9 @@ component: Systemd units ## Device units template: systemd_device_units_state on: systemd.device_unit_state - class: Linux + class: Errors + type: Linux component: Systemd units - type: Errors lookup: max -1s min2max units: ok/failed every: 10s @@ -74,9 +74,9 @@ component: Systemd units ## Mount units template: systemd_mount_units_state on: systemd.mount_unit_state - class: Linux + class: Errors + type: Linux component: Systemd units - type: Errors lookup: max -1s min2max units: ok/failed every: 10s @@ -88,9 +88,9 @@ component: Systemd units ## Automount units template: systemd_automount_units_state on: systemd.automount_unit_state - class: Linux + class: Errors + type: Linux component: Systemd units - type: Errors lookup: max -1s min2max units: ok/failed every: 10s @@ -102,9 +102,9 @@ component: Systemd units ## Swap units template: systemd_swap_units_state on: systemd.swap_unit_state - class: Linux + class: Errors + type: Linux component: Systemd units - type: Errors lookup: max -1s min2max units: ok/failed every: 10s @@ -116,9 +116,9 @@ component: Systemd units ## Scope units template: systemd_scope_units_state on: systemd.scope_unit_state - class: Linux + class: Errors + type: Linux component: Systemd units - type: Errors lookup: max -1s min2max units: ok/failed every: 10s @@ -130,9 +130,9 @@ component: Systemd units ## Slice units template: systemd_slice_units_state on: systemd.slice_unit_state - class: Linux + class: Errors + type: Linux component: Systemd units - type: Errors lookup: max -1s min2max units: ok/failed every: 10s diff --git a/health/health.d/tcp_conn.conf b/health/health.d/tcp_conn.conf index f2c5e4e5d..67b3bee53 100644 --- a/health/health.d/tcp_conn.conf +++ b/health/health.d/tcp_conn.conf @@ -7,9 +7,9 @@ alarm: tcp_connections on: ipv4.tcpsock - class: System + class: Workload + type: System component: Network - type: Workload os: linux hosts: * calc: (${tcp_max_connections} > 0) ? 
( ${connections} * 100 / ${tcp_max_connections} ) : 0
diff --git a/health/health.d/tcp_listen.conf b/health/health.d/tcp_listen.conf
index 51a0e461c..d4bcfa248 100644
--- a/health/health.d/tcp_listen.conf
+++ b/health/health.d/tcp_listen.conf
@@ -20,9 +20,9 @@
     alarm: 1m_tcp_accept_queue_overflows
        on: ip.tcp_accept_queue
-    class: System
+    class: Workload
+     type: System
component: Network
-     type: Workload
        os: linux
     hosts: *
    lookup: average -60s unaligned absolute of ListenOverflows
@@ -38,9 +38,9 @@ component: Network
# CHECK: https://github.com/netdata/netdata/issues/3234#issuecomment-423935842
     alarm: 1m_tcp_accept_queue_drops
        on: ip.tcp_accept_queue
-    class: System
+    class: Workload
+     type: System
component: Network
-     type: Workload
        os: linux
     hosts: *
    lookup: average -60s unaligned absolute of ListenDrops
@@ -63,9 +63,9 @@ component: Network
     alarm: 1m_tcp_syn_queue_drops
        on: ip.tcp_syn_queue
-    class: System
+    class: Workload
+     type: System
component: Network
-     type: Workload
        os: linux
     hosts: *
    lookup: average -60s unaligned absolute of TCPReqQFullDrop
@@ -80,9 +80,9 @@ component: Network
     alarm: 1m_tcp_syn_queue_cookies
        on: ip.tcp_syn_queue
-    class: System
+    class: Workload
+     type: System
component: Network
-     type: Workload
        os: linux
     hosts: *
    lookup: average -60s unaligned absolute of TCPReqQFullDoCookies
diff --git a/health/health.d/tcp_mem.conf b/health/health.d/tcp_mem.conf
index 646e5c6da..318be20ac 100644
--- a/health/health.d/tcp_mem.conf
+++ b/health/health.d/tcp_mem.conf
@@ -8,9 +8,9 @@
     alarm: tcp_memory
        on: ipv4.sockstat_tcp_mem
-    class: System
+    class: Utilization
+     type: System
component: Network
-     type: Utilization
        os: linux
     hosts: *
      calc: ${mem} * 100 / ${tcp_mem_high}
diff --git a/health/health.d/tcp_orphans.conf b/health/health.d/tcp_orphans.conf
index 6e94d67d1..cbd628da5 100644
--- a/health/health.d/tcp_orphans.conf
+++ b/health/health.d/tcp_orphans.conf
@@ -9,9 +9,9 @@
     alarm: tcp_orphans
        on: ipv4.sockstat_tcp_sockets
-    class: System
+    class: Errors
+     type: System
component: Network
-     type: Errors
        os: linux
     hosts: *
      calc: ${orphan} * 100 / ${tcp_max_orphans}
diff --git a/health/health.d/tcp_resets.conf b/health/health.d/tcp_resets.conf
index 41355dad6..190271e47 100644
--- a/health/health.d/tcp_resets.conf
+++ b/health/health.d/tcp_resets.conf
@@ -6,9 +6,9 @@
     alarm: 1m_ipv4_tcp_resets_sent
        on: ipv4.tcphandshake
-    class: System
+    class: Errors
+     type: System
component: Network
-     type: Errors
        os: linux
     hosts: *
    lookup: average -1m at -10s unaligned absolute of OutRsts
@@ -18,9 +18,9 @@ component: Network
     alarm: 10s_ipv4_tcp_resets_sent
        on: ipv4.tcphandshake
-    class: System
+    class: Errors
+     type: System
component: Network
-     type: Errors
        os: linux
     hosts: *
    lookup: average -10s unaligned absolute of OutRsts
@@ -40,9 +40,9 @@ component: Network
     alarm: 1m_ipv4_tcp_resets_received
        on: ipv4.tcphandshake
-    class: System
+    class: Errors
+     type: System
component: Network
-     type: Errors
        os: linux freebsd
     hosts: *
    lookup: average -1m at -10s unaligned absolute of AttemptFails
@@ -52,9 +52,9 @@ component: Network
     alarm: 10s_ipv4_tcp_resets_received
        on: ipv4.tcphandshake
-    class: System
+    class: Errors
+     type: System
component: Network
-     type: Errors
        os: linux freebsd
     hosts: *
    lookup: average -10s unaligned absolute of AttemptFails
diff --git a/health/health.d/timex.conf b/health/health.d/timex.conf
new file mode 100644
index 000000000..ea90c4000
--- /dev/null
+++ b/health/health.d/timex.conf
@@ -0,0 +1,17 @@
+
+# It can take several minutes before ntpd selects a server to synchronize with;
+# try checking after 17 minutes (1024 seconds).
+
+     alarm: system_clock_sync_state
+        on: system.clock_sync_state
+        os: linux
+     class: Error
+      type: System
+component: Clock
+      calc: $state
+     units: synchronization state
+     every: 10s
+      warn: $system.uptime.uptime > 17 * 60 AND $this == 0
+     delay: down 5m
+      info: the system time is not synchronized to a reliable server
+        to: silent
diff --git a/health/health.d/udp_errors.conf b/health/health.d/udp_errors.conf
index 342a1aedd..64f47dfa7 100644
--- a/health/health.d/udp_errors.conf
+++ b/health/health.d/udp_errors.conf
@@ -6,9 +6,9 @@
     alarm: 1m_ipv4_udp_receive_buffer_errors
        on: ipv4.udperrors
-    class: System
+    class: Errors
+     type: System
component: Network
-     type: Errors
        os: linux freebsd
     hosts: *
    lookup: average -1m unaligned absolute of RcvbufErrors
@@ -24,9 +24,9 @@ component: Network
     alarm: 1m_ipv4_udp_send_buffer_errors
        on: ipv4.udperrors
-    class: System
+    class: Errors
+     type: System
component: Network
-     type: Errors
        os: linux
     hosts: *
    lookup: average -1m unaligned absolute of SndbufErrors
diff --git a/health/health.d/unbound.conf b/health/health.d/unbound.conf
index 1df15474f..4e8d164d2 100644
--- a/health/health.d/unbound.conf
+++ b/health/health.d/unbound.conf
@@ -1,27 +1,11 @@
-# make sure unbound is running
-
- template: unbound_last_collected_secs
-       on: unbound.queries
-    class: DNS
-component: Unbound
-     type: Latency
-     calc: $now - $last_collected_t
-    units: seconds ago
-    every: 10s
-     warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-     crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-    delay: down 5m multiplier 1.5 max 1h
-     info: number of seconds since the last successful data collection
-       to: sysadmin
-
# make sure there is no overwritten/dropped queries in the request-list

  template: unbound_request_list_overwritten
        on: unbound.request_list_jostle_list
-    class: DNS
+    class: Errors
+     type: DNS
component: Unbound
-     type: Errors
    lookup: average -60s unaligned absolute match-names of overwritten
     units: queries
     every: 10s
@@ -32,9 +16,9 @@ component: Unbound

  template: unbound_request_list_dropped
        on: unbound.request_list_jostle_list
-    class: DNS
+    class: Errors
+     type: DNS
component: Unbound
-     type: Errors
    lookup: average -60s unaligned absolute match-names of dropped
     units: queries
     every: 10s
diff --git a/health/health.d/varnish.conf b/health/health.d/varnish.conf
deleted file mode 100644
index 7f3bd6c82..000000000
--- a/health/health.d/varnish.conf
+++ /dev/null
@@ -1,12 +0,0 @@
-    alarm: varnish_last_collected
-       on: varnish.uptime
-    class: Web Proxy
-component: Varnish
-     type: Latency
-     calc: $now - $last_collected_t
-    units: seconds ago
-    every: 10s
-     warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-     crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
-     info: number of seconds since the last successful data collection
-       to: sysadmin
diff --git a/health/health.d/vcsa.conf b/health/health.d/vcsa.conf
index 8538e488c..a9cc7ceef 100644
--- a/health/health.d/vcsa.conf
+++ b/health/health.d/vcsa.conf
@@ -1,20 +1,4 @@
-# make sure vcsa is running and responding
-
- template: vcsa_last_collected_secs
-       on: vcsa.system_health
-    class: Virtual Machine
-component: VMware vCenter
-     type: Latency
-     calc: $now - $last_collected_t
-    units: seconds ago
-    every: 10s
-     warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
-     crit: $this > (($status == $CRITICAL) ?
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin - # Overall system health: # - 0: all components are healthy. # - 1: one or more components might become overloaded soon. @@ -24,9 +8,9 @@ component: VMware vCenter template: vcsa_system_health on: vcsa.system_health - class: Virtual Machine + class: Errors + type: Virtual Machine component: VMware vCenter - type: Errors lookup: max -10s unaligned of system units: status every: 10s @@ -46,9 +30,9 @@ component: VMware vCenter template: vcsa_swap_health on: vcsa.components_health - class: Virtual Machine + class: Errors + type: Virtual Machine component: VMware vCenter - type: Errors lookup: max -10s unaligned of swap units: status every: 10s @@ -61,9 +45,9 @@ component: VMware vCenter template: vcsa_storage_health on: vcsa.components_health - class: Virtual Machine + class: Errors + type: Virtual Machine component: VMware vCenter - type: Errors lookup: max -10s unaligned of storage units: status every: 10s @@ -76,9 +60,9 @@ component: VMware vCenter template: vcsa_mem_health on: vcsa.components_health - class: Virtual Machine + class: Errors + type: Virtual Machine component: VMware vCenter - type: Errors lookup: max -10s unaligned of mem units: status every: 10s @@ -91,9 +75,9 @@ component: VMware vCenter template: vcsa_load_health on: vcsa.components_health - class: Virtual Machine + class: Utilization + type: Virtual Machine component: VMware vCenter - type: Utilization lookup: max -10s unaligned of load units: status every: 10s @@ -106,9 +90,9 @@ component: VMware vCenter template: vcsa_database_storage_health on: vcsa.components_health - class: Virtual Machine + class: Errors + type: Virtual Machine component: VMware vCenter - type: Errors lookup: max -10s unaligned of database_storage units: status every: 10s @@ -121,9 +105,9 @@ component: VMware vCenter template: vcsa_applmgmt_health on: vcsa.components_health - class: Virtual Machine + class: Errors + type: Virtual Machine component: VMware vCenter - type: Errors lookup: max -10s unaligned of applmgmt units: status every: 10s @@ -143,9 +127,9 @@ component: VMware vCenter template: vcsa_software_updates_health on: vcsa.software_updates_health - class: Virtual Machine + class: Errors + type: Virtual Machine component: VMware vCenter - type: Errors lookup: max -10s unaligned of software_packages units: status every: 10s diff --git a/health/health.d/vernemq.conf b/health/health.d/vernemq.conf index 737147f38..cfbe2a524 100644 --- a/health/health.d/vernemq.conf +++ b/health/health.d/vernemq.conf @@ -1,27 +1,11 @@ -# Availability - - template: vernemq_last_collected_secs - on: vernemq.node_uptime - class: Messaging -component: VerneMQ - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin - # Socket errors template: vernemq_socket_errors on: vernemq.socket_errors - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: sum -1m unaligned absolute of socket_error units: errors every: 1m @@ -34,9 +18,9 @@ component: VerneMQ template: vernemq_queue_message_drop on: vernemq.queue_undelivered_messages - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: average -1m unaligned absolute of queue_message_drop units: dropped messages every: 1m @@ -47,9 +31,9 @@ component: VerneMQ template: vernemq_queue_message_expired on: vernemq.queue_undelivered_messages - class: Messaging + class: Latency + type: Messaging component: VerneMQ - type: Latency lookup: average -1m unaligned absolute of queue_message_expired units: expired messages every: 1m @@ -60,9 +44,9 @@ component: VerneMQ template: vernemq_queue_message_unhandled on: vernemq.queue_undelivered_messages - class: Messaging + class: Latency + type: Messaging component: VerneMQ - type: Latency lookup: average -1m unaligned absolute of queue_message_unhandled units: unhandled messages every: 1m @@ -75,9 +59,9 @@ component: VerneMQ template: vernemq_average_scheduler_utilization on: vernemq.average_scheduler_utilization - class: Messaging + class: Utilization + type: Messaging component: VerneMQ - type: Utilization lookup: average -10m unaligned units: % every: 1m @@ -91,9 +75,9 @@ component: VerneMQ template: vernemq_cluster_dropped on: vernemq.cluster_dropped - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: sum -1m unaligned units: KiB every: 1m @@ -104,9 +88,9 @@ component: VerneMQ template: vernemq_netsplits on: vernemq.netsplits - class: Messaging + class: Workload + type: Messaging component: VerneMQ - type: Workload lookup: sum -1m unaligned absolute of netsplit_detected units: netsplits every: 10s @@ -119,9 +103,9 @@ component: VerneMQ template: vernemq_mqtt_connack_sent_reason_unsuccessful on: vernemq.mqtt_connack_sent_reason - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: average -1m unaligned absolute match-names of !success,* units: packets every: 1m @@ -134,9 +118,9 @@ component: VerneMQ template: vernemq_mqtt_disconnect_received_reason_not_normal on: vernemq.mqtt_disconnect_received_reason - class: Messaging + class: Workload + type: Messaging component: VerneMQ - type: Workload lookup: average -1m unaligned absolute match-names of !normal_disconnect,* units: packets every: 1m @@ -147,9 +131,9 @@ component: VerneMQ template: vernemq_mqtt_disconnect_sent_reason_not_normal on: vernemq.mqtt_disconnect_sent_reason - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: average -1m unaligned absolute match-names of !normal_disconnect,* units: packets every: 1m @@ -162,9 +146,9 @@ component: VerneMQ template: vernemq_mqtt_subscribe_error on: vernemq.mqtt_subscribe_error - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: average -1m unaligned absolute units: failed ops every: 1m @@ -175,9 +159,9 @@ component: VerneMQ template: vernemq_mqtt_subscribe_auth_error on: vernemq.mqtt_subscribe_auth_error - class: Messaging + class: Workload + type: Messaging component: VerneMQ - type: Workload lookup: average -1m unaligned 
absolute units: attempts every: 1m @@ -190,9 +174,9 @@ component: VerneMQ template: vernemq_mqtt_unsubscribe_error on: vernemq.mqtt_unsubscribe_error - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: average -1m unaligned absolute units: failed ops every: 1m @@ -205,9 +189,9 @@ component: VerneMQ template: vernemq_mqtt_publish_errors on: vernemq.mqtt_publish_errors - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: average -1m unaligned absolute units: failed ops every: 1m @@ -218,9 +202,9 @@ component: VerneMQ template: vernemq_mqtt_publish_auth_errors on: vernemq.mqtt_publish_auth_errors - class: Messaging + class: Workload + type: Messaging component: VerneMQ - type: Workload lookup: average -1m unaligned absolute units: attempts every: 1m @@ -233,9 +217,9 @@ component: VerneMQ template: vernemq_mqtt_puback_received_reason_unsuccessful on: vernemq.mqtt_puback_received_reason - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: average -1m unaligned absolute match-names of !success,* units: packets every: 1m @@ -246,9 +230,9 @@ component: VerneMQ template: vernemq_mqtt_puback_sent_reason_unsuccessful on: vernemq.mqtt_puback_sent_reason - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: average -1m unaligned absolute match-names of !success,* units: packets every: 1m @@ -259,9 +243,9 @@ component: VerneMQ template: vernemq_mqtt_puback_unexpected on: vernemq.mqtt_puback_invalid_error - class: Messaging + class: Workload + type: Messaging component: VerneMQ - type: Workload lookup: average -1m unaligned absolute units: messages every: 1m @@ -274,9 +258,9 @@ component: VerneMQ template: vernemq_mqtt_pubrec_received_reason_unsuccessful on: vernemq.mqtt_pubrec_received_reason - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: average -1m unaligned absolute match-names of !success,* units: packets every: 1m @@ -287,9 +271,9 @@ component: VerneMQ template: vernemq_mqtt_pubrec_sent_reason_unsuccessful on: vernemq.mqtt_pubrec_sent_reason - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: average -1m unaligned absolute match-names of !success,* units: packets every: 1m @@ -300,9 +284,9 @@ component: VerneMQ template: vernemq_mqtt_pubrec_invalid_error on: vernemq.mqtt_pubrec_invalid_error - class: Messaging + class: Workload + type: Messaging component: VerneMQ - type: Workload lookup: average -1m unaligned absolute units: messages every: 1m @@ -315,9 +299,9 @@ component: VerneMQ template: vernemq_mqtt_pubrel_received_reason_unsuccessful on: vernemq.mqtt_pubrel_received_reason - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: average -1m unaligned absolute match-names of !success,* units: packets every: 1m @@ -328,9 +312,9 @@ component: VerneMQ template: vernemq_mqtt_pubrel_sent_reason_unsuccessful on: vernemq.mqtt_pubrel_sent_reason - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: average -1m unaligned absolute match-names of !success,* units: packets every: 1m @@ -343,9 +327,9 @@ component: VerneMQ template: vernemq_mqtt_pubcomp_received_reason_unsuccessful on: vernemq.mqtt_pubcomp_received_reason - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: average -1m unaligned absolute match-names of !success,* 
units: packets every: 1m @@ -356,9 +340,9 @@ component: VerneMQ template: vernemq_mqtt_pubcomp_sent_reason_unsuccessful on: vernemq.mqtt_pubcomp_sent_reason - class: Messaging + class: Errors + type: Messaging component: VerneMQ - type: Errors lookup: average -1m unaligned absolute match-names of !success,* units: packets every: 1m @@ -369,9 +353,9 @@ component: VerneMQ template: vernemq_mqtt_pubcomp_unexpected on: vernemq.mqtt_pubcomp_invalid_error - class: Messaging + class: Workload + type: Messaging component: VerneMQ - type: Workload lookup: average -1m unaligned absolute units: messages every: 1m diff --git a/health/health.d/vsphere.conf b/health/health.d/vsphere.conf index aee7c5cd4..d8fc899b9 100644 --- a/health/health.d/vsphere.conf +++ b/health/health.d/vsphere.conf @@ -6,9 +6,9 @@ template: vsphere_vm_mem_usage on: vsphere.vm_mem_usage_percentage - class: Virtual Machine + class: Utilization + type: Virtual Machine component: Memory - type: Utilization hosts: * calc: $used units: % @@ -23,9 +23,9 @@ component: Memory template: vsphere_host_mem_usage on: vsphere.host_mem_usage_percentage - class: Virtual Machine + class: Utilization + type: Virtual Machine component: Memory - type: Utilization hosts: * calc: $used units: % @@ -39,9 +39,9 @@ component: Memory template: vsphere_inbound_packets_errors on: vsphere.net_errors_total - class: Virtual Machine + class: Errors + type: Virtual Machine component: Network - type: Errors hosts: * families: * lookup: sum -10m unaligned absolute match-names of rx @@ -51,9 +51,9 @@ component: Network template: vsphere_outbound_packets_errors on: vsphere.net_errors_total - class: Virtual Machine + class: Errors + type: Virtual Machine component: Network - type: Errors hosts: * families: * lookup: sum -10m unaligned absolute match-names of tx @@ -65,9 +65,9 @@ component: Network template: vsphere_inbound_packets_errors_ratio on: vsphere.net_packets_total - class: Virtual Machine + class: Errors + type: Virtual Machine component: Network - type: Errors hosts: * families: * lookup: sum -10m unaligned absolute match-names of rx @@ -81,9 +81,9 @@ component: Network template: vsphere_outbound_packets_errors_ratio on: vsphere.net_packets_total - class: Virtual Machine + class: Errors + type: Virtual Machine component: Network - type: Errors hosts: * families: * lookup: sum -10m unaligned absolute match-names of tx @@ -100,9 +100,9 @@ component: Network template: vsphere_cpu_usage on: vsphere.cpu_usage_total - class: Virtual Machine + class: Utilization + type: Virtual Machine component: CPU - type: Utilization hosts: * lookup: average -10m unaligned match-names of used units: % @@ -117,9 +117,9 @@ component: CPU template: vsphere_inbound_packets_dropped on: vsphere.net_drops_total - class: Virtual Machine + class: Errors + type: Virtual Machine component: Network - type: Errors hosts: * families: * lookup: sum -10m unaligned absolute match-names of rx @@ -129,9 +129,9 @@ component: Network template: vsphere_outbound_packets_dropped on: vsphere.net_drops_total - class: Virtual Machine + class: Errors + type: Virtual Machine component: Network - type: Errors hosts: * families: * lookup: sum -10m unaligned absolute match-names of tx @@ -143,9 +143,9 @@ component: Network template: vsphere_inbound_packets_dropped_ratio on: vsphere.net_packets_total - class: Virtual Machine + class: Errors + type: Virtual Machine component: Network - type: Errors hosts: * families: * lookup: sum -10m unaligned absolute match-names of rx @@ -159,9 +159,9 @@ component: Network 
template: vsphere_outbound_packets_dropped_ratio on: vsphere.net_packets_total - class: Virtual Machine + class: Errors + type: Virtual Machine component: Network - type: Errors hosts: * families: * lookup: sum -10m unaligned absolute match-names of tx diff --git a/health/health.d/web_log.conf b/health/health.d/web_log.conf index 127c9a9c6..454e0abef 100644 --- a/health/health.d/web_log.conf +++ b/health/health.d/web_log.conf @@ -1,22 +1,4 @@ -# make sure we can collect web log data - - template: last_collected_secs - on: web_log.response_codes - class: Web Server -component: Web log - type: Latency - families: * - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - - # ----------------------------------------------------------------------------- # high level response code alarms @@ -29,9 +11,9 @@ component: Web log template: 1m_requests on: web_log.response_statuses - class: Web Server + class: Workload + type: Web Server component: Web log - type: Workload families: * lookup: sum -1m unaligned calc: ($this == 0)?(1):($this) @@ -41,9 +23,9 @@ component: Web log template: 1m_successful on: web_log.response_statuses - class: Web Server + class: Workload + type: Web Server component: Web log - type: Workload families: * lookup: sum -1m unaligned of successful_requests calc: $this * 100 / $1m_requests @@ -57,41 +39,39 @@ component: Web log template: 1m_redirects on: web_log.response_statuses - class: Web Server + class: Workload + type: Web Server component: Web log - type: Workload families: * lookup: sum -1m unaligned of redirects calc: $this * 100 / $1m_requests units: % every: 10s warn: ($1m_requests > 120) ? ($this > (($status >= $WARNING ) ? ( 1 ) : ( 20 )) ) : ( 0 ) - crit: ($1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 20 ) : ( 30 )) ) : ( 0 ) delay: up 2m down 15m multiplier 1.5 max 1h info: ratio of redirection HTTP requests over the last minute (3xx except 304) to: webmaster template: 1m_bad_requests on: web_log.response_statuses - class: Web Server + class: Errors + type: Web Server component: Web log - type: Errors families: * lookup: sum -1m unaligned of bad_requests calc: $this * 100 / $1m_requests units: % every: 10s warn: ($1m_requests > 120) ? ($this > (($status >= $WARNING) ? ( 10 ) : ( 30 )) ) : ( 0 ) - crit: ($1m_requests > 120) ? ($this > (($status == $CRITICAL) ? 
( 30 ) : ( 50 )) ) : ( 0 ) delay: up 2m down 15m multiplier 1.5 max 1h info: ratio of client error HTTP requests over the last minute (4xx except 401) to: webmaster template: 1m_internal_errors on: web_log.response_statuses - class: Web Server + class: Errors + type: Web Server component: Web log - type: Errors families: * lookup: sum -1m unaligned of server_errors calc: $this * 100 / $1m_requests @@ -114,9 +94,9 @@ component: Web log template: 1m_total_requests on: web_log.response_codes - class: Web Server + class: Workload + type: Web Server component: Web log - type: Workload families: * lookup: sum -1m unaligned calc: ($this == 0)?(1):($this) @@ -126,9 +106,9 @@ component: Web log template: 1m_unmatched on: web_log.response_codes - class: Web Server + class: Errors + type: Web Server component: Web log - type: Errors families: * lookup: sum -1m unaligned of unmatched calc: $this * 100 / $1m_total_requests @@ -151,9 +131,9 @@ component: Web log template: 10m_response_time on: web_log.response_time - class: System + class: Latency + type: System component: Web log - type: Latency families: * lookup: average -10m unaligned of avg units: ms @@ -162,9 +142,9 @@ component: Web log template: web_slow on: web_log.response_time - class: Web Server + class: Latency + type: Web Server component: Web log - type: Latency families: * lookup: average -1m unaligned of avg units: ms @@ -191,9 +171,9 @@ component: Web log template: 5m_successful_old on: web_log.response_statuses - class: Web Server + class: Workload + type: Web Server component: Web log - type: Workload families: * lookup: average -5m at -5m unaligned of successful_requests units: requests/s @@ -202,9 +182,9 @@ component: Web log template: 5m_successful on: web_log.response_statuses - class: Web Server + class: Workload + type: Web Server component: Web log - type: Workload families: * lookup: average -5m unaligned of successful_requests units: requests/s @@ -213,9 +193,9 @@ component: Web log template: 5m_requests_ratio on: web_log.response_codes - class: Web Server + class: Workload + type: Web Server component: Web log - type: Workload families: * calc: ($5m_successful_old > 0)?($5m_successful * 100 / $5m_successful_old):(100) units: % @@ -233,23 +213,6 @@ component: Web log # ---------------------------------------------------GO-VERSION--------------------------------------------------------- -# make sure we can collect web log data - - template: web_log_last_collected_secs - on: web_log.requests - class: Web Server -component: Web log - type: Latency - families: * - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - # unmatched lines # the following alarms trigger only when there are enough data. 
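Every health.d hunk in this patch applies the same key reshuffle: the value that used to sit in 'class:' (the service area, e.g. Web Server) moves to the new 'type:' key, and 'class:' now carries the alarm category (Errors, Latency, Utilization or Workload), while 'component:' keeps its value. A minimal before/after sketch, using the 1m_redirects template above (field values taken from this patch; alignment is illustrative):

        # before                        # after
            class: Web Server               class: Workload
        component: Web log                   type: Web Server
             type: Workload             component: Web log
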
@@ -261,9 +224,9 @@ component: Web log template: web_log_1m_total_requests on: web_log.requests - class: Web Server + class: Workload + type: Web Server component: Web log - type: Workload families: * lookup: sum -1m unaligned calc: ($this == 0)?(1):($this) @@ -273,9 +236,9 @@ component: Web log template: web_log_1m_unmatched on: web_log.excluded_requests - class: Web Server + class: Errors + type: Web Server component: Web log - type: Errors families: * lookup: sum -1m unaligned of unmatched calc: $this * 100 / $web_log_1m_total_requests @@ -298,9 +261,9 @@ component: Web log template: web_log_1m_requests on: web_log.type_requests - class: Web Server + class: Workload + type: Web Server component: Web log - type: Workload families: * lookup: sum -1m unaligned calc: ($this == 0)?(1):($this) @@ -310,9 +273,9 @@ component: Web log template: web_log_1m_successful on: web_log.type_requests - class: Web Server + class: Workload + type: Web Server component: Web log - type: Workload families: * lookup: sum -1m unaligned of success calc: $this * 100 / $web_log_1m_requests @@ -326,41 +289,39 @@ component: Web log template: web_log_1m_redirects on: web_log.type_requests - class: Web Server + class: Workload + type: Web Server component: Web log - type: Workload families: * lookup: sum -1m unaligned of redirect calc: $this * 100 / $web_log_1m_requests units: % every: 10s warn: ($web_log_1m_requests > 120) ? ($this > (($status >= $WARNING ) ? ( 1 ) : ( 20 )) ) : ( 0 ) - crit: ($web_log_1m_requests > 120) ? ($this > (($status == $CRITICAL) ? ( 20 ) : ( 30 )) ) : ( 0 ) delay: up 2m down 15m multiplier 1.5 max 1h info: ratio of redirection HTTP requests over the last minute (3xx except 304) to: webmaster template: web_log_1m_bad_requests on: web_log.type_requests - class: Web Server + class: Errors + type: Web Server component: Web log - type: Errors families: * lookup: sum -1m unaligned of bad calc: $this * 100 / $web_log_1m_requests units: % every: 10s warn: ($web_log_1m_requests > 120) ? ($this > (($status >= $WARNING) ? ( 10 ) : ( 30 )) ) : ( 0 ) - crit: ($web_log_1m_requests > 120) ? ($this > (($status == $CRITICAL) ? 
( 30 ) : ( 50 )) ) : ( 0 ) delay: up 2m down 15m multiplier 1.5 max 1h info: ratio of client error HTTP requests over the last minute (4xx except 401) to: webmaster template: web_log_1m_internal_errors on: web_log.type_requests - class: Web Server + class: Errors + type: Web Server component: Web log - type: Errors families: * lookup: sum -1m unaligned of error calc: $this * 100 / $web_log_1m_requests @@ -384,9 +345,9 @@ component: Web log template: web_log_10m_response_time on: web_log.request_processing_time - class: System + class: Latency + type: System component: Web log - type: Latency families: * lookup: average -10m unaligned of avg units: ms @@ -395,9 +356,9 @@ component: Web log template: web_log_web_slow on: web_log.request_processing_time - class: Web Server + class: Latency + type: Web Server component: Web log - type: Latency families: * lookup: average -1m unaligned of avg units: ms @@ -424,9 +385,9 @@ component: Web log template: web_log_5m_successful_old on: web_log.type_requests - class: Web Server + class: Workload + type: Web Server component: Web log - type: Workload families: * lookup: average -5m at -5m unaligned of success units: requests/s @@ -435,9 +396,9 @@ component: Web log template: web_log_5m_successful on: web_log.type_requests - class: Web Server + class: Workload + type: Web Server component: Web log - type: Workload families: * lookup: average -5m unaligned of success units: requests/s @@ -446,9 +407,9 @@ component: Web log template: web_log_5m_requests_ratio on: web_log.type_requests - class: Web Server + class: Workload + type: Web Server component: Web log - type: Workload families: * calc: ($web_log_5m_successful_old > 0)?($web_log_5m_successful * 100 / $web_log_5m_successful_old):(100) units: % diff --git a/health/health.d/whoisquery.conf b/health/health.d/whoisquery.conf index c6d3a9de0..be5eb58f9 100644 --- a/health/health.d/whoisquery.conf +++ b/health/health.d/whoisquery.conf @@ -1,26 +1,9 @@ -# make sure whoisquery is running - - template: whoisquery_last_collected_secs - on: whoisquery.time_until_expiration - class: Other -component: WHOIS - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 60s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - - template: whoisquery_days_until_expiration on: whoisquery.time_until_expiration - class: Other + class: Utilization + type: Other component: WHOIS - type: Utilization calc: $expiry units: seconds every: 60s diff --git a/health/health.d/wmi.conf b/health/health.d/wmi.conf index 6bd4e077f..90d39ce9d 100644 --- a/health/health.d/wmi.conf +++ b/health/health.d/wmi.conf @@ -1,29 +1,11 @@ -# you can disable an alarm notification by setting the 'to' line to: silent - -## Availability - - template: wmi_last_collected_secs - on: cpu.collector_duration - class: Windows -component: Availability - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: sysadmin - ## CPU template: wmi_10min_cpu_usage on: wmi.cpu_utilization_total - class: Windows + class: Utilization + type: Windows component: CPU - type: Utilization os: linux hosts: * lookup: average -10m unaligned match-names of dpc,user,privileged,interrupt @@ -40,9 +22,9 @@ component: CPU template: wmi_ram_in_use on: wmi.memory_utilization - class: Windows + class: Utilization + type: Windows component: Memory - type: Utilization os: linux hosts: * calc: ($used) * 100 / ($used + $available) @@ -56,9 +38,9 @@ component: Memory template: wmi_swap_in_use on: wmi.memory_swap_utilization - class: Windows + class: Utilization + type: Windows component: Memory - type: Utilization os: linux hosts: * calc: ($used) * 100 / ($used + $available) @@ -75,9 +57,9 @@ component: Memory template: wmi_inbound_packets_discarded on: wmi.net_discarded - class: Windows + class: Errors + type: Windows component: Network - type: Errors os: linux hosts: * families: * @@ -91,9 +73,9 @@ component: Network template: wmi_outbound_packets_discarded on: wmi.net_discarded - class: Windows + class: Errors + type: Windows component: Network - type: Errors os: linux hosts: * families: * @@ -107,9 +89,9 @@ component: Network template: wmi_inbound_packets_errors on: wmi.net_errors - class: Windows + class: Errors + type: Windows component: Network - type: Errors os: linux hosts: * families: * @@ -123,9 +105,9 @@ component: Network template: wmi_outbound_packets_errors on: wmi.net_errors - class: Windows + class: Errors + type: Windows component: Network - type: Errors os: linux hosts: * families: * @@ -142,9 +124,9 @@ component: Network template: wmi_disk_in_use on: wmi.logical_disk_utilization - class: Windows + class: Utilization + type: Windows component: Disk - type: Utilization os: linux hosts: * calc: ($used) * 100 / ($used + $free) diff --git a/health/health.d/x509check.conf b/health/health.d/x509check.conf index 93c406b7a..fc69d0288 100644 --- a/health/health.d/x509check.conf +++ b/health/health.d/x509check.conf @@ -1,26 +1,9 @@ -# make sure x509check is running - - template: x509check_last_collected_secs - on: x509check.time_until_expiration - class: Certificates -component: x509 certificates - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 60s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - - template: x509check_days_until_expiration on: x509check.time_until_expiration - class: Certificates + class: Latency + type: Certificates component: x509 certificates - type: Latency calc: $expiry units: seconds every: 60s @@ -31,9 +14,9 @@ component: x509 certificates template: x509check_revocation_status on: x509check.revocation_status - class: Certificates + class: Errors + type: Certificates component: x509 certificates - type: Errors calc: $revoked every: 60s crit: $this != nan AND $this != 0 diff --git a/health/health.d/zfs.conf b/health/health.d/zfs.conf index d6f5fa2fe..785838d47 100644 --- a/health/health.d/zfs.conf +++ b/health/health.d/zfs.conf @@ -1,9 +1,9 @@ alarm: zfs_memory_throttle on: zfs.memory_ops - class: System + class: Utilization + type: System component: File system - type: Utilization lookup: sum -10m unaligned absolute of throttled units: events every: 1m @@ -16,9 +16,9 @@ component: File system template: zfs_pool_state_warn on: zfspool.state - class: System + class: Errors + type: System component: File system - type: Errors calc: $degraded units: boolean every: 10s @@ -29,9 +29,9 @@ component: File system template: zfs_pool_state_crit on: zfspool.state - class: System + class: Errors + type: System component: File system - type: Errors calc: $faulted + $unavail units: boolean every: 10s diff --git a/health/health.d/zookeeper.conf b/health/health.d/zookeeper.conf deleted file mode 100644 index 8c7d5a73d..000000000 --- a/health/health.d/zookeeper.conf +++ /dev/null @@ -1,17 +0,0 @@ - -# make sure zookeeper is running - - template: zookeeper_last_collected_secs - on: zookeeper.requests - class: KV Storage -component: ZooKeeper - type: Latency - calc: $now - $last_collected_t - units: seconds ago - every: 10s - warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every)) - crit: $this > (($status == $CRITICAL) ? 
($update_every) : (60 * $update_every)) - delay: down 5m multiplier 1.5 max 1h - info: number of seconds since the last successful data collection - to: webmaster - diff --git a/health/health.h b/health/health.h index 56331b227..09040b3a8 100644 --- a/health/health.h +++ b/health/health.h @@ -3,7 +3,7 @@ #ifndef NETDATA_HEALTH_H #define NETDATA_HEALTH_H 1 -#include "../daemon/common.h" +#include "daemon/common.h" #define NETDATA_PLUGIN_HOOK_HEALTH \ { \ @@ -27,6 +27,7 @@ extern unsigned int default_health_enabled; #define HEALTH_ENTRY_FLAG_EXEC_IN_PROGRESS 0x00000040 #define HEALTH_ENTRY_FLAG_SAVED 0x10000000 +#define HEALTH_ENTRY_FLAG_ACLK_QUEUED 0x20000000 #define HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION 0x80000000 #ifndef HEALTH_LISTEN_PORT @@ -63,6 +64,7 @@ extern ALARM_ENTRY* health_create_alarm_entry( RRDHOST *host, uint32_t alarm_id, uint32_t alarm_event_id, + uuid_t config_hash_id, time_t when, const char *name, const char *chart, @@ -96,6 +98,8 @@ extern void *health_cmdapi_thread(void *ptr); extern void health_label_log_save(RRDHOST *host); +extern char *health_edit_command_from_source(const char *source); + extern SIMPLE_PATTERN *health_pattern_from_foreach(char *s); #endif //NETDATA_HEALTH_H diff --git a/health/health_config.c b/health/health_config.c index 756023715..35234df15 100644 --- a/health/health_config.c +++ b/health/health_config.c @@ -473,6 +473,29 @@ static inline char *health_source_file(size_t line, const char *file) { return strdupz(buffer); } +char *health_edit_command_from_source(const char *source) +{ + char buffer[FILENAME_MAX + 1]; + char *temp = strdupz(source); + char *line_num = strchr(temp, '@'); + char *file_no_path = strrchr(temp, '/'); + + if (likely(file_no_path && line_num)) { + *line_num = '\0'; + snprintfz( + buffer, + FILENAME_MAX, + "sudo %s/edit-config health.d/%s=%s", + netdata_configured_user_config_dir, + file_no_path + 1, + temp); + } else + buffer[0] = '\0'; + + freez(temp); + return strdupz(buffer); +} + static inline void strip_quotes(char *s) { while(*s) { if(*s == '\'' || *s == '"') *s = ' '; @@ -480,6 +503,40 @@ static inline void strip_quotes(char *s) { } } +static inline void alert_config_free(struct alert_config *cfg) +{ + freez(cfg->alarm); + freez(cfg->template_key); + freez(cfg->os); + freez(cfg->host); + freez(cfg->on); + freez(cfg->families); + freez(cfg->plugin); + freez(cfg->module); + freez(cfg->charts); + freez(cfg->lookup); + freez(cfg->calc); + freez(cfg->warn); + freez(cfg->crit); + freez(cfg->every); + freez(cfg->green); + freez(cfg->red); + freez(cfg->exec); + freez(cfg->to); + freez(cfg->units); + freez(cfg->info); + freez(cfg->classification); + freez(cfg->component); + freez(cfg->type); + freez(cfg->delay); + freez(cfg->options); + freez(cfg->repeat); + freez(cfg->host_labels); + freez(cfg->p_db_lookup_dimensions); + freez(cfg->p_db_lookup_method); + freez(cfg); +} + static int health_readfile(const char *filename, void *data) { RRDHOST *host = (RRDHOST *)data; @@ -554,6 +611,7 @@ static int health_readfile(const char *filename, void *data) { RRDCALC *rc = NULL; RRDCALCTEMPLATE *rt = NULL; + struct alert_config *alert_cfg = NULL; int ignore_this = 0; size_t line = 0, append = 0; @@ -603,16 +661,18 @@ static int health_readfile(const char *filename, void *data) { if(hash == hash_alarm && !strcasecmp(key, HEALTH_ALARM_KEY)) { if(rc) { - if(ignore_this || !rrdcalc_add_alarm_from_config(host, rc)) { + if(ignore_this || !alert_hash_and_store_config(rc->config_hash_id, alert_cfg) || !rrdcalc_add_alarm_from_config(host, 
rc)) { rrdcalc_free(rc); + alert_config_free(alert_cfg); } // health_add_alarms_loop(host, rc, ignore_this) ; } if(rt) { - if (ignore_this || !rrdcalctemplate_add_template_from_config(host, rt)) + if (ignore_this || !alert_hash_and_store_config(rt->config_hash_id, alert_cfg) || !rrdcalctemplate_add_template_from_config(host, rt)) { rrdcalctemplate_free(rt); - + alert_config_free(alert_cfg); + } rt = NULL; } @@ -629,25 +689,30 @@ static int health_readfile(const char *filename, void *data) { rc->old_status = RRDCALC_STATUS_UNINITIALIZED; rc->warn_repeat_every = host->health_default_warn_repeat_every; rc->crit_repeat_every = host->health_default_crit_repeat_every; + alert_cfg = callocz(1, sizeof(struct alert_config)); if(rrdvar_fix_name(rc->name)) error("Health configuration renamed alarm '%s' to '%s'", value, rc->name); + alert_cfg->alarm = strdupz(rc->name); ignore_this = 0; } else if(hash == hash_template && !strcasecmp(key, HEALTH_TEMPLATE_KEY)) { if(rc) { // health_add_alarms_loop(host, rc, ignore_this) ; - if(ignore_this || !rrdcalc_add_alarm_from_config(host, rc)) { + if(ignore_this || !alert_hash_and_store_config(rc->config_hash_id, alert_cfg) || !rrdcalc_add_alarm_from_config(host, rc)) { rrdcalc_free(rc); + alert_config_free(alert_cfg); } rc = NULL; } if(rt) { - if(ignore_this || !rrdcalctemplate_add_template_from_config(host, rt)) + if(ignore_this || !alert_hash_and_store_config(rt->config_hash_id, alert_cfg) || !rrdcalctemplate_add_template_from_config(host, rt)) { rrdcalctemplate_free(rt); + alert_config_free(alert_cfg); + } } rt = callocz(1, sizeof(RRDCALCTEMPLATE)); @@ -659,14 +724,17 @@ static int health_readfile(const char *filename, void *data) { rt->delay_multiplier = 1.0; rt->warn_repeat_every = host->health_default_warn_repeat_every; rt->crit_repeat_every = host->health_default_crit_repeat_every; + alert_cfg = callocz(1, sizeof(struct alert_config)); if(rrdvar_fix_name(rt->name)) error("Health configuration renamed template '%s' to '%s'", value, rt->name); + alert_cfg->template_key = strdupz(rt->name); ignore_this = 0; } else if(hash == hash_os && !strcasecmp(key, HEALTH_OS_KEY)) { char *os_match = value; + if (alert_cfg) alert_cfg->os = strdupz(value); SIMPLE_PATTERN *os_pattern = simple_pattern_create(os_match, NULL, SIMPLE_PATTERN_EXACT); if(!simple_pattern_matches(os_pattern, host->os)) { @@ -683,6 +751,7 @@ static int health_readfile(const char *filename, void *data) { } else if(hash == hash_host && !strcasecmp(key, HEALTH_HOST_KEY)) { char *host_match = value; + if (alert_cfg) alert_cfg->host = strdupz(value); SIMPLE_PATTERN *host_pattern = simple_pattern_create(host_match, NULL, SIMPLE_PATTERN_EXACT); if(!simple_pattern_matches(host_pattern, host->hostname)) { @@ -699,6 +768,7 @@ static int health_readfile(const char *filename, void *data) { } else if(rc) { if(hash == hash_on && !strcasecmp(key, HEALTH_ON_KEY)) { + alert_cfg->on = strdupz(value); if(rc->chart) { if(strcmp(rc->chart, value) != 0) error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. 
Using ('%s').", @@ -710,6 +780,7 @@ static int health_readfile(const char *filename, void *data) { rc->hash_chart = simple_hash(rc->chart); } else if(hash == hash_class && !strcasecmp(key, HEALTH_CLASS_KEY)) { + alert_cfg->classification = strdupz(value); if(rc->classification) { if(strcmp(rc->classification, value) != 0) error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", @@ -721,6 +792,7 @@ static int health_readfile(const char *filename, void *data) { strip_quotes(rc->classification); } else if(hash == hash_component && !strcasecmp(key, HEALTH_COMPONENT_KEY)) { + alert_cfg->component = strdupz(value); if(rc->component) { if(strcmp(rc->component, value) != 0) error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", @@ -732,6 +804,7 @@ static int health_readfile(const char *filename, void *data) { strip_quotes(rc->component); } else if(hash == hash_type && !strcasecmp(key, HEALTH_TYPE_KEY)) { + alert_cfg->type = strdupz(value); if(rc->type) { if(strcmp(rc->type, value) != 0) error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", @@ -743,18 +816,32 @@ static int health_readfile(const char *filename, void *data) { strip_quotes(rc->type); } else if(hash == hash_lookup && !strcasecmp(key, HEALTH_LOOKUP_KEY)) { + alert_cfg->lookup = strdupz(value); health_parse_db_lookup(line, filename, value, &rc->group, &rc->after, &rc->before, &rc->update_every, &rc->options, &rc->dimensions, &rc->foreachdim); if(rc->foreachdim) { rc->spdim = health_pattern_from_foreach(rc->foreachdim); } + if (rc->after) { + if (rc->dimensions) + alert_cfg->p_db_lookup_dimensions = strdupz(rc->dimensions); + if (rc->group) + alert_cfg->p_db_lookup_method = strdupz(group_method2string(rc->group)); + alert_cfg->p_db_lookup_options = rc->options; + alert_cfg->p_db_lookup_after = rc->after; + alert_cfg->p_db_lookup_before = rc->before; + alert_cfg->p_update_every = rc->update_every; + } } else if(hash == hash_every && !strcasecmp(key, HEALTH_EVERY_KEY)) { + alert_cfg->every = strdupz(value); if(!config_parse_duration(value, &rc->update_every)) error("Health configuration at line %zu of file '%s' for alarm '%s' at key '%s' cannot parse duration: '%s'.", line, filename, rc->name, key, value); + alert_cfg->p_update_every = rc->update_every; } else if(hash == hash_green && !strcasecmp(key, HEALTH_GREEN_KEY)) { + alert_cfg->green = strdupz(value); char *e; rc->green = str2ld(value, &e); if(e && *e) { @@ -763,6 +850,7 @@ static int health_readfile(const char *filename, void *data) { } } else if(hash == hash_red && !strcasecmp(key, HEALTH_RED_KEY)) { + alert_cfg->red = strdupz(value); char *e; rc->red = str2ld(value, &e); if(e && *e) { @@ -771,6 +859,7 @@ static int health_readfile(const char *filename, void *data) { } } else if(hash == hash_calc && !strcasecmp(key, HEALTH_CALC_KEY)) { + alert_cfg->calc = strdupz(value); const char *failed_at = NULL; int error = 0; rc->calculation = expression_parse(value, &failed_at, &error); @@ -780,6 +869,7 @@ static int health_readfile(const char *filename, void *data) { } } else if(hash == hash_warn && !strcasecmp(key, HEALTH_WARN_KEY)) { + alert_cfg->warn = strdupz(value); const char *failed_at = NULL; int error = 0; rc->warning = expression_parse(value, &failed_at, &error); @@ -789,6 +879,7 @@ static int 
health_readfile(const char *filename, void *data) { } } else if(hash == hash_crit && !strcasecmp(key, HEALTH_CRIT_KEY)) { + alert_cfg->crit = strdupz(value); const char *failed_at = NULL; int error = 0; rc->critical = expression_parse(value, &failed_at, &error); @@ -798,6 +889,7 @@ static int health_readfile(const char *filename, void *data) { } } else if(hash == hash_exec && !strcasecmp(key, HEALTH_EXEC_KEY)) { + alert_cfg->exec = strdupz(value); if(rc->exec) { if(strcmp(rc->exec, value) != 0) error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", @@ -808,6 +900,7 @@ static int health_readfile(const char *filename, void *data) { rc->exec = strdupz(value); } else if(hash == hash_recipient && !strcasecmp(key, HEALTH_RECIPIENT_KEY)) { + alert_cfg->to = strdupz(value); if(rc->recipient) { if(strcmp(rc->recipient, value) != 0) error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", @@ -818,6 +911,7 @@ static int health_readfile(const char *filename, void *data) { rc->recipient = strdupz(value); } else if(hash == hash_units && !strcasecmp(key, HEALTH_UNITS_KEY)) { + alert_cfg->units = strdupz(value); if(rc->units) { if(strcmp(rc->units, value) != 0) error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", @@ -829,6 +923,7 @@ static int health_readfile(const char *filename, void *data) { strip_quotes(rc->units); } else if(hash == hash_info && !strcasecmp(key, HEALTH_INFO_KEY)) { + alert_cfg->info = strdupz(value); if(rc->info) { if(strcmp(rc->info, value) != 0) error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. 
Using ('%s').", @@ -840,17 +935,21 @@ static int health_readfile(const char *filename, void *data) { strip_quotes(rc->info); } else if(hash == hash_delay && !strcasecmp(key, HEALTH_DELAY_KEY)) { + alert_cfg->delay = strdupz(value); health_parse_delay(line, filename, value, &rc->delay_up_duration, &rc->delay_down_duration, &rc->delay_max_duration, &rc->delay_multiplier); } else if(hash == hash_options && !strcasecmp(key, HEALTH_OPTIONS_KEY)) { + alert_cfg->options = strdupz(value); rc->options |= health_parse_options(value); } else if(hash == hash_repeat && !strcasecmp(key, HEALTH_REPEAT_KEY)){ + alert_cfg->repeat = strdupz(value); health_parse_repeat(line, filename, value, &rc->warn_repeat_every, &rc->crit_repeat_every); } else if(hash == hash_host_label && !strcasecmp(key, HEALTH_HOST_LABEL_KEY)) { + alert_cfg->host_labels = strdupz(value); if(rc->labels) { if(strcmp(rc->labels, value) != 0) error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'.", @@ -864,6 +963,7 @@ static int health_readfile(const char *filename, void *data) { rc->splabels = simple_pattern_create(rc->labels, NULL, SIMPLE_PATTERN_EXACT); } else if(hash == hash_plugin && !strcasecmp(key, HEALTH_PLUGIN_KEY)) { + alert_cfg->plugin = strdupz(value); freez(rc->plugin_match); simple_pattern_free(rc->plugin_pattern); @@ -871,6 +971,7 @@ static int health_readfile(const char *filename, void *data) { rc->plugin_pattern = simple_pattern_create(rc->plugin_match, NULL, SIMPLE_PATTERN_EXACT); } else if(hash == hash_module && !strcasecmp(key, HEALTH_MODULE_KEY)) { + alert_cfg->module = strdupz(value); freez(rc->module_match); simple_pattern_free(rc->module_pattern); @@ -884,6 +985,7 @@ static int health_readfile(const char *filename, void *data) { } else if(rt) { if(hash == hash_on && !strcasecmp(key, HEALTH_ON_KEY)) { + alert_cfg->on = strdupz(value); if(rt->context) { if(strcmp(rt->context, value) != 0) error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", @@ -895,6 +997,7 @@ static int health_readfile(const char *filename, void *data) { rt->hash_context = simple_hash(rt->context); } else if(hash == hash_class && !strcasecmp(key, HEALTH_CLASS_KEY)) { + alert_cfg->classification = strdupz(value); if(rt->classification) { if(strcmp(rt->classification, value) != 0) error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", @@ -906,6 +1009,7 @@ static int health_readfile(const char *filename, void *data) { strip_quotes(rt->classification); } else if(hash == hash_component && !strcasecmp(key, HEALTH_COMPONENT_KEY)) { + alert_cfg->component = strdupz(value); if(rt->component) { if(strcmp(rt->component, value) != 0) error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", @@ -917,6 +1021,7 @@ static int health_readfile(const char *filename, void *data) { strip_quotes(rt->component); } else if(hash == hash_type && !strcasecmp(key, HEALTH_TYPE_KEY)) { + alert_cfg->type = strdupz(value); if(rt->type) { if(strcmp(rt->type, value) != 0) error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'. 
Using ('%s').", @@ -928,6 +1033,7 @@ static int health_readfile(const char *filename, void *data) { strip_quotes(rt->type); } else if(hash == hash_families && !strcasecmp(key, HEALTH_FAMILIES_KEY)) { + alert_cfg->families = strdupz(value); freez(rt->family_match); simple_pattern_free(rt->family_pattern); @@ -935,6 +1041,7 @@ static int health_readfile(const char *filename, void *data) { rt->family_pattern = simple_pattern_create(rt->family_match, NULL, SIMPLE_PATTERN_EXACT); } else if(hash == hash_plugin && !strcasecmp(key, HEALTH_PLUGIN_KEY)) { + alert_cfg->plugin = strdupz(value); freez(rt->plugin_match); simple_pattern_free(rt->plugin_pattern); @@ -942,6 +1049,7 @@ static int health_readfile(const char *filename, void *data) { rt->plugin_pattern = simple_pattern_create(rt->plugin_match, NULL, SIMPLE_PATTERN_EXACT); } else if(hash == hash_module && !strcasecmp(key, HEALTH_MODULE_KEY)) { + alert_cfg->module = strdupz(value); freez(rt->module_match); simple_pattern_free(rt->module_pattern); @@ -949,6 +1057,7 @@ static int health_readfile(const char *filename, void *data) { rt->module_pattern = simple_pattern_create(rt->module_match, NULL, SIMPLE_PATTERN_EXACT); } else if(hash == hash_charts && !strcasecmp(key, HEALTH_CHARTS_KEY)) { + alert_cfg->charts = strdupz(value); freez(rt->charts_match); simple_pattern_free(rt->charts_pattern); @@ -956,18 +1065,32 @@ static int health_readfile(const char *filename, void *data) { rt->charts_pattern = simple_pattern_create(rt->charts_match, NULL, SIMPLE_PATTERN_EXACT); } else if(hash == hash_lookup && !strcasecmp(key, HEALTH_LOOKUP_KEY)) { + alert_cfg->lookup = strdupz(value); health_parse_db_lookup(line, filename, value, &rt->group, &rt->after, &rt->before, &rt->update_every, &rt->options, &rt->dimensions, &rt->foreachdim); if(rt->foreachdim) { rt->spdim = health_pattern_from_foreach(rt->foreachdim); } + if (rt->after) { + if (rt->dimensions) + alert_cfg->p_db_lookup_dimensions = strdupz(rt->dimensions); + if (rt->group) + alert_cfg->p_db_lookup_method = strdupz(group_method2string(rt->group)); + alert_cfg->p_db_lookup_options = rt->options; + alert_cfg->p_db_lookup_after = rt->after; + alert_cfg->p_db_lookup_before = rt->before; + alert_cfg->p_update_every = rt->update_every; + } } else if(hash == hash_every && !strcasecmp(key, HEALTH_EVERY_KEY)) { + alert_cfg->every = strdupz(value); if(!config_parse_duration(value, &rt->update_every)) error("Health configuration at line %zu of file '%s' for template '%s' at key '%s' cannot parse duration: '%s'.", line, filename, rt->name, key, value); + alert_cfg->p_update_every = rt->update_every; } else if(hash == hash_green && !strcasecmp(key, HEALTH_GREEN_KEY)) { + alert_cfg->green = strdupz(value); char *e; rt->green = str2ld(value, &e); if(e && *e) { @@ -976,6 +1099,7 @@ static int health_readfile(const char *filename, void *data) { } } else if(hash == hash_red && !strcasecmp(key, HEALTH_RED_KEY)) { + alert_cfg->red = strdupz(value); char *e; rt->red = str2ld(value, &e); if(e && *e) { @@ -984,6 +1108,7 @@ static int health_readfile(const char *filename, void *data) { } } else if(hash == hash_calc && !strcasecmp(key, HEALTH_CALC_KEY)) { + alert_cfg->calc = strdupz(value); const char *failed_at = NULL; int error = 0; rt->calculation = expression_parse(value, &failed_at, &error); @@ -993,6 +1118,7 @@ static int health_readfile(const char *filename, void *data) { } } else if(hash == hash_warn && !strcasecmp(key, HEALTH_WARN_KEY)) { + alert_cfg->warn = strdupz(value); const char *failed_at = NULL; int error = 0; 
rt->warning = expression_parse(value, &failed_at, &error); @@ -1002,6 +1128,7 @@ static int health_readfile(const char *filename, void *data) { } } else if(hash == hash_crit && !strcasecmp(key, HEALTH_CRIT_KEY)) { + alert_cfg->crit = strdupz(value); const char *failed_at = NULL; int error = 0; rt->critical = expression_parse(value, &failed_at, &error); @@ -1011,6 +1138,7 @@ static int health_readfile(const char *filename, void *data) { } } else if(hash == hash_exec && !strcasecmp(key, HEALTH_EXEC_KEY)) { + alert_cfg->exec = strdupz(value); if(rt->exec) { if(strcmp(rt->exec, value) != 0) error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", @@ -1021,6 +1149,7 @@ static int health_readfile(const char *filename, void *data) { rt->exec = strdupz(value); } else if(hash == hash_recipient && !strcasecmp(key, HEALTH_RECIPIENT_KEY)) { + alert_cfg->to = strdupz(value); if(rt->recipient) { if(strcmp(rt->recipient, value) != 0) error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", @@ -1031,6 +1160,7 @@ static int health_readfile(const char *filename, void *data) { rt->recipient = strdupz(value); } else if(hash == hash_units && !strcasecmp(key, HEALTH_UNITS_KEY)) { + alert_cfg->units = strdupz(value); if(rt->units) { if(strcmp(rt->units, value) != 0) error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", @@ -1042,6 +1172,7 @@ static int health_readfile(const char *filename, void *data) { strip_quotes(rt->units); } else if(hash == hash_info && !strcasecmp(key, HEALTH_INFO_KEY)) { + alert_cfg->info = strdupz(value); if(rt->info) { if(strcmp(rt->info, value) != 0) error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').", @@ -1053,17 +1184,21 @@ static int health_readfile(const char *filename, void *data) { strip_quotes(rt->info); } else if(hash == hash_delay && !strcasecmp(key, HEALTH_DELAY_KEY)) { + alert_cfg->delay = strdupz(value); health_parse_delay(line, filename, value, &rt->delay_up_duration, &rt->delay_down_duration, &rt->delay_max_duration, &rt->delay_multiplier); } else if(hash == hash_options && !strcasecmp(key, HEALTH_OPTIONS_KEY)) { + alert_cfg->options = strdupz(value); rt->options |= health_parse_options(value); } else if(hash == hash_repeat && !strcasecmp(key, HEALTH_REPEAT_KEY)){ + alert_cfg->repeat = strdupz(value); health_parse_repeat(line, filename, value, &rt->warn_repeat_every, &rt->crit_repeat_every); } else if(hash == hash_host_label && !strcasecmp(key, HEALTH_HOST_LABEL_KEY)) { + alert_cfg->host_labels = strdupz(value); if(rt->labels) { if(strcmp(rt->labels, value) != 0) error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. 
Using ('%s').", @@ -1089,16 +1224,20 @@ static int health_readfile(const char *filename, void *data) { if(rc) { //health_add_alarms_loop(host, rc, ignore_this) ; - if(ignore_this || !rrdcalc_add_alarm_from_config(host, rc)) { + if(ignore_this || !alert_hash_and_store_config(rc->config_hash_id, alert_cfg) || !rrdcalc_add_alarm_from_config(host, rc)) { rrdcalc_free(rc); } } if(rt) { - if(ignore_this || !rrdcalctemplate_add_template_from_config(host, rt)) + if(ignore_this || !alert_hash_and_store_config(rt->config_hash_id, alert_cfg) || !rrdcalctemplate_add_template_from_config(host, rt)) { rrdcalctemplate_free(rt); + } } + if (alert_cfg) + alert_config_free(alert_cfg); + fclose(fp); return 1; } diff --git a/health/health_json.c b/health/health_json.c index 4df44611c..a21d5a4fd 100644 --- a/health/health_json.c +++ b/health/health_json.c @@ -14,12 +14,19 @@ void health_string2json(BUFFER *wb, const char *prefix, const char *label, const } void health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, RRDHOST *host) { + char *edit_command = ae->source ? health_edit_command_from_source(ae->source) : strdupz("UNKNOWN=0"); + char config_hash_id[GUID_LEN + 1]; + uuid_unparse_lower(ae->config_hash_id, config_hash_id); + buffer_sprintf(wb, "\n\t{\n" "\t\t\"hostname\": \"%s\",\n" + "\t\t\"utc_offset\": %d,\n" + "\t\t\"timezone\": \"%s\",\n" "\t\t\"unique_id\": %u,\n" "\t\t\"alarm_id\": %u,\n" "\t\t\"alarm_event_id\": %u,\n" + "\t\t\"config_hash_id\": \"%s\",\n" "\t\t\"name\": \"%s\",\n" "\t\t\"chart\": \"%s\",\n" "\t\t\"family\": \"%s\",\n" @@ -34,6 +41,7 @@ void health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, RRDHOST *host) "\t\t\"recipient\": \"%s\",\n" "\t\t\"exec_code\": %d,\n" "\t\t\"source\": \"%s\",\n" + "\t\t\"command\": \"%s\",\n" "\t\t\"units\": \"%s\",\n" "\t\t\"when\": %lu,\n" "\t\t\"duration\": %lu,\n" @@ -49,9 +57,12 @@ void health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, RRDHOST *host) "\t\t\"last_repeat\": \"%lu\",\n" "\t\t\"silenced\": \"%s\",\n" , host->hostname + , host->utc_offset + , host->abbrev_timezone , ae->unique_id , ae->alarm_id , ae->alarm_event_id + , config_hash_id , ae->name , ae->chart , ae->family @@ -66,6 +77,7 @@ void health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, RRDHOST *host) , ae->recipient?ae->recipient:host->health_default_recipient , ae->exec_code , ae->source + , edit_command , ae->units?ae->units:"" , (unsigned long)ae->when , (unsigned long)ae->duration @@ -114,6 +126,7 @@ void health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, RRDHOST *host) buffer_strcat(wb, "\t}"); freez(replaced_info); + freez(edit_command); } void health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after, char *chart) { @@ -178,9 +191,13 @@ static inline void health_rrdcalc2json_nolock(RRDHOST *host, BUFFER *wb, RRDCALC } } + char hash_id[GUID_LEN + 1]; + uuid_unparse_lower(rc->config_hash_id, hash_id); + buffer_sprintf(wb, "\t\t\"%s.%s\": {\n" "\t\t\t\"id\": %lu,\n" + "\t\t\t\"config_hash_id\": \"%s\",\n" "\t\t\t\"name\": \"%s\",\n" "\t\t\t\"chart\": \"%s\",\n" "\t\t\t\"family\": \"%s\",\n" @@ -212,6 +229,7 @@ static inline void health_rrdcalc2json_nolock(RRDHOST *host, BUFFER *wb, RRDCALC "\t\t\t\"last_repeat\": \"%lu\",\n" , rc->chart, rc->name , (unsigned long)rc->id + , hash_id , rc->name , rc->chart , (rc->rrdset && rc->rrdset->family)?rc->rrdset->family:"" diff --git a/health/health_log.c b/health/health_log.c index de0a0883b..d20085d9e 100644 --- a/health/health_log.c +++ b/health/health_log.c @@ -38,39 +38,41 @@ static 
inline void health_log_rotate(RRDHOST *host) { } if(unlikely(host->health_log_entries_written > rotate_every)) { - health_alarm_log_close(host); + if(unlikely(host->health_log_fp)) { + health_alarm_log_close(host); - char old_filename[FILENAME_MAX + 1]; - snprintfz(old_filename, FILENAME_MAX, "%s.old", host->health_log_filename); + char old_filename[FILENAME_MAX + 1]; + snprintfz(old_filename, FILENAME_MAX, "%s.old", host->health_log_filename); - if(unlink(old_filename) == -1 && errno != ENOENT) - error("HEALTH [%s]: cannot remove old alarms log file '%s'", host->hostname, old_filename); + if(unlink(old_filename) == -1 && errno != ENOENT) + error("HEALTH [%s]: cannot remove old alarms log file '%s'", host->hostname, old_filename); - if(link(host->health_log_filename, old_filename) == -1 && errno != ENOENT) - error("HEALTH [%s]: cannot move file '%s' to '%s'.", host->hostname, host->health_log_filename, old_filename); + if(link(host->health_log_filename, old_filename) == -1 && errno != ENOENT) + error("HEALTH [%s]: cannot move file '%s' to '%s'.", host->hostname, host->health_log_filename, old_filename); - if(unlink(host->health_log_filename) == -1 && errno != ENOENT) - error("HEALTH [%s]: cannot remove old alarms log file '%s'", host->hostname, host->health_log_filename); + if(unlink(host->health_log_filename) == -1 && errno != ENOENT) + error("HEALTH [%s]: cannot remove old alarms log file '%s'", host->hostname, host->health_log_filename); - // open it with truncate - host->health_log_fp = fopen(host->health_log_filename, "w"); + // open it with truncate + host->health_log_fp = fopen(host->health_log_filename, "w"); - if(host->health_log_fp) - fclose(host->health_log_fp); - else - error("HEALTH [%s]: cannot truncate health log '%s'", host->hostname, host->health_log_filename); + if(host->health_log_fp) + fclose(host->health_log_fp); + else + error("HEALTH [%s]: cannot truncate health log '%s'", host->hostname, host->health_log_filename); - host->health_log_fp = NULL; + host->health_log_fp = NULL; - host->health_log_entries_written = 0; - health_alarm_log_open(host); + host->health_log_entries_written = 0; + health_alarm_log_open(host); + } } } inline void health_label_log_save(RRDHOST *host) { health_log_rotate(host); - if(likely(host->health_log_fp)) { + if(unlikely(host->health_log_fp)) { BUFFER *wb = buffer_create(1024); rrdhost_check_rdlock(host); netdata_rwlock_rdlock(&host->labels.labels_rwlock); @@ -101,7 +103,7 @@ inline void health_label_log_save(RRDHOST *host) { inline void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae) { health_log_rotate(host); - if(likely(host->health_log_fp)) { + if(unlikely(host->health_log_fp)) { if(unlikely(fprintf(host->health_log_fp , "%c\t%s" "\t%08x\t%08x\t%08x\t%08x\t%08x" @@ -155,13 +157,12 @@ inline void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae) { ae->flags |= HEALTH_ENTRY_FLAG_SAVED; host->health_log_entries_written++; } - } + }else + sql_health_alarm_log_save(host, ae); + #ifdef ENABLE_ACLK if (netdata_cloud_setting) { - if ((ae->new_status == RRDCALC_STATUS_WARNING || ae->new_status == RRDCALC_STATUS_CRITICAL) || - ((ae->old_status == RRDCALC_STATUS_WARNING || ae->old_status == RRDCALC_STATUS_CRITICAL))) { - aclk_update_alarm(host, ae); - } + sql_queue_alarm_to_aclk(host, ae); } #endif } @@ -368,7 +369,7 @@ static inline ssize_t health_alarm_log_read(RRDHOST *host, FILE *fp, const char ae->last_repeat = last_repeat; - if (likely(entries > 28)) { + if (likely(entries > 30)) { freez(ae->classification); ae->classification = 
strdupz(pointers[28]); if(!*ae->classification) { freez(ae->classification); ae->classification = NULL; } @@ -392,9 +393,13 @@ static inline ssize_t health_alarm_log_read(RRDHOST *host, FILE *fp, const char if(unlikely(*pointers[0] == 'A')) { ae->next = host->health_log.alarms; host->health_log.alarms = ae; + sql_health_alarm_log_insert(host, ae); loaded++; } - else updated++; + else { + sql_health_alarm_log_update(host, ae); + updated++; + } if(unlikely(ae->unique_id > host->health_max_unique_id)) host->health_max_unique_id = ae->unique_id; @@ -444,8 +449,6 @@ inline void health_alarm_log_load(RRDHOST *host) { health_alarm_log_read(host, fp, host->health_log_filename); fclose(fp); } - - health_alarm_log_open(host); } @@ -456,6 +459,7 @@ inline ALARM_ENTRY* health_create_alarm_entry( RRDHOST *host, uint32_t alarm_id, uint32_t alarm_event_id, + uuid_t config_hash_id, time_t when, const char *name, const char *chart, @@ -487,6 +491,8 @@ inline ALARM_ENTRY* health_create_alarm_entry( ae->hash_chart = simple_hash(ae->chart); } + uuid_copy(ae->config_hash_id, *((uuid_t *) config_hash_id)); + if(family) ae->family = strdupz(family); diff --git a/health/notifications/alarm-notify.sh.in b/health/notifications/alarm-notify.sh.in index 9a3a80ad6..08a32ff10 100755 --- a/health/notifications/alarm-notify.sh.in +++ b/health/notifications/alarm-notify.sh.in @@ -239,6 +239,11 @@ else calc_param_values="${22}" # the values of the parameters in the expression, at the time of the evaluation total_warnings="${23}" # Total number of alarms in WARNING state total_critical="${24}" # Total number of alarms in CRITICAL state + total_warn_alarms="${25}" # List of alarms in warning state + total_crit_alarms="${26}" # List of alarms in critical state + classification="${27}" # The class field from .conf files + edit_command_line="${28}" # The command to edit the alarm, with the line number + sender_host="${29}" # The host sending this notification fi # ----------------------------------------------------------------------------- @@ -252,6 +257,17 @@ else host="${args_host}" fi +# ----------------------------------------------------------------------------- +# Do the same for sender_host (find a suitable hostname to use, if netdata did not supply a hostname) + +if [ -z ${sender_host} ]; then + this_host=$(hostname -s 2>/dev/null) + s_host="${this_host}" + sender_host="${this_host}" +else + s_host="${sender_host}" +fi + # ----------------------------------------------------------------------------- # screen statuses we don't need to send a notification @@ -303,7 +319,7 @@ SLACK_WEBHOOK_URL= # Microsoft Teams configs MSTEAMS_WEBHOOK_URL= -# Legacy Microsoft Teams configs for backwards compatability: +# Legacy Microsoft Teams configs for backwards compatibility: declare -A role_recipients_msteam # rocketchat configs @@ -810,6 +826,14 @@ date=$(date --date=@${when} "${date_format}" 2>/dev/null) [ -z "${date}" ] && date=$(date --date=@${when} 2>/dev/null) [ -z "${date}" ] && date=$(date 2>/dev/null) +# ----------------------------------------------------------------------------- +# get the date in utc the alarm happened + +date_utc=$(date --date=@${when} "${date_format}" -u 2>/dev/null) +[ -z "${date_utc}" ] && date_utc=$(date -u "${date_format}" 2>/dev/null) +[ -z "${date_utc}" ] && date_utc=$(date -u --date=@${when} 2>/dev/null) +[ -z "${date_utc}" ] && date_utc=$(date -u 2>/dev/null) + # ---------------------------------------------------------------------------- # prepare some extra headers if we've been asked to 
thread e-mails if [ "${SEND_EMAIL}" == "YES" ] && [ "${EMAIL_THREADING}" != "NO" ]; then @@ -915,7 +939,7 @@ send_email() { fi [ -n "${sender_email}" ] && opts+=(-f "${sender_email}") - [ -n "${sender_name}" ] && sendmail --help 2>&1 | grep -q "\-F " && opts+=(-F "${sender_name}") + [ -n "${sender_name}" ] && ${sendmail} -F 2>&1 | head -1 | grep -qv "sendmail: unrecognized option: F" && opts+=(-F "${sender_name}") if [ "${debug}" = "1" ]; then echo >&2 "--- BEGIN sendmail command ---" @@ -1364,15 +1388,15 @@ EOF )" # Replacing in the webhook CHANNEL string by the MS Teams channel name from conf file. - webhook="${webhook//CHANNEL/${channel}}" + cur_webhook="${webhook//CHANNEL/${channel}}" - httpcode=$(docurl -H "Content-Type: application/json" -d "${payload}" "${webhook}") + httpcode=$(docurl -H "Content-Type: application/json" -d "${payload}" "${cur_webhook}") if [ "${httpcode}" = "200" ]; then - info "sent Microsoft team notification for: ${host} ${chart}.${name} is ${status} to '${webhook}'" + info "sent Microsoft team notification for: ${host} ${chart}.${name} is ${status} to '${cur_webhook}'" sent=$((sent + 1)) else - error "failed to send Microsoft team notification for: ${host} ${chart}.${name} is ${status} to '${webhook}', with HTTP response status code ${httpcode}." + error "failed to send Microsoft team notification for: ${host} ${chart}.${name} is ${status} to '${cur_webhook}', with HTTP response status code ${httpcode}." fi done @@ -2113,12 +2137,12 @@ send_dynatrace() { [ "${SEND_DYNATRACE}" != "YES" ] && return 1 local dynatrace_url="${DYNATRACE_SERVER}/e/${DYNATRACE_SPACE}/api/v1/events" - local description="NetData Notification for: ${host} ${chart}.${name} is ${status}" + local description="Netdata Notification for: ${host} ${chart}.${name} is ${status}" local payload="" payload=$(cat <<EOF [the JSON event payload and the script lines between send_dynatrace() and the urlencode block were stripped during extraction] urlencode "${family}" >/dev/null url_family="${REPLY}" urlencode "${name}" >/dev/null url_name="${REPLY}" +urlencode "${value_string}" >/dev/null +url_value_string="${REPLY}" -redirect_params="host=${url_host}&chart=${url_chart}&family=${url_family}&alarm=${url_name}&alarm_unique_id=${unique_id}&alarm_id=${alarm_id}&alarm_event_id=${event_id}&alarm_when=${when}" +redirect_params="host=${url_host}&chart=${url_chart}&family=${url_family}&alarm=${url_name}&alarm_unique_id=${unique_id}&alarm_id=${alarm_id}&alarm_event_id=${event_id}&alarm_when=${when}&alarm_status=${status}&alarm_chart=${chart}&alarm_value=${url_value_string}" GOTOCLOUD=0 if [ "${NETDATA_REGISTRY_URL}" == "https://registry.my-netdata.io" ]; then @@ -2284,9 +2310,9 @@ fi if [ ${GOTOCLOUD} -eq 0 ]; then goto_url="${NETDATA_REGISTRY_URL}/goto-host-from-alarm.html?${redirect_params}" else - # Temporarily disable alarm redirection, as the cloud endpoint no longer exists. This functionality will be restored after discussion on #9487.
For now, just lead to netdata.cloud + # Re-allow alarm redirection, for alarms 2.0, new template + goto_url="${NETDATA_REGISTRY_CLOUD_BASE_URL}/alarms/redirect?agentId=${NETDATA_REGISTRY_UNIQUE_ID}&${redirect_params}" fi # the severity of the alarm @@ -2311,48 +2337,79 @@ alarm="${name//_/ } = ${value_string}" # the image of the alarm image="${images_base_url}/images/banner-icon-144x144.png" +# have a default email status, in case the following case does not catch it +status_email_subject="${status}" + # prepare the title based on status case "${status}" in CRITICAL) image="${images_base_url}/images/alert-128-red.png" + alarm_badge="${NETDATA_REGISTRY_CLOUD_BASE_URL}/static/email/img/label_critical.png" status_message="is critical" + status_email_subject="Critical" color="#ca414b" + rich_status_raised_for="Raised to critical, for ${non_clear_duration_txt}" + background_color="#FFEBEF" + border_color="#FF4136" + text_color="#FF4136" + action_text_color="#FFFFFF" ;; WARNING) image="${images_base_url}/images/alert-128-orange.png" + alarm_badge="${NETDATA_REGISTRY_CLOUD_BASE_URL}/static/email/img/label_warning.png" status_message="needs attention" + status_email_subject="Warning" color="#ffc107" + rich_status_raised_for="Raised to warning, for ${non_clear_duration_txt}" + background_color="#FFF8E1" + border_color="#FFC300" + text_color="#536775" + action_text_color="#35414A" ;; CLEAR) image="${images_base_url}/images/check-mark-2-128-green.png" + alarm_badge="${NETDATA_REGISTRY_CLOUD_BASE_URL}/static/email/img/label_recovered.png" status_message="recovered" + status_email_subject="Clear" color="#77ca6d" + rich_status_raised_for= + background_color="#E5F5E8" + border_color="#68C47D" + text_color="#00AB44" + action_text_color="#FFFFFF" ;; esac +# the html email subject +html_email_subject="${status_email_subject}, ${name} = ${value_string}, on ${host}" + if [ "${status}" = "CLEAR" ]; then severity="Recovered from ${old_status}" if [ ${non_clear_duration} -gt ${duration} ]; then raised_for="(alarm was raised for ${non_clear_duration_txt})" fi + rich_status_raised_for="Recovered from ${old_status,,}, ${raised_for}" # don't show the value when the status is CLEAR # for certain alarms, this value might not have any meaning alarm="${name//_/ } ${raised_for}" + html_email_subject="${status_email_subject}, ${name} ${raised_for}, on ${host}" elif { [ "${old_status}" = "WARNING" ] && [ "${status}" = "CRITICAL" ]; }; then severity="Escalated to ${status}" if [ ${non_clear_duration} -gt ${duration} ]; then raised_for="(alarm is raised for ${non_clear_duration_txt})" fi + rich_status_raised_for="Escalated to critical, ${raised_for}" elif { [ "${old_status}" = "CRITICAL" ] && [ "${status}" = "WARNING" ]; }; then severity="Demoted to ${status}" if [ ${non_clear_duration} -gt ${duration} ]; then raised_for="(alarm is raised for ${non_clear_duration_txt})" fi + rich_status_raised_for="Demoted to warning, ${raised_for}" else raised_for= @@ -2628,6 +2685,13 @@ Subject: ${host} ${status_message} - ${name//_/ } - ${chart} MIME-Version: 1.0 Content-Type: multipart/alternative; boundary="multipart-boundary" ${email_thread_headers} +X-Netdata-Severity: ${status,,} +X-Netdata-Alert-Name: $name +X-Netdata-Chart: $chart +X-Netdata-Family: $family +X-Netdata-Classification: $classification +X-Netdata-Host: $host +X-Netdata-Role: $roles This is a MIME-encoded multipart message @@ -2638,120 +2702,742 @@ EOF else +now=$(date "+%s") + +if [ -n "$total_warn_alarms" ]; then + while read -d, -r pair; do + IFS='=' read -r key val 
<<<"$pair" + + date_w=$(date --date=@${val} "${date_format}" 2>/dev/null) + [ -z "${date_w}" ] && date_w=$(date "${date_format}" 2>/dev/null) + [ -z "${date_w}" ] && date_w=$(date --date=@${val} 2>/dev/null) + [ -z "${date_w}" ] && date_w=$(date 2>/dev/null) + + elapsed=$((now - val)) + + duration4human ${elapsed} >/dev/null + elapsed_txt="${REPLY}" + + WARN_ALARMS+=" +
    + + + + + + +
    + +
    + + + + + + + + + +
    +
    ${key}
    +
    +
    ${date_w}
    +
    +
    + +
    + + + + + + +
    + + + + + + +
    +
    + Warning for ${elapsed_txt} +
    +
    +
    +
    + +
    +
    + " + + done <<<"$total_warn_alarms," +fi + +if [ -n "$total_crit_alarms" ]; then + while read -d, -r pair; do + IFS='=' read -r key val <<<"$pair" + + date_c=$(date --date=@${val} "${date_format}" 2>/dev/null) + [ -z "${date_c}" ] && date_c=$(date "${date_format}" 2>/dev/null) + [ -z "${date_c}" ] && date_c=$(date --date=@${val} 2>/dev/null) + [ -z "${date_c}" ] && date_c=$(date 2>/dev/null) + + elapsed=$((now - val)) + + duration4human ${elapsed} >/dev/null + elapsed_txt="${REPLY}" + + CRIT_ALARMS+=" +
    + + + + + + +
    + +
    + + + + + + + + + +
    +
    ${key}
    +
    +
    ${date_c}
    +
    +
    + +
    + + + + + + +
    + + + + + + +
    +
    + Critical for ${elapsed_txt} +
    +
    +
    +
    + +
    +
    + " + + done <<<"$total_crit_alarms," +fi + +if [ -n "$edit_command_line" ]; then + IFS='=' read -r edit_command line <<<"$edit_command_line" +fi + IFS='' read -r -d '' email_html_part < - - - - - - - - - -
    -
    - + + + + + + + + + + + + + + + + + + + + + + +
    + +
    +
    + + + + + +
    + +
    + + + + + + +
    + - + + +
    -
    netdata notification
    -
    + Netdata Logo +
    +
    +
    + +
    + + + + + + +
    + + - + + +
    -

    ${host} ${status_message}

    +
    +
    Notification
    +
    +
    +
    + +
    +
    + +
    + + + + + + +
    + +
    + + + + + + +
    + +
    + + + + + + +
    +
    ${name}
    +
    +
    + +
    + + + + + + +
    + + + + + + +
    + +
    +
    +
    + +
    +
    + +
    + + + + + + +
    + +
    + + + + + + +
    +
    on ${host}
    +
    + +
    +
    + +
    + + + + + + +
    + +
    + + + + + + +
    +
    ${value_string} +
    +
    +
    + +
    +
    + +
    + + + + + + +
    + +
    + + + + + + +
    +
    Details: ${info}
    +
    +
    + +
    +
    + +
    + + + + + + +
    + +
    + + + + + + +
    + + + + +
    +

    + GO TO CHART +

    +
    +
    +
    + +
    +
    + +
    +
    + +
    + +
    + + + + + + +
    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    Chart: + ${chart}
    +
    +
    Family: + ${family}
    +
    +
    ${rich_status_raised_for}
    +
    +

    +

    + +
    +
    On + ${date}
    +
    +
    By: + ${host}
    +
    +
    Global time: + ${date_utc}
    +
    +

    +

    + +
    +
    Classification: + ${classification}
    +
    +
    Role: + ${roles}
    +
    +
    + +
    +
    + +
    + + + + + + +
    + +
    + + + + + + +
    + + + + + + +
    + + + + + + +
    + +
    +
    +
    +
    + +
    + + + + + + +
    + + + + + + + + +
    +
    Want to know more about this alert?
    +
    +
    Discuss and troubleshoot with others on the Netdata community forums
    +
    +
    +
    + +
    +
    + +
    + + + + + + +
    + +
    + + + + + + +
    + + - + + +
    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - ${chart} - Chart -
    - ${alarm}${info_html} - Alarm -
    - ${family} - Family -
    - ${severity} - Severity -
    ${date} - ${raised_for_html} Time -
    - ${calc_expression} - Evaluated Expression -
    - ${calc_param_values} - Expression Variables -
    - The host has ${total_warnings} WARNING and ${total_critical} CRITICAL alarm(s) raised. -
    - View Netdata -
    The source of this alarm is line ${src}
    (alarms are configurable, edit this file to adapt the alarm to your needs) -
    Sent by - netdata, the real-time performance and health monitoring, on ${host}. -
    -
    +
    + + + + + + +
    + +
    +
    +
    +
    + +
    + + + + + + +
    + + + + + + + + + + + + +
    +
    Need to configure this alert?
    +
    +
    Edit this alert's configuration file by logging into $s_host and running the following command:
    +
    +
    ${edit_command}
    + The alarm to edit is at line ${line}
    +
    +
    +
    + +
    +
    + +
    + + + + + + +
    + +
    + + + + + + +
    + +
    + + + + + + +
    +
    The node has + ${total_warnings} warning + and + ${total_critical} critical + additional active alert(s)
    +
    + +
    +
    + ${CRIT_ALARMS} + ${WARN_ALARMS} + +
    +
    + +
    + + + + + + +
    + +
    + + + +
    + + + + -
    +
    © Netdata 2021 - The real-time performance and health monitoring
    +
    - +
    +
    +
    +
    + + + + + + EOF send_email < Tags --> Manually applied tags create the Tag -# The NetData alarm will be sent as a Dynatrace Event to be correlated with all those hosts tagged with this Tag +# The Netdata alarm will be sent as a Dynatrace Event to be correlated with all those hosts tagged with this Tag # you created. # Required DYNATRACE_TAG_VALUE="" # Change this to what you want -DYNATRACE_ANNOTATION_TYPE="NetData Alarm" +DYNATRACE_ANNOTATION_TYPE="Netdata Alarm" # This can be CUSTOM_INFO, CUSTOM_ANNOTATION, CUSTOM_CONFIGURATION, CUSTOM_DEPLOYMENT # Applying default value diff --git a/health/notifications/syslog/README.md b/health/notifications/syslog/README.md index 456394d2f..360f6844d 100644 --- a/health/notifications/syslog/README.md +++ b/health/notifications/syslog/README.md @@ -17,7 +17,7 @@ netdata WARNING on hostname at Tue Apr 3 09:00:00 EDT 2018: disk_space._ out of System log targets are configured as recipients in [`/etc/netdata/health_alarm_notify.conf`](https://github.com/netdata/netdata/blob/36bedc044584dea791fd29455bdcd287c3306cb2/conf.d/health_alarm_notify.conf#L534) (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`). -You can als configure per-role targets in the same file a bit further down. +You can also configure per-role targets in the same file a bit further down. Targets are defined as follows: diff --git a/libnetdata/config/appconfig.c b/libnetdata/config/appconfig.c index 6e4df2d09..37e9e7688 100644 --- a/libnetdata/config/appconfig.c +++ b/libnetdata/config/appconfig.c @@ -225,6 +225,31 @@ void appconfig_section_destroy_non_loaded(struct config *root, const char *secti error("Cannot remove section '%s' from config.", section); return; } + + appconfig_wrlock(root); + + if (root->first_section == co) { + root->first_section = co->next; + + if (root->last_section == co) + root->last_section = root->first_section; + } else { + struct section *co_cur = root->first_section, *co_prev = NULL; + + while(co_cur && co_cur != co) { + co_prev = co_cur; + co_cur = co_cur->next; + } + + if (co_cur) { + co_prev->next = co_cur->next; + + if (root->last_section == co_cur) + root->last_section = co_prev; + } + } + + appconfig_unlock(root); avl_destroy_lock(&co->values_index); freez(co->name); @@ -771,6 +796,7 @@ void appconfig_generate(struct config *root, BUFFER *wb, int only_changed) || !strcmp(co->name, CONFIG_SECTION_BACKEND) || !strcmp(co->name, CONFIG_SECTION_STREAM) || !strcmp(co->name, CONFIG_SECTION_HOST_LABEL) + || !strcmp(co->name, CONFIG_SECTION_ML) ) pri = 0; else if(!strncmp(co->name, "plugin:", 7)) pri = 1; diff --git a/libnetdata/config/appconfig.h b/libnetdata/config/appconfig.h index 246d1d5b9..bfc927353 100644 --- a/libnetdata/config/appconfig.h +++ b/libnetdata/config/appconfig.h @@ -91,6 +91,7 @@ #define CONFIG_SECTION_HEALTH "health" #define CONFIG_SECTION_BACKEND "backend" #define CONFIG_SECTION_STREAM "stream" +#define CONFIG_SECTION_ML "ml" #define CONFIG_SECTION_EXPORTING "exporting:global" #define CONFIG_SECTION_PROMETHEUS "prometheus:exporter" #define CONFIG_SECTION_HOST_LABEL "host labels" diff --git a/libnetdata/ebpf/ebpf.c b/libnetdata/ebpf/ebpf.c index 1f71f6a24..1ccaa7b41 100644 --- a/libnetdata/ebpf/ebpf.c +++ b/libnetdata/ebpf/ebpf.c @@ -64,13 +64,19 @@ int clean_kprobe_events(FILE *out, int pid, netdata_ebpf_events_t *ptr) //---------------------------------------------------------------------------------------------------------------------- -int get_kernel_version(char *out, int size) +/** + * Get Kernel 
version + * + * Get the current kernel from /proc and returns an integer value representing it + * + * @return it returns a value representing the kernel version. + */ +int ebpf_get_kernel_version() { char major[16], minor[16], patch[16]; char ver[VERSION_STRING_LEN]; char *version = ver; - out[0] = '\0'; int fd = open("/proc/sys/kernel/osrelease", O_RDONLY); if (fd < 0) return -1; @@ -104,10 +110,6 @@ int get_kernel_version(char *out, int size) *move++ = *version++; *move = '\0'; - fd = snprintf(out, (size_t)size, "%s.%s.%s", major, minor, patch); - if (fd > size) - error("The buffer to store kernel version is not smaller than necessary."); - return ((int)(str2l(major) * 65536) + (int)(str2l(minor) * 256) + (int)str2l(patch)); } @@ -272,14 +274,24 @@ char *ebpf_kernel_suffix(int version, int isrh) //---------------------------------------------------------------------------------------------------------------------- -int ebpf_update_kernel(ebpf_data_t *ed) +/** + * Update Kernel + * + * Update string used to load eBPF programs + * + * @param ks vector to store the value + * @param length available length to store kernel + * @param isrh Is a Red Hat distribution? + * @param version the kernel version + */ +void ebpf_update_kernel(char *ks, size_t length, int isrh, int version) { - char *kernel = ebpf_kernel_suffix(ed->running_on_kernel, (ed->isrh < 0) ? 0 : 1); - size_t length = strlen(kernel); - strncpyz(ed->kernel_string, kernel, length); - ed->kernel_string[length] = '\0'; - - return 0; + char *kernel = ebpf_kernel_suffix(version, (isrh < 0) ? 0 : 1); + size_t len = strlen(kernel); + if (len > length) + len = length - 1; + strncpyz(ks, kernel, len); + ks[len] = '\0'; } static int select_file(char *name, const char *program, size_t length, int mode, char *kernel_string) @@ -307,18 +319,27 @@ void ebpf_update_map_sizes(struct bpf_object *program, ebpf_module_t *em) if (!maps) return; + uint32_t apps_type = NETDATA_EBPF_MAP_PID | NETDATA_EBPF_MAP_RESIZABLE; bpf_map__for_each(map, program) { const char *map_name = bpf_map__name(map); int i = 0; ; while (maps[i].name) { ebpf_local_maps_t *w = &maps[i]; - if (w->user_input != w->internal_input && !strcmp(w->name, map_name)) { + if (w->type & NETDATA_EBPF_MAP_RESIZABLE) { + if (!strcmp(w->name, map_name)) { + if (w->user_input && w->user_input != w->internal_input) { #ifdef NETDATA_INTERNAL_CHECKS - info("Changing map %s from size %u to %u ", map_name, w->internal_input, w->user_input); + info("Changing map %s from size %u to %u ", map_name, w->internal_input, w->user_input); #endif - bpf_map__resize(map, w->user_input); + bpf_map__resize(map, w->user_input); + } else if (((w->type & apps_type) == apps_type) && (!em->apps_charts) && (!em->cgroup_charts)) { + w->user_input = ND_EBPF_DEFAULT_MIN_PID; + bpf_map__resize(map, w->user_input); + } + } } + i++; } } @@ -377,8 +398,59 @@ static struct bpf_link **ebpf_attach_programs(struct bpf_object *obj, size_t len return links; } +static void ebpf_update_maps(ebpf_module_t *em, struct bpf_object *obj) +{ + if (!em->maps) + return; + + ebpf_local_maps_t *maps = em->maps; + struct bpf_map *map; + bpf_map__for_each(map, obj) + { + int fd = bpf_map__fd(map); + if (maps) { + const char *map_name = bpf_map__name(map); + int j = 0; ; + while (maps[j].name) { + ebpf_local_maps_t *w = &maps[j]; + if (w->map_fd == ND_EBPF_MAP_FD_NOT_INITIALIZED && !strcmp(map_name, w->name)) + w->map_fd = fd; + + j++; + } + } + } +} + +static void ebpf_update_controller(ebpf_module_t *em, struct bpf_object *obj) +{ + 
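The function opened here writes a flag into the dedicated "controller" BPF map so the kernel-side program knows whether per-PID (apps/cgroup) accounting is wanted at all; the resize logic above shrinks the PID tables to ND_EBPF_DEFAULT_MIN_PID when nobody consumes them. A minimal sketch of that handshake, assuming only libbpf's bpf_map_update_elem(); the wrapper name and error reporting are illustrative, not part of the patch:

#include <bpf/bpf.h>  // bpf_map_update_elem()
#include <cstdio>

// Mirror of the controller update: key NETDATA_CONTROLLER_APPS_ENABLED (0),
// value em->apps_charts | em->cgroup_charts.
static int nd_set_apps_enabled(int controller_fd, bool apps, bool cgroups) {
    unsigned int key = 0;
    int value = (apps ? 1 : 0) | (cgroups ? 1 : 0);
    int ret = bpf_map_update_elem(controller_fd, &key, &value, 0 /* BPF_ANY */);
    if (ret)
        fprintf(stderr, "controller update failed for key %u\n", key);
    return ret;
}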
ebpf_local_maps_t *maps = em->maps; + if (!maps) + return; + + struct bpf_map *map; + bpf_map__for_each(map, obj) + { + size_t i = 0; + while (maps[i].name) { + ebpf_local_maps_t *w = &maps[i]; + if (w->map_fd != ND_EBPF_MAP_FD_NOT_INITIALIZED && (w->type & NETDATA_EBPF_MAP_CONTROLLER)) { + w->type &= ~NETDATA_EBPF_MAP_CONTROLLER; + w->type |= NETDATA_EBPF_MAP_CONTROLLER_UPDATED; + + uint32_t key = NETDATA_CONTROLLER_APPS_ENABLED; + int value = em->apps_charts | em->cgroup_charts; + int ret = bpf_map_update_elem(w->map_fd, &key, &value, 0); + if (ret) + error("Add key(%u) for controller table failed.", key); + } + i++; + } + } +} + struct bpf_link **ebpf_load_program(char *plugins_dir, ebpf_module_t *em, char *kernel_string, - struct bpf_object **obj, int *map_fd) + struct bpf_object **obj) { char lpath[4096]; char lname[128]; @@ -403,13 +475,8 @@ struct bpf_link **ebpf_load_program(char *plugins_dir, ebpf_module_t *em, char * return NULL; } - struct bpf_map *map; - size_t i = 0; - bpf_map__for_each(map, *obj) - { - map_fd[i] = bpf_map__fd(map); - i++; - } + ebpf_update_maps(em, *obj); + ebpf_update_controller(em, *obj); size_t count_programs = ebpf_count_programs(*obj); @@ -462,7 +529,7 @@ void ebpf_update_names(ebpf_specify_name_t *opt, ebpf_module_t *em) //---------------------------------------------------------------------------------------------------------------------- -void ebpf_mount_config_name(char *filename, size_t length, char *path, char *config) +void ebpf_mount_config_name(char *filename, size_t length, char *path, const char *config) { snprintf(filename, length, "%s/ebpf.d/%s", path, config); } @@ -475,7 +542,7 @@ int ebpf_load_config(struct config *config, char *filename) static netdata_run_mode_t ebpf_select_mode(char *mode) { - if (!strcasecmp(mode, "return")) + if (!strcasecmp(mode,EBPF_CFG_LOAD_MODE_RETURN )) return MODE_RETURN; else if (!strcasecmp(mode, "dev")) return MODE_DEVMODE; @@ -483,17 +550,31 @@ static netdata_run_mode_t ebpf_select_mode(char *mode) return MODE_ENTRY; } -void ebpf_update_module_using_config(ebpf_module_t *modules, struct config *cfg) +static void ebpf_select_mode_string(char *output, size_t len, netdata_run_mode_t sel) { - char *mode = appconfig_get(cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_LOAD_MODE, EBPF_CFG_LOAD_MODE_DEFAULT); + if (sel == MODE_RETURN) + strncpyz(output, EBPF_CFG_LOAD_MODE_RETURN, len); + else + strncpyz(output, EBPF_CFG_LOAD_MODE_DEFAULT, len); +} + +/** + * @param modules structure that will be updated + */ +void ebpf_update_module_using_config(ebpf_module_t *modules) +{ + char default_value[EBPF_MAX_MODE_LENGTH + 1]; + ebpf_select_mode_string(default_value, EBPF_MAX_MODE_LENGTH, modules->mode); + char *mode = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_LOAD_MODE, default_value); modules->mode = ebpf_select_mode(mode); - modules->update_time = (int)appconfig_get_number(cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_UPDATE_EVERY, 1); + modules->update_every = (int)appconfig_get_number(modules->cfg, EBPF_GLOBAL_SECTION, + EBPF_CFG_UPDATE_EVERY, modules->update_every); - modules->apps_charts = appconfig_get_boolean(cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_APPLICATION, - CONFIG_BOOLEAN_YES); + modules->apps_charts = appconfig_get_boolean(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_APPLICATION, + modules->apps_charts); - modules->pid_map_size = (uint32_t)appconfig_get_number(cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_PID_SIZE, + modules->pid_map_size = (uint32_t)appconfig_get_number(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_PID_SIZE, 
modules->pid_map_size); } @@ -507,20 +588,249 @@ void ebpf_update_module_using_config(ebpf_module_t *modules, struct config *cfg) * update the variables. * * @param em the module structure - * @param cfg the configuration structure - * @param cfg_file the filename to load */ -void ebpf_update_module(ebpf_module_t *em, struct config *cfg, char *cfg_file) +void ebpf_update_module(ebpf_module_t *em) { char filename[FILENAME_MAX+1]; - ebpf_mount_config_name(filename, FILENAME_MAX, ebpf_user_config_dir, cfg_file); - if (!ebpf_load_config(cfg, filename)) { - ebpf_mount_config_name(filename, FILENAME_MAX, ebpf_stock_config_dir, cfg_file); - if (!ebpf_load_config(cfg, filename)) { - error("Cannot load the ebpf configuration file %s", cfg_file); + ebpf_mount_config_name(filename, FILENAME_MAX, ebpf_user_config_dir, em->config_file); + if (!ebpf_load_config(em->cfg, filename)) { + ebpf_mount_config_name(filename, FILENAME_MAX, ebpf_stock_config_dir, em->config_file); + if (!ebpf_load_config(em->cfg, filename)) { + error("Cannot load the ebpf configuration file %s", em->config_file); return; } } - ebpf_update_module_using_config(em, cfg); + ebpf_update_module_using_config(em); +} + +//---------------------------------------------------------------------------------------------------------------------- + +/** + * Load Address + * + * Helper used to get address from /proc/kallsym + * + * @param fa address structure + * @param fd file descriptor loaded inside kernel. + */ +void ebpf_load_addresses(ebpf_addresses_t *fa, int fd) +{ + if (fa->addr) + return ; + + procfile *ff = procfile_open("/proc/kallsyms", " \t:", PROCFILE_FLAG_DEFAULT); + if (!ff) + return; + + ff = procfile_readall(ff); + if (!ff) + return; + + fa->hash = simple_hash(fa->function); + + size_t lines = procfile_lines(ff), l; + for(l = 0; l < lines ;l++) { + char *fcnt = procfile_lineword(ff, l, 2); + uint32_t hash = simple_hash(fcnt); + if (fa->hash == hash && !strcmp(fcnt, fa->function)) { + char addr[128]; + snprintf(addr, 127, "0x%s", procfile_lineword(ff, l, 0)); + fa->addr = (unsigned long) strtoul(addr, NULL, 16); + uint32_t key = 0; + bpf_map_update_elem(fd, &key, &fa->addr, BPF_ANY); + } + } + + procfile_close(ff); +} + +//---------------------------------------------------------------------------------------------------------------------- + +/** + * Fill Algorithms + * + * Set one unique dimension for all vector position. + * + * @param algorithms the output vector + * @param length number of elements of algorithms vector + * @param algorithm algorithm used on charts. 
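+ *
+ * Example: after ebpf_fill_algorithms(algs, NETDATA_EBPF_HIST_MAX_BINS, alg),
+ * every entry algs[0 .. NETDATA_EBPF_HIST_MAX_BINS - 1] equals alg (the
+ * names algs and alg are illustrative).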
+*/ +void ebpf_fill_algorithms(int *algorithms, size_t length, int algorithm) +{ + size_t i; + for (i = 0; i < length; i++) { + algorithms[i] = algorithm; + } +} + +/** + * Fill Histogram dimension + * + * Fill the histogram dimension with the specified ranges + */ +char **ebpf_fill_histogram_dimension(size_t maximum) +{ + char *dimensions[] = { "us", "ms", "s"}; + int previous_dim = 0, current_dim = 0; + uint32_t previous_level = 1000, current_level = 1000; + uint32_t previous_divisor = 1, current_divisor = 1; + uint32_t current = 1, previous = 0; + uint32_t selector; + char **out = callocz(maximum, sizeof(char *)); + char range[128]; + size_t end = maximum - 1; + for (selector = 0; selector < end; selector++) { + snprintf(range, 127, "%u%s->%u%s", previous/previous_divisor, dimensions[previous_dim], + current/current_divisor, dimensions[current_dim]); + out[selector] = strdupz(range); + previous = current; + current <<= 1; + + if (previous_dim != 2 && previous > previous_level) { + previous_dim++; + + previous_divisor *= 1000; + previous_level *= 1000; + } + + if (current_dim != 2 && current > current_level) { + current_dim++; + + current_divisor *= 1000; + current_level *= 1000; + } + } + snprintf(range, 127, "%u%s->+Inf", previous/previous_divisor, dimensions[previous_dim]); + out[selector] = strdupz(range); + + return out; +} + +/** + * Histogram dimension cleanup + * + * Cleanup dimensions allocated with function ebpf_fill_histogram_dimension + * + * @param ptr + * @param length + */ +void ebpf_histogram_dimension_cleanup(char **ptr, size_t length) +{ + size_t i; + for (i = 0; i < length; i++) { + freez(ptr[i]); + } + freez(ptr); +} + +//---------------------------------------------------------------------------------------------------------------------- + +/** + * Open tracepoint path + * + * @param filename pointer to store the path + * @param length file length + * @param subsys is the name of your subsystem. + * @param eventname is the name of the event to trace. + * @param flags flags used with syscall open + * + * @return it returns a positive value on success and a negative otherwise. + */ +static inline int ebpf_open_tracepoint_path(char *filename, size_t length, char *subsys, char *eventname, int flags) +{ + snprintfz(filename, length, "%s/events/%s/%s/enable", NETDATA_DEBUGFS, subsys, eventname); + return open(filename, flags, 0); +} + +/** + * Is tracepoint enabled + * + * Check whether the tracepoint is enabled. + * + * @param subsys is the name of your subsystem. + * @param eventname is the name of the event to trace. + * + * @return it returns 1 when it is enabled, 0 when it is disabled and -1 on error. + */ +int ebpf_is_tracepoint_enabled(char *subsys, char *eventname) +{ + char text[FILENAME_MAX + 1]; + int fd = ebpf_open_tracepoint_path(text, FILENAME_MAX, subsys, eventname, O_RDONLY); + if (fd < 0) { + return -1; + } + + ssize_t length = read(fd, text, 1); + if (length != 1) { + close(fd); + return -1; + } + close(fd); + + return (text[0] == '1') ? CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_NO; +} + +/** + * Change Tracing values + * + * Change value for specific tracepoint enabling or disabling it according value given. + * + * @param subsys is the name of your subsystem. + * @param eventname is the name of the event to trace. + * @param value a value to enable (1) or disable (0) a tracepoint. 
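+ *
+ * For reference, this mirrors the tracefs interface, e.g.
+ *   echo 1 > /sys/kernel/debug/tracing/events/<subsys>/<eventname>/enable
+ * (the path prefix is NETDATA_DEBUGFS, built by ebpf_open_tracepoint_path()).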
+ * + * @return It returns 0 on success and -1 otherwise + */ +static int ebpf_change_tracing_values(char *subsys, char *eventname, char *value) +{ + if (strcmp("0", value) && strcmp("1", value)) { + error("Invalid value given to either enable or disable a tracepoint."); + return -1; + } + + char filename[1024]; + int fd = ebpf_open_tracepoint_path(filename, 1023, subsys, eventname, O_WRONLY); + if (fd < 0) { + return -1; + } + + ssize_t written = write(fd, value, strlen(value)); + if (written < 0) { + close(fd); + return -1; + } + + close(fd); + return 0; +} + +/** + * Enable tracing values + * + * Enable a tracepoint on a system + * + * @param subsys is the name of your subsystem. + * @param eventname is the name of the event to trace. + * + * @return It returns 0 on success and -1 otherwise + */ +int ebpf_enable_tracing_values(char *subsys, char *eventname) +{ + return ebpf_change_tracing_values(subsys, eventname, "1"); +} + +/** + * Disable tracing values + * + * Disable tracing points enabled by collector + * + * @param subsys is the name of your subsystem. + * @param eventname is the name of the event to trace. + * + * @return It returns 0 on success and -1 otherwise + */ +int ebpf_disable_tracing_values(char *subsys, char *eventname) +{ + return ebpf_change_tracing_values(subsys, eventname, "0"); } diff --git a/libnetdata/ebpf/ebpf.h b/libnetdata/ebpf/ebpf.h index bc55d9595..73128f529 100644 --- a/libnetdata/ebpf/ebpf.h +++ b/libnetdata/ebpf/ebpf.h @@ -5,6 +5,7 @@ #include #include +#include // Necessary for stdtoul #define NETDATA_DEBUGFS "/sys/kernel/debug/tracing/" #define NETDATA_KALLSYMS "/proc/kallsyms" @@ -14,10 +15,12 @@ #define EBPF_CFG_LOAD_MODE "ebpf load mode" #define EBPF_CFG_LOAD_MODE_DEFAULT "entry" #define EBPF_CFG_LOAD_MODE_RETURN "return" +#define EBPF_MAX_MODE_LENGTH 6 #define EBPF_CFG_UPDATE_EVERY "update every" #define EBPF_CFG_PID_SIZE "pid table size" #define EBPF_CFG_APPLICATION "apps" +#define EBPF_CFG_CGROUP "cgroups" /** * The next magic number is got doing the following math: @@ -56,6 +59,13 @@ */ #define NETDATA_EBPF_KERNEL_5_10 330240 +/** + * Kernel 5.0 + * + * 327680 = 5*65536 +256*0 + */ +#define NETDATA_EBPF_KERNEL_5_0 327680 + /** * Kernel 4.17 * @@ -80,6 +90,16 @@ #define VERSION_STRING_LEN 256 #define EBPF_KERNEL_REJECT_LIST_FILE "ebpf_kernel_reject_list.txt" +#define ND_EBPF_DEFAULT_MIN_PID 1U +#define ND_EBPF_MAP_FD_NOT_INITIALIZED (int)-1 + +typedef struct ebpf_addresses { + char *function; + uint32_t hash; + // We use long as address, because it matches system length + unsigned long addr; +} ebpf_addresses_t; + extern char *ebpf_user_config_dir; extern char *ebpf_stock_config_dir; @@ -99,10 +119,26 @@ typedef enum { #define ND_EBPF_DEFAULT_PID_SIZE 32768U +enum netdata_ebpf_map_type { + NETDATA_EBPF_MAP_STATIC = 0, + NETDATA_EBPF_MAP_RESIZABLE = 1, + NETDATA_EBPF_MAP_CONTROLLER = 2, + NETDATA_EBPF_MAP_CONTROLLER_UPDATED = 4, + NETDATA_EBPF_MAP_PID = 8 +}; + +enum netdata_controller { + NETDATA_CONTROLLER_APPS_ENABLED, + + NETDATA_CONTROLLER_END +}; + typedef struct ebpf_local_maps { char *name; uint32_t internal_input; uint32_t user_input; + uint32_t type; + int map_fd; } ebpf_local_maps_t; typedef struct ebpf_specify_name { @@ -117,9 +153,10 @@ typedef struct ebpf_module { const char *config_name; int enabled; void *(*start_routine)(void *); - int update_time; + int update_every; int global_charts; int apps_charts; + int cgroup_charts; netdata_run_mode_t mode; uint32_t thread_id; int optional; @@ -127,23 +164,46 @@ typedef struct ebpf_module 
{ ebpf_local_maps_t *maps; ebpf_specify_name_t *names; uint32_t pid_map_size; + struct config *cfg; + const char *config_file; } ebpf_module_t; -extern int get_kernel_version(char *out, int size); +extern int ebpf_get_kernel_version(); extern int get_redhat_release(); extern int has_condition_to_run(int version); extern char *ebpf_kernel_suffix(int version, int isrh); -extern int ebpf_update_kernel(ebpf_data_t *ef); +extern void ebpf_update_kernel(char *ks, size_t length, int isrh, int version); extern struct bpf_link **ebpf_load_program(char *plugins_dir, ebpf_module_t *em, char *kernel_string, - struct bpf_object **obj, - int *map_fd); + struct bpf_object **obj); -extern void ebpf_mount_config_name(char *filename, size_t length, char *path, char *config); +extern void ebpf_mount_config_name(char *filename, size_t length, char *path, const char *config); extern int ebpf_load_config(struct config *config, char *filename); -extern void ebpf_update_module_using_config(ebpf_module_t *modules, struct config *cfg); -extern void ebpf_update_module(ebpf_module_t *em, struct config *cfg, char *cfg_file); +extern void ebpf_update_module(ebpf_module_t *em); extern void ebpf_update_names(ebpf_specify_name_t *opt, ebpf_module_t *em); +extern void ebpf_load_addresses(ebpf_addresses_t *fa, int fd); +extern void ebpf_fill_algorithms(int *algorithms, size_t length, int algorithm); +extern char **ebpf_fill_histogram_dimension(size_t maximum); + +// Histogram +#define NETDATA_EBPF_HIST_MAX_BINS 24UL +#define NETDATA_DISK_MAX 256U +#define NETDATA_DISK_HISTOGRAM_LENGTH (NETDATA_DISK_MAX * NETDATA_EBPF_HIST_MAX_BINS) + +typedef struct netdata_ebpf_histogram { + char *name; + char *title; + int order; + uint64_t histogram[NETDATA_EBPF_HIST_MAX_BINS]; +} netdata_ebpf_histogram_t; + +extern void ebpf_histogram_dimension_cleanup(char **ptr, size_t length); + +// Tracepoint helpers +// For more information related to tracepoints read https://www.kernel.org/doc/html/latest/trace/tracepoints.html +extern int ebpf_is_tracepoint_enabled(char *subsys, char *eventname); +extern int ebpf_enable_tracing_values(char *subsys, char *eventname); +extern int ebpf_disable_tracing_values(char *subsys, char *eventname); #endif /* NETDATA_EBPF_H */ diff --git a/libnetdata/json/jsmn.c b/libnetdata/json/jsmn.c index 952535897..2f48bd65a 100644 --- a/libnetdata/json/jsmn.c +++ b/libnetdata/json/jsmn.c @@ -183,7 +183,7 @@ static jsmnerr_t jsmn_parse_string(jsmn_parser *parser, const char *js, * * Parse JSON string and fill tokens. 
* - * @param parser the auxiliar vector used to parser + * @param parser the auxiliary vector used to parser * @param js the string to parse * @param len the string length * @param tokens the place to map the tokens diff --git a/libnetdata/libnetdata.h b/libnetdata/libnetdata.h index 77a1bbe7f..b49ab21a0 100644 --- a/libnetdata/libnetdata.h +++ b/libnetdata/libnetdata.h @@ -53,6 +53,7 @@ extern "C" { #include #include +#include #include #include #include @@ -90,6 +91,12 @@ extern "C" { #include #include +// CentOS 7 has older version that doesn't define this +// same goes for MacOS +#ifndef UUID_STR_LEN +#define UUID_STR_LEN (37) +#endif + #ifdef HAVE_NETINET_IN_H #include #endif diff --git a/libnetdata/log/log.h b/libnetdata/log/log.h index 58cc0d26c..c8380d0c1 100644 --- a/libnetdata/log/log.h +++ b/libnetdata/log/log.h @@ -44,7 +44,7 @@ extern "C" { #define D_RRDENGINE 0x0000000100000000 #define D_ACLK 0x0000000200000000 #define D_METADATALOG 0x0000000400000000 -#define D_GUIDLOG 0x0000000800000000 +#define D_ACLK_SYNC 0x0000000800000000 #define D_SYSTEM 0x8000000000000000 //#define DEBUG (D_WEB_CLIENT_ACCESS|D_LISTENER|D_RRD_STATS) diff --git a/libnetdata/procfile/procfile.c b/libnetdata/procfile/procfile.c index 9867c19f6..ce412f4b0 100644 --- a/libnetdata/procfile/procfile.c +++ b/libnetdata/procfile/procfile.c @@ -69,7 +69,7 @@ static inline pfwords *pfwords_new(void) { } static inline void pfwords_reset(pfwords *fw) { - // debug(D_PROCFILE, PF_PREFIX ": reseting words"); + // debug(D_PROCFILE, PF_PREFIX ": resetting words"); fw->len = 0; } @@ -115,7 +115,7 @@ static inline pflines *pflines_new(void) { } static inline void pflines_reset(pflines *fl) { - // debug(D_PROCFILE, PF_PREFIX ": reseting lines"); + // debug(D_PROCFILE, PF_PREFIX ": resetting lines"); fl->len = 0; } diff --git a/libnetdata/socket/security.c b/libnetdata/socket/security.c index 63a71bcbd..6ac512de5 100644 --- a/libnetdata/socket/security.c +++ b/libnetdata/socket/security.c @@ -213,6 +213,7 @@ void security_start_ssl(int selector) { } netdata_srv_ctx = security_initialize_openssl_server(); + SSL_CTX_set_mode(netdata_srv_ctx, SSL_MODE_ENABLE_PARTIAL_WRITE); break; } case NETDATA_SSL_CONTEXT_STREAMING: { diff --git a/libnetdata/storage_number/storage_number.c b/libnetdata/storage_number/storage_number.c index 8ef1353b0..3e6a9f45c 100644 --- a/libnetdata/storage_number/storage_number.c +++ b/libnetdata/storage_number/storage_number.c @@ -2,17 +2,23 @@ #include "../libnetdata.h" +#define get_storage_number_flags(value) \ + ((((storage_number)(value)) & (1 << 24)) | \ + (((storage_number)(value)) & (1 << 25)) | \ + (((storage_number)(value)) & (1 << 26))) + storage_number pack_storage_number(calculated_number value, uint32_t flags) { // bit 32 = sign 0:positive, 1:negative // bit 31 = 0:divide, 1:multiply // bit 30, 29, 28 = (multiplier or divider) 0-7 (8 total) // bit 27 SN_EXISTS_100 // bit 26 SN_EXISTS_RESET - // bit 25 SN_EXISTS + // bit 25 SN_ANOMALY_BIT = 0: anomalous, 1: not anomalous // bit 24 to bit 1 = the value storage_number r = get_storage_number_flags(flags); - if(!value) return r; + if(!value) + goto RET_SN; int m = 0; calculated_number n = value, factor = 10; @@ -47,7 +53,7 @@ storage_number pack_storage_number(calculated_number value, uint32_t flags) { error("Number " CALCULATED_NUMBER_FORMAT " is too big.", value); #endif r += 0x00ffffff; - return r; + goto RET_SN; } } else { @@ -78,6 +84,10 @@ storage_number pack_storage_number(calculated_number value, uint32_t flags) { r += (storage_number)n; 
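The RET_SN remapping that follows this hunk addresses a collision: with bit mask (1 << 24) now carrying the anomaly bit (0 means anomalous), an anomalous sample of value zero would pack to 0x00000000, which is exactly SN_EMPTY_SLOT. It is therefore remapped to SN_EXISTS_100, whose fraction and exponent fields are zero, so it still unpacks to 0. A self-contained restatement using only constants quoted in this patch (the main() harness is illustrative):

#include <cassert>
#include <cstdint>

typedef uint32_t storage_number;
#define SN_ANOMALY_BIT    (1 << 24)     // 0: anomalous, 1: not anomalous
#define SN_EXISTS_100     (1 << 26)     // multiplier is 100 instead of 10
#define SN_EMPTY_SLOT     0x00000000
#define SN_ANOMALOUS_ZERO SN_EXISTS_100 // unpacks to zero, but is not a gap

int main() {
    storage_number r = 0;               // anomalous zero: no flags, no value bits
    if (r == SN_EMPTY_SLOT)             // same test pack_storage_number() now performs
        r = SN_ANOMALOUS_ZERO;
    assert(r != SN_EMPTY_SLOT);         // distinguishable from a missing sample
    assert((r & SN_ANOMALY_BIT) == 0);  // still reads as anomalous
    return 0;
}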
#endif +RET_SN: + if (r == SN_EMPTY_SLOT) + r = SN_ANOMALOUS_ZERO; + return r; } @@ -100,7 +110,7 @@ calculated_number unpack_storage_number(storage_number value) { factor = 100; // bit 26 SN_EXISTS_RESET - // bit 25 SN_EXISTS + // bit 25 SN_ANOMALY_BIT // bit 30, 29, 28 = (multiplier or divider) 0-7 (8 total) int mul = (value & ((1<<29)|(1<<28)|(1<<27))) >> 27; diff --git a/libnetdata/storage_number/storage_number.h b/libnetdata/storage_number/storage_number.h index 4ad7ff624..4101f69e0 100644 --- a/libnetdata/storage_number/storage_number.h +++ b/libnetdata/storage_number/storage_number.h @@ -60,17 +60,24 @@ typedef long double collected_number; typedef uint32_t storage_number; #define STORAGE_NUMBER_FORMAT "%u" -#define SN_EXISTS (1 << 24) // the value exists +#define SN_ANOMALY_BIT (1 << 24) // the anomaly bit of the value #define SN_EXISTS_RESET (1 << 25) // the value has been overflown #define SN_EXISTS_100 (1 << 26) // very large value (multiplier is 100 instead of 10) -// extract the flags -#define get_storage_number_flags(value) ((((storage_number)(value)) & (1 << 24)) | (((storage_number)(value)) & (1 << 25)) | (((storage_number)(value)) & (1 << 26))) +#define SN_DEFAULT_FLAGS SN_ANOMALY_BIT + #define SN_EMPTY_SLOT 0x00000000 +// When the calculated number is zero and the value is anomalous (ie. it's bit +// is zero) we want to return a storage_number representation that is +// different from the empty slot. We achieve this by mapping zero to +// SN_EXISTS_100. Unpacking the SN_EXISTS_100 value will return zero because +// its fraction field (as well as its exponent factor field) will be zero. +#define SN_ANOMALOUS_ZERO SN_EXISTS_100 + // checks -#define does_storage_number_exist(value) ((get_storage_number_flags(value) != 0)?1:0) -#define did_storage_number_reset(value) ((get_storage_number_flags(value) == SN_EXISTS_RESET)?1:0) +#define does_storage_number_exist(value) (((storage_number) (value)) != SN_EMPTY_SLOT) +#define did_storage_number_reset(value) ((((storage_number) (value)) & SN_EXISTS_RESET) != 0) storage_number pack_storage_number(calculated_number value, uint32_t flags); calculated_number unpack_storage_number(storage_number value); diff --git a/libnetdata/storage_number/tests/test_storage_number.c b/libnetdata/storage_number/tests/test_storage_number.c index 7ef18b1de..f90521cab 100644 --- a/libnetdata/storage_number/tests/test_storage_number.c +++ b/libnetdata/storage_number/tests/test_storage_number.c @@ -38,7 +38,7 @@ static void test_number_printing(void **state) print_calculated_number(value, -9999.9999999); assert_string_equal(value, "-9999.9999999"); - print_calculated_number(value, unpack_storage_number(pack_storage_number(16.777218L, SN_EXISTS))); + print_calculated_number(value, unpack_storage_number(pack_storage_number(16.777218L, SN_DEFAULT_FLAGS))); assert_string_equal(value, "16.77722"); } diff --git a/ml/BitBufferCounter.cc b/ml/BitBufferCounter.cc new file mode 100644 index 000000000..5e1ab5aca --- /dev/null +++ b/ml/BitBufferCounter.cc @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "BitBufferCounter.h" + +using namespace ml; + +std::vector BitBufferCounter::getBuffer() const { + std::vector Buffer; + + for (size_t Idx = start(); Idx != (start() + size()); Idx++) + Buffer.push_back(V[Idx % V.size()]); + + return Buffer; +} + +void BitBufferCounter::insert(bool Bit) { + if (N >= V.size()) + NumSetBits -= (V[start()] == true); + + NumSetBits += (Bit == true); + V[N++ % V.size()] = Bit; +} + +void 
BitBufferCounter::print(std::ostream &OS) const { + std::vector Buffer = getBuffer(); + + for (bool B : Buffer) + OS << B; +} diff --git a/ml/BitBufferCounter.h b/ml/BitBufferCounter.h new file mode 100644 index 000000000..db924d776 --- /dev/null +++ b/ml/BitBufferCounter.h @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef BIT_BUFFER_COUNTER_H +#define BIT_BUFFER_COUNTER_H + +#include "ml-private.h" + +namespace ml { + +class BitBufferCounter { +public: + BitBufferCounter(size_t Capacity) : V(Capacity, 0), NumSetBits(0), N(0) {} + + std::vector getBuffer() const; + + void insert(bool Bit); + + void print(std::ostream &OS) const; + + bool isFilled() const { + return N >= V.size(); + } + + size_t numSetBits() const { + return NumSetBits; + } + +private: + inline size_t size() const { + return N < V.size() ? N : V.size(); + } + + inline size_t start() const { + if (N <= V.size()) + return 0; + + return N % V.size(); + } + +private: + std::vector V; + size_t NumSetBits; + + size_t N; +}; + +} // namespace ml + +inline std::ostream& operator<<(std::ostream &OS, const ml::BitBufferCounter &BBC) { + BBC.print(OS); + return OS; +} + +#endif /* BIT_BUFFER_COUNTER_H */ diff --git a/ml/BitRateWindow.cc b/ml/BitRateWindow.cc new file mode 100644 index 000000000..c4c994c42 --- /dev/null +++ b/ml/BitRateWindow.cc @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "BitRateWindow.h" + +using namespace ml; + +std::pair BitRateWindow::insert(bool Bit) { + Edge E; + + BBC.insert(Bit); + switch (CurrState) { + case State::NotFilled: { + if (BBC.isFilled()) { + if (BBC.numSetBits() < SetBitsThreshold) { + CurrState = State::BelowThreshold; + } else { + CurrState = State::AboveThreshold; + } + } else { + CurrState = State::NotFilled; + } + + E = {State::NotFilled, CurrState}; + break; + } case State::BelowThreshold: { + if (BBC.numSetBits() >= SetBitsThreshold) { + CurrState = State::AboveThreshold; + } + + E = {State::BelowThreshold, CurrState}; + break; + } case State::AboveThreshold: { + if ((BBC.numSetBits() < SetBitsThreshold) || + (CurrLength == MaxLength)) { + CurrState = State::Idle; + } + + E = {State::AboveThreshold, CurrState}; + break; + } case State::Idle: { + if (CurrLength == IdleLength) { + CurrState = State::NotFilled; + } + + E = {State::Idle, CurrState}; + break; + } + } + + Action A = EdgeActions[E]; + size_t L = (this->*A)(E.first, Bit); + return {E, L}; +} + +void BitRateWindow::print(std::ostream &OS) const { + switch (CurrState) { + case State::NotFilled: + OS << "NotFilled"; + break; + case State::BelowThreshold: + OS << "BelowThreshold"; + break; + case State::AboveThreshold: + OS << "AboveThreshold"; + break; + case State::Idle: + OS << "Idle"; + break; + default: + OS << "UnknownState"; + break; + } + + OS << ": " << BBC << " (Current Length: " << CurrLength << ")"; +} diff --git a/ml/BitRateWindow.h b/ml/BitRateWindow.h new file mode 100644 index 000000000..0d99008b8 --- /dev/null +++ b/ml/BitRateWindow.h @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef BIT_RATE_WINDOW_H +#define BIT_RATE_WINDOW_H + +#include "BitBufferCounter.h" +#include "ml-private.h" + +namespace ml { + +class BitRateWindow { +public: + enum class State { + NotFilled, + BelowThreshold, + AboveThreshold, + Idle + }; + + using Edge = std::pair; + using Action = size_t (BitRateWindow::*)(State PrevState, bool NewBit); + +private: + std::map EdgeActions = { + // From == To + { + Edge(State::NotFilled, State::NotFilled), + 
&BitRateWindow::onRoundtripNotFilled, + }, + { + Edge(State::BelowThreshold, State::BelowThreshold), + &BitRateWindow::onRoundtripBelowThreshold, + }, + { + Edge(State::AboveThreshold, State::AboveThreshold), + &BitRateWindow::onRoundtripAboveThreshold, + }, + { + Edge(State::Idle, State::Idle), + &BitRateWindow::onRoundtripIdle, + }, + + + // NotFilled => {BelowThreshold, AboveThreshold} + { + Edge(State::NotFilled, State::BelowThreshold), + &BitRateWindow::onNotFilledToBelowThreshold + }, + { + Edge(State::NotFilled, State::AboveThreshold), + &BitRateWindow::onNotFilledToAboveThreshold + }, + + // BelowThreshold => AboveThreshold + { + Edge(State::BelowThreshold, State::AboveThreshold), + &BitRateWindow::onBelowToAboveThreshold + }, + + // AboveThreshold => Idle + { + Edge(State::AboveThreshold, State::Idle), + &BitRateWindow::onAboveThresholdToIdle + }, + + // Idle => NotFilled + { + Edge(State::Idle, State::NotFilled), + &BitRateWindow::onIdleToNotFilled + }, + }; + +public: + BitRateWindow(size_t MinLength, size_t MaxLength, size_t IdleLength, + size_t SetBitsThreshold) : + MinLength(MinLength), MaxLength(MaxLength), IdleLength(IdleLength), + SetBitsThreshold(SetBitsThreshold), + CurrState(State::NotFilled), CurrLength(0), BBC(MinLength) {} + + std::pair insert(bool Bit); + + void print(std::ostream &OS) const; + +private: + size_t onRoundtripNotFilled(State PrevState, bool NewBit) { + (void) PrevState, (void) NewBit; + + CurrLength += 1; + return CurrLength; + } + + size_t onRoundtripBelowThreshold(State PrevState, bool NewBit) { + (void) PrevState, (void) NewBit; + + CurrLength = MinLength; + return CurrLength; + } + + size_t onRoundtripAboveThreshold(State PrevState, bool NewBit) { + (void) PrevState, (void) NewBit; + + CurrLength += 1; + return CurrLength; + } + + size_t onRoundtripIdle(State PrevState, bool NewBit) { + (void) PrevState, (void) NewBit; + + CurrLength += 1; + return CurrLength; + } + + size_t onNotFilledToBelowThreshold(State PrevState, bool NewBit) { + (void) PrevState, (void) NewBit; + + CurrLength = MinLength; + return CurrLength; + } + + size_t onNotFilledToAboveThreshold(State PrevState, bool NewBit) { + (void) PrevState, (void) NewBit; + + CurrLength += 1; + return CurrLength; + } + + size_t onBelowToAboveThreshold(State PrevState, bool NewBit) { + (void) PrevState, (void) NewBit; + + CurrLength = MinLength; + return CurrLength; + } + + size_t onAboveThresholdToIdle(State PrevState, bool NewBit) { + (void) PrevState, (void) NewBit; + + size_t PrevLength = CurrLength; + CurrLength = 1; + return PrevLength; + } + + size_t onIdleToNotFilled(State PrevState, bool NewBit) { + (void) PrevState, (void) NewBit; + + BBC = BitBufferCounter(MinLength); + BBC.insert(NewBit); + + CurrLength = 1; + return CurrLength; + } + +private: + size_t MinLength; + size_t MaxLength; + size_t IdleLength; + size_t SetBitsThreshold; + + State CurrState; + size_t CurrLength; + BitBufferCounter BBC; +}; + +} // namespace ml + +inline std::ostream& operator<<(std::ostream &OS, const ml::BitRateWindow BRW) { + BRW.print(OS); + return OS; +} + +#endif /* BIT_RATE_WINDOW_H */ diff --git a/ml/Config.cc b/ml/Config.cc new file mode 100644 index 000000000..f48f9b39f --- /dev/null +++ b/ml/Config.cc @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "Config.h" +#include "ml-private.h" + +using namespace ml; + +/* + * Global configuration instance to be shared between training and + * prediction threads. 
+ */ +Config ml::Cfg; + +template +static T clamp(const T& Value, const T& Min, const T& Max) { + return std::max(Min, std::min(Value, Max)); +} + +/* + * Initialize global configuration variable. + */ +void Config::readMLConfig(void) { + const char *ConfigSectionML = CONFIG_SECTION_ML; + + bool EnableAnomalyDetection = config_get_boolean(ConfigSectionML, "enabled", false); + + /* + * Read values + */ + + unsigned MaxTrainSamples = config_get_number(ConfigSectionML, "maximum num samples to train", 4 * 3600); + unsigned MinTrainSamples = config_get_number(ConfigSectionML, "minimum num samples to train", 1 * 3600); + unsigned TrainEvery = config_get_number(ConfigSectionML, "train every", 1 * 3600); + + unsigned DiffN = config_get_number(ConfigSectionML, "num samples to diff", 1); + unsigned SmoothN = config_get_number(ConfigSectionML, "num samples to smooth", 3); + unsigned LagN = config_get_number(ConfigSectionML, "num samples to lag", 5); + + unsigned MaxKMeansIters = config_get_number(ConfigSectionML, "maximum number of k-means iterations", 1000); + + double DimensionAnomalyScoreThreshold = config_get_float(ConfigSectionML, "dimension anomaly score threshold", 0.99); + double HostAnomalyRateThreshold = config_get_float(ConfigSectionML, "host anomaly rate threshold", 0.01); + + double ADMinWindowSize = config_get_float(ConfigSectionML, "minimum window size", 30); + double ADMaxWindowSize = config_get_float(ConfigSectionML, "maximum window size", 600); + double ADIdleWindowSize = config_get_float(ConfigSectionML, "idle window size", 30); + double ADWindowRateThreshold = config_get_float(ConfigSectionML, "window minimum anomaly rate", 0.25); + double ADDimensionRateThreshold = config_get_float(ConfigSectionML, "anomaly event min dimension rate threshold", 0.05); + + std::string HostsToSkip = config_get(ConfigSectionML, "hosts to skip from training", "!*"); + std::string ChartsToSkip = config_get(ConfigSectionML, "charts to skip from training", + "!system.* !cpu.* !mem.* !disk.* !disk_* " + "!ip.* !ipv4.* !ipv6.* !net.* !net_* !netfilter.* " + "!services.* !apps.* !groups.* !user.* !ebpf.* !netdata.* *"); + + std::stringstream SS; + SS << netdata_configured_cache_dir << "/anomaly-detection.db"; + Cfg.AnomalyDBPath = SS.str(); + + /* + * Clamp + */ + + MaxTrainSamples = clamp(MaxTrainSamples, 1 * 3600u, 6 * 3600u); + MinTrainSamples = clamp(MinTrainSamples, 1 * 3600u, 6 * 3600u); + TrainEvery = clamp(TrainEvery, 1 * 3600u, 6 * 3600u); + + DiffN = clamp(DiffN, 0u, 1u); + SmoothN = clamp(SmoothN, 0u, 5u); + LagN = clamp(LagN, 0u, 5u); + + MaxKMeansIters = clamp(MaxKMeansIters, 500u, 1000u); + + DimensionAnomalyScoreThreshold = clamp(DimensionAnomalyScoreThreshold, 0.01, 5.00); + HostAnomalyRateThreshold = clamp(HostAnomalyRateThreshold, 0.01, 1.0); + + ADMinWindowSize = clamp(ADMinWindowSize, 30.0, 300.0); + ADMaxWindowSize = clamp(ADMaxWindowSize, 60.0, 900.0); + ADIdleWindowSize = clamp(ADIdleWindowSize, 30.0, 900.0); + ADWindowRateThreshold = clamp(ADWindowRateThreshold, 0.01, 0.99); + ADDimensionRateThreshold = clamp(ADDimensionRateThreshold, 0.01, 0.99); + + /* + * Validate + */ + + if (MinTrainSamples >= MaxTrainSamples) { + error("invalid min/max train samples found (%u >= %u)", MinTrainSamples, MaxTrainSamples); + + MinTrainSamples = 1 * 3600; + MaxTrainSamples = 4 * 3600; + } + + if (ADMinWindowSize >= ADMaxWindowSize) { + error("invalid min/max anomaly window size found (%lf >= %lf)", ADMinWindowSize, ADMaxWindowSize); + + ADMinWindowSize = 30.0; + ADMaxWindowSize = 600.0; + } + + /* + 
* Assign to config instance + */ + + Cfg.EnableAnomalyDetection = EnableAnomalyDetection; + + Cfg.MaxTrainSamples = MaxTrainSamples; + Cfg.MinTrainSamples = MinTrainSamples; + Cfg.TrainEvery = TrainEvery; + + Cfg.DiffN = DiffN; + Cfg.SmoothN = SmoothN; + Cfg.LagN = LagN; + + Cfg.MaxKMeansIters = MaxKMeansIters; + + Cfg.DimensionAnomalyScoreThreshold = DimensionAnomalyScoreThreshold; + Cfg.HostAnomalyRateThreshold = HostAnomalyRateThreshold; + + Cfg.ADMinWindowSize = ADMinWindowSize; + Cfg.ADMaxWindowSize = ADMaxWindowSize; + Cfg.ADIdleWindowSize = ADIdleWindowSize; + Cfg.ADWindowRateThreshold = ADWindowRateThreshold; + Cfg.ADDimensionRateThreshold = ADDimensionRateThreshold; + + Cfg.SP_HostsToSkip = simple_pattern_create(HostsToSkip.c_str(), NULL, SIMPLE_PATTERN_EXACT); + Cfg.SP_ChartsToSkip = simple_pattern_create(ChartsToSkip.c_str(), NULL, SIMPLE_PATTERN_EXACT); +} diff --git a/ml/Config.h b/ml/Config.h new file mode 100644 index 000000000..f29bae3a6 --- /dev/null +++ b/ml/Config.h @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef ML_CONFIG_H +#define ML_CONFIG_H + +#include "ml-private.h" + +namespace ml { + +class Config { +public: + bool EnableAnomalyDetection; + + unsigned MaxTrainSamples; + unsigned MinTrainSamples; + unsigned TrainEvery; + + unsigned DiffN; + unsigned SmoothN; + unsigned LagN; + + unsigned MaxKMeansIters; + + double DimensionAnomalyScoreThreshold; + double HostAnomalyRateThreshold; + + double ADMinWindowSize; + double ADMaxWindowSize; + double ADIdleWindowSize; + double ADWindowRateThreshold; + double ADDimensionRateThreshold; + + SIMPLE_PATTERN *SP_HostsToSkip; + SIMPLE_PATTERN *SP_ChartsToSkip; + + std::string AnomalyDBPath; + + void readMLConfig(); +}; + +extern Config Cfg; + +} // namespace ml + +#endif /* ML_CONFIG_H */ diff --git a/ml/Database.cc b/ml/Database.cc new file mode 100644 index 000000000..06d0cdecb --- /dev/null +++ b/ml/Database.cc @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "Database.h" + +const char *ml::Database::SQL_CREATE_ANOMALIES_TABLE = + "CREATE TABLE IF NOT EXISTS anomaly_events( " + " anomaly_detector_name text NOT NULL, " + " anomaly_detector_version int NOT NULL, " + " host_id text NOT NULL, " + " after int NOT NULL, " + " before int NOT NULL, " + " anomaly_event_info text, " + " PRIMARY KEY( " + " anomaly_detector_name, anomaly_detector_version, " + " host_id, after, before " + " ) " + ");"; + +const char *ml::Database::SQL_INSERT_ANOMALY = + "INSERT INTO anomaly_events( " + " anomaly_detector_name, anomaly_detector_version, " + " host_id, after, before, anomaly_event_info) " + "VALUES (?1, ?2, ?3, ?4, ?5, ?6);"; + +const char *ml::Database::SQL_SELECT_ANOMALY = + "SELECT anomaly_event_info FROM anomaly_events WHERE" + " anomaly_detector_name == ?1 AND" + " anomaly_detector_version == ?2 AND" + " host_id == ?3 AND" + " after == ?4 AND" + " before == ?5;"; + +const char *ml::Database::SQL_SELECT_ANOMALY_EVENTS = + "SELECT after, before FROM anomaly_events WHERE" + " anomaly_detector_name == ?1 AND" + " anomaly_detector_version == ?2 AND" + " host_id == ?3 AND" + " after >= ?4 AND" + " before <= ?5;"; + +using namespace ml; + +bool Statement::prepare(sqlite3 *Conn) { + if (!Conn) + return false; + + if (ParsedStmt) + return true; + + int RC = sqlite3_prepare_v2(Conn, RawStmt, -1, &ParsedStmt, nullptr); + if (RC == SQLITE_OK) + return true; + + std::string Msg = "Statement \"%s\" preparation failed due to \"%s\""; + error(Msg.c_str(), RawStmt, sqlite3_errstr(RC)); + + return 
false; +} + +bool Statement::bindValue(size_t Pos, const std::string &Value) { + int RC = sqlite3_bind_text(ParsedStmt, Pos, Value.c_str(), -1, SQLITE_TRANSIENT); + if (RC == SQLITE_OK) + return true; + + error("Failed to bind text '%s' (pos = %zu) in statement '%s'.", Value.c_str(), Pos, RawStmt); + return false; +} + +bool Statement::bindValue(size_t Pos, const int Value) { + int RC = sqlite3_bind_int(ParsedStmt, Pos, Value); + if (RC == SQLITE_OK) + return true; + + error("Failed to bind integer %d (pos = %zu) in statement '%s'.", Value, Pos, RawStmt); + return false; +} + +bool Statement::resetAndClear(bool Ret) { + int RC = sqlite3_reset(ParsedStmt); + if (RC != SQLITE_OK) { + error("Could not reset statement: '%s'", RawStmt); + return false; + } + + RC = sqlite3_clear_bindings(ParsedStmt); + if (RC != SQLITE_OK) { + error("Could not clear bindings in statement: '%s'", RawStmt); + return false; + } + + return Ret; +} + +Database::Database(const std::string &Path) { + // Get sqlite3 connection handle. + int RC = sqlite3_open(Path.c_str(), &Conn); + if (RC != SQLITE_OK) { + std::string Msg = "Failed to initialize ML DB at %s, due to \"%s\""; + error(Msg.c_str(), Path.c_str(), sqlite3_errstr(RC)); + + sqlite3_close(Conn); + Conn = nullptr; + return; + } + + // Create anomaly events table if it does not exist. + char *ErrMsg; + RC = sqlite3_exec(Conn, SQL_CREATE_ANOMALIES_TABLE, nullptr, nullptr, &ErrMsg); + if (RC == SQLITE_OK) + return; + + error("SQLite error during database initialization, rc = %d (%s)", RC, ErrMsg); + error("SQLite failed statement: %s", SQL_CREATE_ANOMALIES_TABLE); + + sqlite3_free(ErrMsg); + sqlite3_close(Conn); + Conn = nullptr; +} + +Database::~Database() { + if (!Conn) + return; + + int RC = sqlite3_close(Conn); + if (RC != SQLITE_OK) + error("Could not close connection properly (rc=%d)", RC); +} diff --git a/ml/Database.h b/ml/Database.h new file mode 100644 index 000000000..cc7b75872 --- /dev/null +++ b/ml/Database.h @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef ML_DATABASE_H +#define ML_DATABASE_H + +#include "Dimension.h" +#include "ml-private.h" + +#include "json/single_include/nlohmann/json.hpp" + +namespace ml { + +class Statement { +public: + using RowCallback = std::function; + +public: + Statement(const char *RawStmt) : RawStmt(RawStmt), ParsedStmt(nullptr) {} + + template + bool exec(sqlite3 *Conn, RowCallback RowCb, ArgTypes ...Args) { + if (!prepare(Conn)) + return false; + + switch (bind(1, Args...)) { + case 0: + return false; + case sizeof...(Args): + break; + default: + return resetAndClear(false); + } + + while (true) { + switch (int RC = sqlite3_step(ParsedStmt)) { + case SQLITE_BUSY: case SQLITE_LOCKED: + usleep(SQLITE_INSERT_DELAY * USEC_PER_MS); + continue; + case SQLITE_ROW: + RowCb(ParsedStmt); + continue; + case SQLITE_DONE: + return resetAndClear(true); + default: + error("Stepping through '%s' returned rc=%d", RawStmt, RC); + return resetAndClear(false); + } + } + } + + ~Statement() { + if (!ParsedStmt) + return; + + int RC = sqlite3_finalize(ParsedStmt); + if (RC != SQLITE_OK) + error("Could not properly finalize statement (rc=%d)", RC); + } + +private: + bool prepare(sqlite3 *Conn); + + bool bindValue(size_t Pos, const int Value); + bool bindValue(size_t Pos, const std::string &Value); + + template + size_t bind(size_t Pos, ArgType T) { + return bindValue(Pos, T); + } + + template + size_t bind(size_t Pos, ArgType T, ArgTypes ...Args) { + return bindValue(Pos, T) + bind(Pos + 1, Args...); + } + + bool 
resetAndClear(bool Ret); + +private: + const char *RawStmt; + sqlite3_stmt *ParsedStmt; +}; + +class Database { +private: + static const char *SQL_CREATE_ANOMALIES_TABLE; + static const char *SQL_INSERT_ANOMALY; + static const char *SQL_SELECT_ANOMALY; + static const char *SQL_SELECT_ANOMALY_EVENTS; + +public: + Database(const std::string &Path); + + ~Database(); + + template + bool insertAnomaly(ArgTypes... Args) { + Statement::RowCallback RowCb = [](sqlite3_stmt *Stmt) { (void) Stmt; }; + return InsertAnomalyStmt.exec(Conn, RowCb, Args...); + } + + template + bool getAnomalyInfo(nlohmann::json &Json, ArgTypes&&... Args) { + Statement::RowCallback RowCb = [&](sqlite3_stmt *Stmt) { + const char *Text = static_cast(sqlite3_column_blob(Stmt, 0)); + Json = nlohmann::json::parse(Text); + }; + return GetAnomalyInfoStmt.exec(Conn, RowCb, Args...); + } + + template + bool getAnomaliesInRange(std::vector> &V, ArgTypes&&... Args) { + Statement::RowCallback RowCb = [&](sqlite3_stmt *Stmt) { + V.push_back({ + sqlite3_column_int64(Stmt, 0), + sqlite3_column_int64(Stmt, 1) + }); + }; + return GetAnomaliesInRangeStmt.exec(Conn, RowCb, Args...); + } + +private: + sqlite3 *Conn; + + Statement InsertAnomalyStmt{SQL_INSERT_ANOMALY}; + Statement GetAnomalyInfoStmt{SQL_SELECT_ANOMALY}; + Statement GetAnomaliesInRangeStmt{SQL_SELECT_ANOMALY_EVENTS}; +}; + +} + +#endif /* ML_DATABASE_H */ diff --git a/ml/Dimension.cc b/ml/Dimension.cc new file mode 100644 index 000000000..c27f30bb4 --- /dev/null +++ b/ml/Dimension.cc @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "Config.h" +#include "Dimension.h" +#include "Query.h" + +using namespace ml; + +/* + * Copy of the unpack_storage_number which allows us to convert + * a storage_number to double. + */ +static CalculatedNumber unpack_storage_number_dbl(storage_number value) { + if(!value) + return 0; + + int sign = 0, exp = 0; + int factor = 10; + + // bit 32 = 0:positive, 1:negative + if(unlikely(value & (1 << 31))) + sign = 1; + + // bit 31 = 0:divide, 1:multiply + if(unlikely(value & (1 << 30))) + exp = 1; + + // bit 27 SN_EXISTS_100 + if(unlikely(value & (1 << 26))) + factor = 100; + + // bit 26 SN_EXISTS_RESET + // bit 25 SN_ANOMALY_BIT + + // bit 30, 29, 28 = (multiplier or divider) 0-7 (8 total) + int mul = (value & ((1<<29)|(1<<28)|(1<<27))) >> 27; + + // bit 24 to bit 1 = the value, so remove all other bits + value ^= value & ((1<<31)|(1<<30)|(1<<29)|(1<<28)|(1<<27)|(1<<26)|(1<<25)|(1<<24)); + + CalculatedNumber CN = value; + + if(exp) { + for(; mul; mul--) + CN *= factor; + } + else { + for( ; mul ; mul--) + CN /= 10; + } + + if(sign) + CN = -CN; + + return CN; +} + +std::pair +TrainableDimension::getCalculatedNumbers() { + size_t MinN = Cfg.MinTrainSamples; + size_t MaxN = Cfg.MaxTrainSamples; + + // Figure out what our time window should be. + time_t BeforeT = now_realtime_sec() - 1; + time_t AfterT = BeforeT - (MaxN * updateEvery()); + + BeforeT -= (BeforeT % updateEvery()); + AfterT -= (AfterT % updateEvery()); + + BeforeT = std::min(BeforeT, latestTime()); + AfterT = std::max(AfterT, oldestTime()); + + if (AfterT >= BeforeT) + return { nullptr, 0 }; + + CalculatedNumber *CNs = new CalculatedNumber[MaxN * (Cfg.LagN + 1)](); + + // Start the query. 
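The query loop that follows implements a simple gap policy: samples the store reports as existing are collected and remembered, gaps repeat the last collected value, leading NaNs are trimmed with memmove(), and the whole batch is dropped when fewer than MinTrainSamples real points arrived. A minimal restatement of the fill step; the (exists, value) pair input stands in for the Query API and is illustrative:

#include <limits>
#include <utility>
#include <vector>

static std::vector<double>
forwardFill(const std::vector<std::pair<bool, double>> &Samples) {
    std::vector<double> Out;
    double Last = std::numeric_limits<double>::quiet_NaN();
    for (const auto &S : Samples) {
        if (S.first)          // the sample was actually collected
            Last = S.second;
        Out.push_back(Last);  // a gap repeats the last collected value
    }
    return Out;               // leading entries stay NaN until the first sample
}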
+ unsigned Idx = 0; + unsigned CollectedValues = 0; + unsigned TotalValues = 0; + + CalculatedNumber LastValue = std::numeric_limits::quiet_NaN(); + Query Q = Query(getRD()); + + Q.init(AfterT, BeforeT); + while (!Q.isFinished()) { + if (Idx == MaxN) + break; + + auto P = Q.nextMetric(); + storage_number SN = P.second; + + if (does_storage_number_exist(SN)) { + CNs[Idx] = unpack_storage_number_dbl(SN); + LastValue = CNs[Idx]; + CollectedValues++; + } else + CNs[Idx] = LastValue; + + Idx++; + } + TotalValues = Idx; + + if (CollectedValues < MinN) { + delete[] CNs; + return { nullptr, 0 }; + } + + // Find first non-NaN value. + for (Idx = 0; std::isnan(CNs[Idx]); Idx++, TotalValues--) { } + + // Overwrite NaN values. + if (Idx != 0) + memmove(CNs, &CNs[Idx], sizeof(CalculatedNumber) * TotalValues); + + return { CNs, TotalValues }; +} + +MLResult TrainableDimension::trainModel() { + auto P = getCalculatedNumbers(); + CalculatedNumber *CNs = P.first; + unsigned N = P.second; + + if (!CNs) + return MLResult::MissingData; + + SamplesBuffer SB = SamplesBuffer(CNs, N, 1, Cfg.DiffN, Cfg.SmoothN, Cfg.LagN); + KM.train(SB, Cfg.MaxKMeansIters); + Trained = true; + + delete[] CNs; + return MLResult::Success; +} + +void PredictableDimension::addValue(CalculatedNumber Value, bool Exists) { + if (!Exists) { + CNs.clear(); + return; + } + + unsigned N = Cfg.DiffN + Cfg.SmoothN + Cfg.LagN; + if (CNs.size() < N) { + CNs.push_back(Value); + return; + } + + std::rotate(std::begin(CNs), std::begin(CNs) + 1, std::end(CNs)); + CNs[N - 1] = Value; +} + +std::pair PredictableDimension::predict() { + unsigned N = Cfg.DiffN + Cfg.SmoothN + Cfg.LagN; + if (CNs.size() != N) + return { MLResult::MissingData, AnomalyBit }; + + CalculatedNumber *TmpCNs = new CalculatedNumber[N * (Cfg.LagN + 1)](); + std::memcpy(TmpCNs, CNs.data(), N * sizeof(CalculatedNumber)); + + SamplesBuffer SB = SamplesBuffer(TmpCNs, N, 1, Cfg.DiffN, Cfg.SmoothN, Cfg.LagN); + AnomalyScore = computeAnomalyScore(SB); + delete[] TmpCNs; + + if (AnomalyScore == std::numeric_limits::quiet_NaN()) + return { MLResult::NaN, AnomalyBit }; + + AnomalyBit = AnomalyScore >= (100 * Cfg.DimensionAnomalyScoreThreshold); + return { MLResult::Success, AnomalyBit }; +} diff --git a/ml/Dimension.h b/ml/Dimension.h new file mode 100644 index 000000000..fdf923ccc --- /dev/null +++ b/ml/Dimension.h @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef ML_DIMENSION_H +#define ML_DIMENSION_H + +#include "BitBufferCounter.h" +#include "Config.h" + +#include "ml-private.h" + +namespace ml { + +class RrdDimension { +public: + RrdDimension(RRDDIM *RD) : RD(RD), Ops(&RD->state->query_ops) { + std::stringstream SS; + SS << RD->rrdset->id << "|" << RD->name; + ID = SS.str(); + } + + RRDDIM *getRD() const { return RD; } + + time_t latestTime() { return Ops->latest_time(RD); } + + time_t oldestTime() { return Ops->oldest_time(RD); } + + unsigned updateEvery() const { return RD->update_every; } + + const std::string getID() const { return ID; } + + virtual ~RrdDimension() {} + +private: + RRDDIM *RD; + struct rrddim_volatile::rrddim_query_ops *Ops; + + std::string ID; +}; + +enum class MLResult { + Success = 0, + MissingData, + NaN, +}; + +class TrainableDimension : public RrdDimension { +public: + TrainableDimension(RRDDIM *RD) : + RrdDimension(RD), TrainEvery(Cfg.TrainEvery * updateEvery()) {} + + MLResult trainModel(); + + CalculatedNumber computeAnomalyScore(SamplesBuffer &SB) { + return Trained ? 
KM.anomalyScore(SB) : 0.0; + } + + bool shouldTrain(const TimePoint &TP) const { + return (LastTrainedAt + TrainEvery) < TP; + } + + bool isTrained() const { return Trained; } + + double updateTrainingDuration(double Duration) { + return TrainingDuration.exchange(Duration); + } + +private: + std::pair<CalculatedNumber *, size_t> getCalculatedNumbers(); + +public: + TimePoint LastTrainedAt{Seconds{0}}; + +private: + Seconds TrainEvery; + KMeans KM; + + std::atomic<bool> Trained{false}; + std::atomic<double> TrainingDuration{0.0}; +}; + +class PredictableDimension : public TrainableDimension { +public: + PredictableDimension(RRDDIM *RD) : TrainableDimension(RD) {} + + std::pair<MLResult, bool> predict(); + + void addValue(CalculatedNumber Value, bool Exists); + + bool isAnomalous() { return AnomalyBit; } + +private: + CalculatedNumber AnomalyScore{0.0}; + std::atomic<bool> AnomalyBit{false}; + + std::vector<CalculatedNumber> CNs; +}; + +class DetectableDimension : public PredictableDimension { +public: + DetectableDimension(RRDDIM *RD) : PredictableDimension(RD) {} + + std::pair<bool, double> detect(size_t WindowLength, bool Reset) { + bool AnomalyBit = isAnomalous(); + + if (Reset) + NumSetBits = BBC.numSetBits(); + + NumSetBits += AnomalyBit; + BBC.insert(AnomalyBit); + + double AnomalyRate = static_cast<double>(NumSetBits) / WindowLength; + return { AnomalyBit, AnomalyRate }; + } + +private: + BitBufferCounter BBC{static_cast<size_t>(Cfg.ADMinWindowSize)}; + size_t NumSetBits{0}; +}; + +using Dimension = DetectableDimension; + +} // namespace ml + +#endif /* ML_DIMENSION_H */ diff --git a/ml/Host.cc b/ml/Host.cc new file mode 100644 index 000000000..d26ff2ae4 --- /dev/null +++ b/ml/Host.cc @@ -0,0 +1,458 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include + +#include "Config.h" +#include "Host.h" + +#include "json/single_include/nlohmann/json.hpp" + +using namespace ml; + +static void updateDimensionsChart(RRDHOST *RH, + collected_number NumTrainedDimensions, + collected_number NumNormalDimensions, + collected_number NumAnomalousDimensions) { + static thread_local RRDSET *RS = nullptr; + static thread_local RRDDIM *NumTotalDimensionsRD = nullptr; + static thread_local RRDDIM *NumTrainedDimensionsRD = nullptr; + static thread_local RRDDIM *NumNormalDimensionsRD = nullptr; + static thread_local RRDDIM *NumAnomalousDimensionsRD = nullptr; + + if (!RS) { + RS = rrdset_create( + RH, // host + "anomaly_detection", // type + "dimensions", // id + NULL, // name + "dimensions", // family + NULL, // ctx + "Anomaly detection dimensions", // title + "dimensions", // units + "netdata", // plugin + "ml", // module + 39183, // priority + RH->rrd_update_every, // update_every + RRDSET_TYPE_LINE // chart_type + ); + + NumTotalDimensionsRD = rrddim_add(RS, "total", NULL, + 1, 1, RRD_ALGORITHM_ABSOLUTE); + NumTrainedDimensionsRD = rrddim_add(RS, "trained", NULL, + 1, 1, RRD_ALGORITHM_ABSOLUTE); + NumNormalDimensionsRD = rrddim_add(RS, "normal", NULL, + 1, 1, RRD_ALGORITHM_ABSOLUTE); + NumAnomalousDimensionsRD = rrddim_add(RS, "anomalous", NULL, + 1, 1, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(RS); + + rrddim_set_by_pointer(RS, NumTotalDimensionsRD, NumNormalDimensions + NumAnomalousDimensions); + rrddim_set_by_pointer(RS, NumTrainedDimensionsRD, NumTrainedDimensions); + rrddim_set_by_pointer(RS, NumNormalDimensionsRD, NumNormalDimensions); + rrddim_set_by_pointer(RS, NumAnomalousDimensionsRD, NumAnomalousDimensions); + + rrdset_done(RS); +} + +static void updateRateChart(RRDHOST *RH, collected_number AnomalyRate) { + static thread_local RRDSET *RS = nullptr; + static thread_local RRDDIM *AnomalyRateRD
= nullptr; + + if (!RS) { + RS = rrdset_create( + RH, // host + "anomaly_detection", // type + "anomaly_rate", // id + NULL, // name + "anomaly_rate", // family + NULL, // ctx + "Percentage of anomalous dimensions", // title + "percentage", // units + "netdata", // plugin + "ml", // module + 39184, // priority + RH->rrd_update_every, // update_every + RRDSET_TYPE_LINE // chart_type + ); + + AnomalyRateRD = rrddim_add(RS, "anomaly_rate", NULL, + 1, 100, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(RS); + + rrddim_set_by_pointer(RS, AnomalyRateRD, AnomalyRate); + + rrdset_done(RS); +} + +static void updateWindowLengthChart(RRDHOST *RH, collected_number WindowLength) { + static thread_local RRDSET *RS = nullptr; + static thread_local RRDDIM *WindowLengthRD = nullptr; + + if (!RS) { + RS = rrdset_create( + RH, // host + "anomaly_detection", // type + "detector_window", // id + NULL, // name + "detector_window", // family + NULL, // ctx + "Anomaly detector window length", // title + "seconds", // units + "netdata", // plugin + "ml", // module + 39185, // priority + RH->rrd_update_every, // update_every + RRDSET_TYPE_LINE // chart_type + ); + + WindowLengthRD = rrddim_add(RS, "duration", NULL, + 1, 1, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(RS); + + rrddim_set_by_pointer(RS, WindowLengthRD, WindowLength * RH->rrd_update_every); + rrdset_done(RS); +} + +static void updateEventsChart(RRDHOST *RH, + std::pair<BitRateWindow::Edge, size_t> P, + bool ResetBitCounter, + bool NewAnomalyEvent) { + static thread_local RRDSET *RS = nullptr; + static thread_local RRDDIM *AboveThresholdRD = nullptr; + static thread_local RRDDIM *ResetBitCounterRD = nullptr; + static thread_local RRDDIM *NewAnomalyEventRD = nullptr; + + if (!RS) { + RS = rrdset_create( + RH, // host + "anomaly_detection", // type + "detector_events", // id + NULL, // name + "detector_events", // family + NULL, // ctx + "Anomaly events triggered", // title + "boolean", // units + "netdata", // plugin + "ml", // module + 39186, // priority + RH->rrd_update_every, // update_every + RRDSET_TYPE_LINE // chart_type + ); + + AboveThresholdRD = rrddim_add(RS, "above_threshold", NULL, + 1, 1, RRD_ALGORITHM_ABSOLUTE); + ResetBitCounterRD = rrddim_add(RS, "reset_bit_counter", NULL, + 1, 1, RRD_ALGORITHM_ABSOLUTE); + NewAnomalyEventRD = rrddim_add(RS, "new_anomaly_event", NULL, + 1, 1, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(RS); + + BitRateWindow::Edge E = P.first; + bool AboveThreshold = E.second == BitRateWindow::State::AboveThreshold; + + rrddim_set_by_pointer(RS, AboveThresholdRD, AboveThreshold); + rrddim_set_by_pointer(RS, ResetBitCounterRD, ResetBitCounter); + rrddim_set_by_pointer(RS, NewAnomalyEventRD, NewAnomalyEvent); + + rrdset_done(RS); +} + +static void updateDetectionChart(RRDHOST *RH, collected_number PredictionDuration) { + static thread_local RRDSET *RS = nullptr; + static thread_local RRDDIM *PredictionDurationRD = nullptr; + + if (!RS) { + RS = rrdset_create( + RH, // host + "anomaly_detection", // type + "prediction_stats", // id + NULL, // name + "prediction_stats", // family + NULL, // ctx + "Time it took to run prediction", // title + "milliseconds", // units + "netdata", // plugin + "ml", // module + 39187, // priority + RH->rrd_update_every, // update_every + RRDSET_TYPE_LINE // chart_type + ); + + PredictionDurationRD = rrddim_add(RS, "duration", NULL, + 1, 1, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(RS); + + rrddim_set_by_pointer(RS, PredictionDurationRD, PredictionDuration); + + rrdset_done(RS); +} + +static void
updateTrainingChart(RRDHOST *RH, + collected_number TotalTrainingDuration, + collected_number MaxTrainingDuration) +{ + static thread_local RRDSET *RS = nullptr; + static thread_local RRDDIM *TotalTrainingDurationRD = nullptr; + static thread_local RRDDIM *MaxTrainingDurationRD = nullptr; + + if (!RS) { + RS = rrdset_create( + RH, // host + "anomaly_detection", // type + "training_stats", // id + NULL, // name + "training_stats", // family + NULL, // ctx + "Training step statistics", // title + "milliseconds", // units + "netdata", // plugin + "ml", // module + 39188, // priority + RH->rrd_update_every, // update_every + RRDSET_TYPE_LINE // chart_type + ); + + TotalTrainingDurationRD = rrddim_add(RS, "total_training_duration", NULL, + 1, 1, RRD_ALGORITHM_ABSOLUTE); + MaxTrainingDurationRD = rrddim_add(RS, "max_training_duration", NULL, + 1, 1, RRD_ALGORITHM_ABSOLUTE); + } else + rrdset_next(RS); + + rrddim_set_by_pointer(RS, TotalTrainingDurationRD, TotalTrainingDuration); + rrddim_set_by_pointer(RS, MaxTrainingDurationRD, MaxTrainingDuration); + + rrdset_done(RS); +} + +void RrdHost::addDimension(Dimension *D) { + std::lock_guard<std::mutex> Lock(Mutex); + + DimensionsMap[D->getRD()] = D; + + // Default construct mutex for dimension + LocksMap[D]; +} + +void RrdHost::removeDimension(Dimension *D) { + // Remove the dimension from the hosts map. + { + std::lock_guard<std::mutex> Lock(Mutex); + DimensionsMap.erase(D->getRD()); + } + + // Delete the dimension by locking the mutex that protects it. + { + std::lock_guard<std::mutex> Lock(LocksMap[D]); + delete D; + } + + // Remove the lock entry for the deleted dimension. + { + std::lock_guard<std::mutex> Lock(Mutex); + LocksMap.erase(D); + } +} + +void RrdHost::getConfigAsJson(nlohmann::json &Json) const { + Json["version"] = 1; + + Json["enabled"] = Cfg.EnableAnomalyDetection; + + Json["min-train-samples"] = Cfg.MinTrainSamples; + Json["max-train-samples"] = Cfg.MaxTrainSamples; + Json["train-every"] = Cfg.TrainEvery; + + Json["diff-n"] = Cfg.DiffN; + Json["smooth-n"] = Cfg.SmoothN; + Json["lag-n"] = Cfg.LagN; + + Json["max-kmeans-iters"] = Cfg.MaxKMeansIters; + + Json["dimension-anomaly-score-threshold"] = Cfg.DimensionAnomalyScoreThreshold; + Json["host-anomaly-rate-threshold"] = Cfg.HostAnomalyRateThreshold; + + Json["min-window-size"] = Cfg.ADMinWindowSize; + Json["max-window-size"] = Cfg.ADMaxWindowSize; + Json["idle-window-size"] = Cfg.ADIdleWindowSize; + Json["window-rate-threshold"] = Cfg.ADWindowRateThreshold; + Json["dimension-rate-threshold"] = Cfg.ADDimensionRateThreshold; +} + +std::pair<Dimension *, Duration<double>> +TrainableHost::findDimensionToTrain(const TimePoint &NowTP) { + std::lock_guard<std::mutex> Lock(Mutex); + + Duration<double> AllottedDuration = Duration<double>{Cfg.TrainEvery * updateEvery()} / (DimensionsMap.size() + 1); + + for (auto &DP : DimensionsMap) { + Dimension *D = DP.second; + + if (D->shouldTrain(NowTP)) { + LocksMap[D].lock(); + return { D, AllottedDuration }; + } + } + + return { nullptr, AllottedDuration }; +} + +void TrainableHost::trainDimension(Dimension *D, const TimePoint &NowTP) { + if (D == nullptr) + return; + + D->LastTrainedAt = NowTP + Seconds{D->updateEvery()}; + + TimePoint StartTP = SteadyClock::now(); + D->trainModel(); + Duration<double> Duration = SteadyClock::now() - StartTP; + D->updateTrainingDuration(Duration.count()); + + { + std::lock_guard<std::mutex> Lock(Mutex); + LocksMap[D].unlock(); + } +} + +void TrainableHost::train() { + Duration<double> MaxSleepFor = Seconds{updateEvery()}; + + while (!netdata_exit) { + TimePoint NowTP = SteadyClock::now(); + + auto P = findDimensionToTrain(NowTP); + 
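// P.second is the per-dimension training budget: the configured training + // period is divided evenly across all dimensions, so if, for example, + // Cfg.TrainEvery * updateEvery() comes to 4 hours on a host with 999 + // dimensions, each step is allotted 14400s / 1000 = 14.4s before the + // trainer goes back to sleep. + 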
trainDimension(P.first, NowTP); + + Duration<double> AllottedDuration = P.second; + Duration<double> RealDuration = SteadyClock::now() - NowTP; + + Duration<double> SleepFor; + if (RealDuration >= AllottedDuration) + continue; + + SleepFor = std::min(AllottedDuration - RealDuration, MaxSleepFor); + std::this_thread::sleep_for(SleepFor); + } +} + +void DetectableHost::detectOnce() { + auto P = BRW.insert(AnomalyRate >= Cfg.HostAnomalyRateThreshold); + BitRateWindow::Edge Edge = P.first; + size_t WindowLength = P.second; + + bool ResetBitCounter = (Edge.first != BitRateWindow::State::AboveThreshold); + bool NewAnomalyEvent = (Edge.first == BitRateWindow::State::AboveThreshold) && + (Edge.second == BitRateWindow::State::Idle); + + std::vector<std::pair<double, std::string>> DimsOverThreshold; + + size_t NumAnomalousDimensions = 0; + size_t NumNormalDimensions = 0; + size_t NumTrainedDimensions = 0; + + double TotalTrainingDuration = 0.0; + double MaxTrainingDuration = 0.0; + + { + std::lock_guard<std::mutex> Lock(Mutex); + + DimsOverThreshold.reserve(DimensionsMap.size()); + + for (auto &DP : DimensionsMap) { + Dimension *D = DP.second; + + auto P = D->detect(WindowLength, ResetBitCounter); + bool IsAnomalous = P.first; + double AnomalyRate = P.second; + + NumTrainedDimensions += D->isTrained(); + + double DimTrainingDuration = D->updateTrainingDuration(0.0); + MaxTrainingDuration = std::max(MaxTrainingDuration, DimTrainingDuration); + TotalTrainingDuration += DimTrainingDuration; + + if (IsAnomalous) + NumAnomalousDimensions += 1; + + if (NewAnomalyEvent && (AnomalyRate >= Cfg.ADDimensionRateThreshold)) + DimsOverThreshold.push_back({ AnomalyRate, D->getID() }); + } + + if (NumAnomalousDimensions) + AnomalyRate = static_cast<double>(NumAnomalousDimensions) / DimensionsMap.size(); + else + AnomalyRate = 0.0; + + NumNormalDimensions = DimensionsMap.size() - NumAnomalousDimensions; + } + + this->NumAnomalousDimensions = NumAnomalousDimensions; + this->NumNormalDimensions = NumNormalDimensions; + this->NumTrainedDimensions = NumTrainedDimensions; + + updateDimensionsChart(getRH(), NumTrainedDimensions, NumNormalDimensions, NumAnomalousDimensions); + updateRateChart(getRH(), AnomalyRate * 10000.0); + updateWindowLengthChart(getRH(), WindowLength); + updateEventsChart(getRH(), P, ResetBitCounter, NewAnomalyEvent); + updateTrainingChart(getRH(), TotalTrainingDuration * 1000.0, MaxTrainingDuration * 1000.0); + + if (!NewAnomalyEvent || (DimsOverThreshold.size() == 0)) + return; + + std::sort(DimsOverThreshold.begin(), DimsOverThreshold.end()); + std::reverse(DimsOverThreshold.begin(), DimsOverThreshold.end()); + + // Make sure the JSON response won't grow beyond a specific number + // of dimensions. Log an error message if this happens, because it + // most likely means that the user specified a very low anomaly rate + // threshold. + size_t NumMaxDimsOverThreshold = 2000; + if (DimsOverThreshold.size() > NumMaxDimsOverThreshold) { + error("Found %zu dimensions over threshold. 
Reducing JSON result to %zu dimensions.", + DimsOverThreshold.size(), NumMaxDimsOverThreshold); + DimsOverThreshold.resize(NumMaxDimsOverThreshold); + } + + nlohmann::json JsonResult = DimsOverThreshold; + + time_t Before = now_realtime_sec(); + time_t After = Before - (WindowLength * updateEvery()); + DB.insertAnomaly("AD1", 1, getUUID(), After, Before, JsonResult.dump(4)); +} + +void DetectableHost::detect() { + std::this_thread::sleep_for(Seconds{10}); + + while (!netdata_exit) { + TimePoint StartTP = SteadyClock::now(); + detectOnce(); + TimePoint EndTP = SteadyClock::now(); + + Duration<double> Dur = EndTP - StartTP; + updateDetectionChart(getRH(), Dur.count() * 1000); + + std::this_thread::sleep_for(Seconds{updateEvery()}); + } +} + +void DetectableHost::getDetectionInfoAsJson(nlohmann::json &Json) const { + Json["anomalous-dimensions"] = NumAnomalousDimensions; + Json["normal-dimensions"] = NumNormalDimensions; + Json["total-dimensions"] = NumAnomalousDimensions + NumNormalDimensions; + Json["trained-dimensions"] = NumTrainedDimensions; +} + +void DetectableHost::startAnomalyDetectionThreads() { + TrainingThread = std::thread(&TrainableHost::train, this); + DetectionThread = std::thread(&DetectableHost::detect, this); +} + +void DetectableHost::stopAnomalyDetectionThreads() { + TrainingThread.join(); + DetectionThread.join(); +} diff --git a/ml/Host.h b/ml/Host.h new file mode 100644 index 000000000..86591d7ae --- /dev/null +++ b/ml/Host.h @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef ML_HOST_H +#define ML_HOST_H + +#include "BitRateWindow.h" +#include "Config.h" +#include "Database.h" +#include "Dimension.h" + +#include "ml-private.h" + +namespace ml { + +class RrdHost { +public: + RrdHost(RRDHOST *RH) : RH(RH) {} + + RRDHOST *getRH() { return RH; } + + unsigned updateEvery() { return RH->rrd_update_every; } + + std::string getUUID() { + char S[UUID_STR_LEN]; + uuid_unparse_lower(RH->host_uuid, S); + return S; + } + + void addDimension(Dimension *D); + void removeDimension(Dimension *D); + + void getConfigAsJson(nlohmann::json &Json) const; + + virtual ~RrdHost() {}; + +protected: + RRDHOST *RH; + + // Protect dimension and lock maps + std::mutex Mutex; + + std::map<RRDDIM *, Dimension *> DimensionsMap; + std::map<Dimension *, std::mutex> LocksMap; +}; + +class TrainableHost : public RrdHost { +public: + TrainableHost(RRDHOST *RH) : RrdHost(RH) {} + + void train(); + +private: + std::pair<Dimension *, Duration<double>> findDimensionToTrain(const TimePoint &NowTP); + void trainDimension(Dimension *D, const TimePoint &NowTP); +}; + +class DetectableHost : public TrainableHost { +public: + DetectableHost(RRDHOST *RH) : TrainableHost(RH) {} + + void startAnomalyDetectionThreads(); + void stopAnomalyDetectionThreads(); + + template<typename ...ArgTypes> + bool getAnomalyInfo(ArgTypes&&... Args) { + return DB.getAnomalyInfo(Args...); + } + + template<typename ...ArgTypes> + bool getAnomaliesInRange(ArgTypes&&...
Args) { + return DB.getAnomaliesInRange(Args...); + } + + void getDetectionInfoAsJson(nlohmann::json &Json) const; + +private: + void detect(); + void detectOnce(); + +private: + std::thread TrainingThread; + std::thread DetectionThread; + + BitRateWindow BRW{ + static_cast<size_t>(Cfg.ADMinWindowSize), + static_cast<size_t>(Cfg.ADMaxWindowSize), + static_cast<size_t>(Cfg.ADIdleWindowSize), + static_cast<size_t>(Cfg.ADMinWindowSize * Cfg.ADWindowRateThreshold) + }; + + CalculatedNumber AnomalyRate{0.0}; + + size_t NumAnomalousDimensions{0}; + size_t NumNormalDimensions{0}; + size_t NumTrainedDimensions{0}; + + Database DB{Cfg.AnomalyDBPath}; +}; + +using Host = DetectableHost; + +} // namespace ml + +#endif /* ML_HOST_H */ diff --git a/ml/Makefile.am b/ml/Makefile.am new file mode 100644 index 000000000..27449d659 --- /dev/null +++ b/ml/Makefile.am @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in + +SUBDIRS = \ + kmeans \ + $(NULL) diff --git a/ml/Query.h b/ml/Query.h new file mode 100644 index 000000000..cbaf6c297 --- /dev/null +++ b/ml/Query.h @@ -0,0 +1,49 @@ +#ifndef QUERY_H +#define QUERY_H + +#include "ml-private.h" + +namespace ml { + +class Query { +public: + Query(RRDDIM *RD) : RD(RD) { + Ops = &RD->state->query_ops; + } + + time_t latestTime() { + return Ops->latest_time(RD); + } + + time_t oldestTime() { + return Ops->oldest_time(RD); + } + + void init(time_t AfterT, time_t BeforeT) { + Ops->init(RD, &Handle, AfterT, BeforeT); + } + + bool isFinished() { + return Ops->is_finished(&Handle); + } + + std::pair<time_t, storage_number> nextMetric() { + time_t CurrT; + storage_number SN = Ops->next_metric(&Handle, &CurrT); + return { CurrT, SN }; + } + + ~Query() { + Ops->finalize(&Handle); + } + +private: + RRDDIM *RD; + + struct rrddim_volatile::rrddim_query_ops *Ops; + struct rrddim_query_handle Handle; +}; + +} // namespace ml + +#endif /* QUERY_H */ diff --git a/ml/Tests.cc b/ml/Tests.cc new file mode 100644 index 000000000..7d369d48d --- /dev/null +++ b/ml/Tests.cc @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "BitBufferCounter.h" +#include "BitRateWindow.h" + +#include "gtest/gtest.h" + +using namespace ml; + +TEST(BitBufferCounterTest, Cap_4) { + size_t Capacity = 4; + BitBufferCounter BBC(Capacity); + + // No bits set + EXPECT_EQ(BBC.numSetBits(), 0); + + // All ones + for (size_t Idx = 0; Idx != (2 * Capacity); Idx++) { + BBC.insert(true); + + EXPECT_EQ(BBC.numSetBits(), std::min(Idx + 1, Capacity)); + } + + // All zeroes + for (size_t Idx = 0; Idx != Capacity; Idx++) { + BBC.insert(false); + + if (Idx < Capacity) + EXPECT_EQ(BBC.numSetBits(), Capacity - (Idx + 1)); + else + EXPECT_EQ(BBC.numSetBits(), 0); + } + + // Even ones/zeroes + for (size_t Idx = 0; Idx != (2 * Capacity); Idx++) + BBC.insert(Idx % 2 == 0); + EXPECT_EQ(BBC.numSetBits(), Capacity / 2); +} + +using State = BitRateWindow::State; +using Edge = BitRateWindow::Edge; +using Result = std::pair<Edge, size_t>; + +TEST(BitRateWindowTest, Cycles) { + /* Test the FSM by going through its two cycles: + * 1) NotFilled -> AboveThreshold -> Idle -> NotFilled + * 2) NotFilled -> BelowThreshold -> AboveThreshold -> Idle -> NotFilled + * + * Check the window's length on every new state transition.
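+ * + * A sketch of the transitions exercised below: + * - NotFilled fills up to MinLength samples, then moves to Above- or + * BelowThreshold depending on the number of set bits, + * - BelowThreshold slides at MinLength until enough set bits arrive, + * - AboveThreshold grows up to MaxLength, or ends early when the set-bit + * count drops back below the threshold, + * - Idle waits for IdleLength samples, then resets to NotFilled.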
+ */ + + size_t MinLength = 4, MaxLength = 6, IdleLength = 5; + size_t SetBitsThreshold = 3; + + Result R; + BitRateWindow BRW(MinLength, MaxLength, IdleLength, SetBitsThreshold); + + /* + * 1st cycle + */ + + // NotFilled -> AboveThreshold + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::NotFilled)); + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::NotFilled)); + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::NotFilled)); + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::AboveThreshold)); + EXPECT_EQ(R.second, MinLength); + + // AboveThreshold -> Idle + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::AboveThreshold, State::AboveThreshold)); + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::AboveThreshold, State::AboveThreshold)); + + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::AboveThreshold, State::Idle)); + EXPECT_EQ(R.second, MaxLength); + + + // Idle -> NotFilled + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::Idle, State::Idle)); + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::Idle, State::Idle)); + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::Idle, State::Idle)); + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::Idle, State::Idle)); + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::Idle, State::NotFilled)); + EXPECT_EQ(R.second, 1); + + // NotFilled -> AboveThreshold + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::NotFilled)); + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::NotFilled)); + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::AboveThreshold)); + EXPECT_EQ(R.second, MinLength); + + /* + * 2nd cycle + */ + + BRW = BitRateWindow(MinLength, MaxLength, IdleLength, SetBitsThreshold); + + // NotFilled -> BelowThreshold + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::NotFilled)); + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::NotFilled)); + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::NotFilled)); + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::BelowThreshold)); + EXPECT_EQ(R.second, MinLength); + + // BelowThreshold -> BelowThreshold: + // Check the state's self loop by adding set bits that will keep the + // bit buffer below the specified threshold. + // + for (size_t Idx = 0; Idx != 2 * MaxLength; Idx++) { + R = BRW.insert(Idx % 2 == 0); + EXPECT_EQ(R.first, std::make_pair(State::BelowThreshold, State::BelowThreshold)); + EXPECT_EQ(R.second, MinLength); + } + + // Verify that at the end of the loop the internal bit buffer contains + // "1010". Do so by adding one set bit and checking that we remain below + // the specified threshold. + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::BelowThreshold, State::BelowThreshold)); + EXPECT_EQ(R.second, MinLength); + + // BelowThreshold -> AboveThreshold + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::BelowThreshold, State::AboveThreshold)); + EXPECT_EQ(R.second, MinLength); + + // AboveThreshold -> Idle: + // Do the transition without filling the max window size this time. 
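+ // A single cleared bit is enough here: it drops the set-bit count back + // below SetBitsThreshold while the window is still shorter than MaxLength, + // so the FSM reaches Idle without ever filling up.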
+ R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::AboveThreshold, State::Idle)); + EXPECT_EQ(R.second, MinLength); + + // Idle -> NotFilled + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::Idle, State::Idle)); + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::Idle, State::Idle)); + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::Idle, State::Idle)); + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::Idle, State::Idle)); + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::Idle, State::NotFilled)); + EXPECT_EQ(R.second, 1); + + // NotFilled -> AboveThreshold + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::NotFilled)); + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::NotFilled)); + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::AboveThreshold)); + EXPECT_EQ(R.second, MinLength); +} + +TEST(BitRateWindowTest, ConsecutiveOnes) { + size_t MinLength = 120, MaxLength = 240, IdleLength = 30; + size_t SetBitsThreshold = 30; + + Result R; + BitRateWindow BRW(MinLength, MaxLength, IdleLength, SetBitsThreshold); + + for (size_t Idx = 0; Idx != MaxLength; Idx++) + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::BelowThreshold, State::BelowThreshold)); + EXPECT_EQ(R.second, MinLength); + + for (size_t Idx = 0; Idx != SetBitsThreshold; Idx++) { + EXPECT_EQ(R.first, std::make_pair(State::BelowThreshold, State::BelowThreshold)); + R = BRW.insert(true); + } + EXPECT_EQ(R.first, std::make_pair(State::BelowThreshold, State::AboveThreshold)); + EXPECT_EQ(R.second, MinLength); + + // At this point the window's buffer contains: + // (MinLength - SetBitsThreshold = 90) 0s, followed by + // (SetBitsThreshold = 30) 1s. + // + // To go below the threshold, we need to add (90 + 1) more 0s to the window's + // buffer. At that point, the window's buffer will contain: + // (SetBitsThreshold - 1 = 29) 1s, followed by + // (MinLength - SetBitsThreshold + 1 = 91) 0s. + // + // Right before adding the last 0, we expect the window's length to be equal to 210, + // because the bit buffer has gone through these bits: + // (MinLength - SetBitsThreshold = 90) 0s, followed by + // (SetBitsThreshold = 30) 1s, followed by + // (MinLength - SetBitsThreshold = 90) 0s. + + for (size_t Idx = 0; Idx != (MinLength - SetBitsThreshold); Idx++) { + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::AboveThreshold, State::AboveThreshold)); + } + EXPECT_EQ(R.second, 2 * MinLength - SetBitsThreshold); + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::AboveThreshold, State::Idle)); + + // Continue with the Idle -> NotFilled edge.
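+ // The detector stays Idle for exactly IdleLength samples: the first + // IdleLength - 1 inserts keep it in place and the next one resets the + // window to a single-sample NotFilled state, hence R.second == 1.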
+ for (size_t Idx = 0; Idx != IdleLength - 1; Idx++) { + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::Idle, State::Idle)); + } + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::Idle, State::NotFilled)); + EXPECT_EQ(R.second, 1); +} + +TEST(BitRateWindowTest, WithHoles) { + size_t MinLength = 120, MaxLength = 240, IdleLength = 30; + size_t SetBitsThreshold = 30; + + Result R; + BitRateWindow BRW(MinLength, MaxLength, IdleLength, SetBitsThreshold); + + for (size_t Idx = 0; Idx != MaxLength; Idx++) + R = BRW.insert(false); + + for (size_t Idx = 0; Idx != SetBitsThreshold / 3; Idx++) + R = BRW.insert(true); + for (size_t Idx = 0; Idx != SetBitsThreshold / 3; Idx++) + R = BRW.insert(false); + for (size_t Idx = 0; Idx != SetBitsThreshold / 3; Idx++) + R = BRW.insert(true); + for (size_t Idx = 0; Idx != SetBitsThreshold / 3; Idx++) + R = BRW.insert(false); + for (size_t Idx = 0; Idx != SetBitsThreshold / 3; Idx++) + R = BRW.insert(true); + + EXPECT_EQ(R.first, std::make_pair(State::BelowThreshold, State::AboveThreshold)); + EXPECT_EQ(R.second, MinLength); + + // The window's bit buffer contains: + // 70 0s, 10 1s, 10 0s, 10 1s, 10 0s, 10 1s. + // Where: 70 = MinLength - (5/3) * SetBitsThreshold, i.e. we need + // to add (70 + 1) more zeros to make the bit buffer go below the + // threshold and then the window's length should be: + // 70 + 50 + 70 = 190. + + BitRateWindow::Edge E; + do { + R = BRW.insert(false); + E = R.first; + } while (E.first != State::AboveThreshold || E.second != State::Idle); + EXPECT_EQ(R.second, 2 * MinLength - (5 * SetBitsThreshold) / 3); +} + +TEST(BitRateWindowTest, MinWindow) { + size_t MinLength = 120, MaxLength = 240, IdleLength = 30; + size_t SetBitsThreshold = 30; + + Result R; + BitRateWindow BRW(MinLength, MaxLength, IdleLength, SetBitsThreshold); + + BRW.insert(true); + BRW.insert(false); + for (size_t Idx = 2; Idx != SetBitsThreshold; Idx++) + BRW.insert(true); + for (size_t Idx = SetBitsThreshold; Idx != MinLength - 1; Idx++) + BRW.insert(false); + + R = BRW.insert(true); + EXPECT_EQ(R.first, std::make_pair(State::NotFilled, State::AboveThreshold)); + EXPECT_EQ(R.second, MinLength); + + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::AboveThreshold, State::Idle)); +} + +TEST(BitRateWindowTest, MaxWindow) { + size_t MinLength = 100, MaxLength = 200, IdleLength = 30; + size_t SetBitsThreshold = 50; + + Result R; + BitRateWindow BRW(MinLength, MaxLength, IdleLength, SetBitsThreshold); + + for (size_t Idx = 0; Idx != MaxLength; Idx++) + R = BRW.insert(Idx % 2 == 0); + EXPECT_EQ(R.first, std::make_pair(State::AboveThreshold, State::AboveThreshold)); + EXPECT_EQ(R.second, MaxLength); + + R = BRW.insert(false); + EXPECT_EQ(R.first, std::make_pair(State::AboveThreshold, State::Idle)); +} diff --git a/ml/kmeans/KMeans.cc b/ml/kmeans/KMeans.cc new file mode 100644 index 000000000..e66c66c16 --- /dev/null +++ b/ml/kmeans/KMeans.cc @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "KMeans.h" +#include <dlib/clustering.h> + +void KMeans::train(SamplesBuffer &SB, size_t MaxIterations) { + std::vector<DSample> Samples = SB.preprocess(); + + MinDist = std::numeric_limits<CalculatedNumber>::max(); + MaxDist = std::numeric_limits<CalculatedNumber>::min(); + + { + std::lock_guard<std::mutex> Lock(Mutex); + + ClusterCenters.clear(); + + dlib::pick_initial_centers(NumClusters, ClusterCenters, Samples); + dlib::find_clusters_using_kmeans(Samples, ClusterCenters, MaxIterations); + + for (const auto &S : Samples) { + CalculatedNumber MeanDist = 0.0; + + for (const auto
&KMCenter : ClusterCenters) + MeanDist += dlib::length(KMCenter - S); + + MeanDist /= NumClusters; + + if (MeanDist < MinDist) + MinDist = MeanDist; + + if (MeanDist > MaxDist) + MaxDist = MeanDist; + } + } +} + +CalculatedNumber KMeans::anomalyScore(SamplesBuffer &SB) { + std::vector<DSample> DSamples = SB.preprocess(); + + std::unique_lock<std::mutex> Lock(Mutex, std::defer_lock); + if (!Lock.try_lock()) + return std::numeric_limits<CalculatedNumber>::quiet_NaN(); + + CalculatedNumber MeanDist = 0.0; + for (const auto &CC: ClusterCenters) + MeanDist += dlib::length(CC - DSamples.back()); + + MeanDist /= NumClusters; + + if (MaxDist == MinDist) + return 0.0; + + CalculatedNumber AnomalyScore = 100.0 * std::abs((MeanDist - MinDist) / (MaxDist - MinDist)); + return (AnomalyScore > 100.0) ? 100.0 : AnomalyScore; +} diff --git a/ml/kmeans/KMeans.h b/ml/kmeans/KMeans.h new file mode 100644 index 000000000..4ea3b6a89 --- /dev/null +++ b/ml/kmeans/KMeans.h @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef KMEANS_H +#define KMEANS_H + +#include <cstddef> +#include <limits> +#include <mutex> +#include <vector> + +#include "SamplesBuffer.h" + +class KMeans { +public: + KMeans(size_t NumClusters = 2) : NumClusters(NumClusters) { + MinDist = std::numeric_limits<CalculatedNumber>::max(); + MaxDist = std::numeric_limits<CalculatedNumber>::min(); + }; + + void train(SamplesBuffer &SB, size_t MaxIterations); + CalculatedNumber anomalyScore(SamplesBuffer &SB); + +private: + size_t NumClusters; + + std::vector<DSample> ClusterCenters; + + CalculatedNumber MinDist; + CalculatedNumber MaxDist; + + std::mutex Mutex; +}; + +#endif /* KMEANS_H */ diff --git a/ml/kmeans/Makefile.am b/ml/kmeans/Makefile.am new file mode 100644 index 000000000..babdcf0df --- /dev/null +++ b/ml/kmeans/Makefile.am @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +AUTOMAKE_OPTIONS = subdir-objects +MAINTAINERCLEANFILES = $(srcdir)/Makefile.in diff --git a/ml/kmeans/SamplesBuffer.cc b/ml/kmeans/SamplesBuffer.cc new file mode 100644 index 000000000..f8211fb54 --- /dev/null +++ b/ml/kmeans/SamplesBuffer.cc @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +// +#include "SamplesBuffer.h" + +#include <algorithm> +#include <cassert> +#include <ostream> + +void Sample::print(std::ostream &OS) const { + for (size_t Idx = 0; Idx != NumDims - 1; Idx++) + OS << CNs[Idx] << ", "; + + OS << CNs[NumDims - 1]; +} + +void SamplesBuffer::print(std::ostream &OS) const { + for (size_t Idx = Preprocessed ? (DiffN + (SmoothN - 1) + (LagN)) : 0; + Idx != NumSamples; Idx++) { + Sample S = Preprocessed ? getPreprocessedSample(Idx) : getSample(Idx); + OS << S << std::endl; + } +} + +std::vector<Sample> SamplesBuffer::getPreprocessedSamples() const { + std::vector<Sample> V; + + for (size_t Idx = Preprocessed ? (DiffN + (SmoothN - 1) + (LagN)) : 0; + Idx != NumSamples; Idx++) { + Sample S = Preprocessed ? getPreprocessedSample(Idx) : getSample(Idx); + V.push_back(S); + } + + return V; +} + +void SamplesBuffer::diffSamples() { + // For a pandas DataFrame, `diff(0)` subtracts each element from itself. + // For us, `DiffN = 0` instead means "disable diff-ing" when preprocessing + // the samples buffer. This deviation makes it easier for us to test + // the KMeans implementation.
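+ // + // For example, with DiffN = 1 the series {3, 5, 9} becomes {2, 4} + // (each element minus the one DiffN positions before it), which matches + // pandas' df.diff(1).dropna().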
+ if (DiffN == 0) + return; + + for (size_t Idx = 0; Idx != (NumSamples - DiffN); Idx++) { + size_t High = (NumSamples - 1) - Idx; + size_t Low = High - DiffN; + + Sample LHS = getSample(High); + Sample RHS = getSample(Low); + + LHS.diff(RHS); + } +} + +void SamplesBuffer::smoothSamples() { + // Holds the mean value of each window + CalculatedNumber *AccCNs = new CalculatedNumber[NumDimsPerSample](); + Sample Acc(AccCNs, NumDimsPerSample); + + // Used to avoid clobbering the accumulator when moving the window + CalculatedNumber *TmpCNs = new CalculatedNumber[NumDimsPerSample](); + Sample Tmp(TmpCNs, NumDimsPerSample); + + CalculatedNumber Factor = (CalculatedNumber) 1 / SmoothN; + + // Calculate the value of the 1st window + for (size_t Idx = 0; Idx != std::min(SmoothN, NumSamples); Idx++) { + Tmp.add(getSample(NumSamples - (Idx + 1))); + } + + Acc.add(Tmp); + Acc.scale(Factor); + + // Move the window and update the samples + for (size_t Idx = NumSamples; Idx != (DiffN + SmoothN - 1); Idx--) { + Sample S = getSample(Idx - 1); + + // Tmp <- Next window (if any) + if (Idx >= (SmoothN + 1)) { + Tmp.diff(S); + Tmp.add(getSample(Idx - (SmoothN + 1))); + } + + // S <- Acc + S.copy(Acc); + + // Acc <- Tmp + Acc.copy(Tmp); + Acc.scale(Factor); + } + + delete[] AccCNs; + delete[] TmpCNs; +} + +void SamplesBuffer::lagSamples() { + if (LagN == 0) + return; + + for (size_t Idx = NumSamples; Idx != LagN; Idx--) { + Sample PS = getPreprocessedSample(Idx - 1); + PS.lag(getSample(Idx - 1), LagN); + } +} + +std::vector<DSample> SamplesBuffer::preprocess() { + assert(Preprocessed == false); + + std::vector<DSample> DSamples; + size_t OutN = NumSamples; + + // Diff + if (DiffN >= OutN) + return DSamples; + OutN -= DiffN; + diffSamples(); + + // Smooth + if (SmoothN == 0 || SmoothN > OutN) + return DSamples; + OutN -= (SmoothN - 1); + smoothSamples(); + + // Lag + if (LagN >= OutN) + return DSamples; + OutN -= LagN; + lagSamples(); + + DSamples.reserve(OutN); + Preprocessed = true; + + for (size_t Idx = NumSamples - OutN; Idx != NumSamples; Idx++) { + DSample DS; + DS.set_size(NumDimsPerSample * (LagN + 1)); + + const Sample PS = getPreprocessedSample(Idx); + PS.initDSample(DS); + + DSamples.push_back(DS); + } + + return DSamples; +} diff --git a/ml/kmeans/SamplesBuffer.h b/ml/kmeans/SamplesBuffer.h new file mode 100644 index 000000000..fccd216d5 --- /dev/null +++ b/ml/kmeans/SamplesBuffer.h @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef SAMPLES_BUFFER_H +#define SAMPLES_BUFFER_H + +#include <cassert> +#include <cstddef> + +#include <cstring> +#include <ostream> +#include <vector> + +#include <dlib/matrix.h> + +typedef double CalculatedNumber; +typedef dlib::matrix<CalculatedNumber, 0, 1> DSample; + +class Sample { +public: + Sample(CalculatedNumber *Buf, size_t N) : CNs(Buf), NumDims(N) {} + + void initDSample(DSample &DS) const { + for (size_t Idx = 0; Idx != NumDims; Idx++) + DS(Idx) = CNs[Idx]; + } + + void add(const Sample &RHS) const { + assert(NumDims == RHS.NumDims); + + for (size_t Idx = 0; Idx != NumDims; Idx++) + CNs[Idx] += RHS.CNs[Idx]; + }; + + void diff(const Sample &RHS) const { + assert(NumDims == RHS.NumDims); + + for (size_t Idx = 0; Idx != NumDims; Idx++) + CNs[Idx] -= RHS.CNs[Idx]; + }; + + void copy(const Sample &RHS) const { + assert(NumDims == RHS.NumDims); + + std::memcpy(CNs, RHS.CNs, NumDims * sizeof(CalculatedNumber)); + } + + void scale(CalculatedNumber Factor) { + for (size_t Idx = 0; Idx != NumDims; Idx++) + CNs[Idx] *= Factor; + } + + void lag(const Sample &S, size_t LagN) { + size_t N = S.NumDims; + + for (size_t Idx = 0; Idx != (LagN + 1); Idx++) {
Sample Src(S.CNs - (Idx * N), N); + Sample Dst(CNs + (Idx * N), N); + Dst.copy(Src); + } + } + + const CalculatedNumber *getCalculatedNumbers() const { + return CNs; + }; + + void print(std::ostream &OS) const; + +private: + CalculatedNumber *CNs; + size_t NumDims; +}; + +inline std::ostream& operator<<(std::ostream &OS, const Sample &S) { + S.print(OS); + return OS; +} + +class SamplesBuffer { +public: + SamplesBuffer(CalculatedNumber *CNs, + size_t NumSamples, size_t NumDimsPerSample, + size_t DiffN = 1, size_t SmoothN = 3, size_t LagN = 3) : + CNs(CNs), NumSamples(NumSamples), NumDimsPerSample(NumDimsPerSample), + DiffN(DiffN), SmoothN(SmoothN), LagN(LagN), + BytesPerSample(NumDimsPerSample * sizeof(CalculatedNumber)), + Preprocessed(false) {}; + + std::vector<DSample> preprocess(); + std::vector<Sample> getPreprocessedSamples() const; + + size_t capacity() const { return NumSamples; } + void print(std::ostream &OS) const; + +private: + size_t getSampleOffset(size_t Index) const { + assert(Index < NumSamples); + return Index * NumDimsPerSample; + } + + size_t getPreprocessedSampleOffset(size_t Index) const { + assert(Index < NumSamples); + return getSampleOffset(Index) * (LagN + 1); + } + + void setSample(size_t Index, const Sample &S) const { + size_t Offset = getSampleOffset(Index); + std::memcpy(&CNs[Offset], S.getCalculatedNumbers(), BytesPerSample); + } + + const Sample getSample(size_t Index) const { + size_t Offset = getSampleOffset(Index); + return Sample(&CNs[Offset], NumDimsPerSample); + }; + + const Sample getPreprocessedSample(size_t Index) const { + size_t Offset = getPreprocessedSampleOffset(Index); + return Sample(&CNs[Offset], NumDimsPerSample * (LagN + 1)); + }; + + void diffSamples(); + void smoothSamples(); + void lagSamples(); + +private: + CalculatedNumber *CNs; + size_t NumSamples; + size_t NumDimsPerSample; + size_t DiffN; + size_t SmoothN; + size_t LagN; + size_t BytesPerSample; + bool Preprocessed; +}; + +inline std::ostream& operator<<(std::ostream& OS, const SamplesBuffer &SB) { + SB.print(OS); + return OS; +} + +#endif /* SAMPLES_BUFFER_H */ diff --git a/ml/kmeans/Tests.cc b/ml/kmeans/Tests.cc new file mode 100644 index 000000000..0cb595945 --- /dev/null +++ b/ml/kmeans/Tests.cc @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "ml/ml-private.h" +#include "gtest/gtest.h" + +/* + * The SamplesBuffer class implements the functionality of the following python + * code: + * >> df = pd.DataFrame(data=samples) + * >> df = df.diff(diff_n).dropna() + * >> df = df.rolling(smooth_n).mean().dropna() + * >> df = pd.concat([df.shift(n) for n in range(lag_n + 1)], axis=1).dropna() + * + * Its correctness has been verified by automatically generating random + * data frames in Python and comparing them with the corresponding preprocessed + * SamplesBuffers. + * + * The following tests are meant to catch unintended changes in the SamplesBuffer + * implementation. For development purposes, one should compare changes against + * the aforementioned python code.
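+ * + * As a worked example: in the first test below, NumSamples = 8 with + * DiffN = 1, SmoothN = 3 and LagN = 3, and preprocessing consumes + * DiffN + (SmoothN - 1) + LagN = 6 leading samples, so only 8 - 6 = 2 + * preprocessed samples survive; this is why every test here expects + * Samples.size() == 2.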
+*/ + +TEST(SamplesBufferTest, NS_8_NDPS_1_DN_1_SN_3_LN_3) { + size_t NumSamples = 8, NumDimsPerSample = 1; + size_t DiffN = 1, SmoothN = 3, LagN = 3; + + size_t N = NumSamples * NumDimsPerSample * (LagN + 1); + CalculatedNumber *CNs = new CalculatedNumber[N](); + + CNs[0] = 0.7568336679490107; + CNs[1] = 0.4814406581763254; + CNs[2] = 0.40073555156221874; + CNs[3] = 0.5973257298194408; + CNs[4] = 0.5334727814345868; + CNs[5] = 0.2632477193454843; + CNs[6] = 0.2684839023122384; + CNs[7] = 0.851332948637479; + + SamplesBuffer SB(CNs, NumSamples, NumDimsPerSample, DiffN, SmoothN, LagN); + SB.preprocess(); + + std::vector<Sample> Samples = SB.getPreprocessedSamples(); + EXPECT_EQ(Samples.size(), 2); + + Sample S0 = Samples[0]; + const CalculatedNumber *S0_CNs = S0.getCalculatedNumbers(); + Sample S1 = Samples[1]; + const CalculatedNumber *S1_CNs = S1.getCalculatedNumbers(); + + EXPECT_NEAR(S0_CNs[0], -0.109614, 0.001); + EXPECT_NEAR(S0_CNs[1], -0.0458293, 0.001); + EXPECT_NEAR(S0_CNs[2], 0.017344, 0.001); + EXPECT_NEAR(S0_CNs[3], -0.0531693, 0.001); + + EXPECT_NEAR(S1_CNs[0], 0.105953, 0.001); + EXPECT_NEAR(S1_CNs[1], -0.109614, 0.001); + EXPECT_NEAR(S1_CNs[2], -0.0458293, 0.001); + EXPECT_NEAR(S1_CNs[3], 0.017344, 0.001); + + delete[] CNs; +} + +TEST(SamplesBufferTest, NS_8_NDPS_1_DN_2_SN_3_LN_2) { + size_t NumSamples = 8, NumDimsPerSample = 1; + size_t DiffN = 2, SmoothN = 3, LagN = 2; + + size_t N = NumSamples * NumDimsPerSample * (LagN + 1); + CalculatedNumber *CNs = new CalculatedNumber[N](); + + CNs[0] = 0.20511885291342846; + CNs[1] = 0.13151717360306558; + CNs[2] = 0.6017085062423134; + CNs[3] = 0.46256882933941545; + CNs[4] = 0.7887758447877941; + CNs[5] = 0.9237989080034406; + CNs[6] = 0.15552559051428083; + CNs[7] = 0.6309750314597955; + + SamplesBuffer SB(CNs, NumSamples, NumDimsPerSample, DiffN, SmoothN, LagN); + SB.preprocess(); + + std::vector<Sample> Samples = SB.getPreprocessedSamples(); + EXPECT_EQ(Samples.size(), 2); + + Sample S0 = Samples[0]; + const CalculatedNumber *S0_CNs = S0.getCalculatedNumbers(); + Sample S1 = Samples[1]; + const CalculatedNumber *S1_CNs = S1.getCalculatedNumbers(); + + EXPECT_NEAR(S0_CNs[0], 0.005016, 0.001); + EXPECT_NEAR(S0_CNs[1], 0.326450, 0.001); + EXPECT_NEAR(S0_CNs[2], 0.304903, 0.001); + + EXPECT_NEAR(S1_CNs[0], -0.154948, 0.001); + EXPECT_NEAR(S1_CNs[1], 0.005016, 0.001); + EXPECT_NEAR(S1_CNs[2], 0.326450, 0.001); + + delete[] CNs; +} + +TEST(SamplesBufferTest, NS_8_NDPS_3_DN_2_SN_4_LN_1) { + size_t NumSamples = 8, NumDimsPerSample = 3; + size_t DiffN = 2, SmoothN = 4, LagN = 1; + + size_t N = NumSamples * NumDimsPerSample * (LagN + 1); + CalculatedNumber *CNs = new CalculatedNumber[N](); + + CNs[0] = 0.34310900399667765; CNs[1] = 0.14694315994488194; CNs[2] = 0.8246677800938796; + CNs[3] = 0.48249504592307835; CNs[4] = 0.23241087965531182; CNs[5] = 0.9595348555892567; + CNs[6] = 0.44281094035598334; CNs[7] = 0.5143142171362715; CNs[8] = 0.06391303014242555; + CNs[9] = 0.7460491027783901; CNs[10] = 0.43887217459032923; CNs[11] = 0.2814395025355999; + CNs[12] = 0.9231114281214198; CNs[13] = 0.326882401786898; CNs[14] = 0.26747939220376216; + CNs[15] = 0.7787571209969636; CNs[16] = 0.5851700001235088; CNs[17] = 0.34410728945321567; + CNs[18] = 0.9394494507088997; CNs[19] = 0.17567223681734334; CNs[20] = 0.42732886195446984; + CNs[21] = 0.9460522396152958; CNs[22] = 0.23462747016780894; CNs[23] = 0.35983249900892145; + + SamplesBuffer SB(CNs, NumSamples, NumDimsPerSample, DiffN, SmoothN, LagN); + SB.preprocess(); + + std::vector<Sample> Samples =
SB.getPreprocessedSamples(); + EXPECT_EQ(Samples.size(), 2); + + Sample S0 = Samples[0]; + const CalculatedNumber *S0_CNs = S0.getCalculatedNumbers(); + Sample S1 = Samples[1]; + const CalculatedNumber *S1_CNs = S1.getCalculatedNumbers(); + + EXPECT_NEAR(S0_CNs[0], 0.198225, 0.001); + EXPECT_NEAR(S0_CNs[1], 0.003529, 0.001); + EXPECT_NEAR(S0_CNs[2], -0.063003, 0.001); + EXPECT_NEAR(S0_CNs[3], 0.219066, 0.001); + EXPECT_NEAR(S0_CNs[4], 0.133175, 0.001); + EXPECT_NEAR(S0_CNs[5], -0.293154, 0.001); + + EXPECT_NEAR(S1_CNs[0], 0.174160, 0.001); + EXPECT_NEAR(S1_CNs[1], -0.135722, 0.001); + EXPECT_NEAR(S1_CNs[2], 0.110452, 0.001); + EXPECT_NEAR(S1_CNs[3], 0.198225, 0.001); + EXPECT_NEAR(S1_CNs[4], 0.003529, 0.001); + EXPECT_NEAR(S1_CNs[5], -0.063003, 0.001); + + delete[] CNs; +} diff --git a/ml/ml-dummy.c b/ml/ml-dummy.c new file mode 100644 index 000000000..795b80c34 --- /dev/null +++ b/ml/ml-dummy.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "ml.h" + +#if !defined(ENABLE_ML) + +void ml_init(void) {} + +void ml_new_host(RRDHOST *RH) { (void) RH; } + +void ml_delete_host(RRDHOST *RH) { (void) RH; } + +char *ml_get_host_info(RRDHOST *RH) { (void) RH; return NULL; } + +void ml_new_dimension(RRDDIM *RD) { (void) RD; } + +void ml_delete_dimension(RRDDIM *RD) { (void) RD; } + +bool ml_is_anomalous(RRDDIM *RD, double Value, bool Exists) { + (void) RD; (void) Value; (void) Exists; + return false; +} + +char *ml_get_anomaly_events(RRDHOST *RH, const char *AnomalyDetectorName, + int AnomalyDetectorVersion, time_t After, time_t Before) { + (void) RH; (void) AnomalyDetectorName; + (void) AnomalyDetectorVersion; (void) After; (void) Before; + return NULL; +} + +char *ml_get_anomaly_event_info(RRDHOST *RH, const char *AnomalyDetectorName, + int AnomalyDetectorVersion, time_t After, time_t Before) { + (void) RH; (void) AnomalyDetectorName; + (void) AnomalyDetectorVersion; (void) After; (void) Before; + return NULL; +} + +#endif diff --git a/ml/ml-private.h b/ml/ml-private.h new file mode 100644 index 000000000..7b3e00684 --- /dev/null +++ b/ml/ml-private.h @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef ML_PRIVATE_H +#define ML_PRIVATE_H + +#include "kmeans/KMeans.h" +#include "ml/ml.h" + +#include <chrono> +#include <map> +#include <mutex> +#include <thread> + +namespace ml { + +using SteadyClock = std::chrono::steady_clock; +using TimePoint = std::chrono::time_point<SteadyClock>; + +template<typename T> +using Duration = std::chrono::duration<T>; + +using Seconds = std::chrono::seconds; + +} // namespace ml + +#endif /* ML_PRIVATE_H */ diff --git a/ml/ml.cc b/ml/ml.cc new file mode 100644 index 000000000..857d23d33 --- /dev/null +++ b/ml/ml.cc @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "Config.h" +#include "Dimension.h" +#include "Host.h" + +using namespace ml; + +/* + * Assumptions: + * 1) hosts outlive their sets, and sets outlive their dimensions, + * 2) dimensions always have a set that has a host.
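+ * + * In practice, (1) fixes the teardown order: ml_delete_dimension() runs for + * every dimension before ml_delete_host() runs for the owning host, so the + * Host's dimension map never holds a dangling pointer during shutdown.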
+ */ + +void ml_init(void) { + Cfg.readMLConfig(); +} + +void ml_new_host(RRDHOST *RH) { + if (!Cfg.EnableAnomalyDetection) + return; + + if (simple_pattern_matches(Cfg.SP_HostsToSkip, RH->hostname)) + return; + + Host *H = new Host(RH); + RH->ml_host = static_cast<ml_host_t>(H); + + H->startAnomalyDetectionThreads(); +} + +void ml_delete_host(RRDHOST *RH) { + Host *H = static_cast<Host *>(RH->ml_host); + if (!H) + return; + + H->stopAnomalyDetectionThreads(); + + delete H; + RH->ml_host = nullptr; +} + +void ml_new_dimension(RRDDIM *RD) { + RRDSET *RS = RD->rrdset; + + Host *H = static_cast<Host *>(RD->rrdset->rrdhost->ml_host); + if (!H) + return; + + if (static_cast<unsigned>(RD->update_every) != H->updateEvery()) + return; + + if (simple_pattern_matches(Cfg.SP_ChartsToSkip, RS->name)) + return; + + Dimension *D = new Dimension(RD); + RD->state->ml_dimension = static_cast<ml_dimension_t>(D); + H->addDimension(D); +} + +void ml_delete_dimension(RRDDIM *RD) { + Dimension *D = static_cast<Dimension *>(RD->state->ml_dimension); + if (!D) + return; + + Host *H = static_cast<Host *>(RD->rrdset->rrdhost->ml_host); + H->removeDimension(D); + + RD->state->ml_dimension = nullptr; +} + +char *ml_get_host_info(RRDHOST *RH) { + nlohmann::json ConfigJson; + + if (RH && RH->ml_host) { + Host *H = static_cast<Host *>(RH->ml_host); + H->getConfigAsJson(ConfigJson); + H->getDetectionInfoAsJson(ConfigJson); + } else { + ConfigJson["enabled"] = false; + } + + return strdup(ConfigJson.dump(2, '\t').c_str()); +} + +bool ml_is_anomalous(RRDDIM *RD, double Value, bool Exists) { + Dimension *D = static_cast<Dimension *>(RD->state->ml_dimension); + if (!D) + return false; + + D->addValue(Value, Exists); + bool Result = D->predict().second; + return Result; +} + +char *ml_get_anomaly_events(RRDHOST *RH, const char *AnomalyDetectorName, + int AnomalyDetectorVersion, time_t After, time_t Before) { + if (!RH || !RH->ml_host) { + error("No host"); + return nullptr; + } + + Host *H = static_cast<Host *>(RH->ml_host); + std::vector<std::pair<time_t, time_t>> TimeRanges; + + bool Res = H->getAnomaliesInRange(TimeRanges, AnomalyDetectorName, + AnomalyDetectorVersion, + H->getUUID(), + After, Before); + if (!Res) { + error("DB result is empty"); + return nullptr; + } + + nlohmann::json Json = TimeRanges; + return strdup(Json.dump(4).c_str()); +} + +char *ml_get_anomaly_event_info(RRDHOST *RH, const char *AnomalyDetectorName, + int AnomalyDetectorVersion, time_t After, time_t Before) { + if (!RH || !RH->ml_host) { + error("No host"); + return nullptr; + } + + Host *H = static_cast<Host *>(RH->ml_host); + + nlohmann::json Json; + bool Res = H->getAnomalyInfo(Json, AnomalyDetectorName, + AnomalyDetectorVersion, + H->getUUID(), + After, Before); + if (!Res) { + error("DB result is empty"); + return nullptr; + } + + return strdup(Json.dump(4, '\t').c_str()); +} + +#if defined(ENABLE_ML_TESTS) + +#include "gtest/gtest.h" + +int test_ml(int argc, char *argv[]) { + (void) argc; + (void) argv; + + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + +#endif // ENABLE_ML_TESTS diff --git a/ml/ml.h b/ml/ml.h new file mode 100644 index 000000000..96448453c --- /dev/null +++ b/ml/ml.h @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_ML_H +#define NETDATA_ML_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "daemon/common.h" + +typedef void* ml_host_t; +typedef void* ml_dimension_t; + +void ml_init(void); + +void ml_new_host(RRDHOST *RH); +void ml_delete_host(RRDHOST *RH); + +char *ml_get_host_info(RRDHOST *RH); + +void ml_new_dimension(RRDDIM *RD); +void ml_delete_dimension(RRDDIM *RD); + +bool
ml_is_anomalous(RRDDIM *RD, double value, bool exists); + +char *ml_get_anomaly_events(RRDHOST *RH, const char *AnomalyDetectorName, + int AnomalyDetectorVersion, time_t After, time_t Before); + +char *ml_get_anomaly_event_info(RRDHOST *RH, const char *AnomalyDetectorName, + int AnomalyDetectorVersion, time_t After, time_t Before); + +#if defined(ENABLE_ML_TESTS) +int test_ml(int argc, char *argv[]); +#endif + +#ifdef __cplusplus +}; +#endif + +#endif /* NETDATA_ML_H */ diff --git a/netdata-installer.sh b/netdata-installer.sh index ea4aadb04..345ce015a 100755 --- a/netdata-installer.sh +++ b/netdata-installer.sh @@ -118,6 +118,7 @@ download_go() { # make sure we save all commands we run run_logfile="netdata-installer.log" + # ----------------------------------------------------------------------------- # fix PKG_CHECK_MODULES error @@ -215,7 +216,7 @@ USAGE: ${PROGRAM} [options] --disable-ebpf Disable eBPF Kernel plugin (Default: enabled) --disable-cloud Disable all Netdata Cloud functionality. --require-cloud Fail the install if it can't build Netdata Cloud support. - --aclk-ng Forces build of ACLK Next Generation which is fallback by default. + --aclk-legacy Forces build of ACLK Legacy which is fallback by default. --enable-plugin-freeipmi Enable the FreeIPMI plugin. Default: enable it when libipmimonitoring is available. --disable-plugin-freeipmi --disable-https Explicitly disable TLS support @@ -232,14 +233,18 @@ USAGE: ${PROGRAM} [options] --disable-backend-prometheus-remote-write --enable-backend-mongodb Enable MongoDB backend. Default: enable it when libmongoc is available. --disable-backend-mongodb - --enable-lto Enable Link-Time-Optimization. Default: enabled + --enable-lto Enable Link-Time-Optimization. Default: disabled --disable-lto + --enable-ml Enable anomaly detection with machine learning. (Default: autodetect) + --disable-ml --disable-x86-sse Disable SSE instructions. By default SSE optimizations are enabled. --use-system-lws Use a system copy of libwebsockets instead of bundling our own (default is to use the bundled copy). + --use-system-protobuf Use a system copy of libprotobuf instead of bundling our own (default is to use the bundled copy). --zlib-is-really-here or --libs-are-really-here If you get errors about missing zlib or libuuid but you know it is available, you might have a broken pkg-config. Use this option to proceed without checking pkg-config. --disable-telemetry Use this flag to opt-out from our anonymous telemetry program. (DO_NOT_TRACK=1) + --skip-available-ram-check Skip checking the amount of RAM the system has and pretend it has enough to build safely. 
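+ + As an illustration, a typical invocation that enables the new machine + learning support and opts out of anonymous telemetry might be: + + ./netdata-installer.sh --enable-ml --disable-telemetry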
Netdata will by default be compiled with gcc optimization -O2 If you need to pass different CFLAGS, use something like this: @@ -270,6 +275,7 @@ DONOTWAIT=0 AUTOUPDATE=0 NETDATA_PREFIX= LIBS_ARE_HERE=0 +NETDATA_ENABLE_ML="" NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS-}" RELEASE_CHANNEL="nightly" # check .travis/create_artifacts.sh before modifying IS_NETDATA_STATIC_BINARY="${IS_NETDATA_STATIC_BINARY:-"no"}" @@ -278,6 +284,10 @@ while [ -n "${1}" ]; do "--zlib-is-really-here") LIBS_ARE_HERE=1 ;; "--libs-are-really-here") LIBS_ARE_HERE=1 ;; "--use-system-lws") USE_SYSTEM_LWS=1 ;; + "--use-system-protobuf") + USE_SYSTEM_PROTOBUF=1 + NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--without-bundled-protobuf/} --without-bundled-protobuf" + ;; "--dont-scrub-cflags-even-though-it-may-break-things") DONT_SCRUB_CFLAGS_EVEN_THOUGH_IT_MAY_BREAK_THINGS=1 ;; "--dont-start-it") DONOTSTART=1 ;; "--dont-wait") DONOTWAIT=1 ;; @@ -310,19 +320,33 @@ while [ -n "${1}" ]; do "--enable-backend-kinesis") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-backend-kinesis/} --enable-backend-kinesis" ;; "--disable-backend-kinesis") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-kinesis/} --disable-backend-kinesis" ;; "--enable-backend-prometheus-remote-write") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-backend-prometheus-remote-write/} --enable-backend-prometheus-remote-write" ;; - "--disable-backend-prometheus-remote-write") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-prometheus-remote-write/} --disable-backend-prometheus-remote-write" ;; + "--disable-backend-prometheus-remote-write") + NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-prometheus-remote-write/} --disable-backend-prometheus-remote-write" + NETDATA_DISABLE_PROMETHEUS=1 + ;; "--enable-backend-mongodb") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-backend-mongodb/} --enable-backend-mongodb" ;; "--disable-backend-mongodb") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-mongodb/} --disable-backend-mongodb" ;; "--enable-lto") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-lto/} --enable-lto" ;; + "--enable-ml") + NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-ml/} --enable-ml" + NETDATA_ENABLE_ML=1 + ;; + "--disable-ml") + NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-ml/} --disable-ml" + NETDATA_ENABLE_ML=0 + ;; + "--enable-ml-tests") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-ml-tests/} --enable-ml-tests" ;; + "--disable-ml-tests") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-ml-tests/} --disable-ml-tests" ;; "--disable-lto") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-lto/} --disable-lto" ;; "--disable-x86-sse") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-x86-sse/} --disable-x86-sse" ;; "--disable-telemetry") NETDATA_DISABLE_TELEMETRY=1 ;; "--disable-go") NETDATA_DISABLE_GO=1 ;; "--enable-ebpf") NETDATA_DISABLE_EBPF=0 ;; "--disable-ebpf") NETDATA_DISABLE_EBPF=1 NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-ebpf/} --disable-ebpf" ;; - "--aclk-ng") - NETDATA_ACLK_NG=1 - NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--with-aclk-ng/} --with-aclk-ng" + "--skip-available-ram-check") SKIP_RAM_CHECK=1 ;; + "--aclk-ng") ;; + "--aclk-legacy") + 
NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--with-aclk-legacy/} --with-aclk-legacy" ;; "--disable-cloud") if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then @@ -350,6 +374,10 @@ while [ -n "${1}" ]; do NETDATA_PREFIX="${2}/netdata" shift 1 ;; + "--install-no-prefix") + NETDATA_PREFIX="${2}" + shift 1 + ;; "--help" | "-h") usage exit 1 @@ -373,6 +401,52 @@ fi # replace multiple spaces with a single space NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS// / }" +if [ "$(uname -s)" = "Linux" ] && [ -f /proc/meminfo ]; then + mega="$((1024 * 1024))" + base=1024 + scale=256 + + if [ -n "${MAKEOPTS}" ]; then + proc_count="$(echo ${MAKEOPTS} | grep -oE '\-j *[[:digit:]]+' | tr -d '\-j ')" + else + proc_count="$(find_processors)" + fi + + target_ram="$((base * mega + (scale * mega * (proc_count - 1))))" + total_ram="$(grep MemTotal /proc/meminfo | cut -d ':' -f 2 | tr -d ' kB')" + total_ram="$((total_ram * 1024))" + + if [ "${total_ram}" -le "$((base * mega))" ] && [ -z "${NETDATA_ENABLE_ML}" ]; then + NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-ml/} --disable-ml" + NETDATA_ENABLE_ML=0 + fi + + if [ -z "${MAKEOPTS}" ]; then + MAKEOPTS="-j${proc_count}" + + while [ "${target_ram}" -gt "${total_ram}" ] && [ "${proc_count}" -gt 1 ]; do + proc_count="$((proc_count - 1))" + target_ram="$((base * mega + (scale * mega * (proc_count - 1))))" + MAKEOPTS="-j${proc_count}" + done + else + if [ "${target_ram}" -gt "${total_ram}" ] && [ "${proc_count}" -gt 1 ] && [ -z "${SKIP_RAM_CHECK}" ]; then + target_ram="$(echo "${target_ram}" | awk '{$1/=1024*1024*1024;printf "%.2fGiB\n",$1}')" + total_ram="$(echo "${total_ram}" | awk '{$1/=1024*1024*1024;printf "%.2fGiB\n",$1}')" + run_failed "Netdata needs ${target_ram} of RAM to safely install, but this system only has ${total_ram}." + run_failed "Insufficient RAM available for an install. Try reducing the number of processes used for the install using the \$MAKEOPTS variable." + exit 2 + fi + fi +fi + +# set default make options +if [ -z "${MAKEOPTS}" ]; then + MAKEOPTS="-j$(find_processors)" +elif echo "${MAKEOPTS}" | grep -vqF -e "-j"; then + MAKEOPTS="${MAKEOPTS} -j$(find_processors)" +fi + if [ "${UID}" -ne 0 ]; then if [ -z "${NETDATA_PREFIX}" ]; then netdata_banner "wrong command line options!" @@ -544,7 +618,7 @@ build_libmosquitto() { fi if [ "$(uname -s)" = Linux ]; then - run ${env_cmd} make -C "${1}/lib" + run ${env_cmd} ${make} ${MAKEOPTS} -C "${1}/lib" else pushd ${1} > /dev/null || return 1 if [ "$(uname)" = "Darwin" ] && [ -d /usr/local/opt/openssl ]; then @@ -556,7 +630,7 @@ build_libmosquitto() { else run ${env_cmd} cmake -D WITH_STATIC_LIBRARIES:boolean=YES . fi - run ${env_cmd} make -C lib + run ${env_cmd} ${make} ${MAKEOPTS} -C lib run mv lib/libmosquitto_static.a lib/libmosquitto.a popd || return 1 fi @@ -572,11 +646,13 @@ copy_libmosquitto() { } bundle_libmosquitto() { - if [ -n "${NETDATA_DISABLE_CLOUD}" ] || [ -n "${NETDATA_ACLK_NG}" ]; then + if [ -n "${NETDATA_DISABLE_CLOUD}" ]; then echo "Skipping libmosquitto" return 0 fi + [ -n "${GITHUB_ACTIONS}" ] && echo "::group::Bundling libmosquitto." + progress "Prepare custom libmosquitto version" MOSQUITTO_PACKAGE_VERSION="$(cat packaging/mosquitto.version)" @@ -596,20 +672,14 @@ bundle_libmosquitto() { run_ok "libmosquitto built and prepared." else run_failed "Failed to build libmosquitto." - if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then - exit 1 - else - defer_error_highlighted "Unable to fetch sources for libmosquitto. 
You will not be able to connect this node to Netdata Cloud." - fi + defer_error_highlighted "Unable to fetch sources for libmosquitto. You will not be able to connect this node to Netdata Cloud." fi else run_failed "Unable to fetch sources for libmosquitto." - if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then - exit 1 - else - defer_error_highlighted "Unable to fetch sources for libmosquitto. You will not be able to connect this node to Netdata Cloud." - fi + defer_error_highlighted "Unable to fetch sources for libmosquitto. You will not be able to connect this node to Netdata Cloud." fi + + [ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::" } bundle_libmosquitto @@ -660,7 +730,7 @@ EOF $CMAKE_FLAGS \ . fi - run ${env_cmd} make -j$(find_processors) + run ${env_cmd} ${make} ${MAKEOPTS} popd > /dev/null || exit 1 } @@ -674,7 +744,8 @@ copy_libwebsockets() { } bundle_libwebsockets() { - if [ -n "${NETDATA_DISABLE_CLOUD}" ] || [ -n "${USE_SYSTEM_LWS}" ] || [ -n "${NETDATA_ACLK_NG}" ]; then + if [ -n "${NETDATA_DISABLE_CLOUD}" ] || [ -n "${USE_SYSTEM_LWS}" ]; then + echo "Skipping libwebsockets" return 0 fi @@ -684,6 +755,8 @@ bundle_libwebsockets() { return 0 fi + [ -n "${GITHUB_ACTIONS}" ] && echo "::group::Bundling libwebsockets." + progress "Prepare libwebsockets" LIBWEBSOCKETS_PACKAGE_VERSION="$(cat packaging/libwebsockets.version)" @@ -701,26 +774,94 @@ bundle_libwebsockets() { copy_libwebsockets "${tmp}/libwebsockets-${LIBWEBSOCKETS_PACKAGE_VERSION}" && rm -rf "${tmp}"; then run_ok "libwebsockets built and prepared." - NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS} --with-bundled-lws=externaldeps/libwebsockets" + NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS} --with-bundled-lws" else run_failed "Failed to build libwebsockets." - if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then - exit 1 - else - defer_error_highlighted "Failed to build libwebsockets. You may not be able to connect this node to Netdata Cloud." - fi + defer_error_highlighted "Failed to build libwebsockets. You may not be able to connect this node to Netdata Cloud." fi else run_failed "Unable to fetch sources for libwebsockets." - if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then - exit 1 + defer_error_highlighted "Unable to fetch sources for libwebsockets. You may not be able to connect this node to Netdata Cloud." + fi + + [ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::" +} + +bundle_libwebsockets + +# ----------------------------------------------------------------------------- + +build_protobuf() { + local env_cmd='' + + if [ -z "${DONT_SCRUB_CFLAGS_EVEN_THOUGH_IT_MAY_BREAK_THINGS}" ]; then + env_cmd="env CFLAGS=-fPIC CXXFLAGS= LDFLAGS=" + fi + + pushd "${1}" > /dev/null || return 1 + if ! run ${env_cmd} ./configure --disable-shared --without-zlib --disable-dependency-tracking --with-pic; then + popd > /dev/null || return 1 + return 1 + fi + + if ! run ${env_cmd} $make ${MAKEOPTS}; then + popd > /dev/null || return 1 + return 1 + fi + + popd > /dev/null || return 1 +} + +copy_protobuf() { + target_dir="${PWD}/externaldeps/protobuf" + + run mkdir -p "${target_dir}" || return 1 + run cp -a "${1}/src" "${target_dir}" || return 1 +} + +bundle_protobuf() { + if [ -n "${NETDATA_DISABLE_CLOUD}" ] && [ -n "${NETDATA_DISABLE_PROMETHEUS}" ]; then + echo "Skipping protobuf" + return 0 + fi + + if [ -n "${USE_SYSTEM_PROTOBUF}" ]; then + echo "Skipping protobuf" + defer_error "You have requested use of a system copy of protobuf. 
This should work, but it is not recommended as it's very likely to break if you upgrade the currently installed version of protobuf."
+    return 0
+  fi
+
+  [ -n "${GITHUB_ACTIONS}" ] && echo "::group::Bundling protobuf."
+
+  PROTOBUF_PACKAGE_VERSION="$(cat packaging/protobuf.version)"
+
+  tmp="$(mktemp -d -t netdata-protobuf-XXXXXX)"
+  PROTOBUF_PACKAGE_BASENAME="protobuf-cpp-${PROTOBUF_PACKAGE_VERSION}.tar.gz"
+
+  if fetch_and_verify "protobuf" \
+    "https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_PACKAGE_VERSION}/${PROTOBUF_PACKAGE_BASENAME}" \
+    "${PROTOBUF_PACKAGE_BASENAME}" \
+    "${tmp}" \
+    "${NETDATA_LOCAL_TARBALL_OVERRIDE_PROTOBUF}"; then
+    if run tar -xf "${tmp}/${PROTOBUF_PACKAGE_BASENAME}" -C "${tmp}" &&
+      build_protobuf "${tmp}/protobuf-${PROTOBUF_PACKAGE_VERSION}" &&
+      copy_protobuf "${tmp}/protobuf-${PROTOBUF_PACKAGE_VERSION}" &&
+      rm -rf "${tmp}"; then
+      run_ok "protobuf built and prepared."
+      NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS} --with-bundled-protobuf"
     else
-      defer_error_highlighted "Unable to fetch sources for libwebsockets. You may not be able to connect this node to Netdata Cloud."
+      run_failed "Failed to build protobuf."
+      defer_error_highlighted "Failed to build protobuf. You may not be able to connect this node to Netdata Cloud."
     fi
+  else
+    run_failed "Unable to fetch sources for protobuf."
+    defer_error_highlighted "Unable to fetch sources for protobuf. You may not be able to connect this node to Netdata Cloud."
   fi
+
+  [ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::"
 }

-bundle_libwebsockets
+bundle_protobuf

 # -----------------------------------------------------------------------------

@@ -743,7 +884,7 @@ build_judy() {
     run ${env_cmd} automake --add-missing --force --copy --include-deps &&
     run ${env_cmd} autoconf &&
     run ${env_cmd} ./configure &&
-    run ${env_cmd} make -C src &&
+    run ${env_cmd} ${make} ${MAKEOPTS} -C src &&
     run ${env_cmd} ar -r src/libJudy.a src/Judy*/*.o; then
     popd > /dev/null || return 1
   else
@@ -763,7 +904,7 @@ copy_judy() {

 bundle_judy() {
   # If --build-judy flag or no Judy on the system and we're building the dbengine, bundle our own libJudy.
-  # shellcheck disable=SC2235
+  # shellcheck disable=SC2235,SC2030,SC2031
   if [ -n "${NETDATA_DISABLE_DBENGINE}" ] || ([ -z "${NETDATA_BUILD_JUDY}" ] && [ -e /usr/include/Judy.h ]); then
     return 0
   elif [ -n "${NETDATA_BUILD_JUDY}" ]; then
@@ -772,6 +913,8 @@ bundle_judy() {
     progress "/usr/include/Judy.h does not exist, but we need libJudy, building our own copy"
   fi

+  [ -n "${GITHUB_ACTIONS}" ] && echo "::group::Bundling libJudy."
+
   progress "Prepare libJudy"

   JUDY_PACKAGE_VERSION="$(cat packaging/judy.version)"
@@ -789,10 +932,12 @@ bundle_judy() {
     copy_judy "${tmp}/libjudy-${JUDY_PACKAGE_VERSION}" &&
     rm -rf "${tmp}"; then
     run_ok "libJudy built and prepared."
-    NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS} --with-libJudy=externaldeps/libJudy"
+    NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS} --with-bundled-libJudy"
   else
     run_failed "Failed to build libJudy."
     if [ -n "${NETDATA_BUILD_JUDY}" ]; then
+      [ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::"
+
       exit 1
     else
       defer_error_highlighted "Failed to build libJudy. dbengine support will be disabled."
@@ -801,11 +946,15 @@ else
     run_failed "Unable to fetch sources for libJudy."
     if [ -n "${NETDATA_BUILD_JUDY}" ]; then
+      [ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::"
+
       exit 1
     else
       defer_error_highlighted "Unable to fetch sources for libJudy. dbengine support will be disabled."
fi fi + + [ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::" } bundle_judy @@ -821,7 +970,7 @@ build_jsonc() { pushd "${1}" > /dev/null || exit 1 run ${env_cmd} cmake -DBUILD_SHARED_LIBS=OFF . - run ${env_cmd} make + run ${env_cmd} ${make} ${MAKEOPTS} popd > /dev/null || exit 1 } @@ -846,6 +995,8 @@ bundle_jsonc() { return 0 fi + [ -n "${GITHUB_ACTIONS}" ] && echo "::group::Bundling JSON-C." + progress "Prepare JSON-C" JSONC_PACKAGE_VERSION="$(cat packaging/jsonc.version)" @@ -865,29 +1016,43 @@ bundle_jsonc() { run_ok "JSON-C built and prepared." else run_failed "Failed to build JSON-C." - if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then - exit 1 - else - defer_error_highlighted "Failed to build JSON-C. Netdata Cloud support will be disabled." - fi + defer_error_highlighted "Failed to build JSON-C. Netdata Cloud support will be disabled." fi else run_failed "Unable to fetch sources for JSON-C." - if [ -n "${NETDATA_REQUIRE_CLOUD}" ]; then - exit 1 - else - defer_error_highlighted "Unable to fetch sources for JSON-C. Netdata Cloud support will be disabled." - fi + defer_error_highlighted "Unable to fetch sources for JSON-C. Netdata Cloud support will be disabled." fi + + [ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::" } bundle_jsonc # ----------------------------------------------------------------------------- +get_kernel_version() { + r="$(uname -r | cut -f 1 -d '-')" + + read -r -a p <<< "$(echo "${r}" | tr '.' ' ')" + + printf "%03d%03d%03d" "${p[0]}" "${p[1]}" "${p[2]}" +} + +rename_libbpf_packaging() { + if [ "$(get_kernel_version)" -ge "004014000" ]; then + cp packaging/current_libbpf.checksums packaging/libbpf.checksums + cp packaging/current_libbpf.version packaging/libbpf.version + else + cp packaging/libbpf_0_0_9.checksums packaging/libbpf.checksums + cp packaging/libbpf_0_0_9.version packaging/libbpf.version + fi +} + + build_libbpf() { pushd "${1}/src" > /dev/null || exit 1 - run env CFLAGS=-fPIC CXXFLAGS= LDFLAGS= BUILD_STATIC_ONLY=y OBJDIR=build DESTDIR=.. make install + mkdir root build + run env CFLAGS=-fPIC CXXFLAGS= LDFLAGS= BUILD_STATIC_ONLY=y OBJDIR=build DESTDIR=.. ${make} ${MAKEOPTS} install popd > /dev/null || exit 1 } @@ -911,6 +1076,10 @@ bundle_libbpf() { return 0 fi + [ -n "${GITHUB_ACTIONS}" ] && echo "::group::Bundling libbpf." + + rename_libbpf_packaging + progress "Prepare libbpf" LIBBPF_PACKAGE_VERSION="$(cat packaging/libbpf.version)" @@ -944,6 +1113,8 @@ bundle_libbpf() { defer_error_highlighted "Unable to fetch sources for libbpf. You may not be able to use eBPF plugin." fi fi + + [ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::" } bundle_libbpf @@ -964,11 +1135,13 @@ fi if [ -d ./.git ] ; then echo >&2 progress "Updating tags in git to ensure a consistent version number" - run git fetch 'refs/tags/*:refs/tags/*' || true + run git fetch -t || true fi # ----------------------------------------------------------------------------- echo >&2 + +[ -n "${GITHUB_ACTIONS}" ] && echo "::group::Configuring Netdata." progress "Run autotools to configure the build environment" if [ "$have_autotools" ]; then @@ -987,10 +1160,14 @@ run ./configure \ ${NETDATA_CONFIGURE_OPTIONS} \ CFLAGS="${CFLAGS}" LDFLAGS="${LDFLAGS}" || exit 1 +[ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::" + # remove the build_error hook trap - EXIT # ----------------------------------------------------------------------------- +[ -n "${GITHUB_ACTIONS}" ] && echo "::group::Building Netdata." 
+ progress "Cleanup compilation directory" run $make clean @@ -998,9 +1175,13 @@ run $make clean # ----------------------------------------------------------------------------- progress "Compile netdata" -run $make -j$(find_processors) || exit 1 +run $make ${MAKEOPTS} || exit 1 + +[ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::" # ----------------------------------------------------------------------------- +[ -n "${GITHUB_ACTIONS}" ] && echo "::group::Installing Netdata." + progress "Migrate configuration files for node.d.plugin and charts.d.plugin" # migrate existing configuration files @@ -1110,7 +1291,7 @@ run $make install || exit 1 # ----------------------------------------------------------------------------- progress "Fix generated files permissions" -run find ./system/ -type f -a \! -name \*.in -a \! -name Makefile\* -a \! -name \*.conf -a \! -name \*.service -a \! -name \*.timer -a \! -name \*.logrotate -exec chmod 755 {} \; +run find ./system/ -type f -a \! -name \*.in -a \! -name Makefile\* -a \! -name \*.conf -a \! -name \*.service -a \! -name \*.timer -a \! -name \*.logrotate -a \! -name \.install-type -exec chmod 755 {} \; # ----------------------------------------------------------------------------- progress "Creating standard user and groups for netdata" @@ -1360,6 +1541,8 @@ else run find "${NETDATA_PREFIX}/usr/libexec/netdata" -type d -exec chmod 0755 {} \; fi +[ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::" + # ----------------------------------------------------------------------------- # govercomp compares go.d.plugin versions. Exit codes: @@ -1427,6 +1610,8 @@ install_go() { return 0 fi + [ -n "${GITHUB_ACTIONS}" ] && echo "::group::Installing go.d.plugin." + # When updating this value, ensure correct checksums in packaging/go.d.checksums GO_PACKAGE_VERSION="$(cat packaging/go.d.version)" ARCH_MAP=( @@ -1474,6 +1659,7 @@ install_go() { defer_error "go.d plugin download failed, go.d plugin will not be available" echo >&2 "Either check the error or consider disabling it by issuing '--disable-go' in the installer" echo >&2 + [ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::" return 0 fi @@ -1489,6 +1675,7 @@ install_go() { run_failed "go.d.plugin package files checksum validation failed." defer_error "go.d.plugin package files checksum validation failed, go.d.plugin will not be available" + [ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::" return 0 fi @@ -1505,31 +1692,11 @@ install_go() { fi run chmod 0750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/go.d.plugin" rm -rf "${tmp}" - return 0 -} - -install_go - -function get_kernel_version() { - r="$(uname -r | cut -f 1 -d '-')" - read -r -a p <<< "$(echo "${r}" | tr '.' ' ')" - - printf "%03d%03d%03d" "${p[0]}" "${p[1]}" "${p[2]}" + [ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::" } -function get_rh_version() { - if [ ! -f /etc/redhat-release ]; then - printf "000000000" - return - fi - - r="$(cut -f 4 -d ' ' < /etc/redhat-release)" - - read -r -a p <<< "$(echo "${r}" | tr '.' ' ')" - - printf "%03d%03d%03d" "${p[0]}" "${p[1]}" "${p[2]}" -} +install_go detect_libc() { libc= @@ -1557,9 +1724,9 @@ should_install_ebpf() { return 1 fi - if [ "$(uname -s)" != "Linux" ]; then - run_failed "Currently eBPF is only supported on Linux." - defer_error "Currently eBPF is only supported on Linux." + if [ "$(uname -s)" != "Linux" ] || [ "$(uname -m)" != "x86_64" ]; then + run_failed "Currently eBPF is only supported on Linux on X86_64." + defer_error "Currently eBPF is only supported on Linux on X86_64." 
     return 1
   fi
@@ -1606,6 +1773,12 @@ remove_old_ebpf() {
     echo >&2 "Removing old ebpf_kernel_reject_list.txt."
     rm -f "${NETDATA_PREFIX}/usr/lib/netdata/conf.d/ebpf_kernel_reject_list.txt"
   fi
+
+  # Remove old reset script
+  if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/reset_netdata_trace.sh" ]; then
+    echo >&2 "Removing old reset_netdata_trace.sh."
+    rm -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/reset_netdata_trace.sh"
+  fi
 }

 install_ebpf() {
@@ -1613,6 +1786,8 @@
     return 0
   fi

+  [ -n "${GITHUB_ACTIONS}" ] && echo "::group::Installing eBPF code."
+
   remove_old_ebpf

   progress "Installing eBPF plugin"
@@ -1633,6 +1808,8 @@
     run_failed "Failed to download eBPF collector package"
     echo >&2 " Removing temporary directory ${tmp} ..."
     rm -rf "${tmp}"
+
+    [ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::"
     return 1
   fi
@@ -1647,6 +1824,8 @@
     RET=$?
     if [ "${RET}" != "0" ]; then
       rm -rf "${tmp}"
+
+      [ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::"
       return 1
     fi
   fi
@@ -1655,7 +1834,7 @@
   rm -rf "${tmp}"

-  return 0
+  [ -n "${GITHUB_ACTIONS}" ] && echo "::endgroup::"
 }

 progress "eBPF Kernel Collector"
@@ -1873,6 +2052,7 @@ cat << EOF > "${NETDATA_USER_CONFIG_DIR}/.environment"
 PATH="${PATH}"
 CFLAGS="${CFLAGS}"
 LDFLAGS="${LDFLAGS}"
+MAKEOPTS="${MAKEOPTS}"
 NETDATA_TMPDIR="${TMPDIR}"
 NETDATA_PREFIX="${NETDATA_PREFIX}"
 NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS}"
diff --git a/netdata.spec.in b/netdata.spec.in
index 48e928929..9aa0e4e98 100644
--- a/netdata.spec.in
+++ b/netdata.spec.in
@@ -10,17 +10,23 @@ # PACKAGE IS BROKEN WITHOUT THEM.
 AutoReqProv: yes

-%if "@HAVE_LIBBPF@" == "1"
-%global have_bpf 1
-%else
-%global have_bpf 0
-%endif
-
 # This is temporary and should eventually be resolved. This bypasses
 # the default rhel __os_install_post which throws a python compile
 # error.
 %global __os_install_post %{nil}

+# We don’t want LTO as it has a minimal performance impact at runtime
+# but a huge impact on build times (we want our CI to not take multiple
+# hours to finish).
+%global _lto_cflags %nil
+
+# Disable eBPF for architectures other than x86
+%ifarch x86_64 i386
+%global _have_ebpf 1
+%else
+%global _have_ebpf 0
+%endif
+
 # Mitigate the cross-distro mayhem by strictly defining the libexec destination
 %define _prefix /usr
 %define _sysconfdir /etc
@@ -132,7 +138,7 @@ BuildRequires: zlib-devel
 BuildRequires: libuuid-devel
 BuildRequires: libuv-devel >= 1
 BuildRequires: openssl-devel
-%if 0%{?centos_ver} >= 8 || 0%{?fedora}
+%if 0%{?fedora}
 BuildRequires: libwebsockets-devel >= 3.2
 %endif
 %if 0%{?suse_version}
@@ -221,26 +227,37 @@ happened, on your systems and applications.
%prep %setup -q -n %{name}-%{version} export CFLAGS="${CFLAGS} -fPIC" && ${RPM_BUILD_DIR}/%{name}-%{version}/packaging/bundle-mosquitto.sh ${RPM_BUILD_DIR}/%{name}-%{version} -%if 0%{?centos_ver} < 8 || 0%{!?fedora:1} +%if 0%{!?fedora:1} export CFLAGS="${CFLAGS} -fPIC" && ${RPM_BUILD_DIR}/%{name}-%{version}/packaging/bundle-lws.sh ${RPM_BUILD_DIR}/%{name}-%{version} %endif # Only bundle libJudy if this isn't Fedora or SUSE %if 0%{!?fedora:1} && 0%{!?suse_version:1} export CFLAGS="${CFLAGS} -fPIC" && ${RPM_BUILD_DIR}/%{name}-%{version}/packaging/bundle-judy.sh ${RPM_BUILD_DIR}/%{name}-%{version} %endif -%if 0%{?have_bpf} -export CFLAGS="${CFLAGS} -fPIC" && ${RPM_BUILD_DIR}/%{name}-%{version}/packaging/bundle-libbpf.sh ${RPM_BUILD_DIR}/%{name}-%{version} +%if 0%{?_have_ebpf} +%if 0%{?centos_ver:1} +%if %{centos_ver} < 8 +export CFLAGS="${CFLAGS} -fPIC" && ${RPM_BUILD_DIR}/%{name}-%{version}/packaging/bundle-libbpf.sh ${RPM_BUILD_DIR}/%{name}-%{version} centos7 +%else +export CFLAGS="${CFLAGS} -fPIC" && ${RPM_BUILD_DIR}/%{name}-%{version}/packaging/bundle-libbpf.sh ${RPM_BUILD_DIR}/%{name}-%{version} centos8 +%endif +%else +export CFLAGS="${CFLAGS} -fPIC" && ${RPM_BUILD_DIR}/%{name}-%{version}/packaging/bundle-libbpf.sh ${RPM_BUILD_DIR}/%{name}-%{version} other +%endif %endif %build # Conf step autoreconf -ivf %configure \ + %if 0%{!?_have_ebpf} + --disable-ebpf + %endif %if 0%{!?fedora:1} && 0%{!?suse_version:1} - --with-libJudy=externaldeps/libJudy \ + --with-bundled-libJudy \ %endif %if 0%{?centos_ver} < 8 || 0%{!?fedora:1} - --with-bundled-lws=externaldeps/libwebsockets \ + --with-bundled-lws \ %endif --prefix="%{_prefix}" \ --sysconfdir="%{_sysconfdir}" \ @@ -282,7 +299,7 @@ install -m 4750 -p perf.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins. # ########################################################### # Install ebpf.plugin -%if 0%{?have_bpf} +%if 0%{?_have_ebpf} install -m 4750 -p ebpf.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/ebpf.plugin" %endif @@ -411,7 +428,7 @@ install_go() { install_go install -m 0640 -p go.d.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/go.d.plugin" -%if 0%{?have_bpf} +%if 0%{?_have_ebpf} ${RPM_BUILD_DIR}/%{name}-%{version}/packaging/bundle-ebpf.sh ${RPM_BUILD_DIR}/%{name}-%{version} ${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d %endif diff --git a/packaging/Dockerfile.packager b/packaging/Dockerfile.packager deleted file mode 100644 index 4c90f14fd..000000000 --- a/packaging/Dockerfile.packager +++ /dev/null @@ -1,43 +0,0 @@ -ARG ARCH=amd64 -ARG DISTRO=debian -ARG TEST_BASE=debian -ARG DISTRO_VERSION=10 -ARG PKG_VERSION=0.1 - -FROM netdata/package-builders:${DISTRO}${DISTRO_VERSION} AS build - -ARG ARCH -ARG DISTRO -ARG DISTRO_VERSION -ARG PKG_VERSION - -ENV ARCH=$ARCH -ENV DISTRO=$DISTRO -ENV DISTRO_VERSION=$DISTRO_VERSION -ENV VERSION=$PKG_VERSION - -WORKDIR /netdata -COPY . . 
- -RUN /build.sh - -FROM ${TEST_BASE}:${DISTRO_VERSION} AS runtime - -ARG ARCH -ARG DISTRO -ARG DISTRO_VERSION -ARG PKG_VERSION - -ENV ARCH=$ARCH -ENV DISTRO=$DISTRO -ENV DISTRO_VERSION=$DISTRO_VERSION -ENV VERSION=$PKG_VERSION - -COPY ./packaging/scripts/install.sh /install.sh -COPY ./packaging/scripts/test.sh /test.sh - -COPY --from=build /netdata/artifacts /packages - -RUN /install.sh - -CMD ["/test.sh"] diff --git a/packaging/bundle-ebpf.sh b/packaging/bundle-ebpf.sh index c51162987..3204345b0 100755 --- a/packaging/bundle-ebpf.sh +++ b/packaging/bundle-ebpf.sh @@ -6,12 +6,14 @@ PLUGINDIR="${2}" EBPF_VERSION="$(cat "${SRCDIR}/packaging/ebpf.version")" EBPF_TARBALL="netdata-kernel-collector-glibc-${EBPF_VERSION}.tar.xz" -mkdir -p "${SRCDIR}/tmp/ebpf" -curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/kernel-collector/releases/download/${EBPF_VERSION}/${EBPF_TARBALL}" > "${EBPF_TARBALL}" || exit 1 -grep "${EBPF_TARBALL}" "${SRCDIR}/packaging/ebpf.checksums" | sha256sum -c - || exit 1 -tar -xaf "${EBPF_TARBALL}" -C "${SRCDIR}/tmp/ebpf" || exit 1 -if [ ! -d "${PLUGINDIR}/ebpf.d" ];then - mkdir "${PLUGINDIR}/ebpf.d" +if [ -x "${PLUGINDIR}/ebpf.plugin" ] ; then + mkdir -p "${SRCDIR}/tmp/ebpf" + curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/kernel-collector/releases/download/${EBPF_VERSION}/${EBPF_TARBALL}" > "${EBPF_TARBALL}" || exit 1 + grep "${EBPF_TARBALL}" "${SRCDIR}/packaging/ebpf.checksums" | sha256sum -c - || exit 1 + tar -xaf "${EBPF_TARBALL}" -C "${SRCDIR}/tmp/ebpf" || exit 1 + if [ ! -d "${PLUGINDIR}/ebpf.d" ];then + mkdir "${PLUGINDIR}/ebpf.d" + fi + # shellcheck disable=SC2046 + cp -a $(find "${SRCDIR}/tmp/ebpf" -mindepth 1 -maxdepth 1) "${PLUGINDIR}/ebpf.d" fi -# shellcheck disable=SC2046 -cp -a $(find "${SRCDIR}/tmp/ebpf" -mindepth 1 -maxdepth 1) "${PLUGINDIR}/ebpf.d" diff --git a/packaging/bundle-libbpf.sh b/packaging/bundle-libbpf.sh index 4c16dd123..3ca183b0f 100755 --- a/packaging/bundle-libbpf.sh +++ b/packaging/bundle-libbpf.sh @@ -1,7 +1,4 @@ -#!/bin/sh - -LIBBPF_TARBALL="v$(cat "${1}/packaging/libbpf.version").tar.gz" -LIBBPF_BUILD_PATH="${1}/externaldeps/libbpf/libbpf-$(cat "${1}/packaging/libbpf.version")" +#!/bin/bash if [ "$(uname -m)" = x86_64 ]; then lib_subdir="lib64" @@ -9,6 +6,17 @@ else lib_subdir="lib" fi +if [ "${2}" != "centos7" ]; then + cp "${1}/packaging/current_libbpf.checksums" "${1}/packaging/libbpf.checksums" + cp "${1}/packaging/current_libbpf.version" "${1}/packaging/libbpf.version" +else + cp "${1}/packaging/libbpf_0_0_9.checksums" "${1}/packaging/libbpf.checksums" + cp "${1}/packaging/libbpf_0_0_9.version" "${1}/packaging/libbpf.version" +fi + +LIBBPF_TARBALL="v$(cat "${1}/packaging/libbpf.version").tar.gz" +LIBBPF_BUILD_PATH="${1}/externaldeps/libbpf/libbpf-$(cat "${1}/packaging/libbpf.version")" + mkdir -p "${1}/externaldeps/libbpf" || exit 1 curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/libbpf/archive/${LIBBPF_TARBALL}" > "${LIBBPF_TARBALL}" || exit 1 sha256sum -c "${1}/packaging/libbpf.checksums" || exit 1 diff --git a/packaging/bundle-lws.sh b/packaging/bundle-lws.sh index 4ecf0ca92..c4ffc7780 100755 --- a/packaging/bundle-lws.sh +++ b/packaging/bundle-lws.sh @@ -10,7 +10,7 @@ curl -sSL --connect-timeout 10 --retry 3 "https://github.com/warmcat/libwebsocke sha256sum -c "${1}/packaging/libwebsockets.checksums" || exit 1 tar -xzf "${LWS_TARBALL}" -C "${1}/externaldeps/libwebsockets" || exit 1 cd "${LWS_BUILD_PATH}" || exit 1 -cmake -D LWS_WITH_SOCKS5:boolean=YES . 
|| exit 1 +cmake -Wno-dev -Wno-deprecated -D LWS_WITH_SOCKS5:boolean=YES -D WITHOUT_LWS_TESTAPPS:boolean=YES . || exit 1 make || exit 1 cd "${startdir}" || exit 1 cp -a "${LWS_BUILD_PATH}/lib/libwebsockets.a" "${1}/externaldeps/libwebsockets" || exit 1 diff --git a/packaging/current_libbpf.checksums b/packaging/current_libbpf.checksums new file mode 100644 index 000000000..eccbfa9f3 --- /dev/null +++ b/packaging/current_libbpf.checksums @@ -0,0 +1 @@ +47acbdf7836048fad3a585c6ab43cc08d1b70c27ce0a816e9ca92b927555530f v0.5.1_netdata.tar.gz diff --git a/packaging/current_libbpf.version b/packaging/current_libbpf.version new file mode 100644 index 000000000..74ca5f708 --- /dev/null +++ b/packaging/current_libbpf.version @@ -0,0 +1 @@ +0.5.1_netdata diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile index d91d1a773..d99d393ae 100644 --- a/packaging/docker/Dockerfile +++ b/packaging/docker/Dockerfile @@ -25,7 +25,8 @@ WORKDIR /opt/netdata.git # Install from source RUN chmod +x netdata-installer.sh && \ cp -rp /deps/* /usr/local/ && \ - ./netdata-installer.sh --dont-wait --dont-start-it ${EXTRA_INSTALL_OPTS} \ + /bin/echo -e "INSTALL_TYPE='oci'\nPREBUILT_ARCH='$(uname -m)'" > ./system/.install-type && \ + ./netdata-installer.sh --dont-wait --dont-start-it --use-system-protobuf ${EXTRA_INSTALL_OPTS} \ "$([ "$RELEASE_CHANNEL" = stable ] && echo --stable-channel)" # files to one directory @@ -56,6 +57,9 @@ RUN mkdir -p /app/usr/sbin/ \ # hadolint ignore=DL3007 FROM netdata/base:latest as base +ARG OFFICIAL_IMAGE=false +ENV NETDATA_OFFICIAL_IMAGE=$OFFICIAL_IMAGE + # Configure system ARG NETDATA_UID=201 ARG NETDATA_GID=201 @@ -98,8 +102,10 @@ RUN chown -R root:root \ chmod 0755 /usr/libexec/netdata/plugins.d/*.plugin && \ chmod 4755 \ /usr/libexec/netdata/plugins.d/cgroup-network \ - /usr/libexec/netdata/plugins.d/apps.plugin \ - /usr/libexec/netdata/plugins.d/freeipmi.plugin && \ + /usr/libexec/netdata/plugins.d/apps.plugin && \ + if [ -f /usr/libexec/netdata/plugins.d/freeipmi.plugin ]; then \ + chmod 4755 /usr/libexec/netdata/plugins.d/freeipmi.plugin; \ + fi && \ # Group write permissions due to: https://github.com/netdata/netdata/pull/6543 find /var/lib/netdata /var/cache/netdata -type d -exec chmod 0770 {} \; && \ find /var/lib/netdata /var/cache/netdata -type f -exec chmod 0660 {} \; && \ @@ -112,3 +118,5 @@ EXPOSE $NETDATA_LISTENER_PORT ENTRYPOINT ["/usr/sbin/run.sh"] HEALTHCHECK --interval=60s --timeout=10s --retries=3 CMD /usr/sbin/health.sh + +ONBUILD ENV NETDATA_OFFICIAL_IMAGE=false diff --git a/packaging/docker/README.md b/packaging/docker/README.md index ed136cfa9..0a4804ae9 100644 --- a/packaging/docker/README.md +++ b/packaging/docker/README.md @@ -32,6 +32,9 @@ directive, not a COMMAND directive. Please adapt your execution scripts accordin ENTRYPOINT vs COMMAND in the [Docker documentation](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). +Our POWER8+ Docker images do not support our FreeIPMI collector. This is a technical limitation in FreeIPMI itself, +and unfortunately not something we can realistically work around. + ## Create a new Netdata Agent container You can create a new Agent container using either `docker run` or Docker Compose. After using either method, you can diff --git a/packaging/docker/run.sh b/packaging/docker/run.sh index c04be9ff8..3f05a1708 100755 --- a/packaging/docker/run.sh +++ b/packaging/docker/run.sh @@ -13,6 +13,23 @@ if [ ! 
"${DO_NOT_TRACK:-0}" -eq 0 ] || [ -n "$DO_NOT_TRACK" ]; then touch /etc/netdata/.opt-out-from-anonymous-statistics fi + +BALENA_PGID=$(ls -nd /var/run/balena.sock | awk '{print $4}') +DOCKER_PGID=$(ls -nd /var/run/docker.sock | awk '{print $4}') + +re='^[0-9]+$' +if [[ $BALENA_PGID =~ $re ]]; then + echo "Netdata detected balena-engine.sock" + DOCKER_HOST='/var/run/balena-engine.sock' + PGID="$BALENA_PGID" +elif [[ $DOCKER_PGID =~ $re ]]; then + echo "Netdata detected docker.sock" + DOCKER_HOST="/var/run/docker.sock" + PGID=$(ls -nd /var/run/docker.sock | awk '{print $4}') +fi +export PGID +export DOCKER_HOST + if [ -n "${PGID}" ]; then echo "Creating docker group ${PGID}" addgroup -g "${PGID}" "docker" || echo >&2 "Could not add group docker with ID ${PGID}, its already there probably" diff --git a/packaging/ebpf.checksums b/packaging/ebpf.checksums index 7b13f7655..8ecbc9ce1 100644 --- a/packaging/ebpf.checksums +++ b/packaging/ebpf.checksums @@ -1,3 +1,3 @@ -1442027d53cf11e1b086ec837659a498a9a2738ef43e44b32a2a0171d057544a netdata-kernel-collector-glibc-v0.6.3.tar.xz -0863b06e78bb3a596cb1f68d13560301f563683cb174fd27b1e34c232e6f3c22 netdata-kernel-collector-musl-v0.6.3.tar.xz -571dddd2b3b06d9f53cc24384ffbd88e2bb662ad953acaef46c76249186fe3b6 netdata-kernel-collector-static-v0.6.3.tar.xz +7ffd13d0f32df93bc7868a7cf040e75c40a9e041f6bb7f4a591cc6ce71b1cf72 netdata-kernel-collector-glibc-v0.8.6.tar.xz +05c10d57252941b7f6dbb8a726a243416942c58738015014dd764b4bcb0c2f9c netdata-kernel-collector-musl-v0.8.6.tar.xz +2a607729a9093538624a94dfdbf2a3660eb4eb199f86962d38806d8e1e420a71 netdata-kernel-collector-static-v0.8.6.tar.xz diff --git a/packaging/ebpf.version b/packaging/ebpf.version index e4c57af0b..85c3002dc 100644 --- a/packaging/ebpf.version +++ b/packaging/ebpf.version @@ -1 +1 @@ -v0.6.3 +v0.8.6 diff --git a/packaging/go.d.checksums b/packaging/go.d.checksums index 641563eb2..883dac747 100644 --- a/packaging/go.d.checksums +++ b/packaging/go.d.checksums @@ -1,16 +1,16 @@ -512b371cde4303bce1e0d5721dadd21738b2ef779f3b502b3094eb122dc34a5e *config.tar.gz -222f585c7dc107d3325eba9b59728b2b476c3ef46ccf3e956b113a74b5b7d51c *go.d.plugin-v0.28.2.darwin-amd64.tar.gz -32a293f2363fbd85d07317c809b3ac89df3385b1c0fa63c970c0f9be9f6fe6ba *go.d.plugin-v0.28.2.freebsd-386.tar.gz -e90d4df084395aca47a2927ece350777e7b625db43ebfc2d503aabfd29f2e5dc *go.d.plugin-v0.28.2.freebsd-amd64.tar.gz -274380b5fcef455d144c52b5d73b41d0c406743db36518be3772ace4199c8a28 *go.d.plugin-v0.28.2.freebsd-arm.tar.gz -418a36b506377c19080d76878ce8f7766da00c2b6eec5b4ebe074c7c39a0905d *go.d.plugin-v0.28.2.freebsd-arm64.tar.gz -5d3b317da540ee064a622e3a91162744b883342db4fff97ad401d297d35388f1 *go.d.plugin-v0.28.2.linux-386.tar.gz -3274c55132582df296dbbbd9705f9a7693bf71faee37959da7ed8641d88f16f0 *go.d.plugin-v0.28.2.linux-amd64.tar.gz -3f449ecc91d78616ebb7ca4550c328e810f0b1bb6d92c89dbad01b5da9616b5d *go.d.plugin-v0.28.2.linux-arm.tar.gz -f70ea1bacfb1f61e87c26a4bcb225f49b3507cf033b28dbefd4c7576e1928e22 *go.d.plugin-v0.28.2.linux-arm64.tar.gz -9b92040ac994877c8e16d83df396ca62cc2ac9df6f8ad7ab042e1a0ba59c979c *go.d.plugin-v0.28.2.linux-mips.tar.gz -691c783b2ee6107426ec3151d1218d530d5e28a505afea1ed1fdcbc127cc7c97 *go.d.plugin-v0.28.2.linux-mips64.tar.gz -9de8ab05f9744cb12b3e6cfbca33c98c50bcdda1699fddde096e8407f7354f21 *go.d.plugin-v0.28.2.linux-mips64le.tar.gz -1e20f44642ecb6fbb07d65ecf279af4030e03d40e0900374e7d9b2d8d775da07 *go.d.plugin-v0.28.2.linux-mipsle.tar.gz -6231c577070e8825557b5854748d715ebcd4be4bcfea8dee71ba5887f268f300 
*go.d.plugin-v0.28.2.linux-ppc64.tar.gz -106de121c1f73b366bd3ae015db062d6fda1a7ceddb68316dc023ae2081b7d9d *go.d.plugin-v0.28.2.linux-ppc64le.tar.gz +4134de9cc397a2425c18c05a112c7e27d9a7d4bb87b0b180f58a8ffc78df5ba8 *config.tar.gz +28208cde0b8bcd9dab43f832f1c6d80bbd275a68f636a1e7a847298fcda50d3e *go.d.plugin-v0.31.0.darwin-amd64.tar.gz +6a3cbfa0e02185d50f1e6b5538550219df6279ca7bc047b832f050de6f54d9d8 *go.d.plugin-v0.31.0.freebsd-386.tar.gz +75e52904516304456da3352e710d3a028a91417ab8556c19a8836a94c4fb7e2c *go.d.plugin-v0.31.0.freebsd-amd64.tar.gz +8bb88c1c5e374bc17548fe27e01e4649bfff6199866d5175c6d90f3d3a7d7e77 *go.d.plugin-v0.31.0.freebsd-arm.tar.gz +9883d8feee823627f76498d9ce077a2fc75ebe4b2844069b9bd95a15b71aca7c *go.d.plugin-v0.31.0.freebsd-arm64.tar.gz +21babd0651684e33d36b311aa89c5f38da8a3d7e35104e8638af82baa88af2cb *go.d.plugin-v0.31.0.linux-386.tar.gz +fd9e5a674b7d42623561c152e7218dedd3dc3fe227c5c12ee6516256c35c8c1c *go.d.plugin-v0.31.0.linux-amd64.tar.gz +830b946b4c349facbc8664339c58bfb8219889aaa47c1bd5639c3db141bf11c1 *go.d.plugin-v0.31.0.linux-arm.tar.gz +7d476c7315b1bbd90b6e188219cbde2a09a2e5e309a3ee83cdccba2506db297b *go.d.plugin-v0.31.0.linux-arm64.tar.gz +b9f4a4788c4ecea76270ac5403b7dfbe9506b9b9b3ba334429f9c2a4d2602593 *go.d.plugin-v0.31.0.linux-mips.tar.gz +937f68db1eeedec5139a32cf49dc3aa26ca310b38393e99afac083393d0d309f *go.d.plugin-v0.31.0.linux-mips64.tar.gz +d09d08ab7d799dadfd06f516f51dcb16e3c79f101fa20ced2b75de9be68c9209 *go.d.plugin-v0.31.0.linux-mips64le.tar.gz +278653c3f3a0d3b54383b568dac7637f7af750c71bcc5b1ad93cb2d125c9a935 *go.d.plugin-v0.31.0.linux-mipsle.tar.gz +e66c93dbd0a38ae656a04e34f7de945dfce293a82e2fcc00da5b838af51c0cfc *go.d.plugin-v0.31.0.linux-ppc64.tar.gz +50b48186645cb72d735d929ec5a80b075366d8788d3a3b21c760c3fe5235dbae *go.d.plugin-v0.31.0.linux-ppc64le.tar.gz diff --git a/packaging/go.d.version b/packaging/go.d.version index 46e8233f9..7021025f3 100644 --- a/packaging/go.d.version +++ b/packaging/go.d.version @@ -1 +1 @@ -v0.28.2 +v0.31.0 diff --git a/packaging/installer/README.md b/packaging/installer/README.md index 5b16585fe..7bdb6a757 100644 --- a/packaging/installer/README.md +++ b/packaging/installer/README.md @@ -11,7 +11,7 @@ Netdata is a monitoring agent designed to run on all your systems: physical and IoT/edge devices. Netdata runs on Linux, FreeBSD, macOS, Kubernetes, Docker, and all their derivatives. The best way to install Netdata is with our [**automatic one-line installation -script**](#automatic-one-line-installation-script), which works with all Linux distributions, or our [**.deb/rpm +script**](#automatic-one-line-installation-script), which works with all Linux distributions and macOS environments, or our [**.deb/rpm packages**](/packaging/installer/methods/packages.md), which seamlessly install with your distribution's package manager. @@ -34,15 +34,23 @@ _actively_ contributing to Netdata's future. ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-3600&label=last+hour&units=installations&value_color=orange&precision=0) ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.requests_per_url&options=unaligned&dimensions=kickstart&group=sum&after=-86400&label=today&units=installations&precision=0) -This method is fully automatic on all Linux distributions, including Ubuntu, Debian, Fedora, CentOS, and others. 
+This method is fully automatic on all Linux distributions, including Ubuntu, Debian, Fedora, CentOS, and others, as well as on macOS environments.

 To install Netdata from source, including all dependencies required to connect to Netdata Cloud, and get
 _automatic nightly updates_, run the following as your normal user:

+**Linux**
+
 ```bash
 bash <(curl -Ss https://my-netdata.io/kickstart.sh)
 ```

+**macOS**
+
+```bash
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --install /usr/local/
+```
+
 To see more information about this installation script, including how to disable automatic updates, get nightly
 vs. stable releases, or disable anonymous statistics, see the [`kickstart.sh` method
 page](/packaging/installer/methods/kickstart.md).
diff --git a/packaging/installer/dependencies/alpine.sh b/packaging/installer/dependencies/alpine.sh
new file mode 100755
index 000000000..e69de29bb
diff --git a/packaging/installer/dependencies/arch.sh b/packaging/installer/dependencies/arch.sh
new file mode 100755
index 000000000..e69de29bb
diff --git a/packaging/installer/dependencies/centos.sh b/packaging/installer/dependencies/centos.sh
new file mode 100755
index 000000000..e69de29bb
diff --git a/packaging/installer/dependencies/clearlinux.sh b/packaging/installer/dependencies/clearlinux.sh
new file mode 100755
index 000000000..e69de29bb
diff --git a/packaging/installer/dependencies/debian.sh b/packaging/installer/dependencies/debian.sh
new file mode 100755
index 000000000..e69de29bb
diff --git a/packaging/installer/dependencies/freebsd.sh b/packaging/installer/dependencies/freebsd.sh
new file mode 100755
index 000000000..e69de29bb
diff --git a/packaging/installer/dependencies/gentoo.sh b/packaging/installer/dependencies/gentoo.sh
new file mode 100755
index 000000000..e69de29bb
diff --git a/packaging/installer/dependencies/macos.sh b/packaging/installer/dependencies/macos.sh
new file mode 100755
index 000000000..e69de29bb
diff --git a/packaging/installer/dependencies/ol.sh b/packaging/installer/dependencies/ol.sh
new file mode 100755
index 000000000..e69de29bb
diff --git a/packaging/installer/dependencies/opensuse.sh b/packaging/installer/dependencies/opensuse.sh
new file mode 100755
index 000000000..e69de29bb
diff --git a/packaging/installer/dependencies/rhel.sh b/packaging/installer/dependencies/rhel.sh
new file mode 100755
index 000000000..e69de29bb
diff --git a/packaging/installer/dependencies/sabayon.sh b/packaging/installer/dependencies/sabayon.sh
new file mode 100755
index 000000000..e69de29bb
diff --git a/packaging/installer/dependencies/ubuntu.sh b/packaging/installer/dependencies/ubuntu.sh
new file mode 100755
index 000000000..e69de29bb
diff --git a/packaging/installer/functions.sh b/packaging/installer/functions.sh
index fd10fcfaf..6264c1131 100644
--- a/packaging/installer/functions.sh
+++ b/packaging/installer/functions.sh
@@ -224,17 +224,22 @@ safe_pidof() {

 # -----------------------------------------------------------------------------
 find_processors() {
-  # Most UNIX systems have `nproc` as part of their userland (including macOS, Linux and BSD)
+  # Most UNIX systems have `nproc` as part of their userland (including Linux and BSD)
   if command -v nproc > /dev/null; then
     nproc && return
   fi

+  # macOS has no nproc but it may have gnproc installed from Homebrew or from MacPorts.
+ if command -v gnproc > /dev/null; then + gnproc && return + fi + local cpus if [ -f "/proc/cpuinfo" ]; then # linux cpus=$(grep -c ^processor /proc/cpuinfo) else - # freebsd + # freebsd cpus=$(sysctl hw.ncpu 2> /dev/null | grep ^hw.ncpu | cut -d ' ' -f 2) fi if [ -z "${cpus}" ] || [ $((cpus)) -lt 1 ]; then diff --git a/packaging/installer/install-required-packages.sh b/packaging/installer/install-required-packages.sh index f65535dd0..6eeda53c0 100755 --- a/packaging/installer/install-required-packages.sh +++ b/packaging/installer/install-required-packages.sh @@ -17,10 +17,10 @@ fi # These options control which packages we are going to install # They can be pre-set, but also can be controlled with command line options -PACKAGES_NETDATA=${PACKAGES_NETDATA-0} +PACKAGES_NETDATA=${PACKAGES_NETDATA-1} PACKAGES_NETDATA_NODEJS=${PACKAGES_NETDATA_NODEJS-0} PACKAGES_NETDATA_PYTHON=${PACKAGES_NETDATA_PYTHON-0} -PACKAGES_NETDATA_PYTHON3=${PACKAGES_NETDATA_PYTHON3-0} +PACKAGES_NETDATA_PYTHON3=${PACKAGES_NETDATA_PYTHON3-1} PACKAGES_NETDATA_PYTHON_MYSQL=${PACKAGES_NETDATA_PYTHON_MYSQL-0} PACKAGES_NETDATA_PYTHON_POSTGRES=${PACKAGES_NETDATA_PYTHON_POSTGRES-0} PACKAGES_NETDATA_PYTHON_MONGO=${PACKAGES_NETDATA_PYTHON_MONGO-0} @@ -31,8 +31,8 @@ PACKAGES_FIREQOS=${PACKAGES_FIREQOS-0} PACKAGES_UPDATE_IPSETS=${PACKAGES_UPDATE_IPSETS-0} PACKAGES_NETDATA_DEMO_SITE=${PACKAGES_NETDATA_DEMO_SITE-0} PACKAGES_NETDATA_SENSORS=${PACKAGES_NETDATA_SENSORS-0} -PACKAGES_NETDATA_DATABASE=${PACKAGES_NETDATA_DATABASE-0} -PACKAGES_NETDATA_EBPF=${PACKAGES_NETDATA_EBPF-0} +PACKAGES_NETDATA_DATABASE=${PACKAGES_NETDATA_DATABASE-1} +PACKAGES_NETDATA_EBPF=${PACKAGES_NETDATA_EBPF-1} # needed commands lsb_release=$(command -v lsb_release 2> /dev/null) @@ -198,7 +198,7 @@ get_os_release() { eval "$(grep -E "^(NAME|ID|ID_LIKE|VERSION|VERSION_ID)=" "${os_release_file}")" for x in "${ID}" ${ID_LIKE}; do case "${x,,}" in - alpine | arch | centos | clear-linux-os | debian | fedora | gentoo | manjaro | opensuse-leap | rhel | sabayon | sles | suse | ubuntu) + alpine | arch | centos | clear-linux-os | debian | fedora | gentoo | manjaro | opensuse-leap | ol | rhel | sabayon | sles | suse | ubuntu) distribution="${x}" version="${VERSION_ID}" codename="${VERSION}" @@ -345,7 +345,7 @@ user_picks_distribution() { if [ "${REPLY}" = "yum" ] && [ -z "${distribution}" ]; then REPLY= while [ -z "${REPLY}" ]; do - if ! read -r -p "yum in centos, rhel or fedora? > "; then + if ! read -r -p "yum in centos, rhel, ol or fedora? > "; then continue fi @@ -353,11 +353,14 @@ user_picks_distribution() { fedora | rhel) distribution="rhel" ;; + ol) + distribution="ol" + ;; centos) distribution="centos" ;; *) - echo >&2 "Please enter 'centos', 'fedora' or 'rhel'." + echo >&2 "Please enter 'centos', 'fedora', 'ol' or 'rhel'." REPLY= ;; esac @@ -417,12 +420,12 @@ detect_package_manager_from_distribution() { ;; centos* | clearos*) - echo >&2 "You should have EPEL enabled to install all the prerequisites." - echo >&2 "Check: http://www.tecmint.com/how-to-enable-epel-repository-for-rhel-centos-6-5/" - package_installer="install_yum" + package_installer="" tree="centos" + [ -n "${dnf}" ] && package_installer="install_dnf" + [ -n "${yum}" ] && package_installer="install_yum" if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${yum}" ]; then - echo >&2 "command 'yum' is required to install packages on a '${distribution} ${version}' system." + echo >&2 "command 'yum' or 'dnf' is required to install packages on a '${distribution} ${version}' system." 
exit 1 fi ;; @@ -430,8 +433,19 @@ detect_package_manager_from_distribution() { fedora* | redhat* | red\ hat* | rhel*) package_installer= tree="rhel" + [ -n "${dnf}" ] && package_installer="install_dnf" [ -n "${yum}" ] && package_installer="install_yum" + if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${package_installer}" ]; then + echo >&2 "command 'yum' or 'dnf' is required to install packages on a '${distribution} ${version}' system." + exit 1 + fi + ;; + + ol*) + package_installer= + tree="ol" [ -n "${dnf}" ] && package_installer="install_dnf" + [ -n "${yum}" ] && package_installer="install_yum" if [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${package_installer}" ]; then echo >&2 "command 'yum' or 'dnf' is required to install packages on a '${distribution} ${version}' system." exit 1 @@ -499,7 +513,13 @@ check_package_manager() { dnf) [ "${IGNORE_INSTALLED}" -eq 0 ] && [ -z "${dnf}" ] && echo >&2 "${1} is not available." && return 1 package_installer="install_dnf" - tree="rhel" + if [ "${distribution}" = "centos" ]; then + tree="centos" + elif [ "${distribution}" = "ol" ]; then + tree="ol" + else + tree="rhel" + fi detection="user-input" return 0 ;; @@ -550,6 +570,8 @@ check_package_manager() { package_installer="install_yum" if [ "${distribution}" = "centos" ]; then tree="centos" + elif [ "${distribution}" = "ol" ]; then + tree="ol" else tree="rhel" fi @@ -721,6 +743,21 @@ declare -A pkg_gcc=( ['default']="gcc" ) +# g++, required for building protobuf +# All three cases of this not being required are systems that implicitly +# include g++ when installing gcc. +declare -A pkg_gxx=( + ['alpine']="g++" + ['arch']="NOTREQUIRED" + ['clearlinux']="c-basic" + ['debian']="g++" + ['gentoo']="NOTREQUIRED" + ['macos']="NOTREQUIRED" + ['ubuntu']="g++" + ['freebsd']="NOTREQUIRED" + ['default']="gcc-c++" +) + declare -A pkg_gdb=( ['gentoo']="sys-devel/gdb" ['macos']="NOTREQUIRED" @@ -772,6 +809,7 @@ declare -A pkg_libz_dev=( ['gentoo']="sys-libs/zlib" ['sabayon']="sys-libs/zlib" ['rhel']="zlib-devel" + ['ol']="zlib-devel" ['suse']="zlib-devel" ['clearlinux']="devpkg-zlib" ['macos']="NOTREQUIRED" @@ -788,6 +826,7 @@ declare -A pkg_libuuid_dev=( ['gentoo']="sys-apps/util-linux" ['sabayon']="sys-apps/util-linux" ['rhel']="libuuid-devel" + ['ol']="libuuid-devel" ['suse']="libuuid-devel" ['macos']="NOTREQUIRED" ['freebsd']="e2fsprogs-libuuid" @@ -802,6 +841,7 @@ declare -A pkg_libmnl_dev=( ['gentoo']="net-libs/libmnl" ['sabayon']="net-libs/libmnl" ['rhel']="libmnl-devel" + ['ol']="libmnl-devel" ['suse']="libmnl-devel" ['clearlinux']="devpkg-libmnl" ['macos']="NOTREQUIRED" @@ -859,6 +899,7 @@ declare -A pkg_netcat=( ['gentoo']="net-analyzer/netcat" ['sabayon']="net-analyzer/gnu-netcat" ['rhel']="nmap-ncat" + ['ol']="nmap-ncat" ['suse']="netcat-openbsd" ['clearlinux']="sysadmin-basic" ['arch']="gnu-netcat" @@ -903,6 +944,7 @@ declare -A pkg_pkg_config=( ['gentoo']="virtual/pkgconfig" ['sabayon']="virtual/pkgconfig" ['rhel']="pkgconfig" + ['ol']="pkgconfig" ['suse']="pkg-config" ['freebsd']="pkgconf" ['clearlinux']="c-basic" @@ -934,6 +976,7 @@ declare -A pkg_python_mysqldb=( # exceptions ['fedora-24']="python2-mysql" + ['ol-8']="WARNING|" ) declare -A pkg_python3_mysqldb=( @@ -944,6 +987,7 @@ declare -A pkg_python3_mysqldb=( ['gentoo']="dev-python/mysqlclient" ['sabayon']="dev-python/mysqlclient" ['rhel']="WARNING|" + ['ol']="WARNING|" ['suse']="WARNING|" ['clearlinux']="WARNING|" ['macos']="WARNING|" @@ -975,6 +1019,7 @@ declare -A pkg_python_psycopg2=( ['gentoo']="dev-python/psycopg" 
['sabayon']="dev-python/psycopg:2" ['rhel']="python-psycopg2" + ['ol']="python-psycopg2" ['suse']="python-psycopg2" ['clearlinux']="WARNING|" ['macos']="WARNING|" @@ -989,6 +1034,7 @@ declare -A pkg_python3_psycopg2=( ['gentoo']="dev-python/psycopg" ['sabayon']="dev-python/psycopg:2" ['rhel']="WARNING|" + ['ol']="WARNING|" ['suse']="WARNING|" ['clearlinux']="WARNING|" ['macos']="WARNING|" @@ -998,6 +1044,7 @@ declare -A pkg_python3_psycopg2=( ['centos-8']="python38-psycopg2" ['rhel-7']="python3-psycopg2" ['rhel-8']="python38-psycopg2" + ['ol-8']="python3-psycopg2" ) declare -A pkg_python_pip=( @@ -1028,6 +1075,7 @@ declare -A pkg_python_pymongo=( ['suse']="python-pymongo" ['clearlinux']="WARNING|" ['rhel']="WARNING|" + ['ol']="WARNING|" ['macos']="WARNING|" ['default']="python-pymongo" ) @@ -1041,6 +1089,7 @@ declare -A pkg_python3_pymongo=( ['suse']="python3-pymongo" ['clearlinux']="WARNING|" ['rhel']="WARNING|" + ['ol']="WARNING|" ['freebsd']="py37-pymongo" ['macos']="WARNING|" ['default']="python3-pymongo" @@ -1049,6 +1098,7 @@ declare -A pkg_python3_pymongo=( ['centos-8']="python3-pymongo" ['rhel-7']="python36-pymongo" ['rhel-8']="python3-pymongo" + ['ol-8']="python3-pymongo" ) declare -A pkg_python_requests=( @@ -1084,6 +1134,7 @@ declare -A pkg_python3_requests=( ['centos-8']="python3-requests" ['rhel-7']="python36-requests" ['rhel-8']="python3-requests" + ['ol-8']="python3-requests" ) declare -A pkg_lz4=( @@ -1187,6 +1238,7 @@ declare -A pkg_valgrind=( declare -A pkg_ulogd=( ['centos']="WARNING|" ['rhel']="WARNING|" + ['ol']="WARNING|" ['clearlinux']="WARNING|" ['gentoo']="app-admin/ulogd" ['arch']="ulogd" @@ -1216,6 +1268,7 @@ declare -A pkg_libelf=( ['fedora']="elfutils-libelf-devel" ['centos']="elfutils-libelf-devel" ['rhel']="elfutils-libelf-devel" + ['ol']="elfutils-libelf-devel" ['clearlinux']="devpkg-elfutils" ['suse']="libelf-devel" ['macos']="NOTREQUIRED" @@ -1291,6 +1344,7 @@ packages() { require_cmd gcc || require_cmd gcc-multilib || suitable_package gcc + require_cmd g++ || suitable_package gxx require_cmd make || suitable_package make require_cmd autoconf || suitable_package autoconf @@ -1524,21 +1578,38 @@ validate_tree_freebsd() { fi } -validate_tree_centos() { +validate_tree_ol() { local opts= if [ "${NON_INTERACTIVE}" -eq 1 ]; then echo >&2 "Running in non-interactive mode" opts="-y" fi - echo >&2 " > CentOS Version: ${version} ..." - - echo >&2 " > Checking for epel ..." - if ! rpm -qa | grep epel > /dev/null; then - if prompt "epel not found, shall I install it?"; then - run ${sudo} yum ${opts} install epel-release + if [[ "${version}" =~ ^8(\..*)?$ ]]; then + echo " > Checking for CodeReady Builder ..." + if ! run ${sudo} dnf repolist | grep -q codeready; then + if prompt "CodeReady Builder not found, shall I install it?"; then + cat > /etc/yum.repos.d/ol8_codeready.repo <<-EOF + [ol8_codeready_builder] + name=Oracle Linux \$releasever CodeReady Builder (\$basearch) + baseurl=http://yum.oracle.com/repo/OracleLinux/OL8/codeready/builder/\$basearch + gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle + gpgcheck=1 + enabled=1 + EOF + fi fi fi +} + +validate_tree_centos() { + local opts= + if [ "${NON_INTERACTIVE}" -eq 1 ]; then + echo >&2 "Running in non-interactive mode" + opts="-y" + fi + + echo >&2 " > CentOS Version: ${version} ..." if [[ "${version}" =~ ^8(\..*)?$ ]]; then echo >&2 " > Checking for config-manager ..." @@ -1555,16 +1626,19 @@ validate_tree_centos() { fi fi - echo >&2 " > Checking for Okay ..." - if ! 
rpm -qa | grep okay > /dev/null; then - if prompt "okay not found, shall I install it?"; then - run ${sudo} yum ${opts} install http://repo.okay.com.mx/centos/8/x86_64/release/okay-release-1-5.el8.noarch.rpm - fi - fi + echo >&2 " > Updating libarchive ..." + run ${sudo} yum ${opts} install libarchive echo >&2 " > Installing Judy-devel directly ..." - run ${sudo} yum ${opts} install http://mirror.centos.org/centos/8/PowerTools/x86_64/os/Packages/Judy-devel-1.0.5-18.module_el8.1.0+217+4d875839.x86_64.rpm + run ${sudo} yum ${opts} install http://mirror.centos.org/centos/8/PowerTools/x86_64/os/Packages/Judy-devel-1.0.5-18.module_el8.3.0+757+d382997d.x86_64.rpm + elif [[ "${version}" =~ ^7(\..*)?$ ]]; then + echo >&2 " > Checking for EPEL ..." + if ! rpm -qa | grep epel-release > /dev/null; then + if prompt "EPEL not found, shall I install it?"; then + run ${sudo} yum ${opts} install epel-release + fi + fi elif [[ "${version}" =~ ^6\..*$ ]]; then echo >&2 " > Detected CentOS 6.x ..." echo >&2 " > Checking for Okay ..." @@ -1601,7 +1675,7 @@ install_yum() { read -r -a yum_opts <<< "${opts}" # install the required packages - run ${sudo} yum "${yum_opts[@]}" install "${@}" # --enablerepo=epel-testing + run ${sudo} yum "${yum_opts[@]}" install "${@}" } # ----------------------------------------------------------------------------- diff --git a/packaging/installer/kickstart-ng.sh b/packaging/installer/kickstart-ng.sh new file mode 100644 index 000000000..8fcc6d5d3 --- /dev/null +++ b/packaging/installer/kickstart-ng.sh @@ -0,0 +1,1362 @@ +#!/bin/sh +# +# SPDX-License-Identifier: GPL-3.0-or-later + +# ====================================================================== +# Constants + +KICKSTART_OPTIONS="${*}" +PACKAGES_SCRIPT="https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh" +PATH="${PATH}:/usr/local/bin:/usr/local/sbin" +REPOCONFIG_URL_PREFIX="https://packagecloud.io/netdata/netdata-repoconfig/packages" +REPOCONFIG_VERSION="1-1" +TELEMETRY_URL="https://posthog.netdata.cloud/capture/" +START_TIME="$(date +%s)" + +# ====================================================================== +# Defaults for environment variables + +SELECTED_INSTALL_METHOD="none" +INSTALL_TYPE="unknown" +INSTALL_PREFIX="" +NETDATA_AUTO_UPDATES="1" +NETDATA_CLAIM_ONLY=0 +NETDATA_CLAIM_URL="https://app.netdata.cloud" +NETDATA_DISABLE_CLOUD=0 +NETDATA_ONLY_BUILD=0 +NETDATA_ONLY_NATIVE=0 +NETDATA_ONLY_STATIC=0 +NETDATA_REQUIRE_CLOUD=1 +RELEASE_CHANNEL="nightly" + +NETDATA_DISABLE_TELEMETRY="${DO_NOT_TRACK:-0}" +NETDATA_TARBALL_BASEURL="${NETDATA_TARBALL_BASEURL:-https://storage.googleapis.com/netdata-nightlies}" +NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS:-""}" +TELEMETRY_API_KEY="${NETDATA_POSTHOG_API_KEY:-mqkwGT0JNFqO-zX2t0mW6Tec9yooaVu7xCBlXtHnt5Y}" + +if [ ! -t 1 ]; then + INTERACTIVE=0 +else + INTERACTIVE=1 +fi + +# ====================================================================== +# Usage info + +usage() { + cat << HEREDOC +USAGE: kickstart.sh [options] + where options include: + + --non-interactive Do not prompt for user input. (default: prompt if there is a controlling terminal) + --interactive Prompt for user input even if there is no controlling terminal. 
+ --dont-start-it Do not start the agent by default (only for static installs or local builds) + --stable-channel Install a stable version instead of a nightly build (default: install a nightly build) + --nightly-channel Install a nightly build instead of a stable version + --no-updates Do not enable automatic updates (default: enable automatic updates) + --auto-update Enable automatic updates. + --disable-telemetry Opt-out of anonymous statistics. + --native-only Only install if native binary packages are available. + --static-only Only install if a static build is available. + --build-only Only install using a local build. + --reinstall Explicitly reinstall instead of updating any existing install. + --reinstall-even-if-unsafe Even try to reinstall if we don't think we can do so safely (implies --reinstall). + --disable-cloud Disable support for Netdata Cloud (default: detect) + --require-cloud Only install if Netdata Cloud can be enabled. Overrides --disable-cloud. + --install Specify an installation prefix for local builds (default: autodetect based on system type). + --claim-token Use a specified token for claiming to Netdata Cloud. + --claim-rooms When claiming, add the node to the specified rooms. + --claim-only If there is an existing install, only try to claim it, not update it. + --claim-* Specify other options for the claiming script. + --no-cleanup Don't do any cleanup steps. This is intended to help with debugging the installer. + +Additionally, this script may use the following environment variables: + + TMPDIR: Used to specify where to put temporary files. On most systems, the default we select + automatically should be fine. The user running the script needs to both be able to + write files to the temporary directory, and run files from that location. + ROOTCMD: Used to specify a command to use to run another command with root privileges if needed. By + default we try to use sudo, doas, or pkexec (in that order of preference), but if + you need special options for one of those to work, or have a different tool to do + the same thing on your system, you can specify it here. + DO_NOT_TRACK If set to a value other than 0, behave as if \`--disable-telemetry\` was specified. + NETDATA_INSTALLER_OPTIONS: Specifies extra options to pass to the static installer or local build script. 
+ +HEREDOC +} + +# ====================================================================== +# Telemetry functions + +telemetry_event() { + if [ "${NETDATA_DISABLE_TELEMETRY}" -eq 1 ]; then + return 0 + fi + + now="$(date +%s)" + total_duration="$((now - START_TIME))" + + if [ -e "/etc/os-release" ]; then + eval "$(grep -E "^(NAME|ID|ID_LIKE|VERSION|VERSION_ID)=" < /etc/os-release | sed 's/^/HOST_/')" + fi + + if [ -z "${HOST_NAME}" ] || [ -z "${HOST_VERSION}" ] || [ -z "${HOST_ID}" ]; then + if [ -f "/etc/lsb-release" ]; then + DISTRIB_ID="unknown" + DISTRIB_RELEASE="unknown" + DISTRIB_CODENAME="unknown" + eval "$(grep -E "^(DISTRIB_ID|DISTRIB_RELEASE|DISTRIB_CODENAME)=" < /etc/lsb-release)" + if [ -z "${HOST_NAME}" ]; then HOST_NAME="${DISTRIB_ID}"; fi + if [ -z "${HOST_VERSION}" ]; then HOST_VERSION="${DISTRIB_RELEASE}"; fi + if [ -z "${HOST_ID}" ]; then HOST_ID="${DISTRIB_CODENAME}"; fi + fi + fi + + KERNEL_NAME="$(uname -s)" + + if [ "${KERNEL_NAME}" = FreeBSD ]; then + TOTAL_RAM="$(sysctl -n hw.physmem)" + elif [ "${KERNEL_NAME}" = Darwin ]; then + TOTAL_RAM="$(sysctl -n hw.memsize)" + elif [ -r /proc/meminfo ]; then + TOTAL_RAM="$(grep -F MemTotal /proc/meminfo | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' | cut -f 1 -d ' ')" + TOTAL_RAM="$((TOTAL_RAM * 1024))" + fi + + if [ -f /etc/machine-id ]; then + DISTINCT_ID="$(cat /etc/machine-id)" + elif command -v uuidgen > /dev/null 2>&1; then + DISTINCT_ID="$(uuidgen)" + else + DISTINCT_ID="null" + fi + + REQ_BODY="$(cat << EOF +{ + "api_key": "${TELEMETRY_API_KEY}", + "event": "${1}", + "properties": { + "distinct_id": "${DISTINCT_ID}", + "event_source": "agent installer", + "\$current_url": "agent installer", + "\$pathname": "netdata-installer", + "\$host": "installer.netdata.io", + "\$ip": "127.0.0.1", + "script_variant": "kickstart-ng", + "error_code": "${3}", + "error_message": "${2}", + "install_options": "${KICKSTART_OPTIONS}", + "total_runtime": "${total_duration}", + "selected_install_method": "${SELECTED_INSTALL_METHOD}", + "netdata_release_channel": "${RELEASE_CHANNEL:-null}", + "netdata_install_type": "${INSTALL_TYPE}", + "host_os_name": "${HOST_NAME:-unknown}", + "host_os_id": "${HOST_ID:-unknown}", + "host_os_id_like": "${HOST_ID_LIKE:-unknown}", + "host_os_version": "${HOST_VERSION:-unknown}", + "host_os_version_id": "${HOST_VERSION_ID:-unknown}", + "system_kernel_name": "${KERNEL_NAME}", + "system_kernel_version": "$(uname -r)", + "system_architecture": "$(uname -m)", + "system_total_ram": "${TOTAL_RAM:-unknown}" + } +} +EOF +)" + + if [ -n "$(command -v curl 2> /dev/null)" ]; then + curl --silent -o /dev/null -X POST --max-time 2 --header "Content-Type: application/json" -d "${REQ_BODY}" "${TELEMETRY_URL}" > /dev/null + else + wget -q -O - --no-check-certificate \ + --method POST \ + --timeout=1 \ + --header 'Content-Type: application/json' \ + --body-data "${REQ_BODY}" \ + "${TELEMETRY_URL}" > /dev/null + fi +} + +trap_handler() { + code="${1}" + lineno="${2}" + + printf >&2 "%s\n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ERROR ${TPUT_RESET} Installer exited unexpectedly (${code}-${lineno})" + + telemetry_event INSTALL_CRASH "Installer exited unexpectedly (${code}-${lineno})" "E${code}-${lineno}" + + trap - EXIT + + cleanup + + exit 1 +} + +trap 'trap_handler 0 ${LINENO}' EXIT +trap 'trap_handler 1 0' HUP +trap 'trap_handler 2 0' INT +trap 'trap_handler 3 0' QUIT +trap 'trap_handler 13 0' PIPE +trap 'trap_handler 15 0' TERM + +# 
====================================================================== +# Utility functions + +setup_terminal() { + TPUT_RESET="" + TPUT_WHITE="" + TPUT_BGRED="" + TPUT_BGGREEN="" + TPUT_BOLD="" + TPUT_DIM="" + + # Is stderr on the terminal? If not, then fail + test -t 2 || return 1 + + if command -v tput > /dev/null 2>&1; then + if [ $(($(tput colors 2> /dev/null))) -ge 8 ]; then + # Enable colors + TPUT_RESET="$(tput sgr 0)" + TPUT_WHITE="$(tput setaf 7)" + TPUT_BGRED="$(tput setab 1)" + TPUT_BGGREEN="$(tput setab 2)" + TPUT_BOLD="$(tput bold)" + TPUT_DIM="$(tput dim)" + fi + fi + + echo "${TPUT_RESET}" + + return 0 +} + +cleanup() { + if [ -z "${NO_CLEANUP}" ]; then + ${ROOTCMD} rm -rf "${tmpdir}" + fi +} + +fatal() { + printf >&2 "%s\n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${*}" + telemetry_event "INSTALL_FAILED" "${1}" "${2}" + cleanup + trap - EXIT + exit 1 +} + +run_ok() { + printf >&2 "%s\n\n" "${TPUT_BGGREEN}${TPUT_WHITE}${TPUT_BOLD} OK ${TPUT_RESET}" +} + +run_failed() { + printf >&2 "%s\n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} FAILED ${TPUT_RESET}" +} + +ESCAPED_PRINT_METHOD= +# shellcheck disable=SC3050 +if printf "%q " test > /dev/null 2>&1; then + ESCAPED_PRINT_METHOD="printfq" +fi + +escaped_print() { + if [ "${ESCAPED_PRINT_METHOD}" = "printfq" ]; then + # shellcheck disable=SC3050 + printf "%q " "${@}" + else + printf "%s" "${*}" + fi + return 0 +} + +progress() { + echo >&2 " --- ${TPUT_BOLD}${*}${TPUT_RESET} --- " +} + +run_logfile="/dev/null" +run() { + user="${USER--}" + dir="${PWD}" + + if [ "$(id -u)" = "0" ]; then + info="[root ${dir}]# " + info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]# " + else + info="[${user} ${dir}]$ " + info_console="[${TPUT_DIM}${dir}${TPUT_RESET}]$ " + fi + + { + printf "%s" "${info}" + escaped_print "${@}" + printf " ... " + } >> "${run_logfile}" + + printf >&2 "%s" "${info_console}${TPUT_BOLD}" + escaped_print >&2 "${@}" + printf >&2 "%s\n" "${TPUT_RESET}" + + "${@}" + + ret=$? + if [ ${ret} -ne 0 ]; then + run_failed + printf "%s\n" "FAILED with exit code ${ret}" >> "${run_logfile}" + else + run_ok + printf "OK\n" >> "${run_logfile}" + fi + + return ${ret} +} + +warning() { + printf >&2 "%s\n\n" "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} WARNING ${TPUT_RESET} ${*}" +} + +_cannot_use_tmpdir() { + testfile="$(TMPDIR="${1}" mktemp -q -t netdata-test.XXXXXXXXXX)" + ret=0 + + if [ -z "${testfile}" ]; then + return "${ret}" + fi + + if printf '#!/bin/sh\necho SUCCESS\n' > "${testfile}"; then + if chmod +x "${testfile}"; then + if [ "$("${testfile}")" = "SUCCESS" ]; then + ret=1 + fi + fi + fi + + rm -f "${testfile}" + return "${ret}" +} + +create_tmp_directory() { + if [ -z "${TMPDIR}" ] || _cannot_use_tmpdir "${TMPDIR}"; then + if _cannot_use_tmpdir /tmp; then + if _cannot_use_tmpdir "${PWD}"; then + fatal "Unable to find a usable temporary directory. Please set \$TMPDIR to a path that is both writable and allows execution of files and try again." F0400 + else + TMPDIR="${PWD}" + fi + else + TMPDIR="/tmp" + fi + fi + + mktemp -d -t netdata-kickstart-XXXXXXXXXX +} + +download() { + url="${1}" + dest="${2}" + if command -v curl > /dev/null 2>&1; then + run curl -q -sSL --connect-timeout 10 --retry 3 --output "${dest}" "${url}" || return 1 + elif command -v wget > /dev/null 2>&1; then + run wget -T 15 -O "${dest}" "${url}" || return 1 + else + fatal "I need curl or wget to proceed, but neither of them are available on this system." 
F0003 + fi +} + +get_redirect() { + url="${1}" + + if command -v curl > /dev/null 2>&1; then + run sh -c "curl ${url} -s -L -I -o /dev/null -w '%{url_effective}' | grep -o '[^/]*$'" || return 1 + elif command -v wget > /dev/null 2>&1; then + run sh -c "wget --max-redirect=0 ${url} 2>&1 | grep Location | cut -d ' ' -f2 | grep -o '[^/]*$'" || return 1 + else + fatal "I need curl or wget to proceed, but neither of them are available on this system." F0003 + fi +} + +safe_sha256sum() { + # Within the context of the installer, we only use -c option that is common between the two commands + # We will have to reconsider if we start using non-common options + if command -v sha256sum > /dev/null 2>&1; then + sha256sum "$@" + elif command -v shasum > /dev/null 2>&1; then + shasum -a 256 "$@" + else + fatal "I could not find a suitable checksum binary to use" F0004 + fi +} + +get_system_info() { + case "$(uname -s)" in + Linux) + SYSTYPE="Linux" + + os_release_file= + if [ -s "/etc/os-release" ] && [ -r "/etc/os-release" ]; then + os_release_file="/etc/os-release" + elif [ -s "/usr/lib/os-release" ] && [ -r "/usr/lib/os-release" ]; then + os_release_file="/usr/lib/os-release" + else + fatal "Cannot find an os-release file ..." F0401 + fi + + # shellcheck disable=SC1090 + . "${os_release_file}" + + DISTRO="${ID}" + SYSVERSION="${VERSION_ID}" + SYSCODENAME="${VERSION_CODENAME}" + SYSARCH="$(uname -m)" + + supported_compat_names="debian ubuntu centos fedora opensuse" + + if str_in_list "${DISTRO}" "${supported_compat_names}"; then + DISTRO_COMPAT_NAME="${DISTRO}" + else + case "${DISTRO}" in + opensuse-leap) + DISTRO_COMPAT_NAME="opensuse" + ;; + rhel) + DISTRO_COMPAT_NAME="centos" + ;; + *) + DISTRO_COMPAT_NAME="unknown" + ;; + esac + fi + ;; + Darwin) + SYSTYPE="Darwin" + SYSVERSION="$(sw_vers -buildVersion)" + SYSARCH="$(uname -m)" + ;; + FreeBSD) + SYSTYPE="FreeBSD" + SYSVERSION="$(uname -K)" + SYSARCH="$(uname -m)" + ;; + *) + fatal "Unsupported system type detected. Netdata cannot be installed on this system using this script." F0200 + ;; + esac +} + +str_in_list() { + printf "%s\n" "${2}" | tr ' ' "\n" | grep -qE "^${1}\$" + return $? +} + +confirm_root_support() { + if [ "$(id -u)" -ne "0" ]; then + if [ -z "${ROOTCMD}" ] && command -v sudo > /dev/null; then + ROOTCMD="sudo" + fi + + if [ -z "${ROOTCMD}" ] && command -v doas > /dev/null; then + ROOTCMD="doas" + fi + + if [ -z "${ROOTCMD}" ] && command -v pkexec > /dev/null; then + ROOTCMD="pkexec" + fi + + if [ -z "${ROOTCMD}" ]; then + fatal "We need root privileges to continue, but cannot find a way to gain them. 
Either re-run this script as root, or set \$ROOTCMD to a command that can be used to gain root privileges" F0201 + fi + fi +} + +confirm() { + prompt="${1} [y/n]" + + while true; do + echo "${prompt}" + read -r yn + + case "$yn" in + [Yy]*) return 0;; + [Nn]*) return 1;; + *) echo "Please answer yes or no.";; + esac + done +} + +# ====================================================================== +# Existing install handling code + +update() { + updater="${ndprefix}/usr/libexec/netdata/netdata-updater.sh" + + if [ -x "${updater}" ]; then + if run ${ROOTCMD} "${updater}" --not-running-from-cron; then + progress "Updated existing install at ${ndprefix}" + return 0 + else + fatal "Failed to update existing Netdata install at ${ndprefix}" F0100 + fi + else + return 1 + fi +} + +handle_existing_install() { + if pkg_installed netdata; then + ndprefix="/" + else + if [ -n "${INSTALL_PREFIX}" ]; then + searchpath="${INSTALL_PREFIX}/bin:${INSTALL_PREFIX}/sbin:${INSTALL_PREFIX}/usr/bin:${INSTALL_PREFIX}/usr/sbin:${PATH}" + searchpath="${INSTALL_PREFIX}/netdata/bin:${INSTALL_PREFIX}/netdata/sbin:${INSTALL_PREFIX}/netdata/usr/bin:${INSTALL_PREFIX}/netdata/usr/sbin:${searchpath}" + else + searchpath="${PATH}" + fi + + ndpath="$(PATH="${searchpath}" command -v netdata 2>/dev/null)" + + if [ -z "$ndpath" ] && [ -x /opt/netdata/bin/netdata ]; then + ndpath="/opt/netdata/bin/netdata" + fi + + if [ -n "${ndpath}" ]; then + ndprefix="$(dirname "$(dirname "${ndpath}")")" + fi + + if echo "${ndprefix}" | grep -Eq '/usr$'; then + ndprefix="$(dirname "${ndprefix}")" + fi + fi + + if [ -n "${ndprefix}" ]; then + typefile="${ndprefix}/etc/netdata/.install-type" + if [ -r "${typefile}" ]; then + ${ROOTCMD} sh -c "cat \"${typefile}\" > \"${tmpdir}/install-type\"" + # shellcheck disable=SC1091 + . "${tmpdir}/install-type" + else + INSTALL_TYPE="unknown" + fi + fi + + if [ -z "${ndprefix}" ]; then + progress "No existing installations of netdata found, assuming this is a fresh install." + return 0 + fi + + case "${INSTALL_TYPE}" in + kickstart-*|legacy-*|binpkg-*|manual-static|unknown) + if [ "${INSTALL_TYPE}" = "unknown" ]; then + warning "Found an existing netdata install at ${ndprefix}, but could not determine the install type." + else + progress "Found an existing netdata install at ${ndprefix}, with installation type '${INSTALL_TYPE}'." + fi + + if [ -n "${NETDATA_REINSTALL}" ] || [ -n "${NETDATA_UNSAFE_REINSTALL}" ]; then + progress "Found an existing netdata install at ${ndprefix}, but user requested reinstall, continuing." + + case "${INSTALL_TYPE}" in + binpkg-*) NETDATA_ONLY_NATIVE=1 ;; + *-build) NETDATA_ONLY_BUILD=1 ;; + *-static) NETDATA_ONLY_STATIC=1 ;; + *) + if [ -n "${NETDATA_UNSAFE_REINSTALL}" ]; then + warning "Reinstalling over top of a ${INSTALL_TYPE} installation may be unsafe, but the user has requested we proceed." + elif [ "${INTERACTIVE}" -eq 0 ]; then + fatal "User requested reinstall, but we cannot safely reinstall over top of a ${INSTALL_TYPE} installation, exiting." F0104 + else + if confirm "Reinstalling over top of a ${INSTALL_TYPE} installation may be unsafe, do you want to continue?"; then + progress "OK, continuing." + else + fatal "Cancelling reinstallation at user request." F0105 + fi + fi + ;; + esac + + return 0 + fi + + ret=0 + + if [ "${NETDATA_CLAIM_ONLY}" -eq 0 ] && echo "${INSTALL_TYPE}" | grep -vq "binpkg-*"; then + if ! update; then + warning "Unable to find usable updater script, not updating existing install at ${ndprefix}." 
+      fi
+    else
+      warning "Not updating existing install at ${ndprefix}."
+    fi
+
+    if [ -n "${NETDATA_CLAIM_TOKEN}" ]; then
+      progress "Attempting to claim existing install at ${ndprefix}."
+      INSTALL_PREFIX="${ndprefix}"
+      claim
+      ret=$?
+    elif [ "${NETDATA_CLAIM_ONLY}" -eq 1 ]; then
+      fatal "User asked to claim, but did not provide a claiming token." F0202
+    else
+      progress "Not attempting to claim existing install at ${ndprefix} (no claiming token provided)."
+    fi
+
+    cleanup
+    trap - EXIT
+    exit $ret
+    ;;
+  oci)
+    fatal "This is an OCI container, use the regular image lifecycle management commands in your container instead of this script for managing it." F0203
+    ;;
+  *)
+    if [ -n "${NETDATA_REINSTALL}" ] || [ -n "${NETDATA_UNSAFE_REINSTALL}" ]; then
+      if [ -n "${NETDATA_UNSAFE_REINSTALL}" ]; then
+        warning "Reinstalling over top of a ${INSTALL_TYPE} installation may be unsafe, but the user has requested we proceed."
+      elif [ "${INTERACTIVE}" -eq 0 ]; then
+        fatal "User requested reinstall, but we cannot safely reinstall over top of a ${INSTALL_TYPE} installation, exiting." F0104
+      else
+        if confirm "Reinstalling over top of a ${INSTALL_TYPE} installation may be unsafe, do you want to continue?"; then
+          progress "OK, continuing."
+        else
+          fatal "Cancelling reinstallation at user request." F0105
+        fi
+      fi
+    else
+      fatal "Found an existing netdata install at ${ndprefix}, but the install type is '${INSTALL_TYPE}', which is not supported, refusing to proceed." F0103
+    fi
+    ;;
+  esac
+}
+
+soft_disable_cloud() {
+  cloud_prefix="${INSTALL_PREFIX}/var/lib/netdata/cloud.d"
+
+  run ${ROOTCMD} mkdir -p "${cloud_prefix}"
+
+  cat > "${tmpdir}/cloud.conf" << EOF
+[global]
+  enabled = no
+EOF
+
+  run ${ROOTCMD} cp "${tmpdir}/cloud.conf" "${cloud_prefix}/cloud.conf"
+
+  if [ -z "${NETDATA_NO_START}" ]; then
+    case "${SYSTYPE}" in
+      Darwin) run ${ROOTCMD} launchctl kickstart -k com.github.netdata ;;
+      FreeBSD) run ${ROOTCMD} service netdata restart ;;
+      Linux)
+        initpath="$(${ROOTCMD} readlink /proc/1/exe)"
+
+        if command -v service > /dev/null 2>&1; then
+          run ${ROOTCMD} service netdata restart
+        elif command -v rc-service > /dev/null 2>&1; then
+          run ${ROOTCMD} rc-service netdata restart
+        elif [ "$(basename "${initpath}" 2> /dev/null)" = "systemd" ]; then
+          run ${ROOTCMD} systemctl restart netdata
+        elif [ -f /etc/init.d/netdata ]; then
+          run ${ROOTCMD} /etc/init.d/netdata restart
+        fi
+        ;;
+    esac
+  fi
+}
+
+confirm_install_prefix() {
+  if [ -n "${INSTALL_PREFIX}" ] && [ "${NETDATA_ONLY_BUILD}" -ne 1 ]; then
+    fatal "The \`--install\` option is only supported together with the \`--only-build\` option." F0204
+  fi
+
+  if [ -n "${INSTALL_PREFIX}" ]; then
+    NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} --install ${INSTALL_PREFIX}"
+  else
+    case "${SYSTYPE}" in
+      Darwin)
+        INSTALL_PREFIX="/usr/local/netdata"
+        NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} --install-no-prefix ${INSTALL_PREFIX}"
+        ;;
+      FreeBSD)
+        INSTALL_PREFIX="/usr/local"
+        NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} --install-no-prefix ${INSTALL_PREFIX}"
+        ;;
+    esac
+  fi
+}
+
+# ======================================================================
+# Claiming support code
+
+check_claim_opts() {
+# shellcheck disable=SC2235,SC2030
+  if [ -z "${NETDATA_CLAIM_TOKEN}" ] && [ -n "${NETDATA_CLAIM_ROOMS}" ]; then
+    fatal "Invalid claiming options, claim rooms may only be specified when a token and URL are specified." F0204
+  elif [ -z "${NETDATA_CLAIM_TOKEN}" ] && [ -n "${NETDATA_CLAIM_EXTRA}" ]; then
+    fatal "Invalid claiming options, a claiming token must be specified." F0204
+  elif [ "${NETDATA_DISABLE_CLOUD}" -eq 1 ] && [ -n "${NETDATA_CLAIM_TOKEN}" ]; then
+    fatal "Cloud explicitly disabled, but automatic claiming requested. Either enable Netdata Cloud, or remove the --claim-* options." F0204
+  fi
+}
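+
+# As a point of reference, an invocation that satisfies the checks above might
+# look like the following (a sketch only; TOKEN and ROOM1,ROOM2 are
+# placeholders for the values shown in Netdata Cloud):
+#
+#   kickstart.sh --claim-token TOKEN --claim-rooms ROOM1,ROOM2 --claim-url https://app.netdata.cloud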
+
+claim() {
+  progress "Attempting to claim agent to ${NETDATA_CLAIM_URL}"
+  if [ -z "${INSTALL_PREFIX}" ]; then
+    NETDATA_CLAIM_PATH=/usr/sbin/netdata-claim.sh
+  elif [ "${INSTALL_PREFIX}" = "/opt/netdata" ]; then
+    NETDATA_CLAIM_PATH="/opt/netdata/bin/netdata-claim.sh"
+  else
+    NETDATA_CLAIM_PATH="${INSTALL_PREFIX}/netdata/usr/sbin/netdata-claim.sh"
+  fi
+
+  if ! pgrep netdata > /dev/null; then
+    NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -daemon-not-running"
+  fi
+
+  # shellcheck disable=SC2086
+  if ${ROOTCMD} "${NETDATA_CLAIM_PATH}" -token="${NETDATA_CLAIM_TOKEN}" -rooms="${NETDATA_CLAIM_ROOMS}" -url="${NETDATA_CLAIM_URL}" ${NETDATA_CLAIM_EXTRA}; then
+    progress "Successfully claimed node"
+  else
+    warning "Unable to claim node, you must do so manually."
+    if [ -z "${NETDATA_NEW_INSTALL}" ]; then
+      cleanup
+      trap - EXIT
+      exit 1
+    fi
+  fi
+}
+
+# ======================================================================
+# Native package install code.
+
+# Check for an already installed package with a given name.
+pkg_installed() {
+  case "${DISTRO_COMPAT_NAME}" in
+    debian|ubuntu)
+      dpkg -l "${1}" > /dev/null 2>&1
+      return $?
+      ;;
+    centos|fedora|opensuse)
+      rpm -q "${1}" > /dev/null 2>&1
+      return $?
+      ;;
+    *)
+      return 1
+      ;;
+  esac
+}
+
+# Check for the existence of a usable netdata package in the repo.
+netdata_avail_check() {
+  case "${DISTRO_COMPAT_NAME}" in
+    debian|ubuntu)
+      env DEBIAN_FRONTEND=noninteractive apt-cache policy netdata | grep -q packagecloud.io/netdata/netdata;
+      return $?
+      ;;
+    centos|fedora)
+      # shellcheck disable=SC2086
+      ${pm_cmd} search -v netdata | grep -qE 'Repo *: netdata(-edge)?$'
+      return $?
+      ;;
+    opensuse)
+      zypper packages -r "$(zypper repos | grep -E 'netdata |netdata-edge ' | cut -f 1 -d '|' | tr -d ' ')" | grep -E 'netdata '
+      return $?
+      ;;
+    *)
+      return 1
+      ;;
+  esac
+}
+
+# Check for any distro-specific dependencies we know we need.
+check_special_native_deps() {
+  if [ "${DISTRO_COMPAT_NAME}" = "centos" ] && [ "${SYSVERSION}" = "7" ]; then
+    progress "Checking for libuv availability."
+    # shellcheck disable=SC2086
+    if ${pm_cmd} search ${interactive_opts} -v libuv | grep -q "No matches found"; then
+      progress "libuv not found, checking for EPEL availability."
+      # shellcheck disable=SC2086
+      if ${pm_cmd} search ${interactive_opts} -v epel-release | grep -q "No matches found"; then
+        warning "Unable to find a suitable source for libuv, cannot install on this system."
+        return 1
+      else
+        progress "EPEL is available, attempting to install so that required dependencies are available."
+
+        # shellcheck disable=SC2086
+        if ! run ${ROOTCMD} env ${env} ${pm_cmd} install ${pkg_install_opts} epel-release; then
+          warning "Failed to install EPEL."
+          return 1
+        fi
+      fi
+    else
+      return 0
+    fi
+  fi
+}
+
+try_package_install() {
+  if [ -z "${DISTRO}" ]; then
+    warning "Unable to determine Linux distribution for native packages."
+    return 1
+  fi
+
+  progress "Attempting to install using native packages..."
+ + if [ "${RELEASE_CHANNEL}" = "nightly" ]; then + release="-edge" + else + release="" + fi + + if [ "${INTERACTIVE}" = "0" ]; then + interactive_opts="-y" + env="DEBIAN_FRONTEND=noninteractive" + else + interactive_opts="" + env="" + fi + + case "${DISTRO_COMPAT_NAME}" in + debian) + needs_early_refresh=1 + pm_cmd="apt-get" + repo_subcmd="update" + repo_prefix="debian/${SYSCODENAME}" + pkg_type="deb" + pkg_suffix="_all" + pkg_vsep="_" + pkg_install_opts="${interactive_opts}" + repo_update_opts="${interactive_opts}" + uninstall_subcmd="uninstall" + INSTALL_TYPE="binpkg-deb" + ;; + ubuntu) + needs_early_refresh=1 + pm_cmd="apt-get" + repo_subcmd="update" + repo_prefix="ubuntu/${SYSCODENAME}" + pkg_type="deb" + pkg_suffix="_all" + pkg_vsep="_" + pkg_install_opts="${interactive_opts}" + repo_update_opts="${interactive_opts}" + uninstall_subcmd="uninstall" + INSTALL_TYPE="binpkg-deb" + ;; + centos) + if command -v dnf > /dev/null; then + pm_cmd="dnf" + repo_subcmd="makecache" + else + pm_cmd="yum" + fi + repo_prefix="el/${SYSVERSION}" + pkg_type="rpm" + pkg_suffix=".noarch" + pkg_vsep="-" + pkg_install_opts="${interactive_opts}" + repo_update_opts="${interactive_opts}" + uninstall_subcmd="remove" + INSTALL_TYPE="binpkg-rpm" + ;; + fedora) + if command -v dnf > /dev/null; then + pm_cmd="dnf" + repo_subcmd="makecache" + else + pm_cmd="yum" + fi + repo_prefix="fedora/${SYSVERSION}" + pkg_type="rpm" + pkg_suffix=".noarch" + pkg_vsep="-" + pkg_install_opts="${interactive_opts}" + repo_update_opts="${interactive_opts}" + uninstall_subcmd="remove" + INSTALL_TYPE="binpkg-rpm" + ;; + opensuse) + pm_cmd="zypper" + repo_subcmd="--gpg-auto-import-keys refresh" + repo_prefix="opensuse/${SYSVERSION}" + pkg_type="rpm" + pkg_suffix=".noarch" + pkg_vsep="-" + pkg_install_opts="${interactive_opts} --allow-unsigned-rpm" + repo_update_opts="" + uninstall_subcmd="remove" + INSTALL_TYPE="binpkg-rpm" + ;; + *) + warning "We do not provide native packages for ${DISTRO}." + return 2 + ;; + esac + + repoconfig_name="netdata-repo${release}" + repoconfig_file="${repoconfig_name}${pkg_vsep}${REPOCONFIG_VERSION}${pkg_suffix}.${pkg_type}" + repoconfig_url="${REPOCONFIG_URL_PREFIX}/${repo_prefix}/${repoconfig_file}/download.${pkg_type}" + + if ! pkg_installed "${repoconfig_name}"; then + progress "Downloading repository configuration package." + if ! download "${repoconfig_url}" "${tmpdir}/${repoconfig_file}"; then + warning "Failed to download repository configuration package." + return 2 + fi + + if [ -n "${needs_early_refresh}" ]; then + progress "Updating repository metadata." + # shellcheck disable=SC2086 + if ! run ${ROOTCMD} env ${env} ${pm_cmd} ${repo_subcmd} ${repo_update_opts}; then + warning "Failed to refresh repository metadata." + return 2 + fi + fi + + progress "Installing repository configuration package." + # shellcheck disable=SC2086 + if ! run ${ROOTCMD} env ${env} ${pm_cmd} install ${pkg_install_opts} "${tmpdir}/${repoconfig_file}"; then + warning "Failed to install repository configuration package." + return 2 + fi + + if [ -n "${repo_subcmd}" ]; then + progress "Updating repository metadata." + # shellcheck disable=SC2086 + if ! run ${ROOTCMD} env ${env} ${pm_cmd} ${repo_subcmd} ${repo_update_opts}; then + fatal "Failed to update repository metadata." F0205 + fi + fi + else + progress "Repository configuration is already present, attempting to install netdata." + fi + + if ! check_special_native_deps; then + warning "Could not find secondary dependencies ${DISTRO} on ${SYSARCH}." 
+ if [ -z "${NO_CLEANUP}" ]; then + progress "Attempting to uninstall repository configuration package." + # shellcheck disable=SC2086 + run ${ROOTCMD} env ${env} ${pm_cmd} ${uninstall_subcmd} ${pkg_install_opts} "${repoconfig_name}" + fi + return 2 + fi + + progress "Checking for usable Netdata package." + if ! netdata_avail_check "${DISTRO_COMPAT_NAME}"; then + warning "Could not find a usable native package for ${DISTRO} on ${SYSARCH}." + if [ -z "${NO_CLEANUP}" ]; then + progress "Attempting to uninstall repository configuration package." + # shellcheck disable=SC2086 + run ${ROOTCMD} env ${env} ${pm_cmd} ${uninstall_subcmd} ${pkg_install_opts} "${repoconfig_name}" + fi + return 2 + fi + + if [ "${NETDATA_DISABLE_TELEMETRY}" -eq 1 ]; then + run ${ROOTCMD} mkdir -p "/etc/netdata" + run ${ROOTCMD} touch "/etc/netdata/.opt-out-from-anonymous-statistics" + fi + + progress "Installing Netdata package." + # shellcheck disable=SC2086 + if ! run ${ROOTCMD} env ${env} ${pm_cmd} install ${pkg_install_opts} netdata; then + warning "Failed to install Netdata package." + if [ -z "${NO_CLEANUP}" ]; then + progress "Attempting to uninstall repository configuration package." + # shellcheck disable=SC2086 + run ${ROOTCMD} env ${env} ${pm_cmd} ${uninstall_subcmd} ${pkg_install_opts} "${repoconfig_name}" + fi + return 2 + fi +} + +# ====================================================================== +# Static build install code + +set_static_archive_urls() { + if [ "${RELEASE_CHANNEL}" = "stable" ]; then + latest="$(get_redirect "https://github.com/netdata/netdata/releases/latest")" + export NETDATA_STATIC_ARCHIVE_URL="https://github.com/netdata/netdata/releases/download/${latest}/netdata-${SYSARCH}-${latest}.gz.run" + export NETDATA_STATIC_ARCHIVE_CHECKSUM_URL="https://github.com/netdata/netdata/releases/download/${latest}/sha256sums.txt" + else + export NETDATA_STATIC_ARCHIVE_URL="${NETDATA_TARBALL_BASEURL}/netdata-latest.gz.run" + export NETDATA_STATIC_ARCHIVE_CHECKSUM_URL="${NETDATA_TARBALL_BASEURL}/sha256sums.txt" + fi +} + +try_static_install() { + set_static_archive_urls "${RELEASE_CHANNEL}" + progress "Downloading static netdata binary: ${NETDATA_STATIC_ARCHIVE_URL}" + + if ! download "${NETDATA_STATIC_ARCHIVE_URL}" "${tmpdir}/netdata-${SYSARCH}-latest.gz.run"; then + warning "Unable to download static build archive for ${SYSARCH}." + return 2 + fi + + if ! download "${NETDATA_STATIC_ARCHIVE_CHECKSUM_URL}" "${tmpdir}/sha256sum.txt"; then + fatal "Unable to fetch checksums to verify static build archive." F0206 + fi + + if ! grep "netdata-${SYSARCH}-latest.gz.run" "${tmpdir}/sha256sum.txt" | safe_sha256sum -c - > /dev/null 2>&1; then + fatal "Static binary checksum validation failed. Usually this is a result of an older copy of the file being cached somewhere upstream and can be resolved by retrying in an hour." F0207 + fi + + if [ "${INTERACTIVE}" -eq 0 ]; then + opts="${opts} --accept" + fi + + progress "Installing netdata" + # shellcheck disable=SC2086 + if ! run ${ROOTCMD} sh "${tmpdir}/netdata-${SYSARCH}-latest.gz.run" ${opts} -- ${NETDATA_AUTO_UPDATES:+--auto-update} ${NETDATA_INSTALLER_OPTIONS}; then + warning "Failed to install static build of Netdata on ${SYSARCH}." + run rm -rf /opt/netdata + return 2 + fi + + install_type_file="/opt/netdata/etc/netdata/.install-type" + if [ -f "${install_type_file}" ]; then + ${ROOTCMD} sh -c "cat \"${install_type_file}\" > \"${tmpdir}/install-type\"" + ${ROOTCMD} chown "$(id -u)":"$(id -g)" "${tmpdir}/install-type" + # shellcheck disable=SC1091 + . 
"${tmpdir}/install-type" + cat > "${tmpdir}/install-type" <<- EOF + INSTALL_TYPE='kickstart-static' + PREBUILT_ARCH='${PREBUILT_ARCH}' + EOF + ${ROOTCMD} chown netdata:netdata "${tmpdir}/install-type" + ${ROOTCMD} cp "${tmpdir}/install-type" "${install_type_file}" + fi +} + +# ====================================================================== +# Local build install code + +set_source_archive_urls() { + if [ "$1" = "stable" ]; then + latest="$(get_redirect "https://github.com/netdata/netdata/releases/latest")" + export NETDATA_SOURCE_ARCHIVE_URL="https://github.com/netdata/netdata/releases/download/${latest}/netdata-${latest}.tar.gz" + export NETDATA_SOURCE_ARCHIVE_CHECKSUM_URL="https://github.com/netdata/netdata/releases/download/${latest}/sha256sums.txt" + else + export NETDATA_SOURCE_ARCHIVE_URL="${NETDATA_TARBALL_BASEURL}/netdata-latest.tar.gz" + export NETDATA_SOURCE_ARCHIVE_CHECKSUM_URL="${NETDATA_TARBALL_BASEURL}/sha256sums.txt" + fi +} + +install_local_build_dependencies() { + bash="$(command -v bash 2> /dev/null)" + + if [ -z "${bash}" ] || [ ! -x "${bash}" ]; then + warning "Unable to find a usable version of \`bash\` (required for local build)." + return 1 + fi + + progress "Fetching script to detect required packages..." + download "${PACKAGES_SCRIPT}" "${tmpdir}/install-required-packages.sh" + + if [ ! -s "${tmpdir}/install-required-packages.sh" ]; then + warning "Downloaded dependency installation script is empty." + else + progress "Running downloaded script to detect required packages..." + + if [ "${INTERACTIVE}" -eq 0 ]; then + opts="--dont-wait --non-interactive" + fi + + if [ "${SYSTYPE}" = "Darwin" ]; then + sudo="" + else + sudo="${ROOTCMD}" + fi + + # shellcheck disable=SC2086 + if ! run ${sudo} "${bash}" "${tmpdir}/install-required-packages.sh" ${opts} netdata; then + warning "It failed to install all the required packages, but installation might still be possible." + fi + fi +} + +build_and_install() { + progress "Building netdata" + + echo "INSTALL_TYPE='kickstart-build'" > system/.install-type + + opts="${NETDATA_INSTALLER_OPTIONS}" + + if [ "${INTERACTIVE}" -eq 0 ]; then + opts="${opts} --dont-wait" + fi + + if [ "${NETDATA_AUTO_UPDATES}" -eq 1 ]; then + opts="${opts} --auto-update" + fi + + if [ "${RELEASE_CHANNEL}" = "stable" ]; then + opts="${opts} --stable-channel" + fi + + if [ "${NETDATA_REQUIRE_CLOUD}" -eq 1 ]; then + opts="${opts} --require-cloud" + elif [ "${NETDATA_DISABLE_CLOUD}" -eq 1 ]; then + opts="${opts} --disable-cloud" + fi + + # shellcheck disable=SC2086 + run ${ROOTCMD} ./netdata-installer.sh ${opts} + + case $? in + 1) + fatal "netdata-installer.sh exited with error" F0007 + ;; + 2) + fatal "Insufficient RAM to install netdata" F0008 + ;; + esac +} + +try_build_install() { + if ! install_local_build_dependencies; then + return 1 + fi + + set_source_archive_urls "${RELEASE_CHANNEL}" + + download "${NETDATA_SOURCE_ARCHIVE_CHECKSUM_URL}" "${tmpdir}/sha256sum.txt" + download "${NETDATA_SOURCE_ARCHIVE_URL}" "${tmpdir}/netdata-latest.tar.gz" + + if ! grep netdata-latest.tar.gz "${tmpdir}/sha256sum.txt" | safe_sha256sum -c - > /dev/null 2>&1; then + fatal "Tarball checksum validation failed. Usually this is a result of an older copy of the file being cached somewhere upstream and can be resolved by retrying in an hour." 
F0005
+  fi
+
+  run tar -xf "${tmpdir}/netdata-latest.tar.gz" -C "${tmpdir}"
+  rm -rf "${tmpdir}/netdata-latest.tar.gz" > /dev/null 2>&1
+  cd "$(find "${tmpdir}" -mindepth 1 -maxdepth 1 -type d -name "netdata-*")" || fatal "Cannot cd to netdata source tree" F0006
+
+  if [ -x netdata-installer.sh ]; then
+    build_and_install || return 1
+  else
+    # This case is needed because some platforms produce an extra directory on the source tarball extraction.
+    if [ "$(find . -mindepth 1 -maxdepth 1 -type d | wc -l)" -eq 1 ] && [ -x "$(find . -mindepth 1 -maxdepth 1 -type d)/netdata-installer.sh" ]; then
+      cd "$(find . -mindepth 1 -maxdepth 1 -type d)" && build_and_install || return 1
+    else
+      fatal "Cannot install netdata from source (the source directory does not include netdata-installer.sh). Leaving all files in ${tmpdir}" F0009
+    fi
+  fi
+}
+
+# ======================================================================
+# Per system-type install logic
+
+install_on_linux() {
+  if [ "${NETDATA_ONLY_STATIC}" -ne 1 ] && [ "${NETDATA_ONLY_BUILD}" -ne 1 ]; then
+    SELECTED_INSTALL_METHOD="native"
+    try_package_install
+
+    case "$?" in
+      0)
+        NETDATA_INSTALL_SUCCESSFUL=1
+        ;;
+      1)
+        fatal "Unable to install on this system." F0300
+        ;;
+      2)
+        if [ "${NETDATA_ONLY_NATIVE}" -eq 1 ]; then
+          fatal "Could not install native binary packages." F0301
+        else
+          warning "Could not install native binary packages, falling back to alternative installation method."
+        fi
+        ;;
+    esac
+  fi
+
+  if [ "${NETDATA_ONLY_NATIVE}" -ne 1 ] && [ "${NETDATA_ONLY_BUILD}" -ne 1 ] && [ -z "${NETDATA_INSTALL_SUCCESSFUL}" ]; then
+    SELECTED_INSTALL_METHOD="static"
+    INSTALL_TYPE="kickstart-static"
+    try_static_install
+
+    case "$?" in
+      0)
+        NETDATA_INSTALL_SUCCESSFUL=1
+        INSTALL_PREFIX="/opt/netdata"
+        ;;
+      1)
+        fatal "Unable to install on this system." F0302
+        ;;
+      2)
+        if [ "${NETDATA_ONLY_STATIC}" -eq 1 ]; then
+          fatal "Could not install static build." F0303
+        else
+          warning "Could not install static build, falling back to alternative installation method."
+        fi
+        ;;
+    esac
+  fi
+
+  if [ "${NETDATA_ONLY_NATIVE}" -ne 1 ] && [ "${NETDATA_ONLY_STATIC}" -ne 1 ] && [ -z "${NETDATA_INSTALL_SUCCESSFUL}" ]; then
+    SELECTED_INSTALL_METHOD="build"
+    INSTALL_TYPE="kickstart-build"
+    try_build_install
+
+    case "$?" in
+      0)
+        NETDATA_INSTALL_SUCCESSFUL=1
+        ;;
+      *)
+        fatal "Unable to install on this system." F0304
+        ;;
+    esac
+  fi
+}
+
+install_on_macos() {
+  if [ "${NETDATA_ONLY_NATIVE}" -eq 1 ]; then
+    fatal "User requested native package, but native packages are not available for macOS. Try installing without \`--only-native\` option." F0305
+  elif [ "${NETDATA_ONLY_STATIC}" -eq 1 ]; then
+    fatal "User requested static build, but static builds are not available for macOS. Try installing without \`--only-static\` option." F0306
+  else
+    SELECTED_INSTALL_METHOD="build"
+    INSTALL_TYPE="kickstart-build"
+    try_build_install
+
+    case "$?" in
+      0)
+        NETDATA_INSTALL_SUCCESSFUL=1
+        ;;
+      *)
+        fatal "Unable to install on this system." F0307
+        ;;
+    esac
+  fi
+}
+
+install_on_freebsd() {
+  if [ "${NETDATA_ONLY_NATIVE}" -eq 1 ]; then
+    fatal "User requested native package, but native packages are not available for FreeBSD. Try installing without \`--only-native\` option." F0308
+  elif [ "${NETDATA_ONLY_STATIC}" -eq 1 ]; then
+    fatal "User requested static build, but static builds are not available for FreeBSD. Try installing without \`--only-static\` option." F0309
+  else
+    SELECTED_INSTALL_METHOD="build"
+    INSTALL_TYPE="kickstart-build"
+    try_build_install
+
+    case "$?" in
+      0)
+        NETDATA_INSTALL_SUCCESSFUL=1
+        ;;
+      *)
+        fatal "Unable to install on this system." F030A
+        ;;
+    esac
+  fi
+}
+
+# ======================================================================
+# Main program
+
+setup_terminal || echo > /dev/null
+
+while [ -n "${1}" ]; do
+  case "${1}" in
+    "--help")
+      usage
+      cleanup
+      trap - EXIT
+      exit 0
+      ;;
+    "--no-cleanup") NO_CLEANUP=1 ;;
+    "--dont-wait"|"--non-interactive") INTERACTIVE=0 ;;
+    "--interactive") INTERACTIVE=1 ;;
+    "--stable-channel") RELEASE_CHANNEL="stable" ;;
+    "--no-updates") NETDATA_AUTO_UPDATES=0 ;;
+    "--auto-update") NETDATA_AUTO_UPDATES="1" ;;
+    "--reinstall") NETDATA_REINSTALL=1 ;;
+    "--reinstall-even-if-unsafe") NETDATA_UNSAFE_REINSTALL=1 ;;
+    "--claim-only") NETDATA_CLAIM_ONLY=1 ;;
+    "--disable-cloud")
+      NETDATA_DISABLE_CLOUD=1
+      NETDATA_REQUIRE_CLOUD=0
+      ;;
+    "--require-cloud")
+      NETDATA_DISABLE_CLOUD=0
+      NETDATA_REQUIRE_CLOUD=1
+      ;;
+    "--dont-start-it")
+      NETDATA_NO_START=1
+      NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} --dont-start-it"
+      ;;
+    "--disable-telemetry")
+      NETDATA_DISABLE_TELEMETRY="1"
+      NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} --disable-telemetry"
+      ;;
+    "--install")
+      INSTALL_PREFIX="${2}"
+      shift 1
+      ;;
+    "--native-only")
+      NETDATA_ONLY_NATIVE=1
+      NETDATA_ONLY_STATIC=0
+      NETDATA_ONLY_BUILD=0
+      SELECTED_INSTALL_METHOD="native"
+      ;;
+    "--static-only")
+      NETDATA_ONLY_STATIC=1
+      NETDATA_ONLY_NATIVE=0
+      NETDATA_ONLY_BUILD=0
+      SELECTED_INSTALL_METHOD="static"
+      ;;
+    "--build-only")
+      NETDATA_ONLY_BUILD=1
+      NETDATA_ONLY_NATIVE=0
+      NETDATA_ONLY_STATIC=0
+      SELECTED_INSTALL_METHOD="build"
+      ;;
+    "--claim-token")
+      NETDATA_CLAIM_TOKEN="${2}"
+      shift 1
+      ;;
+    "--claim-rooms")
+      NETDATA_CLAIM_ROOMS="${2}"
+      shift 1
+      ;;
+    "--claim-url")
+      NETDATA_CLAIM_URL="${2}"
+      shift 1
+      ;;
+    "--claim-"*)
+      optname="$(echo "${1}" | cut -d '-' -f 4-)"
+      case "${optname}" in
+        id|proxy|user|hostname)
+          NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -${optname} ${2}"
+          shift 1
+          ;;
+        verbose|insecure|noproxy|noreload|daemon-not-running)
+          NETDATA_CLAIM_EXTRA="${NETDATA_CLAIM_EXTRA} -${optname}"
+          ;;
+        *)
+          warning "Ignoring unrecognized claiming option ${optname}"
+          ;;
+      esac
+      ;;
+    *)
+      warning "Passing unrecognized option '${1}' to installer script. If this is intended, please add it to \$NETDATA_INSTALLER_OPTIONS instead."
+      NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} ${1}"
+      ;;
+  esac
+  shift 1
+done
+
+check_claim_opts
+confirm_root_support
+get_system_info
+confirm_install_prefix
+
+tmpdir="$(create_tmp_directory)"
+progress "Using ${tmpdir} as a temporary directory."
+cd "${tmpdir}" || exit 1
+
+handle_existing_install
+
+case "${SYSTYPE}" in
+  Linux) install_on_linux ;;
+  Darwin) install_on_macos ;;
+  FreeBSD) install_on_freebsd ;;
+esac
+
+if [ -n "${NETDATA_CLAIM_TOKEN}" ]; then
+  claim
+elif [ "${NETDATA_DISABLE_CLOUD}" -eq 1 ]; then
+  soft_disable_cloud
+fi
+
+telemetry_event INSTALL_SUCCESS "" ""
+cleanup
+trap - EXIT
diff --git a/packaging/installer/kickstart-static64.sh b/packaging/installer/kickstart-static64.sh
index 36aca54d7..ec1800a61 100755
--- a/packaging/installer/kickstart-static64.sh
+++ b/packaging/installer/kickstart-static64.sh
@@ -206,6 +206,33 @@ safe_sha256sum() {
   fi
 }
 
+mark_install_type() {
+  install_type_file="/opt/netdata/etc/netdata/.install-type"
+  if [ -f "${install_type_file}" ]; then
+    # shellcheck disable=SC1090
+    . 
"${install_type_file}" + cat > "${TMPDIR}/install-type" <<- EOF + INSTALL_TYPE='kickstart-static' + PREBUILT_ARCH='${PREBUILT_ARCH}' + EOF + ${sudo} chown netdata:netdata "${TMPDIR}/install-type" + ${sudo} cp "${TMPDIR}/install-type" "${install_type_file}" + fi +} + +claim() { + progress "Attempting to claim agent to ${NETDATA_CLAIM_URL}" + NETDATA_CLAIM_PATH=/opt/netdata/bin/netdata-claim.sh + + if ${sudo} "${NETDATA_CLAIM_PATH}" -token=${NETDATA_CLAIM_TOKEN} -rooms=${NETDATA_CLAIM_ROOMS} -url=${NETDATA_CLAIM_URL} ${NETDATA_CLAIM_EXTRA}; then + progress "Successfully claimed node" + return 0 + else + run_failed "Unable to claim node, you must do so manually." + return 1 + fi +} + # ---------------------------------------------------------------------------- umask 022 @@ -326,7 +353,10 @@ if [ -n "$ndpath" ] ; then if [ -r "${ndprefix}/etc/netdata/.environment" ] ; then ndstatic="$(grep IS_NETDATA_STATIC_BINARY "${ndprefix}/etc/netdata/.environment" | cut -d "=" -f 2 | tr -d \")" if [ -z "${NETDATA_REINSTALL}" ] && [ -z "${NETDATA_LOCAL_TARBALL_OVERRIDE}" ] ; then - if [ -x "${ndprefix}/usr/libexec/netdata/netdata-updater.sh" ] ; then + if [ -n "${NETDATA_CLAIM_TOKEN}" ] ; then + claim + exit $? + elif [ -x "${ndprefix}/usr/libexec/netdata/netdata-updater.sh" ] ; then progress "Attempting to update existing install instead of creating a new one" if run ${sudo} "${ndprefix}/usr/libexec/netdata/netdata-updater.sh" --not-running-from-cron ; then progress "Updated existing install at ${ndpath}" @@ -353,7 +383,10 @@ if [ -n "$ndpath" ] ; then fi else progress "Existing install appears to be handled manually or through the system package manager." - if [ -z "${NETDATA_ALLOW_DUPLICATE_INSTALL}" ] ; then + if [ -n "${NETDATA_CLAIM_TOKEN}" ] ; then + claim + exit $? + elif [ -z "${NETDATA_ALLOW_DUPLICATE_INSTALL}" ] ; then fatal "Existing installation detected which cannot be safely updated by this script, refusing to continue." exit 1 else @@ -386,6 +419,8 @@ fi progress "Installing netdata" run ${sudo} sh "${TMPDIR}/netdata-latest.gz.run" ${opts} -- ${NETDATA_UPDATES} ${NETDATA_INSTALLER_OPTIONS} +mark_install_type + #shellcheck disable=SC2181 if [ $? -eq 0 ]; then run ${sudo} rm "${TMPDIR}/netdata-latest.gz.run" @@ -400,12 +435,5 @@ fi # -------------------------------------------------------------------------------------------------------------------- if [ -n "${NETDATA_CLAIM_TOKEN}" ]; then - progress "Attempting to claim agent to ${NETDATA_CLAIM_URL}" - NETDATA_CLAIM_PATH=/opt/netdata/bin/netdata-claim.sh - - if "${NETDATA_CLAIM_PATH}" -token=${NETDATA_CLAIM_TOKEN} -rooms=${NETDATA_CLAIM_ROOMS} -url=${NETDATA_CLAIM_URL} ${NETDATA_CLAIM_EXTRA}; then - progress "Successfully claimed node" - else - run_failed "Unable to claim node, you must do so manually." 
-  fi
+  claim
 fi
diff --git a/packaging/installer/kickstart.sh b/packaging/installer/kickstart.sh
index 3c90cc816..2fa762968 100755
--- a/packaging/installer/kickstart.sh
+++ b/packaging/installer/kickstart.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env sh
+#!/usr/bin/env bash
 # SPDX-License-Identifier: GPL-3.0-or-later
 #
 # Run me with:
@@ -16,6 +16,7 @@
 # --local-files set the full path of the desired tarball to run install with
 # --allow-duplicate-install do not bail if we detect a duplicate install
 # --reinstall if an existing install would be updated, reinstall instead
+# --disable-telemetry opt out of anonymous statistics
 # --claim-token specify a token to use for claiming the newly installed instance
 # --claim-url specify a URL to use for claiming the newly installed instance
 # --claim-rooms specify a list of rooms to claim the newly installed instance to
@@ -43,6 +44,10 @@ PACKAGES_SCRIPT="https://raw.githubusercontent.com/netdata/netdata/master/packag
 # Netdata Tarball Base URL (defaults to our Google Storage Bucket)
 [ -z "$NETDATA_TARBALL_BASEURL" ] && NETDATA_TARBALL_BASEURL=https://storage.googleapis.com/netdata-nightlies
 
+TELEMETRY_URL="https://posthog.netdata.cloud/capture/"
+TELEMETRY_API_KEY="${NETDATA_POSTHOG_API_KEY:-mqkwGT0JNFqO-zX2t0mW6Tec9yooaVu7xCBlXtHnt5Y}"
+KICKSTART_OPTIONS="${*}"
+
 # ---------------------------------------------------------------------------------------------------------------------
 # library functions copied from packaging/installer/functions.sh
 
@@ -76,8 +81,93 @@ setup_terminal() {
 setup_terminal || echo > /dev/null
 
 # -----------------------------------------------------------------------------
+telemetry_event() {
+  if [ -n "${NETDATA_DISABLE_TELEMETRY}" ]; then
+    return 0
+  fi
+
+  if [ -e "/etc/os-release" ]; then
+    eval "$(grep -E "^(NAME|ID|ID_LIKE|VERSION|VERSION_ID)=" < /etc/os-release | sed 's/^/HOST_/')"
+  fi
+
+  if [ -z "${HOST_NAME}" ] || [ -z "${HOST_VERSION}" ] || [ -z "${HOST_ID}" ]; then
+    if [ -f "/etc/lsb-release" ]; then
+      DISTRIB_ID="unknown"
+      DISTRIB_RELEASE="unknown"
+      DISTRIB_CODENAME="unknown"
+      eval "$(grep -E "^(DISTRIB_ID|DISTRIB_RELEASE|DISTRIB_CODENAME)=" < /etc/lsb-release)"
+      if [ -z "${HOST_NAME}" ]; then HOST_NAME="${DISTRIB_ID}"; fi
+      if [ -z "${HOST_VERSION}" ]; then HOST_VERSION="${DISTRIB_RELEASE}"; fi
+      if [ -z "${HOST_ID}" ]; then HOST_ID="${DISTRIB_CODENAME}"; fi
+    fi
+  fi
+
+  KERNEL_NAME="$(uname -s)"
+
+  if [ "${KERNEL_NAME}" = FreeBSD ]; then
+    TOTAL_RAM="$(sysctl -n hw.physmem)"
+  elif [ "${KERNEL_NAME}" = Darwin ]; then
+    TOTAL_RAM="$(sysctl -n hw.memsize)"
+  elif [ -r /proc/meminfo ]; then
+    TOTAL_RAM="$(grep -F MemTotal /proc/meminfo | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' | cut -f 1 -d ' ')"
+    TOTAL_RAM="$((TOTAL_RAM * 1024))"
+  fi
+
+  if [ -f /etc/machine-id ]; then
+    DISTINCT_ID="$(cat /etc/machine-id)"
+  elif command -v uuidgen 2> /dev/null; then
+    DISTINCT_ID="$(uuidgen)"
+  else
+    DISTINCT_ID="null"
+  fi
+
+  REQ_BODY="$(cat << EOF
+{
+  "api_key": "${TELEMETRY_API_KEY}",
+  "event": "${1}",
+  "properties": {
+    "distinct_id": "${DISTINCT_ID}",
+    "event_source": "agent installer",
+    "\$current_url": "agent installer",
+    "\$pathname": "netdata-installer",
+    "\$host": "installer.netdata.io",
+    "\$ip": "127.0.0.1",
+    "script_variant": "legacy-kickstart",
+    "error_code": "${2}",
+    "error_message": "${3}",
+    "install_options": "${KICKSTART_OPTIONS}",
+    "netdata_release_channel": "${RELEASE_CHANNEL:-null}",
+    "netdata_install_type": "kickstart-build",
+    "host_os_name": 
"${HOST_NAME:-unknown}", + "host_os_id": "${HOST_ID:-unknown}", + "host_os_id_like": "${HOST_ID_LIKE:-unknown}", + "host_os_version": "${HOST_VERSION:-unknown}", + "host_os_version_id": "${HOST_VERSION_ID:-unknown}", + "system_kernel_name": "${KERNEL_NAME}", + "system_kernel_version": "$(uname -r)", + "system_architecture": "$(uname -m)", + "system_total_ram": "${TOTAL_RAM:-unknown}" + } +} +EOF +)" + +if [ -n "$(command -v curl 2> /dev/null)" ]; then + curl --silent -o /dev/null --write-out '%{http_code}' -X POST --max-time 2 --header "Content-Type: application/json" -d "${REQ_BODY}" "${TELEMETRY_URL}" +else + wget -q -O - --no-check-certificate \ + --server-response \ + --method POST \ + --timeout=1 \ + --header 'Content-Type: application/json' \ + --body-data "${REQ_BODY}" \ + "${TELEMETRY_URL}" 2>&1 | awk '/^ HTTP/{print $2}' +fi +} + fatal() { - printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${*} \n\n" + printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${1} \n\n" + telemetry_event "INSTALL_FAILED" "${1}" "${2}" exit 1 } @@ -142,15 +232,10 @@ run() { return ${ret} } -fatal() { - printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} ABORTED ${TPUT_RESET} ${*} \n\n" - exit 1 -} - warning() { printf >&2 "${TPUT_BGRED}${TPUT_WHITE}${TPUT_BOLD} WARNING ${TPUT_RESET} ${*} \n\n" if [ "${INTERACTIVE}" = "0" ]; then - fatal "Stopping due to non-interactive mode. Fix the issue or retry installation in an interactive mode." + fatal "Stopping due to non-interactive mode. Fix the issue or retry installation in an interactive mode." F0001 else read -r -p "Press ENTER to attempt netdata installation > " progress "OK, let's give it a try..." @@ -200,11 +285,11 @@ download() { url="${1}" dest="${2}" if command -v curl > /dev/null 2>&1; then - run curl -q -sSL --connect-timeout 10 --retry 3 --output "${dest}" "${url}" + run curl -q -sSL --connect-timeout 10 --retry 3 --output "${dest}" "${url}" || fatal "Cannot download ${url}" F0002 elif command -v wget > /dev/null 2>&1; then - run wget -T 15 -O "${dest}" "${url}" || fatal "Cannot download ${url}" + run wget -T 15 -O "${dest}" "${url}" || fatal "Cannot download ${url}" F0002 else - fatal "I need curl or wget to proceed, but neither is available on this system." + fatal "I need curl or wget to proceed, but neither is available on this system." F0003 fi } @@ -243,7 +328,7 @@ detect_bash4() { echo >&2 "No BASH is available on this system" return 1 elif [ $((BASH_MAJOR_VERSION)) -lt 4 ]; then - echo >&2 "No BASH v4+ is available on this system (installed bash is v${BASH_MAJOR_VERSION}" + echo >&2 "No BASH v4+ is available on this system (installed bash is v${BASH_MAJOR_VERSION})" return 1 fi return 0 @@ -259,36 +344,32 @@ dependencies() { echo "Machine : ${MACHINE}" echo "BASH major version: ${BASH_MAJOR_VERSION}" - if [ "${OS}" != "GNU/Linux" ] && [ "${SYSTEM}" != "Linux" ]; then - warning "Cannot detect the packages to be installed on a ${SYSTEM} - ${OS} system." + bash="$(command -v bash 2> /dev/null)" + if ! detect_bash4 "${bash}"; then + warning "Cannot detect packages to be installed in this system, without BASH v4+." else - bash="$(command -v bash 2> /dev/null)" - if ! detect_bash4 "${bash}"; then - warning "Cannot detect packages to be installed in this system, without BASH v4+." - else - progress "Fetching script to detect required packages..." 
- if [ -n "${NETDATA_LOCAL_TARBALL_OVERRIDE_DEPS_SCRIPT}" ]; then - if [ -f "${NETDATA_LOCAL_TARBALL_OVERRIDE_DEPS_SCRIPT}" ]; then - run cp "${NETDATA_LOCAL_TARBALL_OVERRIDE_DEPS_SCRIPT}" "${ndtmpdir}/install-required-packages.sh" - else - fatal "Invalid given dependency file, please check your --local-files parameter options and try again" - fi + progress "Fetching script to detect required packages..." + if [ -n "${NETDATA_LOCAL_TARBALL_OVERRIDE_DEPS_SCRIPT}" ]; then + if [ -f "${NETDATA_LOCAL_TARBALL_OVERRIDE_DEPS_SCRIPT}" ]; then + run cp "${NETDATA_LOCAL_TARBALL_OVERRIDE_DEPS_SCRIPT}" "${ndtmpdir}/install-required-packages.sh" else - download "${PACKAGES_SCRIPT}" "${ndtmpdir}/install-required-packages.sh" + fatal "Invalid given dependency file, please check your --local-files parameter options and try again" F1001 fi + else + download "${PACKAGES_SCRIPT}" "${ndtmpdir}/install-required-packages.sh" + fi - if [ ! -s "${ndtmpdir}/install-required-packages.sh" ]; then - warning "Downloaded dependency installation script is empty." - else - progress "Running downloaded script to detect required packages..." - run ${sudo} "${bash}" "${ndtmpdir}/install-required-packages.sh" ${PACKAGES_INSTALLER_OPTIONS} - # shellcheck disable=SC2181 - if [ $? -ne 0 ]; then - warning "It failed to install all the required packages, but installation might still be possible." - fi + if [ ! -s "${ndtmpdir}/install-required-packages.sh" ]; then + warning "Downloaded dependency installation script is empty." + else + progress "Running downloaded script to detect required packages..." + run ${sudo} "${bash}" "${ndtmpdir}/install-required-packages.sh" ${PACKAGES_INSTALLER_OPTIONS} + # shellcheck disable=SC2181 + if [ $? -ne 0 ]; then + warning "It failed to install all the required packages, but installation might still be possible." fi - fi + fi } @@ -300,7 +381,24 @@ safe_sha256sum() { elif command -v shasum > /dev/null 2>&1; then shasum -a 256 "$@" else - fatal "I could not find a suitable checksum binary to use" + fatal "I could not find a suitable checksum binary to use" F0004 + fi +} + +claim() { + progress "Attempting to claim agent to ${NETDATA_CLAIM_URL}" + if [ -z "${NETDATA_PREFIX}" ] ; then + NETDATA_CLAIM_PATH=/usr/sbin/netdata-claim.sh + else + NETDATA_CLAIM_PATH="${NETDATA_PREFIX}/netdata/usr/sbin/netdata-claim.sh" + fi + + if ${sudo} "${NETDATA_CLAIM_PATH}" -token=${NETDATA_CLAIM_TOKEN} -rooms=${NETDATA_CLAIM_ROOMS} -url=${NETDATA_CLAIM_URL} ${NETDATA_CLAIM_EXTRA}; then + progress "Successfully claimed node" + return 0 + else + run_failed "Unable to claim node, you must do so manually." + return 1 fi } @@ -361,33 +459,37 @@ while [ -n "${1}" ]; do NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} --disable-cloud" NETDATA_DISABLE_CLOUD=1 ;; + "--disable-telemetry") + NETDATA_INSTALLER_OPTIONS="${NETDATA_INSTALLER_OPTIONS} --disable-telemetry" + NETDATA_DISABLE_TELEMETRY=1 + ;; "--local-files") if [ -z "${2}" ]; then - fatal "Missing netdata: Option --local-files requires extra information. The desired tarball for netdata, the checksum, the go.d plugin tarball , the go.d plugin config tarball and the dependency management script, in this particular order" + fatal "Missing netdata: Option --local-files requires extra information. 
The desired tarball for netdata, the checksum, the go.d plugin tarball , the go.d plugin config tarball and the dependency management script, in this particular order" F1002 fi export NETDATA_LOCAL_TARBALL_OVERRIDE="${2}" if [ -z "${3}" ]; then - fatal "Missing checksum file: Option --local-files requires extra information. The desired tarball for netdata, the checksum, the go.d plugin tarball , the go.d plugin config tarball and the dependency management script, in this particular order" + fatal "Missing checksum file: Option --local-files requires extra information. The desired tarball for netdata, the checksum, the go.d plugin tarball , the go.d plugin config tarball and the dependency management script, in this particular order" F1003 fi export NETDATA_LOCAL_TARBALL_OVERRIDE_CHECKSUM="${3}" if [ -z "${4}" ]; then - fatal "Missing go.d tarball: Option --local-files requires extra information. The desired tarball for netdata, the checksum, the go.d plugin tarball , the go.d plugin config tarball and the dependency management script, in this particular order" + fatal "Missing go.d tarball: Option --local-files requires extra information. The desired tarball for netdata, the checksum, the go.d plugin tarball , the go.d plugin config tarball and the dependency management script, in this particular order" F1004 fi export NETDATA_LOCAL_TARBALL_OVERRIDE_GO_PLUGIN="${4}" if [ -z "${5}" ]; then - fatal "Missing go.d config tarball: Option --local-files requires extra information. The desired tarball for netdata, the checksum, the go.d plugin tarball , the go.d plugin config tarball and the dependency management script, in this particular order" + fatal "Missing go.d config tarball: Option --local-files requires extra information. The desired tarball for netdata, the checksum, the go.d plugin tarball , the go.d plugin config tarball and the dependency management script, in this particular order" F1005 fi export NETDATA_LOCAL_TARBALL_OVERRIDE_GO_PLUGIN_CONFIG="${5}" if [ -z "${6}" ]; then - fatal "Missing dependencies management scriptlet: Option --local-files requires extra information. The desired tarball for netdata, the checksum, the go.d plugin tarball , the go.d plugin config tarball and the dependency management script, in this particular order" + fatal "Missing dependencies management scriptlet: Option --local-files requires extra information. The desired tarball for netdata, the checksum, the go.d plugin tarball , the go.d plugin config tarball and the dependency management script, in this particular order" F1006 fi export NETDATA_LOCAL_TARBALL_OVERRIDE_DEPS_SCRIPT="${6}" @@ -413,7 +515,7 @@ if [ -n "${NETDATA_DISABLE_CLOUD}" ]; then fi fi -# shellcheck disable=SC2235,SC2030 +# shellcheck disable=SC2235,SC2030,SC2031 if ( [ -z "${NETDATA_CLAIM_TOKEN}" ] && [ -n "${NETDATA_CLAIM_URL}" ] ) || ( [ -n "${NETDATA_CLAIM_TOKEN}" ] && [ -z "${NETDATA_CLAIM_URL}" ] ); then run_failed "Invalid claiming options, both a claiming token and URL must be specified." exit 1 @@ -442,19 +544,24 @@ if [ -n "$ndpath" ] ; then if [ -r "${ndprefix}/etc/netdata/.environment" ] ; then ndstatic="$(grep IS_NETDATA_STATIC_BINARY "${ndprefix}/etc/netdata/.environment" | cut -d "=" -f 2 | tr -d \")" if [ -z "${NETDATA_REINSTALL}" ] && [ -z "${NETDATA_LOCAL_TARBALL_OVERRIDE}" ] ; then - if [ -x "${ndprefix}/usr/libexec/netdata/netdata-updater.sh" ] ; then + if [ -n "${NETDATA_CLAIM_TOKEN}" ] ; then + if [ "${ndprefix}" != '/' ] ; then + NETDATA_PREFIX="${ndprefix}" + fi + + claim + exit $? 
+ elif [ -x "${ndprefix}/usr/libexec/netdata/netdata-updater.sh" ] ; then progress "Attempting to update existing install instead of creating a new one" if run ${sudo} "${ndprefix}/usr/libexec/netdata/netdata-updater.sh" --not-running-from-cron ; then progress "Updated existing install at ${ndpath}" exit 0 else - fatal "Failed to update existing Netdata install" - exit 1 + fatal "Failed to update existing Netdata install" F0100 fi else if [ -z "${NETDATA_ALLOW_DUPLICATE_INSTALL}" ] || [ "${ndstatic}" = "yes" ] ; then - fatal "Existing installation detected which cannot be safely updated by this script, refusing to continue." - exit 1 + fatal "Existing installation detected which cannot be safely updated by this script. Refusing to continue." F0101 else progress "User explicitly requested duplicate install, proceeding." fi @@ -463,15 +570,20 @@ if [ -n "$ndpath" ] ; then if [ "${ndstatic}" = "no" ] ; then progress "User requested reinstall instead of update, proceeding." else - fatal "Existing install is a static install, please use kickstart-static64.sh instead." - exit 1 + fatal "Existing install is a static install. Please use kickstart-static64.sh instead." F0102 fi fi else progress "Existing install appears to be handled manually or through the system package manager." - if [ -z "${NETDATA_ALLOW_DUPLICATE_INSTALL}" ] ; then - fatal "Existing installation detected which cannot be safely updated by this script, refusing to continue." - exit 1 + if [ -n "${NETDATA_CLAIM_TOKEN}" ] ; then + if [ "${ndprefix}" != '/' ] ; then + NETDATA_PREFIX="${ndprefix}" + fi + + claim + exit $? + elif [ -z "${NETDATA_ALLOW_DUPLICATE_INSTALL}" ] ; then + fatal "Existing installation detected which cannot be safely updated by this script. Refusing to continue." F0103 else progress "User explicitly requested duplicate install, proceeding." fi @@ -501,30 +613,39 @@ else fi if ! grep netdata-latest.tar.gz "${ndtmpdir}/sha256sum.txt" | safe_sha256sum -c - > /dev/null 2>&1; then - fatal "Tarball checksum validation failed. Stopping Netdata Agent installation and leaving tarball in ${ndtmpdir}.\nUsually this is a result of an older copy of the file being cached somewhere upstream and can be resolved by retrying in an hour." + fatal "Tarball checksum validation failed. Stopping Netdata Agent installation and leaving tarball in ${ndtmpdir}.\nUsually this is a result of an older copy of the file being cached somewhere upstream and can be resolved by retrying in an hour." F0005 fi run tar -xf netdata-latest.tar.gz rm -rf netdata-latest.tar.gz > /dev/null 2>&1 -cd netdata-* || fatal "Cannot cd to netdata source tree" +cd netdata-* || fatal "Cannot cd to netdata source tree" F0006 # --------------------------------------------------------------------------------------------------------------------- # install netdata from source install() { progress "Installing netdata..." - run ${sudo} ./netdata-installer.sh ${NETDATA_UPDATES} ${NETDATA_INSTALLER_OPTIONS} || fatal "netdata-installer.sh exited with error" + run ${sudo} ./netdata-installer.sh ${NETDATA_UPDATES} ${NETDATA_INSTALLER_OPTIONS} + case $? in + 1) + fatal "netdata-installer.sh exited with error" F0007 + ;; + 2) + fatal "Insufficient RAM to install netdata" F0008 + ;; + esac if [ -d "${ndtmpdir}" ] && [ ! "${ndtmpdir}" = "/" ]; then run ${sudo} rm -rf "${ndtmpdir}" > /dev/null 2>&1 fi } if [ -x netdata-installer.sh ]; then + echo "INSTALL_TYPE='kickstart-build'" > system/.install-type install "$@" else if [ "$(find . 
-mindepth 1 -maxdepth 1 -type d | wc -l)" -eq 1 ] && [ -x "$(find . -mindepth 1 -maxdepth 1 -type d)/netdata-installer.sh" ]; then
     cd "$(find . -mindepth 1 -maxdepth 1 -type d)" && install "$@"
   else
-    fatal "Cannot install netdata from source (the source directory does not include netdata-installer.sh). Leaving all files in ${ndtmpdir}"
+    fatal "Cannot install netdata from source (the source directory does not include netdata-installer.sh). Leaving all files in ${ndtmpdir}" F0009
     exit 1
   fi
 fi
@@ -532,16 +653,5 @@ fi
 # --------------------------------------------------------------------------------------------------------------------
 
 if [ -n "${NETDATA_CLAIM_TOKEN}" ]; then
-  progress "Attempting to claim agent to ${NETDATA_CLAIM_URL}"
-  if [ -z "${NETDATA_PREFIX}" ] ; then
-    NETDATA_CLAIM_PATH=/usr/sbin/netdata-claim.sh
-  else
-    NETDATA_CLAIM_PATH="${NETDATA_PREFIX}/bin/netdata-claim.sh"
-  fi
-
-  if "${NETDATA_CLAIM_PATH}" -token=${NETDATA_CLAIM_TOKEN} -rooms=${NETDATA_CLAIM_ROOMS} -url=${NETDATA_CLAIM_URL} ${NETDATA_CLAIM_EXTRA}; then
-    progress "Successfully claimed node"
-  else
-    run_failed "Unable to claim node, you must do so manually."
+  claim
 fi
diff --git a/packaging/installer/methods/kickstart-64.md b/packaging/installer/methods/kickstart-64.md
index c390b873a..176d63ad4 100644
--- a/packaging/installer/methods/kickstart-64.md
+++ b/packaging/installer/methods/kickstart-64.md
@@ -71,18 +71,18 @@ your installation. Here are a few important parameters:
   kickstart run the process using those files. This option conflicts with the `--stable-channel` option. If you set
   this _and_ `--stable-channel`, Netdata will use the local files.
 
-### Claim node to Netdata Cloud during installation
+### Connect node to Netdata Cloud during installation
 
-The `kickstart.sh` script accepts additional parameters to automatically [claim](/claim/README.md) your node to Netdata
+The `kickstart.sh` script accepts additional parameters to automatically [connect](/claim/README.md) your node to Netdata
 Cloud immediately after installation. Find the `token` and `rooms` strings by [signing in to Netdata
-Cloud](https://app.netdata.cloud/sign-in?cloudRoute=/spaces), then clicking on **Claim Nodes** in the [Spaces management
+Cloud](https://app.netdata.cloud/sign-in?cloudRoute=/spaces), then clicking on **Connect Nodes** in the [Spaces management
 area](https://learn.netdata.cloud/docs/cloud/spaces#manage-spaces).
 
 - `--claim-token`: The unique token associated with your Space in Netdata Cloud.
 - `--claim-rooms`: A comma-separated list of tokens for each War Room this node should appear in.
 - `--claim-proxy`: Should take the form of `socks5[h]://[user:pass@]host:ip` for a SOCKS5 proxy, or
-  `http://[user:pass@]host:ip` for an HTTP(S) proxy.See [claiming through a
-  proxy](/claim/README.md#claim-through-a-proxy) for details.
+  `http://[user:pass@]host:ip` for an HTTP(S) proxy. See [connecting through a
+  proxy](/claim/README.md#connect-through-a-proxy) for details.
 - `--claim-url`: Defaults to `https://app.netdata.cloud`.
For example: @@ -97,7 +97,7 @@ To use `md5sum` to verify the integrity of the `kickstart-static64.sh` script yo command above, run the following: ```bash -[ "c735fd724be5726c8a1850deed2793b8" = "$(curl -Ss https://my-netdata.io/kickstart-static64.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID" +[ "78461a3055aee03011a577a716e2dee5" = "$(curl -Ss https://my-netdata.io/kickstart-static64.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID" ``` If the script is valid, this command will return `OK, VALID`. diff --git a/packaging/installer/methods/kickstart.md b/packaging/installer/methods/kickstart.md index e874d637d..7792989ee 100644 --- a/packaging/installer/methods/kickstart.md +++ b/packaging/installer/methods/kickstart.md @@ -11,15 +11,23 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/instal This page covers detailed instructions on using and configuring the automatic one-line installation script named `kickstart.sh`. -This method is fully automatic on all Linux distributions. To install Netdata from source, including all dependencies +This method is fully automatic on all Linux distributions and macOS environments. To install Netdata from source, including all dependencies required to connect to Netdata Cloud, and get _automatic nightly updates_, run the following as your normal user: +**Linux** + ```bash bash <(curl -Ss https://my-netdata.io/kickstart.sh) ``` -> See our [installation guide](../README.md) for details about [automatic updates](../README.md#automatic-updates) or -> [nightly vs. stable releases](../README.md#nightly-vs-stable-releases). +**macOS** + +```bash +bash <(curl -Ss https://my-netdata.io/kickstart.sh) --install /usr/local/ +``` + +> See our [installation guide](/packaging/installer/README.md) for details about [automatic updates](/packaging/installer/README.md#automatic-updates) or +> [nightly vs. stable releases](/packaging/installer/README.md#nightly-vs-stable-releases). ## What does `kickstart.sh` do? @@ -40,33 +48,39 @@ The `kickstart.sh` script does the following after being downloaded and run usin The `kickstart.sh` script passes all its parameters to `netdata-installer.sh`, which you can use to customize your installation. Here are a few important parameters: -- `--dont-wait`: Enable automated installs by not prompting for permission to install any required packages. -- `--dont-start-it`: Prevent the installer from starting Netdata automatically. -- `--stable-channel`: Automatically update only on the release of new major versions. -- `--nightly-channel`: Automatically update on every new nightly build. -- `--disable-telemetry`: Opt-out of [anonymous statistics](/docs/anonymous-statistics.md) we use to make - Netdata better. -- `--no-updates`: Prevent automatic updates of any kind. -- `--reinstall`: If an existing install is detected, reinstall instead of trying to update it. Note that this - cannot be used to change installation types. -- `--local-files`: Used for [offline installations](offline.md). Pass four file paths: the Netdata +- `--dont-wait`: Synonym for `--non-interactive` +- `--non-interactive`: Don’t prompt for anything and assume yes whenever possible. +- `--no-updates`: Disable automatic updates. +- `--stable-channel`: Use a stable build instead of a nightly build. +- `--reinstall`: If an existing install is found, reinstall instead of trying to update it in place. +- `--dont-start-it`: Don’t auto-start the daemon after installing. 
This parameter is not guaranteed to work. +- `--install`: Specify an alternative install prefix. +- `--disable-cloud`: For local builds, don’t build any of the cloud code at all. For native packages and static builds, + use runtime configuration to disable cloud support. +- `--auto-update-type`: Specify how auto-updates are to be scheduled, overriding auto-detection. +- `--disable-telemetry`: Disable anonymous statistics. +- `--local-files`: Used for [offline installations](offline.md). Pass four file paths: the Netdata tarball, the checksum file, the go.d plugin tarball, and the go.d plugin config tarball, to force kickstart run the process using those files. This option conflicts with the `--stable-channel` option. If you set this _and_ `--stable-channel`, Netdata will use the local files. +- `all`: Ask for all dependencies in the dependency handling script. + + > Note: The `all` and `--local-files` parameters are scheduled to be removed in a forthcoming update. + +### Connect node to Netdata Cloud during installation -### Claim node to Netdata Cloud during installation - -The `kickstart.sh` script accepts additional parameters to automatically [claim](/claim/README.md) your node to Netdata +The `kickstart.sh` script accepts additional parameters to automatically [connect](/claim/README.md) your node to Netdata Cloud immediately after installation. Find the `token` and `rooms` strings by [signing in to Netdata -Cloud](https://app.netdata.cloud/sign-in?cloudRoute=/spaces), then clicking on **Claim Nodes** in the [Spaces management +Cloud](https://app.netdata.cloud/sign-in?cloudRoute=/spaces), then clicking on **Connect Nodes** in the [Spaces management area](https://learn.netdata.cloud/docs/cloud/spaces#manage-spaces). -- `--claim-token`: The unique token associated with your Space in Netdata Cloud. -- `--claim-rooms`: A comma-separated list of tokens for each War Room this node should appear in. -- `--claim-proxy`: Should take the form of `socks5[h]://[user:pass@]host:ip` for a SOCKS5 proxy, or - `http://[user:pass@]host:ip` for an HTTP(S) proxy.See [claiming through a - proxy](/claim/README.md#claim-through-a-proxy) for details. -- `--claim-url`: Defaults to `https://app.netdata.cloud`. +- `--claim-token`: Specify a unique claiming token associated with your Space in Netdata Cloud to be used to connect to the node + after the install. +- `--claim-rooms`: Specify a comma-separated list of tokens for each War Room this node should appear in. +- `--claim-proxy`: Specify a proxy to use when connecting to the cloud in the form of + `socks5[h]://[user:pass@]host:ip` for a SOCKS5 proxy, or `http://[user:pass@]host:ip` for an HTTP(S) proxy. + See [connecting through a proxy](/claim/README.md#connect-through-a-proxy) for details. +- `--claim-url`: Specify a URL to use when connecting to the cloud. Defaults to `https://app.netdata.cloud`. For example: @@ -74,13 +88,15 @@ For example: bash <(curl -Ss https://my-netdata.io/kickstart.sh) --claim-token=TOKEN --claim-rooms=ROOM1,ROOM2 ``` +Please note that to run it you will either need root privileges or to run it as the user that is running the agent; for more details, see the [Connect an agent without root privileges](#connect-an-agent-without-root-privileges) section.
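If you need to connect an already-installed agent by hand instead, a hypothetical direct invocation of the claiming script would look like the sketch below (the flags mirror the installer's internal call; the path assumes a default install, and running via `sudo -u netdata` is an assumption):

```bash
# TOKEN and ROOM are placeholders copied from Netdata Cloud.
sudo -u netdata /usr/sbin/netdata-claim.sh -token=TOKEN -rooms=ROOM -url=https://app.netdata.cloud
```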
+ ## Verify script integrity To use `md5sum` to verify the integrity of the `kickstart.sh` script you will download using the one-line command above, run the following: ```bash -[ "a708de3790fa39188babe71eb1639c66" = "$(curl -Ss https://my-netdata.io/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID" +[ "7e8b449ca44e49b7074b5b9d6022cbcc" = "$(curl -Ss https://my-netdata.io/kickstart.sh | md5sum | cut -d ' ' -f 1)" ] && echo "OK, VALID" || echo "FAILED, INVALID" ``` If the script is valid, this command will return `OK, VALID`. diff --git a/packaging/installer/methods/kubernetes.md b/packaging/installer/methods/kubernetes.md index f593765fc..f12850665 100644 --- a/packaging/installer/methods/kubernetes.md +++ b/packaging/installer/methods/kubernetes.md @@ -39,25 +39,34 @@ parent pod, and multiple child pods. You've now installed Netdata on your Kubernetes cluster. Next, it's time to opt-in and enable the powerful Kubernetes dashboards available in Netdata Cloud. -## Claim your Kubernetes cluster to Netdata Cloud +## Connect your Kubernetes cluster to Netdata Cloud To start [Kubernetes monitoring](https://learn.netdata.cloud/docs/cloud/visualize/kubernetes/), you must first -[claim](/claim/README.md) your Kubernetes cluster to [Netdata Cloud](https://app.netdata.cloud). Claiming securely +[connect](/claim/README.md) your Kubernetes cluster to [Netdata Cloud](https://app.netdata.cloud). The connection process securely connects your Kubernetes cluster to stream metrics data to Netdata Cloud, enabling Kubernetes-specific visualizations like the health map and time-series composite charts. -First, find your claiming script in Netdata Cloud by clicking on your Space's dropdown, then **Manage your Space**. -Click the **Nodes** tab to reveal the `netdata-claim.sh` script for your Space in Netdata Cloud. You need the `TOKEN` +### New installations + +First, find the script to run a `helm install` command. You can get it by clicking on your Space's dropdown, then **Manage your Space**. +Click the **Nodes** tab and select the environment your node is running in, in this case **kubernetes**, to reveal the script for your Space in Netdata Cloud. You need the `TOKEN` and `ROOM` values. -Next, create a file called `override.yml`. +The script should be similar to: + +```bash +helm install netdata netdata/netdata --set parent.claiming.enabled="true" --set parent.claiming.token="TOKEN" --set parent.claiming.rooms="ROOM" --set child.claiming.enabled=true --set child.claiming.token="TOKEN" --set child.claiming.rooms="ROOM" +``` + +### Existing installations + +On an existing installation, you will need to override the configuration values by running the `helm upgrade` command and providing a file with the values to override. You can start by creating a file called `override.yml`. ```bash touch override.yml ``` -Paste the following into your `override.yml` file, replacing instances of `ROOM` and `TOKEN` with those from the -claiming script from Netdata Cloud. These settings claim your `parent`/`child` nodes to Netdata Cloud and store more +Paste the following into your `override.yml` file, replacing instances of `ROOM` and `TOKEN` with those from the script from Netdata Cloud. These settings connect your `parent`/`child` nodes to Netdata Cloud and store more metrics in the nodes' time-series databases.
```yaml @@ -92,7 +101,7 @@ Apply these new settings: helm upgrade -f override.yml netdata netdata/netdata ``` -The cluster terminates the old pods and creates new ones with the proper persistence and claiming configuration. You'll +The cluster terminates the old pods and creates new ones with the proper persistence and connection configuration. You'll see your nodes, containers, and pods appear in Netdata Cloud in a few seconds. ![Netdata's Kubernetes monitoring @@ -107,7 +116,7 @@ Read up on the various configuration options in the [Helm chart documentation](https://github.com/netdata/helmchart#configuration) if you need to tweak your Kubernetes monitoring. Your first option is to create an `override.yml` file, if you haven't created one already for -[claiming](#claim-your-kubernetes-cluster-to-netdata-cloud), then apply the new configuration to your cluster with `helm +[connect](#connect-your-kubernetes-cluster-to-netdata-cloud), then apply the new configuration to your cluster with `helm upgrade`. ```bash diff --git a/packaging/installer/methods/macos.md b/packaging/installer/methods/macos.md index 3fa21d0cd..48d53fd08 100644 --- a/packaging/installer/methods/macos.md +++ b/packaging/installer/methods/macos.md @@ -10,12 +10,27 @@ can use any of Netdata's [external plugins](../../../collectors/plugins.d/README have installed on your macOS system. You could also use a macOS system as the parent node in a [streaming configuration](/streaming/README.md). -We recommend installing Netdata with the community-created and -maintained [**Homebrew +We recommend that you **[install Netdata with our automatic one-line installation script](#install-netdata-with-the-automatic-one-line-installation-script)**. + + +As an alternative, you can also use the community-created and -maintained [**Homebrew package**](#install-netdata-with-the-homebrew-package). - [Install Netdata via the Homebrew package](#install-netdata-with-the-homebrew-package) - [Install Netdata from source](#install-netdata-from-source) +Because the Homebrew package is community-created and -maintained, we can't guarantee that features available through our installation script will also be available in it, nor can we offer support for it. + +## Install Netdata with our automatic one-line installation script + +To install Netdata using our automatic [kickstart](/packaging/installer/#automatic-one-line-installation-script) script, you just need to run: + +```bash +bash <(curl -Ss https://my-netdata.io/kickstart.sh) --install /usr/local/ +``` + +With this script, you can also connect your nodes directly to Netdata Cloud if you wish; see [Connect an agent running in macOS](/claim/README.md#connect-an-agent-running-in-macos) for details. + ## Install Netdata with the Homebrew package If you don't have [Homebrew](https://brew.sh/) installed already, begin with their installation script: diff --git a/packaging/installer/methods/manual.md b/packaging/installer/methods/manual.md index aa49c81ac..ae96dd954 100644 --- a/packaging/installer/methods/manual.md +++ b/packaging/installer/methods/manual.md @@ -217,13 +217,13 @@ cd netdata process using those files. This option conflicts with the `--stable-channel` option. If you set this _and_ `--stable-channel`, Netdata will use the local files.
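A minimal sketch of how these installer options compose in a direct run (executed from the netdata source tree; this particular option set is illustrative, not prescriptive):

```bash
# Non-interactive source install that tracks the stable channel and
# opts out of anonymous statistics, per the options documented above.
./netdata-installer.sh --dont-wait --stable-channel --disable-telemetry
```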
-### Claim node to Netdata Cloud during installation +### Connect node to Netdata Cloud during installation Unlike the [`kickstart.sh`](/packaging/installer/methods/kickstart.md) or [`kickstart-static64.sh`](/packaging/installer/methods/kickstart-64.md) methods, the `netdata-installer.sh` script does -not allow you to automatically [claim](/claim/README.md) your node to Netdata Cloud immediately after installation. +not allow you to automatically [connect](/claim/README.md) your node to Netdata Cloud immediately after installation. -See the [claiming](/claim/README.md) doc for details on claiming a node with a manual installation of Netdata. +See the [connect to cloud](/claim/README.md) doc for details on connecting a node with a manual installation of Netdata. ### 'nonrepresentable section on output' errors diff --git a/packaging/installer/methods/packages.md b/packaging/installer/methods/packages.md index cf1e33591..20dbaf060 100644 --- a/packaging/installer/methods/packages.md +++ b/packaging/installer/methods/packages.md @@ -6,8 +6,6 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/packaging/instal # Install Netdata with .deb/.rpm packages -![](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/images/packaging-beta-tag.svg?sanitize=true) - Netdata provides our own flavour of binary packages for the most common operating systems that use the `.deb` and `.rpm` packaging formats. diff --git a/packaging/installer/methods/pfsense.md b/packaging/installer/methods/pfsense.md index ee1a453db..6d9de7606 100644 --- a/packaging/installer/methods/pfsense.md +++ b/packaging/installer/methods/pfsense.md @@ -25,19 +25,19 @@ pkg install -y pkgconf bash e2fsprogs-libuuid libuv nano Then run the following commands to download various dependencies from the FreeBSD repository.
```sh -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/Judy-1.0.5_2.txz -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/json-c-0.15_1.txz -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-certifi-2020.6.20.txz -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-asn1crypto-1.3.0.txz -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-pycparser-2.20.txz -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-cffi-1.14.3.txz -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-six-1.15.0.txz -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-cryptography-2.6.1.txz -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-idna-2.10.txz -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-openssl-19.0.0.txz -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-pysocks-1.7.1.txz -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-urllib3-1.25.11,1.txz -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-yaml-5.3.1.txz +pkg add http://pkg.freebsd.org/FreeBSD:12:amd64/latest/All/Judy-1.0.5_3.txz +pkg add http://pkg.freebsd.org/FreeBSD:12:amd64/latest/All/json-c-0.15_1.txz +pkg add http://pkg.freebsd.org/FreeBSD:12:amd64/latest/All/py38-certifi-2021.10.8.txz +pkg add http://pkg.freebsd.org/FreeBSD:12:amd64/latest/All/py38-asn1crypto-1.4.0.txz +pkg add http://pkg.freebsd.org/FreeBSD:12:amd64/latest/All/py38-pycparser-2.20.txz +pkg add http://pkg.freebsd.org/FreeBSD:12:amd64/latest/All/py38-cffi-1.14.6.txz +pkg add http://pkg.freebsd.org/FreeBSD:12:amd64/latest/All/py38-six-1.16.0.txz +pkg add http://pkg.freebsd.org/FreeBSD:12:amd64/latest/All/py38-cryptography-3.3.2.txz +pkg add http://pkg.freebsd.org/FreeBSD:12:amd64/latest/All/py38-idna-2.10.txz +pkg add http://pkg.freebsd.org/FreeBSD:12:amd64/latest/All/py38-openssl-20.0.1.txz +pkg add http://pkg.freebsd.org/FreeBSD:12:amd64/latest/All/py38-pysocks-1.7.1.txz +pkg add http://pkg.freebsd.org/FreeBSD:12:amd64/latest/All/py38-urllib3-1.26.7,1.txz +pkg add http://pkg.freebsd.org/FreeBSD:12:amd64/latest/All/py38-yaml-5.4.1.txz ``` > ⚠️ If any of the above commands return a `Not Found` error, you need to manually search for the latest package in the @@ -56,7 +56,7 @@ pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/py37-yaml-5.3.1.txz You can now install Netdata from the FreeBSD repository. ```bash -pkg add http://pkg.freebsd.org/FreeBSD:11:amd64/latest/All/netdata-1.28.0.txz +pkg add http://pkg.freebsd.org/FreeBSD:12:amd64/latest/All/netdata-1.31.0_1.txz ``` > ⚠️ If the above command returns a `Not Found` error, you need to manually search for the latest version of Netdata in diff --git a/packaging/installer/methods/source.md b/packaging/installer/methods/source.md index e0827fc17..ba29a188d 100644 --- a/packaging/installer/methods/source.md +++ b/packaging/installer/methods/source.md @@ -99,7 +99,7 @@ library for the build system: `externaldeps/libwebsockets`. 6. Copy `lib/libwebsockets.a` from the libwebsockets source directory to `externaldeps/libwebsockets/libwebsockets.a` in the Netdata source tree. -7. Copy the entire contents of `lib/include` from the libwebsockets source +7. Copy the entire contents of `include/` from the libwebsockets source directory to `externaldeps/libwebsockets/include` in the Netdata source tree. #### JSON-C @@ -120,7 +120,7 @@ you can do the following to prepare a copy for the build system: 4. Build JSON-C by running `make` in the JSON-C source directory. 5. 
In the Netdata source directory, create a directory called `externaldeps/jsonc`. -6. Copy `libjson-c.a` fro the JSON-C source directory to +6. Copy `libjson-c.a` from the JSON-C source directory to `externaldeps/jsonc/libjson-c.a` in the Netdata source tree. 7. Copy all of the header files (`*.h`) from the JSON-C source directory to `externaldeps/jsonc/json-c` in the Netdata source tree. diff --git a/packaging/installer/methods/synology.md b/packaging/installer/methods/synology.md index 4a0ae3551..07b1596fd 100644 --- a/packaging/installer/methods/synology.md +++ b/packaging/installer/methods/synology.md @@ -21,7 +21,7 @@ will install the content into `/opt/netdata`, making future removal safe and simple. When Netdata is first installed, it will run as _root_. This may or may not be acceptable for you, and since other installations run it as the `netdata` user, you might wish to do the same. This requires some extra work: -1. Creat a group `netdata` via the Synology group interface. Give it no access to anything. +1. Create a group `netdata` via the Synology group interface. Give it no access to anything. 2. Create a user `netdata` via the Synology user interface. Give it no access to anything and a random password. Assign the user to the `netdata` group. Netdata will chuid to this user when running. 3. Change ownership of the following directories, as defined in [Netdata @@ -33,6 +33,14 @@ chown -R netdata:netdata /opt/netdata/var/lib/netdata /opt/netdata/var/cache/net chown -R netdata:root /opt/netdata/var/log/netdata ``` +4. Uncomment and set `web files owner` to `root`, and `web files group` to `netdata` in + `/opt/netdata/etc/netdata/netdata.conf`. +5. Restart Netdata: + +```sh +/etc/rc.netdata restart +``` + ## Create startup script Additionally, as of 2018/06/24, the Netdata installer doesn't recognize DSM as an operating system, so no init script is diff --git a/packaging/installer/netdata-updater.sh b/packaging/installer/netdata-updater.sh index ad7412255..59c5fab11 100755 --- a/packaging/installer/netdata-updater.sh +++ b/packaging/installer/netdata-updater.sh @@ -6,13 +6,15 @@ # - PATH # - CFLAGS # - LDFLAGS +# - MAKEOPTS # - IS_NETDATA_STATIC_BINARY # - NETDATA_CONFIGURE_OPTIONS # - REINSTALL_OPTIONS # - NETDATA_TARBALL_URL # - NETDATA_TARBALL_CHECKSUM_URL # - NETDATA_TARBALL_CHECKSUM -# - NETDATA_PREFIX / NETDATA_LIB_DIR (After 1.16.1 we will only depend on lib dir) +# - NETDATA_PREFIX +# - NETDATA_LIB_DIR # # Optional environment options: # @@ -170,6 +172,26 @@ download() { fi } +get_netdata_latest_tag() { + local dest="${1}" + local url="https://github.com/netdata/netdata/releases/latest" + local tag + + if command -v curl >/dev/null 2>&1; then + tag=$(curl "${url}" -s -L -I -o /dev/null -w '%{url_effective}' | grep -m 1 -o '[^/]*$') + elif command -v wget >/dev/null 2>&1; then + tag=$(wget --max-redirect=0 "${url}" 2>&1 | grep Location | cut -d ' ' -f2 | grep -m 1 -o '[^/]*$') + else + fatal "I need curl or wget to proceed, but neither of them is available on this system." + fi + + if [[ ! $tag =~ ^v[0-9]+\..+ ]]; then + fatal "Cannot download latest stable tag from ${url}" + fi + + echo "${tag}" >"${dest}" +} + newer_commit_date() { echo >&3 "Checking if a newer version of the updater script is available."
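The tag resolution in `get_netdata_latest_tag()` is easy to check by hand; this sketch reuses the same curl invocation and grep pattern as the function above:

```bash
# Follow the GitHub redirect for the latest release and keep only the final
# path component of the effective URL, which is the tag (e.g. v1.32.0).
curl -s -L -I -o /dev/null -w '%{url_effective}' \
  https://github.com/netdata/netdata/releases/latest | grep -m 1 -o '[^/]*$'
```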
@@ -235,7 +257,7 @@ parse_version() { get_latest_version() { if [ "${RELEASE_CHANNEL}" == "stable" ]; then - download "https://api.github.com/repos/netdata/netdata/releases/latest" /dev/stdout | grep tag_name | cut -d'"' -f4 + get_netdata_latest_tag /dev/stdout else download "$NETDATA_NIGHTLIES_BASEURL/latest-version.txt" /dev/stdout fi @@ -250,8 +272,7 @@ set_tarball_urls() { if [ "$1" = "stable" ]; then local latest - # Simple version - latest="$(download "https://api.github.com/repos/netdata/netdata/releases/latest" /dev/stdout | grep tag_name | cut -d'"' -f4)" + latest="$(get_netdata_latest_tag /dev/stdout)" export NETDATA_TARBALL_URL="https://github.com/netdata/netdata/releases/download/$latest/netdata-$latest.${extension}" export NETDATA_TARBALL_CHECKSUM_URL="https://github.com/netdata/netdata/releases/download/$latest/sha256sums.txt" else @@ -326,6 +347,12 @@ update() { fi fi + if [ -e "${NETDATA_PREFIX}/etc/netdata/.install-type" ] ; then + install_type="$(cat "${NETDATA_PREFIX}"/etc/netdata/.install-type)" + else + install_type="INSTALL_TYPE='legacy-build'" + fi + info "Re-installing netdata..." eval "${env} ./netdata-installer.sh ${REINSTALL_OPTIONS} --dont-wait ${do_not_start}" >&3 2>&3 || fatal "FAILED TO COMPILE/INSTALL NETDATA" @@ -334,6 +361,8 @@ update() { info "Updating tarball checksum info" echo "${NEW_CHECKSUM}" > "${NETDATA_LIB_DIR}/netdata.tarball.checksum" + + echo "${install_type}" > "${NETDATA_PREFIX}/etc/netdata/.install-type" fi rm -rf "${ndtmpdir}" >&3 2>&3 @@ -393,7 +422,7 @@ if [ -t 2 ]; then else # we are headless # create a temporary file for the log - logfile="$(mktemp "${logfile}"/netdata-updater.log.XXXXXX)" + logfile="$(mktemp -t netdata-updater.log.XXXXXX)" # open fd 3 and send it to logfile exec 3> "${logfile}" fi @@ -415,6 +444,12 @@ if [ "${IS_NETDATA_STATIC_BINARY}" == "yes" ]; then fatal "Static binary checksum validation failed. Stopping netdata installation and leaving binary in ${ndtmpdir}\nUsually this is a result of an older copy of the file being cached somewhere and can be resolved by simply retrying in an hour." 
fi + if [ -e /opt/netdata/etc/netdata/.install-type ] ; then + install_type="$(cat /opt/netdata/etc/netdata/.install-type)" + else + install_type="INSTALL_TYPE='legacy-static'" + fi + # Do not pass any options other than the accept, for now # shellcheck disable=SC2086 if sh "${ndtmpdir}/netdata-latest.gz.run" --accept -- ${REINSTALL_OPTIONS}; then @@ -422,8 +457,11 @@ if [ "${IS_NETDATA_STATIC_BINARY}" == "yes" ]; then else echo >&2 "NOTE: did not remove: ${ndtmpdir}" fi + + echo "${install_type}" > /opt/netdata/etc/netdata/.install-type + echo >&2 "Switching back to ${PREVDIR}" - cd "${PREVDIR}" || exit 1 + cd "${PREVDIR}" else # the installer updates this script - so we run and exit in a single line update && exit 0 diff --git a/packaging/libbpf.checksums b/packaging/libbpf.checksums index d4ff87a12..eccbfa9f3 100644 --- a/packaging/libbpf.checksums +++ b/packaging/libbpf.checksums @@ -1 +1 @@ -fc33402ba33c8f8c5aa18afbb86a9932965886f2906c50e8f2110a1a2126e3ee v0.0.9_netdata-1.tar.gz +47acbdf7836048fad3a585c6ab43cc08d1b70c27ce0a816e9ca92b927555530f v0.5.1_netdata.tar.gz diff --git a/packaging/libbpf.version b/packaging/libbpf.version index d2362909d..74ca5f708 100644 --- a/packaging/libbpf.version +++ b/packaging/libbpf.version @@ -1 +1 @@ -0.0.9_netdata-1 +0.5.1_netdata diff --git a/packaging/libbpf_0_0_9.checksums b/packaging/libbpf_0_0_9.checksums new file mode 100644 index 000000000..d4ff87a12 --- /dev/null +++ b/packaging/libbpf_0_0_9.checksums @@ -0,0 +1 @@ +fc33402ba33c8f8c5aa18afbb86a9932965886f2906c50e8f2110a1a2126e3ee v0.0.9_netdata-1.tar.gz diff --git a/packaging/libbpf_0_0_9.version b/packaging/libbpf_0_0_9.version new file mode 100644 index 000000000..d2362909d --- /dev/null +++ b/packaging/libbpf_0_0_9.version @@ -0,0 +1 @@ +0.0.9_netdata-1 diff --git a/packaging/makeself/build-static.sh b/packaging/makeself/build-static.sh new file mode 100755 index 000000000..e9cf620b4 --- /dev/null +++ b/packaging/makeself/build-static.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +# SPDX-License-Identifier: GPL-3.0-or-later + +# shellcheck source=./packaging/installer/functions.sh +. "$(dirname "$0")"/../installer/functions.sh || exit 1 + +BUILDARCH="${1}" + +set -e + +case ${BUILDARCH} in + x86_64) platform=linux/amd64 ;; + armv7l) platform=linux/arm/v7 ;; + aarch64) platform=linux/arm64/v8 ;; + ppc64le) platform=linux/ppc64le ;; + *) + echo "Unknown target architecture '${BUILDARCH}'." + exit 1 + ;; +esac + +DOCKER_CONTAINER_NAME="netdata-package-${BUILDARCH}-static-alpine314" + +if [ "${BUILDARCH}" != "$(uname -m)" ] && [ "$(uname -m)" = 'x86_64' ] && [ -z "${SKIP_EMULATION}" ]; then + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes || exit 1 +fi + +if ! 
docker inspect "${DOCKER_CONTAINER_NAME}" > /dev/null 2>&1; then + # To run interactively: + # docker run -it netdata-package-x86_64-static /bin/sh + # (add -v host-dir:guest-dir:rw arguments to mount volumes) + # + # To remove images in order to re-create: + # docker rm -v $(sudo docker ps -a -q -f status=exited) + # docker rmi netdata-package-x86_64-static + # + # This command maps the current directory to + # /usr/src/netdata.git + # inside the container and runs the script install-alpine-packages.sh + # (also inside the container) + # + if docker inspect alpine:3.14 > /dev/null 2>&1; then + run docker image remove alpine:3.14 + run docker pull --platform=${platform} alpine:3.14 + fi + + run docker run --platform=${platform} -v "$(pwd)":/usr/src/netdata.git:rw alpine:3.14 \ + /bin/sh /usr/src/netdata.git/packaging/makeself/install-alpine-packages.sh + + # save the changes made permanently + id=$(docker ps -l -q) + run docker commit "${id}" "${DOCKER_CONTAINER_NAME}" +fi + +# Run the build script inside the container +if [ -t 1 ]; then + run docker run -e BUILDARCH="${BUILDARCH}" -a stdin -a stdout -a stderr -i -t -v "$(pwd)":/usr/src/netdata.git:rw \ + "${DOCKER_CONTAINER_NAME}" \ + /bin/sh /usr/src/netdata.git/packaging/makeself/build.sh "${@}" +else + run docker run -e BUILDARCH="${BUILDARCH}" -v "$(pwd)":/usr/src/netdata.git:rw \ + -e GITHUB_ACTIONS="${GITHUB_ACTIONS}" "${DOCKER_CONTAINER_NAME}" \ + /bin/sh /usr/src/netdata.git/packaging/makeself/build.sh "${@}" +fi + +if [ "${USER}" ]; then + sudo chown -R "${USER}" . +fi diff --git a/packaging/makeself/build-x86_64-static.sh b/packaging/makeself/build-x86_64-static.sh index 83fa0dba9..0f5f1df14 100755 --- a/packaging/makeself/build-x86_64-static.sh +++ b/packaging/makeself/build-x86_64-static.sh @@ -2,47 +2,6 @@ # SPDX-License-Identifier: GPL-3.0-or-later -# shellcheck source=./packaging/installer/functions.sh -. "$(dirname "$0")"/../installer/functions.sh || exit 1 +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -set -e - -DOCKER_CONTAINER_NAME="netdata-package-x86_64-static-alpine312" - -if ! docker inspect "${DOCKER_CONTAINER_NAME}" > /dev/null 2>&1; then - # To run interactively: - # docker run -it netdata-package-x86_64-static /bin/sh - # (add -v host-dir:guest-dir:rw arguments to mount volumes) - # - # To remove images in order to re-create: - # docker rm -v $(sudo docker ps -a -q -f status=exited) - # docker rmi netdata-package-x86_64-static - # - # This command maps the current directory to - # /usr/src/netdata.git - # inside the container and runs the script install-alpine-packages.sh - # (also inside the container) - # - run docker run -v "$(pwd)":/usr/src/netdata.git:rw alpine:3.12 \ - /bin/sh /usr/src/netdata.git/packaging/makeself/install-alpine-packages.sh - - # save the changes made permanently - id=$(docker ps -l -q) - run docker commit "${id}" "${DOCKER_CONTAINER_NAME}" -fi - -# Run the build script inside the container -if [ -t 1 ]; then - run docker run -a stdin -a stdout -a stderr -i -t -v \ - "$(pwd)":/usr/src/netdata.git:rw \ - "${DOCKER_CONTAINER_NAME}" \ - /bin/sh /usr/src/netdata.git/packaging/makeself/build.sh "${@}" -else - run docker run -v "$(pwd)":/usr/src/netdata.git:rw \ - "${DOCKER_CONTAINER_NAME}" \ - /bin/sh /usr/src/netdata.git/packaging/makeself/build.sh "${@}" -fi - -if [ "${USER}" ]; then - sudo chown -R "${USER}" . 
-fi +"${SCRIPT_DIR}/build-static.sh" x86_64 diff --git a/packaging/makeself/build.sh b/packaging/makeself/build.sh index 85cb8fca7..4eb2c9f41 100755 --- a/packaging/makeself/build.sh +++ b/packaging/makeself/build.sh @@ -38,6 +38,9 @@ if [ ! -f ../../netdata-installer.sh ]; then exit $? fi +git clean -dxf +git submodule foreach --recursive git clean -dxf + cat >&2 << EOF This program will create a self-extracting shell package containing a statically linked netdata, able to run on any 64bit Linux system, @@ -49,6 +52,12 @@ EOF if [ ! -d tmp ]; then mkdir tmp || exit 1 +else + rm -rf tmp/* +fi + +if [ -z "${GITHUB_ACTIONS}" ]; then + export GITHUB_ACTIONS=false fi if ! ./run-all-jobs.sh "$@"; then diff --git a/packaging/makeself/functions.sh b/packaging/makeself/functions.sh index 15818d3b2..afc8a9ac9 100755 --- a/packaging/makeself/functions.sh +++ b/packaging/makeself/functions.sh @@ -29,13 +29,25 @@ set -euo pipefail # ----------------------------------------------------------------------------- fetch() { - local dir="${1}" url="${2}" + local dir="${1}" url="${2}" sha256="${3}" local tar="${dir}.tar.gz" if [ ! -f "${NETDATA_MAKESELF_PATH}/tmp/${tar}" ]; then run wget -O "${NETDATA_MAKESELF_PATH}/tmp/${tar}" "${url}" fi + # Check SHA256 of gzip'd tar file (apparently alpine's sha256sum requires + # two empty spaces between the checksum and the file's path) + set +e + echo "${sha256} ${NETDATA_MAKESELF_PATH}/tmp/${tar}" | sha256sum -c -s + local rc=$? + if [ ${rc} -ne 0 ]; then + echo >&2 "SHA256 verification of tar file ${tar} failed (rc=${rc})" + echo >&2 "expected: ${sha256}, got $(sha256sum "${NETDATA_MAKESELF_PATH}/tmp/${tar}")" + exit 1 + fi + set -e + if [ ! -d "${NETDATA_MAKESELF_PATH}/tmp/${dir}" ]; then cd "${NETDATA_MAKESELF_PATH}/tmp" run tar -zxpf "${tar}" diff --git a/packaging/makeself/install-alpine-packages.sh b/packaging/makeself/install-alpine-packages.sh index a3e8cf605..6258d5dda 100755 --- a/packaging/makeself/install-alpine-packages.sh +++ b/packaging/makeself/install-alpine-packages.sh @@ -9,43 +9,37 @@ # Add required APK packages apk add --no-cache -U \ - bash \ - wget \ - curl \ - ncurses \ - git \ - netcat-openbsd \ alpine-sdk \ autoconf \ automake \ - gcc \ - make \ + bash \ + binutils \ cmake \ - libtool \ - pkgconfig \ - util-linux-dev \ + curl \ + elfutils-dev \ + gcc \ + git \ gnutls-dev \ - zlib-dev \ - zlib-static \ + gzip \ + libelf-static \ libmnl-dev \ libnetfilter_acct-dev \ + libtool \ libuv-dev \ libuv-static \ lz4-dev \ lz4-static \ - snappy-dev \ + make \ + ncurses \ + netcat-openbsd \ + openssh \ + pkgconfig \ protobuf-dev \ - binutils \ - gzip \ - xz || exit 1 - -# snappy doesn't have static version in alpine, let's compile it -export SNAPPY_VER="1.1.7" -wget -O /snappy.tar.gz https://github.com/google/snappy/archive/${SNAPPY_VER}.tar.gz -tar -C / -xf /snappy.tar.gz -rm /snappy.tar.gz -cd /snappy-${SNAPPY_VER} || exit 1 -mkdir build -cd build || exit 1 -cmake -DCMAKE_BUILD_SHARED_LIBS=true -DCMAKE_INSTALL_PREFIX:PATH=/usr -DCMAKE_INSTALL_LIBDIR=lib ../ -make && make install + snappy-dev \ + snappy-static \ + util-linux-dev \ + wget \ + xz \ + zlib-dev \ + zlib-static || + exit 1 diff --git a/packaging/makeself/install-or-update.sh b/packaging/makeself/install-or-update.sh index 11fcc091c..ae12cce7b 100755 --- a/packaging/makeself/install-or-update.sh +++ b/packaging/makeself/install-or-update.sh @@ -49,6 +49,7 @@ while [ "${1}" ]; do REINSTALL_OPTIONS="${REINSTALL_OPTIONS} ${1}" ;; "--disable-telemetry") + NETDATA_DISABLE_TELEMETRY=1 
REINSTALL_OPTIONS="${REINSTALL_OPTIONS} ${1}" ;; @@ -58,6 +59,7 @@ while [ "${1}" ]; do done if [ ! "${DO_NOT_TRACK:-0}" -eq 0 ] || [ -n "$DO_NOT_TRACK" ]; then + NETDATA_DISABLE_TELEMETRY=1 REINSTALL_OPTIONS="${REINSTALL_OPTIONS} --disable-telemetry" fi @@ -138,7 +140,7 @@ install_netdata_logrotate || run_failed "Cannot install logrotate file for netda progress "Telemetry configuration" # Opt-out from telemetry program -if [ -n "${NETDATA_DISABLE_TELEMETRY+x}" ]; then +if [ -n "${NETDATA_DISABLE_TELEMETRY}" ]; then run touch "${NETDATA_USER_CONFIG_DIR}/.opt-out-from-anonymous-statistics" else printf "You can opt out from anonymous statistics via the --disable-telemetry option, or by creating an empty file %s \n\n" "${NETDATA_USER_CONFIG_DIR}/.opt-out-from-anonymous-statistics" @@ -212,7 +214,7 @@ run chown -R ${NETDATA_USER}:${NETDATA_GROUP} /opt/netdata progress "fix plugin permissions" -for x in apps.plugin freeipmi.plugin ioping cgroup-network; do +for x in apps.plugin freeipmi.plugin ioping cgroup-network ebpf.plugin; do f="usr/libexec/netdata/plugins.d/${x}" if [ -f "${f}" ]; then diff --git a/packaging/makeself/jobs/10-prepare-destination.install.sh b/packaging/makeself/jobs/10-prepare-destination.install.sh index 8cce2d442..4686841b5 100755 --- a/packaging/makeself/jobs/10-prepare-destination.install.sh +++ b/packaging/makeself/jobs/10-prepare-destination.install.sh @@ -4,14 +4,20 @@ # shellcheck source=packaging/makeself/functions.sh . "$(dirname "${0}")/../functions.sh" "${@}" || exit 1 +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Preparing build environment" || true + [ -d "${NETDATA_INSTALL_PATH}.old" ] && run rm -rf "${NETDATA_INSTALL_PATH}.old" [ -d "${NETDATA_INSTALL_PATH}" ] && run mv -f "${NETDATA_INSTALL_PATH}" "${NETDATA_INSTALL_PATH}.old" run mkdir -p "${NETDATA_INSTALL_PATH}/bin" run mkdir -p "${NETDATA_INSTALL_PATH}/usr" -run cd "${NETDATA_INSTALL_PATH}" +run cd "${NETDATA_INSTALL_PATH}" || exit 1 run ln -s bin sbin -run cd "${NETDATA_INSTALL_PATH}/usr" +run cd "${NETDATA_INSTALL_PATH}/usr" || exit 1 run ln -s ../bin bin run ln -s ../sbin sbin run ln -s . local + +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true diff --git a/packaging/makeself/jobs/20-openssl.install.sh b/packaging/makeself/jobs/20-openssl.install.sh index 10863f90f..96d53fc86 100755 --- a/packaging/makeself/jobs/20-openssl.install.sh +++ b/packaging/makeself/jobs/20-openssl.install.sh @@ -4,17 +4,24 @@ # shellcheck source=packaging/makeself/functions.sh . "$(dirname "${0}")/../functions.sh" "${@}" || exit 1 +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Building OpenSSL" || true + version="$(cat "$(dirname "${0}")/../openssl.version")" +export CFLAGS='-fno-lto' export LDFLAGS='-static' export PKG_CONFIG="pkg-config --static" # Might be bind-mounted if [ ! 
-d "${NETDATA_MAKESELF_PATH}/tmp/openssl" ]; then - run git clone --branch "${version}" --single-branch git://git.openssl.org/openssl.git "${NETDATA_MAKESELF_PATH}/tmp/openssl" + run git clone --branch "${version}" --single-branch --depth 1 git://git.openssl.org/openssl.git "${NETDATA_MAKESELF_PATH}/tmp/openssl" fi cd "${NETDATA_MAKESELF_PATH}/tmp/openssl" || exit 1 -run ./config no-shared no-tests --prefix=/openssl-static --openssldir=/opt/netdata/etc/ssl +run ./config -static no-tests --prefix=/openssl-static --openssldir=/opt/netdata/etc/ssl run make -j "$(nproc)" run make -j "$(nproc)" install_sw + +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true diff --git a/packaging/makeself/jobs/50-bash-5.0.install.sh b/packaging/makeself/jobs/50-bash-5.0.install.sh deleted file mode 100755 index a204c1575..000000000 --- a/packaging/makeself/jobs/50-bash-5.0.install.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash -# SPDX-License-Identifier: GPL-3.0-or-later - -# shellcheck source=packaging/makeself/functions.sh -. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1 - -fetch "bash-5.0" "http://ftp.gnu.org/gnu/bash/bash-5.0.tar.gz" - -export PKG_CONFIG_PATH="/openssl-static/lib/pkgconfig" - -run ./configure \ - --prefix="${NETDATA_INSTALL_PATH}" \ - --without-bash-malloc \ - --enable-static-link \ - --enable-net-redirections \ - --enable-array-variables \ - --disable-profiling \ - --disable-nls - -run make clean -run make -j "$(nproc)" - -cat > examples/loadables/Makefile << EOF -all: -clean: -install: -EOF - -run make install - -if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]; then - run strip "${NETDATA_INSTALL_PATH}"/bin/bash -fi diff --git a/packaging/makeself/jobs/50-bash-5.1.8.install.sh b/packaging/makeself/jobs/50-bash-5.1.8.install.sh new file mode 100755 index 000000000..22c9286af --- /dev/null +++ b/packaging/makeself/jobs/50-bash-5.1.8.install.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +# shellcheck source=packaging/makeself/functions.sh +. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1 + +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Building bash" || true + +fetch "bash-5.1.8" "http://ftp.gnu.org/gnu/bash/bash-5.1.8.tar.gz" \ + 0cfb5c9bb1a29f800a97bd242d19511c997a1013815b805e0fdd32214113d6be + +export PKG_CONFIG_PATH="/openssl-static/lib/pkgconfig" + +run ./configure \ + --prefix="${NETDATA_INSTALL_PATH}" \ + --without-bash-malloc \ + --enable-static-link \ + --enable-net-redirections \ + --enable-array-variables \ + --disable-progcomp \ + --disable-profiling \ + --disable-nls + +run make clean +run make -j "$(nproc)" + +cat > examples/loadables/Makefile << EOF +all: +clean: +install: +EOF + +run make install + +if [ "${NETDATA_BUILD_WITH_DEBUG}" -eq 0 ]; then + run strip "${NETDATA_INSTALL_PATH}"/bin/bash +fi + +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true diff --git a/packaging/makeself/jobs/50-curl-7.73.0.install.sh b/packaging/makeself/jobs/50-curl-7.73.0.install.sh deleted file mode 100755 index 4a9505e58..000000000 --- a/packaging/makeself/jobs/50-curl-7.73.0.install.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash -# SPDX-License-Identifier: GPL-3.0-or-later - -# shellcheck source=packaging/makeself/functions.sh -. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1 - -fetch "curl-7.73.0" "https://curl.haxx.se/download/curl-7.73.0.tar.gz" - -export CFLAGS="-I/openssl-static/include" -export LDFLAGS="-static -L/openssl-static/lib" -export PKG_CONFIG="pkg-config --static" -export PKG_CONFIG_PATH="/openssl-static/lib/pkgconfig" - -run autoreconf -fi - -run ./configure \ - --prefix="${NETDATA_INSTALL_PATH}" \ - --enable-optimize \ - --disable-shared \ - --enable-static \ - --enable-http \ - --enable-proxy \ - --enable-ipv6 \ - --enable-cookies \ - --with-ca-fallback - -# Curl autoconf does not honour the curl_LDFLAGS environment variable -run sed -i -e "s/curl_LDFLAGS =/curl_LDFLAGS = -all-static/" src/Makefile - -run make clean -run make -j "$(nproc)" -run make install - -if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]; then - run strip "${NETDATA_INSTALL_PATH}"/bin/curl -fi diff --git a/packaging/makeself/jobs/50-curl-7.78.0.install.sh b/packaging/makeself/jobs/50-curl-7.78.0.install.sh new file mode 100755 index 000000000..a2bb24e2c --- /dev/null +++ b/packaging/makeself/jobs/50-curl-7.78.0.install.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-3.0-or-later + +# shellcheck source=packaging/makeself/functions.sh +. "$(dirname "${0}")/../functions.sh" "${@}" || exit 1 + +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Building cURL" || true + +fetch "curl-7.78.0" "https://curl.haxx.se/download/curl-7.78.0.tar.gz" \ + ed936c0b02c06d42cf84b39dd12bb14b62d77c7c4e875ade022280df5dcc81d7 + +export CFLAGS="-I/openssl-static/include" +export LDFLAGS="-static -L/openssl-static/lib" +export PKG_CONFIG="pkg-config --static" +export PKG_CONFIG_PATH="/openssl-static/lib/pkgconfig" + +run autoreconf -fi + +run ./configure \ + --prefix="${NETDATA_INSTALL_PATH}" \ + --enable-optimize \ + --disable-shared \ + --enable-static \ + --enable-http \ + --disable-ldap \ + --disable-ldaps \ + --enable-proxy \ + --disable-dict \ + --disable-telnet \ + --disable-tftp \ + --disable-pop3 \ + --disable-imap \ + --disable-smb \ + --disable-smtp \ + --disable-gopher \ + --enable-ipv6 \ + --enable-cookies \ + --with-ca-fallback \ + --with-openssl + +# Curl autoconf does not honour the curl_LDFLAGS environment variable +run sed -i -e "s/curl_LDFLAGS =/curl_LDFLAGS = -all-static/" src/Makefile + +run make clean +run make -j "$(nproc)" +run make install + +if [ "${NETDATA_BUILD_WITH_DEBUG}" -eq 0 ]; then + run strip "${NETDATA_INSTALL_PATH}"/bin/curl +fi + +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true diff --git a/packaging/makeself/jobs/50-fping-5.0.install.sh b/packaging/makeself/jobs/50-fping-5.0.install.sh index e62c47e45..8bef27d4d 100755 --- a/packaging/makeself/jobs/50-fping-5.0.install.sh +++ b/packaging/makeself/jobs/50-fping-5.0.install.sh @@ -4,7 +4,11 @@ # shellcheck source=packaging/makeself/functions.sh .
"$(dirname "${0}")/../functions.sh" "${@}" || exit 1 -fetch "fping-5.0" "https://fping.org/dist/fping-5.0.tar.gz" +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Building fping" || true + +fetch "fping-5.0" "https://fping.org/dist/fping-5.0.tar.gz" \ + ed38c0b9b64686a05d1b3bc1d66066114a492e04e44eef1821d43b1263cd57b8 export CFLAGS="-static -I/openssl-static/include" export LDFLAGS="-static -L/openssl-static/lib" @@ -25,6 +29,9 @@ run make clean run make -j "$(nproc)" run make install -if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]; then +if [ "${NETDATA_BUILD_WITH_DEBUG}" -eq 0 ]; then run strip "${NETDATA_INSTALL_PATH}"/bin/fping fi + +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true diff --git a/packaging/makeself/jobs/50-ioping-1.2.install.sh b/packaging/makeself/jobs/50-ioping-1.2.install.sh index 22f26914a..9e9505022 100755 --- a/packaging/makeself/jobs/50-ioping-1.2.install.sh +++ b/packaging/makeself/jobs/50-ioping-1.2.install.sh @@ -4,7 +4,11 @@ # shellcheck source=packaging/makeself/functions.sh . "$(dirname "${0}")/../functions.sh" "${@}" || exit 1 -fetch "ioping-1.2" "https://github.com/koct9i/ioping/archive/v1.2.tar.gz" +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Building ioping" || true + +fetch "ioping-1.2" "https://github.com/koct9i/ioping/archive/v1.2.tar.gz" \ + d3e4497c653a1e96df67c72ce2b70da18e9f5e3b93179a5bb57a6e30ceacfa75 export CFLAGS="-static" @@ -13,6 +17,9 @@ run make -j "$(nproc)" run mkdir -p "${NETDATA_INSTALL_PATH}"/usr/libexec/netdata/plugins.d/ run install -o root -g root -m 4750 ioping "${NETDATA_INSTALL_PATH}"/usr/libexec/netdata/plugins.d/ -if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]; then +if [ "${NETDATA_BUILD_WITH_DEBUG}" -eq 0 ]; then run strip "${NETDATA_INSTALL_PATH}"/usr/libexec/netdata/plugins.d/ioping fi + +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true diff --git a/packaging/makeself/jobs/70-netdata-git.install.sh b/packaging/makeself/jobs/70-netdata-git.install.sh index 21d4fd0ac..98fc4dbe8 100755 --- a/packaging/makeself/jobs/70-netdata-git.install.sh +++ b/packaging/makeself/jobs/70-netdata-git.install.sh @@ -6,7 +6,7 @@ cd "${NETDATA_SOURCE_PATH}" || exit 1 -if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]; then +if [ "${NETDATA_BUILD_WITH_DEBUG}" -eq 0 ]; then export CFLAGS="-static -O3 -I/openssl-static/include" else export CFLAGS="-static -O1 -ggdb -Wall -Wextra -Wformat-signedness -fstack-protector-all -D_FORTIFY_SOURCE=2 -DNETDATA_INTERNAL_CHECKS=1 -I/openssl-static/include" @@ -31,8 +31,18 @@ run ./netdata-installer.sh \ --dont-wait \ --dont-start-it \ --require-cloud \ + --use-system-protobuf \ --dont-scrub-cflags-even-though-it-may-break-things +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Finishing netdata install" || true + +# Properly mark the install type +cat > "${NETDATA_INSTALL_PATH}/etc/netdata/.install-type" <<-EOF + INSTALL_TYPE='manual-static' + PREBUILT_ARCH='${BUILDARCH}' + EOF + # Remove the netdata.conf file from the tree. It has hard-coded sensible defaults builtin. 
run rm -f "${NETDATA_INSTALL_PATH}/etc/netdata/netdata.conf" @@ -43,8 +53,11 @@ if run readelf -l "${NETDATA_INSTALL_PATH}"/bin/netdata | grep 'INTERP'; then exit 1 fi -if [ ${NETDATA_BUILD_WITH_DEBUG} -eq 0 ]; then +if [ "${NETDATA_BUILD_WITH_DEBUG}" -eq 0 ]; then run strip "${NETDATA_INSTALL_PATH}"/bin/netdata run strip "${NETDATA_INSTALL_PATH}"/usr/libexec/netdata/plugins.d/apps.plugin run strip "${NETDATA_INSTALL_PATH}"/usr/libexec/netdata/plugins.d/cgroup-network fi + +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true diff --git a/packaging/makeself/jobs/99-makeself.install.sh b/packaging/makeself/jobs/99-makeself.install.sh index de641027b..9dfcfbed7 100755 --- a/packaging/makeself/jobs/99-makeself.install.sh +++ b/packaging/makeself/jobs/99-makeself.install.sh @@ -4,6 +4,9 @@ # shellcheck source=packaging/makeself/functions.sh . "$(dirname "${0}")/../functions.sh" "${@}" || exit 1 +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::group::Building self-extracting archive" || true + run cd "${NETDATA_SOURCE_PATH}" || exit 1 # ----------------------------------------------------------------------------- @@ -90,12 +93,22 @@ run rm "${NETDATA_MAKESELF_PATH}/makeself.lsm.tmp" # ----------------------------------------------------------------------------- # copy it to the netdata build dir -FILE="netdata-${VERSION}.gz.run" +FILE="netdata-${BUILDARCH}-${VERSION}.gz.run" run mkdir -p artifacts run mv "${NETDATA_INSTALL_PATH}.gz.run" "artifacts/${FILE}" -[ -f netdata-latest.gz.run ] && rm netdata-latest.gz.run -run ln -s "artifacts/${FILE}" netdata-latest.gz.run +[ -f "netdata-${BUILDARCH}-latest.gz.run" ] && rm "netdata-${BUILDARCH}-latest.gz.run" +run ln -s "artifacts/${FILE}" "netdata-${BUILDARCH}-latest.gz.run" + +if [ "${BUILDARCH}" = "x86_64" ]; then + [ -f "netdata-latest.gz.run" ] && rm "netdata-latest.gz.run" + run ln -s "artifacts/${FILE}" "netdata-latest.gz.run" + [ -f "artifacts/netdata-${VERSION}.gz.run" ] && rm "artifacts/netdata-${VERSION}.gz.run" + run ln -s "./${FILE}" "artifacts/netdata-${VERSION}.gz.run" +fi + +# shellcheck disable=SC2015 +[ "${GITHUB_ACTIONS}" = "true" ] && echo "::endgroup::" || true echo >&2 "Self-extracting installer moved to 'artifacts/${FILE}'" diff --git a/packaging/makeself/makeself.sh b/packaging/makeself/makeself.sh index 15a002884..1581f499e 100755 --- a/packaging/makeself/makeself.sh +++ b/packaging/makeself/makeself.sh @@ -7,7 +7,7 @@ # Utility to create self-extracting tar.gz archives. # The resulting archive is a file holding the tar.gz archive with # a small Shell script stub that uncompresses the archive to a temporary -# directory and then executes a given script from withing that directory. +# directory and then executes a given script from within that directory.
# # Makeself home page: http://makeself.io/ # diff --git a/packaging/makeself/openssl.version b/packaging/makeself/openssl.version index aa2c66c02..5b309eb4f 100644 --- a/packaging/makeself/openssl.version +++ b/packaging/makeself/openssl.version @@ -1 +1 @@ -OpenSSL_1_1_1k +OpenSSL_1_1_1l diff --git a/packaging/protobuf.checksums b/packaging/protobuf.checksums new file mode 100644 index 000000000..b901a0be4 --- /dev/null +++ b/packaging/protobuf.checksums @@ -0,0 +1 @@ +51cec99f108b83422b7af1170afd7aeb2dd77d2bcbb7b6bad1f92509e9ccf8cb protobuf-cpp-3.17.3.tar.gz diff --git a/packaging/protobuf.version b/packaging/protobuf.version new file mode 100644 index 000000000..56cc1b61f --- /dev/null +++ b/packaging/protobuf.version @@ -0,0 +1 @@ +3.17.3 diff --git a/packaging/repoconfig/Makefile b/packaging/repoconfig/Makefile new file mode 100644 index 000000000..d0f246ac3 --- /dev/null +++ b/packaging/repoconfig/Makefile @@ -0,0 +1,31 @@ +FILES = netdata.list netdata-edge.list netdata-archive-keyring.gpg netdata-edge-archive-keyring.gpg netdata-repoconfig-archive-keyring.gpg + +all: $(FILES) + +netdata.list: netdata.list.in + cp netdata.list.in netdata.list + set -a && . /etc/os-release && sed -i -e "s/__DISTRO__/$${ID}/" -e "s/__SUITE__/$${VERSION_CODENAME}/" -e "s/__VARIANT__//" netdata.list + +netdata-edge.list: netdata.list.in + cp netdata.list.in netdata-edge.list + set -a && . /etc/os-release && sed -i -e "s/__DISTRO__/$${ID}/" -e "s/__SUITE__/$${VERSION_CODENAME}/" -e "s/__VARIANT__/-edge/" netdata-edge.list + +netdata-archive-keyring.gpg: + curl -L https://packagecloud.io/netdata/netdata/gpgkey | gpg --dearmor > netdata-archive-keyring.gpg + +netdata-edge-archive-keyring.gpg: + curl -L https://packagecloud.io/netdata/netdata-edge/gpgkey | gpg --dearmor > netdata-edge-archive-keyring.gpg + +netdata-repoconfig-archive-keyring.gpg: + curl -L https://packagecloud.io/netdata/netdata-repoconfig/gpgkey | gpg --dearmor > netdata-repoconfig-archive-keyring.gpg + +debian/tmp: + mkdir -p debian/tmp + +install: $(FILES) debian/tmp + cp $(FILES) debian/tmp/ + +clean: + rm -f $(FILES) + +.PHONY: clean diff --git a/packaging/repoconfig/build-deb.sh b/packaging/repoconfig/build-deb.sh new file mode 100755 index 000000000..f1e0d7266 --- /dev/null +++ b/packaging/repoconfig/build-deb.sh @@ -0,0 +1,32 @@ +#!/bin/sh + +# Needed because dpkg is stupid and tries to configure things interactively if it sees a terminal. +export DEBIAN_FRONTEND=noninteractive + +# Pull in our dependencies +apt update || exit 1 +apt upgrade -y || exit 1 +apt install -y build-essential debhelper curl gnupg || exit 1 + +# Run the builds in an isolated source directory. +# This removes the need for cleanup, and ensures anything the build does +# doesn't muck with the user's sources. +cp -a /netdata/packaging/repoconfig /usr/src || exit 1 +cd /usr/src/repoconfig || exit 1 + +# The pre/post clean options were added in dpkg-buildpackage 1.18.8; it is simpler to check the help output for them than to parse the version. +if dpkg-buildpackage --help | grep "\-\-post\-clean" 2> /dev/null > /dev/null; then + dpkg-buildpackage --post-clean --pre-clean -b -us -uc || exit 1 +else + dpkg-buildpackage -b -us -uc || exit 1 +fi + +# Copy the built packages to /netdata/artifacts (which may be bind-mounted) +# Also ensure /netdata/artifacts exists and create it if it doesn't +[ -d /netdata/artifacts ] || mkdir -p /netdata/artifacts +cp -a /usr/src/*.deb /netdata/artifacts/ || exit 1 + +# Correct ownership of the artifacts.
+# Without this, the artifacts directory and its contents end up owned +by root instead of the local user on Linux boxes. +chown -R --reference=/netdata /netdata/artifacts diff --git a/packaging/repoconfig/build-rpm.sh b/packaging/repoconfig/build-rpm.sh new file mode 100755 index 000000000..6c07c6619 --- /dev/null +++ b/packaging/repoconfig/build-rpm.sh @@ -0,0 +1,26 @@ +#!/bin/sh + +prefix='/root/rpmbuild' + +if command -v dnf > /dev/null ; then + dnf distro-sync -y --nodocs || exit 1 + dnf install -y --nodocs --setopt=install_weak_deps=False rpm-build || exit 1 +elif command -v yum > /dev/null ; then + yum distro-sync -y || exit 1 + yum install -y rpm-build || exit 1 +elif command -v zypper > /dev/null ; then + zypper update -y || exit 1 + zypper install -y rpm-build || exit 1 + prefix="/usr/src/packages" +fi + +mkdir -p "${prefix}/BUILD" "${prefix}/RPMS" "${prefix}/SRPMS" "${prefix}/SPECS" "${prefix}/SOURCES" || exit 1 +cp -a /netdata/packaging/repoconfig/netdata-repo.spec "${prefix}/SPECS" || exit 1 +cp -a /netdata/packaging/repoconfig/* "${prefix}/SOURCES/" || exit 1 + +rpmbuild -bb --rebuild "${prefix}/SPECS/netdata-repo.spec" || exit 1 + +[ -d /netdata/artifacts ] || mkdir -p /netdata/artifacts +find "${prefix}/RPMS/" -type f -name '*.rpm' -exec cp '{}' /netdata/artifacts \; || exit 1 + +chown -R --reference=/netdata /netdata/artifacts diff --git a/packaging/repoconfig/debian/changelog b/packaging/repoconfig/debian/changelog new file mode 100644 index 000000000..61d2e746b --- /dev/null +++ b/packaging/repoconfig/debian/changelog @@ -0,0 +1,6 @@ +netdata-repo (1-1) unstable; urgency=medium + + * Initial Release + + -- Netdata Builder Mon, 14 Jun 2021 08:00:00 -0500 + diff --git a/packaging/repoconfig/debian/compat b/packaging/repoconfig/debian/compat new file mode 100644 index 000000000..ec635144f --- /dev/null +++ b/packaging/repoconfig/debian/compat @@ -0,0 +1 @@ +9 diff --git a/packaging/repoconfig/debian/control b/packaging/repoconfig/debian/control new file mode 100644 index 000000000..5fdcf140b --- /dev/null +++ b/packaging/repoconfig/debian/control @@ -0,0 +1,19 @@ +Source: netdata-repo +Section: net +Priority: optional +Maintainer: Netdata Builder +Standards-Version: 3.9.6 +Build-Depends: debhelper (>= 9), curl, gnupg +Homepage: https://netdata.cloud + +Package: netdata-repo +Architecture: all +Depends: apt-transport-https, debian-archive-keyring, gnupg +Conflicts: netdata-repo-edge +Description: Configuration for the official Netdata Stable package repository. + +Package: netdata-repo-edge +Architecture: all +Depends: apt-transport-https, debian-archive-keyring, gnupg +Conflicts: netdata-repo +Description: Configuration for the official Netdata Edge package repository. diff --git a/packaging/repoconfig/debian/copyright b/packaging/repoconfig/debian/copyright new file mode 100644 index 000000000..193b45e6a --- /dev/null +++ b/packaging/repoconfig/debian/copyright @@ -0,0 +1,10 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: Netdata +Upstream-Contact: Costa Tsaousis +Source: https://github.com/netdata/netdata + +Files: * +Copyright: 2021 Netdata Inc. +License: GPL-3+ + On Debian systems, the complete text of the GNU General Public + License version 3 can be found in /usr/share/common-licenses/GPL-3.
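These helper scripts expect to run inside a container with the netdata source tree mounted at `/netdata`; a hypothetical invocation for the DEB side might look like this (the `debian:bullseye` image is an assumption, any apt-based target distribution should work):

```bash
# Build the repoconfig .deb packages; artifacts land in ./artifacts on the host.
docker run --rm -v "$(pwd)":/netdata debian:bullseye \
    /bin/sh /netdata/packaging/repoconfig/build-deb.sh
```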
diff --git a/packaging/repoconfig/debian/rules b/packaging/repoconfig/debian/rules new file mode 100755 index 000000000..0151b96ea --- /dev/null +++ b/packaging/repoconfig/debian/rules @@ -0,0 +1,21 @@ +#!/usr/bin/make -f + +TOP = $(CURDIR)/debian/netdata-repo +TOP_EDGE = $(CURDIR)/debian/netdata-repo-edge +TEMPTOP = $(CURDIR)/debian/tmp + +%: + dh $@ + +override_dh_configure: + true + +override_dh_install: + mkdir -p $(TOP)/etc/apt/sources.list.d $(TOP)/etc/apt/trusted.gpg.d/ + mv -f $(TEMPTOP)/netdata.list $(TOP)/etc/apt/sources.list.d + mv -f $(TEMPTOP)/netdata-archive-keyring.gpg $(TOP)/etc/apt/trusted.gpg.d + cp $(TEMPTOP)/netdata-repoconfig-archive-keyring.gpg $(TOP)/etc/apt/trusted.gpg.d + mkdir -p $(TOP_EDGE)/etc/apt/sources.list.d $(TOP_EDGE)/etc/apt/trusted.gpg.d/ + mv -f $(TEMPTOP)/netdata-edge.list $(TOP_EDGE)/etc/apt/sources.list.d + mv -f $(TEMPTOP)/netdata-edge-archive-keyring.gpg $(TOP_EDGE)/etc/apt/trusted.gpg.d + cp $(TEMPTOP)/netdata-repoconfig-archive-keyring.gpg $(TOP_EDGE)/etc/apt/trusted.gpg.d diff --git a/packaging/repoconfig/debian/source/format b/packaging/repoconfig/debian/source/format new file mode 100644 index 000000000..163aaf8d8 --- /dev/null +++ b/packaging/repoconfig/debian/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/packaging/repoconfig/netdata-edge.repo.centos b/packaging/repoconfig/netdata-edge.repo.centos new file mode 100644 index 000000000..124cf612e --- /dev/null +++ b/packaging/repoconfig/netdata-edge.repo.centos @@ -0,0 +1,19 @@ +[netdata-edge] +name=Netdata Edge +baseurl=https://packagecloud.io/netdata/netdata-edge/el/$releasever/$basearch +repo_gpgcheck=1 +gpgcheck=0 +gpgkey=https://packagecloud.io/netdata/netdata-edge/gpgkey +enabled=1 +sslverify=1 +sslcacert=/etc/pki/tls/certs/ca-bundle.crt + +[netdata-repoconfig] +name=Netdata Repository Config +baseurl=https://packagecloud.io/netdata/netdata-repoconfig/el/$releasever/$basearch +repo_gpgcheck=1 +gpgcheck=0 +gpgkey=https://packagecloud.io/netdata/netdata-repoconfig/gpgkey +enabled=1 +sslverify=1 +sslcacert=/etc/pki/tls/certs/ca-bundle.crt diff --git a/packaging/repoconfig/netdata-edge.repo.fedora b/packaging/repoconfig/netdata-edge.repo.fedora new file mode 100644 index 000000000..636fba6e4 --- /dev/null +++ b/packaging/repoconfig/netdata-edge.repo.fedora @@ -0,0 +1,19 @@ +[netdata-edge] +name=Netdata Edge +baseurl=https://packagecloud.io/netdata/netdata-edge/fedora/$releasever/$basearch +repo_gpgcheck=1 +gpgcheck=0 +gpgkey=https://packagecloud.io/netdata/netdata-edge/gpgkey +enabled=1 +sslverify=1 +sslcacert=/etc/pki/tls/certs/ca-bundle.crt + +[netdata-repoconfig] +name=Netdata Repository Config +baseurl=https://packagecloud.io/netdata/netdata-repoconfig/fedora/$releasever/$basearch +repo_gpgcheck=1 +gpgcheck=0 +gpgkey=https://packagecloud.io/netdata/netdata-repoconfig/gpgkey +enabled=1 +sslverify=1 +sslcacert=/etc/pki/tls/certs/ca-bundle.crt diff --git a/packaging/repoconfig/netdata-edge.repo.suse b/packaging/repoconfig/netdata-edge.repo.suse new file mode 100644 index 000000000..94db12a51 --- /dev/null +++ b/packaging/repoconfig/netdata-edge.repo.suse @@ -0,0 +1,19 @@ +[netdata-edge] +name=Netdata Edge +baseurl=https://packagecloud.io/netdata/netdata-edge/opensuse/$releasever/$basearch +repo_gpgcheck=1 +pkg_gpgcheck=0 +gpgkey=https://packagecloud.io/netdata/netdata-edge/gpgkey +enabled=1 +type=rpm-md +autorefresh=1 + +[netdata-repoconfig] +name=Netdata Repoconfig +baseurl=https://packagecloud.io/netdata/netdata-repoconfig/opensuse/$releasever/$basearch +repo_gpgcheck=1 
+pkg_gpgcheck=0 +gpgkey=https://packagecloud.io/netdata/netdata-repoconfig/gpgkey +enabled=1 +type=rpm-md +autorefresh=1 diff --git a/packaging/repoconfig/netdata-repo.spec b/packaging/repoconfig/netdata-repo.spec new file mode 100644 index 000000000..6c280b3e7 --- /dev/null +++ b/packaging/repoconfig/netdata-repo.spec @@ -0,0 +1,89 @@ +%{?rhel:%global centos_ver %rhel} + +Name: netdata-repo +Version: 1 +Release: 1 +Summary: Netdata stable repositories configuration. + +Group: System Environment/Base +License: GPLv2 + +Source0: netdata.repo.fedora +Source1: netdata-edge.repo.fedora +Source2: netdata.repo.suse +Source3: netdata-edge.repo.suse +Source4: netdata.repo.centos +Source5: netdata-edge.repo.centos + +BuildArch: noarch + +# Overlapping file installs +Conflicts: netdata-repo-edge + +%description +This package contains the official Netdata package repository configuration for stable versions of Netdata. + +%prep +%setup -q -c -T + +%if 0%{?fedora} +install -pm 644 %{SOURCE0} ./netdata.repo +install -pm 644 %{SOURCE1} ./netdata-edge.repo +%endif + +%if 0%{?suse_version} +install -pm 644 %{SOURCE2} ./netdata.repo +install -pm 644 %{SOURCE3} ./netdata-edge.repo +%endif + +%if 0%{?centos_ver} +install -pm 644 %{SOURCE4} ./netdata.repo +install -pm 644 %{SOURCE5} ./netdata-edge.repo +%endif + +%build +true + +%install +rm -rf $RPM_BUILD_ROOT + +%if 0%{?suse_version} +install -dm 755 $RPM_BUILD_ROOT%{_sysconfdir}/zypp/repos.d +install -pm 644 netdata.repo $RPM_BUILD_ROOT%{_sysconfdir}/zypp/repos.d +install -pm 644 netdata-edge.repo $RPM_BUILD_ROOT%{_sysconfdir}/zypp/repos.d +%else +install -dm 755 $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d +install -pm 644 netdata.repo $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d +install -pm 644 netdata-edge.repo $RPM_BUILD_ROOT%{_sysconfdir}/yum.repos.d +%endif + +%clean +rm -rf $RPM_BUILD_ROOT + +%files +%if 0%{?suse_version} +%attr(644,root,root) /etc/zypp/repos.d/netdata.repo +%else +%attr(644,root,root) /etc/yum.repos.d/netdata.repo +%endif + +%package edge +Summary: Netdata nightly repositories configuration. +Group: System Environment/Base + +# Overlapping file installs +Conflicts: netdata-repo + +%description edge +This package contains the official Netdata package repository configuration for nightly versions of Netdata. 
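To exercise this spec outside of `build-rpm.sh`, a rough sketch (paths assume a default `~/rpmbuild` tree; the glob stages the `.repo` sources listed in the spec):

```bash
# Stage the spec and its sources, then build the binary packages.
cp packaging/repoconfig/netdata-repo.spec ~/rpmbuild/SPECS/
cp packaging/repoconfig/netdata*.repo.* ~/rpmbuild/SOURCES/
rpmbuild -bb ~/rpmbuild/SPECS/netdata-repo.spec
```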
+ +%files edge +%if 0%{?suse_version} +%attr(644,root,root) /etc/zypp/repos.d/netdata-edge.repo +%else +%attr(644,root,root) /etc/yum.repos.d/netdata-edge.repo +%endif + +%changelog +* Mon Jun 14 2021 Austin Hemmelgarn 1-1 +- Initial revision diff --git a/packaging/repoconfig/netdata.list.in b/packaging/repoconfig/netdata.list.in new file mode 100644 index 000000000..9c3ddba01 --- /dev/null +++ b/packaging/repoconfig/netdata.list.in @@ -0,0 +1,2 @@ +deb https://packagecloud.io/netdata/netdata__VARIANT__/__DISTRO__/ __SUITE__ main +deb https://packagecloud.io/netdata/netdata-repoconfig/__DISTRO__/ __SUITE__ main diff --git a/packaging/repoconfig/netdata.repo.centos b/packaging/repoconfig/netdata.repo.centos new file mode 100644 index 000000000..1eb2f2616 --- /dev/null +++ b/packaging/repoconfig/netdata.repo.centos @@ -0,0 +1,19 @@ +[netdata] +name=Netdata +baseurl=https://packagecloud.io/netdata/netdata/el/$releasever/$basearch +repo_gpgcheck=1 +gpgcheck=0 +gpgkey=https://packagecloud.io/netdata/netdata/gpgkey +enabled=1 +sslverify=1 +sslcacert=/etc/pki/tls/certs/ca-bundle.crt + +[netdata-repoconfig] +name=Netdata Repository Config +baseurl=https://packagecloud.io/netdata/netdata-repoconfig/el/$releasever/$basearch +repo_gpgcheck=1 +gpgcheck=0 +gpgkey=https://packagecloud.io/netdata/netdata-repoconfig/gpgkey +enabled=1 +sslverify=1 +sslcacert=/etc/pki/tls/certs/ca-bundle.crt diff --git a/packaging/repoconfig/netdata.repo.fedora b/packaging/repoconfig/netdata.repo.fedora new file mode 100644 index 000000000..5efb5c73a --- /dev/null +++ b/packaging/repoconfig/netdata.repo.fedora @@ -0,0 +1,19 @@ +[netdata] +name=Netdata +baseurl=https://packagecloud.io/netdata/netdata/fedora/$releasever/$basearch +repo_gpgcheck=1 +gpgcheck=0 +gpgkey=https://packagecloud.io/netdata/netdata/gpgkey +enabled=1 +sslverify=1 +sslcacert=/etc/pki/tls/certs/ca-bundle.crt + +[netdata-repoconfig] +name=Netdata Repository Config +baseurl=https://packagecloud.io/netdata/netdata-repoconfig/fedora/$releasever/$basearch +repo_gpgcheck=1 +gpgcheck=0 +gpgkey=https://packagecloud.io/netdata/netdata-repoconfig/gpgkey +enabled=1 +sslverify=1 +sslcacert=/etc/pki/tls/certs/ca-bundle.crt diff --git a/packaging/repoconfig/netdata.repo.suse b/packaging/repoconfig/netdata.repo.suse new file mode 100644 index 000000000..55ad73e36 --- /dev/null +++ b/packaging/repoconfig/netdata.repo.suse @@ -0,0 +1,19 @@ +[netdata] +name=Netdata +baseurl=https://packagecloud.io/netdata/netdata/opensuse/$releasever/$basearch +repo_gpgcheck=1 +pkg_gpgcheck=0 +gpgkey=https://packagecloud.io/netdata/netdata/gpgkey +enabled=1 +type=rpm-md +autorefresh=1 + +[netdata-repoconfig] +name=Netdata Repoconfig +baseurl=https://packagecloud.io/netdata/netdata-repoconfig/opensuse/$releasever/$basearch +repo_gpgcheck=1 +pkg_gpgcheck=0 +gpgkey=https://packagecloud.io/netdata/netdata-repoconfig/gpgkey +enabled=1 +type=rpm-md +autorefresh=1 diff --git a/packaging/scripts/install.sh b/packaging/scripts/install.sh deleted file mode 100755 index b14ca11e5..000000000 --- a/packaging/scripts/install.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/bin/sh - -install_debian_like() { - # This is needed to ensure package installs don't prompt for any user input. 
- export DEBIAN_FRONTEND=noninteractive - - apt-get update - - # Install NetData - apt-get install -y "/packages/netdata_${VERSION}_${ARCH}.deb" - - # Install testing tools - apt-get install -y --no-install-recommends \ - curl netcat jq -} - -install_fedora_like() { - # Using a glob pattern here because I can't reliably determine what the - # resulting package name will be (TODO: There must be a better way!) - - PKGMGR="$( (command -v dnf > /dev/null && echo "dnf") || echo "yum")" - - pkg_version="$(echo "${VERSION}" | tr - .)" - - # Install NetData - "$PKGMGR" install -y /packages/netdata-"${pkg_version}"-*.rpm - - # Install testing tools - "$PKGMGR" install -y curl nc jq -} - -install_centos() { - # Using a glob pattern here because I can't reliably determine what the - # resulting package name will be (TODO: There must be a better way!) - - PKGMGR="$( (command -v dnf > /dev/null && echo "dnf") || echo "yum")" - - pkg_version="$(echo "${VERSION}" | tr - .)" - - # Install EPEL (needed for `jq` - "$PKGMGR" install -y epel-release - - # Install NetData - "$PKGMGR" install -y /packages/netdata-"${pkg_version}"-*.rpm - - # Install testing tools - "$PKGMGR" install -y curl nc jq -} - -install_suse_like() { - # Using a glob pattern here because I can't reliably determine what the - # resulting package name will be (TODO: There must be a better way!) - - pkg_version="$(echo "${VERSION}" | tr - .)" - - # Install NetData - # FIXME: Allow unsigned packages (for now) #7773 - zypper install -y --allow-unsigned-rpm \ - /packages/netdata-"${pkg_version}"-*.rpm - - # Install testing tools - zypper install -y --no-recommends \ - curl gnu-netcat jq -} - -case "${DISTRO}" in - debian | ubuntu) - install_debian_like - ;; - fedora) - install_fedora_like - ;; - centos) - install_centos - ;; - opensuse) - install_suse_like - ;; - *) - printf "ERROR: unsupported distro: %s_%s\n" "${DISTRO}" "${DISTRO_VERSION}" - exit 1 - ;; -esac diff --git a/packaging/scripts/test.sh b/packaging/scripts/test.sh deleted file mode 100755 index c39082622..000000000 --- a/packaging/scripts/test.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/sh - -dump_log() { - cat ./netdata.log -} - -trap dump_log EXIT - -wait_for() { - host="${1}" - port="${2}" - name="${3}" - timeout="30" - - if command -v nc > /dev/null ; then - netcat="nc" - elif command -v netcat > /dev/null ; then - netcat="netcat" - else - printf "Unable to find a usable netcat command.\n" - return 1 - fi - - printf "Waiting for %s on %s:%s ... " "${name}" "${host}" "${port}" - - sleep 30 - - i=0 - while ! 
${netcat} -z "${host}" "${port}"; do - sleep 1 - if [ "$i" -gt "$timeout" ]; then - printf "Timed out!\n" - return 1 - fi - i="$((i + 1))" - done - printf "OK\n" -} - -/usr/sbin/netdata -D > ./netdata.log 2>&1 & - -wait_for localhost 19999 netdata || exit 1 - -curl -sS http://127.0.0.1:19999/api/v1/info > ./response || exit 1 - -cat ./response - -jq '.version' ./response || exit 1 - -trap - EXIT - -cp -a /packages/* /artifacts diff --git a/packaging/version b/packaging/version index d3aa76971..6d101d3b1 100644 --- a/packaging/version +++ b/packaging/version @@ -1 +1 @@ -v1.31.0 +v1.32.0 diff --git a/parser/parser.c b/parser/parser.c index 21d7fb3fc..5fc601cea 100644 --- a/parser/parser.c +++ b/parser/parser.c @@ -61,6 +61,8 @@ PARSER *parser_init(RRDHOST *host, void *user, void *input, PARSER_INPUT_TYPE fl rc += parser_add_keyword(parser, PLUGINSD_KEYWORD_LABEL, pluginsd_label); rc += parser_add_keyword(parser, PLUGINSD_KEYWORD_OVERWRITE, pluginsd_overwrite); rc += parser_add_keyword(parser, PLUGINSD_KEYWORD_END, pluginsd_end); + rc += parser_add_keyword(parser, "CLABEL_COMMIT", pluginsd_clabel_commit); + rc += parser_add_keyword(parser, "CLABEL", pluginsd_clabel); rc += parser_add_keyword(parser, PLUGINSD_KEYWORD_BEGIN, pluginsd_begin); rc += parser_add_keyword(parser, "SET", pluginsd_set); } diff --git a/parser/parser.h b/parser/parser.h index 86a837e80..8d11a9007 100644 --- a/parser/parser.h +++ b/parser/parser.h @@ -3,7 +3,7 @@ #ifndef NETDATA_INCREMENTAL_PARSER_H #define NETDATA_INCREMENTAL_PARSER_H 1 -#include "../daemon/common.h" +#include "daemon/common.h" #define PARSER_MAX_CALLBACKS 20 #define PARSER_MAX_RECOVER_KEYWORDS 128 @@ -31,6 +31,8 @@ typedef struct pluginsd_action { PARSER_RC (*variable_action)(void *user, RRDHOST *host, RRDSET *st, char *name, int global, calculated_number value); PARSER_RC (*label_action)(void *user, char *key, char *value, LABEL_SOURCE source); PARSER_RC (*overwrite_action)(void *user, RRDHOST *host, struct label *new_labels); + PARSER_RC (*clabel_action)(void *user, char *key, char *value, LABEL_SOURCE source); + PARSER_RC (*clabel_commit_action)(void *user, RRDHOST *host, struct label *new_labels); PARSER_RC (*guid_action)(void *user, uuid_t *uuid); PARSER_RC (*context_action)(void *user, uuid_t *uuid); @@ -110,5 +112,7 @@ extern PARSER_RC pluginsd_overwrite(char **words, void *user, PLUGINSD_ACTION * extern PARSER_RC pluginsd_guid(char **words, void *user, PLUGINSD_ACTION *plugins_action); extern PARSER_RC pluginsd_context(char **words, void *user, PLUGINSD_ACTION *plugins_action); extern PARSER_RC pluginsd_tombstone(char **words, void *user, PLUGINSD_ACTION *plugins_action); +extern PARSER_RC pluginsd_clabel_commit(char **words, void *user, PLUGINSD_ACTION *plugins_action); +extern PARSER_RC pluginsd_clabel(char **words, void *user, PLUGINSD_ACTION *plugins_action); #endif diff --git a/registry/registry.c b/registry/registry.c index 37a311390..64053fe25 100644 --- a/registry/registry.c +++ b/registry/registry.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../daemon/common.h" +#include "daemon/common.h" #include "registry_internals.h" #define REGISTRY_STATUS_OK "ok" diff --git a/registry/registry.h b/registry/registry.h index 44095c237..5e274487d 100644 --- a/registry/registry.h +++ b/registry/registry.h @@ -49,7 +49,7 @@ #ifndef NETDATA_REGISTRY_H #define NETDATA_REGISTRY_H 1 -#include "../daemon/common.h" +#include "daemon/common.h" #define NETDATA_REGISTRY_COOKIE_NAME "netdata_registry_id" diff --git 
a/registry/registry_db.c b/registry/registry_db.c index d8e2bbd8d..c61a225cc 100644 --- a/registry/registry_db.c +++ b/registry/registry_db.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../daemon/common.h" +#include "daemon/common.h" #include "registry_internals.h" int registry_db_should_be_saved(void) { diff --git a/registry/registry_init.c b/registry/registry_init.c index 36673ff0f..d07daefa5 100644 --- a/registry/registry_init.c +++ b/registry/registry_init.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../daemon/common.h" +#include "daemon/common.h" #include "registry_internals.h" int registry_init(void) { diff --git a/registry/registry_internals.c b/registry/registry_internals.c index 3de6dd17b..cffe3f21d 100644 --- a/registry/registry_internals.c +++ b/registry/registry_internals.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../daemon/common.h" +#include "daemon/common.h" #include "registry_internals.h" struct registry registry; diff --git a/registry/registry_log.c b/registry/registry_log.c index e0e58ede3..b048135e6 100644 --- a/registry/registry_log.c +++ b/registry/registry_log.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../daemon/common.h" +#include "daemon/common.h" #include "registry_internals.h" void registry_log(char action, REGISTRY_PERSON *p, REGISTRY_MACHINE *m, REGISTRY_URL *u, char *name) { diff --git a/registry/registry_machine.c b/registry/registry_machine.c index 8dbeb8ea6..bd1d243a0 100644 --- a/registry/registry_machine.c +++ b/registry/registry_machine.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../daemon/common.h" +#include "daemon/common.h" #include "registry_internals.h" // ---------------------------------------------------------------------------- diff --git a/registry/registry_person.c b/registry/registry_person.c index fae1520c4..acf9d4da2 100644 --- a/registry/registry_person.c +++ b/registry/registry_person.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../daemon/common.h" +#include "daemon/common.h" #include "registry_internals.h" // ---------------------------------------------------------------------------- diff --git a/registry/registry_url.c b/registry/registry_url.c index 559799d8f..699e5e680 100644 --- a/registry/registry_url.c +++ b/registry/registry_url.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../daemon/common.h" +#include "daemon/common.h" #include "registry_internals.h" // ---------------------------------------------------------------------------- diff --git a/spawn/spawn.c b/spawn/spawn.c index e416a96b4..46853ecb9 100644 --- a/spawn/spawn.c +++ b/spawn/spawn.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "spawn.h" -#include "../database/engine/rrdenginelib.h" +#include "database/engine/rrdenginelib.h" static uv_thread_t thread; int spawn_thread_error; diff --git a/spawn/spawn.h b/spawn/spawn.h index 6a4414338..a9f1a0744 100644 --- a/spawn/spawn.h +++ b/spawn/spawn.h @@ -3,7 +3,7 @@ #ifndef NETDATA_SPAWN_H #define NETDATA_SPAWN_H 1 -#include "../daemon/common.h" +#include "daemon/common.h" #define SPAWN_SERVER_COMMAND_LINE_ARGUMENT "--special-spawn-server" diff --git a/spawn/spawn_client.c b/spawn/spawn_client.c index 83dc3c80d..59f1ccb05 100644 --- a/spawn/spawn_client.c +++ b/spawn/spawn_client.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "spawn.h" -#include 
"../database/engine/rrdenginelib.h" +#include "database/engine/rrdenginelib.h" static uv_process_t process; static uv_pipe_t spawn_channel; @@ -139,7 +139,7 @@ static void spawn_process_cmd(struct spawn_cmd_info *cmdinfo) uv_buf_t writebuf[3]; struct write_context *write_ctx; - write_ctx = mallocz(sizeof(*write_ctx)); + write_ctx = callocz(1, sizeof(*write_ctx)); write_ctx->write_req.data = write_ctx; uv_mutex_lock(&cmdinfo->mutex); diff --git a/streaming/README.md b/streaming/README.md index 94ab1f2e8..7f74fb31f 100644 --- a/streaming/README.md +++ b/streaming/README.md @@ -6,19 +6,19 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/streaming/README # Streaming and replication -Each Netdata is able to replicate/mirror its database to another Netdata, by streaming collected -metrics, in real-time to it. This is quite different to [data archiving to third party time-series +Each Netdata is able to replicate/mirror its database to another Netdata, by streaming the collected +metrics in real-time to it. This is quite different to [data archiving to third party time-series databases](/exporting/README.md). When Netdata streams metrics to another Netdata, the receiving one is able to perform everything a Netdata instance is -capable of: +capable of. This includes the following: - Visualize metrics with a dashboard - Run health checks that trigger alarms and send alarm notifications -- Export metrics to a external time-series database +- Export metrics to an external time-series database The nodes that send metrics are called **child** nodes, and the nodes that receive metrics are called **parent** nodes. -There are also **proxies**, which collects metrics from a child and sends it to a parent. +There are also **proxy** nodes, which collects metrics from a child and sends it to a parent. 
## Supported configurations diff --git a/streaming/receiver.c b/streaming/receiver.c index 11191f3c7..e8f8528a7 100644 --- a/streaming/receiver.c +++ b/streaming/receiver.c @@ -11,6 +11,7 @@ void destroy_receiver_state(struct receiver_state *rpt) { freez(rpt->machine_guid); freez(rpt->os); freez(rpt->timezone); + freez(rpt->abbrev_timezone); freez(rpt->tags); freez(rpt->client_ip); freez(rpt->client_port); @@ -49,7 +50,7 @@ static void rrdpush_receiver_thread_cleanup(void *ptr) { } } -#include "../collectors/plugins.d/pluginsd_parser.h" +#include "collectors/plugins.d/pluginsd_parser.h" PARSER_RC streaming_timestamp(char **words, void *user, PLUGINSD_ACTION *plugins_action) { @@ -220,6 +221,8 @@ size_t streaming_parser(struct receiver_state *rpt, struct plugind *cd, FILE *fp parser->plugins_action->overwrite_action = &pluginsd_overwrite_action; parser->plugins_action->chart_action = &pluginsd_chart_action; parser->plugins_action->set_action = &pluginsd_set_action; + parser->plugins_action->clabel_commit_action = &pluginsd_clabel_commit_action; + parser->plugins_action->clabel_action = &pluginsd_clabel_action; user->parser = parser; @@ -307,6 +310,8 @@ static int rrdpush_receive(struct receiver_state *rpt) , rpt->machine_guid , rpt->os , rpt->timezone + , rpt->abbrev_timezone + , rpt->utc_offset , rpt->tags , rpt->program_name , rpt->program_version @@ -341,13 +346,12 @@ static int rrdpush_receive(struct receiver_state *rpt) netdata_mutex_unlock(&rpt->host->receiver_lock); } +#ifdef NETDATA_INTERNAL_CHECKS int ssl = 0; #ifdef ENABLE_HTTPS if (rpt->ssl.conn != NULL) ssl = 1; #endif - -#ifdef NETDATA_INTERNAL_CHECKS info("STREAM %s [receive from [%s]:%s]: client willing to stream metrics for host '%s' with machine_guid '%s': update every = %d, history = %ld, memory mode = %s, health %s,%s tags '%s'" , rpt->hostname , rpt->client_ip @@ -451,11 +455,11 @@ static int rrdpush_receive(struct receiver_state *rpt) cd.version = rpt->stream_version; -#if defined(ENABLE_ACLK) && !defined(ACLK_NG) +#if defined(ENABLE_ACLK) // in case we have cloud connection we inform cloud // new slave connected if (netdata_cloud_setting) - aclk_host_state_update(rpt->host, ACLK_CMD_CHILD_CONNECT); + aclk_host_state_update(rpt->host, 1); #endif size_t count = streaming_parser(rpt, &cd, fp); @@ -465,11 +469,11 @@ static int rrdpush_receive(struct receiver_state *rpt) error("STREAM %s [receive from [%s]:%s]: disconnected (completed %zu updates).", rpt->hostname, rpt->client_ip, rpt->client_port, count); -#if defined(ENABLE_ACLK) && !defined(ACLK_NG) +#if defined(ENABLE_ACLK) // in case we have cloud connection we inform cloud // new slave connected if (netdata_cloud_setting) - aclk_host_state_update(rpt->host, ACLK_CMD_CHILD_DISCONNECT); + aclk_host_state_update(rpt->host, 0); #endif // During a shutdown there is cleanup code in rrdhost that will cancel the sender thread diff --git a/streaming/rrdpush.c b/streaming/rrdpush.c index f54fc609e..53a897699 100644 --- a/streaming/rrdpush.c +++ b/streaming/rrdpush.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later #include "rrdpush.h" -#include "../parser/parser.h" +#include "parser/parser.h" /* * rrdpush @@ -183,6 +183,24 @@ static inline int need_to_send_chart_definition(RRDSET *st) { return 0; } +// chart labels +void rrdpush_send_clabels(RRDHOST *host, RRDSET *st) { + struct label_index *labels_c = &st->state->labels; + if (labels_c) { + netdata_rwlock_rdlock(&host->labels.labels_rwlock); + struct label *lbl = labels_c->head; + while(lbl) { + 
buffer_sprintf(host->sender->build, + "CLABEL \"%s\" \"%s\" %d\n", lbl->key, lbl->value, (int)lbl->label_source); + + lbl = lbl->next; + } + if (labels_c->head) + buffer_sprintf(host->sender->build,"CLABEL_COMMIT\n"); + netdata_rwlock_unlock(&host->labels.labels_rwlock); + } +} + // Send the current chart definition. // Assumes that collector thread has already called sender_start for mutex / buffer state. static inline void rrdpush_send_chart_definition_nolock(RRDSET *st) { @@ -224,6 +242,10 @@ static inline void rrdpush_send_chart_definition_nolock(RRDSET *st) { , (st->module_name)?st->module_name:"" ); + // send the chart labels + if (host->sender->version >= STREAM_VERSION_CLABELS) + rrdpush_send_clabels(host, st); + // send the dimensions RRDDIM *rd; rrddim_foreach_read(rd, st) { @@ -464,13 +486,14 @@ void *rrdpush_receiver_thread(void *ptr); int rrdpush_receiver_thread_spawn(struct web_client *w, char *url) { info("clients wants to STREAM metrics."); - char *key = NULL, *hostname = NULL, *registry_hostname = NULL, *machine_guid = NULL, *os = "unknown", *timezone = "unknown", *tags = NULL; + char *key = NULL, *hostname = NULL, *registry_hostname = NULL, *machine_guid = NULL, *os = "unknown", *timezone = "unknown", *abbrev_timezone = "UTC", *tags = NULL; + int32_t utc_offset = 0; int update_every = default_rrd_update_every; uint32_t stream_version = UINT_MAX; char buf[GUID_LEN + 1]; struct rrdhost_system_info *system_info = callocz(1, sizeof(struct rrdhost_system_info)); - + system_info->hops = 1; while(url) { char *value = mystrsep(&url, "&"); if(!value || !*value) continue; @@ -493,6 +516,12 @@ int rrdpush_receiver_thread_spawn(struct web_client *w, char *url) { os = value; else if(!strcmp(name, "timezone")) timezone = value; + else if(!strcmp(name, "abbrev_timezone")) + abbrev_timezone = value; + else if(!strcmp(name, "utc_offset")) + utc_offset = (int32_t)strtol(value, NULL, 0); + else if(!strcmp(name, "hops")) + system_info->hops = (uint16_t) strtoul(value, NULL, 0); else if(!strcmp(name, "tags")) tags = value; else if(!strcmp(name, "ver")) @@ -680,6 +709,8 @@ int rrdpush_receiver_thread_spawn(struct web_client *w, char *url) { rpt->machine_guid = strdupz(machine_guid); rpt->os = strdupz(os); rpt->timezone = strdupz(timezone); + rpt->abbrev_timezone = strdupz(abbrev_timezone); + rpt->utc_offset = utc_offset; rpt->tags = (tags)?strdupz(tags):NULL; rpt->client_ip = strdupz(w->client_ip); rpt->client_port = strdupz(w->client_port); diff --git a/streaming/rrdpush.h b/streaming/rrdpush.h index 225d0c299..027ccd102 100644 --- a/streaming/rrdpush.h +++ b/streaming/rrdpush.h @@ -3,17 +3,17 @@ #ifndef NETDATA_RRDPUSH_H #define NETDATA_RRDPUSH_H 1 -#include "../database/rrd.h" -#include "../libnetdata/libnetdata.h" +#include "database/rrd.h" +#include "libnetdata/libnetdata.h" #include "web/server/web_client.h" #include "daemon/common.h" #define CONNECTED_TO_SIZE 100 -// #define STREAMING_PROTOCOL_CURRENT_VERSION (uint32_t)4 Gap-filling -#define STREAMING_PROTOCOL_CURRENT_VERSION (uint32_t)3 -#define VERSION_GAP_FILLING 4 +#define STREAMING_PROTOCOL_CURRENT_VERSION (uint32_t)4 #define STREAM_VERSION_CLAIM 3 +#define STREAM_VERSION_CLABELS 4 +#define VERSION_GAP_FILLING 5 #define STREAMING_PROTOCOL_VERSION "1.1" #define START_STREAMING_PROMPT "Hit me baby, push them over..." @@ -72,6 +72,8 @@ struct receiver_state { char *machine_guid; char *os; char *timezone; // Unused? 
+ char *abbrev_timezone; + int32_t utc_offset; char *tags; char *client_ip; // Duplicated in pluginsd char *client_port; // Duplicated in pluginsd diff --git a/streaming/sender.c b/streaming/sender.c index 1dee1f050..0abfac180 100644 --- a/streaming/sender.c +++ b/streaming/sender.c @@ -214,7 +214,7 @@ static int rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_po char http[HTTP_HEADER_SIZE + 1]; int eol = snprintfz(http, HTTP_HEADER_SIZE, - "STREAM key=%s&hostname=%s&registry_hostname=%s&machine_guid=%s&update_every=%d&os=%s&timezone=%s&tags=%s&ver=%u" + "STREAM key=%s&hostname=%s&registry_hostname=%s&machine_guid=%s&update_every=%d&os=%s&timezone=%s&abbrev_timezone=%s&utc_offset=%d&hops=%d&tags=%s&ver=%u" "&NETDATA_SYSTEM_OS_NAME=%s" "&NETDATA_SYSTEM_OS_ID=%s" "&NETDATA_SYSTEM_OS_ID_LIKE=%s" @@ -250,6 +250,9 @@ static int rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_po , default_rrd_update_every , host->os , host->timezone + , host->abbrev_timezone + , host->utc_offset + , host->system_info->hops + 1 , (host->tags) ? host->tags : "" , STREAMING_PROTOCOL_CURRENT_VERSION , se.os_name @@ -424,7 +427,9 @@ void attempt_to_send(struct sender_state *s) { rrdpush_send_labels(s->host); +#ifdef NETDATA_INTERNAL_CHECKS struct circular_buffer *cb = s->buffer; +#endif netdata_thread_disable_cancelability(); netdata_mutex_lock(&s->mutex); diff --git a/system/.install-type b/system/.install-type new file mode 100644 index 000000000..ebac7be59 --- /dev/null +++ b/system/.install-type @@ -0,0 +1 @@ +INSTALL_TYPE='custom' diff --git a/system/Makefile.am b/system/Makefile.am index 5323738c9..a88ccab65 100644 --- a/system/Makefile.am +++ b/system/Makefile.am @@ -23,6 +23,10 @@ dist_config_SCRIPTS = \ edit-config \ $(NULL) +dist_config_DATA = \ + .install-type \ + $(NULL) + # Explicitly install directories to avoid permission issues due to umask install-exec-local: $(INSTALL) -d $(DESTDIR)$(configdir) diff --git a/system/netdata.service.in b/system/netdata.service.in index 1947b15d4..813b2421f 100644 --- a/system/netdata.service.in +++ b/system/netdata.service.in @@ -17,7 +17,6 @@ ExecStartPre=/bin/mkdir -p @localstatedir_POST@/cache/netdata ExecStartPre=/bin/chown -R netdata:netdata @localstatedir_POST@/cache/netdata ExecStartPre=/bin/mkdir -p @localstatedir_POST@/run/netdata ExecStartPre=/bin/chown -R netdata:netdata @localstatedir_POST@/run/netdata -ExecStopPost=@pluginsdir_POST@/reset_netdata_trace.sh PermissionsStartOnly=true # saving a big db on slow disks may need some time diff --git a/tests/ebpf/README.md b/tests/ebpf/README.md new file mode 100644 index 000000000..86e3dd87d --- /dev/null +++ b/tests/ebpf/README.md @@ -0,0 +1 @@ +The file `sync_tester.c` can be used to fill all dimensions present in the synchronization submenu.
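The `streaming/sender.c` hunk above extends the STREAM handshake with `abbrev_timezone`, `utc_offset` and `hops`. As a rough, self-contained sketch of what the first request line looks like after this change (all field values are illustrative placeholders, and plain `snprintf` stands in for the agent's `snprintfz`):

```c
#include <stdio.h>

int main(void) {
    char http[1024];
    // Compose the first line of a STREAM handshake as extended in this version.
    snprintf(http, sizeof(http),
             "STREAM key=%s&hostname=%s&machine_guid=%s&update_every=%d"
             "&os=%s&timezone=%s&abbrev_timezone=%s&utc_offset=%d&hops=%d&ver=%u",
             "11111111-2222-3333-4444-555555555555",  // API key (placeholder)
             "child-node",                            // hostname
             "66666666-7777-8888-9999-000000000000",  // machine GUID (placeholder)
             1,                                       // update every, in seconds
             "linux",                                 // os
             "Europe/Athens",                         // timezone
             "EET",                                   // abbreviated timezone
             7200,                                    // UTC offset in seconds
             1,                                       // hops: one level below the parent
             4u);                                     // protocol version
    puts(http);
    return 0;
}
```

A proxy forwarding such a connection would advertise `hops` incremented by one, which is what the `host->system_info->hops + 1` argument in the hunk does.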
diff --git a/tests/ebpf/sync_tester.c b/tests/ebpf/sync_tester.c new file mode 100644 index 000000000..373c85c6d --- /dev/null +++ b/tests/ebpf/sync_tester.c @@ -0,0 +1,120 @@ +#include <fcntl.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <sys/types.h> + +#define _GNU_SOURCE /* See feature_test_macros(7) */ +#define __USE_GNU +#include <fcntl.h> +#include <unistd.h> + +void test_sync_file_range(char *output, char *text, size_t length) +{ + int fd = open (output, O_WRONLY | O_CREAT | O_APPEND, 0660); + if (fd < 0 ) { + perror("Cannot open file"); + return; + } + + int i; + size_t offset = 0; + for ( i = 0 ; i < 10000; i++ ) { + write(fd, text, length); + sync_file_range(fd, offset, length, SYNC_FILE_RANGE_WRITE); + offset += length; + } + + close(fd); + sleep(5); +} + +// test based on IBM example https://www.ibm.com/support/knowledgecenter/en/ssw_ibm_i_71/apis/msync.htm +void test_msync(char *output, char *text, size_t length) +{ + int pagesize = sysconf(_SC_PAGE_SIZE); + if (pagesize < 0) { + perror("Cannot get page size"); + return; + } + + int fd = open(output, (O_CREAT | O_TRUNC | O_RDWR), (S_IRWXU | S_IRWXG | S_IRWXO)); + if (fd < 0 ) { + perror("Cannot open file"); + return; + } + + off_t lastoffset = lseek( fd, pagesize, SEEK_SET); + ssize_t written = write(fd, " ", 1); + if ( written != 1 ) { + perror("Write error. "); + close(fd); + return; + } + + off_t my_offset = 0; + void *address = mmap(NULL, pagesize, PROT_WRITE, MAP_SHARED, fd, my_offset); + + if ( address == MAP_FAILED ) { + perror("Map error. "); + close(fd); + return; + } + + (void) strcpy( (char*) address, text); + + if ( msync( address, pagesize, MS_SYNC) < 0 ) { + perror("msync failed with error:"); + } + + close(fd); + sleep(5); +} + +void test_synchronization(char *output, char *text, size_t length, int (*fcnt)(int)) +{ + int fd = open (output, O_WRONLY | O_CREAT | O_APPEND, 0660); + if (fd < 0 ) { + perror("Cannot open file"); + return; + } + + int i; + for ( i = 0 ; i < 10000; i++ ) + write(fd, text, length); + + fcnt(fd); + close(fd); + + sleep(5); +} + +void remove_files(char **files) { + size_t i = 0; + while (files[i]) { + unlink(files[i]); + i++; + } +} + +int main() +{ + char *default_text = { "This is a simple example to test a PR.
The sleep is used to create different peaks on charts.\n" }; + char *files[] = { "fsync.txt", "fdatasync.txt", "syncfs.txt", "msync.txt", "sync_file_range.txt", NULL }; + size_t length = strlen(default_text); + test_synchronization(files[0], default_text, length, fsync); + test_synchronization(files[1], default_text, length, fdatasync); + test_synchronization(files[2], default_text, length, syncfs); + + test_msync(files[3], default_text, length); + + test_sync_file_range(files[4], default_text, length); + + sync(); + + remove_files(files); + + return 0; +} diff --git a/tests/profile/benchmark-procfile-parser.c b/tests/profile/benchmark-procfile-parser.c index 991e2dfc8..214ca9417 100644 --- a/tests/profile/benchmark-procfile-parser.c +++ b/tests/profile/benchmark-procfile-parser.c @@ -17,7 +17,7 @@ extern size_t procfile_max_allocation; static inline void pflines_reset(pflines *fl) { - // debug(D_PROCFILE, PF_PREFIX ": reseting lines"); + // debug(D_PROCFILE, PF_PREFIX ": resetting lines"); fl->len = 0; } @@ -29,7 +29,7 @@ static inline void pflines_free(pflines *fl) { } static inline void pfwords_reset(pfwords *fw) { - // debug(D_PROCFILE, PF_PREFIX ": reseting words"); + // debug(D_PROCFILE, PF_PREFIX ": resetting words"); fw->len = 0; } diff --git a/web/api/formatters/rrd2json.c b/web/api/formatters/rrd2json.c index 4344af4ac..29bb4beb5 100644 --- a/web/api/formatters/rrd2json.c +++ b/web/api/formatters/rrd2json.c @@ -18,7 +18,6 @@ static inline void free_single_rrdrim(RRDDIM *temp_rd, int archive_mode) freez(temp_rd->rrdset); } } - freez(temp_rd->state->metric_uuid); freez(temp_rd->state); freez(temp_rd); } @@ -95,8 +94,6 @@ void build_context_param_list(struct context_param **param_list, RRDSET *st) memcpy(rd->state, rd1->state, sizeof(*rd->state)); memcpy(&rd->state->collect_ops, &rd1->state->collect_ops, sizeof(struct rrddim_collect_ops)); memcpy(&rd->state->query_ops, &rd1->state->query_ops, sizeof(struct rrddim_query_ops)); - rd->state->metric_uuid = mallocz(sizeof(uuid_t)); - uuid_copy(*rd->state->metric_uuid, *rd1->state->metric_uuid); rd->next = (*param_list)->rd; (*param_list)->rd = rd; } diff --git a/web/api/netdata-swagger.json b/web/api/netdata-swagger.json index 2beaee92f..5c2bba9a8 100644 --- a/web/api/netdata-swagger.json +++ b/web/api/netdata-swagger.json @@ -1,7 +1,7 @@ { "openapi": "3.0.0", "info": { - "title": "NetData API", + "title": "Netdata API", "description": "Real-time performance and health monitoring.", "version": "1.11.1_rolling" }, @@ -1343,7 +1343,7 @@ }, "priority": { "type": "number", - "description": "The relative priority of the chart. NetData does not care about priorities. This is just an indication of importance for the chart viewers to sort charts of higher priority (lower number) closer to the top. Priority sorting should only be used among charts of the same type or family." + "description": "The relative priority of the chart. Netdata does not care about priorities. This is just an indication of importance for the chart viewers to sort charts of higher priority (lower number) closer to the top. Priority sorting should only be used among charts of the same type or family." }, "enabled": { "type": "boolean", diff --git a/web/api/netdata-swagger.yaml b/web/api/netdata-swagger.yaml index ebade7991..19f4ded56 100644 --- a/web/api/netdata-swagger.yaml +++ b/web/api/netdata-swagger.yaml @@ -1,6 +1,6 @@ openapi: 3.0.0 info: - title: NetData API + title: Netdata API description: Real-time performance and health monitoring. 
version: 1.11.1_rolling paths: @@ -456,7 +456,7 @@ paths: required: false allowEmptyValue: true schema: - oneOf: + oneOf: - type: string enum: - green @@ -478,7 +478,7 @@ paths: required: false allowEmptyValue: true schema: - oneOf: + oneOf: - type: string enum: - green @@ -1082,7 +1082,7 @@ components: description: The title of the chart. priority: type: number - description: The relative priority of the chart. NetData does not care about + description: The relative priority of the chart. Netdata does not care about priorities. This is just an indication of importance for the chart viewers to sort charts of higher priority (lower number) closer to the top. Priority sorting should only be used among charts of the diff --git a/web/api/queries/query.c b/web/api/queries/query.c index 56e2e2850..216417ae8 100644 --- a/web/api/queries/query.c +++ b/web/api/queries/query.c @@ -389,6 +389,7 @@ static inline void do_dimension_variablestep( , long dim_id_in_rrdr , time_t after_wanted , time_t before_wanted + , uint32_t options ){ // RRDSET *st = r->st; @@ -445,7 +446,11 @@ static inline void do_dimension_variablestep( // db_now has a different value than above if (likely(now >= db_now)) { if (likely(does_storage_number_exist(n_curr))) { - value = unpack_storage_number(n_curr); + if (options & RRDR_OPTION_ANOMALY_BIT) + value = (n_curr & SN_ANOMALY_BIT) ? 0.0 : 100.0; + else + value = unpack_storage_number(n_curr); + if (likely(value != 0.0)) values_in_group_non_zero++; @@ -530,8 +535,11 @@ static inline void do_dimension_fixedstep( , long dim_id_in_rrdr , time_t after_wanted , time_t before_wanted + , uint32_t options ){ +#ifdef NETDATA_INTERNAL_CHECKS RRDSET *st = r->st; +#endif time_t now = after_wanted, @@ -593,7 +601,11 @@ static inline void do_dimension_fixedstep( error("INTERNAL CHECK: Unaligned query for %s, database time: %ld, expected time: %ld", rd->id, (long)handle.rrdeng.now, (long)now); } #endif - value = unpack_storage_number(n); + if (options & RRDR_OPTION_ANOMALY_BIT) + value = (n & SN_ANOMALY_BIT) ? 
0.0 : 100.0; + else + value = unpack_storage_number(n); + if(likely(value != 0.0)) values_in_group_non_zero++; @@ -1100,6 +1112,7 @@ static RRDR *rrd2rrdr_fixedstep( , c , after_wanted , before_wanted + , options ); if(r->od[c] & RRDR_DIMENSION_NONZERO) @@ -1476,6 +1489,7 @@ static RRDR *rrd2rrdr_variablestep( , c , after_wanted , before_wanted + , options ); if(r->od[c] & RRDR_DIMENSION_NONZERO) @@ -1644,4 +1658,4 @@ RRDR *rrd2rrdr( return rrd2rrdr_fixedstep(st, points_requested, after_requested, before_requested, group_method, resampling_time_requested, options, dimensions, rrd_update_every, first_entry_t, last_entry_t, absolute_period_requested, context_param_list); -} \ No newline at end of file +} diff --git a/web/api/queries/rrdr.h b/web/api/queries/rrdr.h index b302f8bd4..3637df687 100644 --- a/web/api/queries/rrdr.h +++ b/web/api/queries/rrdr.h @@ -24,6 +24,7 @@ typedef enum rrdr_options { RRDR_OPTION_MATCH_NAMES = 0x00008000, // when filtering dimensions, match only names RRDR_OPTION_CUSTOM_VARS = 0x00010000, // when wrapping response in a JSON, return custom variables in response RRDR_OPTION_ALLOW_PAST = 0x00020000, // The after parameter can extend in the past before the first entry + RRDR_OPTION_ANOMALY_BIT = 0x00040000, // Return the anomaly bit stored in each collected_number } RRDR_OPTIONS; typedef enum rrdr_value_flag { @@ -99,7 +100,7 @@ typedef struct rrdresult { #define rrdr_rows(r) ((r)->rows) -#include "../../../database/rrd.h" +#include "database/rrd.h" extern void rrdr_free(RRDR *r); extern RRDR *rrdr_create(struct rrdset *st, long n, struct context_param *context_param_list); diff --git a/web/api/tests/valid_urls.c b/web/api/tests/valid_urls.c index d8c261c51..30ae23247 100644 --- a/web/api/tests/valid_urls.c +++ b/web/api/tests/valid_urls.c @@ -1,9 +1,9 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../../../libnetdata/libnetdata.h" -#include "../../../libnetdata/required_dummies.h" -#include "../../../database/rrd.h" -#include "../../../web/server/web_client.h" +#include "libnetdata/libnetdata.h" +#include "libnetdata/required_dummies.h" +#include "database/rrd.h" +#include "web/server/web_client.h" #include #include #include @@ -410,7 +410,7 @@ static void empty_url(void **state) } /* If the %-escape is being performed at the correct time then the url should not be treated as a query, but instead - as a path "/api/v1/info?blah?" which should despatch into the API with the given values. + as a path "/api/v1/info?blah?" which should dispatch into the API with the given values. 
*/ static void not_a_query(void **state) { diff --git a/web/api/tests/web_api.c b/web/api/tests/web_api.c index 0a741e084..b96213255 100644 --- a/web/api/tests/web_api.c +++ b/web/api/tests/web_api.c @@ -1,9 +1,9 @@ // SPDX-License-Identifier: GPL-3.0-or-later -#include "../../../libnetdata/libnetdata.h" -#include "../../../libnetdata/required_dummies.h" -#include "../../../database/rrd.h" -#include "../../../web/server/web_client.h" +#include "libnetdata/libnetdata.h" +#include "libnetdata/required_dummies.h" +#include "database/rrd.h" +#include "web/server/web_client.h" #include #include #include diff --git a/web/api/web_api_v1.c b/web/api/web_api_v1.c index 96fcf485a..d335dd687 100644 --- a/web/api/web_api_v1.c +++ b/web/api/web_api_v1.c @@ -36,6 +36,7 @@ static struct { , {"match-names" , 0 , RRDR_OPTION_MATCH_NAMES} , {"showcustomvars" , 0 , RRDR_OPTION_CUSTOM_VARS} , {"allow_past" , 0 , RRDR_OPTION_ALLOW_PAST} + , {"anomaly-bit" , 0 , RRDR_OPTION_ANOMALY_BIT} , { NULL, 0, 0} }; @@ -867,8 +868,8 @@ static inline void web_client_api_request_v1_info_mirrored_hosts(BUFFER *wb) { netdata_mutex_lock(&host->receiver_lock); buffer_sprintf( - wb, "\t\t{ \"guid\": \"%s\", \"reachable\": %s, \"claim_id\": ", host->machine_guid, - (host->receiver || host == localhost) ? "true" : "false"); + wb, "\t\t{ \"guid\": \"%s\", \"reachable\": %s, \"hops\": %d, \"claim_id\": ", host->machine_guid, + (host->receiver || host == localhost) ? "true" : "false", host->system_info ? host->system_info->hops : (host == localhost) ? 0 : 1); netdata_mutex_unlock(&host->receiver_lock); rrdhost_aclk_state_lock(host); @@ -980,10 +981,27 @@ inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb) #ifdef ENABLE_ACLK buffer_strcat(wb, "\t\"cloud-available\": true,\n"); #ifdef ACLK_NG - buffer_strcat(wb, "\t\"aclk-implementation\": \"Next Generation\",\n"); + buffer_strcat(wb, "\t\"aclk-ng-available\": true,\n"); #else - buffer_strcat(wb, "\t\"aclk-implementation\": \"legacy\",\n"); + buffer_strcat(wb, "\t\"aclk-ng-available\": false,\n"); #endif +#if defined(ACLK_NG) && defined(ENABLE_NEW_CLOUD_PROTOCOL) + buffer_strcat(wb, "\t\"aclk-ng-new-cloud-protocol\": true,\n"); +#else + buffer_strcat(wb, "\t\"aclk-ng-new-cloud-protocol\": false,\n"); +#endif +#ifdef ACLK_LEGACY + buffer_strcat(wb, "\t\"aclk-legacy-available\": true,\n"); +#else + buffer_strcat(wb, "\t\"aclk-legacy-available\": false,\n"); +#endif + buffer_strcat(wb, "\t\"aclk-implementation\": \""); + if (aclk_ng) { + buffer_strcat(wb, "Next Generation"); + } else { + buffer_strcat(wb, "legacy"); + } + buffer_strcat(wb, "\",\n"); #else buffer_strcat(wb, "\t\"cloud-available\": false,\n"); #endif @@ -1071,12 +1089,109 @@ inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb) buffer_strcat(wb, "\t\"metrics-count\": "); analytics_get_data(analytics_data.netdata_metrics_count, wb); - buffer_strcat(wb, "\n"); - buffer_strcat(wb, "}"); +#if defined(ENABLE_ML) + buffer_strcat(wb, ",\n"); + char *ml_info = ml_get_host_info(host); + + buffer_strcat(wb, "\t\"ml-info\": "); + buffer_strcat(wb, ml_info); + + free(ml_info); +#endif + + buffer_strcat(wb, "\n}"); return 0; } +#if defined(ENABLE_ML) +int web_client_api_request_v1_anomaly_events(RRDHOST *host, struct web_client *w, char *url) { + if (!netdata_ready) + return HTTP_RESP_BACKEND_FETCH_FAILED; + + uint32_t after = 0, before = 0; + + while (url) { + char *value = mystrsep(&url, "&"); + if (!value || !*value) + continue; + + char *name = mystrsep(&value, "="); + if 
(!name || !*name) + continue; + if (!value || !*value) + continue; + + if (!strcmp(name, "after")) + after = (uint32_t) (strtoul(value, NULL, 0) / 1000); + else if (!strcmp(name, "before")) + before = (uint32_t) (strtoul(value, NULL, 0) / 1000); + } + + char *s; + if (!before || !after) + s = strdup("{\"error\": \"missing after/before parameters\" }\n"); + else { + s = ml_get_anomaly_events(host, "AD1", 1, after, before); + if (!s) + s = strdup("{\"error\": \"json string is empty\" }\n"); + } + + BUFFER *wb = w->response.data; + buffer_flush(wb); + + wb->contenttype = CT_APPLICATION_JSON; + buffer_strcat(wb, s); + buffer_no_cacheable(wb); + + freez(s); + + return HTTP_RESP_OK; +} + +int web_client_api_request_v1_anomaly_event_info(RRDHOST *host, struct web_client *w, char *url) { + if (!netdata_ready) + return HTTP_RESP_BACKEND_FETCH_FAILED; + + uint32_t after = 0, before = 0; + + while (url) { + char *value = mystrsep(&url, "&"); + if (!value || !*value) + continue; + + char *name = mystrsep(&value, "="); + if (!name || !*name) + continue; + if (!value || !*value) + continue; + + if (!strcmp(name, "after")) + after = (uint32_t) strtoul(value, NULL, 0); + else if (!strcmp(name, "before")) + before = (uint32_t) strtoul(value, NULL, 0); + } + + char *s; + if (!before || !after) + s = strdup("{\"error\": \"missing after/before parameters\" }\n"); + else { + s = ml_get_anomaly_event_info(host, "AD1", 1, after, before); + if (!s) + s = strdup("{\"error\": \"json string is empty\" }\n"); + } + + BUFFER *wb = w->response.data; + buffer_flush(wb); + wb->contenttype = CT_APPLICATION_JSON; + buffer_strcat(wb, s); + buffer_no_cacheable(wb); + + freez(s); + return HTTP_RESP_OK; +} +#endif // defined(ENABLE_ML) + inline int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url) { (void)url; if (!netdata_ready) return HTTP_RESP_BACKEND_FETCH_FAILED; @@ -1090,6 +1205,23 @@ inline int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, c return HTTP_RESP_OK; } +static int web_client_api_request_v1_aclk_state(RRDHOST *host, struct web_client *w, char *url) { + UNUSED(url); + UNUSED(host); + if (!netdata_ready) return HTTP_RESP_BACKEND_FETCH_FAILED; + + BUFFER *wb = w->response.data; + buffer_flush(wb); + + char *str = aclk_state_json(); + buffer_strcat(wb, str); + freez(str); + + wb->contenttype = CT_APPLICATION_JSON; + buffer_no_cacheable(wb); + return HTTP_RESP_OK; +} + static struct api_command { const char *command; uint32_t hash; @@ -1114,7 +1246,14 @@ static struct api_command { { "alarm_variables", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_variables }, { "alarm_count", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_count }, { "allmetrics", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_allmetrics }, + +#if defined(ENABLE_ML) + { "anomaly_events", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_anomaly_events }, + { "anomaly_event_info", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_anomaly_event_info }, +#endif + { "manage/health", 0, WEB_CLIENT_ACL_MGMT, web_client_api_request_v1_mgmt_health }, + { "aclk", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_aclk_state }, // terminator { NULL, 0, WEB_CLIENT_ACL_NONE, NULL }, }; diff --git a/web/gui/bundle_dashboard.py b/web/gui/bundle_dashboard.py index 4cde01af3..e1815b1e2 100755 --- a/web/gui/bundle_dashboard.py +++ b/web/gui/bundle_dashboard.py @@ -2,7 +2,9 @@ # # Copyright: © 2021 Netdata Inc. 
# SPDX-License-Identifier: GPL-3.0-or-later -'''Bundle the dashboard code into the agent repo.''' +'''Bundle the dashboard code into the agent repo. + + This is designed to be run as part of a GHA workflow, but will work fine outside of one.''' import os import shutil @@ -69,12 +71,19 @@ dist_webstaticmedia_DATA = \\ def copy_dashboard(tag): '''Fetch and bundle the dashboard code.''' + print('Preparing target directory') shutil.rmtree(BASEPATH) BASEPATH.mkdir() + print('::group::Fetching dashboard release tarball') subprocess.check_call('curl -L -o dashboard.tar.gz ' + URLTEMPLATE.format(tag), shell=True) + print('::endgroup::') + print('::group::Extracting dashboard release tarball') subprocess.check_call('tar -xvzf dashboard.tar.gz -C ' + str(BASEPATH) + ' --strip-components=1', shell=True) + print('::endgroup::') + print('Copying README.md') BASEPATH.joinpath('README.md').symlink_to('../.dashboard-notice.md') -# BASEPATH.joinpath('..', 'dashboard.tar.gz').unlink() + print('Removing dashboard release tarball') + BASEPATH.joinpath('..', 'dashboard.tar.gz').unlink() def genfilelist(path): @@ -87,6 +96,7 @@ def genfilelist(path): def write_makefile(): '''Write out the makefile for the dashboard code.''' + print('Generating Makefile') MAKEFILEDATA = MAKEFILETEMPLATE.format( genfilelist(BASEPATH), genfilelist(BASEPATH.joinpath('css')), @@ -101,5 +111,15 @@ def write_makefile(): BASEPATH.joinpath('Makefile.am').write_text(MAKEFILEDATA) +def list_changed_files(): + '''Create a list of changed files, and set it in an environment variable.''' + if 'GITHUB_ENV' in os.environ: + print('Generating file list for commit.') + subprocess.check_call('echo "COMMIT_FILES<<EOF" >> $GITHUB_ENV', shell=True) + subprocess.check_call('git status --porcelain=v1 --no-renames --untracked-files=all | rev | cut -d \' \' -f 1 | rev >> $GITHUB_ENV', shell=True) + subprocess.check_call('echo "EOF" >> $GITHUB_ENV', shell=True) + + copy_dashboard(sys.argv[1]) write_makefile() +list_changed_files() diff --git a/web/gui/custom/README.md b/web/gui/custom/README.md index 323f1b56b..a646e4b5c 100644 --- a/web/gui/custom/README.md +++ b/web/gui/custom/README.md @@ -16,15 +16,14 @@ You can: You can also add Netdata charts to existing web pages. -Check this **[very simple working example of a custom dashboard](http://netdata.firehol.org/demo.html)**, and its -**[html source](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/demo.html)**. +Check this **[very simple working example of a custom dashboard](http://netdata.firehol.org/demo.html)**. You should also look at the [custom dashboard template](https://my-netdata.io/dashboard.html), which contains samples of all -supported charts. The code is [here](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/dashboard.html). +supported charts. The code is [here](http://netdata.firehol.org/dashboard.html). If you plan to put the dashboard on TV, check out -[tv.html](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/tv.html). Here's is a screenshot of it, +[tv.html](http://netdata.firehol.org/tv.html). Here is a screenshot of it, monitoring two servers on the same page: ![image](https://cloud.githubusercontent.com/assets/2662304/14252187/d8d5f78e-fa8e-11e5-990d-99821d38c874.png) @@ -40,6 +39,10 @@ that directory and called `index.html`.\ Note: index.html has a different syntax. Don't use it as a template for simple custom dashboards. +> Some operating systems will use `/opt/netdata/usr/share/netdata/web` as the web directory.
If you're not sure where +> yours is, navigate to `http://NODE:19999/netdata.conf` in your browser, replacing `NODE` with the IP address or hostname +> of your node, and find the `# web files directory = ` setting. The value listed is the web directory for your system. + ## Example empty dashboard If you need to create a new dashboard on an empty page, we suggest the following diff --git a/web/gui/dashboard/Makefile.am b/web/gui/dashboard/Makefile.am index 1a230cc3f..62e9821b0 100644 --- a/web/gui/dashboard/Makefile.am +++ b/web/gui/dashboard/Makefile.am @@ -26,7 +26,7 @@ dist_web_DATA = \ index.html \ infographic.html \ manifest.json \ - precache-manifest.1a96c027aec7f2d07341fa69aa6b82fa.js \ + precache-manifest.14a1e41ead8f8b6e26e356372042ef5a.js \ refresh-badges.js \ robots.txt \ service-worker.js \ @@ -115,7 +115,7 @@ dist_weblib_DATA = \ lib/dygraph-smooth-plotter-c91c859.js \ lib/fontawesome-all-5.0.1.min.js \ lib/gauge-1.3.2.min.js \ - lib/jquery-2.2.4.min.js \ + lib/jquery-3.6.0.min.js \ lib/jquery.easypiechart-97b5824.min.js \ lib/jquery.peity-3.2.0.min.js \ lib/jquery.sparkline-2.1.2.min.js \ @@ -127,41 +127,41 @@ dist_weblib_DATA = \ webstaticcssdir=$(webdir)/static/css dist_webstaticcss_DATA = \ - static/css/2.6b842ba1.chunk.css \ - static/css/2.6b842ba1.chunk.css.map \ + static/css/2.20fd0a40.chunk.css \ + static/css/2.20fd0a40.chunk.css.map \ static/css/4.a36e3b73.chunk.css \ static/css/4.a36e3b73.chunk.css.map \ - static/css/main.d931154a.chunk.css \ - static/css/main.d931154a.chunk.css.map \ + static/css/main.a46a34fa.chunk.css \ + static/css/main.a46a34fa.chunk.css.map \ $(NULL) webstaticjsdir=$(webdir)/static/js dist_webstaticjs_DATA = \ - static/js/10.db7e8e19.chunk.js \ - static/js/10.db7e8e19.chunk.js.map \ - static/js/2.252b3a57.chunk.js \ - static/js/2.252b3a57.chunk.js.LICENSE \ - static/js/2.252b3a57.chunk.js.map \ - static/js/3.99238dcb.chunk.js \ - static/js/3.99238dcb.chunk.js.map \ - static/js/4.6ef9bdcb.chunk.js \ - static/js/4.6ef9bdcb.chunk.js.map \ - static/js/5.96a698ab.chunk.js \ - static/js/5.96a698ab.chunk.js.LICENSE \ - static/js/5.96a698ab.chunk.js.map \ - static/js/6.d9713eb9.chunk.js \ - static/js/6.d9713eb9.chunk.js.map \ - static/js/7.12e939e5.chunk.js \ - static/js/7.12e939e5.chunk.js.map \ - static/js/8.91852cf4.chunk.js \ - static/js/8.91852cf4.chunk.js.map \ - static/js/9.e3a9ce26.chunk.js \ - static/js/9.e3a9ce26.chunk.js.map \ - static/js/main.8aa70c75.chunk.js \ - static/js/main.8aa70c75.chunk.js.LICENSE \ - static/js/main.8aa70c75.chunk.js.map \ - static/js/runtime-main.6c7b39cd.js \ - static/js/runtime-main.6c7b39cd.js.map \ + static/js/10.a23c74b9.chunk.js \ + static/js/10.a23c74b9.chunk.js.map \ + static/js/2.3456bb26.chunk.js \ + static/js/2.3456bb26.chunk.js.LICENSE \ + static/js/2.3456bb26.chunk.js.map \ + static/js/3.5ef4adcd.chunk.js \ + static/js/3.5ef4adcd.chunk.js.map \ + static/js/4.1621c1ad.chunk.js \ + static/js/4.1621c1ad.chunk.js.map \ + static/js/5.05b274a5.chunk.js \ + static/js/5.05b274a5.chunk.js.LICENSE \ + static/js/5.05b274a5.chunk.js.map \ + static/js/6.299c0acd.chunk.js \ + static/js/6.299c0acd.chunk.js.map \ + static/js/7.850d6c32.chunk.js \ + static/js/7.850d6c32.chunk.js.map \ + static/js/8.fb328a3a.chunk.js \ + static/js/8.fb328a3a.chunk.js.map \ + static/js/9.f8eb4edd.chunk.js \ + static/js/9.f8eb4edd.chunk.js.map \ + static/js/main.fa83a16e.chunk.js \ + static/js/main.fa83a16e.chunk.js.LICENSE \ + static/js/main.fa83a16e.chunk.js.map \ + static/js/runtime-main.ea592e2f.js \ + 
static/js/runtime-main.ea592e2f.js.map \ $(NULL) webstaticmediadir=$(webdir)/static/media diff --git a/web/gui/dashboard/asset-manifest.json b/web/gui/dashboard/asset-manifest.json index 954e353c4..8c332c731 100644 --- a/web/gui/dashboard/asset-manifest.json +++ b/web/gui/dashboard/asset-manifest.json @@ -1,47 +1,47 @@ { "files": { - "main.css": "./static/css/main.d931154a.chunk.css", - "main.js": "./static/js/main.8aa70c75.chunk.js", - "main.js.map": "./static/js/main.8aa70c75.chunk.js.map", - "runtime-main.js": "./static/js/runtime-main.6c7b39cd.js", - "runtime-main.js.map": "./static/js/runtime-main.6c7b39cd.js.map", - "static/css/2.6b842ba1.chunk.css": "./static/css/2.6b842ba1.chunk.css", - "static/js/2.252b3a57.chunk.js": "./static/js/2.252b3a57.chunk.js", - "static/js/2.252b3a57.chunk.js.map": "./static/js/2.252b3a57.chunk.js.map", - "static/js/3.99238dcb.chunk.js": "./static/js/3.99238dcb.chunk.js", - "static/js/3.99238dcb.chunk.js.map": "./static/js/3.99238dcb.chunk.js.map", + "main.css": "./static/css/main.a46a34fa.chunk.css", + "main.js": "./static/js/main.fa83a16e.chunk.js", + "main.js.map": "./static/js/main.fa83a16e.chunk.js.map", + "runtime-main.js": "./static/js/runtime-main.ea592e2f.js", + "runtime-main.js.map": "./static/js/runtime-main.ea592e2f.js.map", + "static/css/2.20fd0a40.chunk.css": "./static/css/2.20fd0a40.chunk.css", + "static/js/2.3456bb26.chunk.js": "./static/js/2.3456bb26.chunk.js", + "static/js/2.3456bb26.chunk.js.map": "./static/js/2.3456bb26.chunk.js.map", + "static/js/3.5ef4adcd.chunk.js": "./static/js/3.5ef4adcd.chunk.js", + "static/js/3.5ef4adcd.chunk.js.map": "./static/js/3.5ef4adcd.chunk.js.map", "static/css/4.a36e3b73.chunk.css": "./static/css/4.a36e3b73.chunk.css", - "static/js/4.6ef9bdcb.chunk.js": "./static/js/4.6ef9bdcb.chunk.js", - "static/js/4.6ef9bdcb.chunk.js.map": "./static/js/4.6ef9bdcb.chunk.js.map", - "static/js/5.96a698ab.chunk.js": "./static/js/5.96a698ab.chunk.js", - "static/js/5.96a698ab.chunk.js.map": "./static/js/5.96a698ab.chunk.js.map", - "static/js/6.d9713eb9.chunk.js": "./static/js/6.d9713eb9.chunk.js", - "static/js/6.d9713eb9.chunk.js.map": "./static/js/6.d9713eb9.chunk.js.map", - "static/js/7.12e939e5.chunk.js": "./static/js/7.12e939e5.chunk.js", - "static/js/7.12e939e5.chunk.js.map": "./static/js/7.12e939e5.chunk.js.map", - "static/js/8.91852cf4.chunk.js": "./static/js/8.91852cf4.chunk.js", - "static/js/8.91852cf4.chunk.js.map": "./static/js/8.91852cf4.chunk.js.map", - "static/js/9.e3a9ce26.chunk.js": "./static/js/9.e3a9ce26.chunk.js", - "static/js/9.e3a9ce26.chunk.js.map": "./static/js/9.e3a9ce26.chunk.js.map", - "static/js/10.db7e8e19.chunk.js": "./static/js/10.db7e8e19.chunk.js", - "static/js/10.db7e8e19.chunk.js.map": "./static/js/10.db7e8e19.chunk.js.map", + "static/js/4.1621c1ad.chunk.js": "./static/js/4.1621c1ad.chunk.js", + "static/js/4.1621c1ad.chunk.js.map": "./static/js/4.1621c1ad.chunk.js.map", + "static/js/5.05b274a5.chunk.js": "./static/js/5.05b274a5.chunk.js", + "static/js/5.05b274a5.chunk.js.map": "./static/js/5.05b274a5.chunk.js.map", + "static/js/6.299c0acd.chunk.js": "./static/js/6.299c0acd.chunk.js", + "static/js/6.299c0acd.chunk.js.map": "./static/js/6.299c0acd.chunk.js.map", + "static/js/7.850d6c32.chunk.js": "./static/js/7.850d6c32.chunk.js", + "static/js/7.850d6c32.chunk.js.map": "./static/js/7.850d6c32.chunk.js.map", + "static/js/8.fb328a3a.chunk.js": "./static/js/8.fb328a3a.chunk.js", + "static/js/8.fb328a3a.chunk.js.map": "./static/js/8.fb328a3a.chunk.js.map", + "static/js/9.f8eb4edd.chunk.js": 
"./static/js/9.f8eb4edd.chunk.js", + "static/js/9.f8eb4edd.chunk.js.map": "./static/js/9.f8eb4edd.chunk.js.map", + "static/js/10.a23c74b9.chunk.js": "./static/js/10.a23c74b9.chunk.js", + "static/js/10.a23c74b9.chunk.js.map": "./static/js/10.a23c74b9.chunk.js.map", "index.html": "./index.html", - "precache-manifest.1a96c027aec7f2d07341fa69aa6b82fa.js": "./precache-manifest.1a96c027aec7f2d07341fa69aa6b82fa.js", + "precache-manifest.14a1e41ead8f8b6e26e356372042ef5a.js": "./precache-manifest.14a1e41ead8f8b6e26e356372042ef5a.js", "service-worker.js": "./service-worker.js", - "static/css/2.6b842ba1.chunk.css.map": "./static/css/2.6b842ba1.chunk.css.map", + "static/css/2.20fd0a40.chunk.css.map": "./static/css/2.20fd0a40.chunk.css.map", "static/css/4.a36e3b73.chunk.css.map": "./static/css/4.a36e3b73.chunk.css.map", - "static/css/main.d931154a.chunk.css.map": "./static/css/main.d931154a.chunk.css.map", - "static/js/2.252b3a57.chunk.js.LICENSE": "./static/js/2.252b3a57.chunk.js.LICENSE", - "static/js/5.96a698ab.chunk.js.LICENSE": "./static/js/5.96a698ab.chunk.js.LICENSE", - "static/js/main.8aa70c75.chunk.js.LICENSE": "./static/js/main.8aa70c75.chunk.js.LICENSE", + "static/css/main.a46a34fa.chunk.css.map": "./static/css/main.a46a34fa.chunk.css.map", + "static/js/2.3456bb26.chunk.js.LICENSE": "./static/js/2.3456bb26.chunk.js.LICENSE", + "static/js/5.05b274a5.chunk.js.LICENSE": "./static/js/5.05b274a5.chunk.js.LICENSE", + "static/js/main.fa83a16e.chunk.js.LICENSE": "./static/js/main.fa83a16e.chunk.js.LICENSE", "static/media/index.css": "./static/media/ibm-plex-sans-latin-700italic.72e9af40.woff", "static/media/fonts.css": "./static/media/material-icons.0509ab09.woff2" }, "entrypoints": [ - "static/js/runtime-main.6c7b39cd.js", - "static/css/2.6b842ba1.chunk.css", - "static/js/2.252b3a57.chunk.js", - "static/css/main.d931154a.chunk.css", - "static/js/main.8aa70c75.chunk.js" + "static/js/runtime-main.ea592e2f.js", + "static/css/2.20fd0a40.chunk.css", + "static/js/2.3456bb26.chunk.js", + "static/css/main.a46a34fa.chunk.css", + "static/js/main.fa83a16e.chunk.js" ] } \ No newline at end of file diff --git a/web/gui/dashboard/css/dashboard.css b/web/gui/dashboard/css/dashboard.css index 035263268..71215a031 100644 --- a/web/gui/dashboard/css/dashboard.css +++ b/web/gui/dashboard/css/dashboard.css @@ -146,7 +146,7 @@ body { display: block; position: absolute; bottom: 0px; - right: 30px; + right: 0px; height: 15px; width: 110px; font-size: 12px; diff --git a/web/gui/dashboard/css/dashboard.slate.css b/web/gui/dashboard/css/dashboard.slate.css index b3c65d38a..22e37ada5 100644 --- a/web/gui/dashboard/css/dashboard.slate.css +++ b/web/gui/dashboard/css/dashboard.slate.css @@ -160,7 +160,7 @@ code { display: block; position: absolute; bottom: 0px; - right: 30px; + right: 0px; height: 15px; width: 110px; font-size: 12px; diff --git a/web/gui/dashboard/dashboard.css b/web/gui/dashboard/dashboard.css index 035263268..71215a031 100644 --- a/web/gui/dashboard/dashboard.css +++ b/web/gui/dashboard/dashboard.css @@ -146,7 +146,7 @@ body { display: block; position: absolute; bottom: 0px; - right: 30px; + right: 0px; height: 15px; width: 110px; font-size: 12px; diff --git a/web/gui/dashboard/dashboard.html b/web/gui/dashboard/dashboard.html index d843fc5cb..c550db390 100644 --- a/web/gui/dashboard/dashboard.html +++ b/web/gui/dashboard/dashboard.html @@ -2,7 +2,7 @@ - NetData Dashboard + Netdata Dashboard @@ -25,7 +25,7 @@
- NetData Custom Dashboard
+ Netdata Custom Dashboard

This is a template for building custom dashboards. To build a dashboard you just do this: @@ -518,8 +518,8 @@ Sparklines using dygraphs

Google Charts

-NetData was originaly developed with Google Charts. -NetData is a complete Google Visualization API provider. +Netdata was originally developed with Google Charts. +Netdata is a complete Google Visualization API provider.
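The custom dashboard files touched above all follow the same pattern: load `dashboard.js` from any agent, then declare one `div` per chart. A minimal stand-alone page along those lines (a sketch only; replace `NODE` with a reachable agent, and the chart name and sizing attributes are just an example):

```html
<!DOCTYPE html>
<html>
<head>
    <!-- dashboard.js is served by every Netdata agent -->
    <script type="text/javascript" src="http://NODE:19999/dashboard.js"></script>
</head>
<body>
    <!-- one div per chart: select the chart, the library, and the time window -->
    <div data-netdata="system.cpu"
         data-chart-library="dygraph"
         data-width="100%"
         data-height="200px"
         data-after="-300"></div>
</body>
</html>
```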
    - NetData Dashboard + Netdata Dashboard diff --git a/web/gui/dashboard/demo2.html b/web/gui/dashboard/demo2.html index 05ca72084..9a77fdde5 100644 --- a/web/gui/dashboard/demo2.html +++ b/web/gui/dashboard/demo2.html @@ -2,7 +2,7 @@ - NetData Dashboard + Netdata Dashboard diff --git a/web/gui/dashboard/demosites.html b/web/gui/dashboard/demosites.html index b75e15e07..59435c2b2 100644 --- a/web/gui/dashboard/demosites.html +++ b/web/gui/dashboard/demosites.html @@ -4,7 +4,7 @@ - NetData: Get control of your Linux Servers. Simple. Effective. Awesome. + Netdata: Get control of your Linux Servers. Simple. Effective. Awesome. diff --git a/web/gui/dashboard/demosites2.html b/web/gui/dashboard/demosites2.html index fe35cfb6d..4b2be5c26 100644 --- a/web/gui/dashboard/demosites2.html +++ b/web/gui/dashboard/demosites2.html @@ -2,7 +2,7 @@ - NetData - Real-time performance monitoring, done right! + Netdata - Real-time performance monitoring, done right! diff --git a/web/gui/dashboard/index-node-view.html b/web/gui/dashboard/index-node-view.html index 6b3cdec3a..8483892ec 100644 --- a/web/gui/dashboard/index-node-view.html +++ b/web/gui/dashboard/index-node-view.html @@ -2,7 +2,7 @@ - NetData TV Dashboard + Netdata TV Dashboard diff --git a/web/gui/dashboard/index.html b/web/gui/dashboard/index.html index 39744f597..02a2b4c3c 100644 --- a/web/gui/dashboard/index.html +++ b/web/gui/dashboard/index.html @@ -1,4 +1,4 @@ -netdata dashboard
     You must enable JavaScript in order to use Netdata!
     You can do this in your browser settings.
\ No newline at end of file
+overlayEl.style = theme == 'slate' ? "background-color: #272b30; color: #373b40;" : "background-color: #fff; color: #ddd;";
\ No newline at end of file
diff --git a/web/gui/dashboard/infographic.html b/web/gui/dashboard/infographic.html
index 24ff8f4e6..0d3f56375 100644
--- a/web/gui/dashboard/infographic.html
+++ b/web/gui/dashboard/infographic.html
@@ -3,7 +3,7 @@
-    NetData: Get control of your Linux Servers. Simple. Effective. Awesome.
+    Netdata: Get control of your Linux Servers. Simple. Effective. Awesome.
diff --git a/web/gui/dashboard/lib/jquery-2.2.4.min.js b/web/gui/dashboard/lib/jquery-2.2.4.min.js
deleted file mode 100644
index c641fdacd..000000000
--- a/web/gui/dashboard/lib/jquery-2.2.4.min.js
+++ /dev/null
@@ -1,5 +0,0 @@
-/*! jQuery v2.2.4 | (c) jQuery Foundation | jquery.org/license */
-// SPDX-License-Identifier: MIT
-[the remaining three deleted lines of minified jQuery 2.2.4 source are omitted; extraction had rendered them unreadable]