author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-05 12:08:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-05 12:08:18 +0000
commit     5da14042f70711ea5cf66e034699730335462f66 (patch)
tree       0f6354ccac934ed87a2d555f45be4c831cf92f4a /src/fluent-bit/plugins
parent     Releasing debian version 1.44.3-2. (diff)
download   netdata-5da14042f70711ea5cf66e034699730335462f66.tar.xz
           netdata-5da14042f70711ea5cf66e034699730335462f66.zip
Merging upstream version 1.45.3+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/fluent-bit/plugins')
-rw-r--r--src/fluent-bit/plugins/CMakeLists.txt416
-rw-r--r--src/fluent-bit/plugins/custom_calyptia/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/custom_calyptia/calyptia.c615
-rw-r--r--src/fluent-bit/plugins/filter_alter_size/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/filter_alter_size/alter_size.c212
-rw-r--r--src/fluent-bit/plugins/filter_alter_size/alter_size.h0
-rw-r--r--src/fluent-bit/plugins/filter_aws/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/filter_aws/aws.c1062
-rw-r--r--src/fluent-bit/plugins/filter_aws/aws.h131
-rw-r--r--src/fluent-bit/plugins/filter_checklist/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/filter_checklist/checklist.c656
-rw-r--r--src/fluent-bit/plugins/filter_checklist/checklist.h69
-rw-r--r--src/fluent-bit/plugins/filter_ecs/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/filter_ecs/ecs.c1760
-rw-r--r--src/fluent-bit/plugins/filter_ecs/ecs.h152
-rw-r--r--src/fluent-bit/plugins/filter_expect/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/filter_expect/expect.c614
-rw-r--r--src/fluent-bit/plugins/filter_expect/expect.h53
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/.gitignore1
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/CMakeLists.txt19
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/geoip2.c519
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/geoip2.h46
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/CMakeLists.txt101
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/LICENSE202
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/NOTICE13
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/VERSION1
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/bin/CMakeLists.txt13
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/bin/Makefile.am10
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/bin/mmdblookup.c762
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/include/maxminddb.h255
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/include/maxminddb_config.h.cmake.in14
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/include/maxminddb_config.h.in14
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/Makefile.am25
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/data-pool.c180
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/data-pool.h52
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/libmaxminddb.pc.in11
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/maxminddb-compat-util.h167
-rw-r--r--src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/maxminddb.c2157
-rw-r--r--src/fluent-bit/plugins/filter_grep/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/filter_grep/grep.c434
-rw-r--r--src/fluent-bit/plugins/filter_grep/grep.h58
-rw-r--r--src/fluent-bit/plugins/filter_kubernetes/CMakeLists.txt14
-rw-r--r--src/fluent-bit/plugins/filter_kubernetes/kube_conf.c232
-rw-r--r--src/fluent-bit/plugins/filter_kubernetes/kube_conf.h174
-rw-r--r--src/fluent-bit/plugins/filter_kubernetes/kube_meta.c1650
-rw-r--r--src/fluent-bit/plugins/filter_kubernetes/kube_meta.h69
-rw-r--r--src/fluent-bit/plugins/filter_kubernetes/kube_property.c360
-rw-r--r--src/fluent-bit/plugins/filter_kubernetes/kube_property.h40
-rw-r--r--src/fluent-bit/plugins/filter_kubernetes/kube_props.h44
-rw-r--r--src/fluent-bit/plugins/filter_kubernetes/kube_regex.c43
-rw-r--r--src/fluent-bit/plugins/filter_kubernetes/kube_regex.h31
-rw-r--r--src/fluent-bit/plugins/filter_kubernetes/kubernetes.c1000
-rw-r--r--src/fluent-bit/plugins/filter_log_to_metrics/CMakeLists.txt8
-rw-r--r--src/fluent-bit/plugins/filter_log_to_metrics/log_to_metrics.c965
-rw-r--r--src/fluent-bit/plugins/filter_log_to_metrics/log_to_metrics.h85
-rw-r--r--src/fluent-bit/plugins/filter_lua/CMakeLists.txt13
-rw-r--r--src/fluent-bit/plugins/filter_lua/lua.c713
-rw-r--r--src/fluent-bit/plugins/filter_lua/lua_config.c206
-rw-r--r--src/fluent-bit/plugins/filter_lua/lua_config.h49
-rw-r--r--src/fluent-bit/plugins/filter_modify/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/filter_modify/modify.c1659
-rw-r--r--src/fluent-bit/plugins/filter_modify/modify.h96
-rw-r--r--src/fluent-bit/plugins/filter_multiline/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/filter_multiline/ml.c931
-rw-r--r--src/fluent-bit/plugins/filter_multiline/ml.h87
-rw-r--r--src/fluent-bit/plugins/filter_multiline/ml_concat.c473
-rw-r--r--src/fluent-bit/plugins/filter_multiline/ml_concat.h84
-rw-r--r--src/fluent-bit/plugins/filter_nest/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/filter_nest/nest.c761
-rw-r--r--src/fluent-bit/plugins/filter_nest/nest.h55
-rw-r--r--src/fluent-bit/plugins/filter_nightfall/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/filter_nightfall/nightfall.c654
-rw-r--r--src/fluent-bit/plugins/filter_nightfall/nightfall.h57
-rw-r--r--src/fluent-bit/plugins/filter_nightfall/nightfall_api.c536
-rw-r--r--src/fluent-bit/plugins/filter_nightfall/nightfall_api.h31
-rw-r--r--src/fluent-bit/plugins/filter_parser/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/filter_parser/filter_parser.c452
-rw-r--r--src/fluent-bit/plugins/filter_parser/filter_parser.h42
-rw-r--r--src/fluent-bit/plugins/filter_record_modifier/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/filter_record_modifier/filter_modifier.c531
-rw-r--r--src/fluent-bit/plugins/filter_record_modifier/filter_modifier.h68
-rw-r--r--src/fluent-bit/plugins/filter_rewrite_tag/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/filter_rewrite_tag/rewrite_tag.c621
-rw-r--r--src/fluent-bit/plugins/filter_rewrite_tag/rewrite_tag.h64
-rw-r--r--src/fluent-bit/plugins/filter_stdout/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/filter_stdout/stdout.c107
-rw-r--r--src/fluent-bit/plugins/filter_tensorflow/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/filter_tensorflow/tensorflow.c540
-rw-r--r--src/fluent-bit/plugins/filter_tensorflow/tensorflow.h46
-rw-r--r--src/fluent-bit/plugins/filter_throttle/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/filter_throttle/throttle.c337
-rw-r--r--src/fluent-bit/plugins/filter_throttle/throttle.h56
-rw-r--r--src/fluent-bit/plugins/filter_throttle/window.c97
-rw-r--r--src/fluent-bit/plugins/filter_throttle/window.h37
-rw-r--r--src/fluent-bit/plugins/filter_throttle_size/CMakeLists.txt3
-rw-r--r--src/fluent-bit/plugins/filter_throttle_size/size_window.c226
-rw-r--r--src/fluent-bit/plugins/filter_throttle_size/size_window.h140
-rw-r--r--src/fluent-bit/plugins/filter_throttle_size/throttle_size.c774
-rw-r--r--src/fluent-bit/plugins/filter_throttle_size/throttle_size.h60
-rw-r--r--src/fluent-bit/plugins/filter_type_converter/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/filter_type_converter/type_converter.c399
-rw-r--r--src/fluent-bit/plugins/filter_type_converter/type_converter.h46
-rw-r--r--src/fluent-bit/plugins/filter_wasm/CMakeLists.txt11
-rw-r--r--src/fluent-bit/plugins/filter_wasm/filter_wasm.c318
-rw-r--r--src/fluent-bit/plugins/filter_wasm/filter_wasm.h41
-rw-r--r--src/fluent-bit/plugins/in_calyptia_fleet/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_calyptia_fleet/in_calyptia_fleet.c1269
-rw-r--r--src/fluent-bit/plugins/in_collectd/CMakeLists.txt7
-rw-r--r--src/fluent-bit/plugins/in_collectd/in_collectd.c226
-rw-r--r--src/fluent-bit/plugins/in_collectd/in_collectd.h46
-rw-r--r--src/fluent-bit/plugins/in_collectd/netprot.c308
-rw-r--r--src/fluent-bit/plugins/in_collectd/netprot.h22
-rw-r--r--src/fluent-bit/plugins/in_collectd/typesdb.c223
-rw-r--r--src/fluent-bit/plugins/in_collectd/typesdb.h45
-rw-r--r--src/fluent-bit/plugins/in_collectd/typesdb_parser.c214
-rw-r--r--src/fluent-bit/plugins/in_collectd/typesdb_parser.h20
-rw-r--r--src/fluent-bit/plugins/in_cpu/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_cpu/cpu.c672
-rw-r--r--src/fluent-bit/plugins/in_cpu/cpu.h129
-rw-r--r--src/fluent-bit/plugins/in_disk/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_disk/in_disk.c387
-rw-r--r--src/fluent-bit/plugins/in_disk/in_disk.h48
-rw-r--r--src/fluent-bit/plugins/in_docker/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/in_docker/cgroup_v1.c397
-rw-r--r--src/fluent-bit/plugins/in_docker/docker.c560
-rw-r--r--src/fluent-bit/plugins/in_docker/docker.h94
-rw-r--r--src/fluent-bit/plugins/in_docker_events/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/in_docker_events/docker_events.c476
-rw-r--r--src/fluent-bit/plugins/in_docker_events/docker_events.h56
-rw-r--r--src/fluent-bit/plugins/in_docker_events/docker_events_config.c106
-rw-r--r--src/fluent-bit/plugins/in_docker_events/docker_events_config.h29
-rw-r--r--src/fluent-bit/plugins/in_dummy/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_dummy/in_dummy.c438
-rw-r--r--src/fluent-bit/plugins/in_dummy/in_dummy.h58
-rw-r--r--src/fluent-bit/plugins/in_elasticsearch/CMakeLists.txt12
-rw-r--r--src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch.c245
-rw-r--r--src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch.h59
-rw-r--r--src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_conn.c307
-rw-r--r--src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_conn.h55
-rw-r--r--src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_prot.c922
-rw-r--r--src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_prot.h40
-rw-r--r--src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_config.c105
-rw-r--r--src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_config.h29
-rw-r--r--src/fluent-bit/plugins/in_emitter/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/in_emitter/emitter.c321
-rw-r--r--src/fluent-bit/plugins/in_event_test/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_event_test/event_test.c407
-rw-r--r--src/fluent-bit/plugins/in_event_type/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_event_type/event_type.c482
-rw-r--r--src/fluent-bit/plugins/in_exec/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_exec/in_exec.c491
-rw-r--r--src/fluent-bit/plugins/in_exec/in_exec.h52
-rw-r--r--src/fluent-bit/plugins/in_exec/in_exec_win32_compat.h94
-rw-r--r--src/fluent-bit/plugins/in_exec_wasi/CMakeLists.txt11
-rw-r--r--src/fluent-bit/plugins/in_exec_wasi/in_exec_wasi.c451
-rw-r--r--src/fluent-bit/plugins/in_exec_wasi/in_exec_wasi.h55
-rw-r--r--src/fluent-bit/plugins/in_fluentbit_metrics/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/in_fluentbit_metrics/metrics.c201
-rw-r--r--src/fluent-bit/plugins/in_forward/CMakeLists.txt7
-rw-r--r--src/fluent-bit/plugins/in_forward/fw.c325
-rw-r--r--src/fluent-bit/plugins/in_forward/fw.h52
-rw-r--r--src/fluent-bit/plugins/in_forward/fw_config.c120
-rw-r--r--src/fluent-bit/plugins/in_forward/fw_config.h28
-rw-r--r--src/fluent-bit/plugins/in_forward/fw_conn.c199
-rw-r--r--src/fluent-bit/plugins/in_forward/fw_conn.h57
-rw-r--r--src/fluent-bit/plugins/in_forward/fw_prot.c846
-rw-r--r--src/fluent-bit/plugins/in_forward/fw_prot.h28
-rw-r--r--src/fluent-bit/plugins/in_head/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_head/in_head.c473
-rw-r--r--src/fluent-bit/plugins/in_head/in_head.h59
-rw-r--r--src/fluent-bit/plugins/in_health/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_health/health.c293
-rw-r--r--src/fluent-bit/plugins/in_http/CMakeLists.txt12
-rw-r--r--src/fluent-bit/plugins/in_http/http.c204
-rw-r--r--src/fluent-bit/plugins/in_http/http.h58
-rw-r--r--src/fluent-bit/plugins/in_http/http_config.c157
-rw-r--r--src/fluent-bit/plugins/in_http/http_config.h29
-rw-r--r--src/fluent-bit/plugins/in_http/http_conn.c306
-rw-r--r--src/fluent-bit/plugins/in_http/http_conn.h54
-rw-r--r--src/fluent-bit/plugins/in_http/http_prot.c665
-rw-r--r--src/fluent-bit/plugins/in_http/http_prot.h31
-rw-r--r--src/fluent-bit/plugins/in_kafka/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/in_kafka/in_kafka.c382
-rw-r--r--src/fluent-bit/plugins/in_kafka/in_kafka.h48
-rw-r--r--src/fluent-bit/plugins/in_kmsg/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_kmsg/in_kmsg.c390
-rw-r--r--src/fluent-bit/plugins/in_kmsg/in_kmsg.h68
-rw-r--r--src/fluent-bit/plugins/in_kubernetes_events/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events.c921
-rw-r--r--src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events.h106
-rw-r--r--src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events_conf.c326
-rw-r--r--src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events_conf.h47
-rw-r--r--src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events_sql.h60
-rw-r--r--src/fluent-bit/plugins/in_lib/CMakeLists.txt10
-rw-r--r--src/fluent-bit/plugins/in_lib/in_lib.c279
-rw-r--r--src/fluent-bit/plugins/in_lib/in_lib.h45
-rw-r--r--src/fluent-bit/plugins/in_mem/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/in_mem/mem.c320
-rw-r--r--src/fluent-bit/plugins/in_mem/mem.h51
-rw-r--r--src/fluent-bit/plugins/in_mem/proc.c185
-rw-r--r--src/fluent-bit/plugins/in_mem/proc.h68
-rw-r--r--src/fluent-bit/plugins/in_mqtt/CMakeLists.txt7
-rw-r--r--src/fluent-bit/plugins/in_mqtt/mqtt.c162
-rw-r--r--src/fluent-bit/plugins/in_mqtt/mqtt.h45
-rw-r--r--src/fluent-bit/plugins/in_mqtt/mqtt_config.c82
-rw-r--r--src/fluent-bit/plugins/in_mqtt/mqtt_config.h29
-rw-r--r--src/fluent-bit/plugins/in_mqtt/mqtt_conn.c157
-rw-r--r--src/fluent-bit/plugins/in_mqtt/mqtt_conn.h49
-rw-r--r--src/fluent-bit/plugins/in_mqtt/mqtt_prot.c465
-rw-r--r--src/fluent-bit/plugins/in_mqtt/mqtt_prot.h62
-rw-r--r--src/fluent-bit/plugins/in_netif/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_netif/in_netif.c392
-rw-r--r--src/fluent-bit/plugins/in_netif/in_netif.h70
-rw-r--r--src/fluent-bit/plugins/in_nginx_exporter_metrics/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_nginx_exporter_metrics/nginx.c2363
-rw-r--r--src/fluent-bit/plugins/in_nginx_exporter_metrics/nginx.h150
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/CMakeLists.txt26
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne.c1107
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne.h191
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_config.c69
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_config.h31
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpu.c23
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpu.h28
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpu_linux.c396
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpufreq.h28
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpufreq_linux.c196
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_diskstats.c22
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_diskstats.h29
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_diskstats_linux.c449
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_filefd_linux.c115
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_filefd_linux.h28
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_filesystem.c39
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_filesystem.h29
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_filesystem_linux.c404
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_loadavg.c22
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_loadavg.h29
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_loadavg_linux.c126
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_meminfo.c23
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_meminfo.h29
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_meminfo_linux.c283
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_netdev.c22
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_netdev.h29
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_netdev_linux.c363
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_stat_linux.c152
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_stat_linux.h28
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_systemd.c807
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_systemd.h127
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_textfile.c22
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_textfile.h28
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_textfile_linux.c204
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_time.c59
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_time.h28
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_uname.c22
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_uname.h28
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_uname_linux.c84
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_utils.c256
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_utils.h39
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_vmstat_linux.c216
-rw-r--r--src/fluent-bit/plugins/in_node_exporter_metrics/ne_vmstat_linux.h29
-rw-r--r--src/fluent-bit/plugins/in_opentelemetry/CMakeLists.txt12
-rw-r--r--src/fluent-bit/plugins/in_opentelemetry/http_conn.c301
-rw-r--r--src/fluent-bit/plugins/in_opentelemetry/http_conn.h57
-rw-r--r--src/fluent-bit/plugins/in_opentelemetry/opentelemetry.c200
-rw-r--r--src/fluent-bit/plugins/in_opentelemetry/opentelemetry.h51
-rw-r--r--src/fluent-bit/plugins/in_opentelemetry/opentelemetry_config.c92
-rw-r--r--src/fluent-bit/plugins/in_opentelemetry/opentelemetry_config.h29
-rw-r--r--src/fluent-bit/plugins/in_opentelemetry/opentelemetry_prot.c1674
-rw-r--r--src/fluent-bit/plugins/in_opentelemetry/opentelemetry_prot.h31
-rw-r--r--src/fluent-bit/plugins/in_podman_metrics/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/in_podman_metrics/podman_metrics.c515
-rw-r--r--src/fluent-bit/plugins/in_podman_metrics/podman_metrics.h98
-rw-r--r--src/fluent-bit/plugins/in_podman_metrics/podman_metrics_config.h211
-rw-r--r--src/fluent-bit/plugins/in_podman_metrics/podman_metrics_data.c407
-rw-r--r--src/fluent-bit/plugins/in_podman_metrics/podman_metrics_data.h51
-rw-r--r--src/fluent-bit/plugins/in_proc/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_proc/in_proc.c534
-rw-r--r--src/fluent-bit/plugins/in_proc/in_proc.h78
-rw-r--r--src/fluent-bit/plugins/in_prometheus_scrape/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_prometheus_scrape/prom_scrape.c261
-rw-r--r--src/fluent-bit/plugins/in_prometheus_scrape/prom_scrape.h45
-rw-r--r--src/fluent-bit/plugins/in_random/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_random/random.c245
-rw-r--r--src/fluent-bit/plugins/in_serial/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_serial/in_serial.c443
-rw-r--r--src/fluent-bit/plugins/in_serial/in_serial.h63
-rw-r--r--src/fluent-bit/plugins/in_serial/in_serial_config.c82
-rw-r--r--src/fluent-bit/plugins/in_serial/in_serial_config.h77
-rw-r--r--src/fluent-bit/plugins/in_splunk/CMakeLists.txt12
-rw-r--r--src/fluent-bit/plugins/in_splunk/splunk.c213
-rw-r--r--src/fluent-bit/plugins/in_splunk/splunk.h60
-rw-r--r--src/fluent-bit/plugins/in_splunk/splunk_config.c184
-rw-r--r--src/fluent-bit/plugins/in_splunk/splunk_config.h29
-rw-r--r--src/fluent-bit/plugins/in_splunk/splunk_conn.c306
-rw-r--r--src/fluent-bit/plugins/in_splunk/splunk_conn.h54
-rw-r--r--src/fluent-bit/plugins/in_splunk/splunk_prot.c779
-rw-r--r--src/fluent-bit/plugins/in_splunk/splunk_prot.h36
-rw-r--r--src/fluent-bit/plugins/in_statsd/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_statsd/statsd.c386
-rw-r--r--src/fluent-bit/plugins/in_stdin/CMakeLists.txt10
-rw-r--r--src/fluent-bit/plugins/in_stdin/in_stdin.c472
-rw-r--r--src/fluent-bit/plugins/in_stdin/in_stdin.h48
-rw-r--r--src/fluent-bit/plugins/in_storage_backlog/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/in_storage_backlog/sb.c713
-rw-r--r--src/fluent-bit/plugins/in_stream_processor/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/in_stream_processor/sp.c173
-rw-r--r--src/fluent-bit/plugins/in_syslog/CMakeLists.txt8
-rw-r--r--src/fluent-bit/plugins/in_syslog/syslog.c263
-rw-r--r--src/fluent-bit/plugins/in_syslog/syslog.h82
-rw-r--r--src/fluent-bit/plugins/in_syslog/syslog_conf.c193
-rw-r--r--src/fluent-bit/plugins/in_syslog/syslog_conf.h32
-rw-r--r--src/fluent-bit/plugins/in_syslog/syslog_conn.c247
-rw-r--r--src/fluent-bit/plugins/in_syslog/syslog_conn.h53
-rw-r--r--src/fluent-bit/plugins/in_syslog/syslog_prot.c324
-rw-r--r--src/fluent-bit/plugins/in_syslog/syslog_prot.h35
-rw-r--r--src/fluent-bit/plugins/in_syslog/syslog_server.c235
-rw-r--r--src/fluent-bit/plugins/in_syslog/syslog_server.h31
-rw-r--r--src/fluent-bit/plugins/in_systemd/CMakeLists.txt11
-rw-r--r--src/fluent-bit/plugins/in_systemd/systemd.c555
-rw-r--r--src/fluent-bit/plugins/in_systemd/systemd_config.c314
-rw-r--r--src/fluent-bit/plugins/in_systemd/systemd_config.h82
-rw-r--r--src/fluent-bit/plugins/in_systemd/systemd_db.c197
-rw-r--r--src/fluent-bit/plugins/in_systemd/systemd_db.h64
-rw-r--r--src/fluent-bit/plugins/in_tail/CMakeLists.txt37
-rw-r--r--src/fluent-bit/plugins/in_tail/tail.c783
-rw-r--r--src/fluent-bit/plugins/in_tail/tail.h45
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_config.c472
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_config.h168
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_db.c277
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_db.h43
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_dockermode.c459
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_dockermode.h38
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_file.c1860
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_file.h137
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_file_internal.h130
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_fs.h96
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_fs_inotify.c433
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_fs_inotify.h37
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_fs_stat.c253
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_fs_stat.h37
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_multiline.c606
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_multiline.h57
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_scan.c71
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_scan.h29
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_scan_glob.c278
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_scan_win32.c245
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_signal.h98
-rw-r--r--src/fluent-bit/plugins/in_tail/tail_sql.h65
-rw-r--r--src/fluent-bit/plugins/in_tail/win32.h67
-rw-r--r--src/fluent-bit/plugins/in_tail/win32/interface.h44
-rw-r--r--src/fluent-bit/plugins/in_tail/win32/io.c47
-rw-r--r--src/fluent-bit/plugins/in_tail/win32/stat.c332
-rw-r--r--src/fluent-bit/plugins/in_tcp/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/in_tcp/tcp.c184
-rw-r--r--src/fluent-bit/plugins/in_tcp/tcp.h50
-rw-r--r--src/fluent-bit/plugins/in_tcp/tcp_config.c155
-rw-r--r--src/fluent-bit/plugins/in_tcp/tcp_config.h28
-rw-r--r--src/fluent-bit/plugins/in_tcp/tcp_conn.c412
-rw-r--r--src/fluent-bit/plugins/in_tcp/tcp_conn.h59
-rw-r--r--src/fluent-bit/plugins/in_thermal/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_thermal/in_thermal.c372
-rw-r--r--src/fluent-bit/plugins/in_thermal/in_thermal.h55
-rw-r--r--src/fluent-bit/plugins/in_udp/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/in_udp/udp.c197
-rw-r--r--src/fluent-bit/plugins/in_udp/udp.h54
-rw-r--r--src/fluent-bit/plugins/in_udp/udp_config.c155
-rw-r--r--src/fluent-bit/plugins/in_udp/udp_config.h28
-rw-r--r--src/fluent-bit/plugins/in_udp/udp_conn.c500
-rw-r--r--src/fluent-bit/plugins/in_udp/udp_conn.h57
-rw-r--r--src/fluent-bit/plugins/in_unix_socket/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/in_unix_socket/unix_socket.c320
-rw-r--r--src/fluent-bit/plugins/in_unix_socket/unix_socket.h55
-rw-r--r--src/fluent-bit/plugins/in_unix_socket/unix_socket_config.c153
-rw-r--r--src/fluent-bit/plugins/in_unix_socket/unix_socket_config.h28
-rw-r--r--src/fluent-bit/plugins/in_unix_socket/unix_socket_conn.c433
-rw-r--r--src/fluent-bit/plugins/in_unix_socket/unix_socket_conn.h60
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/CMakeLists.txt28
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we.c1144
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we.h332
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_config.c154
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_config.h32
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_cpu.c304
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_cpu.h30
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_cs.c112
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_cs.h29
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_logical_disk.c272
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_logical_disk.h29
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_metric.c368
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_metric.h98
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_net.c253
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_net.h29
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_os.c268
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_os.h32
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_perflib.c1048
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_perflib.h72
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_util.c167
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_util.h37
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi.c572
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi.h59
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_cpu_info.c116
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_cpu_info.h29
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_logon.c198
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_logon.h29
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_memory.c557
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_memory.h29
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_paging_file.c156
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_paging_file.h29
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_process.c417
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_process.h29
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_service.c493
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_service.h29
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_system.c190
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_system.h29
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_thermalzone.c171
-rw-r--r--src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_thermalzone.h29
-rw-r--r--src/fluent-bit/plugins/in_winevtlog/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/in_winevtlog/in_winevtlog.c279
-rw-r--r--src/fluent-bit/plugins/in_winevtlog/pack.c625
-rw-r--r--src/fluent-bit/plugins/in_winevtlog/winevtlog.c840
-rw-r--r--src/fluent-bit/plugins/in_winevtlog/winevtlog.h134
-rw-r--r--src/fluent-bit/plugins/in_winlog/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/in_winlog/in_winlog.c267
-rw-r--r--src/fluent-bit/plugins/in_winlog/pack.c451
-rw-r--r--src/fluent-bit/plugins/in_winlog/winlog.c300
-rw-r--r--src/fluent-bit/plugins/in_winlog/winlog.h110
-rw-r--r--src/fluent-bit/plugins/in_winstat/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/in_winstat/winstat.c340
-rw-r--r--src/fluent-bit/plugins/out_azure/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_azure/azure.c452
-rw-r--r--src/fluent-bit/plugins/out_azure/azure.h62
-rw-r--r--src/fluent-bit/plugins/out_azure/azure_conf.c219
-rw-r--r--src/fluent-bit/plugins/out_azure/azure_conf.h29
-rw-r--r--src/fluent-bit/plugins/out_azure_blob/CMakeLists.txt10
-rw-r--r--src/fluent-bit/plugins/out_azure_blob/azure_blob.c594
-rw-r--r--src/fluent-bit/plugins/out_azure_blob/azure_blob.h74
-rw-r--r--src/fluent-bit/plugins/out_azure_blob/azure_blob_appendblob.c44
-rw-r--r--src/fluent-bit/plugins/out_azure_blob/azure_blob_appendblob.h28
-rw-r--r--src/fluent-bit/plugins/out_azure_blob/azure_blob_blockblob.c238
-rw-r--r--src/fluent-bit/plugins/out_azure_blob/azure_blob_blockblob.h32
-rw-r--r--src/fluent-bit/plugins/out_azure_blob/azure_blob_conf.c245
-rw-r--r--src/fluent-bit/plugins/out_azure_blob/azure_blob_conf.h29
-rw-r--r--src/fluent-bit/plugins/out_azure_blob/azure_blob_http.c361
-rw-r--r--src/fluent-bit/plugins/out_azure_blob/azure_blob_http.h36
-rw-r--r--src/fluent-bit/plugins/out_azure_blob/azure_blob_uri.c150
-rw-r--r--src/fluent-bit/plugins/out_azure_blob/azure_blob_uri.h34
-rw-r--r--src/fluent-bit/plugins/out_azure_kusto/CMakeLists.txt7
-rw-r--r--src/fluent-bit/plugins/out_azure_kusto/azure_kusto.c477
-rw-r--r--src/fluent-bit/plugins/out_azure_kusto/azure_kusto.h110
-rw-r--r--src/fluent-bit/plugins/out_azure_kusto/azure_kusto_conf.c665
-rw-r--r--src/fluent-bit/plugins/out_azure_kusto/azure_kusto_conf.h31
-rw-r--r--src/fluent-bit/plugins/out_azure_kusto/azure_kusto_ingest.c496
-rw-r--r--src/fluent-bit/plugins/out_azure_kusto/azure_kusto_ingest.h28
-rw-r--r--src/fluent-bit/plugins/out_azure_logs_ingestion/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion.c445
-rw-r--r--src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion.h74
-rw-r--r--src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion_conf.c172
-rw-r--r--src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion_conf.h29
-rw-r--r--src/fluent-bit/plugins/out_bigquery/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_bigquery/bigquery.c1159
-rw-r--r--src/fluent-bit/plugins/out_bigquery/bigquery.h132
-rw-r--r--src/fluent-bit/plugins/out_bigquery/bigquery_conf.c435
-rw-r--r--src/fluent-bit/plugins/out_bigquery/bigquery_conf.h29
-rw-r--r--src/fluent-bit/plugins/out_calyptia/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/out_calyptia/calyptia.c1025
-rw-r--r--src/fluent-bit/plugins/out_calyptia/calyptia.h85
-rw-r--r--src/fluent-bit/plugins/out_chronicle/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_chronicle/chronicle.c962
-rw-r--r--src/fluent-bit/plugins/out_chronicle/chronicle.h96
-rw-r--r--src/fluent-bit/plugins/out_chronicle/chronicle_conf.c421
-rw-r--r--src/fluent-bit/plugins/out_chronicle/chronicle_conf.h29
-rw-r--r--src/fluent-bit/plugins/out_cloudwatch_logs/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_api.c1564
-rw-r--r--src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_api.h57
-rw-r--r--src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_logs.c670
-rw-r--r--src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_logs.h158
-rw-r--r--src/fluent-bit/plugins/out_counter/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/out_counter/counter.c106
-rw-r--r--src/fluent-bit/plugins/out_datadog/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_datadog/datadog.c568
-rw-r--r--src/fluent-bit/plugins/out_datadog/datadog.h81
-rw-r--r--src/fluent-bit/plugins/out_datadog/datadog_conf.c223
-rw-r--r--src/fluent-bit/plugins/out_datadog/datadog_conf.h33
-rw-r--r--src/fluent-bit/plugins/out_datadog/datadog_remap.c277
-rw-r--r--src/fluent-bit/plugins/out_datadog/datadog_remap.h37
-rw-r--r--src/fluent-bit/plugins/out_es/CMakeLists.txt8
-rw-r--r--src/fluent-bit/plugins/out_es/es.c1230
-rw-r--r--src/fluent-bit/plugins/out_es/es.h140
-rw-r--r--src/fluent-bit/plugins/out_es/es_bulk.c113
-rw-r--r--src/fluent-bit/plugins/out_es/es_bulk.h46
-rw-r--r--src/fluent-bit/plugins/out_es/es_conf.c537
-rw-r--r--src/fluent-bit/plugins/out_es/es_conf.h33
-rw-r--r--src/fluent-bit/plugins/out_es/murmur3.c314
-rw-r--r--src/fluent-bit/plugins/out_es/murmur3.h29
-rw-r--r--src/fluent-bit/plugins/out_exit/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/out_exit/exit.c108
-rw-r--r--src/fluent-bit/plugins/out_file/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/out_file/file.c705
-rw-r--r--src/fluent-bit/plugins/out_file/file.h32
-rw-r--r--src/fluent-bit/plugins/out_flowcounter/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/out_flowcounter/out_flowcounter.c297
-rw-r--r--src/fluent-bit/plugins/out_flowcounter/out_flowcounter.h49
-rw-r--r--src/fluent-bit/plugins/out_forward/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_forward/README.md12
-rw-r--r--src/fluent-bit/plugins/out_forward/forward.c1832
-rw-r--r--src/fluent-bit/plugins/out_forward/forward.h146
-rw-r--r--src/fluent-bit/plugins/out_forward/forward_format.c640
-rw-r--r--src/fluent-bit/plugins/out_forward/forward_format.h48
-rw-r--r--src/fluent-bit/plugins/out_gelf/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/out_gelf/gelf.c556
-rw-r--r--src/fluent-bit/plugins/out_gelf/gelf.h47
-rw-r--r--src/fluent-bit/plugins/out_http/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_http/http.c774
-rw-r--r--src/fluent-bit/plugins/out_http/http.h103
-rw-r--r--src/fluent-bit/plugins/out_http/http_conf.c298
-rw-r--r--src/fluent-bit/plugins/out_http/http_conf.h32
-rw-r--r--src/fluent-bit/plugins/out_influxdb/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/out_influxdb/influxdb.c682
-rw-r--r--src/fluent-bit/plugins/out_influxdb/influxdb.h78
-rw-r--r--src/fluent-bit/plugins/out_influxdb/influxdb_bulk.c233
-rw-r--r--src/fluent-bit/plugins/out_influxdb/influxdb_bulk.h54
-rw-r--r--src/fluent-bit/plugins/out_kafka/CMakeLists.txt8
-rw-r--r--src/fluent-bit/plugins/out_kafka/kafka.c658
-rw-r--r--src/fluent-bit/plugins/out_kafka/kafka_callbacks.h31
-rw-r--r--src/fluent-bit/plugins/out_kafka/kafka_config.c253
-rw-r--r--src/fluent-bit/plugins/out_kafka/kafka_config.h129
-rw-r--r--src/fluent-bit/plugins/out_kafka/kafka_topic.c120
-rw-r--r--src/fluent-bit/plugins/out_kafka/kafka_topic.h34
-rw-r--r--src/fluent-bit/plugins/out_kafka_rest/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/out_kafka_rest/kafka.c351
-rw-r--r--src/fluent-bit/plugins/out_kafka_rest/kafka.h66
-rw-r--r--src/fluent-bit/plugins/out_kafka_rest/kafka_conf.c223
-rw-r--r--src/fluent-bit/plugins/out_kafka_rest/kafka_conf.h33
-rw-r--r--src/fluent-bit/plugins/out_kinesis_firehose/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/out_kinesis_firehose/firehose.c503
-rw-r--r--src/fluent-bit/plugins/out_kinesis_firehose/firehose.h104
-rw-r--r--src/fluent-bit/plugins/out_kinesis_firehose/firehose_api.c959
-rw-r--r--src/fluent-bit/plugins/out_kinesis_firehose/firehose_api.h45
-rw-r--r--src/fluent-bit/plugins/out_kinesis_streams/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/out_kinesis_streams/kinesis.c499
-rw-r--r--src/fluent-bit/plugins/out_kinesis_streams/kinesis.h109
-rw-r--r--src/fluent-bit/plugins/out_kinesis_streams/kinesis_api.c987
-rw-r--r--src/fluent-bit/plugins/out_kinesis_streams/kinesis_api.h44
-rw-r--r--src/fluent-bit/plugins/out_lib/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/out_lib/out_lib.c222
-rw-r--r--src/fluent-bit/plugins/out_lib/out_lib.h42
-rw-r--r--src/fluent-bit/plugins/out_logdna/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/out_logdna/logdna.c591
-rw-r--r--src/fluent-bit/plugins/out_logdna/logdna.h51
-rw-r--r--src/fluent-bit/plugins/out_loki/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/out_loki/loki.c1868
-rw-r--r--src/fluent-bit/plugins/out_loki/loki.h98
-rw-r--r--src/fluent-bit/plugins/out_nats/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/out_nats/nats.c252
-rw-r--r--src/fluent-bit/plugins/out_nats/nats.h33
-rw-r--r--src/fluent-bit/plugins/out_nrlogs/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/out_nrlogs/newrelic.c566
-rw-r--r--src/fluent-bit/plugins/out_nrlogs/newrelic.h52
-rw-r--r--src/fluent-bit/plugins/out_null/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/out_null/null.c178
-rw-r--r--src/fluent-bit/plugins/out_opensearch/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_opensearch/opensearch.c1291
-rw-r--r--src/fluent-bit/plugins/out_opensearch/opensearch.h155
-rw-r--r--src/fluent-bit/plugins/out_opensearch/os_conf.c411
-rw-r--r--src/fluent-bit/plugins/out_opensearch/os_conf.h33
-rw-r--r--src/fluent-bit/plugins/out_opentelemetry/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_opentelemetry/opentelemetry.c1207
-rw-r--r--src/fluent-bit/plugins/out_opentelemetry/opentelemetry.h80
-rw-r--r--src/fluent-bit/plugins/out_opentelemetry/opentelemetry_conf.c262
-rw-r--r--src/fluent-bit/plugins/out_opentelemetry/opentelemetry_conf.h33
-rw-r--r--src/fluent-bit/plugins/out_oracle_log_analytics/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan.c1313
-rw-r--r--src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan.h215
-rw-r--r--src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan_conf.c493
-rw-r--r--src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan_conf.h34
-rw-r--r--src/fluent-bit/plugins/out_pgsql/CMakeLists.txt8
-rw-r--r--src/fluent-bit/plugins/out_pgsql/pgsql.c389
-rw-r--r--src/fluent-bit/plugins/out_pgsql/pgsql.h91
-rw-r--r--src/fluent-bit/plugins/out_pgsql/pgsql_connections.c193
-rw-r--r--src/fluent-bit/plugins/out_pgsql/pgsql_connections.h27
-rw-r--r--src/fluent-bit/plugins/out_plot/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/out_plot/plot.c242
-rw-r--r--src/fluent-bit/plugins/out_prometheus_exporter/CMakeLists.txt14
-rw-r--r--src/fluent-bit/plugins/out_prometheus_exporter/prom.c298
-rw-r--r--src/fluent-bit/plugins/out_prometheus_exporter/prom.h46
-rw-r--r--src/fluent-bit/plugins/out_prometheus_exporter/prom_http.c268
-rw-r--r--src/fluent-bit/plugins/out_prometheus_exporter/prom_http.h56
-rw-r--r--src/fluent-bit/plugins/out_prometheus_remote_write/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_prometheus_remote_write/remote_write.c466
-rw-r--r--src/fluent-bit/plugins/out_prometheus_remote_write/remote_write.h83
-rw-r--r--src/fluent-bit/plugins/out_prometheus_remote_write/remote_write_conf.c254
-rw-r--r--src/fluent-bit/plugins/out_prometheus_remote_write/remote_write_conf.h33
-rw-r--r--src/fluent-bit/plugins/out_retry/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/out_retry/retry.c116
-rw-r--r--src/fluent-bit/plugins/out_s3/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_s3/s3.c2500
-rw-r--r--src/fluent-bit/plugins/out_s3/s3.h203
-rw-r--r--src/fluent-bit/plugins/out_s3/s3_multipart.c707
-rw-r--r--src/fluent-bit/plugins/out_s3/s3_store.c543
-rw-r--r--src/fluent-bit/plugins/out_s3/s3_store.h68
-rw-r--r--src/fluent-bit/plugins/out_skywalking/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/out_skywalking/skywalking.c427
-rw-r--r--src/fluent-bit/plugins/out_skywalking/skywalking.h41
-rw-r--r--src/fluent-bit/plugins/out_slack/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/out_slack/slack.c336
-rw-r--r--src/fluent-bit/plugins/out_slack/slack.h43
-rw-r--r--src/fluent-bit/plugins/out_splunk/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_splunk/splunk.c873
-rw-r--r--src/fluent-bit/plugins/out_splunk/splunk.h119
-rw-r--r--src/fluent-bit/plugins/out_splunk/splunk_conf.c313
-rw-r--r--src/fluent-bit/plugins/out_splunk/splunk_conf.h29
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/CMakeLists.txt13
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/gce_metadata.c222
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/gce_metadata.h48
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver.c2867
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver.h241
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver_conf.c667
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver_conf.h29
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver_helper.c63
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver_helper.h51
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver_http_request.c393
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver_http_request.h120
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver_operation.c147
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver_operation.h82
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver_resource_types.c143
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver_resource_types.h41
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver_source_location.c139
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver_source_location.h80
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver_timestamp.c180
-rw-r--r--src/fluent-bit/plugins/out_stackdriver/stackdriver_timestamp.h47
-rw-r--r--src/fluent-bit/plugins/out_stdout/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/out_stdout/stdout.c301
-rw-r--r--src/fluent-bit/plugins/out_stdout/stdout.h34
-rw-r--r--src/fluent-bit/plugins/out_syslog/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/out_syslog/syslog.c1170
-rw-r--r--src/fluent-bit/plugins/out_syslog/syslog_conf.c162
-rw-r--r--src/fluent-bit/plugins/out_syslog/syslog_conf.h70
-rw-r--r--src/fluent-bit/plugins/out_tcp/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_tcp/tcp.c269
-rw-r--r--src/fluent-bit/plugins/out_tcp/tcp.h46
-rw-r--r--src/fluent-bit/plugins/out_tcp/tcp_conf.c154
-rw-r--r--src/fluent-bit/plugins/out_tcp/tcp_conf.h32
-rw-r--r--src/fluent-bit/plugins/out_td/CMakeLists.txt7
-rw-r--r--src/fluent-bit/plugins/out_td/td.c271
-rw-r--r--src/fluent-bit/plugins/out_td/td.h24
-rw-r--r--src/fluent-bit/plugins/out_td/td_config.c86
-rw-r--r--src/fluent-bit/plugins/out_td/td_config.h41
-rw-r--r--src/fluent-bit/plugins/out_td/td_http.c94
-rw-r--r--src/fluent-bit/plugins/out_td/td_http.h35
-rw-r--r--src/fluent-bit/plugins/out_udp/CMakeLists.txt6
-rw-r--r--src/fluent-bit/plugins/out_udp/udp.c351
-rw-r--r--src/fluent-bit/plugins/out_udp/udp.h47
-rw-r--r--src/fluent-bit/plugins/out_udp/udp_conf.c135
-rw-r--r--src/fluent-bit/plugins/out_udp/udp_conf.h32
-rw-r--r--src/fluent-bit/plugins/out_vivo_exporter/CMakeLists.txt15
-rw-r--r--src/fluent-bit/plugins/out_vivo_exporter/vivo.c343
-rw-r--r--src/fluent-bit/plugins/out_vivo_exporter/vivo.h45
-rw-r--r--src/fluent-bit/plugins/out_vivo_exporter/vivo_http.c266
-rw-r--r--src/fluent-bit/plugins/out_vivo_exporter/vivo_http.h56
-rw-r--r--src/fluent-bit/plugins/out_vivo_exporter/vivo_stream.c239
-rw-r--r--src/fluent-bit/plugins/out_vivo_exporter/vivo_stream.h59
-rw-r--r--src/fluent-bit/plugins/out_websocket/CMakeLists.txt5
-rw-r--r--src/fluent-bit/plugins/out_websocket/websocket.c331
-rw-r--r--src/fluent-bit/plugins/out_websocket/websocket.h54
-rw-r--r--src/fluent-bit/plugins/out_websocket/websocket_conf.c159
-rw-r--r--src/fluent-bit/plugins/out_websocket/websocket_conf.h32
-rw-r--r--src/fluent-bit/plugins/processor_attributes/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/processor_attributes/attributes.c1408
-rw-r--r--src/fluent-bit/plugins/processor_attributes/variant_utils.h626
-rw-r--r--src/fluent-bit/plugins/processor_labels/CMakeLists.txt4
-rw-r--r--src/fluent-bit/plugins/processor_labels/labels.c1784
669 files changed, 149123 insertions, 0 deletions
diff --git a/src/fluent-bit/plugins/CMakeLists.txt b/src/fluent-bit/plugins/CMakeLists.txt
new file mode 100644
index 000000000..36299074a
--- /dev/null
+++ b/src/fluent-bit/plugins/CMakeLists.txt
@@ -0,0 +1,416 @@
+set(flb_plugins "" CACHE INTERNAL "flb_plugins")
+
+# REGISTER_CUSTOM_PLUGIN
+macro(REGISTER_CUSTOM_PLUGIN name)
+ string(FIND ${name} "=" pos)
+ if(pos GREATER -1)
+ string(REPLACE "=" ";" list ${name})
+ list(GET list 0 p_name)
+ list(GET list 1 p_path)
+ message(STATUS "EXTERNAL CUSTOM PLUGIN name='${p_name}' path='${p_path}'")
+ else()
+ set(p_name ${name})
+ endif()
+
+ string(TOUPPER ${p_name} NAME)
+ if(FLB_${NAME} OR p_path)
+ set(FLB_IN_PLUGINS_DECL "${FLB_CUSTOM_PLUGINS_DECL}extern struct flb_custom_plugin ${p_name}_plugin;\n")
+
+ # C code
+ set(C_CODE " custom = flb_malloc(sizeof(struct flb_custom_plugin));\n")
+ set(C_CODE "${C_CODE} if (!custom) {\n")
+ set(C_CODE "${C_CODE} flb_errno();\n")
+ set(C_CODE "${C_CODE} return -1;\n")
+ set(C_CODE "${C_CODE} }\n")
+ set(C_CODE "${C_CODE} memcpy(custom, &${p_name}_plugin, sizeof(struct flb_custom_plugin));\n")
+ set(C_CODE "${C_CODE} mk_list_add(&custom->_head, &config->custom_plugins);\n\n")
+
+ set(FLB_IN_PLUGINS_ADD "${FLB_CUSTOM_PLUGINS_ADD}${C_CODE}")
+
+ if (p_path)
+ add_subdirectory(${p_path} ${p_path})
+ else()
+ add_subdirectory(${p_name})
+ endif()
+ set(flb_plugins "${flb_plugins}flb-plugin-${p_name};")
+ endif()
+endmacro()
+
+# REGISTER_IN_PLUGIN
+macro(REGISTER_IN_PLUGIN name)
+ string(FIND ${name} "=" pos)
+ if(pos GREATER -1)
+ string(REPLACE "=" ";" list ${name})
+ list(GET list 0 p_name)
+ list(GET list 1 p_path)
+ message(STATUS "EXTERNAL IN PLUGIN name='${p_name}' path='${p_path}'")
+ else()
+ set(p_name ${name})
+ endif()
+
+ string(TOUPPER ${p_name} NAME)
+ if(FLB_${NAME} OR p_path)
+ set(FLB_IN_PLUGINS_DECL "${FLB_IN_PLUGINS_DECL}extern struct flb_input_plugin ${p_name}_plugin;\n")
+
+ # C code
+ set(C_CODE " in = flb_malloc(sizeof(struct flb_input_plugin));\n")
+ set(C_CODE "${C_CODE} if (!in) {\n")
+ set(C_CODE "${C_CODE} flb_errno();\n")
+ set(C_CODE "${C_CODE} return -1;\n")
+ set(C_CODE "${C_CODE} }\n")
+ set(C_CODE "${C_CODE} memcpy(in, &${p_name}_plugin, sizeof(struct flb_input_plugin));\n")
+ set(C_CODE "${C_CODE} mk_list_add(&in->_head, &config->in_plugins);\n\n")
+
+ set(FLB_IN_PLUGINS_ADD "${FLB_IN_PLUGINS_ADD}${C_CODE}")
+
+ if (p_path)
+ add_subdirectory(${p_path} ${p_path})
+ else()
+ add_subdirectory(${p_name})
+ endif()
+ set(flb_plugins "${flb_plugins}flb-plugin-${p_name};")
+ endif()
+endmacro()
+
+# REGISTER_OUT_PLUGIN
+macro(REGISTER_OUT_PLUGIN name)
+ string(FIND ${name} "=" pos)
+ if(pos GREATER -1)
+ string(REPLACE "=" ";" list ${name})
+ list(GET list 0 p_name)
+ list(GET list 1 p_path)
+ message(STATUS "EXTERNAL OUT PLUGIN name='${p_name}' path='${p_path}'")
+ else()
+ set(p_name ${name})
+ endif()
+
+ string(TOUPPER ${p_name} NAME)
+ if(FLB_${NAME} OR p_path)
+ set(FLB_OUT_PLUGINS_DECL "${FLB_OUT_PLUGINS_DECL}extern struct flb_output_plugin ${p_name}_plugin;\n")
+
+ # C code
+ set(C_CODE " out = flb_malloc(sizeof(struct flb_output_plugin));\n")
+ set(C_CODE "${C_CODE} if (!out) {\n")
+ set(C_CODE "${C_CODE} flb_errno();\n")
+ set(C_CODE "${C_CODE} return -1;\n")
+ set(C_CODE "${C_CODE} }\n")
+ set(C_CODE "${C_CODE} memcpy(out, &${p_name}_plugin, sizeof(struct flb_output_plugin));\n")
+ set(C_CODE "${C_CODE} mk_list_add(&out->_head, &config->out_plugins);\n\n")
+
+ set(FLB_OUT_PLUGINS_ADD "${FLB_OUT_PLUGINS_ADD}${C_CODE}")
+ if (p_path)
+ add_subdirectory(${p_path} ${p_path})
+ else()
+ add_subdirectory(${p_name})
+ endif()
+ set(flb_plugins "${flb_plugins}flb-plugin-${p_name};")
+ endif()
+endmacro()
+
+
+# REGISTER_PROCESSOR_PLUGIN
+macro(REGISTER_PROCESSOR_PLUGIN name)
+ string(FIND ${name} "=" pos)
+ if(pos GREATER -1)
+ string(REPLACE "=" ";" list ${name})
+ list(GET list 0 p_name)
+ list(GET list 1 p_path)
+ message(STATUS "EXTERNAL PROCESSOR PLUGIN name='${p_name}' path='${p_path}'")
+ else()
+ set(p_name ${name})
+ endif()
+
+ string(TOUPPER ${p_name} NAME)
+ if(FLB_${NAME} OR p_path)
+ set(FLB_PROCESSOR_PLUGINS_DECL "${FLB_PROCESSOR_PLUGINS_DECL}extern struct flb_processor_plugin ${p_name}_plugin;\n")
+
+ # C code
+ set(C_CODE " processor = flb_malloc(sizeof(struct flb_processor_plugin));\n")
+ set(C_CODE "${C_CODE} if (!processor) {\n")
+ set(C_CODE "${C_CODE} flb_errno();\n")
+ set(C_CODE "${C_CODE} return -1;\n")
+ set(C_CODE "${C_CODE} }\n")
+ set(C_CODE "${C_CODE} memcpy(processor, &${p_name}_plugin, sizeof(struct flb_processor_plugin));\n")
+ set(C_CODE "${C_CODE} mk_list_add(&processor->_head, &config->processor_plugins);\n\n")
+
+ set(FLB_PROCESSOR_PLUGINS_ADD "${FLB_PROCESSOR_PLUGINS_ADD}${C_CODE}")
+ if (p_path)
+ add_subdirectory(${p_path} ${p_path})
+ else()
+ add_subdirectory(${p_name})
+ endif()
+ set(flb_plugins "${flb_plugins}flb-plugin-${p_name};")
+ endif()
+endmacro()
+
+# REGISTER_FILTER_PLUGIN
+macro(REGISTER_FILTER_PLUGIN name)
+ string(FIND ${name} "=" pos)
+ if(pos GREATER -1)
+ string(REPLACE "=" ";" list ${name})
+ list(GET list 0 p_name)
+ list(GET list 1 p_path)
+ message(STATUS "EXTERNAL FILTER PLUGIN name='${p_name}' path='${p_path}'")
+ else()
+ set(p_name ${name})
+ endif()
+
+ string(TOUPPER ${p_name} NAME)
+ if(FLB_${NAME} OR p_path)
+ set(FLB_FILTER_PLUGINS_DECL "${FLB_FILTER_PLUGINS_DECL}extern struct flb_filter_plugin ${p_name}_plugin;\n")
+
+ # C code
+ set(C_CODE " filter = flb_malloc(sizeof(struct flb_filter_plugin));\n")
+ set(C_CODE "${C_CODE} if (!filter) {\n")
+ set(C_CODE "${C_CODE} flb_errno();\n")
+ set(C_CODE "${C_CODE} return -1;\n")
+ set(C_CODE "${C_CODE} }\n")
+ set(C_CODE "${C_CODE} memcpy(filter, &${p_name}_plugin, sizeof(struct flb_filter_plugin));\n")
+ set(C_CODE "${C_CODE} mk_list_add(&filter->_head, &config->filter_plugins);\n\n")
+
+ set(FLB_FILTER_PLUGINS_ADD "${FLB_FILTER_PLUGINS_ADD}${C_CODE}")
+ if (p_path)
+ add_subdirectory(${p_path} ${p_path})
+ else()
+ add_subdirectory(${p_name})
+ endif()
+ set(flb_plugins "${flb_plugins}flb-plugin-${p_name};")
+ endif()
+endmacro()
+
+# FLB_PLUGIN: used by plugins to perform registration and linking
+macro(FLB_PLUGIN name src deps)
+ add_library(flb-plugin-${name} STATIC ${src})
+ add_sanitizers(flb-plugin-${name})
+ target_link_libraries(flb-plugin-${name} fluent-bit-static msgpack-c-static ${deps})
+endmacro()
+
+
+# ======================
+# Plugins Registration
+# ======================
+
+# Custom Plugins
+REGISTER_CUSTOM_PLUGIN("custom_calyptia")
+
+# These plugins work only on Linux
+if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
+ REGISTER_IN_PLUGIN("in_cpu")
+ REGISTER_IN_PLUGIN("in_mem")
+ REGISTER_IN_PLUGIN("in_thermal")
+ REGISTER_IN_PLUGIN("in_kmsg")
+ REGISTER_IN_PLUGIN("in_proc")
+ REGISTER_IN_PLUGIN("in_disk")
+ REGISTER_IN_PLUGIN("in_systemd")
+ REGISTER_IN_PLUGIN("in_netif")
+ REGISTER_IN_PLUGIN("in_docker")
+ REGISTER_IN_PLUGIN("in_docker_events")
+ REGISTER_IN_PLUGIN("in_node_exporter_metrics")
+ REGISTER_IN_PLUGIN("in_podman_metrics")
+endif()
+
+REGISTER_IN_PLUGIN("in_kubernetes_events")
+REGISTER_IN_PLUGIN("in_kafka")
+REGISTER_IN_PLUGIN("in_fluentbit_metrics")
+REGISTER_IN_PLUGIN("in_prometheus_scrape")
+REGISTER_IN_PLUGIN("in_emitter")
+REGISTER_IN_PLUGIN("in_tail")
+REGISTER_IN_PLUGIN("in_dummy")
+REGISTER_IN_PLUGIN("in_head")
+REGISTER_IN_PLUGIN("in_health")
+REGISTER_IN_PLUGIN("in_http")
+REGISTER_IN_PLUGIN("in_collectd")
+REGISTER_IN_PLUGIN("in_statsd")
+REGISTER_IN_PLUGIN("in_opentelemetry")
+REGISTER_IN_PLUGIN("in_elasticsearch")
+REGISTER_IN_PLUGIN("in_calyptia_fleet")
+REGISTER_IN_PLUGIN("in_splunk")
+
+# Test the event loop messaging when used in threaded mode
+REGISTER_IN_PLUGIN("in_event_test")
+
+# Send different event types: logs, metrics and traces
+REGISTER_IN_PLUGIN("in_event_type")
+
+
+if (FLB_IN_STORAGE_BACKLOG)
+ REGISTER_IN_PLUGIN("in_storage_backlog")
+endif()
+
+REGISTER_IN_PLUGIN("in_nginx_exporter_metrics")
+
+if (FLB_STREAM_PROCESSOR)
+ REGISTER_IN_PLUGIN("in_stream_processor")
+endif()
+
+if (FLB_SYSTEM_WINDOWS)
+ REGISTER_IN_PLUGIN("in_winlog")
+ REGISTER_IN_PLUGIN("in_winstat")
+ REGISTER_IN_PLUGIN("in_winevtlog")
+ REGISTER_IN_PLUGIN("in_windows_exporter_metrics")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W2")
+else()
+ REGISTER_IN_PLUGIN("in_serial")
+endif()
+
+if(FLB_REGEX)
+ REGISTER_IN_PLUGIN("in_stdin")
+endif()
+
+if(FLB_PARSER)
+ REGISTER_IN_PLUGIN("in_syslog")
+ REGISTER_IN_PLUGIN("in_exec")
+endif()
+
+REGISTER_IN_PLUGIN("in_udp")
+
+if(FLB_WASM)
+ REGISTER_IN_PLUGIN("in_exec_wasi")
+endif()
+
+REGISTER_IN_PLUGIN("in_tcp")
+REGISTER_IN_PLUGIN("in_unix_socket")
+REGISTER_IN_PLUGIN("in_mqtt")
+REGISTER_IN_PLUGIN("in_lib")
+REGISTER_IN_PLUGIN("in_forward")
+REGISTER_IN_PLUGIN("in_random")
+
+# PROCESSORS
+# ==========
+REGISTER_PROCESSOR_PLUGIN("processor_labels")
+REGISTER_PROCESSOR_PLUGIN("processor_attributes")
+
+# OUTPUTS
+# =======
+REGISTER_OUT_PLUGIN("out_azure")
+REGISTER_OUT_PLUGIN("out_azure_blob")
+REGISTER_OUT_PLUGIN("out_azure_logs_ingestion")
+REGISTER_OUT_PLUGIN("out_azure_kusto")
+REGISTER_OUT_PLUGIN("out_bigquery")
+REGISTER_OUT_PLUGIN("out_calyptia")
+REGISTER_OUT_PLUGIN("out_counter")
+REGISTER_OUT_PLUGIN("out_datadog")
+REGISTER_OUT_PLUGIN("out_es")
+REGISTER_OUT_PLUGIN("out_exit")
+REGISTER_OUT_PLUGIN("out_file")
+REGISTER_OUT_PLUGIN("out_forward")
+REGISTER_OUT_PLUGIN("out_http")
+REGISTER_OUT_PLUGIN("out_influxdb")
+REGISTER_OUT_PLUGIN("out_logdna")
+REGISTER_OUT_PLUGIN("out_loki")
+REGISTER_OUT_PLUGIN("out_kafka")
+REGISTER_OUT_PLUGIN("out_kafka_rest")
+REGISTER_OUT_PLUGIN("out_nats")
+REGISTER_OUT_PLUGIN("out_nrlogs")
+REGISTER_OUT_PLUGIN("out_null")
+REGISTER_OUT_PLUGIN("out_opensearch")
+REGISTER_OUT_PLUGIN("out_oracle_log_analytics")
+
+if (NOT CMAKE_SYSTEM_NAME MATCHES "Windows")
+ REGISTER_OUT_PLUGIN("out_plot")
+endif()
+
+REGISTER_OUT_PLUGIN("out_pgsql")
+REGISTER_OUT_PLUGIN("out_retry")
+REGISTER_OUT_PLUGIN("out_skywalking")
+REGISTER_OUT_PLUGIN("out_slack")
+REGISTER_OUT_PLUGIN("out_splunk")
+REGISTER_OUT_PLUGIN("out_stackdriver")
+REGISTER_OUT_PLUGIN("out_stdout")
+REGISTER_OUT_PLUGIN("out_syslog")
+REGISTER_OUT_PLUGIN("out_tcp")
+REGISTER_OUT_PLUGIN("out_udp")
+REGISTER_OUT_PLUGIN("out_td")
+REGISTER_OUT_PLUGIN("out_lib")
+REGISTER_OUT_PLUGIN("out_flowcounter")
+REGISTER_OUT_PLUGIN("out_gelf")
+REGISTER_OUT_PLUGIN("out_websocket")
+REGISTER_OUT_PLUGIN("out_cloudwatch_logs")
+REGISTER_OUT_PLUGIN("out_kinesis_firehose")
+REGISTER_OUT_PLUGIN("out_kinesis_streams")
+REGISTER_OUT_PLUGIN("out_opentelemetry")
+REGISTER_OUT_PLUGIN("out_prometheus_exporter")
+REGISTER_OUT_PLUGIN("out_prometheus_remote_write")
+REGISTER_OUT_PLUGIN("out_s3")
+REGISTER_OUT_PLUGIN("out_vivo_exporter")
+REGISTER_OUT_PLUGIN("out_chronicle")
+
+# FILTERS
+# =======
+REGISTER_FILTER_PLUGIN("filter_alter_size")
+REGISTER_FILTER_PLUGIN("filter_aws")
+REGISTER_FILTER_PLUGIN("filter_checklist")
+REGISTER_FILTER_PLUGIN("filter_ecs")
+REGISTER_FILTER_PLUGIN("filter_record_modifier")
+REGISTER_FILTER_PLUGIN("filter_throttle")
+REGISTER_FILTER_PLUGIN("filter_throttle_size")
+REGISTER_FILTER_PLUGIN("filter_tensorflow")
+REGISTER_FILTER_PLUGIN("filter_type_converter")
+
+if(FLB_REGEX)
+ REGISTER_FILTER_PLUGIN("filter_kubernetes")
+ REGISTER_FILTER_PLUGIN("filter_modify")
+ REGISTER_FILTER_PLUGIN("filter_multiline")
+ REGISTER_FILTER_PLUGIN("filter_nest")
+ REGISTER_FILTER_PLUGIN("filter_parser")
+endif()
+
+if(FLB_RECORD_ACCESSOR)
+ REGISTER_FILTER_PLUGIN("filter_expect")
+ REGISTER_FILTER_PLUGIN("filter_grep")
+ REGISTER_FILTER_PLUGIN("filter_rewrite_tag")
+endif()
+
+if(FLB_METRICS)
+ REGISTER_FILTER_PLUGIN("filter_log_to_metrics")
+endif()
+
+if(FLB_LUAJIT)
+ REGISTER_FILTER_PLUGIN("filter_lua")
+endif()
+
+REGISTER_FILTER_PLUGIN("filter_stdout")
+
+REGISTER_FILTER_PLUGIN("filter_geoip2")
+
+REGISTER_FILTER_PLUGIN("filter_nightfall")
+if (FLB_WASM)
+ REGISTER_FILTER_PLUGIN("filter_wasm")
+endif ()
+
+# Register external input and output plugins
+if(EXT_IN_PLUGINS)
+ string(REPLACE "," ";" plugins ${EXT_IN_PLUGINS})
+ foreach(entry ${plugins})
+ REGISTER_IN_PLUGIN(${entry})
+ endforeach()
+endif()
+
+if(EXT_OUT_PLUGINS)
+ string(REPLACE "," ";" plugins ${EXT_OUT_PLUGINS})
+ foreach(entry ${plugins})
+ REGISTER_OUT_PLUGIN(${entry})
+ endforeach()
+endif()
+
+if(EXT_PROCESSOR_PLUGINS)
+ string(REPLACE "," ";" plugins ${EXT_PROCESSOR_PLUGINS})
+ foreach(entry ${plugins})
+ REGISTER_PROCESSOR_PLUGIN(${entry})
+ endforeach()
+endif()
+
+if(EXT_FILTER_PLUGINS)
+ string(REPLACE "," ";" plugins ${EXT_FILTER_PLUGINS})
+ foreach(entry ${plugins})
+ REGISTER_FILTER_PLUGIN(${entry})
+ endforeach()
+endif()
+
+# Generate the header from the template
+configure_file(
+ "${PROJECT_SOURCE_DIR}/include/fluent-bit/flb_plugins.h.in"
+ "${PROJECT_SOURCE_DIR}/include/fluent-bit/flb_plugins.h"
+ )
+
+set(FLB_PLUGINS "${flb_plugins}" PARENT_SCOPE)
diff --git a/src/fluent-bit/plugins/custom_calyptia/CMakeLists.txt b/src/fluent-bit/plugins/custom_calyptia/CMakeLists.txt
new file mode 100644
index 000000000..6449c74e3
--- /dev/null
+++ b/src/fluent-bit/plugins/custom_calyptia/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ calyptia.c)
+
+FLB_PLUGIN(custom_calyptia "${src}" "")
diff --git a/src/fluent-bit/plugins/custom_calyptia/calyptia.c b/src/fluent-bit/plugins/custom_calyptia/calyptia.c
new file mode 100644
index 000000000..1cfbbd5ce
--- /dev/null
+++ b/src/fluent-bit/plugins/custom_calyptia/calyptia.c
@@ -0,0 +1,615 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_custom_plugin.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_env.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_router.h>
+
+/* pipeline plugins */
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_output.h>
+
+#include <fluent-bit/flb_hash.h>
+
+struct calyptia {
+ /* config map options */
+ flb_sds_t api_key;
+ flb_sds_t store_path;
+ flb_sds_t cloud_host;
+ flb_sds_t cloud_port;
+ flb_sds_t machine_id;
+
+/* used for reporting chunk trace records. */
+#ifdef FLB_HAVE_CHUNK_TRACE
+ flb_sds_t pipeline_id;
+#endif /* FLB_HAVE_CHUNK_TRACE */
+
+ int cloud_tls;
+ int cloud_tls_verify;
+
+ /* config reader for 'add_label' */
+ struct mk_list *add_labels;
+
+ /* instances */
+ struct flb_input_instance *i;
+ struct flb_output_instance *o;
+ struct flb_input_instance *fleet;
+ struct flb_custom_instance *ins;
+
+ /* Fleet configuration */
+ flb_sds_t fleet_id; /* fleet-id */
+ flb_sds_t fleet_name;
+ flb_sds_t fleet_config_dir; /* fleet configuration directory */
+ int fleet_interval_sec;
+ int fleet_interval_nsec;
+};
+
+/*
+ * Check whether the key names a sensitive data field; if so, report it so the
+ * caller can redact the value. We never share any sensitive data.
+ */
+static int is_sensitive_property(char *key)
+{
+
+ if (strcasecmp(key, "password") == 0 ||
+ strcasecmp(key, "passwd") == 0 ||
+ strcasecmp(key, "user") == 0 ||
+ strcasecmp(key, "http_user") == 0 ||
+ strcasecmp(key, "http_passwd") == 0 ||
+ strcasecmp(key, "shared_key") == 0 ||
+ strcasecmp(key, "endpoint") == 0 ||
+ strcasecmp(key, "apikey") == 0 ||
+ strcasecmp(key, "private_key") == 0 ||
+ strcasecmp(key, "service_account_secret") == 0 ||
+ strcasecmp(key, "splunk_token") == 0 ||
+ strcasecmp(key, "logdna_host") == 0 ||
+ strcasecmp(key, "api_key") == 0 ||
+ strcasecmp(key, "hostname") == 0 ||
+ strcasecmp(key, "license_key") == 0 ||
+ strcasecmp(key, "base_uri") == 0 ||
+ strcasecmp(key, "api") == 0) {
+
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+static void pipeline_config_add_properties(flb_sds_t *buf, struct mk_list *props)
+{
+ struct mk_list *head;
+ struct flb_kv *kv;
+
+ mk_list_foreach(head, props) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+
+ if (kv->key != NULL && kv->val != NULL) {
+ flb_sds_printf(buf, " %s ", kv->key);
+
+ if (is_sensitive_property(kv->key)) {
+ flb_sds_cat_safe(buf, "--redacted--", strlen("--redacted--"));
+ }
+ else {
+ flb_sds_cat_safe(buf, kv->val, strlen(kv->val));
+ }
+
+ flb_sds_cat_safe(buf, "\n", 1);
+ }
+ }
+}
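
For context, here is a minimal standalone C sketch of the redaction idea used by is_sensitive_property() and pipeline_config_add_properties() above; it is not part of this patch, and the key list and helper names are invented for the example. Any property whose key looks sensitive is printed as "--redacted--" instead of its value.

#include <stdio.h>
#include <strings.h>

/* invented stand-in for is_sensitive_property(): keys in this list are redacted */
static const char *sensitive_keys[] = { "password", "api_key", "shared_key", NULL };

static int key_is_sensitive(const char *key)
{
    int i;

    for (i = 0; sensitive_keys[i] != NULL; i++) {
        if (strcasecmp(key, sensitive_keys[i]) == 0) {
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    const char *keys[]   = { "host", "api_key", "port" };
    const char *values[] = { "example.com", "secret-token", "443" };
    int i;

    for (i = 0; i < 3; i++) {
        /* sensitive values are replaced with a placeholder instead of being shared */
        printf("    %s %s\n", keys[i],
               key_is_sensitive(keys[i]) ? "--redacted--" : values[i]);
    }
    return 0;
}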
+
+flb_sds_t custom_calyptia_pipeline_config_get(struct flb_config *ctx)
+{
+ char tmp[32];
+ flb_sds_t buf;
+ struct mk_list *head;
+ struct flb_input_instance *i_ins;
+ struct flb_filter_instance *f_ins;
+ struct flb_output_instance *o_ins;
+
+ buf = flb_sds_create_size(2048);
+
+ if (!buf) {
+ return NULL;
+ }
+
+ /* [INPUT] */
+ mk_list_foreach(head, &ctx->inputs) {
+ i_ins = mk_list_entry(head, struct flb_input_instance, _head);
+ flb_sds_printf(&buf, "[INPUT]\n");
+ flb_sds_printf(&buf, " name %s\n", i_ins->name);
+
+ if (i_ins->alias) {
+ flb_sds_printf(&buf, " alias %s\n", i_ins->alias);
+ }
+
+ if (i_ins->tag) {
+ flb_sds_printf(&buf, " tag %s\n", i_ins->tag);
+ }
+
+ if (i_ins->mem_buf_limit > 0) {
+ flb_utils_bytes_to_human_readable_size(i_ins->mem_buf_limit,
+ tmp, sizeof(tmp) - 1);
+ flb_sds_printf(&buf, " mem_buf_limit %s\n", tmp);
+ }
+
+ pipeline_config_add_properties(&buf, &i_ins->properties);
+ }
+ flb_sds_printf(&buf, "\n");
+
+ /* Config: [FILTER] */
+ mk_list_foreach(head, &ctx->filters) {
+ f_ins = mk_list_entry(head, struct flb_filter_instance, _head);
+
+ flb_sds_printf(&buf, "[FILTER]\n");
+ flb_sds_printf(&buf, " name %s\n", f_ins->name);
+ flb_sds_printf(&buf, " match %s\n", f_ins->match);
+
+ pipeline_config_add_properties(&buf, &f_ins->properties);
+ }
+ flb_sds_printf(&buf, "\n");
+
+ /* Config: [OUTPUT] */
+ mk_list_foreach(head, &ctx->outputs) {
+ o_ins = mk_list_entry(head, struct flb_output_instance, _head);
+
+ flb_sds_printf(&buf, "[OUTPUT]\n");
+ flb_sds_printf(&buf, " name %s\n", o_ins->name);
+
+ if (o_ins->match) {
+ flb_sds_printf(&buf, " match %s\n", o_ins->match);
+ }
+ else {
+ flb_sds_printf(&buf, " match *\n");
+ }
+
+#ifdef FLB_HAVE_TLS
+ if (o_ins->use_tls == FLB_TRUE) {
+ flb_sds_printf(&buf, " tls %s\n", o_ins->use_tls ? "on" : "off");
+ flb_sds_printf(&buf, " tls.verify %s\n",
+ o_ins->tls_verify ? "on": "off");
+
+ if (o_ins->tls_ca_file) {
+ flb_sds_printf(&buf, " tls.ca_file %s\n",
+ o_ins->tls_ca_file);
+ }
+
+ if (o_ins->tls_crt_file) {
+ flb_sds_printf(&buf, " tls.crt_file %s\n",
+ o_ins->tls_crt_file);
+ }
+
+ if (o_ins->tls_key_file) {
+ flb_sds_printf(&buf, " tls.key_file %s\n",
+ o_ins->tls_key_file);
+ }
+
+ if (o_ins->tls_key_passwd) {
+ flb_sds_printf(&buf, " tls.key_passwd --redacted--\n");
+ }
+ }
+#endif
+
+ if (o_ins->retry_limit == FLB_OUT_RETRY_UNLIMITED) {
+ flb_sds_printf(&buf, " retry_limit no_limits\n");
+ }
+ else if (o_ins->retry_limit == FLB_OUT_RETRY_NONE) {
+ flb_sds_printf(&buf, " retry_limit no_retries\n");
+ }
+ else {
+ flb_sds_printf(&buf, " retry_limit %i\n", o_ins->retry_limit);
+ }
+
+ if (o_ins->host.name) {
+ flb_sds_printf(&buf, " host --redacted--\n");
+ }
+
+ pipeline_config_add_properties(&buf, &o_ins->properties);
+ flb_sds_printf(&buf, "\n");
+ }
+
+ return buf;
+}
+
+static struct flb_output_instance *setup_cloud_output(struct flb_config *config, struct calyptia *ctx)
+{
+ int ret;
+ struct flb_output_instance *cloud;
+ struct mk_list *head;
+ struct flb_slist_entry *key = NULL;
+ struct flb_slist_entry *val = NULL;
+ flb_sds_t label;
+ struct flb_config_map_val *mv;
+
+ cloud = flb_output_new(config, "calyptia", ctx, FLB_FALSE);
+
+ if (!cloud) {
+ flb_plg_error(ctx->ins, "could not load Calyptia Cloud connector");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* direct connect / routing */
+ ret = flb_router_connect_direct(ctx->i, cloud);
+
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "could not load Calyptia Cloud connector");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ if (ctx->add_labels && mk_list_size(ctx->add_labels) > 0) {
+
+ /* iterate all 'add_label' definitions */
+ flb_config_map_foreach(head, mv, ctx->add_labels) {
+ key = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ val = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ label = flb_sds_create_size(strlen(key->str) + strlen(val->str) + 1);
+
+ if (!label) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ flb_sds_printf(&label, "%s %s", key->str, val->str);
+ flb_output_set_property(cloud, "add_label", label);
+ flb_sds_destroy(label);
+ }
+ }
+
+ flb_output_set_property(cloud, "match", "_calyptia_cloud");
+ flb_output_set_property(cloud, "api_key", ctx->api_key);
+
+ if (ctx->store_path) {
+ flb_output_set_property(cloud, "store_path", ctx->store_path);
+ }
+
+ if (ctx->machine_id) {
+ flb_output_set_property(cloud, "machine_id", ctx->machine_id);
+ }
+
+ /* Override network details: development purposes only */
+ if (ctx->cloud_host) {
+ flb_output_set_property(cloud, "cloud_host", ctx->cloud_host);
+ }
+
+ if (ctx->cloud_port) {
+ flb_output_set_property(cloud, "cloud_port", ctx->cloud_port);
+ }
+
+ if (ctx->cloud_tls) {
+ flb_output_set_property(cloud, "tls", "true");
+ }
+ else {
+ flb_output_set_property(cloud, "tls", "false");
+ }
+
+ if (ctx->cloud_tls_verify) {
+ flb_output_set_property(cloud, "tls.verify", "true");
+ }
+ else {
+ flb_output_set_property(cloud, "tls.verify", "false");
+ }
+
+ if (ctx->fleet_id) {
+ flb_output_set_property(cloud, "fleet_id", ctx->fleet_id);
+ label = flb_sds_create_size(strlen("fleet_id") + strlen(ctx->fleet_id) + 1);
+
+ if (!label) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ flb_sds_printf(&label, "fleet_id %s", ctx->fleet_id);
+ flb_output_set_property(cloud, "add_label", label);
+ flb_sds_destroy(label);
+ }
+
+#ifdef FLB_HAVE_CHUNK_TRACE
+ flb_output_set_property(cloud, "pipeline_id", ctx->pipeline_id);
+#endif /* FLB_HAVE_CHUNK_TRACE */
+
+ return cloud;
+}
+
+static flb_sds_t sha256_to_hex(unsigned char *sha256)
+{
+ int idx;
+ flb_sds_t hex;
+ flb_sds_t tmp;
+
+ hex = flb_sds_create_size(64);
+
+ if (!hex) {
+ return NULL;
+ }
+
+ for (idx = 0; idx < 32; idx++) {
+ tmp = flb_sds_printf(&hex, "%02x", sha256[idx]);
+
+ if (!tmp) {
+ flb_sds_destroy(hex);
+ return NULL;
+ }
+
+ hex = tmp;
+ }
+
+ flb_sds_len_set(hex, 64);
+ return hex;
+}
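
To make the conversion above concrete, here is a small standalone sketch (not part of this patch; names are invented) of turning a 32-byte SHA-256 digest into a 64-character hex string, the same "%02x per byte" idea sha256_to_hex() applies via flb_sds_printf():

#include <stdio.h>

static void digest_to_hex(const unsigned char *digest, char *hex_out /* >= 65 bytes */)
{
    int i;

    /* each byte becomes two hex characters; snprintf also writes the trailing NUL */
    for (i = 0; i < 32; i++) {
        snprintf(hex_out + (i * 2), 3, "%02x", digest[i]);
    }
}

int main(void)
{
    unsigned char digest[32] = { 0xde, 0xad, 0xbe, 0xef };  /* remaining bytes are zero */
    char hex[65];

    digest_to_hex(digest, hex);
    printf("%s\n", hex);  /* prints "deadbeef" followed by 56 zeros */
    return 0;
}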
+
+static flb_sds_t get_machine_id(struct calyptia *ctx)
+{
+ int ret;
+ char *buf;
+ size_t blen;
+ unsigned char sha256_buf[64] = {0};
+
+ /* retrieve raw machine id */
+ ret = flb_utils_get_machine_id(&buf, &blen);
+
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not obtain machine id");
+ return NULL;
+ }
+
+ ret = flb_hash_simple(FLB_HASH_SHA256,
+ (unsigned char *) buf,
+ blen,
+ sha256_buf,
+ sizeof(sha256_buf));
+ flb_free(buf);
+
+ if (ret != FLB_CRYPTO_SUCCESS) {
+ return NULL;
+ }
+
+ /* convert to hex */
+ return sha256_to_hex(sha256_buf);
+}
+
+static int cb_calyptia_init(struct flb_custom_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ int ret;
+ struct calyptia *ctx;
+ int is_fleet_mode;
+ (void) data;
+
+ ctx = flb_calloc(1, sizeof(struct calyptia));
+
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+
+ /* Load the config map */
+ ret = flb_custom_config_map_set(ins, (void *) ctx);
+
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* map instance and local context */
+ flb_custom_set_context(ins, ctx);
+
+    /* If no machine_id has been provided via a configuration option, get it from the local machine-id. */
+ if (!ctx->machine_id) {
+ /* machine id */
+ ctx->machine_id = get_machine_id(ctx);
+
+ if (ctx->machine_id == NULL) {
+ flb_plg_error(ctx->ins, "unable to retrieve machine_id");
+ return -1;
+ }
+ }
+
+ /* input collector */
+ ctx->i = flb_input_new(config, "fluentbit_metrics", NULL, FLB_TRUE);
+
+ if (!ctx->i) {
+ flb_plg_error(ctx->ins, "could not load metrics collector");
+ return -1;
+ }
+
+ flb_input_set_property(ctx->i, "tag", "_calyptia_cloud");
+ flb_input_set_property(ctx->i, "scrape_on_start", "true");
+ flb_input_set_property(ctx->i, "scrape_interval", "30");
+
+ if (ctx->fleet_name || ctx->fleet_id) {
+ is_fleet_mode = FLB_TRUE;
+ }
+ else {
+ is_fleet_mode = FLB_FALSE;
+ }
+
+ /* output cloud connector */
+ if ((is_fleet_mode == FLB_TRUE && ctx->fleet_id != NULL) ||
+ (is_fleet_mode == FLB_FALSE)) {
+ ctx->o = setup_cloud_output(config, ctx);
+
+ if (ctx->o == NULL) {
+ return -1;
+ }
+ }
+
+ if (ctx->fleet_id || ctx->fleet_name) {
+ ctx->fleet = flb_input_new(config, "calyptia_fleet", NULL, FLB_FALSE);
+
+ if (!ctx->fleet) {
+ flb_plg_error(ctx->ins, "could not load Calyptia Fleet plugin");
+ return -1;
+ }
+
+ if (ctx->fleet_name) {
+ // TODO: set this once the fleet_id has been retrieved...
+ // flb_output_set_property(ctx->o, "fleet_id", ctx->fleet_id);
+ flb_input_set_property(ctx->fleet, "fleet_name", ctx->fleet_name);
+ }
+ else {
+ flb_output_set_property(ctx->o, "fleet_id", ctx->fleet_id);
+ flb_input_set_property(ctx->fleet, "fleet_id", ctx->fleet_id);
+ }
+
+ flb_input_set_property(ctx->fleet, "api_key", ctx->api_key);
+ flb_input_set_property(ctx->fleet, "host", ctx->cloud_host);
+ flb_input_set_property(ctx->fleet, "port", ctx->cloud_port);
+
+ if (ctx->cloud_tls == 1) {
+ flb_input_set_property(ctx->fleet, "tls", "on");
+ }
+ else {
+ flb_input_set_property(ctx->fleet, "tls", "off");
+ }
+
+ if (ctx->cloud_tls_verify == 1) {
+ flb_input_set_property(ctx->fleet, "tls.verify", "on");
+ }
+ else {
+ flb_input_set_property(ctx->fleet, "tls.verify", "off");
+ }
+
+ if (ctx->fleet_config_dir) {
+ flb_input_set_property(ctx->fleet, "config_dir", ctx->fleet_config_dir);
+ }
+
+ if (ctx->machine_id) {
+ flb_input_set_property(ctx->fleet, "machine_id", ctx->machine_id);
+ }
+ }
+
+ if (ctx->o) {
+ flb_router_connect(ctx->i, ctx->o);
+ }
+ flb_plg_info(ins, "custom initialized!");
+ return 0;
+}
+
+static int cb_calyptia_exit(void *data, struct flb_config *config)
+{
+ struct calyptia *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ flb_free(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "api_key", NULL,
+ 0, FLB_TRUE, offsetof(struct calyptia, api_key),
+ "Calyptia Cloud API Key."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "store_path", NULL,
+ 0, FLB_TRUE, offsetof(struct calyptia, store_path)
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "calyptia_host", "cloud-api.calyptia.com",
+ 0, FLB_TRUE, offsetof(struct calyptia, cloud_host),
+ ""
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "calyptia_port", "443",
+ 0, FLB_TRUE, offsetof(struct calyptia, cloud_port),
+ ""
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "calyptia_tls", "true",
+ 0, FLB_TRUE, offsetof(struct calyptia, cloud_tls),
+ ""
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "calyptia_tls.verify", "true",
+ 0, FLB_TRUE, offsetof(struct calyptia, cloud_tls_verify),
+ ""
+ },
+
+ {
+ FLB_CONFIG_MAP_SLIST_1, "add_label", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct calyptia, add_labels),
+ "Label to append to the generated metric."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "machine_id", NULL,
+ 0, FLB_TRUE, offsetof(struct calyptia, machine_id),
+ "Custom machine_id to be used when registering agent"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "fleet_id", NULL,
+ 0, FLB_TRUE, offsetof(struct calyptia, fleet_id),
+ "Fleet id to be used when registering agent in a fleet"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "fleet.config_dir", NULL,
+ 0, FLB_TRUE, offsetof(struct calyptia, fleet_config_dir),
+ "Base path for the configuration directory."
+ },
+ {
+ FLB_CONFIG_MAP_INT, "fleet.interval_sec", "-1",
+ 0, FLB_TRUE, offsetof(struct calyptia, fleet_interval_sec),
+ "Set the collector interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "fleet.interval_nsec", "-1",
+ 0, FLB_TRUE, offsetof(struct calyptia, fleet_interval_nsec),
+ "Set the collector interval (nanoseconds)"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "fleet_name", NULL,
+ 0, FLB_TRUE, offsetof(struct calyptia, fleet_name),
+ "Fleet name to be used when registering agent in a fleet"
+ },
+
+#ifdef FLB_HAVE_CHUNK_TRACE
+ {
+ FLB_CONFIG_MAP_STR, "pipeline_id", NULL,
+ 0, FLB_TRUE, offsetof(struct calyptia, pipeline_id),
+ "Pipeline ID for reporting to calyptia cloud."
+ },
+#endif /* FLB_HAVE_CHUNK_TRACE */
+
+ /* EOF */
+ {0}
+};
+
+struct flb_custom_plugin custom_calyptia_plugin = {
+ .name = "calyptia",
+ .description = "Calyptia Cloud",
+ .config_map = config_map,
+ .cb_init = cb_calyptia_init,
+ .cb_exit = cb_calyptia_exit,
+};
diff --git a/src/fluent-bit/plugins/filter_alter_size/CMakeLists.txt b/src/fluent-bit/plugins/filter_alter_size/CMakeLists.txt
new file mode 100644
index 000000000..c34d4f464
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_alter_size/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ alter_size.c)
+
+FLB_PLUGIN(filter_alter_size "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_alter_size/alter_size.c b/src/fluent-bit/plugins/filter_alter_size/alter_size.c
new file mode 100644
index 000000000..e20cccce7
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_alter_size/alter_size.c
@@ -0,0 +1,212 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <msgpack.h>
+
+struct flb_alter_size {
+ int add;
+ int remove;
+ struct flb_log_event_decoder *log_decoder;
+ struct flb_log_event_encoder *log_encoder;
+};
+
+static int cb_alter_size_init(struct flb_filter_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ int ret;
+ (void) data;
+ struct flb_alter_size *ctx;
+
+ ctx = flb_calloc(1, sizeof(struct flb_alter_size));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->log_decoder = flb_log_event_decoder_create(NULL, 0);
+
+ if (ctx->log_decoder == NULL) {
+ flb_plg_error(ins, "could not initialize event decoder");
+
+ return -1;
+ }
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(ins, "could not initialize event encoder");
+ flb_log_event_decoder_destroy(ctx->log_decoder);
+
+ return -1;
+ }
+
+ ret = flb_filter_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_log_event_decoder_destroy(ctx->log_decoder);
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ flb_free(ctx);
+ return -1;
+ }
+
+ if (ctx->add > 0 && ctx->remove > 0) {
+ flb_plg_error(ins, "cannot use 'add' and 'remove' at the same time");
+ flb_log_event_decoder_destroy(ctx->log_decoder);
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ flb_free(ctx);
+ return -1;
+ }
+
+ flb_filter_set_context(ins, ctx);
+ return 0;
+}
+
+static int cb_alter_size_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_size,
+ struct flb_filter_instance *ins,
+ struct flb_input_instance *i_ins,
+ void *filter_context,
+ struct flb_config *config)
+{
+ int i;
+ int len;
+ int ret;
+ int total;
+ int count = 0;
+ char tmp[32];
+ struct flb_log_event event;
+ struct flb_alter_size *ctx;
+
+ (void) config;
+ (void) i_ins;
+
+ ctx = (struct flb_alter_size *) filter_context;
+
+ if (ctx->add > 0) {
+ flb_plg_debug(ins, "add %i records", ctx->add);
+
+ /* append old data */
+ ret = flb_log_event_encoder_emit_raw_record(
+ ctx->log_encoder, data, bytes);
+
+ for (i = 0; i < ctx->add; i++) {
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ len = snprintf(tmp, sizeof(tmp) - 1, "alter_size %i", i);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("key"),
+ FLB_LOG_EVENT_STRING_VALUE(tmp, len));
+ }
+
+            if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+                ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+            }
+        }
+    }
+ else if (ctx->remove > 0) {
+ flb_plg_debug(ins, "remove %i records", ctx->remove);
+ count = 0;
+
+ /* Count number of current items */
+ total = flb_mp_count(data, bytes);
+ total -= ctx->remove;
+ if (total <= 0) {
+ /* zero records */
+ goto exit;
+ }
+
+ ret = flb_log_event_decoder_init(ctx->log_decoder,
+ (char *) data,
+ bytes);
+
+ while (count < total &&
+ flb_log_event_decoder_next(
+ ctx->log_decoder, &event) == FLB_EVENT_DECODER_SUCCESS) {
+
+ ret = flb_log_event_encoder_emit_raw_record(
+ ctx->log_encoder,
+ ctx->log_decoder->record_base,
+ ctx->log_decoder->record_length);
+
+ count++;
+ }
+ }
+
+ exit:
+ /* link new buffers */
+ *out_buf = ctx->log_encoder->output_buffer;
+ *out_size = ctx->log_encoder->output_length;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(
+ ctx->log_encoder);
+
+ return FLB_FILTER_MODIFIED;
+}
+
+static int cb_alter_size_exit(void *data, struct flb_config *config)
+{
+ (void) config;
+ struct flb_alter_size *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ flb_free(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_INT, "add", "0",
+ FLB_FALSE, FLB_TRUE, offsetof(struct flb_alter_size, add),
+ "add N records to the chunk"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "remove", "0",
+ FLB_FALSE, FLB_TRUE, offsetof(struct flb_alter_size, remove),
+ "remove N records from the chunk"
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_filter_plugin filter_alter_size_plugin = {
+ .name = "alter_size",
+ .description = "Alter incoming chunk size",
+ .cb_init = cb_alter_size_init,
+ .cb_filter = cb_alter_size_filter,
+ .cb_exit = cb_alter_size_exit,
+ .config_map = config_map,
+ .flags = 0
+};
diff --git a/src/fluent-bit/plugins/filter_alter_size/alter_size.h b/src/fluent-bit/plugins/filter_alter_size/alter_size.h
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_alter_size/alter_size.h
diff --git a/src/fluent-bit/plugins/filter_aws/CMakeLists.txt b/src/fluent-bit/plugins/filter_aws/CMakeLists.txt
new file mode 100644
index 000000000..ec0b0f703
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_aws/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ aws.c
+ )
+
+FLB_PLUGIN(filter_aws "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_aws/aws.c b/src/fluent-bit/plugins/filter_aws/aws.c
new file mode 100644
index 000000000..726b8b709
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_aws/aws.c
@@ -0,0 +1,1062 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <fluent-bit/flb_aws_util.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_io.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_env.h>
+#include <fluent-bit/aws/flb_aws_imds.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <monkey/mk_core/mk_list.h>
+#include <msgpack.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+
+#include "aws.h"
+
+static int get_ec2_metadata(struct flb_filter_aws *ctx);
+
+static void expose_aws_meta(struct flb_filter_aws *ctx)
+{
+ struct flb_env *env;
+ struct flb_config *config = ctx->ins->config;
+
+ env = config->env;
+
+ flb_env_set(env, "aws", "enabled");
+
+ if (ctx->availability_zone_include) {
+ flb_env_set(env,
+ "aws." FLB_FILTER_AWS_AVAILABILITY_ZONE_KEY,
+ ctx->availability_zone);
+ }
+
+ if (ctx->instance_id_include) {
+ flb_env_set(env,
+ "aws." FLB_FILTER_AWS_INSTANCE_ID_KEY,
+ ctx->instance_id);
+ }
+
+ if (ctx->instance_type_include) {
+ flb_env_set(env,
+ "aws." FLB_FILTER_AWS_INSTANCE_TYPE_KEY,
+ ctx->instance_type);
+ }
+
+ if (ctx->private_ip_include) {
+ flb_env_set(env,
+ "aws." FLB_FILTER_AWS_PRIVATE_IP_KEY,
+ ctx->private_ip);
+ }
+
+ if (ctx->vpc_id_include) {
+ flb_env_set(env,
+ "aws." FLB_FILTER_AWS_VPC_ID_KEY,
+ ctx->vpc_id);
+ }
+
+ if (ctx->ami_id_include) {
+ flb_env_set(env,
+ "aws." FLB_FILTER_AWS_AMI_ID_KEY,
+ ctx->ami_id);
+ }
+
+ if (ctx->account_id_include) {
+ flb_env_set(env,
+ "aws." FLB_FILTER_AWS_ACCOUNT_ID_KEY,
+ ctx->account_id);
+ }
+
+ if (ctx->hostname_include) {
+ flb_env_set(env,
+ "aws." FLB_FILTER_AWS_HOSTNAME_KEY,
+ ctx->hostname);
+ }
+}
+
+static int cb_aws_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config,
+ void *data)
+{
+ int imds_version = FLB_AWS_IMDS_VERSION_2;
+ int ret;
+ struct flb_filter_aws *ctx = NULL;
+ struct flb_filter_aws_init_options *options = data;
+ const char *tmp = NULL;
+
+ /* Create context */
+ ctx = flb_calloc(1, sizeof(struct flb_filter_aws));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->options = options;
+ ctx->ins = f_ins;
+
+ tmp = flb_filter_get_property("imds_version", f_ins);
+ if (tmp != NULL) {
+ if (strcasecmp(tmp, "v1") == 0) {
+ imds_version = FLB_AWS_IMDS_VERSION_1;
+ }
+ else if (strcasecmp(tmp, "v2") != 0) {
+ flb_plg_error(ctx->ins, "Invalid value %s for config option "
+ "'imds_version'. Valid values are 'v1' and 'v2'",
+ tmp);
+ flb_free(ctx);
+ return -1;
+ }
+ }
+
+ struct flb_aws_client_generator *generator;
+ if (options && options->client_generator) {
+ generator = options->client_generator;
+ } else {
+ generator = flb_aws_client_generator();
+ }
+ ctx->aws_ec2_filter_client = generator->create();
+ ctx->aws_ec2_filter_client->name = "ec2_imds_provider_client";
+ ctx->aws_ec2_filter_client->has_auth = FLB_FALSE;
+ ctx->aws_ec2_filter_client->provider = NULL;
+ ctx->aws_ec2_filter_client->region = NULL;
+ ctx->aws_ec2_filter_client->service = NULL;
+ ctx->aws_ec2_filter_client->port = FLB_AWS_IMDS_PORT;
+ ctx->aws_ec2_filter_client->flags = 0;
+ ctx->aws_ec2_filter_client->proxy = NULL;
+
+ struct flb_upstream *upstream;
+ upstream = flb_upstream_create(config, FLB_AWS_IMDS_HOST, FLB_AWS_IMDS_PORT,
+ FLB_IO_TCP, NULL);
+ if (!upstream) {
+ flb_plg_debug(ctx->ins, "unable to connect to EC2 IMDS");
+ return -1;
+ }
+
+ /* IMDSv2 token request will timeout if hops = 1 and running within container */
+ upstream->base.net.connect_timeout = FLB_AWS_IMDS_TIMEOUT;
+ upstream->base.net.io_timeout = FLB_AWS_IMDS_TIMEOUT;
+ upstream->base.net.keepalive = FLB_FALSE; /* On timeout, the connection is broken */
+ ctx->aws_ec2_filter_client->upstream = upstream;
+ flb_stream_disable_async_mode(&ctx->aws_ec2_filter_client->upstream->base);
+
+ ctx->client_imds = flb_aws_imds_create(&flb_aws_imds_config_default,
+ ctx->aws_ec2_filter_client);
+ if (!ctx->client_imds) {
+ flb_plg_error(ctx->ins, "failed to create aws client");
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->client_imds->imds_version = imds_version;
+
+ /* Populate context with config map defaults and incoming properties */
+ ret = flb_filter_config_map_set(f_ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(f_ins, "configuration error");
+ flb_free(ctx);
+ return -1;
+ }
+
+ ctx->metadata_retrieved = FLB_FALSE;
+
+ /* Retrieve metadata */
+ ret = get_ec2_metadata(ctx);
+ if (ret < 0) {
+ /* If the metadata fetch fails, the plugin continues to work. */
+ /* Every flush will attempt to fetch ec2 metadata, if needed. */
+        /* If the error is unrecoverable (-3), it exits and does not retry. */
+ if (ret == -3) {
+ flb_free(ctx);
+ return -1;
+ }
+ }
+ else {
+ expose_aws_meta(ctx);
+ }
+
+ flb_filter_set_context(f_ins, ctx);
+ return 0;
+}
+
+
+/* Get VPC ID from the metadata server.
+ * Initializes ctx->vpc_id and ctx->vpc_id_len.
+ */
+static int get_vpc_id(struct flb_filter_aws *ctx)
+{
+ ctx->vpc_id = flb_aws_imds_get_vpc_id(ctx->client_imds);
+ if (ctx->vpc_id == NULL) {
+ return -1;
+ }
+ ctx->vpc_id_len = flb_sds_len(ctx->vpc_id);
+ return 0;
+}
+
+void flb_filter_aws_tags_destroy(struct flb_filter_aws *ctx)
+{
+ int i;
+ if (!ctx) {
+ return;
+ }
+ if (ctx->tag_keys) {
+ for (i = 0; i < ctx->tags_count; i++) {
+ if (ctx->tag_keys[i]) {
+ flb_sds_destroy(ctx->tag_keys[i]);
+ }
+ }
+ flb_free(ctx->tag_keys);
+ ctx->tag_keys = NULL;
+ }
+ if (ctx->tag_values) {
+ for (i = 0; i < ctx->tags_count; i++) {
+ if (ctx->tag_values[i]) {
+ flb_sds_destroy(ctx->tag_values[i]);
+ }
+ }
+ flb_free(ctx->tag_values);
+ ctx->tag_values = NULL;
+ }
+ if (ctx->tag_keys_len) {
+ flb_free(ctx->tag_keys_len);
+ }
+ ctx->tag_keys_len = NULL;
+ if (ctx->tag_values_len) {
+ flb_free(ctx->tag_values_len);
+ }
+ ctx->tag_values_len = NULL;
+ if (ctx->tag_is_enabled) {
+ flb_free(ctx->tag_is_enabled);
+ }
+ ctx->tag_is_enabled = NULL;
+ ctx->tags_count = 0;
+}
+
+/* Get EC2 instance tag keys from /latest/meta-data/tags/instance.
+ * Initializes ctx->tags_count, ctx->tag_keys and ctx->tag_keys_len.
+ *
+ * If the EC2 metadata server doesn't return tags, either because tags are disabled
+ * in the metadata server or because the instance has no tags, the function returns -2.
+ */
+static int get_ec2_tag_keys(struct flb_filter_aws *ctx)
+{
+ int ret;
+ flb_sds_t tags_list = NULL;
+ size_t len = 0;
+ size_t tag_index = 0;
+ size_t tag_start = 0;
+ size_t tag_end = 0;
+ flb_sds_t tag_key;
+ flb_sds_t tmp;
+ size_t tag_key_len;
+ int i;
+
+ /* get a list of tag keys from the meta data server */
+ ret = flb_aws_imds_request(ctx->client_imds, FLB_AWS_IMDS_INSTANCE_TAG, &tags_list,
+ &len);
+ if (ret < 0) {
+ ctx->tags_count = 0;
+ if (ret == -2) { /* if there are no tags, response status code is 404 */
+ flb_plg_warn(ctx->ins, "EC2 instance metadata tag request returned 404. "
+ "This likely indicates your instance has no tags "
+ "or the EC2 tagging metadata API is not enabled");
+ return -2;
+ }
+ flb_sds_destroy(tags_list);
+ return -1;
+ }
+
+ /* if endpoint returned 200, normally at least 1 tag should be present */
+ /* for the sake of correctness, let's check the edge case when response is empty */
+ if (len == 0) {
+ ctx->tags_count = 0;
+ flb_sds_destroy(tags_list);
+ return -1;
+ }
+
+ /* count number of tag keys and allocate memory for pointers and lengths */
+ /* since get_metadata returned 0, we assume there is at least 1 tag */
+ /* \n is separator, therefore number of items = number of \n + 1 */
+ ctx->tags_count = 1;
+ for (i = 0; i < len; i++) {
+ if (tags_list[i] == '\n') {
+ ctx->tags_count++;
+ }
+ }
+ ctx->tag_keys = flb_calloc(ctx->tags_count, sizeof(flb_sds_t));
+ if (!ctx->tag_keys) {
+ flb_errno();
+ flb_sds_destroy(tags_list);
+ return -1;
+ }
+    ctx->tag_keys_len = flb_calloc(ctx->tags_count, sizeof(size_t));
+ if (!ctx->tag_keys_len) {
+ flb_errno();
+ flb_sds_destroy(tags_list);
+ return -1;
+ }
+
+ /* go over the response and initialize tag_keys values */
+ /* code below finds two indices which define tag key and copies them to ctx */
+ while (tag_end <= len) {
+ /* replace \n with \0 to 'clearly' separate tag key strings */
+ if (tags_list[tag_end] == '\n') {
+ tags_list[tag_end] = '\0';
+ }
+ if ((tags_list[tag_end] == '\0' || tag_end == len) && (tag_start < tag_end)) {
+ /* length of tag key characters is the difference between start and end */
+ /* for instance, if tag name is 'Name\0...', the corresponding values are */
+ /* tag_start = 0, points to 'N' */
+ /* tag_end = 4, points to '\0' just after 'e' */
+            /* e.g.: 4 - 0 = 4, which equals len("Name") */
+ tag_key_len = tag_end - tag_start;
+ ctx->tag_keys_len[tag_index] = tag_key_len;
+
+ /* allocate new memory for the tag key value */
+ /* + 1, because we need one more character for \0 */
+ tmp = flb_sds_create_size(tag_key_len + 1);
+ if (!tmp) {
+ flb_errno();
+ flb_sds_destroy(tags_list);
+ return -2;
+ }
+ tmp[tag_key_len] = '\0';
+ ctx->tag_keys[tag_index] = tmp;
+
+ /* tag_key points to the first character of tag key as char* */
+ tag_key = tags_list + tag_start;
+ memcpy(ctx->tag_keys[tag_index], tag_key, tag_key_len);
+
+ tag_index++;
+ tag_start = tag_end + 1;
+ }
+ tag_end++;
+ }
+
+ flb_sds_destroy(tags_list);
+
+ return ret;
+}
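
A worked standalone example of the newline-splitting loop above may help; it is not part of this patch and uses a plain char buffer instead of flb_sds_t. Given an IMDS response such as "Name\nenv\nteam", the start/end indices recover each tag key and its length:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* IMDS returns tag keys as a newline-separated list */
    char tags_list[] = "Name\nenv\nteam";
    size_t len = strlen(tags_list);
    size_t tag_start = 0;
    size_t tag_end = 0;

    while (tag_end <= len) {
        /* replace '\n' with '\0' so each key becomes its own C string */
        if (tags_list[tag_end] == '\n') {
            tags_list[tag_end] = '\0';
        }
        if ((tags_list[tag_end] == '\0' || tag_end == len) && tag_start < tag_end) {
            printf("tag key: '%s' (len=%zu)\n",
                   tags_list + tag_start, tag_end - tag_start);
            tag_start = tag_end + 1;
        }
        tag_end++;
    }
    return 0;
}

This prints "Name" (4), "env" (3) and "team" (4), matching the tag_keys/tag_keys_len arrays the plugin fills in.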
+
+/* Get EC2 instance tag values from /latest/meta-data/tags/instance/{tag_key}.
+ * Initializes ctx->tag_values and ctx->tag_values_len.
+ */
+static int get_ec2_tag_values(struct flb_filter_aws *ctx)
+{
+ int ret;
+ size_t i;
+ flb_sds_t tag_value = NULL;
+ size_t tag_value_len = 0;
+ size_t tag_value_path_len;
+ flb_sds_t tag_value_path;
+ flb_sds_t tmp;
+
+ /* initialize array for the tag values */
+ ctx->tag_values = flb_calloc(ctx->tags_count, sizeof(flb_sds_t));
+ if (!ctx->tag_values) {
+ flb_errno();
+ return -1;
+ }
+ ctx->tag_values_len = flb_calloc(ctx->tags_count, sizeof(size_t));
+ if (!ctx->tag_values_len) {
+ flb_errno();
+ return -1;
+ }
+
+ for (i = 0; i < ctx->tags_count; i++) {
+ /* fetch tag value using path: /latest/meta-data/tags/instance/{tag_name} */
+ tag_value_path_len = ctx->tag_keys_len[i] + 1 +
+ strlen(FLB_AWS_IMDS_INSTANCE_TAG);
+ tag_value_path = flb_sds_create_size(tag_value_path_len + 1);
+ if (!tag_value_path) {
+ flb_errno();
+ return -1;
+ }
+ tmp = flb_sds_printf(&tag_value_path, "%s/%s",
+ FLB_AWS_IMDS_INSTANCE_TAG,
+ ctx->tag_keys[i]);
+ if (!tmp) {
+ flb_errno();
+ flb_sds_destroy(tag_value_path);
+ return -1;
+ }
+ tag_value_path = tmp;
+
+ ret = flb_aws_imds_request(ctx->client_imds, tag_value_path, &tag_value,
+ &tag_value_len);
+ if (ret < 0) {
+ flb_sds_destroy(tag_value_path);
+ if (ret == -2) {
+ flb_plg_error(ctx->ins, "no value for tag %s", ctx->tag_keys[i]);
+ } else {
+ flb_plg_error(ctx->ins, "could not fetch value for tag %s",
+ ctx->tag_keys[i]);
+ }
+ return ret;
+ }
+
+ ctx->tag_values[i] = tag_value;
+ ctx->tag_values_len[i] = tag_value_len;
+
+ flb_sds_destroy(tag_value_path);
+ }
+
+ return 0;
+}
+
+static int tag_is_present_in_list(struct flb_filter_aws *ctx, flb_sds_t tag,
+ flb_sds_t *tags, int tags_n)
+{
+ int i;
+ for (i = 0; i < tags_n; i++) {
+ if (strcmp(tag, tags[i]) == 0) {
+ return FLB_TRUE;
+ }
+ }
+ return FLB_FALSE;
+}
+
+static int tags_split(char *tags, flb_sds_t **tags_list, int *tags_list_n) {
+ flb_sds_t token;
+ int i;
+ int n;
+ n = 1;
+ for (i = 0; i < strlen(tags); i++) {
+ if (tags[i] == ',') {
+ n++;
+ }
+ }
+
+ *tags_list = flb_calloc(sizeof(flb_sds_t), n);
+ if (*tags_list == NULL) {
+ return -2;
+ }
+
+ token = strtok(tags, ",");
+ i = 0;
+ while (token != NULL) {
+ (*tags_list)[i] = token;
+ i++;
+ token = strtok(NULL, ",");
+ }
+
+ *tags_list_n = n;
+
+ return 0;
+}
+
+static int get_ec2_tag_enabled(struct flb_filter_aws *ctx)
+{
+ const char *tags_include;
+ const char *tags_exclude;
+ char *tags_copy;
+ flb_sds_t *tags;
+ int tags_n;
+ int i;
+ int tag_present;
+ int result;
+
+ /* if there are no tags, there is no need to evaluate which tag is enabled */
+ if (ctx->tags_count == 0) {
+ return 0;
+ }
+
+
+ /* allocate memory for 'tag_is_enabled' for all tags */
+ ctx->tag_is_enabled = flb_calloc(ctx->tags_count, sizeof(int));
+ if (!ctx->tag_is_enabled) {
+ flb_plg_error(ctx->ins, "Failed to allocate memory for tag_is_enabled");
+ return -1;
+ }
+
+ /* if tags_include and tags_exclude are not defined, set all tags as enabled */
+ for (i = 0; i < ctx->tags_count; i++) {
+ ctx->tag_is_enabled[i] = FLB_TRUE;
+ }
+
+ /* apply tags_included configuration */
+ tags_include = flb_filter_get_property("tags_include", ctx->ins);
+ if (tags_include) {
+        /* copy const string in order to use strtok which modifies the string */
+ tags_copy = flb_strdup(tags_include);
+ if (!tags_copy) {
+ return -1;
+ }
+ result = tags_split(tags_copy, &tags, &tags_n);
+ if (result < 0) {
+ free(tags_copy);
+ return -1;
+ }
+ for (i = 0; i < ctx->tags_count; i++) {
+ tag_present = tag_is_present_in_list(ctx, ctx->tag_keys[i], tags, tags_n);
+ /* tag is enabled if present in included list */
+ ctx->tag_is_enabled[i] = tag_present;
+ }
+ free(tags_copy);
+ free(tags);
+ }
+
+ /* apply tags_excluded configuration, only if tags_included is not defined */
+ tags_exclude = flb_filter_get_property("tags_exclude", ctx->ins);
+ if (tags_include && tags_exclude) {
+ flb_plg_error(ctx->ins, "configuration is invalid, both tags_include"
+ " and tags_exclude are specified at the same time");
+ return -3;
+ }
+ if (!tags_include && tags_exclude) {
+        /* copy const string in order to use strtok which modifies the string */
+ tags_copy = flb_strdup(tags_exclude);
+ if (!tags_copy) {
+ return -1;
+ }
+ result = tags_split(tags_copy, &tags, &tags_n);
+ if (result < 0) {
+ free(tags_copy);
+ return -1;
+ }
+ for (i = 0; i < ctx->tags_count; i++) {
+ tag_present = tag_is_present_in_list(ctx, ctx->tag_keys[i], tags, tags_n);
+ if (tag_present == FLB_TRUE) {
+ /* tag is excluded, so should be disabled */
+ ctx->tag_is_enabled[i] = FLB_FALSE;
+ } else {
+ /* tag is not excluded, therefore should be enabled */
+ ctx->tag_is_enabled[i] = FLB_TRUE;
+ }
+ }
+ free(tags_copy);
+ free(tags);
+ }
+
+ return 0;
+}
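
To make the include/exclude selection concrete, here is a standalone sketch (invented names, not part of this patch) of how a 'tags_include' value such as "Name,team" is split on commas and matched against the instance's tag keys, which is what tags_split() and tag_is_present_in_list() do above:

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *instance_tags[] = { "Name", "env", "team" };
    char tags_include[] = "Name,team";    /* value of the 'tags_include' property */
    int enabled[3] = { 0, 0, 0 };
    char *token;
    int i;

    /* strtok modifies its buffer, which is why the plugin duplicates the property first */
    token = strtok(tags_include, ",");
    while (token != NULL) {
        for (i = 0; i < 3; i++) {
            if (strcmp(instance_tags[i], token) == 0) {
                enabled[i] = 1;           /* tag key is present in the include list */
            }
        }
        token = strtok(NULL, ",");
    }

    for (i = 0; i < 3; i++) {
        printf("%s -> %s\n", instance_tags[i], enabled[i] ? "enabled" : "disabled");
    }
    return 0;
}

With tags_exclude the logic is inverted: keys found in the list end up disabled instead.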
+
+static int get_ec2_tags(struct flb_filter_aws *ctx)
+{
+ int i;
+ int ret;
+
+ ctx->tags_fetched = FLB_FALSE;
+
+ /* get_ec2_tags function might be called multiple times, so we need to always */
+ /* free memory for tags in case of previous allocations */
+ flb_filter_aws_tags_destroy(ctx);
+
+ ret = get_ec2_tag_keys(ctx);
+ if (ret < 0) {
+ flb_filter_aws_tags_destroy(ctx);
+ if (ret == -2) {
+            /* -2 means there are no tags; to avoid requesting EC2 tags */
+            /* repeatedly on each flush, mark tag fetching as done */
+ ctx->tags_fetched = FLB_TRUE;
+ return 0;
+ }
+ return ret;
+ }
+ ret = get_ec2_tag_values(ctx);
+ if (ret < 0) {
+ flb_filter_aws_tags_destroy(ctx);
+ return ret;
+ }
+
+ ret = get_ec2_tag_enabled(ctx);
+ if (ret < 0) {
+ flb_filter_aws_tags_destroy(ctx);
+ return ret;
+ }
+
+ /* log tags debug information */
+ for (i = 0; i < ctx->tags_count; i++) {
+ flb_plg_debug(ctx->ins, "found tag %s which is included=%d",
+ ctx->tag_keys[i], ctx->tag_is_enabled[i]);
+ }
+
+ ctx->tags_fetched = FLB_TRUE;
+ return 0;
+}
+
+/*
+ * Makes a call to IMDS to get the values of all metadata fields.
+ * It can be called repeatedly if some metadata calls initially do not succeed.
+ */
+static int get_ec2_metadata(struct flb_filter_aws *ctx)
+{
+ int ret;
+ int i;
+
+ if (ctx->instance_id_include && !ctx->instance_id) {
+ ret = flb_aws_imds_request(ctx->client_imds, FLB_AWS_IMDS_INSTANCE_ID_PATH,
+ &ctx->instance_id,
+ &ctx->instance_id_len);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to get instance ID");
+ return -1;
+ }
+ ctx->new_keys++;
+ }
+
+ if (ctx->availability_zone_include && !ctx->availability_zone) {
+ ret = flb_aws_imds_request(ctx->client_imds, FLB_AWS_IMDS_AZ_PATH,
+ &ctx->availability_zone,
+ &ctx->availability_zone_len);
+
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to get instance AZ");
+ return -1;
+ }
+ ctx->new_keys++;
+ }
+
+ if (ctx->instance_type_include && !ctx->instance_type) {
+ ret = flb_aws_imds_request(ctx->client_imds, FLB_AWS_IMDS_INSTANCE_TYPE_PATH,
+ &ctx->instance_type, &ctx->instance_type_len);
+
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to get instance type");
+ return -1;
+ }
+ ctx->new_keys++;
+ }
+
+ if (ctx->private_ip_include && !ctx->private_ip) {
+ ret = flb_aws_imds_request(ctx->client_imds, FLB_AWS_IMDS_PRIVATE_IP_PATH,
+ &ctx->private_ip, &ctx->private_ip_len);
+
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to get instance private IP");
+ return -1;
+ }
+ ctx->new_keys++;
+ }
+
+ if (ctx->vpc_id_include && !ctx->vpc_id) {
+ ret = get_vpc_id(ctx);
+
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to get instance VPC ID");
+ return -1;
+ }
+ ctx->new_keys++;
+ }
+
+ if (ctx->ami_id_include && !ctx->ami_id) {
+ ret = flb_aws_imds_request(ctx->client_imds, FLB_AWS_IMDS_AMI_ID_PATH,
+ &ctx->ami_id, &ctx->ami_id_len);
+
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to get AMI ID");
+ return -1;
+ }
+ ctx->new_keys++;
+ }
+
+ if (ctx->account_id_include && !ctx->account_id) {
+ ret = flb_aws_imds_request_by_key(ctx->client_imds, FLB_AWS_IMDS_ACCOUNT_ID_PATH,
+ &ctx->account_id, &ctx->account_id_len,
+ "accountId");
+
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to get Account ID");
+ return -1;
+ }
+ ctx->new_keys++;
+ }
+
+ if (ctx->hostname_include && !ctx->hostname) {
+ ret = flb_aws_imds_request(ctx->client_imds, FLB_AWS_IMDS_HOSTNAME_PATH,
+ &ctx->hostname, &ctx->hostname_len);
+
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to get Hostname");
+ return -1;
+ }
+ ctx->new_keys++;
+ }
+
+ if (ctx->tags_enabled && !ctx->tags_fetched) {
+ ret = get_ec2_tags(ctx);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to get instance EC2 Tags");
+ return ret;
+ }
+ for (i = 0; i < ctx->tags_count; i++) {
+ if (ctx->tag_is_enabled[i] == FLB_TRUE) {
+ ctx->new_keys++;
+ }
+ }
+ }
+
+ ctx->metadata_retrieved = FLB_TRUE;
+ return 0;
+}
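
The retry behaviour described above (fields are fetched once; a failed fetch is retried on later flushes while records pass through untouched) can be summarised with a small standalone sketch; the function names are invented and this is not part of the patch:

#include <stdio.h>

static int metadata_retrieved = 0;

static int fetch_metadata(void)
{
    static int attempts = 0;

    /* pretend the first attempt fails, e.g. IMDS is not reachable yet */
    return (++attempts > 1) ? 0 : -1;
}

static const char *filter_one_flush(void)
{
    if (!metadata_retrieved) {
        if (fetch_metadata() < 0) {
            return "NOTOUCH";    /* leave records unmodified on this flush */
        }
        metadata_retrieved = 1;
    }
    return "MODIFIED";           /* enrich records with the cached metadata */
}

int main(void)
{
    printf("flush 1: %s\n", filter_one_flush());
    printf("flush 2: %s\n", filter_one_flush());
    printf("flush 3: %s\n", filter_one_flush());
    return 0;
}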
+
+static int cb_aws_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_size,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *context,
+ struct flb_config *config)
+{
+ struct flb_filter_aws *ctx = context;
+ int i = 0;
+ int ret;
+ msgpack_object *obj;
+ msgpack_object_kv *kv;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ (void) f_ins;
+ (void) i_ins;
+ (void) config;
+
+ /* First check that the metadata has been retrieved */
+ if (!ctx->metadata_retrieved) {
+ ret = get_ec2_metadata(ctx);
+ if (ret < 0) {
+ return FLB_FILTER_NOTOUCH;
+ }
+ expose_aws_meta(ctx);
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ obj = log_event.body;
+
+ ret = flb_log_event_encoder_begin_record(&log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ &log_encoder,
+ &log_event.timestamp);
+ }
+
+ /* iterate through the old record map and add it to the new buffer */
+ kv = obj->via.map.ptr;
+
+ for(i=0;
+ i < obj->via.map.size &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ i++) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&kv[i].key),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&kv[i].val));
+ }
+
+ /* append new keys */
+ if (ctx->availability_zone_include &&
+ ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(FLB_FILTER_AWS_AVAILABILITY_ZONE_KEY),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->availability_zone,
+ ctx->availability_zone_len));
+ }
+
+ if (ctx->instance_id_include &&
+ ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(FLB_FILTER_AWS_INSTANCE_ID_KEY),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->instance_id,
+ ctx->instance_id_len));
+ }
+
+ if (ctx->instance_type_include &&
+ ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(FLB_FILTER_AWS_INSTANCE_TYPE_KEY),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->instance_type,
+ ctx->instance_type_len));
+ }
+
+ if (ctx->private_ip_include &&
+ ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(FLB_FILTER_AWS_PRIVATE_IP_KEY),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->private_ip,
+ ctx->private_ip_len));
+ }
+
+ if (ctx->vpc_id_include &&
+ ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(FLB_FILTER_AWS_VPC_ID_KEY),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->vpc_id,
+ ctx->vpc_id_len));
+ }
+
+ if (ctx->ami_id_include &&
+ ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(FLB_FILTER_AWS_AMI_ID_KEY),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->ami_id,
+ ctx->ami_id_len));
+ }
+
+ if (ctx->account_id_include &&
+ ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(FLB_FILTER_AWS_ACCOUNT_ID_KEY),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->account_id,
+ ctx->account_id_len));
+ }
+
+ if (ctx->hostname_include &&
+ ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(FLB_FILTER_AWS_HOSTNAME_KEY),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->hostname,
+ ctx->hostname_len));
+ }
+
+ if (ctx->tags_enabled && ctx->tags_fetched) {
+ for (i = 0;
+ i < ctx->tags_count &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ i++) {
+ if (ctx->tag_is_enabled[i] == FLB_TRUE) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_STRING_VALUE(ctx->tag_keys[i],
+ ctx->tag_keys_len[i]),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->tag_values[i],
+ ctx->tag_values_len[i]));
+ }
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&log_encoder);
+ }
+ }
+
+ if (ret == FLB_EVENT_DECODER_ERROR_INSUFFICIENT_DATA &&
+ log_decoder.offset == bytes) {
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ *out_buf = log_encoder.output_buffer;
+ *out_size = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", ret);
+
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+static void flb_filter_aws_destroy(struct flb_filter_aws *ctx)
+{
+ if (ctx->options == NULL) {
+        /* Non-null options are only provided by unit tests, and the AWS client */
+        /* mock must clean up the memory with some special behaviour; so when */
+        /* options are NOT null (which means we are running unit tests), */
+        /* we rely on the unit tests to perform the memory cleanup. */
+ if (ctx->aws_ec2_filter_client) {
+ flb_aws_client_destroy(ctx->aws_ec2_filter_client);
+ }
+ }
+ if (ctx->client_imds) {
+ flb_aws_imds_destroy(ctx->client_imds);
+ }
+
+ if (ctx->availability_zone) {
+ flb_sds_destroy(ctx->availability_zone);
+ }
+
+ if (ctx->instance_id) {
+ flb_sds_destroy(ctx->instance_id);
+ }
+
+ if (ctx->instance_type) {
+ flb_sds_destroy(ctx->instance_type);
+ }
+
+ if (ctx->private_ip) {
+ flb_sds_destroy(ctx->private_ip);
+ }
+
+ if (ctx->vpc_id) {
+ flb_sds_destroy(ctx->vpc_id);
+ }
+
+ if (ctx->ami_id) {
+ flb_sds_destroy(ctx->ami_id);
+ }
+
+ if (ctx->account_id) {
+ flb_sds_destroy(ctx->account_id);
+ }
+
+ if (ctx->hostname) {
+ flb_sds_destroy(ctx->hostname);
+ }
+
+ flb_filter_aws_tags_destroy(ctx);
+
+ flb_free(ctx);
+}
+
+static int cb_aws_exit(void *data, struct flb_config *config)
+{
+ struct flb_filter_aws *ctx = data;
+
+ if (ctx != NULL) {
+ flb_filter_aws_destroy(ctx);
+ }
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "imds_version", "v2",
+ 0, FLB_FALSE, 0,
+ "Specifies which version of the EC2 instance metadata service"
+ " will be used: 'v1' or 'v2'. 'v2' may not work"
+ " if you run Fluent Bit in a container."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "az", "true",
+ 0, FLB_TRUE, offsetof(struct flb_filter_aws, availability_zone_include),
+ "Enable EC2 instance availability zone"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "ec2_instance_id", "true",
+ 0, FLB_TRUE, offsetof(struct flb_filter_aws, instance_id_include),
+ "Enable EC2 instance ID"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "ec2_instance_type", "false",
+ 0, FLB_TRUE, offsetof(struct flb_filter_aws, instance_type_include),
+ "Enable EC2 instance type"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "private_ip", "false",
+ 0, FLB_TRUE, offsetof(struct flb_filter_aws, private_ip_include),
+ "Enable EC2 instance private IP"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "vpc_id", "false",
+ 0, FLB_TRUE, offsetof(struct flb_filter_aws, vpc_id_include),
+ "Enable EC2 instance VPC ID"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "ami_id", "false",
+ 0, FLB_TRUE, offsetof(struct flb_filter_aws, ami_id_include),
+ "Enable EC2 instance Image ID"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "account_id", "false",
+ 0, FLB_TRUE, offsetof(struct flb_filter_aws, account_id_include),
+ "Enable EC2 instance Account ID"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "hostname", "false",
+ 0, FLB_TRUE, offsetof(struct flb_filter_aws, hostname_include),
+ "Enable EC2 instance hostname"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "tags_enabled", "false",
+ 0, FLB_TRUE, offsetof(struct flb_filter_aws, tags_enabled),
+ "Enable EC2 instance tags, "
+ "injects all tags if tags_include and tags_exclude are empty"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "tags_include", "",
+ 0, FLB_FALSE, 0,
+ "Defines list of specific EC2 tag keys to inject into the logs; "
+ "tag keys must be separated by \",\" character; "
+ "tags which are not present in this list will be ignored; "
+ "e.g.: \"Name,tag1,tag2\""
+ },
+ {
+ FLB_CONFIG_MAP_STR, "tags_exclude", "",
+ 0, FLB_FALSE, 0,
+ "Defines list of specific EC2 tag keys not to inject into the logs; "
+ "tag keys must be separated by \",\" character; "
+ "if both tags_include and tags_exclude are specified, configuration is invalid"
+ " and plugin fails"
+ },
+ {0}
+};
+
+struct flb_filter_plugin filter_aws_plugin = {
+ .name = "aws",
+ .description = "Add AWS Metadata",
+ .cb_init = cb_aws_init,
+ .cb_filter = cb_aws_filter,
+ .cb_exit = cb_aws_exit,
+ .config_map = config_map,
+ .flags = 0
+};
diff --git a/src/fluent-bit/plugins/filter_aws/aws.h b/src/fluent-bit/plugins/filter_aws/aws.h
new file mode 100644
index 000000000..d165de5a2
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_aws/aws.h
@@ -0,0 +1,131 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_AWS_H
+#define FLB_FILTER_AWS_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+
+#define FLB_FILTER_AWS_AVAILABILITY_ZONE_KEY "az"
+#define FLB_FILTER_AWS_AVAILABILITY_ZONE_KEY_LEN 2
+#define FLB_FILTER_AWS_INSTANCE_ID_KEY "ec2_instance_id"
+#define FLB_FILTER_AWS_INSTANCE_ID_KEY_LEN 15
+#define FLB_FILTER_AWS_INSTANCE_TYPE_KEY "ec2_instance_type"
+#define FLB_FILTER_AWS_INSTANCE_TYPE_KEY_LEN 17
+#define FLB_FILTER_AWS_PRIVATE_IP_KEY "private_ip"
+#define FLB_FILTER_AWS_PRIVATE_IP_KEY_LEN 10
+#define FLB_FILTER_AWS_VPC_ID_KEY "vpc_id"
+#define FLB_FILTER_AWS_VPC_ID_KEY_LEN 6
+#define FLB_FILTER_AWS_AMI_ID_KEY "ami_id"
+#define FLB_FILTER_AWS_AMI_ID_KEY_LEN 6
+#define FLB_FILTER_AWS_ACCOUNT_ID_KEY "account_id"
+#define FLB_FILTER_AWS_ACCOUNT_ID_KEY_LEN 10
+#define FLB_FILTER_AWS_HOSTNAME_KEY "hostname"
+#define FLB_FILTER_AWS_HOSTNAME_KEY_LEN 8
+
+struct flb_filter_aws {
+ struct flb_filter_aws_init_options *options;
+
+ /* upstream connection to ec2 IMDS */
+ struct flb_aws_client *aws_ec2_filter_client;
+ struct flb_aws_imds *client_imds;
+
+ /*
+ * IMDSv2 requires a token which must be present in metadata requests
+ * This plugin does not refresh the token
+ */
+ flb_sds_t imds_v2_token;
+ size_t imds_v2_token_len;
+
+ /* Metadata fields
+ * These are queried only once; ec2 metadata is assumed to be immutable
+ */
+ flb_sds_t availability_zone;
+ size_t availability_zone_len;
+ int availability_zone_include;
+
+ flb_sds_t instance_id;
+ size_t instance_id_len;
+ int instance_id_include;
+
+ flb_sds_t instance_type;
+ size_t instance_type_len;
+ int instance_type_include;
+
+ flb_sds_t private_ip;
+ size_t private_ip_len;
+ int private_ip_include;
+
+ flb_sds_t vpc_id;
+ size_t vpc_id_len;
+ int vpc_id_include;
+
+ flb_sds_t ami_id;
+ size_t ami_id_len;
+ int ami_id_include;
+
+ flb_sds_t account_id;
+ size_t account_id_len;
+ int account_id_include;
+
+ flb_sds_t hostname;
+ size_t hostname_len;
+ int hostname_include;
+
+ /* tags_* fields are related to exposing EC2 tags in log labels
+ * tags_enabled defines if EC2 tags functionality is enabled */
+ int tags_enabled;
+
+ /* tags_fetched defines if tag keys and values were fetched successfully
+ * and might be used to inject into msgpack */
+ int tags_fetched;
+ /* tags_count defines how many tags are available to use
+ * it could be 0 if there are no tags defined or if metadata server has
+ * disabled exposing tags functionality */
+ size_t tags_count;
+ /* tag_keys is an array of tag key strings */
+ flb_sds_t *tag_keys;
+ /* tag_keys_len is an array of lengths corresponding to tag_keys items */
+ size_t *tag_keys_len;
+ /* tag_values is an array of tag values strings */
+ flb_sds_t *tag_values;
+ /* tag_values_len is an array of lengths related to tag_values items */
+ size_t *tag_values_len;
+ /* tag_is_enabled is an array of bools which define if corresponding tag should be injected */
+ /* e.g.: if tag_is_enabled[0] = FALSE, then filter aws should not inject first tag */
+ int *tag_is_enabled;
+
+ /* number of new keys added by this plugin */
+ int new_keys;
+
+ int metadata_retrieved;
+
+ /* Plugin can use EC2 metadata v1 or v2; default is v2 */
+ int use_v2;
+
+ /* Filter plugin instance reference */
+ struct flb_filter_instance *ins;
+};
+
+struct flb_filter_aws_init_options {
+ struct flb_aws_client_generator *client_generator;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_checklist/CMakeLists.txt b/src/fluent-bit/plugins/filter_checklist/CMakeLists.txt
new file mode 100644
index 000000000..eb8a13529
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_checklist/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ checklist.c)
+
+FLB_PLUGIN(filter_checklist "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_checklist/checklist.c b/src/fluent-bit/plugins/filter_checklist/checklist.c
new file mode 100644
index 000000000..0664777c6
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_checklist/checklist.c
@@ -0,0 +1,656 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_ra_key.h>
+#include <fluent-bit/flb_sqldb.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include "checklist.h"
+#include <ctype.h>
+
+static int db_init(struct checklist *ctx)
+{
+ int ret;
+
+    /* initialize database */
+ ctx->db = flb_sqldb_open(":memory:", "filter_check", ctx->config);
+ if (!ctx->db) {
+ flb_plg_error(ctx->ins, "could not create in-memory database");
+ return -1;
+ }
+
+ /* create table */
+ ret = flb_sqldb_query(ctx->db, SQL_CREATE_TABLE, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "db: could not create table");
+ return -1;
+ }
+
+    /* enable case-sensitive matching */
+ ret = flb_sqldb_query(ctx->db, SQL_CASE_SENSITIVE, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "db: could not set CASE SENSITIVE");
+ return -1;
+ }
+
+ /*
+ * Prepare SQL statements
+ * -----------------------
+ */
+
+ /* SQL_INSERT */
+ ret = sqlite3_prepare_v2(ctx->db->handler,
+ SQL_INSERT,
+ -1,
+ &ctx->stmt_insert,
+ 0);
+ if (ret != SQLITE_OK) {
+ flb_plg_error(ctx->ins, "error preparing database SQL statement: insert");
+ return -1;
+ }
+
+ /* SQL_CHECK */
+ ret = sqlite3_prepare_v2(ctx->db->handler,
+ SQL_CHECK,
+ -1,
+ &ctx->stmt_check,
+ 0);
+ if (ret != SQLITE_OK) {
+ flb_plg_error(ctx->ins, "error preparing database SQL statement: check");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int db_insert(struct checklist *ctx, char *buf, int len)
+{
+ int ret;
+
+ /* Bind parameter */
+ sqlite3_bind_text(ctx->stmt_insert, 1, buf, len, 0);
+
+ /* Run the insert */
+ ret = sqlite3_step(ctx->stmt_insert);
+ if (ret != SQLITE_DONE) {
+ sqlite3_clear_bindings(ctx->stmt_insert);
+ sqlite3_reset(ctx->stmt_insert);
+ flb_plg_warn(ctx->ins, "cannot execute insert for value: %s", buf);
+ return -1;
+ }
+
+ sqlite3_clear_bindings(ctx->stmt_insert);
+ sqlite3_reset(ctx->stmt_insert);
+
+ return flb_sqldb_last_id(ctx->db);
+}
+
+static int db_check(struct checklist *ctx, char *buf, size_t size)
+{
+ int ret;
+ int match = FLB_FALSE;
+
+ /* Bind parameter */
+ sqlite3_bind_text(ctx->stmt_check, 1, buf, size, 0);
+
+ /* Run the check */
+ ret = sqlite3_step(ctx->stmt_check);
+ if (ret == SQLITE_ROW) {
+ match = FLB_TRUE;
+ }
+
+ sqlite3_clear_bindings(ctx->stmt_check);
+ sqlite3_reset(ctx->stmt_check);
+
+ return match;
+}
+
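+/*
+ * The patterns file read below is plain text with one pattern per line:
+ * blank lines and lines starting with '#' are skipped, and each pattern is
+ * lowercased when 'ignore_case' is enabled. A minimal sketch of such a file
+ * (the patterns themselves are hypothetical):
+ *
+ *   # suspicious log fragments
+ *   failed password
+ *   invalid user
+ */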
+static int load_file_patterns(struct checklist *ctx)
+{
+ int i;
+ int ret;
+ int len;
+ int line = 0;
+ int size = LINE_SIZE;
+ char buf[LINE_SIZE];
+ FILE *f;
+
+ /* open file */
+ f = fopen(ctx->file, "r");
+ if (!f) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "could not open file: %s", ctx->file);
+ return -1;
+ }
+
+ /* read and process rules on lines */
+ while (fgets(buf, size - 1, f)) {
+ len = strlen(buf);
+ if (buf[len - 1] == '\n') {
+ buf[--len] = 0;
+ if (len && buf[len - 1] == '\r') {
+ buf[--len] = 0;
+ }
+ }
+ else if (!feof(f)) {
+ flb_plg_error(ctx->ins, "length of content has exceeded limit");
+ fclose(f);
+ return -1;
+ }
+
+ /* skip empty and commented lines */
+ if (!buf[0] || buf[0] == '#') {
+ line++;
+ continue;
+ }
+
+ /* convert to lowercase if needed */
+ if (ctx->ignore_case) {
+ for (i = 0; i < len; i++) {
+ buf[i] = tolower(buf[i]);
+ }
+ }
+
+ /* add the entry as a hash table key, no value reference is needed */
+ if (ctx->mode == CHECK_EXACT_MATCH) {
+ ret = flb_hash_table_add(ctx->ht, buf, len, "", 0);
+ }
+ else if (ctx->mode == CHECK_PARTIAL_MATCH) {
+ ret = db_insert(ctx, buf, len);
+ }
+
+ if (ret >= 0) {
+ flb_plg_debug(ctx->ins, "file list: line=%i adds value='%s'", line, buf);
+ }
+ line++;
+ }
+
+ fclose(f);
+ return 0;
+}
+
+static int init_config(struct checklist *ctx)
+{
+ int ret;
+ char *tmp;
+ struct flb_time t0;
+ struct flb_time t1;
+ struct flb_time t_diff;
+
+ /* check if we have 'records' to add */
+ if (mk_list_size(ctx->records) == 0) {
+ flb_plg_warn(ctx->ins, "no 'record' options has been specified");
+ }
+
+ /* lookup mode */
+ ctx->mode = CHECK_EXACT_MATCH;
+ tmp = (char *) flb_filter_get_property("mode", ctx->ins);
+ if (tmp) {
+ if (strcasecmp(tmp, "exact") == 0) {
+ ctx->mode = CHECK_EXACT_MATCH;
+ }
+ else if (strcasecmp(tmp, "partial") == 0) {
+ ctx->mode = CHECK_PARTIAL_MATCH;
+ }
+ }
+
+ if (ctx->mode == CHECK_EXACT_MATCH) {
+ /* create hash table */
+ ctx->ht = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE,
+ CHECK_HASH_TABLE_SIZE, -1);
+ if (!ctx->ht) {
+ flb_plg_error(ctx->ins, "could not create hash table");
+ return -1;
+ }
+ }
+ else if (ctx->mode == CHECK_PARTIAL_MATCH) {
+ ret = db_init(ctx);
+ if (ret < 0) {
+ return -1;
+ }
+ }
+
+ /* record accessor pattern / key name */
+ ctx->ra_lookup_key = flb_ra_create(ctx->lookup_key, FLB_TRUE);
+ if (!ctx->ra_lookup_key) {
+ flb_plg_error(ctx->ins, "invalid lookup_key pattern: %s",
+ ctx->lookup_key);
+ return -1;
+ }
+
+ /* validate file */
+ if (!ctx->file) {
+ flb_plg_error(ctx->ins, "option 'file' is not set");
+ return -1;
+ }
+
+
+ /* load file content */
+ flb_time_get(&t0);
+ ret = load_file_patterns(ctx);
+ flb_time_get(&t1);
+
+ /* load time */
+ flb_time_diff(&t1, &t0, &t_diff);
+ flb_plg_info(ctx->ins, "load file elapsed time (sec.ns): %lu.%lu",
+ t_diff.tm.tv_sec, t_diff.tm.tv_nsec);
+
+ return ret;
+}
+
+static int cb_checklist_init(struct flb_filter_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ int ret;
+ struct checklist *ctx;
+
+ ctx = flb_calloc(1, sizeof(struct checklist));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+ ctx->config = config;
+
+ /* set context */
+ flb_filter_set_context(ins, ctx);
+
+ /* Set config_map properties in our local context */
+ ret = flb_filter_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ ret = init_config(ctx);
+
+    return ret;
+}
+
+static int set_record(struct checklist *ctx,
+ struct flb_log_event_encoder *log_encoder,
+ struct flb_log_event *log_event)
+{
+ int i;
+ int len;
+ int ret;
+ int skip;
+ msgpack_object k;
+ msgpack_object v;
+ msgpack_object *map;
+ struct mk_list *head;
+ struct flb_slist_entry *r_key;
+ struct flb_slist_entry *r_val;
+ struct flb_config_map_val *mv;
+
+ ret = flb_log_event_encoder_begin_record(log_encoder);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -1;
+ }
+
+ ret = flb_log_event_encoder_set_timestamp(log_encoder, &log_event->timestamp);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -2;
+ }
+
+ ret = flb_log_event_encoder_set_metadata_from_msgpack_object(log_encoder,
+ log_event->metadata);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -3;
+ }
+
+ map = log_event->body;
+
+ for (i = 0; i < map->via.map.size; i++) {
+ k = map->via.map.ptr[i].key;
+ v = map->via.map.ptr[i].val;
+
+ if (k.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+        /* iterate 'records' list, check if this key is duplicated */
+ skip = FLB_FALSE;
+ flb_config_map_foreach(head, mv, ctx->records) {
+ r_key = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ r_val = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ len = flb_sds_len(r_key->str);
+ if (k.via.str.size != len) {
+ continue;
+ }
+
+ if (strncmp(k.via.str.ptr, r_key->str, len) == 0) {
+ skip = FLB_TRUE;
+ break;
+ }
+ }
+
+ /*
+         * skip is true if the current key will be overridden by some entry of
+ * the 'records' list.
+ */
+ if (skip) {
+ continue;
+ }
+
+ ret = flb_log_event_encoder_append_body_values(
+ log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&k),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&v));
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -4;
+ }
+ }
+
+ /* Pack custom records */
+ flb_config_map_foreach(head, mv, ctx->records) {
+ r_key = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ r_val = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ ret = flb_log_event_encoder_append_body_string(
+ log_encoder, r_key->str, flb_sds_len(r_key->str));
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -5;
+ }
+
+ if (strcasecmp(r_val->str, "true") == 0) {
+ ret = flb_log_event_encoder_append_body_boolean(
+ log_encoder, FLB_TRUE);
+ }
+ else if (strcasecmp(r_val->str, "false") == 0) {
+ ret = flb_log_event_encoder_append_body_boolean(
+ log_encoder, FLB_FALSE);
+ }
+ else if (strcasecmp(r_val->str, "null") == 0) {
+ ret = flb_log_event_encoder_append_body_null(
+ log_encoder);
+ }
+ else {
+ ret = flb_log_event_encoder_append_body_string(
+ log_encoder, r_val->str, flb_sds_len(r_val->str));
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -3;
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(log_encoder);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -4;
+ }
+
+ return 0;
+}
+
+static int cb_checklist_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_bytes,
+ struct flb_filter_instance *ins,
+ struct flb_input_instance *i_ins,
+ void *filter_context,
+ struct flb_config *config)
+{
+ int i;
+ int id;
+ int found;
+ int matches = 0;
+ size_t pre = 0;
+ size_t off = 0;
+ size_t cmp_size;
+ char *cmp_buf;
+ char *tmp_buf;
+ size_t tmp_size;
+ struct checklist *ctx = filter_context;
+ struct flb_ra_value *rval;
+ struct flb_time t0;
+ struct flb_time t1;
+ struct flb_time t_diff;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ (void) ins;
+ (void) i_ins;
+ (void) config;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ins, "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ off = log_decoder.offset;
+ found = FLB_FALSE;
+
+ rval = flb_ra_get_value_object(ctx->ra_lookup_key, *log_event.body);
+ if (rval) {
+ if (ctx->print_query_time) {
+ flb_time_get(&t0);
+ }
+
+ cmp_buf = NULL;
+ if (rval->type == FLB_RA_STRING) {
+ /* convert to lowercase */
+ if (ctx->ignore_case) {
+ cmp_buf = flb_calloc(1, rval->o.via.str.size + 1);
+ if (!cmp_buf) {
+ flb_errno();
+ flb_ra_key_value_destroy(rval);
+ continue;
+ }
+ memcpy(cmp_buf, rval->o.via.str.ptr, rval->o.via.str.size);
+ for (i = 0; i < rval->o.via.str.size; i++) {
+ cmp_buf[i] = tolower(cmp_buf[i]);
+ }
+ }
+ else {
+ cmp_buf = (char *) rval->o.via.str.ptr;
+ }
+ cmp_size = rval->o.via.str.size;
+
+ if (ctx->mode == CHECK_EXACT_MATCH) {
+ id = flb_hash_table_get(ctx->ht, cmp_buf, cmp_size,
+ (void *) &tmp_buf, &tmp_size);
+ if (id >= 0) {
+ found = FLB_TRUE;
+ }
+ }
+ else if (ctx->mode == CHECK_PARTIAL_MATCH) {
+ found = db_check(ctx, cmp_buf, cmp_size);
+ }
+
+ if (cmp_buf && cmp_buf != (char *) rval->o.via.str.ptr) {
+ flb_free(cmp_buf);
+ }
+ }
+
+ /* print elapsed time */
+ if (ctx->print_query_time && found) {
+ flb_time_get(&t1);
+ flb_time_diff(&t1, &t0, &t_diff);
+
+ flb_plg_info(ctx->ins,
+ "query time (sec.ns): %lu.%lu : '%.*s'",
+ t_diff.tm.tv_sec,
+ t_diff.tm.tv_nsec,
+ (int) rval->o.via.str.size,
+ rval->o.via.str.ptr);
+ }
+
+ flb_ra_key_value_destroy(rval);
+ }
+
+ if (found) {
+            /* add any previous content that did not match */
+ if (log_encoder.output_length == 0 && pre > 0) {
+ ret = flb_log_event_encoder_emit_raw_record(
+ &log_encoder,
+ data,
+ pre);
+ }
+
+ ret = set_record(ctx, &log_encoder, &log_event);
+
+ if (ret < -1) {
+ flb_log_event_encoder_rollback_record(&log_encoder);
+ }
+
+ matches++;
+ }
+ else {
+ if (log_encoder.output_length > 0) {
+ /* append current record to new buffer */
+ ret = flb_log_event_encoder_emit_raw_record(
+ &log_encoder,
+ &((char *) data)[pre],
+ off - pre);
+ }
+ }
+ pre = off;
+ }
+
+ if (log_encoder.output_length > 0 && matches > 0) {
+ *out_buf = log_encoder.output_buffer;
+ *out_bytes = log_encoder.output_length;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+
+ ret = FLB_FILTER_MODIFIED;
+ }
+ else {
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+static int cb_exit(void *data, struct flb_config *config)
+{
+ struct checklist *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->ra_lookup_key) {
+ flb_ra_destroy(ctx->ra_lookup_key);
+ }
+
+ if (ctx->ht) {
+ flb_hash_table_destroy(ctx->ht);
+ }
+
+ if (ctx->db) {
+ sqlite3_finalize(ctx->stmt_insert);
+ sqlite3_finalize(ctx->stmt_check);
+ flb_sqldb_close(ctx->db);
+ }
+
+ flb_free(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "file", NULL,
+ 0, FLB_TRUE, offsetof(struct checklist, file),
+ "Specify the file that contains the patterns to lookup."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "mode", "exact",
+ 0, FLB_FALSE, 0,
+ "Set the check mode: 'exact' or 'partial'."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "print_query_time", "false",
+ 0, FLB_TRUE, offsetof(struct checklist, print_query_time),
+ "Print to stdout the elapseed query time for every matched record"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "ignore_case", "false",
+ 0, FLB_TRUE, offsetof(struct checklist, ignore_case),
+ "Compare strings by ignoring case."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "lookup_key", "log",
+ 0, FLB_TRUE, offsetof(struct checklist, lookup_key),
+ "Name of the key to lookup."
+ },
+
+ {
+ FLB_CONFIG_MAP_SLIST_2, "record", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct checklist, records),
+ "Name of record key to add and its value, it accept two values,e.g "
+ "'record mykey my val'. You can add many 'record' entries as needed."
+ },
+
+ /* EOF */
+ {0}
+};
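+
+/*
+ * A minimal usage sketch for this filter. The 'Name' and 'Match' keys are the
+ * standard Fluent Bit filter selectors; the remaining keys map to the
+ * config_map entries above, and the file path, lookup key and record values
+ * shown here are hypothetical:
+ *
+ *   [FILTER]
+ *       Name          checklist
+ *       Match         *
+ *       File          /etc/fluent-bit/bad_ips.txt
+ *       Lookup_key    $remote_addr
+ *       Mode          exact
+ *       Ignore_case   true
+ *       Record        ioc_detected true
+ *
+ * When a record's lookup value is found in the file, set_record() rewrites the
+ * record and appends each configured 'record' pair; the values 'true', 'false'
+ * and 'null' are converted to their native types.
+ */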
+
+struct flb_filter_plugin filter_checklist_plugin = {
+ .name = "checklist",
+ .description = "Check records and flag them",
+ .cb_init = cb_checklist_init,
+ .cb_filter = cb_checklist_filter,
+ .cb_exit = cb_exit,
+ .config_map = config_map,
+ .flags = 0
+};
diff --git a/src/fluent-bit/plugins/filter_checklist/checklist.h b/src/fluent-bit/plugins/filter_checklist/checklist.h
new file mode 100644
index 000000000..8cd39516d
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_checklist/checklist.h
@@ -0,0 +1,69 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_CHECK_H
+#define FLB_FILTER_CHECK_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sqldb.h>
+#include <fluent-bit/flb_hash_table.h>
+#include <fluent-bit/flb_record_accessor.h>
+
+#define LINE_SIZE 2048
+#define CHECK_HASH_TABLE_SIZE 100000
+#define CHECK_EXACT_MATCH 0 /* exact string match */
+#define CHECK_PARTIAL_MATCH 1 /* partial match */
+
+/* plugin context */
+struct checklist {
+ /* config options */
+ int mode;
+ int ignore_case;
+ int print_query_time;
+ flb_sds_t file;
+ flb_sds_t lookup_key;
+ struct mk_list *records;
+
+ /* internal */
+ struct flb_sqldb *db;
+ sqlite3_stmt *stmt_insert;
+ sqlite3_stmt *stmt_check;
+ struct flb_hash_table *ht;
+ struct flb_record_accessor *ra_lookup_key;
+ struct flb_filter_instance *ins;
+ struct flb_config *config;
+};
+
+/* create table */
+#define SQL_CREATE_TABLE \
+ "CREATE TABLE IF NOT EXISTS list (" \
+ " pattern text " \
+ ");"
+
+#define SQL_CASE_SENSITIVE \
+ "PRAGMA case_sensitive_like = true;"
+
+/* insert pattern into list table */
+#define SQL_INSERT "INSERT INTO list (pattern) VALUES (@val);"
+
+/* validate incoming value against list */
+#define SQL_CHECK \
+ "SELECT pattern FROM list WHERE @val LIKE (pattern || '%');"
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_ecs/CMakeLists.txt b/src/fluent-bit/plugins/filter_ecs/CMakeLists.txt
new file mode 100644
index 000000000..335a870f7
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_ecs/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ ecs.c
+ )
+
+FLB_PLUGIN(filter_ecs "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_ecs/ecs.c b/src/fluent-bit/plugins/filter_ecs/ecs.c
new file mode 100644
index 000000000..82339e60e
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_ecs/ecs.c
@@ -0,0 +1,1760 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <fluent-bit/flb_aws_util.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_io.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_env.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <monkey/mk_core/mk_list.h>
+#include <msgpack.h>
+#include <stdlib.h>
+#include <errno.h>
+
+#include "ecs.h"
+
+static int get_ecs_cluster_metadata(struct flb_filter_ecs *ctx);
+static void flb_filter_ecs_destroy(struct flb_filter_ecs *ctx);
+
+/* cluster metadata is static, so we can expose it on the global ctx for other plugins to use */
+static void expose_ecs_cluster_meta(struct flb_filter_ecs *ctx)
+{
+ struct flb_env *env;
+ struct flb_config *config = ctx->ins->config;
+
+ env = config->env;
+
+ flb_env_set(env, "ecs", "enabled");
+
+ if (ctx->cluster_metadata.cluster_name) {
+ flb_env_set(env,
+ "aws.ecs.cluster_name",
+ ctx->cluster_metadata.cluster_name);
+ }
+
+ if (ctx->cluster_metadata.container_instance_arn) {
+ flb_env_set(env,
+ "aws.ecs.container_instance_arn",
+ ctx->cluster_metadata.container_instance_arn);
+ }
+
+ if (ctx->cluster_metadata.container_instance_id) {
+ flb_env_set(env,
+ "aws.ecs.container_instance_id",
+ ctx->cluster_metadata.container_instance_id);
+ }
+
+ if (ctx->cluster_metadata.ecs_agent_version) {
+ flb_env_set(env,
+ "aws.ecs.ecs_agent_version",
+                    ctx->cluster_metadata.ecs_agent_version);
+ }
+}
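+
+/*
+ * Once set, these values live in the config environment, so they can be
+ * referenced elsewhere in the pipeline, e.g. as ${aws.ecs.cluster_name} in a
+ * configuration file (assumption: standard flb_env variable interpolation).
+ */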
+
+static int cb_ecs_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config,
+ void *data)
+{
+ int ret;
+ struct flb_filter_ecs *ctx = NULL;
+ struct mk_list *head;
+ struct mk_list *split;
+ struct flb_kv *kv;
+ struct flb_split_entry *sentry;
+ int list_size;
+ struct flb_ecs_metadata_key *ecs_meta = NULL;
+ (void) data;
+
+ /* Create context */
+ ctx = flb_calloc(1, sizeof(struct flb_filter_ecs));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->ins = f_ins;
+
+ /* Populate context with config map defaults and incoming properties */
+ ret = flb_filter_config_map_set(f_ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(f_ins, "configuration error");
+ flb_free(ctx);
+ return -1;
+ }
+
+ mk_list_init(&ctx->metadata_keys);
+ ctx->metadata_keys_len = 0;
+ mk_list_init(&ctx->metadata_buffers);
+
+ mk_list_foreach(head, &f_ins->properties) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+
+ if (strcasecmp(kv->key, "add") == 0) {
+ split = flb_utils_split(kv->val, ' ', 2);
+ list_size = mk_list_size(split);
+
+ if (list_size == 0 || list_size > 2) {
+ flb_plg_error(ctx->ins, "Invalid config for %s", kv->key);
+ flb_utils_split_free(split);
+ goto error;
+ }
+
+ sentry = mk_list_entry_first(split, struct flb_split_entry, _head);
+ ecs_meta = flb_calloc(1, sizeof(struct flb_ecs_metadata_key));
+ if (!ecs_meta) {
+ flb_errno();
+ flb_utils_split_free(split);
+ goto error;
+ }
+
+ ecs_meta->key = flb_sds_create_len(sentry->value, sentry->len);
+ if (!ecs_meta->key) {
+ flb_errno();
+ flb_utils_split_free(split);
+ goto error;
+ }
+
+ sentry = mk_list_entry_last(split, struct flb_split_entry, _head);
+
+ ecs_meta->template = flb_sds_create_len(sentry->value, sentry->len);
+ if (!ecs_meta->template) {
+ flb_errno();
+ flb_utils_split_free(split);
+ goto error;
+ }
+
+ ecs_meta->ra = flb_ra_create(ecs_meta->template, FLB_FALSE);
+ if (ecs_meta->ra == NULL) {
+ flb_plg_error(ctx->ins, "Could not parse template for `%s`", ecs_meta->key);
+ flb_utils_split_free(split);
+ goto error;
+ }
+
+ mk_list_add(&ecs_meta->_head, &ctx->metadata_keys);
+ ctx->metadata_keys_len++;
+ flb_utils_split_free(split);
+ }
+ }
+
+ ctx->ecs_upstream = flb_upstream_create(config,
+ ctx->ecs_host,
+ ctx->ecs_port,
+ FLB_IO_TCP,
+ NULL);
+
+ if (!ctx->ecs_upstream) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "Could not create upstream connection to ECS Agent");
+ goto error;
+ }
+
+ flb_stream_disable_async_mode(&ctx->ecs_upstream->base);
+ ctx->has_cluster_metadata = FLB_FALSE;
+
+ /* entries are only evicted when TTL is reached and a get is issued */
+ ctx->container_hash_table = flb_hash_table_create_with_ttl(ctx->ecs_meta_cache_ttl,
+ FLB_HASH_TABLE_EVICT_OLDER,
+ FLB_ECS_FILTER_HASH_TABLE_SIZE,
+ FLB_ECS_FILTER_HASH_TABLE_SIZE);
+ if (!ctx->container_hash_table) {
+ flb_plg_error(f_ins, "failed to create container_hash_table");
+ goto error;
+ }
+
+ ctx->failed_metadata_request_tags = flb_hash_table_create_with_ttl(ctx->ecs_meta_cache_ttl,
+ FLB_HASH_TABLE_EVICT_OLDER,
+ FLB_ECS_FILTER_HASH_TABLE_SIZE,
+ FLB_ECS_FILTER_HASH_TABLE_SIZE);
+ if (!ctx->failed_metadata_request_tags) {
+ flb_plg_error(f_ins, "failed to create failed_metadata_request_tags table");
+ goto error;
+ }
+
+ ctx->ecs_tag_prefix_len = strlen(ctx->ecs_tag_prefix);
+
+ /* attempt to get metadata in init, can retry in cb_filter */
+ ret = get_ecs_cluster_metadata(ctx);
+
+ flb_filter_set_context(f_ins, ctx);
+ return 0;
+
+error:
+ flb_plg_error(ctx->ins, "Initialization failed.");
+ flb_filter_ecs_destroy(ctx);
+ return -1;
+}
+
+static int plugin_under_test()
+{
+ if (getenv("FLB_ECS_PLUGIN_UNDER_TEST") != NULL) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+static char *mock_error_response(char *error_env_var)
+{
+ char *err_val = NULL;
+ char *error = NULL;
+ int len = 0;
+
+ err_val = getenv(error_env_var);
+ if (err_val != NULL && strlen(err_val) > 0) {
+ error = flb_malloc(strlen(err_val) + sizeof(char));
+ if (error == NULL) {
+ flb_errno();
+ return NULL;
+ }
+
+ len = strlen(err_val);
+ memcpy(error, err_val, len);
+ error[len] = '\0';
+ return error;
+ }
+
+ return NULL;
+}
+
+static struct flb_http_client *mock_http_call(char *error_env_var, char *api)
+{
+ /* create an http client so that we can set the response */
+ struct flb_http_client *c = NULL;
+ char *error = mock_error_response(error_env_var);
+
+ c = flb_calloc(1, sizeof(struct flb_http_client));
+ if (!c) {
+ flb_errno();
+ flb_free(error);
+ return NULL;
+ }
+ mk_list_init(&c->headers);
+
+ if (error != NULL) {
+ c->resp.status = 400;
+ /* resp.data is freed on destroy, payload is supposed to reference it */
+ c->resp.data = error;
+ c->resp.payload = c->resp.data;
+ c->resp.payload_size = strlen(error);
+ }
+ else {
+ c->resp.status = 200;
+ if (strcmp(api, "Cluster") == 0) {
+ /* mocked success response */
+ c->resp.payload = "{\"Cluster\": \"cluster_name\",\"ContainerInstanceArn\": \"arn:aws:ecs:region:aws_account_id:container-instance/cluster_name/container_instance_id\",\"Version\": \"Amazon ECS Agent - v1.30.0 (02ff320c)\"}";
+ c->resp.payload_size = strlen(c->resp.payload);
+ }
+ else {
+ c->resp.payload = "{\"Arn\": \"arn:aws:ecs:us-west-2:012345678910:task/default/e01d58a8-151b-40e8-bc01-22647b9ecfec\",\"Containers\": [{\"DockerId\": \"79c796ed2a7f864f485c76f83f3165488097279d296a7c05bd5201a1c69b2920\",\"DockerName\": \"ecs-nginx-efs-2-nginx-9ac0808dd0afa495f001\",\"Name\": \"nginx\"}],\"DesiredStatus\": \"RUNNING\",\"Family\": \"nginx-efs\",\"KnownStatus\": \"RUNNING\",\"Version\": \"2\"}";
+ c->resp.payload_size = strlen(c->resp.payload);
+ }
+ }
+
+ return c;
+}
+
+/*
+ * Both container instance and task ARNs have the ID at the end after last '/'
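+ * e.g. "arn:aws:ecs:us-west-2:012345678910:task/default/e01d58a8-151b-40e8-bc01-22647b9ecfec"
+ * yields the ID "e01d58a8-151b-40e8-bc01-22647b9ecfec"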
+ */
+static flb_sds_t parse_id_from_arn(const char *arn, int len)
+{
+ int i;
+ flb_sds_t ID = NULL;
+ int last_slash = 0;
+ int id_start = 0;
+
+ for (i = 0; i < len; i++) {
+ if (arn[i] == '/') {
+ last_slash = i;
+ }
+ }
+
+ if (last_slash == 0 || last_slash >= len - 2) {
+ return NULL;
+ }
+ id_start = last_slash + 1;
+
+ ID = flb_sds_create_len(arn + id_start, len - id_start);
+ if (ID == NULL) {
+ flb_errno();
+ return NULL;
+ }
+
+ return ID;
+}
+
+/*
+ * This deserializes the msgpack metadata buf to msgpack_object
+ * which can be used with flb_ra_translate in the main filter callback
+ */
+static int flb_ecs_metadata_buffer_init(struct flb_filter_ecs *ctx,
+ struct flb_ecs_metadata_buffer *meta)
+{
+ msgpack_unpacked result;
+ msgpack_object root;
+ size_t off = 0;
+ int ret;
+
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, meta->buf, meta->size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ flb_plg_error(ctx->ins, "Cannot unpack flb_ecs_metadata_buffer");
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ root = result.data;
+ if (root.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "Cannot unpack flb_ecs_metadata_buffer, msgpack_type=%i",
+ root.type);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ meta->unpacked = result;
+ meta->obj = root;
+ meta->last_used_time = time(NULL);
+ meta->free_packer = FLB_TRUE;
+
+ return 0;
+}
+
+static void flb_ecs_metadata_buffer_destroy(struct flb_ecs_metadata_buffer *meta)
+{
+ if (meta) {
+ flb_free(meta->buf);
+ if (meta->free_packer == FLB_TRUE) {
+ msgpack_unpacked_destroy(&meta->unpacked);
+ }
+ if (meta->id) {
+ flb_sds_destroy(meta->id);
+ }
+ flb_free(meta);
+ }
+}
+
+/*
+ * Get cluster and container instance info, which are static and never change
+ */
+static int get_ecs_cluster_metadata(struct flb_filter_ecs *ctx)
+{
+ struct flb_http_client *c;
+ struct flb_connection *u_conn;
+ int ret;
+ int root_type;
+ int found_cluster = FLB_FALSE;
+ int found_version = FLB_FALSE;
+ int found_instance = FLB_FALSE;
+ int free_conn = FLB_FALSE;
+ int i;
+ int len;
+ char *buffer;
+ size_t size;
+ size_t b_sent;
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object root;
+ msgpack_object key;
+ msgpack_object val;
+ msgpack_sbuffer tmp_sbuf;
+ msgpack_packer tmp_pck;
+ flb_sds_t container_instance_id = NULL;
+ flb_sds_t tmp = NULL;
+
+ /* Compose HTTP Client request*/
+ if (plugin_under_test() == FLB_TRUE) {
+ c = mock_http_call("TEST_CLUSTER_ERROR", "Cluster");
+ ret = 0;
+ }
+ else {
+ u_conn = flb_upstream_conn_get(ctx->ecs_upstream);
+
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "ECS agent introspection endpoint connection error");
+ return -1;
+ }
+ free_conn = FLB_TRUE;
+ c = flb_http_client(u_conn, FLB_HTTP_GET,
+ FLB_ECS_FILTER_CLUSTER_PATH,
+ NULL, 0,
+ ctx->ecs_host, ctx->ecs_port,
+ NULL, 0);
+ flb_http_buffer_size(c, 0); /* 0 means unlimited */
+
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+
+ ret = flb_http_do(c, &b_sent);
+ flb_plg_debug(ctx->ins, "http_do=%i, "
+ "HTTP Status: %i",
+ ret, c->resp.status);
+ }
+
+ if (ret != 0 || c->resp.status != 200) {
+ if (c->resp.payload_size > 0) {
+ flb_plg_warn(ctx->ins, "Failed to get metadata from %s, will retry",
+ FLB_ECS_FILTER_CLUSTER_PATH);
+ flb_plg_debug(ctx->ins, "HTTP response\n%s",
+ c->resp.payload);
+ } else {
+ flb_plg_warn(ctx->ins, "%s response status was %d with no payload, will retry",
+ FLB_ECS_FILTER_CLUSTER_PATH,
+ c->resp.status);
+ }
+ flb_http_client_destroy(c);
+ if (free_conn == FLB_TRUE) {
+ flb_upstream_conn_release(u_conn);
+ }
+ return -1;
+ }
+
+ if (free_conn == FLB_TRUE) {
+ flb_upstream_conn_release(u_conn);
+ }
+
+ ret = flb_pack_json(c->resp.payload, c->resp.payload_size,
+ &buffer, &size, &root_type, NULL);
+
+ if (ret < 0) {
+ flb_plg_warn(ctx->ins, "Could not parse response from %s; response=\n%s",
+ FLB_ECS_FILTER_CLUSTER_PATH, c->resp.payload);
+ flb_http_client_destroy(c);
+ return -1;
+ }
+
+ /* parse metadata response */
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, buffer, size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ flb_plg_error(ctx->ins, "Cannot unpack %s response to find metadata\n%s",
+ FLB_ECS_FILTER_CLUSTER_PATH, c->resp.payload);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ flb_http_client_destroy(c);
+ return -1;
+ }
+
+ flb_http_client_destroy(c);
+
+ root = result.data;
+ if (root.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "%s response parsing failed, msgpack_type=%i",
+ FLB_ECS_FILTER_CLUSTER_PATH,
+ root.type);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ /*
+Metadata Response:
+{
+ "Cluster": "cluster_name",
+ "ContainerInstanceArn": "arn:aws:ecs:region:aws_account_id:container-instance/cluster_name/container_instance_id",
+ "Version": "Amazon ECS Agent - v1.30.0 (02ff320c)"
+}
+But our metadata keys names are:
+{
+ "ClusterName": "cluster_name",
+ "ContainerInstanceArn": "arn:aws:ecs:region:aws_account_id:container-instance/cluster_name/container_instance_id",
+ "ContainerInstanceID": "container_instance_id"
+ "ECSAgentVersion": "Amazon ECS Agent - v1.30.0 (02ff320c)"
+}
+ */
+
+ for (i = 0; i < root.via.map.size; i++) {
+ key = root.via.map.ptr[i].key;
+ if (key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "%s response parsing failed, msgpack key type=%i",
+ FLB_ECS_FILTER_CLUSTER_PATH,
+ key.type);
+ continue;
+ }
+
+ if (key.via.str.size == 7 && strncmp(key.via.str.ptr, "Cluster", 7) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "metadata parsing: unexpected 'Cluster' value type=%i",
+ val.type);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ found_cluster = FLB_TRUE;
+ if (ctx->cluster_metadata.cluster_name == NULL) {
+ tmp = flb_sds_create_len(val.via.str.ptr, (int) val.via.str.size);
+ if (!tmp) {
+ flb_errno();
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+ ctx->cluster_metadata.cluster_name = tmp;
+ }
+
+ }
+ else if (key.via.str.size == 20 && strncmp(key.via.str.ptr, "ContainerInstanceArn", 20) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "metadata parsing: unexpected 'ContainerInstanceArn' value type=%i",
+ val.type);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ /* first the ARN */
+ found_instance = FLB_TRUE;
+ if (ctx->cluster_metadata.container_instance_arn == NULL) {
+ tmp = flb_sds_create_len(val.via.str.ptr, (int) val.via.str.size);
+ if (!tmp) {
+ flb_errno();
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+ ctx->cluster_metadata.container_instance_arn = tmp;
+ }
+
+ /* then the ID */
+ if (ctx->cluster_metadata.container_instance_id == NULL) {
+ container_instance_id = parse_id_from_arn(val.via.str.ptr, (int) val.via.str.size);
+ if (container_instance_id == NULL) {
+ flb_plg_error(ctx->ins, "metadata parsing: failed to get ID from %.*s",
+ (int) val.via.str.size, val.via.str.ptr);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+ ctx->cluster_metadata.container_instance_id = container_instance_id;
+ }
+
+ } else if (key.via.str.size == 7 && strncmp(key.via.str.ptr, "Version", 7) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "metadata parsing: unexpected 'Version' value type=%i",
+ val.type);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ found_version = FLB_TRUE;
+ if (ctx->cluster_metadata.ecs_agent_version == NULL) {
+ tmp = flb_sds_create_len(val.via.str.ptr, (int) val.via.str.size);
+ if (!tmp) {
+ flb_errno();
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+ ctx->cluster_metadata.ecs_agent_version = tmp;
+ }
+ }
+
+ }
+
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+
+ if (found_cluster == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Could not parse 'Cluster' from %s response",
+ FLB_ECS_FILTER_CLUSTER_PATH);
+ return -1;
+ }
+ if (found_instance == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Could not parse 'ContainerInstanceArn' from %s response",
+ FLB_ECS_FILTER_CLUSTER_PATH);
+ return -1;
+ }
+ if (found_version == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Could not parse 'Version' from %s response",
+ FLB_ECS_FILTER_CLUSTER_PATH);
+ return -1;
+ }
+
+ /*
+     * We also create a standalone cluster metadata msgpack object.
+     * This is used as a fallback for logs when we can't find the
+     * task metadata for a log. It is valid to attach cluster metadata
+     * to, for example, Docker daemon logs that do not come from an AWS ECS
+     * Task, via the `cluster_metadata_only` setting.
+ */
+ msgpack_sbuffer_init(&tmp_sbuf);
+ msgpack_packer_init(&tmp_pck, &tmp_sbuf, msgpack_sbuffer_write);
+ msgpack_pack_map(&tmp_pck, 4);
+
+ msgpack_pack_str(&tmp_pck, 11);
+ msgpack_pack_str_body(&tmp_pck,
+ "ClusterName",
+ 11);
+ len = flb_sds_len(ctx->cluster_metadata.cluster_name);
+ msgpack_pack_str(&tmp_pck, len);
+ msgpack_pack_str_body(&tmp_pck,
+ ctx->cluster_metadata.cluster_name,
+ len);
+
+ msgpack_pack_str(&tmp_pck, 20);
+ msgpack_pack_str_body(&tmp_pck,
+ "ContainerInstanceArn",
+ 20);
+ len = flb_sds_len(ctx->cluster_metadata.container_instance_arn);
+ msgpack_pack_str(&tmp_pck, len);
+ msgpack_pack_str_body(&tmp_pck,
+ ctx->cluster_metadata.container_instance_arn,
+ len);
+
+ msgpack_pack_str(&tmp_pck, 19);
+ msgpack_pack_str_body(&tmp_pck,
+ "ContainerInstanceID",
+ 19);
+ len = flb_sds_len(ctx->cluster_metadata.container_instance_id);
+ msgpack_pack_str(&tmp_pck, len);
+ msgpack_pack_str_body(&tmp_pck,
+ ctx->cluster_metadata.container_instance_id,
+ len);
+
+ msgpack_pack_str(&tmp_pck, 15);
+ msgpack_pack_str_body(&tmp_pck,
+ "ECSAgentVersion",
+ 15);
+ len = flb_sds_len(ctx->cluster_metadata.ecs_agent_version);
+ msgpack_pack_str(&tmp_pck, len);
+ msgpack_pack_str_body(&tmp_pck,
+ ctx->cluster_metadata.ecs_agent_version,
+ len);
+
+ ctx->cluster_meta_buf.buf = tmp_sbuf.data;
+ ctx->cluster_meta_buf.size = tmp_sbuf.size;
+
+ ret = flb_ecs_metadata_buffer_init(ctx, &ctx->cluster_meta_buf);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Could not init metadata buffer from %s response",
+ FLB_ECS_FILTER_CLUSTER_PATH);
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ ctx->cluster_meta_buf.buf = NULL;
+ ctx->cluster_meta_buf.size = 0;
+ return -1;
+ }
+
+ ctx->has_cluster_metadata = FLB_TRUE;
+ expose_ecs_cluster_meta(ctx);
+ return 0;
+}
+
+/*
+ * This is the helper function used by get_task_metadata()
+ * that actually creates the final metadata msgpack buffer
+ * with our final key names.
+ * It collects cluster, task, and container metadata into one buffer.
+The new metadata msgpack is flat and looks like:
+{
+ "ContainerID": "79c796ed2a7f864f485c76f83f3165488097279d296a7c05bd5201a1c69b2920",
+ "DockerContainerName": "ecs-nginx-efs-2-nginx-9ac0808dd0afa495f001",
+ "ECSContainerName": "nginx",
+
+ "ClusterName": "cluster_name",
+ "ContainerInstanceArn": "arn:aws:ecs:region:aws_account_id:container-instance/cluster_name/container_instance_id",
+ "ContainerInstanceID": "container_instance_id"
+ "ECSAgentVersion": "Amazon ECS Agent - v1.30.0 (02ff320c)"
+
+ "TaskARN": "arn:aws:ecs:us-west-2:012345678910:task/default/example5-58ff-46c9-ae05-543f8example",
+ "TaskID: "example5-58ff-46c9-ae05-543f8example",
+ "TaskDefinitionFamily": "hello_world",
+ "TaskDefinitionVersion": "8",
+}
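+
+As a hedged usage sketch (the filter name and the output key names below are
+illustrative), the 'add' option parsed in cb_ecs_init() can reference any of
+these flattened keys through record accessor templates:
+
+    [FILTER]
+        Name  ecs
+        Match *
+        ADD   ecs_task_id $TaskID
+        ADD   cluster     $ClusterName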
+ */
+static int process_container_response(struct flb_filter_ecs *ctx,
+ msgpack_object container,
+ struct flb_ecs_task_metadata task_meta)
+{
+ int ret;
+ int found_id = FLB_FALSE;
+ int found_ecs_name = FLB_FALSE;
+ int found_docker_name = FLB_FALSE;
+ int i;
+ int len;
+ struct flb_ecs_metadata_buffer *cont_meta_buf;
+ msgpack_object key;
+ msgpack_object val;
+ msgpack_sbuffer tmp_sbuf;
+ msgpack_packer tmp_pck;
+ flb_sds_t short_id = NULL;
+
+ /*
+ * We copy the metadata response to a new buffer
+ * So we can define the metadata key names
+ */
+ msgpack_sbuffer_init(&tmp_sbuf);
+ msgpack_packer_init(&tmp_pck, &tmp_sbuf, msgpack_sbuffer_write);
+
+ /* 3 container metadata keys, 4 for instance/cluster, 4 for the task */
+ msgpack_pack_map(&tmp_pck, 11);
+
+ /* 1st- process/pack the raw container metadata response */
+ for (i = 0; i < container.via.map.size; i++) {
+ key = container.via.map.ptr[i].key;
+ if (key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "Container metadata parsing failed, msgpack key type=%i",
+ key.type);
+ continue;
+ }
+
+ if (key.via.str.size == 8 && strncmp(key.via.str.ptr, "DockerId", 8) == 0) {
+ val = container.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "metadata parsing: unexpected 'DockerId' value type=%i",
+ val.type);
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ if (short_id != NULL) {
+ flb_sds_destroy(short_id);
+ }
+ return -1;
+ }
+
+ /* save the short ID for hash table key */
+ short_id = flb_sds_create_len(val.via.str.ptr, 12);
+ if (!short_id) {
+ flb_errno();
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ return -1;
+ }
+
+ found_id = FLB_TRUE;
+ msgpack_pack_str(&tmp_pck, 11);
+ msgpack_pack_str_body(&tmp_pck,
+ "ContainerID",
+ 11);
+ msgpack_pack_str(&tmp_pck, (int) val.via.str.size);
+ msgpack_pack_str_body(&tmp_pck,
+ val.via.str.ptr,
+ (int) val.via.str.size);
+ }
+ else if (key.via.str.size == 10 && strncmp(key.via.str.ptr, "DockerName", 10) == 0) {
+ val = container.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "metadata parsing: unexpected 'DockerName' value type=%i",
+ val.type);
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ if (short_id != NULL) {
+ flb_sds_destroy(short_id);
+ }
+ return -1;
+ }
+
+            /* pack the Docker container name */
+ found_docker_name = FLB_TRUE;
+ msgpack_pack_str(&tmp_pck, 19);
+ msgpack_pack_str_body(&tmp_pck,
+ "DockerContainerName",
+ 19);
+ msgpack_pack_str(&tmp_pck, (int) val.via.str.size);
+ msgpack_pack_str_body(&tmp_pck,
+ val.via.str.ptr,
+ (int) val.via.str.size);
+ } else if (key.via.str.size == 4 && strncmp(key.via.str.ptr, "Name", 4) == 0) {
+ val = container.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "metadata parsing: unexpected 'Name' value type=%i",
+ val.type);
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ if (short_id != NULL) {
+ flb_sds_destroy(short_id);
+ }
+ return -1;
+ }
+
+ found_ecs_name = FLB_TRUE;
+ msgpack_pack_str(&tmp_pck, 16);
+ msgpack_pack_str_body(&tmp_pck,
+ "ECSContainerName",
+ 16);
+ msgpack_pack_str(&tmp_pck, (int) val.via.str.size);
+ msgpack_pack_str_body(&tmp_pck,
+ val.via.str.ptr,
+ (int) val.via.str.size);
+ }
+ }
+
+ if (found_id == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Could not parse Task 'DockerId' from container response");
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ return -1;
+ }
+ if (found_docker_name == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Could not parse 'DockerName' from container response");
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ if (short_id != NULL) {
+ flb_sds_destroy(short_id);
+ }
+ return -1;
+ }
+ if (found_ecs_name == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Could not parse 'Name' from container response");
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ if (short_id != NULL) {
+ flb_sds_destroy(short_id);
+ }
+ return -1;
+ }
+
+ /* 2nd - Add the task fields from the task_meta temp buf we were given */
+ msgpack_pack_str(&tmp_pck, 20);
+ msgpack_pack_str_body(&tmp_pck,
+ "TaskDefinitionFamily",
+ 20);
+ msgpack_pack_str(&tmp_pck, task_meta.task_def_family_len);
+ msgpack_pack_str_body(&tmp_pck,
+ task_meta.task_def_family,
+ task_meta.task_def_family_len);
+
+ msgpack_pack_str(&tmp_pck, 7);
+ msgpack_pack_str_body(&tmp_pck,
+ "TaskARN",
+ 7);
+ msgpack_pack_str(&tmp_pck, task_meta.task_arn_len);
+ msgpack_pack_str_body(&tmp_pck,
+ task_meta.task_arn,
+ task_meta.task_arn_len);
+ msgpack_pack_str(&tmp_pck, 6);
+ msgpack_pack_str_body(&tmp_pck,
+ "TaskID",
+ 6);
+ msgpack_pack_str(&tmp_pck, task_meta.task_id_len);
+ msgpack_pack_str_body(&tmp_pck,
+ task_meta.task_id,
+ task_meta.task_id_len);
+
+ msgpack_pack_str(&tmp_pck, 21);
+ msgpack_pack_str_body(&tmp_pck,
+ "TaskDefinitionVersion",
+ 21);
+ msgpack_pack_str(&tmp_pck, task_meta.task_def_version_len);
+ msgpack_pack_str_body(&tmp_pck,
+ task_meta.task_def_version,
+ task_meta.task_def_version_len);
+
+ /* 3rd - Add the static cluster fields from the plugin context */
+ msgpack_pack_str(&tmp_pck, 11);
+ msgpack_pack_str_body(&tmp_pck,
+ "ClusterName",
+ 11);
+ len = flb_sds_len(ctx->cluster_metadata.cluster_name);
+ msgpack_pack_str(&tmp_pck, len);
+ msgpack_pack_str_body(&tmp_pck,
+ ctx->cluster_metadata.cluster_name,
+ len);
+
+ msgpack_pack_str(&tmp_pck, 20);
+ msgpack_pack_str_body(&tmp_pck,
+ "ContainerInstanceArn",
+ 20);
+ len = flb_sds_len(ctx->cluster_metadata.container_instance_arn);
+ msgpack_pack_str(&tmp_pck, len);
+ msgpack_pack_str_body(&tmp_pck,
+ ctx->cluster_metadata.container_instance_arn,
+ len);
+
+ msgpack_pack_str(&tmp_pck, 19);
+ msgpack_pack_str_body(&tmp_pck,
+ "ContainerInstanceID",
+ 19);
+ len = flb_sds_len(ctx->cluster_metadata.container_instance_id);
+ msgpack_pack_str(&tmp_pck, len);
+ msgpack_pack_str_body(&tmp_pck,
+ ctx->cluster_metadata.container_instance_id,
+ len);
+
+ msgpack_pack_str(&tmp_pck, 15);
+ msgpack_pack_str_body(&tmp_pck,
+ "ECSAgentVersion",
+ 15);
+ len = flb_sds_len(ctx->cluster_metadata.ecs_agent_version);
+ msgpack_pack_str(&tmp_pck, len);
+ msgpack_pack_str_body(&tmp_pck,
+ ctx->cluster_metadata.ecs_agent_version,
+ len);
+
+ cont_meta_buf = flb_calloc(1, sizeof(struct flb_ecs_metadata_buffer));
+ if (!cont_meta_buf) {
+ flb_errno();
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ flb_sds_destroy(short_id);
+ return -1;
+ }
+
+ cont_meta_buf->buf = tmp_sbuf.data;
+ cont_meta_buf->size = tmp_sbuf.size;
+
+ ret = flb_ecs_metadata_buffer_init(ctx, cont_meta_buf);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Could not init metadata buffer from container response");
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ flb_free(cont_meta_buf);
+ flb_sds_destroy(short_id);
+ return -1;
+ }
+ cont_meta_buf->id = short_id;
+ mk_list_add(&cont_meta_buf->_head, &ctx->metadata_buffers);
+
+ /*
+ * Size is set to 0 so the table just stores our pointer
+ * Otherwise it will try to copy the memory to a new buffer
+ */
+ ret = flb_hash_table_add(ctx->container_hash_table,
+ short_id, strlen(short_id),
+ cont_meta_buf, 0);
+
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Could not add container ID %s to metadata hash table",
+ short_id);
+ flb_ecs_metadata_buffer_destroy(cont_meta_buf);
+ } else {
+ ret = 0;
+ flb_plg_debug(ctx->ins, "Added `%s` to container metadata hash table",
+ short_id);
+ }
+ return ret;
+}
+
+/*
+ * Gets the container and task metadata for a task via a container's
+ * 12 char short ID. This can be used with the ECS Agent
+ * Introspection API: http://localhost:51678/v1/tasks?dockerid={short_id}
+ * Entries in the hash table will be added for all containers in the task
+ */
+static int get_task_metadata(struct flb_filter_ecs *ctx, char* short_id)
+{
+ struct flb_http_client *c;
+ struct flb_connection *u_conn;
+ int ret;
+ int root_type;
+ int found_task = FLB_FALSE;
+ int found_version = FLB_FALSE;
+ int found_family = FLB_FALSE;
+ int found_containers = FLB_FALSE;
+ int free_conn = FLB_FALSE;
+ int i;
+ int k;
+ char *buffer;
+ size_t size;
+ size_t b_sent;
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object root;
+ msgpack_object key;
+ msgpack_object val;
+ msgpack_object container;
+ flb_sds_t tmp;
+ flb_sds_t http_path;
+ flb_sds_t task_id = NULL;
+ struct flb_ecs_task_metadata task_meta;
+
+ tmp = flb_sds_create_size(64);
+ if (!tmp) {
+ return -1;
+ }
+ http_path = flb_sds_printf(&tmp, FLB_ECS_FILTER_TASK_PATH_FORMAT, short_id);
+ if (!http_path) {
+ flb_sds_destroy(tmp);
+ return -1;
+ }
+
+ /* Compose HTTP Client request*/
+ if (plugin_under_test() == FLB_TRUE) {
+ c = mock_http_call("TEST_TASK_ERROR", "Task");
+ ret = 0;
+ }
+ else {
+ u_conn = flb_upstream_conn_get(ctx->ecs_upstream);
+
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "ECS agent introspection endpoint connection error");
+ flb_sds_destroy(http_path);
+ return -1;
+ }
+ free_conn = FLB_TRUE;
+ c = flb_http_client(u_conn, FLB_HTTP_GET,
+ http_path,
+ NULL, 0,
+ ctx->ecs_host, ctx->ecs_port,
+ NULL, 0);
+ flb_http_buffer_size(c, 0); /* 0 means unlimited */
+
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+
+ ret = flb_http_do(c, &b_sent);
+ flb_plg_debug(ctx->ins, "http_do=%i, "
+ "HTTP Status: %i",
+ ret, c->resp.status);
+ }
+
+ if (ret != 0 || c->resp.status != 200) {
+ if (c->resp.payload_size > 0) {
+ flb_plg_warn(ctx->ins, "Failed to get metadata from %s, will retry",
+ http_path);
+ flb_plg_debug(ctx->ins, "HTTP response\n%s",
+ c->resp.payload);
+ } else {
+ flb_plg_warn(ctx->ins, "%s response status was %d with no payload, will retry",
+ http_path,
+ c->resp.status);
+ }
+ flb_http_client_destroy(c);
+ if (free_conn == FLB_TRUE) {
+ flb_upstream_conn_release(u_conn);
+ }
+ flb_sds_destroy(http_path);
+ return -1;
+ }
+
+ if (free_conn == FLB_TRUE) {
+ flb_upstream_conn_release(u_conn);
+ }
+
+ ret = flb_pack_json(c->resp.payload, c->resp.payload_size,
+ &buffer, &size, &root_type, NULL);
+
+ if (ret < 0) {
+ flb_plg_warn(ctx->ins, "Could not parse response from %s; response=\n%s",
+ http_path, c->resp.payload);
+ flb_sds_destroy(http_path);
+ flb_http_client_destroy(c);
+ return -1;
+ }
+
+ /* parse metadata response */
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, buffer, size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ flb_plg_error(ctx->ins, "Cannot unpack %s response to find metadata\n%s",
+ http_path, c->resp.payload);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ flb_sds_destroy(http_path);
+ flb_http_client_destroy(c);
+ return -1;
+ }
+
+ flb_http_client_destroy(c);
+
+ root = result.data;
+ if (root.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "%s response parsing failed, msgpack_type=%i",
+ http_path,
+ root.type);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ flb_sds_destroy(http_path);
+ return -1;
+ }
+
+ /*
+Metadata Response:
+{
+ "Arn": "arn:aws:ecs:us-west-2:012345678910:task/default/e01d58a8-151b-40e8-bc01-22647b9ecfec",
+ "Containers": [
+ {
+ "DockerId": "79c796ed2a7f864f485c76f83f3165488097279d296a7c05bd5201a1c69b2920",
+ "DockerName": "ecs-nginx-efs-2-nginx-9ac0808dd0afa495f001",
+ "Name": "nginx"
+ }
+ ],
+ "DesiredStatus": "RUNNING",
+ "Family": "nginx-efs",
+ "KnownStatus": "RUNNING",
+ "Version": "2"
+}
+ */
+
+ for (i = 0; i < root.via.map.size; i++) {
+ key = root.via.map.ptr[i].key;
+ if (key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "%s response parsing failed, msgpack key type=%i",
+ http_path,
+ key.type);
+ continue;
+ }
+
+ if (key.via.str.size == 6 && strncmp(key.via.str.ptr, "Family", 6) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "metadata parsing: unexpected 'Family' value type=%i",
+ val.type);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ flb_sds_destroy(http_path);
+ if (task_id) {
+ flb_sds_destroy(task_id);
+ }
+ return -1;
+ }
+ found_family = FLB_TRUE;
+ task_meta.task_def_family = val.via.str.ptr;
+ task_meta.task_def_family_len = (int) val.via.str.size;
+ }
+ else if (key.via.str.size == 3 && strncmp(key.via.str.ptr, "Arn", 3) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "metadata parsing: unexpected 'Arn' value type=%i",
+ val.type);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ flb_sds_destroy(http_path);
+ if (task_id) {
+ flb_sds_destroy(task_id);
+ }
+ return -1;
+ }
+ /* first get the ARN */
+ found_task = FLB_TRUE;
+ task_meta.task_arn = val.via.str.ptr;
+ task_meta.task_arn_len = (int) val.via.str.size;
+
+ /* then get the ID */
+ task_id = parse_id_from_arn(val.via.str.ptr, (int) val.via.str.size);
+ if (task_id == NULL) {
+ flb_plg_error(ctx->ins, "metadata parsing: failed to get ID from %.*s",
+ (int) val.via.str.size, val.via.str.ptr);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ flb_sds_destroy(http_path);
+ if (task_id) {
+ flb_sds_destroy(task_id);
+ }
+ return -1;
+ }
+
+ task_meta.task_id = task_id;
+ task_meta.task_id_len = flb_sds_len(task_id);
+ } else if (key.via.str.size == 7 && strncmp(key.via.str.ptr, "Version", 7) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "metadata parsing: unexpected 'Version' value type=%i",
+ val.type);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ flb_sds_destroy(http_path);
+ if (task_id) {
+ flb_sds_destroy(task_id);
+ }
+ return -1;
+ }
+ found_version = FLB_TRUE;
+ task_meta.task_def_version = val.via.str.ptr;
+ task_meta.task_def_version_len = (int) val.via.str.size;
+ } else if (key.via.str.size == 10 && strncmp(key.via.str.ptr, "Containers", 10) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_ARRAY ) {
+ flb_plg_error(ctx->ins, "metadata parsing: unexpected 'Containers' value type=%i",
+ val.type);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ flb_sds_destroy(http_path);
+ if (task_id) {
+ flb_sds_destroy(task_id);
+ }
+ return -1;
+ }
+ found_containers = FLB_TRUE;
+ }
+ }
+
+ if (found_task == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Could not parse Task 'Arn' from %s response",
+ http_path);
+ flb_sds_destroy(http_path);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+ if (found_family == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Could not parse 'Family' from %s response",
+ http_path);
+ flb_sds_destroy(http_path);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ if (task_id) {
+ flb_sds_destroy(task_id);
+ }
+ return -1;
+ }
+ if (found_version == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Could not parse 'Version' from %s response",
+ http_path);
+ flb_sds_destroy(http_path);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ if (task_id) {
+ flb_sds_destroy(task_id);
+ }
+ return -1;
+ }
+ if (found_containers == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Could not parse 'Containers' from %s response",
+ http_path);
+ flb_sds_destroy(http_path);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ if (task_id) {
+ flb_sds_destroy(task_id);
+ }
+ return -1;
+ }
+
+ /*
+ * Process metadata response a 2nd time to get the Containers list
+ * This is because we need one complete metadata buf per container
+ * with all task metadata. So we collect task before we process containers.
+ */
+ for (i = 0; i < root.via.map.size; i++) {
+ key = root.via.map.ptr[i].key;
+ if (key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "%s response parsing failed, msgpack key type=%i",
+ http_path,
+ key.type);
+ continue;
+ }
+
+ if (key.via.str.size == 10 && strncmp(key.via.str.ptr, "Containers", 10) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_ARRAY ) {
+ flb_plg_error(ctx->ins, "metadata parsing: unexpected 'Containers' value type=%i",
+ val.type);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ flb_sds_destroy(http_path);
+ flb_sds_destroy(task_id);
+ return -1;
+ }
+
+            /* iterate through the list of containers and process them */
+ for (k = 0; k < val.via.array.size; k++) {
+ container = val.via.array.ptr[k];
+ if (container.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "metadata parsing: unexpected 'Containers[%d]' inner value type=%i",
+ k,
+ container.type);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ flb_sds_destroy(http_path);
+ flb_sds_destroy(task_id);
+ return -1;
+ }
+ ret = process_container_response(ctx, container, task_meta);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "metadata parsing: failed to parse 'Containers[%d]'",
+ k);
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ flb_sds_destroy(http_path);
+ flb_sds_destroy(task_id);
+ return -1;
+ }
+ }
+ }
+ }
+
+ flb_free(buffer);
+ msgpack_unpacked_destroy(&result);
+ flb_sds_destroy(task_id);
+ flb_sds_destroy(http_path);
+ return 0;
+}
+
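+/*
+ * Resolve metadata for a record by its tag: the tag is expected to carry the
+ * 12 character container short ID immediately after ecs_tag_prefix. As a
+ * sketch (prefix and suffix are hypothetical), with ecs_tag_prefix "ecs." a
+ * tag like "ecs.79c796ed2a7f-stdout" maps to short ID "79c796ed2a7f", which
+ * is the key used against container_hash_table below.
+ */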
+static int get_metadata_by_id(struct flb_filter_ecs *ctx,
+ const char *tag, int tag_len,
+ struct flb_ecs_metadata_buffer **metadata_buffer)
+{
+ flb_sds_t container_short_id = NULL;
+ const char *tmp;
+ int ret;
+ size_t size;
+
+ if (ctx->ecs_tag_prefix_len + 12 > tag_len) {
+ flb_plg_warn(ctx->ins, "Tag '%s' length check failed: tag is expected "
+ "to be or be prefixed with '{ecs_tag_prefix}{12 character container short ID}'",
+ tag);
+ return -1;
+ }
+
+ ret = strncmp(ctx->ecs_tag_prefix, tag, ctx->ecs_tag_prefix_len);
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "Tag '%s' is not prefixed with ecs_tag_prefix '%s'",
+ tag, ctx->ecs_tag_prefix);
+ return -1;
+ }
+
+ tmp = tag + ctx->ecs_tag_prefix_len;
+ container_short_id = flb_sds_create_len(tmp, 12);
+ if (!container_short_id) {
+ flb_errno();
+ return -1;
+ }
+
+ /* get metadata for this container */
+ ret = flb_hash_table_get(ctx->container_hash_table,
+ container_short_id, flb_sds_len(container_short_id),
+ (void **) metadata_buffer, &size);
+
+ if (ret == -1) {
+ /* try fetch metadata */
+ ret = get_task_metadata(ctx, container_short_id);
+ if (ret < 0) {
+ flb_plg_info(ctx->ins, "Requesting metadata from ECS Agent introspection endpoint failed for tag %s",
+ tag);
+ flb_sds_destroy(container_short_id);
+ return -1;
+ }
+ /* get from hash table */
+ ret = flb_hash_table_get(ctx->container_hash_table,
+ container_short_id, flb_sds_len(container_short_id),
+ (void **) metadata_buffer, &size);
+ }
+
+ flb_sds_destroy(container_short_id);
+ return ret;
+}
+
+static void clean_old_metadata_buffers(struct flb_filter_ecs *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_ecs_metadata_buffer *buf;
+ time_t now = time(NULL);
+
+ mk_list_foreach_safe(head, tmp, &ctx->metadata_buffers) {
+ buf = mk_list_entry(head, struct flb_ecs_metadata_buffer, _head);
+ if (now > (buf->last_used_time + ctx->ecs_meta_cache_ttl)) {
+ flb_plg_debug(ctx->ins, "cleaning buffer: now=%ld, ttl=%d, last_used_time=%ld",
+ (long)now, ctx->ecs_meta_cache_ttl, (long)buf->last_used_time);
+ mk_list_del(&buf->_head);
+ flb_hash_table_del(ctx->container_hash_table, buf->id);
+ flb_ecs_metadata_buffer_destroy(buf);
+ }
+ }
+}
+
+static int is_tag_marked_failed(struct flb_filter_ecs *ctx,
+ const char *tag, int tag_len)
+{
+ int ret;
+ int *val = NULL;
+ size_t val_size;
+
+ ret = flb_hash_table_get(ctx->failed_metadata_request_tags,
+ tag, tag_len,
+ (void **) &val, &val_size);
+ if (ret != -1) {
+ if (*val >= ctx->agent_endpoint_retries) {
+ return FLB_TRUE;
+ }
+ }
+
+ return FLB_FALSE;
+}
+
+static void mark_tag_failed(struct flb_filter_ecs *ctx,
+ const char *tag, int tag_len)
+{
+ int ret;
+ int *val = NULL;
+ int *new_val = NULL;
+ size_t val_size;
+
+ ret = flb_hash_table_get(ctx->failed_metadata_request_tags,
+ tag, tag_len,
+ (void **) &val, &val_size);
+
+ if (ret == -1) {
+ /* hash table copies memory to new heap block */
+ val = flb_malloc(sizeof(int));
+ if (!val) {
+ flb_errno();
+ return;
+ }
+ *val = 1;
+ flb_hash_table_add(ctx->failed_metadata_request_tags,
+ tag, tag_len,
+ val, sizeof(int));
+ /* hash table will contain a copy */
+ flb_free(val);
+ } else {
+ /*
+         * val is memory returned from the hash table.
+         * If we simply updated the value in place and called flb_hash_table_add,
+         * it would first free the old memory (which is what we passed in) and
+         * then try to copy the memory we passed in to a new location.
+         * The hash table stores all entries as if they were strings, so we also
+         * can't simply increment the value returned by flb_hash_table_get.
+ new_val = flb_malloc(sizeof(int));
+ if (!new_val) {
+ flb_errno();
+ return;
+ }
+ /* increment number of failed metadata requests for this tag */
+ *new_val = *val + 1;
+ flb_hash_table_add(ctx->failed_metadata_request_tags,
+ tag, tag_len,
+ new_val, sizeof(int));
+ flb_plg_info(ctx->ins, "Failed to get ECS Metadata for tag %s %d times. "
+ "This might be because the logs for this tag do not come from an ECS Task Container. "
+ "This plugin will retry metadata requests at most %d times total for this tag.",
+ tag, *new_val, ctx->agent_endpoint_retries);
+ flb_free(new_val);
+ }
+}
+
+static int cb_ecs_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_size,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *context,
+ struct flb_config *config)
+{
+ struct flb_filter_ecs *ctx = context;
+ int i = 0;
+ int ret;
+ int check = FLB_FALSE;
+ msgpack_object *obj;
+ msgpack_object_kv *kv;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_ecs_metadata_key *metadata_key;
+ struct flb_ecs_metadata_buffer *metadata_buffer;
+ flb_sds_t val;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ (void) f_ins;
+ (void) i_ins;
+ (void) config;
+
+ /* First check that the static cluster metadata has been retrieved */
+ if (ctx->has_cluster_metadata == FLB_FALSE) {
+ ret = get_ecs_cluster_metadata(ctx);
+ if (ret < 0) {
+ flb_plg_warn(ctx->ins, "Could not retrieve cluster metadata "
+ "from ECS Agent");
+ return FLB_FILTER_NOTOUCH;
+ }
+ }
+
+ /* check if the current tag is marked as failed */
+ check = is_tag_marked_failed(ctx, tag, tag_len);
+ if (check == FLB_TRUE) {
+ flb_plg_debug(ctx->ins, "Failed to get ECS Metadata for tag %s %d times. "
+ "Will not attempt to retry the metadata request. Will attach cluster metadata only.",
+ tag, ctx->agent_endpoint_retries);
+ }
+
+ if (check == FLB_FALSE && ctx->cluster_metadata_only == FLB_FALSE) {
+ ret = get_metadata_by_id(ctx, tag, tag_len, &metadata_buffer);
+ if (ret == -1) {
+ flb_plg_info(ctx->ins, "Failed to get ECS Task metadata for %s, "
+ "falling back to process cluster metadata only. If "
+ "this is intentional, set `Cluster_Metadata_Only On`",
+ tag);
+ mark_tag_failed(ctx, tag, tag_len);
+ metadata_buffer = &ctx->cluster_meta_buf;
+ }
+ } else {
+ metadata_buffer = &ctx->cluster_meta_buf;
+ }
+
+ metadata_buffer->last_used_time = time(NULL);
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ obj = log_event.body;
+
+ ret = flb_log_event_encoder_begin_record(&log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ &log_encoder, &log_event.timestamp);
+ }
+
+ /* iterate through the old record map and add it to the new buffer */
+ kv = obj->via.map.ptr;
+ for(i=0;
+ i < obj->via.map.size &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ i++) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&kv[i].key),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&kv[i].val));
+ }
+
+ /* append new keys */
+ mk_list_foreach_safe(head, tmp, &ctx->metadata_keys) {
+ metadata_key = mk_list_entry(head, struct flb_ecs_metadata_key, _head);
+ val = flb_ra_translate(metadata_key->ra, NULL, 0,
+ metadata_buffer->obj, NULL);
+ if (!val) {
+ flb_plg_info(ctx->ins, "Translation failed for %s : %s",
+ metadata_key->key, metadata_key->template);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_STRING_VALUE(metadata_key->key,
+ flb_sds_len(metadata_key->key)),
+ FLB_LOG_EVENT_STRING_VALUE(val, flb_sds_len(val)));
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_info(ctx->ins,
+ "Metadata appendage failed for %.*s",
+ (int) flb_sds_len(metadata_key->key),
+ metadata_key->key);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ flb_sds_destroy(val);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_log_event_encoder_commit_record(&log_encoder);
+ }
+ }
+
+ if (ctx->cluster_metadata_only == FLB_FALSE) {
+ clean_old_metadata_buffers(ctx);
+ }
+
+ if (ret == FLB_EVENT_DECODER_ERROR_INSUFFICIENT_DATA &&
+ log_decoder.offset == bytes) {
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ *out_buf = log_encoder.output_buffer;
+ *out_size = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", ret);
+
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+static void flb_ecs_metadata_key_destroy(struct flb_ecs_metadata_key *metadata_key)
+{
+ if (metadata_key) {
+ if (metadata_key->key) {
+ flb_sds_destroy(metadata_key->key);
+ }
+ if (metadata_key->template) {
+ flb_sds_destroy(metadata_key->template);
+ }
+ if (metadata_key->ra) {
+ flb_ra_destroy(metadata_key->ra);
+ }
+ flb_free(metadata_key);
+ }
+}
+
+static void flb_filter_ecs_destroy(struct flb_filter_ecs *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_ecs_metadata_key *metadata_key;
+ struct flb_ecs_metadata_buffer *buf;
+
+ if (ctx) {
+ if (ctx->ecs_upstream) {
+ flb_upstream_destroy(ctx->ecs_upstream);
+ }
+ if (ctx->cluster_metadata.cluster_name) {
+ flb_sds_destroy(ctx->cluster_metadata.cluster_name);
+ }
+ if (ctx->cluster_metadata.container_instance_arn) {
+ flb_sds_destroy(ctx->cluster_metadata.container_instance_arn);
+ }
+ if (ctx->cluster_metadata.container_instance_id) {
+ flb_sds_destroy(ctx->cluster_metadata.container_instance_id);
+ }
+ if (ctx->cluster_metadata.ecs_agent_version) {
+ flb_sds_destroy(ctx->cluster_metadata.ecs_agent_version);
+ }
+ if (ctx->cluster_meta_buf.buf) {
+ flb_free(ctx->cluster_meta_buf.buf);
+ msgpack_unpacked_destroy(&ctx->cluster_meta_buf.unpacked);
+ }
+ mk_list_foreach_safe(head, tmp, &ctx->metadata_keys) {
+ metadata_key = mk_list_entry(head, struct flb_ecs_metadata_key, _head);
+ mk_list_del(&metadata_key->_head);
+ flb_ecs_metadata_key_destroy(metadata_key);
+ }
+ mk_list_foreach_safe(head, tmp, &ctx->metadata_buffers) {
+ buf = mk_list_entry(head, struct flb_ecs_metadata_buffer, _head);
+ mk_list_del(&buf->_head);
+ flb_hash_table_del(ctx->container_hash_table, buf->id);
+ flb_ecs_metadata_buffer_destroy(buf);
+ }
+ if (ctx->container_hash_table) {
+ flb_hash_table_destroy(ctx->container_hash_table);
+ }
+ if (ctx->failed_metadata_request_tags) {
+ flb_hash_table_destroy(ctx->failed_metadata_request_tags);
+ }
+ flb_free(ctx);
+ }
+}
+
+static int cb_ecs_exit(void *data, struct flb_config *config)
+{
+ struct flb_filter_ecs *ctx = data;
+
+ flb_filter_ecs_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+
+ {
+ FLB_CONFIG_MAP_STR, "add", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Add a metadata key/value pair with the given key and given value from the given template. "
+ "Format is `Add KEY TEMPLATE`."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "ecs_tag_prefix", "",
+ 0, FLB_TRUE, offsetof(struct flb_filter_ecs, ecs_tag_prefix),
+ "This filter must obtain the 12 character container short ID to query "
+ "for ECS Task metadata. The filter removes the prefx from the tag and then assumes "
+ "the next 12 characters are the short container ID. If the container short ID, "
+ "is not found in the tag, the filter can/must fallback to only attaching cluster metadata "
+ "(cluster name, container instance ID/ARN, and ECS Agent version)."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "cluster_metadata_only", "false",
+ 0, FLB_TRUE, offsetof(struct flb_filter_ecs, cluster_metadata_only),
+ "Only attempt to attach the cluster related metadata to logs "
+ "(cluster name, container instance ID/ARN, and ECS Agent version). "
+ "With this option off, if this filter can not obtain the task metadata for a log, it will "
+ "output errors. Use this option if you have logs that are not part of an "
+ "ECS task (ex: Docker Daemon logs)."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "ecs_meta_cache_ttl", "3600",
+ 0, FLB_TRUE, offsetof(struct flb_filter_ecs, ecs_meta_cache_ttl),
+ "Configurable TTL for cached ECS Task Metadata. Default 3600s (1 hour)"
+ "For example, set this value to 600 or 600s or 10m and cache entries "
+ "which have been created more than 10 minutes will be evicted."
+ "Cache eviction is needed to purge task metadata for tasks that "
+ "have been stopped."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "ecs_meta_host", FLB_ECS_FILTER_HOST,
+ 0, FLB_TRUE, offsetof(struct flb_filter_ecs, ecs_host),
+ "The host name at which the ECS Agent Introspection endpoint is reachable. "
+ "Defaults to 127.0.0.1"
+ },
+
+ {
+ FLB_CONFIG_MAP_INT, "ecs_meta_port", FLB_ECS_FILTER_PORT,
+ 0, FLB_TRUE, offsetof(struct flb_filter_ecs, ecs_port),
+ "The port at which the ECS Agent Introspection endpoint is reachable. "
+ "Defaults to 51678"
+ },
+
+ {
+ FLB_CONFIG_MAP_INT, "agent_endpoint_retries", FLB_ECS_FILTER_METADATA_RETRIES,
+ 0, FLB_TRUE, offsetof(struct flb_filter_ecs, agent_endpoint_retries),
+ "Number of retries for failed metadata requests to ECS Agent Introspection "
+ "endpoint. The most common cause of failed metadata requests is that the "
+ "container the metadata request was made for is not part of an ECS Task. "
+ "Check if you have non-task containers and docker dual logging enabled."
+ },
+
+ {0}
+};
+
+struct flb_filter_plugin filter_ecs_plugin = {
+ .name = "ecs",
+ .description = "Add AWS ECS Metadata",
+ .cb_init = cb_ecs_init,
+ .cb_filter = cb_ecs_filter,
+ .cb_exit = cb_ecs_exit,
+ .config_map = config_map,
+ .flags = 0
+};
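+
+/*
+ * Illustrative configuration sketch (not part of the upstream source):
+ * shows how the options declared in config_map above might be combined.
+ * The tag prefix value and the record accessor template variables
+ * ($ClusterName, $TaskID) are assumptions for this example.
+ *
+ * [FILTER]
+ *     Name                  ecs
+ *     Match                 *
+ *     ECS_Tag_Prefix        ecs.var.lib.docker.containers.
+ *     ADD                   ecs_cluster $ClusterName
+ *     ADD                   ecs_task_id $TaskID
+ *     Cluster_Metadata_Only Off
+ *     ECS_Meta_Cache_TTL    1h
+ */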
diff --git a/src/fluent-bit/plugins/filter_ecs/ecs.h b/src/fluent-bit/plugins/filter_ecs/ecs.h
new file mode 100644
index 000000000..71d0248fa
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_ecs/ecs.h
@@ -0,0 +1,152 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_ECS_H
+#define FLB_FILTER_ECS_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_ra_key.h>
+#include <fluent-bit/record_accessor/flb_ra_parser.h>
+
+#define FLB_ECS_FILTER_HOST "127.0.0.1"
+#define FLB_ECS_FILTER_PORT "51678"
+#define FLB_ECS_FILTER_CLUSTER_PATH "/v1/metadata"
+#define FLB_ECS_FILTER_TASK_PATH_FORMAT "/v1/tasks?dockerid=%s"
+#define FLB_ECS_FILTER_METADATA_RETRIES "2"
+
+/*
+ * Kubernetes recommends not running more than 110 pods per node.
+ * In ECS, the number of tasks per instance will vary considerably,
+ * but this should be a very safe starting size for the table.
+ * Since we use the TTL hash table, there is no max size.
+ */
+#define FLB_ECS_FILTER_HASH_TABLE_SIZE 100
+
+
+struct flb_ecs_metadata_key {
+ flb_sds_t key;
+ flb_sds_t template;
+ struct flb_record_accessor *ra;
+
+ struct mk_list _head;
+};
+
+struct flb_ecs_metadata_buffer {
+ /* msgpack_sbuffer */
+ char *buf;
+ size_t size;
+
+ /* unpacked object to use with flb_ra_translate */
+ msgpack_unpacked unpacked;
+ msgpack_object obj;
+ int free_packer;
+
+ /* the hash table only stores a pointer; we need the list to track and free these */
+ struct mk_list _head;
+ /* we clean up the memory for these once ecs_meta_cache_ttl has expired */
+ time_t last_used_time;
+
+ /*
+ * To remove an entry from the hash table on TTL expiration, we need the ID.
+ * While we use a TTL hash, it won't clean up the memory, so we have a separate routine for that,
+ * and it needs to ensure that the list and hash table have the same contents.
+ */
+ flb_sds_t id;
+};
+
+struct flb_ecs_cluster_metadata {
+ flb_sds_t cluster_name;
+ flb_sds_t container_instance_arn;
+ flb_sds_t container_instance_id;
+ flb_sds_t ecs_agent_version;
+};
+
+/*
+ * The ECS Agent task response gives us both task & container metadata at the same time.
+ * We need a temporary structure to organize the task metadata before we create the
+ * final flb_ecs_metadata_buffer objects with all metadata, so this struct just stores
+ * tmp pointers into the deserialized msgpack.
+ */
+struct flb_ecs_task_metadata {
+ const char* task_arn;
+ int task_arn_len;
+ const char *task_id;
+ int task_id_len;
+ const char *task_def_family;
+ int task_def_family_len;
+ const char *task_def_version;
+ int task_def_version_len;
+};
+
+struct flb_filter_ecs {
+ /* upstream connection to ECS Agent */
+ struct flb_upstream *ecs_upstream;
+
+ /* Filter plugin instance reference */
+ struct flb_filter_instance *ins;
+
+ struct mk_list metadata_keys;
+ int metadata_keys_len;
+
+ flb_sds_t ecs_host;
+ int ecs_port;
+
+ int agent_endpoint_retries;
+
+ /*
+ * This field is used when we build new container metadata objects
+ */
+ struct flb_ecs_cluster_metadata cluster_metadata;
+ int has_cluster_metadata;
+ /*
+ * If looking up the container fails, we should still always be able to
+ * attach cluster metadata. So we have a fallback metadata buffer for that.
+ * For example, users may want to attach cluster name to Docker Daemon logs,
+ * even though Docker is not an AWS ECS Task/container.
+ */
+ struct flb_ecs_metadata_buffer cluster_meta_buf;
+
+ /*
+ * Maps 12 char container short ID to metadata buffer
+ */
+ struct flb_hash_table *container_hash_table;
+
+ /*
+ * The hash table only stores pointers, so we keep a list of meta objects
+ * that need to be freed
+ */
+ struct mk_list metadata_buffers;
+
+ /*
+ * Fluent Bit may pick up logs for containers that were not scheduled by ECS.
+ * These will lead to continuous error messages. Therefore, we store
+ * a hash table of tags for which we could not get metadata so we can stop
+ * retrying on them.
+ */
+ struct flb_hash_table *failed_metadata_request_tags;
+
+ int ecs_meta_cache_ttl;
+ char *ecs_tag_prefix;
+ int ecs_tag_prefix_len;
+ int cluster_metadata_only;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_expect/CMakeLists.txt b/src/fluent-bit/plugins/filter_expect/CMakeLists.txt
new file mode 100644
index 000000000..cc6c03d1a
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_expect/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ expect.c)
+
+FLB_PLUGIN(filter_expect "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_expect/expect.c b/src/fluent-bit/plugins/filter_expect/expect.c
new file mode 100644
index 000000000..102085c02
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_expect/expect.c
@@ -0,0 +1,614 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_ra_key.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include "expect.h"
+#include <msgpack.h>
+
+static int key_to_type(char *key)
+{
+ if (strcasecmp(key, "key_exists") == 0) {
+ return FLB_EXP_KEY_EXISTS;
+ }
+ else if (strcasecmp(key, "key_not_exists") == 0) {
+ return FLB_EXP_KEY_NOT_EXISTS;
+ }
+ else if (strcasecmp(key, "key_val_is_null") == 0) {
+ return FLB_EXP_KEY_VAL_NULL;
+ }
+ else if (strcasecmp(key, "key_val_is_not_null") == 0) {
+ return FLB_EXP_KEY_VAL_NOT_NULL;
+ }
+ else if (strcasecmp(key, "key_val_eq") == 0) {
+ return FLB_EXP_KEY_VAL_EQ;
+ }
+
+ return -1;
+}
+
+/* Create a rule */
+static struct flb_expect_rule *rule_create(struct flb_expect *ctx,
+ int type, char *value)
+{
+ int ret;
+ struct mk_list *list;
+ struct flb_slist_entry *key;
+ struct flb_slist_entry *val;
+ struct flb_expect_rule *rule;
+
+ rule = flb_calloc(1, sizeof(struct flb_expect_rule));
+ if (!rule) {
+ flb_errno();
+ return NULL;
+ }
+ rule->type = type;
+ rule->value = value;
+ rule->expect = NULL;
+
+ /* Only the rule 'key_val_eq' expects two values from the configuration */
+ if (type == FLB_EXP_KEY_VAL_EQ) {
+ list = flb_malloc(sizeof(struct mk_list));
+ if (!list) {
+ flb_errno();
+ flb_free(rule);
+ return NULL;
+ }
+ mk_list_init(list);
+ ret = flb_slist_split_string(list, value, ' ', 1);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error reading list of options '%s'",
+ value);
+ flb_slist_destroy(list);
+ flb_free(list);
+ flb_free(rule);
+ return NULL;
+ }
+
+ /* Get the 'key' and the expected value */
+ key = mk_list_entry_first(list, struct flb_slist_entry, _head);
+ val = mk_list_entry_last(list, struct flb_slist_entry, _head);
+
+ rule->ra = flb_ra_create(key->str, FLB_TRUE);
+ if (!rule->ra) {
+ flb_plg_error(ctx->ins, "error processing accessor key '%s'",
+ key->str);
+ flb_slist_destroy(list);
+ flb_free(list);
+ flb_free(rule);
+ return NULL;
+ }
+ rule->expect = flb_sds_create(val->str);
+ flb_slist_destroy(list);
+ flb_free(list);
+ }
+ else {
+ rule->ra = flb_ra_create(value, FLB_TRUE);
+ if (!rule->ra) {
+ flb_plg_error(ctx->ins, "error processing accessor key '%s'",
+ value);
+ flb_free(rule);
+ return NULL;
+ }
+ }
+
+ return rule;
+}
+
+static void rule_destroy(struct flb_expect_rule *rule)
+{
+ if (rule->expect) {
+ flb_sds_destroy(rule->expect);
+ }
+ if (rule->ra) {
+ flb_ra_destroy(rule->ra);
+ }
+
+ flb_free(rule);
+}
+
+static void context_destroy(struct flb_expect *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_expect_rule *rule;
+
+ mk_list_foreach_safe(head, tmp, &ctx->rules) {
+ rule = mk_list_entry(head, struct flb_expect_rule, _head);
+ mk_list_del(&rule->_head);
+ rule_destroy(rule);
+ }
+ flb_free(ctx);
+}
+
+static struct flb_expect *context_create(struct flb_filter_instance *ins,
+ struct flb_config *config)
+{
+ int i = 0;
+ int type;
+ int ret;
+ flb_sds_t tmp;
+ struct flb_kv *kv;
+ struct mk_list *head;
+ struct flb_expect *ctx;
+ struct flb_expect_rule *rule;
+
+ ctx = flb_calloc(1, sizeof(struct flb_expect));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ ctx->action = FLB_EXP_WARN;
+ mk_list_init(&ctx->rules);
+
+ /* Get the action property */
+ tmp = (char *) flb_filter_get_property("action", ins);
+ if (tmp) {
+ if (strcasecmp(tmp, "warn") == 0) {
+ ctx->action = FLB_EXP_WARN;
+ }
+ else if (strcasecmp(tmp, "exit") == 0) {
+ ctx->action = FLB_EXP_EXIT;
+ }
+ else if (strcasecmp(tmp, "result_key") == 0) {
+ ctx->action = FLB_EXP_RESULT_KEY;
+ }
+ else {
+ flb_plg_error(ctx->ins, "unexpected 'action' value '%s'", tmp);
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+
+ /* Load config map */
+ ret = flb_filter_config_map_set(ins, ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Read the configuration properties */
+ mk_list_foreach(head, &ins->properties) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+
+ /* Validate the type of the rule */
+ type = key_to_type(kv->key);
+ if (strcasecmp(kv->key, "result_key") == 0) {
+ /* skip */
+ continue;
+ }
+
+ if (type == -1 && strcasecmp(kv->key, "action") != 0) {
+ flb_plg_error(ctx->ins, "unknown configuration rule '%s'", kv->key);
+ context_destroy(ctx);
+ return NULL;
+ }
+
+ rule = rule_create(ctx, type, kv->val);
+ if (!rule) {
+ context_destroy(ctx);
+ return NULL;
+ }
+ mk_list_add(&rule->_head, &ctx->rules);
+
+ /* Debug message */
+ if (rule->type == -1) {
+ flb_plg_debug(ctx->ins, "action : '%s'", kv->val);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "rule #%i: '%s', expects: '%s'",
+ i, kv->key, kv->val);
+ }
+ i++;
+ }
+
+ return ctx;
+
+}
+
+static int cb_expect_init(struct flb_filter_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ struct flb_expect *ctx;
+
+ /* Create the plugin context */
+ ctx = context_create(ins, config);
+ if (!ctx) {
+ return -1;
+ }
+
+ /* Set filter context */
+ flb_filter_set_context(ins, ctx);
+
+ if (mk_list_size(&ctx->rules) == 0) {
+ flb_plg_warn(ctx->ins, "no rules has been defined");
+ }
+
+ return 0;
+}
+
+static char *ra_value_type_to_str(struct flb_ra_value *val)
+{
+ if (val->type == FLB_RA_BOOL) {
+ return "boolean";
+ }
+ else if (val->type == FLB_RA_INT) {
+ return "integer";
+ }
+ else if (val->type == FLB_RA_FLOAT) {
+ return "float / double";
+ }
+ else if (val->type == FLB_RA_STRING) {
+ return "string";
+ }
+ else if (val->type == FLB_RA_NULL) {
+ return "null";
+ }
+
+ return "UNKNOWN";
+}
+
+static int rule_apply(struct flb_expect *ctx, msgpack_object map)
+{
+ int n = 0;
+ char *json;
+ size_t size = 1024;
+ struct mk_list *head;
+ struct flb_expect_rule *rule;
+ struct flb_ra_value *val;
+
+ mk_list_foreach(head, &ctx->rules) {
+ rule = mk_list_entry(head, struct flb_expect_rule, _head);
+
+ val = flb_ra_get_value_object(rule->ra, map);
+ if (rule->type == FLB_EXP_KEY_EXISTS) {
+ if (val) {
+ flb_ra_key_value_destroy(val);
+ n++;
+ continue;
+ }
+
+ json = flb_msgpack_to_json_str(size, &map);
+ flb_plg_error(ctx->ins,
+ "exception on rule #%i 'key_exists', key '%s' "
+ "not found. Record content:\n%s",
+ n, rule->value, json);
+ flb_free(json);
+ return FLB_FALSE;
+ }
+ else if (rule->type == FLB_EXP_KEY_NOT_EXISTS) {
+ if (!val) {
+ n++;
+ continue;
+ }
+ json = flb_msgpack_to_json_str(size, &map);
+ flb_plg_error(ctx->ins,
+ "exception on rule #%i 'key_not_exists', key '%s' "
+ "exists. Record content:\n%s",
+ n, rule->value, json);
+ flb_free(json);
+ flb_ra_key_value_destroy(val);
+ return FLB_FALSE;
+ }
+ else if (rule->type == FLB_EXP_KEY_VAL_NULL) {
+ if (!val) {
+ json = flb_msgpack_to_json_str(size, &map);
+ flb_plg_error(ctx->ins,
+ "exception on rule #%i 'key_val_is_null', "
+ "key '%s' not found. Record content:\n%s",
+ n, rule->value, json);
+ flb_free(json);
+ return FLB_FALSE;
+ }
+ if (val->type != FLB_RA_NULL) {
+ json = flb_msgpack_to_json_str(size, &map);
+ flb_plg_error(ctx->ins,
+ "exception on rule #%i 'key_val_is_null', "
+ "key '%s' contains a value type '%s'. "
+ "Record content:\n%s",
+ n, rule->value,
+ ra_value_type_to_str(val), json);
+ flb_free(json);
+ flb_ra_key_value_destroy(val);
+ return FLB_FALSE;
+ }
+ flb_ra_key_value_destroy(val);
+ }
+ else if (rule->type == FLB_EXP_KEY_VAL_NOT_NULL) {
+ if (!val) {
+ json = flb_msgpack_to_json_str(size, &map);
+ flb_plg_error(ctx->ins,
+ "exception on rule #%i 'key_val_is_not_null', "
+ "key '%s' not found. Record content:\n%s",
+ n, rule->value, json);
+ flb_free(json);
+ return FLB_FALSE;
+ }
+ if (val->type == FLB_RA_NULL) {
+ json = flb_msgpack_to_json_str(size, &map);
+ flb_plg_error(ctx->ins,
+ "exception on rule #%i 'key_val_is_not_null', "
+ "key '%s' contains a value type '%s'. "
+ "Record content:\n%s",
+ n, rule->value,
+ ra_value_type_to_str(val), json);
+ flb_free(json);
+ flb_ra_key_value_destroy(val);
+ return FLB_FALSE;
+ }
+ flb_ra_key_value_destroy(val);
+ }
+ else if (rule->type == FLB_EXP_KEY_VAL_EQ) {
+ if (!val) {
+ json = flb_msgpack_to_json_str(size, &map);
+ flb_plg_error(ctx->ins,
+ "exception on rule #%i 'key_val_is_null', "
+ "key '%s' not found. Record content:\n%s",
+ n, rule->value, json);
+ flb_free(json);
+ return FLB_FALSE;
+ }
+
+ if (val->type == FLB_RA_STRING) {
+ if (flb_sds_cmp(val->val.string, rule->expect,
+ flb_sds_len(rule->expect)) != 0) {
+ json = flb_msgpack_to_json_str(size, &map);
+ flb_plg_error(ctx->ins,
+ "exception on rule #%i 'key_val_eq', "
+ "key value '%s' is different than "
+ "expected: '%s'. Record content:\n%s",
+ n, val->val.string, rule->expect, json);
+ flb_free(json);
+ flb_ra_key_value_destroy(val);
+ return FLB_FALSE;
+ }
+ }
+ flb_ra_key_value_destroy(val);
+ }
+ n++;
+ }
+
+ return FLB_TRUE;
+}
+
+static int cb_expect_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_bytes,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *filter_context,
+ struct flb_config *config)
+{
+ int ret;
+ int i;
+ int rule_matched = FLB_TRUE;
+ msgpack_object_kv *kv;
+ struct flb_expect *ctx = filter_context;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ (void) out_buf;
+ (void) out_bytes;
+ (void) f_ins;
+ (void) i_ins;
+ (void) config;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ ret = rule_apply(ctx, *log_event.body);
+ if (ret == FLB_TRUE) {
+ /* rule matches, we are good */
+ continue;
+ }
+ else {
+ if (ctx->action == FLB_EXP_WARN) {
+ flb_plg_warn(ctx->ins, "expect check failed");
+ }
+ else if (ctx->action == FLB_EXP_EXIT) {
+ flb_engine_exit_status(config, 255);
+ }
+ else if (ctx->action == FLB_EXP_RESULT_KEY) {
+ rule_matched = FLB_FALSE;
+ }
+ break;
+ }
+ }
+
+ ret = 0;
+ /* Append result key when action is "result_key"*/
+ if (ctx->action == FLB_EXP_RESULT_KEY) {
+ flb_log_event_decoder_reset(&log_decoder, (char *) data, bytes);
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ ret = flb_log_event_encoder_begin_record(&log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ &log_encoder, &log_event.timestamp);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_metadata_from_msgpack_object(&log_encoder,
+ log_event.metadata);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_STRING_VALUE(ctx->result_key, flb_sds_len(ctx->result_key)),
+ FLB_LOG_EVENT_BOOLEAN_VALUE(rule_matched));
+ }
+
+ kv = log_event.body->via.map.ptr;
+ for (i=0 ;
+ i < log_event.body->via.map.size &&
+ ret == FLB_EVENT_ENCODER_SUCCESS ;
+ i++) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&kv[i].key),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&kv[i].val));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&log_encoder);
+ }
+ }
+
+ if (ret == FLB_EVENT_DECODER_ERROR_INSUFFICIENT_DATA &&
+ log_decoder.offset == bytes) {
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ *out_buf = log_encoder.output_buffer;
+ *out_bytes = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", ret);
+
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+}
+
+static int cb_expect_exit(void *data, struct flb_config *config)
+{
+ struct flb_expect *ctx = data;
+ (void) config;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ context_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] =
+{
+ /* rule: the key exists in the record */
+ {
+ FLB_CONFIG_MAP_STR, "key_exists", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "check that the given key name exists in the record"
+ },
+
+ /* rule: the key does not exist in the record */
+ {
+ FLB_CONFIG_MAP_STR, "key_not_exists", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "check that the given key name do not exists in the record"
+ },
+
+ /* rule: the value of the key is NULL */
+ {
+ FLB_CONFIG_MAP_STR, "key_val_is_null", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "check that the value of the key is NULL"
+ },
+
+ /* rule: the value of the key is NOT NULL */
+ {
+ FLB_CONFIG_MAP_STR, "key_val_is_not_null", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "check that the value of the key is NOT NULL"
+ },
+
+ /* rule: the value of the key is equal to a given value */
+ {
+ FLB_CONFIG_MAP_SLIST_1, "key_val_eq", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "check that the value of the key equals the given value"
+ },
+
+ /* action: what to do when a rule does not match */
+ {
+ FLB_CONFIG_MAP_STR, "action", "warn",
+ 0, FLB_FALSE, 0,
+ "action to take when a rule does not match: 'warn', 'exit' or 'result_key'."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "result_key", "matched",
+ 0, FLB_TRUE, offsetof(struct flb_expect, result_key),
+ "specify the key name to append a boolean that indicates rule is matched or not. "
+ "This key is to be used only when 'action' is 'result_key'."
+ },
+
+ /* EOF */
+ {0}
+};
+
+struct flb_filter_plugin filter_expect_plugin = {
+ .name = "expect",
+ .description = "Validate expected keys and values",
+ .cb_init = cb_expect_init,
+ .cb_filter = cb_expect_filter,
+ .cb_exit = cb_expect_exit,
+ .config_map = config_map,
+ .flags = 0
+};
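+
+/*
+ * Illustrative configuration sketch (not part of the upstream source):
+ * exercises the rules declared in config_map above. The record keys
+ * 'log' and 'level' are assumptions for this example.
+ *
+ * [FILTER]
+ *     Name         expect
+ *     Match        *
+ *     key_exists   log
+ *     key_val_eq   level info
+ *     action       warn
+ */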
diff --git a/src/fluent-bit/plugins/filter_expect/expect.h b/src/fluent-bit/plugins/filter_expect/expect.h
new file mode 100644
index 000000000..bc7939d3d
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_expect/expect.h
@@ -0,0 +1,53 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_EXPECT_H
+#define FLB_FILTER_EXPECT_H
+
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_record_accessor.h>
+
+#define FLB_EXP_WARN 0
+#define FLB_EXP_EXIT 1
+#define FLB_EXP_RESULT_KEY 2
+
+/* Rule types */
+#define FLB_EXP_KEY_EXISTS 0 /* key exists */
+#define FLB_EXP_KEY_NOT_EXISTS 1 /* key not exists */
+#define FLB_EXP_KEY_VAL_NULL 2 /* key value has a NULL value */
+#define FLB_EXP_KEY_VAL_NOT_NULL 3 /* key value is not NULL */
+#define FLB_EXP_KEY_VAL_EQ 4 /* key value is equal some given value */
+
+struct flb_expect_rule {
+ int type;
+ flb_sds_t value; /* original value given in the config */
+ flb_sds_t expect; /* specific value match (FLB_EXP_KEY_VAL_EQ) */
+ struct flb_record_accessor *ra;
+ struct mk_list _head;
+};
+
+struct flb_expect {
+ int action;
+ flb_sds_t result_key;
+ struct mk_list rules;
+ struct flb_filter_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_geoip2/.gitignore b/src/fluent-bit/plugins/filter_geoip2/.gitignore
new file mode 100644
index 000000000..54b22adfa
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/.gitignore
@@ -0,0 +1 @@
+libmaxminddb/include/maxminddb_config.h
diff --git a/src/fluent-bit/plugins/filter_geoip2/CMakeLists.txt b/src/fluent-bit/plugins/filter_geoip2/CMakeLists.txt
new file mode 100644
index 000000000..b33799e79
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/CMakeLists.txt
@@ -0,0 +1,19 @@
+# libmaxminddb
+
+# The subdirectory 'libmaxminddb' was imported from the following repo.
+# (The minimum set of files required to compile geoip2.c were imported)
+#
+# https://github.com/maxmind/libmaxminddb
+#
+# We keep the exact version in the file 'libmaxminddb/VERSION';
+# please update that file when you upgrade libmaxminddb.
+#
+option(BUILD_TESTING "" OFF)
+set(CMAKE_C_FLAGS "-std=gnu99 ${CMAKE_C_FLAGS}")
+add_subdirectory(libmaxminddb EXCLUDE_FROM_ALL)
+include_directories(libmaxminddb/include/)
+
+set(src
+ geoip2.c)
+
+FLB_PLUGIN(filter_geoip2 "${src}" "maxminddb")
diff --git a/src/fluent-bit/plugins/filter_geoip2/geoip2.c b/src/fluent-bit/plugins/filter_geoip2/geoip2.c
new file mode 100644
index 000000000..28559dfef
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/geoip2.c
@@ -0,0 +1,519 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <sys/types.h>
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_hash_table.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+#include "geoip2.h"
+
+static int configure(struct geoip2_ctx *ctx,
+ struct flb_filter_instance *f_ins)
+{
+ struct mk_list *head = NULL;
+ struct mk_list *split;
+ int status;
+ struct geoip2_record *record;
+ struct flb_split_entry *sentry;
+ struct flb_config_map_val *record_key;
+ int ret;
+
+ ctx->mmdb = flb_malloc(sizeof(MMDB_s));
+ ctx->lookup_keys_num = 0;
+ ctx->records_num = 0;
+
+ ret = flb_filter_config_map_set(f_ins, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(f_ins, "unable to load configuration");
+ flb_free(ctx->mmdb);
+ return -1;
+ }
+
+ if (ctx->database) {
+ status = MMDB_open(ctx->database, MMDB_MODE_MMAP, ctx->mmdb);
+ if (status != MMDB_SUCCESS) {
+ flb_plg_error(f_ins, "Cannot open geoip2 database: %s: %s",
+ ctx->database, MMDB_strerror(status));
+ flb_free(ctx->mmdb);
+ return -1;
+ }
+ } else {
+ flb_plg_error(f_ins, "no geoip2 database has been loaded");
+ flb_free(ctx->mmdb);
+ return -1;
+ }
+
+ mk_list_foreach(head, ctx->lookup_keys) {
+ ctx->lookup_keys_num++;
+ }
+
+ flb_config_map_foreach(head, record_key, ctx->record_keys) {
+ record = flb_malloc(sizeof(struct geoip2_record));
+ if (!record) {
+ flb_errno();
+ continue;
+ }
+ split = flb_utils_split(record_key->val.str, ' ', 2);
+ if (mk_list_size(split) != 3) {
+ flb_plg_error(f_ins, "invalid record parameter: '%s'", kv->val);
+ flb_plg_error(f_ins, "expects 'KEY LOOKUP_KEY VALUE'");
+ flb_free(record);
+ flb_utils_split_free(split);
+ continue;
+ }
+
+ /* Get first value (field) */
+ sentry = mk_list_entry_first(split, struct flb_split_entry, _head);
+ record->key = flb_strndup(sentry->value, sentry->len);
+ record->key_len = sentry->len;
+
+ sentry = mk_list_entry_next(&sentry->_head, struct flb_split_entry,
+ _head, split);
+ record->lookup_key = flb_strndup(sentry->value, sentry->len);
+ record->lookup_key_len = sentry->len;
+
+ sentry = mk_list_entry_last(split, struct flb_split_entry, _head);
+ record->val = flb_strndup(sentry->value, sentry->len);
+ record->val_len = sentry->len;
+
+ flb_utils_split_free(split);
+ mk_list_add(&record->_head, &ctx->records);
+ ctx->records_num++;
+ }
+
+ if (ctx->lookup_keys_num <= 0) {
+ flb_plg_error(f_ins, "at least one lookup_key is required");
+ return -1;
+ }
+ if (ctx->records_num <= 0) {
+ flb_plg_error(f_ins, "at least one record is required");
+ return -1;
+ }
+ return 0;
+}
+
+static int delete_list(struct geoip2_ctx *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct geoip2_record *record;
+
+ mk_list_foreach_safe(head, tmp, &ctx->records) {
+ record = mk_list_entry(head, struct geoip2_record, _head);
+ flb_free(record->lookup_key);
+ flb_free(record->key);
+ flb_free(record->val);
+ mk_list_del(&record->_head);
+ flb_free(record);
+ }
+ return 0;
+}
+
+static struct flb_hash_table *prepare_lookup_keys(msgpack_object *map,
+ struct geoip2_ctx *ctx)
+{
+ msgpack_object_kv *kv;
+ msgpack_object *key;
+ msgpack_object *val;
+ struct mk_list *head;
+ struct flb_config_map_val *lookup_key;
+ struct flb_hash_table *ht;
+
+ ht = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE, ctx->lookup_keys_num, -1);
+ if (!ht) {
+ return NULL;
+ }
+
+ kv = map->via.map.ptr;
+ for (int i = 0; i < map->via.map.size; i++) {
+ key = &(kv + i)->key;
+ val = &(kv + i)->val;
+ if (key->type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+ if (val->type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ flb_config_map_foreach(head, lookup_key, ctx->lookup_keys) {
+ if (strncasecmp(key->via.str.ptr, lookup_key->val.str,
+ flb_sds_len(lookup_key->val.str)) == 0) {
+ flb_hash_table_add(ht, lookup_key->val.str, flb_sds_len(lookup_key->val.str),
+ (void *) val->via.str.ptr, val->via.str.size);
+ }
+ }
+ }
+
+ return ht;
+}
+
+static MMDB_lookup_result_s mmdb_lookup(struct geoip2_ctx *ctx, const char *ip)
+{
+ int gai_error;
+ int mmdb_error;
+ MMDB_lookup_result_s result;
+
+ result = MMDB_lookup_string(ctx->mmdb, ip, &gai_error, &mmdb_error);
+ if (gai_error != 0) {
+ flb_plg_error(ctx->ins, "getaddrinfo failed: %s", gai_strerror(gai_error));
+ }
+ if (mmdb_error != MMDB_SUCCESS) {
+ flb_plg_error(ctx->ins, "lookup failed : %s", MMDB_strerror(mmdb_error));
+ }
+
+ return result;
+}
+
+static void add_geoip_fields(msgpack_object *map,
+ struct flb_hash_table *lookup_keys,
+ struct geoip2_ctx *ctx,
+ struct flb_log_event_encoder *encoder)
+{
+ int ret;
+ struct mk_list *head;
+ struct mk_list *tmp;
+ struct geoip2_record *record;
+ const char *ip;
+ size_t ip_size;
+ MMDB_lookup_result_s result;
+ MMDB_entry_s entry;
+ MMDB_entry_data_s entry_data;
+ char **path;
+ int status;
+ char *pos;
+ char key[64];
+ struct mk_list *split;
+ int split_size;
+ struct mk_list *path_head;
+ struct mk_list *path_tmp;
+ struct flb_split_entry *sentry;
+ int i = 0;
+
+ mk_list_foreach_safe(head, tmp, &ctx->records) {
+ record = mk_list_entry(head, struct geoip2_record, _head);
+
+ flb_log_event_encoder_append_body_string(
+ encoder, record->key, record->key_len);
+
+ ret = flb_hash_table_get(lookup_keys, record->lookup_key, record->lookup_key_len,
+ (void *) &ip, &ip_size);
+ if (ret == -1) {
+ flb_log_event_encoder_append_body_null(encoder);
+ continue;
+ }
+
+ result = mmdb_lookup(ctx, ip);
+ if (!result.found_entry) {
+ flb_log_event_encoder_append_body_null(encoder);
+ continue;
+ }
+ entry = result.entry;
+ pos = strstr(record->val, "}");
+ memset(key, '\0', sizeof(key));
+ strncpy(key, record->val + 2, pos - (record->val + 2));
+ split = flb_utils_split(key, '.', 2);
+ split_size = mk_list_size(split);
+ path = flb_malloc(sizeof(char *) * (split_size + 1));
+ i = 0;
+ mk_list_foreach_safe(path_head, path_tmp, split) {
+ sentry = mk_list_entry(path_head, struct flb_split_entry, _head);
+ path[i] = flb_strndup(sentry->value, sentry->len);
+ i++;
+ }
+ path[split_size] = NULL;
+ status = MMDB_aget_value(&entry, &entry_data, (const char *const *const)path);
+ flb_utils_split_free(split);
+ for (int j = 0; j < split_size; j++) {
+ flb_free(path[j]);
+ }
+ flb_free(path);
+ if (status != MMDB_SUCCESS) {
+ flb_plg_warn(ctx->ins, "cannot get value: %s", MMDB_strerror(status));
+ flb_log_event_encoder_append_body_null(encoder);
+ continue;
+ }
+ if (!entry_data.has_data) {
+ flb_plg_warn(ctx->ins, "found entry does not have data");
+ flb_log_event_encoder_append_body_null(encoder);
+ continue;
+ }
+ if (entry_data.type == MMDB_DATA_TYPE_MAP ||
+ entry_data.type == MMDB_DATA_TYPE_ARRAY) {
+ flb_plg_warn(ctx->ins, "Not supported MAP and ARRAY");
+ flb_log_event_encoder_append_body_null(encoder);
+ continue;
+ }
+
+ switch (entry_data.type) {
+ case MMDB_DATA_TYPE_EXTENDED:
+ /* TODO: not implemented */
+ flb_log_event_encoder_append_body_null(encoder);
+ break;
+ case MMDB_DATA_TYPE_POINTER:
+ /* TODO: not implemented */
+ flb_log_event_encoder_append_body_null(encoder);
+ break;
+ case MMDB_DATA_TYPE_UTF8_STRING:
+ flb_log_event_encoder_append_body_string(
+ encoder,
+ (char *) entry_data.utf8_string,
+ entry_data.data_size);
+ break;
+ case MMDB_DATA_TYPE_DOUBLE:
+ flb_log_event_encoder_append_body_double(
+ encoder, entry_data.double_value);
+ break;
+ case MMDB_DATA_TYPE_BYTES:
+ flb_log_event_encoder_append_body_string(
+ encoder,
+ (char *) entry_data.bytes,
+ entry_data.data_size);
+ break;
+ case MMDB_DATA_TYPE_UINT16:
+ flb_log_event_encoder_append_body_uint16(
+ encoder, entry_data.uint16);
+ break;
+ case MMDB_DATA_TYPE_UINT32:
+ flb_log_event_encoder_append_body_uint32(
+ encoder, entry_data.uint32);
+ break;
+ case MMDB_DATA_TYPE_MAP:
+ /* TODO: not implemented */
+ flb_log_event_encoder_append_body_null(encoder);
+ break;
+ case MMDB_DATA_TYPE_INT32:
+ flb_log_event_encoder_append_body_int32(
+ encoder, entry_data.int32);
+ break;
+ case MMDB_DATA_TYPE_UINT64:
+ flb_log_event_encoder_append_body_uint64(
+ encoder, entry_data.uint64);
+ break;
+ case MMDB_DATA_TYPE_UINT128:
+#if !(MMDB_UINT128_IS_BYTE_ARRAY)
+ /* entry_data.uint128; */
+ flb_warn("Not supported uint128");
+#else
+ flb_warn("Not implemented when MMDB_UINT128_IS_BYTE_ARRAY");
+#endif
+ flb_log_event_encoder_append_body_null(encoder);
+ break;
+ case MMDB_DATA_TYPE_ARRAY:
+ /* TODO: not implemented */
+ flb_log_event_encoder_append_body_null(encoder);
+ break;
+ case MMDB_DATA_TYPE_CONTAINER:
+ /* TODO: not implemented */
+ flb_log_event_encoder_append_body_null(encoder);
+ break;
+ case MMDB_DATA_TYPE_END_MARKER:
+ break;
+ case MMDB_DATA_TYPE_BOOLEAN:
+ flb_log_event_encoder_append_body_boolean(
+ encoder, (int) entry_data.boolean);
+ break;
+ case MMDB_DATA_TYPE_FLOAT:
+ flb_log_event_encoder_append_body_double(
+ encoder, entry_data.float_value);
+ break;
+ default:
+ flb_error("Unknown type: %d", entry_data.type);
+ break;
+ }
+ }
+}
+
+static int cb_geoip2_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config,
+ void *data)
+{
+ struct geoip2_ctx *ctx = NULL;
+ /* Create context */
+ ctx = flb_calloc(1, sizeof(struct geoip2_ctx));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ mk_list_init(&ctx->records);
+
+
+ if (configure(ctx, f_ins) < 0) {
+ delete_list(ctx);
+ return -1;
+ }
+
+ ctx->ins = f_ins;
+ flb_filter_set_context(f_ins, ctx);
+
+ return 0;
+}
+
+static int cb_geoip2_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_size,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *context,
+ struct flb_config *config)
+{
+ struct geoip2_ctx *ctx = context;
+ msgpack_object_kv *kv;
+ struct flb_hash_table *lookup_keys_hash;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+ int i;
+
+ (void) i_ins;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+
+ ret = flb_log_event_encoder_begin_record(&log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ &log_encoder, &log_event.timestamp);
+ }
+
+ kv = log_event.body->via.map.ptr;
+ for (i = 0;
+ i < log_event.body->via.map.size &&
+ ret == FLB_EVENT_ENCODER_SUCCESS ;
+ i++) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&kv[i].key),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&kv[i].val));
+ }
+
+ lookup_keys_hash = prepare_lookup_keys(log_event.body, ctx);
+ add_geoip_fields(log_event.body, lookup_keys_hash, ctx, &log_encoder);
+ flb_hash_table_destroy(lookup_keys_hash);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&log_encoder);
+ }
+ }
+
+ if (ret == FLB_EVENT_DECODER_ERROR_INSUFFICIENT_DATA &&
+ log_decoder.offset == bytes) {
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ *out_buf = log_encoder.output_buffer;
+ *out_size = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", ret);
+
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+static int cb_geoip2_exit(void *data, struct flb_config *config)
+{
+ struct geoip2_ctx *ctx = data;
+
+ if (ctx != NULL) {
+ delete_list(ctx);
+ MMDB_close(ctx->mmdb);
+ flb_free(ctx->mmdb);
+ flb_free(ctx);
+ }
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "database", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct geoip2_ctx, database),
+ "Set the geoip2 database path"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "lookup_key", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct geoip2_ctx, lookup_keys),
+ "Add a lookup_key"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "record", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct geoip2_ctx, record_keys),
+ "Add a record to the output base on geoip2"
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_filter_plugin filter_geoip2_plugin = {
+ .name = "geoip2",
+ .description = "add geoip information to records",
+ .cb_init = cb_geoip2_init,
+ .cb_filter = cb_geoip2_filter,
+ .cb_exit = cb_geoip2_exit,
+ .config_map = config_map,
+ .flags = 0,
+};
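+
+/*
+ * Illustrative configuration sketch (not part of the upstream source):
+ * each Record value follows the 'KEY LOOKUP_KEY %{path.to.value}' format
+ * parsed by configure() and add_geoip_fields() above. The database path
+ * and the 'remote_addr' lookup key are assumptions for this example.
+ *
+ * [FILTER]
+ *     Name        geoip2
+ *     Match       *
+ *     Database    /etc/fluent-bit/GeoLite2-City.mmdb
+ *     Lookup_key  remote_addr
+ *     Record      country remote_addr %{country.names.en}
+ *     Record      city    remote_addr %{city.names.en}
+ */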
diff --git a/src/fluent-bit/plugins/filter_geoip2/geoip2.h b/src/fluent-bit/plugins/filter_geoip2/geoip2.h
new file mode 100644
index 000000000..5c23d60f6
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/geoip2.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <maxminddb.h>
+
+#ifndef FLB_FILTER_GEOIP2_H
+#define FLB_FILTER_GEOIP2_H
+
+struct geoip2_record {
+ char *lookup_key;
+ char *key;
+ char *val;
+ int lookup_key_len;
+ int key_len;
+ int val_len;
+ struct mk_list _head;
+};
+
+struct geoip2_ctx {
+ flb_sds_t database;
+ MMDB_s *mmdb;
+ int lookup_keys_num;
+ int records_num;
+ struct mk_list *lookup_keys;
+ struct mk_list *record_keys;
+ struct mk_list records;
+ struct flb_filter_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/CMakeLists.txt b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/CMakeLists.txt
new file mode 100644
index 000000000..2ed068af2
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/CMakeLists.txt
@@ -0,0 +1,101 @@
+project(maxminddb
+ LANGUAGES C
+)
+set(MAXMINDDB_SOVERSION 0.0.7)
+
+option(BUILD_SHARED_LIBS "Build shared libraries (.dll/.so) instead of static ones (.lib/.a)" OFF)
+option(BUILD_TESTING "Build test programs" ON)
+
+include(CheckTypeSize)
+check_type_size("unsigned __int128" UINT128)
+check_type_size("unsigned int __attribute__((mode(TI)))" UINT128_USING_MODE)
+if(HAVE_UINT128)
+ set(MMDB_UINT128_USING_MODE 0)
+ set(MMDB_UINT128_IS_BYTE_ARRAY 0)
+elseif(HAVE_UINT128_USING_MODE)
+ set(MMDB_UINT128_USING_MODE 1)
+ set(MMDB_UINT128_IS_BYTE_ARRAY 0)
+else()
+ set(MMDB_UINT128_USING_MODE 0)
+ set(MMDB_UINT128_IS_BYTE_ARRAY 1)
+endif()
+
+include (TestBigEndian)
+TEST_BIG_ENDIAN(IS_BIG_ENDIAN)
+
+if (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
+ set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+endif()
+
+configure_file(${PROJECT_SOURCE_DIR}/include/maxminddb_config.h.cmake.in
+ ${PROJECT_SOURCE_DIR}/include/maxminddb_config.h)
+
+add_library(maxminddb STATIC
+ src/maxminddb.c
+ src/data-pool.c
+)
+add_library(maxminddb::maxminddb ALIAS maxminddb)
+
+set_target_properties(maxminddb PROPERTIES VERSION ${MAXMINDDB_SOVERSION})
+
+target_compile_definitions(maxminddb PUBLIC PACKAGE_VERSION="${PROJECT_VERSION}")
+
+if(NOT IS_BIG_ENDIAN)
+ target_compile_definitions(maxminddb PRIVATE MMDB_LITTLE_ENDIAN=1)
+endif()
+
+if(MSVC)
+ target_compile_definitions(maxminddb PRIVATE _CRT_SECURE_NO_WARNINGS)
+endif()
+
+if(WIN32)
+ target_link_libraries(maxminddb ws2_32)
+endif()
+
+set(CMAKE_SHARED_LIBRARY_PREFIX lib)
+set(CMAKE_STATIC_LIBRARY_PREFIX lib)
+
+set(MAXMINDB_INCLUDE_DIR
+ .
+ include
+)
+
+
+
+#
+# NOTE: This function call was modified for Fluent Bit.
+# The original first argument was the following:
+#
+# $<BUILD_INTERFACE:${MAXMINDB_INCLUDE_DIR}>
+#
+target_include_directories(maxminddb PUBLIC
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+ $<INSTALL_INTERFACE:include>
+)
+
+set(MAXMINDB_HEADERS
+ include/maxminddb.h
+ include/maxminddb_config.h
+)
+set_target_properties(maxminddb PROPERTIES PUBLIC_HEADER "${MAXMINDB_HEADERS}")
+
+#install(TARGETS maxminddb
+# EXPORT maxminddb
+# ARCHIVE DESTINATION lib
+# PUBLIC_HEADER DESTINATION include/
+#)
+#
+## This is required to work with FetchContent
+#install(EXPORT maxminddb
+# FILE maxminddb-config.cmake
+# NAMESPACE maxminddb::
+# DESTINATION lib/cmake/maxminddb)
+
+# We always want to build mmdblookup
+add_subdirectory(bin)
+
+if (BUILD_TESTING)
+ enable_testing()
+ add_subdirectory(t)
+endif()
+
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/LICENSE b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/NOTICE b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/NOTICE
new file mode 100644
index 000000000..6b8694752
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2013-2014 MaxMind, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/VERSION b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/VERSION
new file mode 100644
index 000000000..dbdbf984c
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/VERSION
@@ -0,0 +1 @@
+ad35e6af
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/bin/CMakeLists.txt b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/bin/CMakeLists.txt
new file mode 100644
index 000000000..9026be83b
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/bin/CMakeLists.txt
@@ -0,0 +1,13 @@
+# mmdblookup requires getopt, which is not available by default on Windows
+if(NOT WIN32)
+ add_executable(mmdblookup
+ mmdblookup.c
+ )
+
+ target_link_libraries(mmdblookup maxminddb pthread)
+
+ install(
+ TARGETS mmdblookup
+ RUNTIME DESTINATION bin
+ )
+endif()
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/bin/Makefile.am b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/bin/Makefile.am
new file mode 100644
index 000000000..c00ba95b5
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/bin/Makefile.am
@@ -0,0 +1,10 @@
+include $(top_srcdir)/common.mk
+
+AM_LDFLAGS = $(top_builddir)/src/libmaxminddb.la
+
+bin_PROGRAMS = mmdblookup
+
+if !WINDOWS
+AM_CPPFLAGS += -pthread
+AM_LDFLAGS += -pthread
+endif
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/bin/mmdblookup.c b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/bin/mmdblookup.c
new file mode 100644
index 000000000..d7ec3fff2
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/bin/mmdblookup.c
@@ -0,0 +1,762 @@
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+#include "maxminddb.h"
+#include <errno.h>
+#include <getopt.h>
+#ifndef _WIN32
+#include <pthread.h>
+#endif
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#ifdef _WIN32
+#ifndef UNICODE
+#define UNICODE
+#endif
+#include <malloc.h>
+#else
+#include <libgen.h>
+#include <unistd.h>
+#endif
+
+#define LOCAL static
+
+LOCAL void usage(char *program, int exit_code, const char *error);
+LOCAL const char **get_options(
+ int argc,
+ char **argv,
+ char **mmdb_file,
+ char **ip_address,
+ int *verbose,
+ int *iterations,
+ int *lookup_path_length,
+ int *const thread_count,
+ char **const ip_file);
+LOCAL MMDB_s open_or_die(const char *fname);
+LOCAL void dump_meta(MMDB_s *mmdb);
+LOCAL bool lookup_from_file(MMDB_s *const mmdb,
+ char const *const ip_file,
+ bool const dump);
+LOCAL int lookup_and_print(MMDB_s *mmdb, const char *ip_address,
+ const char **lookup_path,
+ int lookup_path_length,
+ bool verbose);
+LOCAL int benchmark(MMDB_s *mmdb, int iterations);
+LOCAL MMDB_lookup_result_s lookup_or_die(MMDB_s *mmdb, const char *ipstr);
+LOCAL void random_ipv4(char *ip);
+
+#ifndef _WIN32
+// These aren't with the automatically generated prototypes as we'd lose the
+// enclosing macros.
+static bool start_threaded_benchmark(
+ MMDB_s *const mmdb,
+ int const thread_count,
+ int const iterations);
+static long double get_time(void);
+static void *thread(void *arg);
+#endif
+
+#ifdef _WIN32
+int wmain(int argc, wchar_t **wargv)
+{
+ // Convert our argument list from UTF-16 to UTF-8.
+ char **argv = (char **)calloc(argc, sizeof(char *));
+ if (!argv) {
+ fprintf(stderr, "calloc(): %s\n", strerror(errno));
+ exit(1);
+ }
+ for (int i = 0; i < argc; i++) {
+ int utf8_width;
+ char *utf8_string;
+ utf8_width = WideCharToMultiByte(CP_UTF8, 0, wargv[i], -1, NULL, 0,
+ NULL, NULL);
+ if (utf8_width < 1) {
+ fprintf(stderr, "WideCharToMultiByte() failed: %d\n",
+ GetLastError());
+ exit(1);
+ }
+ utf8_string = calloc(utf8_width, sizeof(char));
+ if (!utf8_string) {
+ fprintf(stderr, "calloc(): %s\n", strerror(errno));
+ exit(1);
+ }
+ if (WideCharToMultiByte(CP_UTF8, 0, wargv[i], -1, utf8_string,
+ utf8_width, NULL, NULL) < 1) {
+ fprintf(stderr, "WideCharToMultiByte() failed: %d\n",
+ GetLastError());
+ exit(1);
+ }
+ argv[i] = utf8_string;
+ }
+#else // _WIN32
+int main(int argc, char **argv)
+{
+#endif // _WIN32
+ char *mmdb_file = NULL;
+ char *ip_address = NULL;
+ int verbose = 0;
+ int iterations = 0;
+ int lookup_path_length = 0;
+ int thread_count = 0;
+ char *ip_file = NULL;
+
+ const char **lookup_path =
+ get_options(argc, argv, &mmdb_file, &ip_address, &verbose, &iterations,
+ &lookup_path_length, &thread_count, &ip_file);
+
+ MMDB_s mmdb = open_or_die(mmdb_file);
+
+ if (verbose) {
+ dump_meta(&mmdb);
+ }
+
+ // The benchmarking and lookup from file modes are hidden features mainly
+ // intended for development right now. This means there are several flags
+ // that exist but are intentionally not mentioned in the usage or man page.
+
+ // The lookup from file mode may be useful to expose publicly in the usage,
+ // but we should have it respect the lookup_path functionality if we do so.
+ if (ip_file) {
+ free((void *)lookup_path);
+ if (!lookup_from_file(&mmdb, ip_file, verbose == 1)) {
+ MMDB_close(&mmdb);
+ return 1;
+ }
+ MMDB_close(&mmdb);
+ return 0;
+ }
+
+ if (0 == iterations) {
+ exit(lookup_and_print(&mmdb, ip_address, lookup_path,
+ lookup_path_length, verbose));
+ }
+
+ free((void *)lookup_path);
+
+ srand( (int)time(NULL) );
+
+#ifndef _WIN32
+ if (thread_count > 0) {
+ if (!start_threaded_benchmark(&mmdb, thread_count, iterations)) {
+ MMDB_close(&mmdb);
+ exit(1);
+ }
+ MMDB_close(&mmdb);
+ exit(0);
+ }
+#endif
+
+ exit(benchmark(&mmdb, iterations));
+}
+
+LOCAL void usage(char *program, int exit_code, const char *error)
+{
+ if (NULL != error) {
+ fprintf(stderr, "\n *ERROR: %s\n", error);
+ }
+
+ char *usage = "\n"
+ " %s --file /path/to/file.mmdb --ip 1.2.3.4 [path to lookup]\n"
+ "\n"
+ " This application accepts the following options:\n"
+ "\n"
+ " --file (-f) The path to the MMDB file. Required.\n"
+ "\n"
+ " --ip (-i) The IP address to look up. Required.\n"
+ "\n"
+ " --verbose (-v) Turns on verbose output. Specifically, this causes this\n"
+ " application to output the database metadata.\n"
+ "\n"
+ " --version Print the program's version number and exit.\n"
+ "\n"
+ " --help (-h -?) Show usage information.\n"
+ "\n"
+ " If an IP's data entry resolves to a map or array, you can provide\n"
+ " a lookup path to only show part of that data.\n"
+ "\n"
+ " For example, given a JSON structure like this:\n"
+ "\n"
+ " {\n"
+ " \"names\": {\n"
+ " \"en\": \"Germany\",\n"
+ " \"de\": \"Deutschland\"\n"
+ " },\n"
+ " \"cities\": [ \"Berlin\", \"Frankfurt\" ]\n"
+ " }\n"
+ "\n"
+ " You could look up just the English name by calling mmdblookup with a lookup path of:\n"
+ "\n"
+ " mmdblookup --file ... --ip ... names en\n"
+ "\n"
+ " Or you could look up the second city in the list with:\n"
+ "\n"
+ " mmdblookup --file ... --ip ... cities 1\n"
+ "\n"
+ " Array numbering begins with zero (0).\n"
+ "\n"
+ " If you do not provide a path to lookup, all of the information for a given IP\n"
+ " will be shown.\n"
+ "\n";
+
+ fprintf(stdout, usage, program);
+ exit(exit_code);
+}
+
+LOCAL const char **get_options(
+ int argc,
+ char **argv,
+ char **mmdb_file,
+ char **ip_address,
+ int *verbose,
+ int *iterations,
+ int *lookup_path_length,
+ int *const thread_count,
+ char **const ip_file)
+{
+ static int help = 0;
+ static int version = 0;
+
+ while (1) {
+ static struct option options[] = {
+ { "file", required_argument, 0, 'f' },
+ { "ip", required_argument, 0, 'i' },
+ { "verbose", no_argument, 0, 'v' },
+ { "version", no_argument, 0, 'n' },
+ { "benchmark", required_argument, 0, 'b' },
+#ifndef _WIN32
+ { "threads", required_argument, 0, 't' },
+#endif
+ { "ip-file", required_argument, 0, 'I' },
+ { "help", no_argument, 0, 'h' },
+ { "?", no_argument, 0, 1 },
+ { 0, 0, 0, 0 }
+ };
+
+ int opt_index;
+#ifdef _WIN32
+ char const * const optstring = "f:i:b:I:vnh?";
+#else
+ char const * const optstring = "f:i:b:t:I:vnh?";
+#endif
+ int opt_char = getopt_long(argc, argv, optstring, options,
+ &opt_index);
+
+ if (-1 == opt_char) {
+ break;
+ }
+
+ if ('f' == opt_char) {
+ *mmdb_file = optarg;
+ } else if ('i' == opt_char) {
+ *ip_address = optarg;
+ } else if ('v' == opt_char) {
+ *verbose = 1;
+ } else if ('n' == opt_char) {
+ version = 1;
+ } else if ('b' == opt_char) {
+ *iterations = strtol(optarg, NULL, 10);
+ } else if ('h' == opt_char || '?' == opt_char) {
+ help = 1;
+ } else if (opt_char == 't') {
+ *thread_count = strtol(optarg, NULL, 10);
+ } else if (opt_char == 'I') {
+ *ip_file = optarg;
+ }
+ }
+
+#ifdef _WIN32
+ char *program = alloca(strlen(argv[0]));
+ _splitpath(argv[0], NULL, NULL, program, NULL);
+ _splitpath(argv[0], NULL, NULL, NULL, program + strlen(program));
+#else
+ char *program = basename(argv[0]);
+#endif
+
+ if (help) {
+ usage(program, 0, NULL);
+ }
+
+ if (version) {
+ fprintf(stdout, "\n %s version %s\n\n", program, PACKAGE_VERSION);
+ exit(0);
+ }
+
+ if (NULL == *mmdb_file) {
+ usage(program, 1, "You must provide a filename with --file");
+ }
+
+ if (*ip_address == NULL && *iterations == 0 && !*ip_file) {
+ usage(program, 1, "You must provide an IP address with --ip");
+ }
+
+ const char **lookup_path =
+ calloc((argc - optind) + 1, sizeof(const char *));
+ int i;
+ for (i = 0; i < argc - optind; i++) {
+ lookup_path[i] = argv[i + optind];
+ (*lookup_path_length)++;
+ }
+ lookup_path[i] = NULL;
+
+ return lookup_path;
+}
+
+LOCAL MMDB_s open_or_die(const char *fname)
+{
+ MMDB_s mmdb;
+ int status = MMDB_open(fname, MMDB_MODE_MMAP, &mmdb);
+
+ if (MMDB_SUCCESS != status) {
+ fprintf(stderr, "\n Can't open %s - %s\n", fname,
+ MMDB_strerror(status));
+
+ if (MMDB_IO_ERROR == status) {
+ fprintf(stderr, " IO error: %s\n", strerror(errno));
+ }
+
+ fprintf(stderr, "\n");
+
+ exit(2);
+ }
+
+ return mmdb;
+}
+
+LOCAL void dump_meta(MMDB_s *mmdb)
+{
+ const char *meta_dump = "\n"
+ " Database metadata\n"
+ " Node count: %i\n"
+ " Record size: %i bits\n"
+ " IP version: IPv%i\n"
+ " Binary format: %i.%i\n"
+ " Build epoch: %llu (%s)\n"
+ " Type: %s\n"
+ " Languages: ";
+
+ char date[40];
+ const time_t epoch = (const time_t)mmdb->metadata.build_epoch;
+ strftime(date, 40, "%F %T UTC", gmtime(&epoch));
+
+ fprintf(stdout, meta_dump,
+ mmdb->metadata.node_count,
+ mmdb->metadata.record_size,
+ mmdb->metadata.ip_version,
+ mmdb->metadata.binary_format_major_version,
+ mmdb->metadata.binary_format_minor_version,
+ mmdb->metadata.build_epoch,
+ date,
+ mmdb->metadata.database_type);
+
+ for (size_t i = 0; i < mmdb->metadata.languages.count; i++) {
+ fprintf(stdout, "%s", mmdb->metadata.languages.names[i]);
+ if (i < mmdb->metadata.languages.count - 1) {
+ fprintf(stdout, " ");
+ }
+ }
+ fprintf(stdout, "\n");
+
+ fprintf(stdout, " Description:\n");
+ for (size_t i = 0; i < mmdb->metadata.description.count; i++) {
+ fprintf(stdout, " %s: %s\n",
+ mmdb->metadata.description.descriptions[i]->language,
+ mmdb->metadata.description.descriptions[i]->description);
+ }
+ fprintf(stdout, "\n");
+}
+
+// The input file should have one IP per line.
+//
+// We look up each IP.
+//
+// If dump is true, we dump the data for each IP to stderr. This is useful for
+// comparison in that you can dump out the data for the IPs before and after
+// making changes. It goes to stderr rather than stdout so that the report does
+// not get included in what you will compare (since it will almost always be
+// different).
+//
+// In addition to being useful for comparisons, this function provides a way to
+// have a more deterministic set of lookups for benchmarking.
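+//
+// An illustrative invocation (the file names here are placeholders):
+//
+//   mmdblookup --file GeoLite2-Country.mmdb --ip-file ips.txt --verbose
+//
+// where ips.txt contains one address per line, e.g. "1.2.3.4" or "2001:db8::1".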
+LOCAL bool lookup_from_file(MMDB_s *const mmdb,
+ char const *const ip_file,
+ bool const dump)
+{
+ FILE *const fh = fopen(ip_file, "r");
+ if (!fh) {
+ fprintf(stderr, "fopen(): %s: %s\n", ip_file, strerror(errno));
+ return false;
+ }
+
+ clock_t const clock_start = clock();
+ char buf[1024] = { 0 };
+ // I'd normally use uint64_t, but support for it is optional in C99.
+ unsigned long long i = 0;
+ while (1) {
+ if (fgets(buf, sizeof(buf), fh) == NULL) {
+ if (!feof(fh)) {
+ fprintf(stderr, "fgets(): %s\n", strerror(errno));
+ fclose(fh);
+ return false;
+ }
+ if (fclose(fh) != 0) {
+ fprintf(stderr, "fclose(): %s\n", strerror(errno));
+ return false;
+ }
+ break;
+ }
+
+ char *ptr = buf;
+ while (*ptr != '\0') {
+ if (*ptr == '\n') {
+ *ptr = '\0';
+ break;
+ }
+ ptr++;
+ }
+ if (strlen(buf) == 0) {
+ continue;
+ }
+
+ i++;
+
+ MMDB_lookup_result_s result = lookup_or_die(mmdb, buf);
+ if (!result.found_entry) {
+ continue;
+ }
+
+ MMDB_entry_data_list_s *entry_data_list = NULL;
+ int const status = MMDB_get_entry_data_list(&result.entry,
+ &entry_data_list);
+ if (status != MMDB_SUCCESS) {
+ fprintf(stderr, "MMDB_get_entry_data_list(): %s\n",
+ MMDB_strerror(status));
+ fclose(fh);
+ MMDB_free_entry_data_list(entry_data_list);
+ return false;
+ }
+
+ if (!entry_data_list) {
+ fprintf(stderr, "entry_data_list is NULL\n");
+ fclose(fh);
+ return false;
+ }
+
+ if (dump) {
+ fprintf(stdout, "%s:\n", buf);
+ int const status = MMDB_dump_entry_data_list(stderr,
+ entry_data_list, 0);
+ if (status != MMDB_SUCCESS) {
+ fprintf(stderr, "MMDB_dump_entry_data_list(): %s\n",
+ MMDB_strerror(status));
+ fclose(fh);
+ MMDB_free_entry_data_list(entry_data_list);
+ return false;
+ }
+ }
+
+ MMDB_free_entry_data_list(entry_data_list);
+ }
+
+ clock_t const clock_diff = clock() - clock_start;
+ double const seconds = (double)clock_diff / CLOCKS_PER_SEC;
+
+ fprintf(
+ stdout,
+ "Looked up %llu addresses in %.2f seconds. %.2f lookups per second.\n",
+ i, seconds, i / seconds);
+
+ return true;
+}
+
+LOCAL int lookup_and_print(MMDB_s *mmdb, const char *ip_address,
+ const char **lookup_path,
+ int lookup_path_length,
+ bool verbose)
+{
+
+ MMDB_lookup_result_s result = lookup_or_die(mmdb, ip_address);
+ MMDB_entry_data_list_s *entry_data_list = NULL;
+
+ int exit_code = 0;
+
+ if (verbose) {
+ fprintf(
+ stdout,
+ "\n Record prefix length: %d\n",
+ result.netmask
+ );
+ }
+
+ if (result.found_entry) {
+ int status;
+ if (lookup_path_length) {
+ MMDB_entry_data_s entry_data;
+ status = MMDB_aget_value(&result.entry, &entry_data,
+ lookup_path);
+ if (MMDB_SUCCESS == status) {
+ if (entry_data.offset) {
+ MMDB_entry_s entry =
+ { .mmdb = mmdb, .offset = entry_data.offset };
+ status = MMDB_get_entry_data_list(&entry,
+ &entry_data_list);
+ } else {
+ fprintf(
+ stdout,
+ "\n No data was found at the lookup path you provided\n\n");
+ }
+ }
+ } else {
+ status = MMDB_get_entry_data_list(&result.entry,
+ &entry_data_list);
+ }
+
+ if (MMDB_SUCCESS != status) {
+ fprintf(stderr, "Got an error looking up the entry data - %s\n",
+ MMDB_strerror(status));
+ exit_code = 5;
+ goto end;
+ }
+
+ if (NULL != entry_data_list) {
+ fprintf(stdout, "\n");
+ MMDB_dump_entry_data_list(stdout, entry_data_list, 2);
+ fprintf(stdout, "\n");
+ }
+ } else {
+ fprintf(stderr,
+ "\n Could not find an entry for this IP address (%s)\n\n",
+ ip_address);
+ exit_code = 6;
+ }
+
+ end:
+ MMDB_free_entry_data_list(entry_data_list);
+ MMDB_close(mmdb);
+ free((void *)lookup_path);
+
+ return exit_code;
+}
+
+LOCAL int benchmark(MMDB_s *mmdb, int iterations)
+{
+ char ip_address[16];
+ int exit_code = 0;
+
+ clock_t time = clock();
+
+ for (int i = 0; i < iterations; i++) {
+ random_ipv4(ip_address);
+
+ MMDB_lookup_result_s result = lookup_or_die(mmdb, ip_address);
+ MMDB_entry_data_list_s *entry_data_list = NULL;
+
+ if (result.found_entry) {
+
+ int status = MMDB_get_entry_data_list(&result.entry,
+ &entry_data_list);
+
+ if (MMDB_SUCCESS != status) {
+ fprintf(stderr, "Got an error looking up the entry data - %s\n",
+ MMDB_strerror(status));
+ exit_code = 5;
+ MMDB_free_entry_data_list(entry_data_list);
+ goto end;
+ }
+ }
+
+ MMDB_free_entry_data_list(entry_data_list);
+ }
+
+ time = clock() - time;
+ double seconds = ((double)time / CLOCKS_PER_SEC);
+ fprintf(
+ stdout,
+ "\n Looked up %i addresses in %.2f seconds. %.2f lookups per second.\n\n",
+ iterations, seconds, iterations / seconds);
+
+ end:
+ MMDB_close(mmdb);
+
+ return exit_code;
+}
+
+LOCAL MMDB_lookup_result_s lookup_or_die(MMDB_s *mmdb, const char *ipstr)
+{
+ int gai_error, mmdb_error;
+ MMDB_lookup_result_s result =
+ MMDB_lookup_string(mmdb, ipstr, &gai_error, &mmdb_error);
+
+ if (0 != gai_error) {
+ fprintf(stderr,
+ "\n Error from call to getaddrinfo for %s - %s\n\n",
+ ipstr,
+#ifdef _WIN32
+ gai_strerrorA(gai_error)
+#else
+ gai_strerror(gai_error)
+#endif
+ );
+ exit(3);
+ }
+
+ if (MMDB_SUCCESS != mmdb_error) {
+ fprintf(stderr, "\n Got an error from the maxminddb library: %s\n\n",
+ MMDB_strerror(mmdb_error));
+ exit(4);
+ }
+
+ return result;
+}
+
+LOCAL void random_ipv4(char *ip)
+{
+ // rand() is perfectly fine for this use case
+ // coverity[dont_call]
+ int ip_int = rand();
+ uint8_t *bytes = (uint8_t *)&ip_int;
+
+ snprintf(ip, 16, "%u.%u.%u.%u",
+ *bytes, *(bytes + 1), *(bytes + 2), *(bytes + 3));
+}
+
+#ifndef _WIN32
+struct thread_info {
+ pthread_t id;
+ int num;
+ MMDB_s *mmdb;
+ int iterations;
+};
+
+static bool start_threaded_benchmark(
+ MMDB_s *const mmdb,
+ int const thread_count,
+ int const iterations)
+{
+ struct thread_info *const tinfo = calloc(thread_count,
+ sizeof(struct thread_info));
+ if (!tinfo) {
+ fprintf(stderr, "calloc(): %s\n", strerror(errno));
+ return false;
+ }
+
+ // Using clock() isn't appropriate for multiple threads. It's CPU time, not
+ // wall time.
+ long double const start_time = get_time();
+ if (start_time == -1) {
+ free(tinfo);
+ return false;
+ }
+
+ for (int i = 0; i < thread_count; i++) {
+ tinfo[i].num = i;
+ tinfo[i].mmdb = mmdb;
+ tinfo[i].iterations = iterations;
+
+ if (pthread_create(&tinfo[i].id, NULL, &thread, &tinfo[i]) != 0) {
+ fprintf(stderr, "pthread_create() failed\n");
+ free(tinfo);
+ return false;
+ }
+ }
+
+ for (int i = 0; i < thread_count; i++) {
+ if (pthread_join(tinfo[i].id, NULL) != 0) {
+ fprintf(stderr, "pthread_join() failed\n");
+ free(tinfo);
+ return false;
+ }
+ }
+
+ free(tinfo);
+
+ long double const end_time = get_time();
+ if (end_time == -1) {
+ return false;
+ }
+
+ long double const elapsed = end_time - start_time;
+ unsigned long long const total_ips = iterations * thread_count;
+ long double rate = total_ips;
+ if (elapsed != 0) {
+ rate = total_ips / elapsed;
+ }
+
+ fprintf(
+ stdout,
+ "Looked up %llu addresses using %d threads in %.2Lf seconds. %.2Lf lookups per second.\n",
+ total_ips, thread_count, elapsed, rate);
+
+ return true;
+}
+
+static long double get_time(void)
+{
+ // clock_gettime() is not present on OSX until 10.12.
+#ifdef HAVE_CLOCK_GETTIME
+ struct timespec tp = {
+ .tv_sec = 0,
+ .tv_nsec = 0,
+ };
+ clockid_t clk_id = CLOCK_REALTIME;
+#ifdef _POSIX_MONOTONIC_CLOCK
+ clk_id = CLOCK_MONOTONIC;
+#endif
+ if (clock_gettime(clk_id, &tp) != 0) {
+ fprintf(stderr, "clock_gettime(): %s\n", strerror(errno));
+ return -1;
+ }
+ return tp.tv_sec + ((float)tp.tv_nsec / 1e9);
+#else
+ time_t t = time(NULL);
+ if (t == (time_t)-1) {
+ fprintf(stderr, "time(): %s\n", strerror(errno));
+ return -1;
+ }
+ return (long double)t;
+#endif
+}
+
+static void *thread(void *arg)
+{
+ const struct thread_info *const tinfo = arg;
+ if (!tinfo) {
+ fprintf(stderr, "thread(): %s\n", strerror(EINVAL));
+ return NULL;
+ }
+
+ char ip_address[16] = { 0 };
+
+ for (int i = 0; i < tinfo->iterations; i++) {
+ memset(ip_address, 0, 16);
+ random_ipv4(ip_address);
+
+ MMDB_lookup_result_s result = lookup_or_die(tinfo->mmdb, ip_address);
+ if (!result.found_entry) {
+ continue;
+ }
+
+ MMDB_entry_data_list_s *entry_data_list = NULL;
+ int const status = MMDB_get_entry_data_list(&result.entry,
+ &entry_data_list);
+ if (status != MMDB_SUCCESS) {
+ fprintf(stderr, "MMDB_get_entry_data_list(): %s\n",
+ MMDB_strerror(status));
+ MMDB_free_entry_data_list(entry_data_list);
+ return NULL;
+ }
+
+ if (!entry_data_list) {
+ fprintf(stderr, "entry_data_list is NULL\n");
+ return NULL;
+ }
+
+ MMDB_free_entry_data_list(entry_data_list);
+ }
+
+ return NULL;
+}
+#endif
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/include/maxminddb.h b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/include/maxminddb.h
new file mode 100644
index 000000000..13b276f14
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/include/maxminddb.h
@@ -0,0 +1,255 @@
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef MAXMINDDB_H
+#define MAXMINDDB_H
+
+/* Request POSIX.1-2008. However, we want to remain compatible with
+ * POSIX.1-2001 (since we have been historically and see no reason to drop
+ * compatibility). By requesting POSIX.1-2008, we can conditionally use
+ * features provided by that standard if the implementation provides it. We can
+ * check for what the implementation provides by checking the _POSIX_VERSION
+ * macro after including unistd.h. If a feature is in POSIX.1-2008 but not
+ * POSIX.1-2001, check that macro before using the feature (or check for the
+ * feature directly if possible). */
+#ifndef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 200809L
+#endif
+
+#include "maxminddb_config.h"
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <sys/types.h>
+
+#ifdef _WIN32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+/* libmaxminddb package version from configure */
+#define PACKAGE_VERSION "1.4.3"
+
+typedef ADDRESS_FAMILY sa_family_t;
+
+#if defined(_MSC_VER)
+/* MSVC doesn't define signed size_t, copy it from configure */
+#define ssize_t SSIZE_T
+
+/* MSVC doesn't support restricted pointers */
+#define restrict
+#endif
+#else
+#include <netdb.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#endif
+
+#define MMDB_DATA_TYPE_EXTENDED (0)
+#define MMDB_DATA_TYPE_POINTER (1)
+#define MMDB_DATA_TYPE_UTF8_STRING (2)
+#define MMDB_DATA_TYPE_DOUBLE (3)
+#define MMDB_DATA_TYPE_BYTES (4)
+#define MMDB_DATA_TYPE_UINT16 (5)
+#define MMDB_DATA_TYPE_UINT32 (6)
+#define MMDB_DATA_TYPE_MAP (7)
+#define MMDB_DATA_TYPE_INT32 (8)
+#define MMDB_DATA_TYPE_UINT64 (9)
+#define MMDB_DATA_TYPE_UINT128 (10)
+#define MMDB_DATA_TYPE_ARRAY (11)
+#define MMDB_DATA_TYPE_CONTAINER (12)
+#define MMDB_DATA_TYPE_END_MARKER (13)
+#define MMDB_DATA_TYPE_BOOLEAN (14)
+#define MMDB_DATA_TYPE_FLOAT (15)
+
+#define MMDB_RECORD_TYPE_SEARCH_NODE (0)
+#define MMDB_RECORD_TYPE_EMPTY (1)
+#define MMDB_RECORD_TYPE_DATA (2)
+#define MMDB_RECORD_TYPE_INVALID (3)
+
+/* flags for open */
+#define MMDB_MODE_MMAP (1)
+#define MMDB_MODE_MASK (7)
+
+/* error codes */
+#define MMDB_SUCCESS (0)
+#define MMDB_FILE_OPEN_ERROR (1)
+#define MMDB_CORRUPT_SEARCH_TREE_ERROR (2)
+#define MMDB_INVALID_METADATA_ERROR (3)
+#define MMDB_IO_ERROR (4)
+#define MMDB_OUT_OF_MEMORY_ERROR (5)
+#define MMDB_UNKNOWN_DATABASE_FORMAT_ERROR (6)
+#define MMDB_INVALID_DATA_ERROR (7)
+#define MMDB_INVALID_LOOKUP_PATH_ERROR (8)
+#define MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR (9)
+#define MMDB_INVALID_NODE_NUMBER_ERROR (10)
+#define MMDB_IPV6_LOOKUP_IN_IPV4_DATABASE_ERROR (11)
+
+#if !(MMDB_UINT128_IS_BYTE_ARRAY)
+#if MMDB_UINT128_USING_MODE
+typedef unsigned int mmdb_uint128_t __attribute__ ((__mode__(TI)));
+#else
+typedef unsigned __int128 mmdb_uint128_t;
+#endif
+#endif
+
+/* This is a pointer into the data section for a given IP address lookup */
+typedef struct MMDB_entry_s {
+ const struct MMDB_s *mmdb;
+ uint32_t offset;
+} MMDB_entry_s;
+
+typedef struct MMDB_lookup_result_s {
+ bool found_entry;
+ MMDB_entry_s entry;
+ uint16_t netmask;
+} MMDB_lookup_result_s;
+
+typedef struct MMDB_entry_data_s {
+ bool has_data;
+ union {
+ uint32_t pointer;
+ const char *utf8_string;
+ double double_value;
+ const uint8_t *bytes;
+ uint16_t uint16;
+ uint32_t uint32;
+ int32_t int32;
+ uint64_t uint64;
+#if MMDB_UINT128_IS_BYTE_ARRAY
+ uint8_t uint128[16];
+#else
+ mmdb_uint128_t uint128;
+#endif
+ bool boolean;
+ float float_value;
+ };
+ /* This is a 0 if a given entry cannot be found. This can only happen
+ * when a call to MMDB_(v)get_value() asks for hash keys or array
+ * indices that don't exist. */
+ uint32_t offset;
+ /* This is the next entry in the data section, but it's really only
+     * relevant for entries that are part of a larger map or array
+ * struct. There's no good reason for an end user to look at this
+ * directly. */
+ uint32_t offset_to_next;
+ /* This is only valid for strings, utf8_strings or binary data */
+ uint32_t data_size;
+ /* This is an MMDB_DATA_TYPE_* constant */
+ uint32_t type;
+} MMDB_entry_data_s;
+
+/* This is the return type when someone asks for all the entry data in a map or array */
+typedef struct MMDB_entry_data_list_s {
+ MMDB_entry_data_s entry_data;
+ struct MMDB_entry_data_list_s *next;
+ void *pool;
+} MMDB_entry_data_list_s;
+
+typedef struct MMDB_description_s {
+ const char *language;
+ const char *description;
+} MMDB_description_s;
+
+/* WARNING: do not add new fields to this struct without bumping the SONAME.
+ * The struct is allocated by the users of this library and increasing the
+ * size will cause existing users to allocate too little space when the shared
+ * library is upgraded */
+typedef struct MMDB_metadata_s {
+ uint32_t node_count;
+ uint16_t record_size;
+ uint16_t ip_version;
+ const char *database_type;
+ struct {
+ size_t count;
+ const char **names;
+ } languages;
+ uint16_t binary_format_major_version;
+ uint16_t binary_format_minor_version;
+ uint64_t build_epoch;
+ struct {
+ size_t count;
+ MMDB_description_s **descriptions;
+ } description;
+ /* See above warning before adding fields */
+} MMDB_metadata_s;
+
+/* WARNING: do not add new fields to this struct without bumping the SONAME.
+ * The struct is allocated by the users of this library and increasing the
+ * size will cause existing users to allocate too little space when the shared
+ * library is upgraded */
+typedef struct MMDB_ipv4_start_node_s {
+ uint16_t netmask;
+ uint32_t node_value;
+ /* See above warning before adding fields */
+} MMDB_ipv4_start_node_s;
+
+/* WARNING: do not add new fields to this struct without bumping the SONAME.
+ * The struct is allocated by the users of this library and increasing the
+ * size will cause existing users to allocate too little space when the shared
+ * library is upgraded */
+typedef struct MMDB_s {
+ uint32_t flags;
+ const char *filename;
+ ssize_t file_size;
+ const uint8_t *file_content;
+ const uint8_t *data_section;
+ uint32_t data_section_size;
+ const uint8_t *metadata_section;
+ uint32_t metadata_section_size;
+ uint16_t full_record_byte_size;
+ uint16_t depth;
+ MMDB_ipv4_start_node_s ipv4_start_node;
+ MMDB_metadata_s metadata;
+ /* See above warning before adding fields */
+} MMDB_s;
+
+typedef struct MMDB_search_node_s {
+ uint64_t left_record;
+ uint64_t right_record;
+ uint8_t left_record_type;
+ uint8_t right_record_type;
+ MMDB_entry_s left_record_entry;
+ MMDB_entry_s right_record_entry;
+} MMDB_search_node_s;
+
+extern int MMDB_open(const char *const filename, uint32_t flags,
+ MMDB_s *const mmdb);
+extern MMDB_lookup_result_s MMDB_lookup_string(const MMDB_s *const mmdb,
+ const char *const ipstr,
+ int *const gai_error,
+ int *const mmdb_error);
+extern MMDB_lookup_result_s MMDB_lookup_sockaddr(
+ const MMDB_s *const mmdb,
+ const struct sockaddr *const sockaddr,
+ int *const mmdb_error);
+extern int MMDB_read_node(const MMDB_s *const mmdb,
+ uint32_t node_number,
+ MMDB_search_node_s *const node);
+extern int MMDB_get_value(MMDB_entry_s *const start,
+ MMDB_entry_data_s *const entry_data,
+ ...);
+extern int MMDB_vget_value(MMDB_entry_s *const start,
+ MMDB_entry_data_s *const entry_data,
+ va_list va_path);
+extern int MMDB_aget_value(MMDB_entry_s *const start,
+ MMDB_entry_data_s *const entry_data,
+ const char *const *const path);
+extern int MMDB_get_metadata_as_entry_data_list(
+ const MMDB_s *const mmdb, MMDB_entry_data_list_s **const entry_data_list);
+extern int MMDB_get_entry_data_list(
+ MMDB_entry_s *start, MMDB_entry_data_list_s **const entry_data_list);
+extern void MMDB_free_entry_data_list(
+ MMDB_entry_data_list_s *const entry_data_list);
+extern void MMDB_close(MMDB_s *const mmdb);
+extern const char *MMDB_lib_version(void);
+extern int MMDB_dump_entry_data_list(FILE *const stream,
+ MMDB_entry_data_list_s *const entry_data_list,
+ int indent);
+extern const char *MMDB_strerror(int error_code);
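+
+/* A minimal usage sketch of the functions declared above. The database path
+ * and the "country"/"iso_code" lookup path are illustrative only; the keys
+ * that exist depend entirely on the database being opened.
+ *
+ *     MMDB_s mmdb;
+ *     if (MMDB_open("GeoLite2-Country.mmdb", MMDB_MODE_MMAP, &mmdb) !=
+ *         MMDB_SUCCESS) {
+ *         return 1;
+ *     }
+ *     int gai_error, mmdb_error;
+ *     MMDB_lookup_result_s res =
+ *         MMDB_lookup_string(&mmdb, "8.8.8.8", &gai_error, &mmdb_error);
+ *     if (0 == gai_error && MMDB_SUCCESS == mmdb_error && res.found_entry) {
+ *         MMDB_entry_data_s data;
+ *         if (MMDB_SUCCESS == MMDB_get_value(&res.entry, &data,
+ *                                            "country", "iso_code", NULL) &&
+ *             data.has_data) {
+ *             printf("%.*s\n", (int)data.data_size, data.utf8_string);
+ *         }
+ *     }
+ *     MMDB_close(&mmdb);
+ */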
+
+#endif /* MAXMINDDB_H */
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/include/maxminddb_config.h.cmake.in b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/include/maxminddb_config.h.cmake.in
new file mode 100644
index 000000000..8b1977f86
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/include/maxminddb_config.h.cmake.in
@@ -0,0 +1,14 @@
+#ifndef MAXMINDDB_CONFIG_H
+#define MAXMINDDB_CONFIG_H
+
+#ifndef MMDB_UINT128_USING_MODE
+/* Define as 1 if we use unsigned int __attribute__ ((__mode__(TI))) for uint128 values */
+#cmakedefine MMDB_UINT128_USING_MODE @MMDB_UINT128_USING_MODE@
+#endif
+
+#ifndef MMDB_UINT128_IS_BYTE_ARRAY
+/* Define as 1 if we don't have an unsigned __int128 type */
+#cmakedefine MMDB_UINT128_IS_BYTE_ARRAY @MMDB_UINT128_IS_BYTE_ARRAY@
+#endif
+
+#endif /* MAXMINDDB_CONFIG_H */
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/include/maxminddb_config.h.in b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/include/maxminddb_config.h.in
new file mode 100644
index 000000000..314d559d3
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/include/maxminddb_config.h.in
@@ -0,0 +1,14 @@
+#ifndef MAXMINDDB_CONFIG_H
+#define MAXMINDDB_CONFIG_H
+
+#ifndef MMDB_UINT128_USING_MODE
+/* Define as 1 if we use unsigned int __attribute__ ((__mode__(TI))) for uint128 values */
+#define MMDB_UINT128_USING_MODE 0
+#endif
+
+#ifndef MMDB_UINT128_IS_BYTE_ARRAY
+/* Define as 1 if we don't have an unsigned __int128 type */
+#undef MMDB_UINT128_IS_BYTE_ARRAY
+#endif
+
+#endif /* MAXMINDDB_CONFIG_H */
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/Makefile.am b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/Makefile.am
new file mode 100644
index 000000000..6d57acaae
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/Makefile.am
@@ -0,0 +1,25 @@
+include $(top_srcdir)/common.mk
+
+lib_LTLIBRARIES = libmaxminddb.la
+
+libmaxminddb_la_SOURCES = maxminddb.c maxminddb-compat-util.h \
+ data-pool.c data-pool.h
+libmaxminddb_la_LDFLAGS = -version-info 0:7:0 -export-symbols-regex '^MMDB_.*'
+include_HEADERS = $(top_srcdir)/include/maxminddb.h
+
+pkgconfig_DATA = libmaxminddb.pc
+
+TESTS = test-data-pool
+
+check_PROGRAMS = test-data-pool
+
+test_data_pool_SOURCES = data-pool.c data-pool.h
+test_data_pool_CPPFLAGS = $(AM_CPPFLAGS) -I$(top_srcdir)/t -DTEST_DATA_POOL
+test_data_pool_LDADD = $(top_srcdir)/t/libmmdbtest.la \
+ $(top_srcdir)/t/libtap/libtap.a
+
+$(top_srcdir)/t/libmmdbtest.la:
+ $(MAKE) -C $(top_srcdir)/t libmmdbtest.la
+
+$(top_srcdir)/t/libtap/libtap.a:
+ $(MAKE) -C $(top_srcdir)/t/libtap libtap.a
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/data-pool.c b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/data-pool.c
new file mode 100644
index 000000000..48521b64d
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/data-pool.c
@@ -0,0 +1,180 @@
+#include "data-pool.h"
+#include "maxminddb.h"
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+static bool can_multiply(size_t const, size_t const, size_t const);
+
+// Allocate an MMDB_data_pool_s. It initially has space for size
+// MMDB_entry_data_list_s structs.
+MMDB_data_pool_s *data_pool_new(size_t const size)
+{
+ MMDB_data_pool_s *const pool = calloc(1, sizeof(MMDB_data_pool_s));
+ if (!pool) {
+ return NULL;
+ }
+
+ if (size == 0 ||
+ !can_multiply(SIZE_MAX, size, sizeof(MMDB_entry_data_list_s))) {
+ data_pool_destroy(pool);
+ return NULL;
+ }
+ pool->size = size;
+ pool->blocks[0] = calloc(pool->size, sizeof(MMDB_entry_data_list_s));
+ if (!pool->blocks[0]) {
+ data_pool_destroy(pool);
+ return NULL;
+ }
+ pool->blocks[0]->pool = pool;
+
+ pool->sizes[0] = size;
+
+ pool->block = pool->blocks[0];
+
+ return pool;
+}
+
+// Determine if we can multiply m*n. We can do this if the result will be below
+// the given max. max will typically be SIZE_MAX.
+//
+// We want to know if we'll wrap around.
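+//
+// For example, can_multiply(SIZE_MAX, 1, SIZE_MAX) is true, while
+// can_multiply(SIZE_MAX, 2, SIZE_MAX) is false because 2 * SIZE_MAX would wrap.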
+static bool can_multiply(size_t const max, size_t const m, size_t const n)
+{
+ if (m == 0) {
+ return false;
+ }
+
+ return n <= max / m;
+}
+
+// Clean up the data pool.
+void data_pool_destroy(MMDB_data_pool_s *const pool)
+{
+ if (!pool) {
+ return;
+ }
+
+ for (size_t i = 0; i <= pool->index; i++) {
+ free(pool->blocks[i]);
+ }
+
+ free(pool);
+}
+
+// Claim a new struct from the pool. Doing this may cause the pool's size to
+// grow.
+MMDB_entry_data_list_s *data_pool_alloc(MMDB_data_pool_s *const pool)
+{
+ if (!pool) {
+ return NULL;
+ }
+
+ if (pool->used < pool->size) {
+ MMDB_entry_data_list_s *const element = pool->block + pool->used;
+ pool->used++;
+ return element;
+ }
+
+ // Take it from a new block of memory.
+
+ size_t const new_index = pool->index + 1;
+ if (new_index == DATA_POOL_NUM_BLOCKS) {
+ // See the comment about not growing this on DATA_POOL_NUM_BLOCKS.
+ return NULL;
+ }
+
+ if (!can_multiply(SIZE_MAX, pool->size, 2)) {
+ return NULL;
+ }
+ size_t const new_size = pool->size * 2;
+
+ if (!can_multiply(SIZE_MAX, new_size, sizeof(MMDB_entry_data_list_s))) {
+ return NULL;
+ }
+ pool->blocks[new_index] = calloc(new_size, sizeof(MMDB_entry_data_list_s));
+ if (!pool->blocks[new_index]) {
+ return NULL;
+ }
+
+ // We don't need to set this, but it's useful for introspection in tests.
+ pool->blocks[new_index]->pool = pool;
+
+ pool->index = new_index;
+ pool->block = pool->blocks[pool->index];
+
+ pool->size = new_size;
+ pool->sizes[pool->index] = pool->size;
+
+ MMDB_entry_data_list_s *const element = pool->block;
+ pool->used = 1;
+ return element;
+}
+
+// Turn the structs in the array-like pool into a linked list.
+//
+// Before calling this function, the list isn't linked up.
+MMDB_entry_data_list_s *data_pool_to_list(MMDB_data_pool_s *const pool)
+{
+ if (!pool) {
+ return NULL;
+ }
+
+ if (pool->index == 0 && pool->used == 0) {
+ return NULL;
+ }
+
+ for (size_t i = 0; i <= pool->index; i++) {
+ MMDB_entry_data_list_s *const block = pool->blocks[i];
+
+ size_t size = pool->sizes[i];
+ if (i == pool->index) {
+ size = pool->used;
+ }
+
+ for (size_t j = 0; j < size - 1; j++) {
+ MMDB_entry_data_list_s *const cur = block + j;
+ cur->next = block + j + 1;
+ }
+
+ if (i < pool->index) {
+ MMDB_entry_data_list_s *const last = block + size - 1;
+ last->next = pool->blocks[i + 1];
+ }
+ }
+
+ return pool->blocks[0];
+}
+
+#ifdef TEST_DATA_POOL
+
+#include <libtap/tap.h>
+#include <maxminddb_test_helper.h>
+
+static void test_can_multiply(void);
+
+int main(void)
+{
+ plan(NO_PLAN);
+ test_can_multiply();
+ done_testing();
+}
+
+static void test_can_multiply(void)
+{
+ {
+ ok(can_multiply(SIZE_MAX, 1, SIZE_MAX), "1*SIZE_MAX is ok");
+ }
+
+ {
+ ok(!can_multiply(SIZE_MAX, 2, SIZE_MAX), "2*SIZE_MAX is not ok");
+ }
+
+ {
+ ok(can_multiply(SIZE_MAX, 10240, sizeof(MMDB_entry_data_list_s)),
+           "10240 entry_data_list_s's are okay");
+ }
+}
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/data-pool.h b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/data-pool.h
new file mode 100644
index 000000000..25d09923e
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/data-pool.h
@@ -0,0 +1,52 @@
+#ifndef DATA_POOL_H
+#define DATA_POOL_H
+
+#include "maxminddb.h"
+
+#include <stdbool.h>
+#include <stddef.h>
+
+// This should be large enough that we never need to grow the array of pointers
+// to blocks. 32 is enough. Even starting out with size 1 (1 struct), the
+// 32nd element alone will provide 2**32 structs as we exponentially increase
+// the number in each block. Being confident that we do not have to grow the
+// array lets us avoid writing code to do that. That code would be risky as it
+// would rarely be hit and likely not be well tested.
+#define DATA_POOL_NUM_BLOCKS 32
+
+// A pool of memory for MMDB_entry_data_list_s structs. This is so we can
+// allocate multiple up front rather than one at a time for performance
+// reasons.
+//
+// The order you add elements to it (by calling data_pool_alloc()) ends up as
+// the order of the list.
+//
+// The memory only grows. There is no support for releasing an element you take
+// back to the pool.
+typedef struct MMDB_data_pool_s {
+ // Index of the current block we're allocating out of.
+ size_t index;
+
+ // The size of the current block, counting by structs.
+ size_t size;
+
+ // How many used in the current block, counting by structs.
+ size_t used;
+
+ // The current block we're allocating out of.
+ MMDB_entry_data_list_s *block;
+
+ // The size of each block.
+ size_t sizes[DATA_POOL_NUM_BLOCKS];
+
+ // An array of pointers to blocks of memory holding space for list
+ // elements.
+ MMDB_entry_data_list_s *blocks[DATA_POOL_NUM_BLOCKS];
+} MMDB_data_pool_s;
+
+MMDB_data_pool_s *data_pool_new(size_t const);
+void data_pool_destroy(MMDB_data_pool_s *const);
+MMDB_entry_data_list_s *data_pool_alloc(MMDB_data_pool_s *const);
+MMDB_entry_data_list_s *data_pool_to_list(MMDB_data_pool_s *const);
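+
+/* Typical lifecycle, as a sketch of the intended usage (the initial size 64
+ * is illustrative):
+ *
+ *     MMDB_data_pool_s *pool = data_pool_new(64);
+ *     MMDB_entry_data_list_s *first = data_pool_alloc(pool);
+ *     MMDB_entry_data_list_s *next = data_pool_alloc(pool);
+ *     // ... fill in first->entry_data, next->entry_data, ...
+ *     MMDB_entry_data_list_s *list = data_pool_to_list(pool);
+ *     // walk list via ->next, then release every block at once:
+ *     data_pool_destroy(pool);
+ */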
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/libmaxminddb.pc.in b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/libmaxminddb.pc.in
new file mode 100644
index 000000000..00ced3ba9
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/libmaxminddb.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libmaxminddb
+Description: C library for the MaxMind DB file format
+URL: http://maxmind.github.io/libmaxminddb/
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -lmaxminddb
+Cflags: -I${includedir}
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/maxminddb-compat-util.h b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/maxminddb-compat-util.h
new file mode 100644
index 000000000..e3f0320f2
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/maxminddb-compat-util.h
@@ -0,0 +1,167 @@
+#include <stdlib.h>
+#include <string.h>
+
+/* *INDENT-OFF* */
+
+/* The memmem, strdup, and strndup functions were all copied from the
+ * FreeBSD source, along with the relevant copyright notice.
+ *
+ * It'd be nicer to simply use the functions available on the system if they
+ * exist, but there doesn't seem to be a good way to detect them without also
+ * defining things like _GNU_SOURCE, which we want to avoid, because then we
+ * end up _accidentally_ using GNU features without noticing, which then
+ * breaks on systems like OSX.
+ *
+ * C is fun! */
+
+/* Applies to memmem implementation */
+/*-
+ * Copyright (c) 2005 Pascal Gloor <pascal.gloor@spale.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+static void *
+mmdb_memmem(const void *l, size_t l_len, const void *s, size_t s_len)
+{
+ register char *cur, *last;
+ const char *cl = (const char *)l;
+ const char *cs = (const char *)s;
+
+ /* we need something to compare */
+ if (l_len == 0 || s_len == 0)
+ return NULL;
+
+ /* "s" must be smaller or equal to "l" */
+ if (l_len < s_len)
+ return NULL;
+
+ /* special case where s_len == 1 */
+ if (s_len == 1)
+ return memchr(l, (int)*cs, l_len);
+
+    /* the last position where it's possible to find "s" in "l" */
+ last = (char *)cl + l_len - s_len;
+
+ for (cur = (char *)cl; cur <= last; cur++)
+ if (cur[0] == cs[0] && memcmp(cur, cs, s_len) == 0)
+ return cur;
+
+ return NULL;
+}
+
+/* Applies to strnlen implementation */
+/*-
+ * Copyright (c) 2009 David Schultz <das@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+static size_t
+mmdb_strnlen(const char *s, size_t maxlen)
+{
+ size_t len;
+
+ for (len = 0; len < maxlen; len++, s++) {
+ if (!*s)
+ break;
+ }
+ return (len);
+}
+
+/* Applies to strdup and strndup implementation */
+/*
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+static char *
+mmdb_strdup(const char *str)
+{
+ size_t len;
+ char *copy;
+
+ len = strlen(str) + 1;
+ if ((copy = malloc(len)) == NULL)
+ return (NULL);
+ memcpy(copy, str, len);
+ return (copy);
+}
+
+static char *
+mmdb_strndup(const char *str, size_t n)
+{
+ size_t len;
+ char *copy;
+
+ len = mmdb_strnlen(str, n);
+ if ((copy = malloc(len + 1)) == NULL)
+ return (NULL);
+ memcpy(copy, str, len);
+ copy[len] = '\0';
+ return (copy);
+}
+/* *INDENT-ON* */
diff --git a/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/maxminddb.c b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/maxminddb.c
new file mode 100644
index 000000000..427f48afd
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_geoip2/libmaxminddb/src/maxminddb.c
@@ -0,0 +1,2157 @@
+#if HAVE_CONFIG_H
+#include <config.h>
+#endif
+#include "data-pool.h"
+#include "maxminddb.h"
+#include "maxminddb-compat-util.h"
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+
+#ifdef _WIN32
+#ifndef UNICODE
+#define UNICODE
+#endif
+#include <windows.h>
+#include <ws2ipdef.h>
+#else
+#include <arpa/inet.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#endif
+
+#define MMDB_DATA_SECTION_SEPARATOR (16)
+#define MAXIMUM_DATA_STRUCTURE_DEPTH (512)
+
+#ifdef MMDB_DEBUG
+#define LOCAL
+#define DEBUG_MSG(msg) fprintf(stderr, msg "\n")
+#define DEBUG_MSGF(fmt, ...) fprintf(stderr, fmt "\n", __VA_ARGS__)
+#define DEBUG_BINARY(fmt, byte) \
+ do { \
+ char *binary = byte_to_binary(byte); \
+ if (NULL == binary) { \
+ fprintf(stderr, "Calloc failed in DEBUG_BINARY\n"); \
+ abort(); \
+ } \
+ fprintf(stderr, fmt "\n", binary); \
+ free(binary); \
+ } while (0)
+#define DEBUG_NL fprintf(stderr, "\n")
+#else
+#define LOCAL static
+#define DEBUG_MSG(...)
+#define DEBUG_MSGF(...)
+#define DEBUG_BINARY(...)
+#define DEBUG_NL
+#endif
+
+#ifdef MMDB_DEBUG
+char *byte_to_binary(uint8_t byte)
+{
+ char *bits = calloc(9, sizeof(char));
+ if (NULL == bits) {
+ return bits;
+ }
+
+ for (uint8_t i = 0; i < 8; i++) {
+ bits[i] = byte & (128 >> i) ? '1' : '0';
+ }
+ bits[8] = '\0';
+
+ return bits;
+}
+
+char *type_num_to_name(uint8_t num)
+{
+ switch (num) {
+ case 0:
+ return "extended";
+ case 1:
+ return "pointer";
+ case 2:
+ return "utf8_string";
+ case 3:
+ return "double";
+ case 4:
+ return "bytes";
+ case 5:
+ return "uint16";
+ case 6:
+ return "uint32";
+ case 7:
+ return "map";
+ case 8:
+ return "int32";
+ case 9:
+ return "uint64";
+ case 10:
+ return "uint128";
+ case 11:
+ return "array";
+ case 12:
+ return "container";
+ case 13:
+ return "end_marker";
+ case 14:
+ return "boolean";
+ case 15:
+ return "float";
+ default:
+ return "unknown type";
+ }
+}
+#endif
+
+/* None of the values we check on the lhs are bigger than uint32_t, so on
+ * platforms where SIZE_MAX is a 64-bit integer, this would be a no-op, and it
+ * makes the compiler complain if we do the check anyway. */
+#if SIZE_MAX == UINT32_MAX
+#define MAYBE_CHECK_SIZE_OVERFLOW(lhs, rhs, error) \
+ if ((lhs) > (rhs)) { \
+ return error; \
+ }
+#else
+#define MAYBE_CHECK_SIZE_OVERFLOW(...)
+#endif
+
+typedef struct record_info_s {
+ uint16_t record_length;
+ uint32_t (*left_record_getter)(const uint8_t *);
+ uint32_t (*right_record_getter)(const uint8_t *);
+ uint8_t right_record_offset;
+} record_info_s;
+
+#define METADATA_MARKER "\xab\xcd\xefMaxMind.com"
+/* This is 128kb */
+#define METADATA_BLOCK_MAX_SIZE 131072
+
+// 64 leads us to allocating 4 KiB on a 64bit system.
+#define MMDB_POOL_INIT_SIZE 64
+
+LOCAL int map_file(MMDB_s *const mmdb);
+LOCAL const uint8_t *find_metadata(const uint8_t *file_content,
+ ssize_t file_size, uint32_t *metadata_size);
+LOCAL int read_metadata(MMDB_s *mmdb);
+LOCAL MMDB_s make_fake_metadata_db(const MMDB_s *const mmdb);
+LOCAL int value_for_key_as_uint16(MMDB_entry_s *start, char *key,
+ uint16_t *value);
+LOCAL int value_for_key_as_uint32(MMDB_entry_s *start, char *key,
+ uint32_t *value);
+LOCAL int value_for_key_as_uint64(MMDB_entry_s *start, char *key,
+ uint64_t *value);
+LOCAL int value_for_key_as_string(MMDB_entry_s *start, char *key,
+ char const **value);
+LOCAL int populate_languages_metadata(MMDB_s *mmdb, MMDB_s *metadata_db,
+ MMDB_entry_s *metadata_start);
+LOCAL int populate_description_metadata(MMDB_s *mmdb, MMDB_s *metadata_db,
+ MMDB_entry_s *metadata_start);
+LOCAL int resolve_any_address(const char *ipstr, struct addrinfo **addresses);
+LOCAL int find_address_in_search_tree(const MMDB_s *const mmdb,
+ uint8_t *address,
+ sa_family_t address_family,
+ MMDB_lookup_result_s *result);
+LOCAL record_info_s record_info_for_database(const MMDB_s *const mmdb);
+LOCAL int find_ipv4_start_node(MMDB_s *const mmdb);
+LOCAL uint8_t record_type(const MMDB_s *const mmdb, uint64_t record);
+LOCAL uint32_t get_left_28_bit_record(const uint8_t *record);
+LOCAL uint32_t get_right_28_bit_record(const uint8_t *record);
+LOCAL uint32_t data_section_offset_for_record(const MMDB_s *const mmdb,
+ uint64_t record);
+LOCAL int path_length(va_list va_path);
+LOCAL int lookup_path_in_array(const char *path_elem, const MMDB_s *const mmdb,
+ MMDB_entry_data_s *entry_data);
+LOCAL int lookup_path_in_map(const char *path_elem, const MMDB_s *const mmdb,
+ MMDB_entry_data_s *entry_data);
+LOCAL int skip_map_or_array(const MMDB_s *const mmdb,
+ MMDB_entry_data_s *entry_data);
+LOCAL int decode_one_follow(const MMDB_s *const mmdb, uint32_t offset,
+ MMDB_entry_data_s *entry_data);
+LOCAL int decode_one(const MMDB_s *const mmdb, uint32_t offset,
+ MMDB_entry_data_s *entry_data);
+LOCAL int get_ext_type(int raw_ext_type);
+LOCAL uint32_t get_ptr_from(uint8_t ctrl, uint8_t const *const ptr,
+ int ptr_size);
+LOCAL int get_entry_data_list(const MMDB_s *const mmdb,
+ uint32_t offset,
+ MMDB_entry_data_list_s *const entry_data_list,
+ MMDB_data_pool_s *const pool,
+ int depth);
+LOCAL float get_ieee754_float(const uint8_t *restrict p);
+LOCAL double get_ieee754_double(const uint8_t *restrict p);
+LOCAL uint32_t get_uint32(const uint8_t *p);
+LOCAL uint32_t get_uint24(const uint8_t *p);
+LOCAL uint32_t get_uint16(const uint8_t *p);
+LOCAL uint64_t get_uintX(const uint8_t *p, int length);
+LOCAL int32_t get_sintX(const uint8_t *p, int length);
+LOCAL void free_mmdb_struct(MMDB_s *const mmdb);
+LOCAL void free_languages_metadata(MMDB_s *mmdb);
+LOCAL void free_descriptions_metadata(MMDB_s *mmdb);
+LOCAL MMDB_entry_data_list_s *dump_entry_data_list(
+ FILE *stream, MMDB_entry_data_list_s *entry_data_list, int indent,
+ int *status);
+LOCAL void print_indentation(FILE *stream, int i);
+LOCAL char *bytes_to_hex(uint8_t *bytes, uint32_t size);
+
+#define CHECKED_DECODE_ONE(mmdb, offset, entry_data) \
+ do { \
+ int status = decode_one(mmdb, offset, entry_data); \
+ if (MMDB_SUCCESS != status) { \
+ DEBUG_MSGF("CHECKED_DECODE_ONE failed." \
+ " status = %d (%s)", status, MMDB_strerror(status)); \
+ return status; \
+ } \
+ } while (0)
+
+#define CHECKED_DECODE_ONE_FOLLOW(mmdb, offset, entry_data) \
+ do { \
+ int status = decode_one_follow(mmdb, offset, entry_data); \
+ if (MMDB_SUCCESS != status) { \
+ DEBUG_MSGF("CHECKED_DECODE_ONE_FOLLOW failed." \
+ " status = %d (%s)", status, MMDB_strerror(status)); \
+ return status; \
+ } \
+ } while (0)
+
+#define FREE_AND_SET_NULL(p) { free((void *)(p)); (p) = NULL; }
+
+int MMDB_open(const char *const filename, uint32_t flags, MMDB_s *const mmdb)
+{
+ int status = MMDB_SUCCESS;
+
+ mmdb->file_content = NULL;
+ mmdb->data_section = NULL;
+ mmdb->metadata.database_type = NULL;
+ mmdb->metadata.languages.count = 0;
+ mmdb->metadata.languages.names = NULL;
+ mmdb->metadata.description.count = 0;
+
+ mmdb->filename = mmdb_strdup(filename);
+ if (NULL == mmdb->filename) {
+ status = MMDB_OUT_OF_MEMORY_ERROR;
+ goto cleanup;
+ }
+
+ if ((flags & MMDB_MODE_MASK) == 0) {
+ flags |= MMDB_MODE_MMAP;
+ }
+ mmdb->flags = flags;
+
+ if (MMDB_SUCCESS != (status = map_file(mmdb))) {
+ goto cleanup;
+ }
+
+#ifdef _WIN32
+ WSADATA wsa;
+ WSAStartup(MAKEWORD(2, 2), &wsa);
+#endif
+
+ uint32_t metadata_size = 0;
+ const uint8_t *metadata = find_metadata(mmdb->file_content, mmdb->file_size,
+ &metadata_size);
+ if (NULL == metadata) {
+ status = MMDB_INVALID_METADATA_ERROR;
+ goto cleanup;
+ }
+
+ mmdb->metadata_section = metadata;
+ mmdb->metadata_section_size = metadata_size;
+
+ status = read_metadata(mmdb);
+ if (MMDB_SUCCESS != status) {
+ goto cleanup;
+ }
+
+ if (mmdb->metadata.binary_format_major_version != 2) {
+ status = MMDB_UNKNOWN_DATABASE_FORMAT_ERROR;
+ goto cleanup;
+ }
+
+ uint32_t search_tree_size = mmdb->metadata.node_count *
+ mmdb->full_record_byte_size;
+
+ mmdb->data_section = mmdb->file_content + search_tree_size
+ + MMDB_DATA_SECTION_SEPARATOR;
+ if (search_tree_size + MMDB_DATA_SECTION_SEPARATOR >
+ (uint32_t)mmdb->file_size) {
+ status = MMDB_INVALID_METADATA_ERROR;
+ goto cleanup;
+ }
+ mmdb->data_section_size = (uint32_t)mmdb->file_size - search_tree_size -
+ MMDB_DATA_SECTION_SEPARATOR;
+
+    // Although it is likely not possible to construct a database with
+    // valid metadata, as parsed above, and a data_section_size of less
+    // than 3, we do this check because later we assume it is at least
+    // three when doing bounds checks.
+ if (mmdb->data_section_size < 3) {
+ status = MMDB_INVALID_DATA_ERROR;
+ goto cleanup;
+ }
+
+ mmdb->metadata_section = metadata;
+ mmdb->ipv4_start_node.node_value = 0;
+ mmdb->ipv4_start_node.netmask = 0;
+
+ // We do this immediately as otherwise there is a race to set
+ // ipv4_start_node.node_value and ipv4_start_node.netmask.
+ if (mmdb->metadata.ip_version == 6) {
+ status = find_ipv4_start_node(mmdb);
+ if (status != MMDB_SUCCESS) {
+ goto cleanup;
+ }
+ }
+
+ cleanup:
+ if (MMDB_SUCCESS != status) {
+ int saved_errno = errno;
+ free_mmdb_struct(mmdb);
+ errno = saved_errno;
+ }
+ return status;
+}
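+
+/* Editorial illustration (not part of upstream libmaxminddb): a minimal
+ * sketch of how a caller typically drives the API implemented in this file.
+ * The "country"/"iso_code" lookup path assumes a GeoIP2-style database and
+ * is only an example, not something the library defines. Kept under
+ * "#if 0" so it is never compiled. */
+#if 0
+#include <stdio.h>
+#include <maxminddb.h>
+
+static int example_lookup(const char *db_path, const char *ip)
+{
+    MMDB_s mmdb;
+    int status = MMDB_open(db_path, MMDB_MODE_MMAP, &mmdb);
+    if (MMDB_SUCCESS != status) {
+        return -1;
+    }
+
+    int gai_error = 0, mmdb_error = MMDB_SUCCESS;
+    MMDB_lookup_result_s result =
+        MMDB_lookup_string(&mmdb, ip, &gai_error, &mmdb_error);
+
+    if (0 == gai_error && MMDB_SUCCESS == mmdb_error && result.found_entry) {
+        MMDB_entry_data_s entry_data;
+        status = MMDB_get_value(&result.entry, &entry_data,
+                                "country", "iso_code", NULL);
+        if (MMDB_SUCCESS == status && entry_data.has_data &&
+            MMDB_DATA_TYPE_UTF8_STRING == entry_data.type) {
+            /* utf8_string is not NUL-terminated; print data_size bytes */
+            printf("%.*s\n", (int)entry_data.data_size,
+                   entry_data.utf8_string);
+        }
+    }
+
+    MMDB_close(&mmdb);
+    return 0;
+}
+#endif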
+
+#ifdef _WIN32
+
+LOCAL LPWSTR utf8_to_utf16(const char *utf8_str)
+{
+ int wide_chars = MultiByteToWideChar(CP_UTF8, 0, utf8_str, -1, NULL, 0);
+    wchar_t *utf16_str = (wchar_t *)calloc(wide_chars, sizeof(wchar_t));
+
+    /* Avoid passing a NULL buffer to MultiByteToWideChar if the
+     * allocation fails */
+    if (NULL == utf16_str) {
+        return NULL;
+    }
+
+ if (MultiByteToWideChar(CP_UTF8, 0, utf8_str, -1, utf16_str,
+ wide_chars) < 1) {
+ free(utf16_str);
+ return NULL;
+ }
+
+ return utf16_str;
+}
+
+LOCAL int map_file(MMDB_s *const mmdb)
+{
+ DWORD size;
+ int status = MMDB_SUCCESS;
+ HANDLE mmh = NULL;
+ HANDLE fd = INVALID_HANDLE_VALUE;
+ LPWSTR utf16_filename = utf8_to_utf16(mmdb->filename);
+ if (!utf16_filename) {
+ status = MMDB_FILE_OPEN_ERROR;
+ goto cleanup;
+ }
+ fd = CreateFileW(utf16_filename, GENERIC_READ, FILE_SHARE_READ, NULL,
+ OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (fd == INVALID_HANDLE_VALUE) {
+ status = MMDB_FILE_OPEN_ERROR;
+ goto cleanup;
+ }
+ size = GetFileSize(fd, NULL);
+ if (size == INVALID_FILE_SIZE) {
+ status = MMDB_FILE_OPEN_ERROR;
+ goto cleanup;
+ }
+ mmh = CreateFileMapping(fd, NULL, PAGE_READONLY, 0, size, NULL);
+ /* Microsoft documentation for CreateFileMapping indicates this returns
+ NULL not INVALID_HANDLE_VALUE on error */
+ if (NULL == mmh) {
+ status = MMDB_IO_ERROR;
+ goto cleanup;
+ }
+ uint8_t *file_content =
+ (uint8_t *)MapViewOfFile(mmh, FILE_MAP_READ, 0, 0, 0);
+ if (file_content == NULL) {
+ status = MMDB_IO_ERROR;
+ goto cleanup;
+ }
+
+ mmdb->file_size = size;
+ mmdb->file_content = file_content;
+
+ cleanup:;
+ int saved_errno = errno;
+ if (INVALID_HANDLE_VALUE != fd) {
+ CloseHandle(fd);
+ }
+ if (NULL != mmh) {
+ CloseHandle(mmh);
+ }
+ errno = saved_errno;
+ free(utf16_filename);
+
+ return status;
+}
+
+#else // _WIN32
+
+LOCAL int map_file(MMDB_s *const mmdb)
+{
+ ssize_t size;
+ int status = MMDB_SUCCESS;
+
+ int flags = O_RDONLY;
+#ifdef O_CLOEXEC
+ flags |= O_CLOEXEC;
+#endif
+ int fd = open(mmdb->filename, flags);
+ struct stat s;
+ if (fd < 0 || fstat(fd, &s)) {
+ status = MMDB_FILE_OPEN_ERROR;
+ goto cleanup;
+ }
+
+ size = s.st_size;
+ if (size < 0 || size != s.st_size) {
+ status = MMDB_OUT_OF_MEMORY_ERROR;
+ goto cleanup;
+ }
+
+ uint8_t *file_content =
+ (uint8_t *)mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
+ if (MAP_FAILED == file_content) {
+ if (ENOMEM == errno) {
+ status = MMDB_OUT_OF_MEMORY_ERROR;
+ } else {
+ status = MMDB_IO_ERROR;
+ }
+ goto cleanup;
+ }
+
+ mmdb->file_size = size;
+ mmdb->file_content = file_content;
+
+ cleanup:;
+ int saved_errno = errno;
+ if (fd >= 0) {
+ close(fd);
+ }
+ errno = saved_errno;
+
+ return status;
+}
+
+#endif // _WIN32
+
+LOCAL const uint8_t *find_metadata(const uint8_t *file_content,
+ ssize_t file_size, uint32_t *metadata_size)
+{
+ const ssize_t marker_len = sizeof(METADATA_MARKER) - 1;
+ ssize_t max_size = file_size >
+ METADATA_BLOCK_MAX_SIZE ? METADATA_BLOCK_MAX_SIZE :
+ file_size;
+
+ uint8_t *search_area = (uint8_t *)(file_content + (file_size - max_size));
+ uint8_t *start = search_area;
+ uint8_t *tmp;
+ do {
+ tmp = mmdb_memmem(search_area, max_size,
+ METADATA_MARKER, marker_len);
+
+ if (NULL != tmp) {
+ max_size -= tmp - search_area;
+ search_area = tmp;
+
+ /* Continue searching just after the marker we just read, in case
+ * there are multiple markers in the same file. This would be odd
+ * but is certainly not impossible. */
+ max_size -= marker_len;
+ search_area += marker_len;
+ }
+ } while (NULL != tmp);
+
+ if (search_area == start) {
+ return NULL;
+ }
+
+ *metadata_size = (uint32_t)max_size;
+
+ return search_area;
+}
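+
+/* Editorial note (illustration, not upstream code): the loop above keeps
+ * advancing past every "\xab\xcd\xefMaxMind.com" marker it finds, so the
+ * function effectively returns the position just after the *last* marker in
+ * the final 128 KiB of the file, and *metadata_size is the number of bytes
+ * that remain after that marker. */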
+
+LOCAL int read_metadata(MMDB_s *mmdb)
+{
+ /* We need to create a fake MMDB_s struct in order to decode values from
+ the metadata. The metadata is basically just like the data section, so we
+ want to use the same functions we use for the data section to get metadata
+ values. */
+ MMDB_s metadata_db = make_fake_metadata_db(mmdb);
+
+ MMDB_entry_s metadata_start = {
+ .mmdb = &metadata_db,
+ .offset = 0
+ };
+
+ int status =
+ value_for_key_as_uint32(&metadata_start, "node_count",
+ &mmdb->metadata.node_count);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+ if (!mmdb->metadata.node_count) {
+ DEBUG_MSG("could not find node_count value in metadata");
+ return MMDB_INVALID_METADATA_ERROR;
+ }
+
+ status = value_for_key_as_uint16(&metadata_start, "record_size",
+ &mmdb->metadata.record_size);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+ if (!mmdb->metadata.record_size) {
+ DEBUG_MSG("could not find record_size value in metadata");
+ return MMDB_INVALID_METADATA_ERROR;
+ }
+
+ if (mmdb->metadata.record_size != 24 && mmdb->metadata.record_size != 28
+ && mmdb->metadata.record_size != 32) {
+ DEBUG_MSGF("bad record size in metadata: %i",
+ mmdb->metadata.record_size);
+ return MMDB_UNKNOWN_DATABASE_FORMAT_ERROR;
+ }
+
+ status = value_for_key_as_uint16(&metadata_start, "ip_version",
+ &mmdb->metadata.ip_version);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+ if (!mmdb->metadata.ip_version) {
+ DEBUG_MSG("could not find ip_version value in metadata");
+ return MMDB_INVALID_METADATA_ERROR;
+ }
+ if (!(mmdb->metadata.ip_version == 4 || mmdb->metadata.ip_version == 6)) {
+ DEBUG_MSGF("ip_version value in metadata is not 4 or 6 - it was %i",
+ mmdb->metadata.ip_version);
+ return MMDB_INVALID_METADATA_ERROR;
+ }
+
+ status = value_for_key_as_string(&metadata_start, "database_type",
+ &mmdb->metadata.database_type);
+ if (MMDB_SUCCESS != status) {
+ DEBUG_MSG("error finding database_type value in metadata");
+ return status;
+ }
+
+ status =
+ populate_languages_metadata(mmdb, &metadata_db, &metadata_start);
+ if (MMDB_SUCCESS != status) {
+ DEBUG_MSG("could not populate languages from metadata");
+ return status;
+ }
+
+ status = value_for_key_as_uint16(
+ &metadata_start, "binary_format_major_version",
+ &mmdb->metadata.binary_format_major_version);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+ if (!mmdb->metadata.binary_format_major_version) {
+ DEBUG_MSG(
+ "could not find binary_format_major_version value in metadata");
+ return MMDB_INVALID_METADATA_ERROR;
+ }
+
+ status = value_for_key_as_uint16(
+ &metadata_start, "binary_format_minor_version",
+ &mmdb->metadata.binary_format_minor_version);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+
+ status = value_for_key_as_uint64(&metadata_start, "build_epoch",
+ &mmdb->metadata.build_epoch);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+ if (!mmdb->metadata.build_epoch) {
+ DEBUG_MSG("could not find build_epoch value in metadata");
+ return MMDB_INVALID_METADATA_ERROR;
+ }
+
+ status = populate_description_metadata(mmdb, &metadata_db, &metadata_start);
+ if (MMDB_SUCCESS != status) {
+ DEBUG_MSG("could not populate description from metadata");
+ return status;
+ }
+
+ mmdb->full_record_byte_size = mmdb->metadata.record_size * 2 / 8U;
+
+ mmdb->depth = mmdb->metadata.ip_version == 4 ? 32 : 128;
+
+ return MMDB_SUCCESS;
+}
+
+LOCAL MMDB_s make_fake_metadata_db(const MMDB_s *const mmdb)
+{
+ MMDB_s fake_metadata_db = {
+ .data_section = mmdb->metadata_section,
+ .data_section_size = mmdb->metadata_section_size
+ };
+
+ return fake_metadata_db;
+}
+
+LOCAL int value_for_key_as_uint16(MMDB_entry_s *start, char *key,
+ uint16_t *value)
+{
+ MMDB_entry_data_s entry_data;
+ const char *path[] = { key, NULL };
+ int status = MMDB_aget_value(start, &entry_data, path);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+ if (MMDB_DATA_TYPE_UINT16 != entry_data.type) {
+ DEBUG_MSGF("expect uint16 for %s but received %s", key,
+ type_num_to_name(
+ entry_data.type));
+ return MMDB_INVALID_METADATA_ERROR;
+ }
+ *value = entry_data.uint16;
+ return MMDB_SUCCESS;
+}
+
+LOCAL int value_for_key_as_uint32(MMDB_entry_s *start, char *key,
+ uint32_t *value)
+{
+ MMDB_entry_data_s entry_data;
+ const char *path[] = { key, NULL };
+ int status = MMDB_aget_value(start, &entry_data, path);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+ if (MMDB_DATA_TYPE_UINT32 != entry_data.type) {
+ DEBUG_MSGF("expect uint32 for %s but received %s", key,
+ type_num_to_name(
+ entry_data.type));
+ return MMDB_INVALID_METADATA_ERROR;
+ }
+ *value = entry_data.uint32;
+ return MMDB_SUCCESS;
+}
+
+LOCAL int value_for_key_as_uint64(MMDB_entry_s *start, char *key,
+ uint64_t *value)
+{
+ MMDB_entry_data_s entry_data;
+ const char *path[] = { key, NULL };
+ int status = MMDB_aget_value(start, &entry_data, path);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+ if (MMDB_DATA_TYPE_UINT64 != entry_data.type) {
+ DEBUG_MSGF("expect uint64 for %s but received %s", key,
+ type_num_to_name(
+ entry_data.type));
+ return MMDB_INVALID_METADATA_ERROR;
+ }
+ *value = entry_data.uint64;
+ return MMDB_SUCCESS;
+}
+
+LOCAL int value_for_key_as_string(MMDB_entry_s *start, char *key,
+ char const **value)
+{
+ MMDB_entry_data_s entry_data;
+ const char *path[] = { key, NULL };
+ int status = MMDB_aget_value(start, &entry_data, path);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+ if (MMDB_DATA_TYPE_UTF8_STRING != entry_data.type) {
+ DEBUG_MSGF("expect string for %s but received %s", key,
+ type_num_to_name(
+ entry_data.type));
+ return MMDB_INVALID_METADATA_ERROR;
+ }
+ *value = mmdb_strndup((char *)entry_data.utf8_string, entry_data.data_size);
+ if (NULL == *value) {
+ return MMDB_OUT_OF_MEMORY_ERROR;
+ }
+ return MMDB_SUCCESS;
+}
+
+LOCAL int populate_languages_metadata(MMDB_s *mmdb, MMDB_s *metadata_db,
+ MMDB_entry_s *metadata_start)
+{
+ MMDB_entry_data_s entry_data;
+
+ const char *path[] = { "languages", NULL };
+ int status = MMDB_aget_value(metadata_start, &entry_data, path);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+ if (MMDB_DATA_TYPE_ARRAY != entry_data.type) {
+ return MMDB_INVALID_METADATA_ERROR;
+ }
+
+ MMDB_entry_s array_start = {
+ .mmdb = metadata_db,
+ .offset = entry_data.offset
+ };
+
+ MMDB_entry_data_list_s *member;
+ status = MMDB_get_entry_data_list(&array_start, &member);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+
+ MMDB_entry_data_list_s *first_member = member;
+
+ uint32_t array_size = member->entry_data.data_size;
+ MAYBE_CHECK_SIZE_OVERFLOW(array_size, SIZE_MAX / sizeof(char *),
+ MMDB_INVALID_METADATA_ERROR);
+
+ mmdb->metadata.languages.count = 0;
+ mmdb->metadata.languages.names = calloc(array_size, sizeof(char *));
+ if (NULL == mmdb->metadata.languages.names) {
+ return MMDB_OUT_OF_MEMORY_ERROR;
+ }
+
+ for (uint32_t i = 0; i < array_size; i++) {
+ member = member->next;
+ if (MMDB_DATA_TYPE_UTF8_STRING != member->entry_data.type) {
+ return MMDB_INVALID_METADATA_ERROR;
+ }
+
+ mmdb->metadata.languages.names[i] =
+ mmdb_strndup((char *)member->entry_data.utf8_string,
+ member->entry_data.data_size);
+
+ if (NULL == mmdb->metadata.languages.names[i]) {
+ return MMDB_OUT_OF_MEMORY_ERROR;
+ }
+ // We assign this as we go so that if we fail a calloc and need to
+ // free it, the count is right.
+ mmdb->metadata.languages.count = i + 1;
+ }
+
+ MMDB_free_entry_data_list(first_member);
+
+ return MMDB_SUCCESS;
+}
+
+LOCAL int populate_description_metadata(MMDB_s *mmdb, MMDB_s *metadata_db,
+ MMDB_entry_s *metadata_start)
+{
+ MMDB_entry_data_s entry_data;
+
+ const char *path[] = { "description", NULL };
+ int status = MMDB_aget_value(metadata_start, &entry_data, path);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+
+ if (MMDB_DATA_TYPE_MAP != entry_data.type) {
+ DEBUG_MSGF("Unexpected entry_data type: %d", entry_data.type);
+ return MMDB_INVALID_METADATA_ERROR;
+ }
+
+ MMDB_entry_s map_start = {
+ .mmdb = metadata_db,
+ .offset = entry_data.offset
+ };
+
+ MMDB_entry_data_list_s *member;
+ status = MMDB_get_entry_data_list(&map_start, &member);
+ if (MMDB_SUCCESS != status) {
+ DEBUG_MSGF(
+ "MMDB_get_entry_data_list failed while populating description."
+ " status = %d (%s)", status, MMDB_strerror(status));
+ return status;
+ }
+
+ MMDB_entry_data_list_s *first_member = member;
+
+ uint32_t map_size = member->entry_data.data_size;
+ mmdb->metadata.description.count = 0;
+ if (0 == map_size) {
+ mmdb->metadata.description.descriptions = NULL;
+ goto cleanup;
+ }
+ MAYBE_CHECK_SIZE_OVERFLOW(map_size, SIZE_MAX / sizeof(MMDB_description_s *),
+ MMDB_INVALID_METADATA_ERROR);
+
+ mmdb->metadata.description.descriptions =
+ calloc(map_size, sizeof(MMDB_description_s *));
+ if (NULL == mmdb->metadata.description.descriptions) {
+ status = MMDB_OUT_OF_MEMORY_ERROR;
+ goto cleanup;
+ }
+
+ for (uint32_t i = 0; i < map_size; i++) {
+ mmdb->metadata.description.descriptions[i] =
+ calloc(1, sizeof(MMDB_description_s));
+ if (NULL == mmdb->metadata.description.descriptions[i]) {
+ status = MMDB_OUT_OF_MEMORY_ERROR;
+ goto cleanup;
+ }
+
+ mmdb->metadata.description.count = i + 1;
+ mmdb->metadata.description.descriptions[i]->language = NULL;
+ mmdb->metadata.description.descriptions[i]->description = NULL;
+
+ member = member->next;
+
+ if (MMDB_DATA_TYPE_UTF8_STRING != member->entry_data.type) {
+ status = MMDB_INVALID_METADATA_ERROR;
+ goto cleanup;
+ }
+
+ mmdb->metadata.description.descriptions[i]->language =
+ mmdb_strndup((char *)member->entry_data.utf8_string,
+ member->entry_data.data_size);
+
+ if (NULL == mmdb->metadata.description.descriptions[i]->language) {
+ status = MMDB_OUT_OF_MEMORY_ERROR;
+ goto cleanup;
+ }
+
+ member = member->next;
+
+ if (MMDB_DATA_TYPE_UTF8_STRING != member->entry_data.type) {
+ status = MMDB_INVALID_METADATA_ERROR;
+ goto cleanup;
+ }
+
+ mmdb->metadata.description.descriptions[i]->description =
+ mmdb_strndup((char *)member->entry_data.utf8_string,
+ member->entry_data.data_size);
+
+ if (NULL == mmdb->metadata.description.descriptions[i]->description) {
+ status = MMDB_OUT_OF_MEMORY_ERROR;
+ goto cleanup;
+ }
+ }
+
+ cleanup:
+ MMDB_free_entry_data_list(first_member);
+
+ return status;
+}
+
+MMDB_lookup_result_s MMDB_lookup_string(const MMDB_s *const mmdb,
+ const char *const ipstr,
+ int *const gai_error,
+ int *const mmdb_error)
+{
+ MMDB_lookup_result_s result = {
+ .found_entry = false,
+ .netmask = 0,
+ .entry = {
+ .mmdb = mmdb,
+ .offset = 0
+ }
+ };
+
+ struct addrinfo *addresses = NULL;
+ *gai_error = resolve_any_address(ipstr, &addresses);
+
+ if (!*gai_error) {
+ result = MMDB_lookup_sockaddr(mmdb, addresses->ai_addr, mmdb_error);
+ }
+
+ if (NULL != addresses) {
+ freeaddrinfo(addresses);
+ }
+
+ return result;
+}
+
+LOCAL int resolve_any_address(const char *ipstr, struct addrinfo **addresses)
+{
+ struct addrinfo hints = {
+ .ai_family = AF_UNSPEC,
+ .ai_flags = AI_NUMERICHOST,
+ // We set ai_socktype so that we only get one result back
+ .ai_socktype = SOCK_STREAM
+ };
+
+ int gai_status = getaddrinfo(ipstr, NULL, &hints, addresses);
+ if (gai_status) {
+ return gai_status;
+ }
+
+ return 0;
+}
+
+MMDB_lookup_result_s MMDB_lookup_sockaddr(
+ const MMDB_s *const mmdb,
+ const struct sockaddr *const sockaddr,
+ int *const mmdb_error)
+{
+ MMDB_lookup_result_s result = {
+ .found_entry = false,
+ .netmask = 0,
+ .entry = {
+ .mmdb = mmdb,
+ .offset = 0
+ }
+ };
+
+ uint8_t mapped_address[16], *address;
+ if (mmdb->metadata.ip_version == 4) {
+ if (sockaddr->sa_family == AF_INET6) {
+ *mmdb_error = MMDB_IPV6_LOOKUP_IN_IPV4_DATABASE_ERROR;
+ return result;
+ }
+ address = (uint8_t *)&((struct sockaddr_in *)sockaddr)->sin_addr.s_addr;
+ } else {
+ if (sockaddr->sa_family == AF_INET6) {
+ address =
+ (uint8_t *)&((struct sockaddr_in6 *)sockaddr)->sin6_addr.
+ s6_addr;
+ } else {
+ address = mapped_address;
+ memset(address, 0, 12);
+ memcpy(address + 12,
+ &((struct sockaddr_in *)sockaddr)->sin_addr.s_addr, 4);
+ }
+ }
+
+ *mmdb_error =
+ find_address_in_search_tree(mmdb, address, sockaddr->sa_family,
+ &result);
+
+ return result;
+}
+
+LOCAL int find_address_in_search_tree(const MMDB_s *const mmdb,
+ uint8_t *address,
+ sa_family_t address_family,
+ MMDB_lookup_result_s *result)
+{
+ record_info_s record_info = record_info_for_database(mmdb);
+ if (0 == record_info.right_record_offset) {
+ return MMDB_UNKNOWN_DATABASE_FORMAT_ERROR;
+ }
+
+ uint32_t value = 0;
+ uint16_t current_bit = 0;
+ if (mmdb->metadata.ip_version == 6 && address_family == AF_INET) {
+ value = mmdb->ipv4_start_node.node_value;
+ current_bit = mmdb->ipv4_start_node.netmask;
+ }
+
+ uint32_t node_count = mmdb->metadata.node_count;
+ const uint8_t *search_tree = mmdb->file_content;
+ const uint8_t *record_pointer;
+ for (; current_bit < mmdb->depth && value < node_count; current_bit++) {
+ uint8_t bit = 1U &
+ (address[current_bit >> 3] >> (7 - (current_bit % 8)));
+
+ record_pointer = &search_tree[value * record_info.record_length];
+ if (record_pointer + record_info.record_length > mmdb->data_section) {
+ return MMDB_CORRUPT_SEARCH_TREE_ERROR;
+ }
+ if (bit) {
+ record_pointer += record_info.right_record_offset;
+ value = record_info.right_record_getter(record_pointer);
+ } else {
+ value = record_info.left_record_getter(record_pointer);
+ }
+ }
+
+ result->netmask = current_bit;
+
+ if (value >= node_count + mmdb->data_section_size) {
+ // The pointer points off the end of the database.
+ return MMDB_CORRUPT_SEARCH_TREE_ERROR;
+ }
+
+ if (value == node_count) {
+ // record is empty
+ result->found_entry = false;
+ return MMDB_SUCCESS;
+ }
+ result->found_entry = true;
+ result->entry.offset = data_section_offset_for_record(mmdb, value);
+
+ return MMDB_SUCCESS;
+}
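+
+/* Editorial note (illustration, not upstream code): the bit extraction in
+ * find_address_in_search_tree() above walks the address MSB-first. For
+ * example, with current_bit == 12, address[12 >> 3] selects the second byte
+ * and the shift by 7 - (12 % 8) == 3 moves the bit that sits four positions
+ * below that byte's most significant bit into position 0, so the final
+ * "& 1" yields bit index 12 (the 13th most significant bit) of the
+ * address. */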
+
+LOCAL record_info_s record_info_for_database(const MMDB_s *const mmdb)
+{
+ record_info_s record_info = {
+ .record_length = mmdb->full_record_byte_size,
+ .right_record_offset = 0
+ };
+
+ if (record_info.record_length == 6) {
+ record_info.left_record_getter = &get_uint24;
+ record_info.right_record_getter = &get_uint24;
+ record_info.right_record_offset = 3;
+ } else if (record_info.record_length == 7) {
+ record_info.left_record_getter = &get_left_28_bit_record;
+ record_info.right_record_getter = &get_right_28_bit_record;
+ record_info.right_record_offset = 3;
+ } else if (record_info.record_length == 8) {
+ record_info.left_record_getter = &get_uint32;
+ record_info.right_record_getter = &get_uint32;
+ record_info.right_record_offset = 4;
+ } else {
+ assert(false);
+ }
+
+ return record_info;
+}
+
+LOCAL int find_ipv4_start_node(MMDB_s *const mmdb)
+{
+ /* In a pathological case of a database with a single node search tree,
+ * this check will be true even after we've found the IPv4 start node, but
+ * that doesn't seem worth trying to fix. */
+ if (mmdb->ipv4_start_node.node_value != 0) {
+ return MMDB_SUCCESS;
+ }
+
+ record_info_s record_info = record_info_for_database(mmdb);
+
+ const uint8_t *search_tree = mmdb->file_content;
+ uint32_t node_value = 0;
+ const uint8_t *record_pointer;
+ uint16_t netmask;
+ uint32_t node_count = mmdb->metadata.node_count;
+
+ for (netmask = 0; netmask < 96 && node_value < node_count; netmask++) {
+ record_pointer = &search_tree[node_value * record_info.record_length];
+ if (record_pointer + record_info.record_length > mmdb->data_section) {
+ return MMDB_CORRUPT_SEARCH_TREE_ERROR;
+ }
+ node_value = record_info.left_record_getter(record_pointer);
+ }
+
+ mmdb->ipv4_start_node.node_value = node_value;
+ mmdb->ipv4_start_node.netmask = netmask;
+
+ return MMDB_SUCCESS;
+}
+
+LOCAL uint8_t record_type(const MMDB_s *const mmdb, uint64_t record)
+{
+ uint32_t node_count = mmdb->metadata.node_count;
+
+ /* Ideally we'd check to make sure that a record never points to a
+ * previously seen value, but that's more complicated. For now, we can
+ * at least check that we don't end up at the top of the tree again. */
+ if (record == 0) {
+ DEBUG_MSG("record has a value of 0");
+ return MMDB_RECORD_TYPE_INVALID;
+ }
+
+ if (record < node_count) {
+ return MMDB_RECORD_TYPE_SEARCH_NODE;
+ }
+
+ if (record == node_count) {
+ return MMDB_RECORD_TYPE_EMPTY;
+ }
+
+ if (record - node_count < mmdb->data_section_size) {
+ return MMDB_RECORD_TYPE_DATA;
+ }
+
+ DEBUG_MSG("record has a value that points outside of the database");
+ return MMDB_RECORD_TYPE_INVALID;
+}
+
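+/* Editorial note (illustration, not upstream code): with 28-bit records the
+ * two records of a node share 7 bytes. The left record is bytes 0-2 plus the
+ * high nibble of byte 3 as its top four bits (hence the
+ * "(record[3] & 0xf0) << 20" below), while the right record is simply the
+ * low 28 bits of bytes 3-6, which is why the right getter can reuse
+ * get_uint32() with a 0x0fffffff mask. */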
+LOCAL uint32_t get_left_28_bit_record(const uint8_t *record)
+{
+ return record[0] * 65536 + record[1] * 256 + record[2] +
+ ((record[3] & 0xf0) << 20);
+}
+
+LOCAL uint32_t get_right_28_bit_record(const uint8_t *record)
+{
+ uint32_t value = get_uint32(record);
+ return value & 0xfffffff;
+}
+
+int MMDB_read_node(const MMDB_s *const mmdb, uint32_t node_number,
+ MMDB_search_node_s *const node)
+{
+ record_info_s record_info = record_info_for_database(mmdb);
+ if (0 == record_info.right_record_offset) {
+ return MMDB_UNKNOWN_DATABASE_FORMAT_ERROR;
+ }
+
+ if (node_number > mmdb->metadata.node_count) {
+ return MMDB_INVALID_NODE_NUMBER_ERROR;
+ }
+
+ const uint8_t *search_tree = mmdb->file_content;
+ const uint8_t *record_pointer =
+ &search_tree[node_number * record_info.record_length];
+ node->left_record = record_info.left_record_getter(record_pointer);
+ record_pointer += record_info.right_record_offset;
+ node->right_record = record_info.right_record_getter(record_pointer);
+
+ node->left_record_type = record_type(mmdb, node->left_record);
+ node->right_record_type = record_type(mmdb, node->right_record);
+
+ // Note that offset will be invalid if the record type is not
+ // MMDB_RECORD_TYPE_DATA, but that's ok. Any use of the record entry
+ // for other data types is a programming error.
+ node->left_record_entry = (struct MMDB_entry_s) {
+ .mmdb = mmdb,
+ .offset = data_section_offset_for_record(mmdb, node->left_record),
+ };
+ node->right_record_entry = (struct MMDB_entry_s) {
+ .mmdb = mmdb,
+ .offset = data_section_offset_for_record(mmdb, node->right_record),
+ };
+
+ return MMDB_SUCCESS;
+}
+
+LOCAL uint32_t data_section_offset_for_record(const MMDB_s *const mmdb,
+ uint64_t record)
+{
+ return (uint32_t)record - mmdb->metadata.node_count -
+ MMDB_DATA_SECTION_SEPARATOR;
+}
+
+int MMDB_get_value(MMDB_entry_s *const start,
+ MMDB_entry_data_s *const entry_data,
+ ...)
+{
+ va_list path;
+ va_start(path, entry_data);
+ int status = MMDB_vget_value(start, entry_data, path);
+ va_end(path);
+ return status;
+}
+
+int MMDB_vget_value(MMDB_entry_s *const start,
+ MMDB_entry_data_s *const entry_data,
+ va_list va_path)
+{
+ int length = path_length(va_path);
+ const char *path_elem;
+ int i = 0;
+
+ MAYBE_CHECK_SIZE_OVERFLOW(length, SIZE_MAX / sizeof(const char *) - 1,
+ MMDB_INVALID_METADATA_ERROR);
+
+ const char **path = calloc(length + 1, sizeof(const char *));
+ if (NULL == path) {
+ return MMDB_OUT_OF_MEMORY_ERROR;
+ }
+
+ while (NULL != (path_elem = va_arg(va_path, char *))) {
+ path[i] = path_elem;
+ i++;
+ }
+ path[i] = NULL;
+
+ int status = MMDB_aget_value(start, entry_data, path);
+
+ free((char **)path);
+
+ return status;
+}
+
+LOCAL int path_length(va_list va_path)
+{
+ int i = 0;
+ const char *ignore;
+ va_list path_copy;
+ va_copy(path_copy, va_path);
+
+ while (NULL != (ignore = va_arg(path_copy, char *))) {
+ i++;
+ }
+
+ va_end(path_copy);
+
+ return i;
+}
+
+int MMDB_aget_value(MMDB_entry_s *const start,
+ MMDB_entry_data_s *const entry_data,
+ const char *const *const path)
+{
+ const MMDB_s *const mmdb = start->mmdb;
+ uint32_t offset = start->offset;
+
+ memset(entry_data, 0, sizeof(MMDB_entry_data_s));
+ DEBUG_NL;
+ DEBUG_MSG("looking up value by path");
+
+ CHECKED_DECODE_ONE_FOLLOW(mmdb, offset, entry_data);
+
+ DEBUG_NL;
+ DEBUG_MSGF("top level element is a %s", type_num_to_name(entry_data->type));
+
+ /* Can this happen? It'd probably represent a pathological case under
+ * normal use, but there's nothing preventing someone from passing an
+ * invalid MMDB_entry_s struct to this function */
+ if (!entry_data->has_data) {
+ return MMDB_INVALID_LOOKUP_PATH_ERROR;
+ }
+
+ const char *path_elem;
+ int i = 0;
+ while (NULL != (path_elem = path[i++])) {
+ DEBUG_NL;
+ DEBUG_MSGF("path elem = %s", path_elem);
+
+ /* XXX - it'd be good to find a quicker way to skip through these
+ entries that doesn't involve decoding them
+ completely. Basically we need to just use the size from the
+ control byte to advance our pointer rather than calling
+ decode_one(). */
+ if (entry_data->type == MMDB_DATA_TYPE_ARRAY) {
+ int status = lookup_path_in_array(path_elem, mmdb, entry_data);
+ if (MMDB_SUCCESS != status) {
+ memset(entry_data, 0, sizeof(MMDB_entry_data_s));
+ return status;
+ }
+ } else if (entry_data->type == MMDB_DATA_TYPE_MAP) {
+ int status = lookup_path_in_map(path_elem, mmdb, entry_data);
+ if (MMDB_SUCCESS != status) {
+ memset(entry_data, 0, sizeof(MMDB_entry_data_s));
+ return status;
+ }
+ } else {
+ /* Once we make the code traverse maps & arrays without calling
+ * decode_one() we can get rid of this. */
+ memset(entry_data, 0, sizeof(MMDB_entry_data_s));
+ return MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR;
+ }
+ }
+
+ return MMDB_SUCCESS;
+}
+
+LOCAL int lookup_path_in_array(const char *path_elem,
+ const MMDB_s *const mmdb,
+ MMDB_entry_data_s *entry_data)
+{
+ uint32_t size = entry_data->data_size;
+ char *first_invalid;
+
+ int saved_errno = errno;
+ errno = 0;
+ int array_index = strtol(path_elem, &first_invalid, 10);
+ if (ERANGE == errno) {
+ errno = saved_errno;
+ return MMDB_INVALID_LOOKUP_PATH_ERROR;
+ }
+ errno = saved_errno;
+
+ if (array_index < 0) {
+ array_index += size;
+
+ if (array_index < 0) {
+ return MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR;
+ }
+ }
+
+ if (*first_invalid || (uint32_t)array_index >= size) {
+ return MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR;
+ }
+
+ for (int i = 0; i < array_index; i++) {
+ /* We don't want to follow a pointer here. If the next element is a
+ * pointer we simply skip it and keep going */
+ CHECKED_DECODE_ONE(mmdb, entry_data->offset_to_next, entry_data);
+ int status = skip_map_or_array(mmdb, entry_data);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+ }
+
+ MMDB_entry_data_s value;
+ CHECKED_DECODE_ONE_FOLLOW(mmdb, entry_data->offset_to_next, &value);
+ memcpy(entry_data, &value, sizeof(MMDB_entry_data_s));
+
+ return MMDB_SUCCESS;
+}
+
+LOCAL int lookup_path_in_map(const char *path_elem,
+ const MMDB_s *const mmdb,
+ MMDB_entry_data_s *entry_data)
+{
+ uint32_t size = entry_data->data_size;
+ uint32_t offset = entry_data->offset_to_next;
+ size_t path_elem_len = strlen(path_elem);
+
+ while (size-- > 0) {
+ MMDB_entry_data_s key, value;
+ CHECKED_DECODE_ONE_FOLLOW(mmdb, offset, &key);
+
+ uint32_t offset_to_value = key.offset_to_next;
+
+ if (MMDB_DATA_TYPE_UTF8_STRING != key.type) {
+ return MMDB_INVALID_DATA_ERROR;
+ }
+
+ if (key.data_size == path_elem_len &&
+ !memcmp(path_elem, key.utf8_string, path_elem_len)) {
+
+ DEBUG_MSG("found key matching path elem");
+
+ CHECKED_DECODE_ONE_FOLLOW(mmdb, offset_to_value, &value);
+ memcpy(entry_data, &value, sizeof(MMDB_entry_data_s));
+ return MMDB_SUCCESS;
+ } else {
+ /* We don't want to follow a pointer here. If the next element is
+ * a pointer we simply skip it and keep going */
+ CHECKED_DECODE_ONE(mmdb, offset_to_value, &value);
+ int status = skip_map_or_array(mmdb, &value);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+ offset = value.offset_to_next;
+ }
+ }
+
+ memset(entry_data, 0, sizeof(MMDB_entry_data_s));
+ return MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR;
+}
+
+LOCAL int skip_map_or_array(const MMDB_s *const mmdb,
+ MMDB_entry_data_s *entry_data)
+{
+ if (entry_data->type == MMDB_DATA_TYPE_MAP) {
+ uint32_t size = entry_data->data_size;
+ while (size-- > 0) {
+ CHECKED_DECODE_ONE(mmdb, entry_data->offset_to_next, entry_data); // key
+ CHECKED_DECODE_ONE(mmdb, entry_data->offset_to_next, entry_data); // value
+ int status = skip_map_or_array(mmdb, entry_data);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+ }
+ } else if (entry_data->type == MMDB_DATA_TYPE_ARRAY) {
+ uint32_t size = entry_data->data_size;
+ while (size-- > 0) {
+ CHECKED_DECODE_ONE(mmdb, entry_data->offset_to_next, entry_data); // value
+ int status = skip_map_or_array(mmdb, entry_data);
+ if (MMDB_SUCCESS != status) {
+ return status;
+ }
+ }
+ }
+
+ return MMDB_SUCCESS;
+}
+
+LOCAL int decode_one_follow(const MMDB_s *const mmdb, uint32_t offset,
+ MMDB_entry_data_s *entry_data)
+{
+ CHECKED_DECODE_ONE(mmdb, offset, entry_data);
+ if (entry_data->type == MMDB_DATA_TYPE_POINTER) {
+ uint32_t next = entry_data->offset_to_next;
+ CHECKED_DECODE_ONE(mmdb, entry_data->pointer, entry_data);
+ /* Pointers to pointers are illegal under the spec */
+ if (entry_data->type == MMDB_DATA_TYPE_POINTER) {
+ DEBUG_MSG("pointer points to another pointer");
+ return MMDB_INVALID_DATA_ERROR;
+ }
+
+ /* The pointer could point to any part of the data section but the
+ * next entry for this particular offset may be the one after the
+ * pointer, not the one after whatever the pointer points to. This
+ * depends on whether the pointer points to something that is a simple
+ * value or a compound value. For a compound value, the next one is
+ * the one after the pointer result, not the one after the pointer. */
+ if (entry_data->type != MMDB_DATA_TYPE_MAP
+ && entry_data->type != MMDB_DATA_TYPE_ARRAY) {
+
+ entry_data->offset_to_next = next;
+ }
+ }
+
+ return MMDB_SUCCESS;
+}
+
+#if !MMDB_UINT128_IS_BYTE_ARRAY
+LOCAL mmdb_uint128_t get_uint128(const uint8_t *p, int length)
+{
+ mmdb_uint128_t value = 0;
+ while (length-- > 0) {
+ value <<= 8;
+ value += *p++;
+ }
+ return value;
+}
+#endif
+
+LOCAL int decode_one(const MMDB_s *const mmdb, uint32_t offset,
+ MMDB_entry_data_s *entry_data)
+{
+ const uint8_t *mem = mmdb->data_section;
+
+    // We subtract rather than add as it is possible that offset + 1
+ // could overflow for a corrupt database while an underflow
+ // from data_section_size - 1 should not be possible.
+ if (offset > mmdb->data_section_size - 1) {
+ DEBUG_MSGF("Offset (%d) past data section (%d)", offset,
+ mmdb->data_section_size);
+ return MMDB_INVALID_DATA_ERROR;
+ }
+
+ entry_data->offset = offset;
+ entry_data->has_data = true;
+
+ DEBUG_NL;
+ DEBUG_MSGF("Offset: %i", offset);
+
+ uint8_t ctrl = mem[offset++];
+ DEBUG_BINARY("Control byte: %s", ctrl);
+
+ int type = (ctrl >> 5) & 7;
+ DEBUG_MSGF("Type: %i (%s)", type, type_num_to_name(type));
+
+ if (type == MMDB_DATA_TYPE_EXTENDED) {
+ // Subtracting 1 to avoid possible overflow on offset + 1
+ if (offset > mmdb->data_section_size - 1) {
+ DEBUG_MSGF("Extended type offset (%d) past data section (%d)",
+ offset,
+ mmdb->data_section_size);
+ return MMDB_INVALID_DATA_ERROR;
+ }
+ type = get_ext_type(mem[offset++]);
+ DEBUG_MSGF("Extended type: %i (%s)", type, type_num_to_name(type));
+ }
+
+ entry_data->type = type;
+
+ if (type == MMDB_DATA_TYPE_POINTER) {
+ uint8_t psize = ((ctrl >> 3) & 3) + 1;
+ DEBUG_MSGF("Pointer size: %i", psize);
+
+ // We check that the offset does not extend past the end of the
+ // database and that the subtraction of psize did not underflow.
+ if (offset > mmdb->data_section_size - psize ||
+ mmdb->data_section_size < psize) {
+ DEBUG_MSGF("Pointer offset (%d) past data section (%d)", offset +
+ psize,
+ mmdb->data_section_size);
+ return MMDB_INVALID_DATA_ERROR;
+ }
+ entry_data->pointer = get_ptr_from(ctrl, &mem[offset], psize);
+ DEBUG_MSGF("Pointer to: %i", entry_data->pointer);
+
+ entry_data->data_size = psize;
+ entry_data->offset_to_next = offset + psize;
+ return MMDB_SUCCESS;
+ }
+
+ uint32_t size = ctrl & 31;
+ switch (size) {
+ case 29:
+ // We subtract when checking offset to avoid possible overflow
+ if (offset > mmdb->data_section_size - 1) {
+ DEBUG_MSGF("String end (%d, case 29) past data section (%d)",
+ offset,
+ mmdb->data_section_size);
+ return MMDB_INVALID_DATA_ERROR;
+ }
+ size = 29 + mem[offset++];
+ break;
+ case 30:
+ // We subtract when checking offset to avoid possible overflow
+ if (offset > mmdb->data_section_size - 2) {
+ DEBUG_MSGF("String end (%d, case 30) past data section (%d)",
+ offset,
+ mmdb->data_section_size);
+ return MMDB_INVALID_DATA_ERROR;
+ }
+ size = 285 + get_uint16(&mem[offset]);
+ offset += 2;
+ break;
+ case 31:
+ // We subtract when checking offset to avoid possible overflow
+ if (offset > mmdb->data_section_size - 3) {
+ DEBUG_MSGF("String end (%d, case 31) past data section (%d)",
+ offset,
+ mmdb->data_section_size);
+ return MMDB_INVALID_DATA_ERROR;
+ }
+ size = 65821 + get_uint24(&mem[offset]);
+        offset += 3;
+        break;
+    default:
+ break;
+ }
+
+ DEBUG_MSGF("Size: %i", size);
+
+ if (type == MMDB_DATA_TYPE_MAP || type == MMDB_DATA_TYPE_ARRAY) {
+ entry_data->data_size = size;
+ entry_data->offset_to_next = offset;
+ return MMDB_SUCCESS;
+ }
+
+ if (type == MMDB_DATA_TYPE_BOOLEAN) {
+ entry_data->boolean = size ? true : false;
+ entry_data->data_size = 0;
+ entry_data->offset_to_next = offset;
+ DEBUG_MSGF("boolean value: %s", entry_data->boolean ? "true" : "false");
+ return MMDB_SUCCESS;
+ }
+
+ // Check that the data doesn't extend past the end of the memory
+ // buffer and that the calculation in doing this did not underflow.
+ if (offset > mmdb->data_section_size - size ||
+ mmdb->data_section_size < size) {
+ DEBUG_MSGF("Data end (%d) past data section (%d)", offset + size,
+ mmdb->data_section_size);
+ return MMDB_INVALID_DATA_ERROR;
+ }
+
+ if (type == MMDB_DATA_TYPE_UINT16) {
+ if (size > 2) {
+ DEBUG_MSGF("uint16 of size %d", size);
+ return MMDB_INVALID_DATA_ERROR;
+ }
+ entry_data->uint16 = (uint16_t)get_uintX(&mem[offset], size);
+ DEBUG_MSGF("uint16 value: %u", entry_data->uint16);
+ } else if (type == MMDB_DATA_TYPE_UINT32) {
+ if (size > 4) {
+ DEBUG_MSGF("uint32 of size %d", size);
+ return MMDB_INVALID_DATA_ERROR;
+ }
+ entry_data->uint32 = (uint32_t)get_uintX(&mem[offset], size);
+ DEBUG_MSGF("uint32 value: %u", entry_data->uint32);
+ } else if (type == MMDB_DATA_TYPE_INT32) {
+ if (size > 4) {
+ DEBUG_MSGF("int32 of size %d", size);
+ return MMDB_INVALID_DATA_ERROR;
+ }
+ entry_data->int32 = get_sintX(&mem[offset], size);
+ DEBUG_MSGF("int32 value: %i", entry_data->int32);
+ } else if (type == MMDB_DATA_TYPE_UINT64) {
+ if (size > 8) {
+ DEBUG_MSGF("uint64 of size %d", size);
+ return MMDB_INVALID_DATA_ERROR;
+ }
+ entry_data->uint64 = get_uintX(&mem[offset], size);
+ DEBUG_MSGF("uint64 value: %" PRIu64, entry_data->uint64);
+ } else if (type == MMDB_DATA_TYPE_UINT128) {
+ if (size > 16) {
+ DEBUG_MSGF("uint128 of size %d", size);
+ return MMDB_INVALID_DATA_ERROR;
+ }
+#if MMDB_UINT128_IS_BYTE_ARRAY
+ memset(entry_data->uint128, 0, 16);
+ if (size > 0) {
+ memcpy(entry_data->uint128 + 16 - size, &mem[offset], size);
+ }
+#else
+ entry_data->uint128 = get_uint128(&mem[offset], size);
+#endif
+ } else if (type == MMDB_DATA_TYPE_FLOAT) {
+ if (size != 4) {
+ DEBUG_MSGF("float of size %d", size);
+ return MMDB_INVALID_DATA_ERROR;
+ }
+ size = 4;
+ entry_data->float_value = get_ieee754_float(&mem[offset]);
+ DEBUG_MSGF("float value: %f", entry_data->float_value);
+ } else if (type == MMDB_DATA_TYPE_DOUBLE) {
+ if (size != 8) {
+ DEBUG_MSGF("double of size %d", size);
+ return MMDB_INVALID_DATA_ERROR;
+ }
+ size = 8;
+ entry_data->double_value = get_ieee754_double(&mem[offset]);
+ DEBUG_MSGF("double value: %f", entry_data->double_value);
+ } else if (type == MMDB_DATA_TYPE_UTF8_STRING) {
+ entry_data->utf8_string = size == 0 ? "" : (char *)&mem[offset];
+ entry_data->data_size = size;
+#ifdef MMDB_DEBUG
+ char *string = mmdb_strndup(entry_data->utf8_string,
+ size > 50 ? 50 : size);
+ if (NULL == string) {
+ abort();
+ }
+ DEBUG_MSGF("string value: %s", string);
+ free(string);
+#endif
+ } else if (type == MMDB_DATA_TYPE_BYTES) {
+ entry_data->bytes = &mem[offset];
+ entry_data->data_size = size;
+ }
+
+ entry_data->offset_to_next = offset + size;
+
+ return MMDB_SUCCESS;
+}
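+
+/* Editorial note (worked example, not upstream code): decode_one() splits a
+ * control byte as type = (ctrl >> 5) & 7 and size = ctrl & 31. A control
+ * byte of 0x44 (binary 0100 0100) therefore decodes as type 2 (utf8_string)
+ * with a 4-byte payload. A control byte whose top three bits are zero marks
+ * an extended type: the byte that follows it plus 7 (see get_ext_type()
+ * below) gives the real type, so a following byte of 0x01 selects int32
+ * (type 8). */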
+
+LOCAL int get_ext_type(int raw_ext_type)
+{
+ return 7 + raw_ext_type;
+}
+
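+/* Editorial note (illustration, not upstream code): the constants below come
+ * from the MaxMind DB pointer encoding, where each larger pointer size
+ * starts where the previous one ends. A pointer with one payload byte holds
+ * 11 bits (0-2047), so two-payload-byte pointers add 2048, and
+ * three-payload-byte pointers add 2048 + 524288 == 526336; four payload
+ * bytes carry a plain 32-bit offset. */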
+LOCAL uint32_t get_ptr_from(uint8_t ctrl, uint8_t const *const ptr,
+ int ptr_size)
+{
+ uint32_t new_offset;
+ switch (ptr_size) {
+ case 1:
+ new_offset = ( (ctrl & 7) << 8) + ptr[0];
+ break;
+ case 2:
+ new_offset = 2048 + ( (ctrl & 7) << 16 ) + ( ptr[0] << 8) + ptr[1];
+ break;
+ case 3:
+ new_offset = 2048 + 524288 + ( (ctrl & 7) << 24 ) + get_uint24(ptr);
+ break;
+ case 4:
+ default:
+ new_offset = get_uint32(ptr);
+ break;
+ }
+ return new_offset;
+}
+
+int MMDB_get_metadata_as_entry_data_list(
+ const MMDB_s *const mmdb, MMDB_entry_data_list_s **const entry_data_list)
+{
+ MMDB_s metadata_db = make_fake_metadata_db(mmdb);
+
+ MMDB_entry_s metadata_start = {
+ .mmdb = &metadata_db,
+ .offset = 0
+ };
+
+ return MMDB_get_entry_data_list(&metadata_start, entry_data_list);
+}
+
+int MMDB_get_entry_data_list(
+ MMDB_entry_s *start, MMDB_entry_data_list_s **const entry_data_list)
+{
+ MMDB_data_pool_s *const pool = data_pool_new(MMDB_POOL_INIT_SIZE);
+ if (!pool) {
+ return MMDB_OUT_OF_MEMORY_ERROR;
+ }
+
+ MMDB_entry_data_list_s *const list = data_pool_alloc(pool);
+ if (!list) {
+ data_pool_destroy(pool);
+ return MMDB_OUT_OF_MEMORY_ERROR;
+ }
+
+ int const status = get_entry_data_list(start->mmdb, start->offset, list,
+ pool, 0);
+
+ *entry_data_list = data_pool_to_list(pool);
+ if (!*entry_data_list) {
+ data_pool_destroy(pool);
+ return MMDB_OUT_OF_MEMORY_ERROR;
+ }
+
+ return status;
+}
+
+LOCAL int get_entry_data_list(const MMDB_s *const mmdb,
+ uint32_t offset,
+ MMDB_entry_data_list_s *const entry_data_list,
+ MMDB_data_pool_s *const pool,
+ int depth)
+{
+ if (depth >= MAXIMUM_DATA_STRUCTURE_DEPTH) {
+ DEBUG_MSG("reached the maximum data structure depth");
+ return MMDB_INVALID_DATA_ERROR;
+ }
+ depth++;
+ CHECKED_DECODE_ONE(mmdb, offset, &entry_data_list->entry_data);
+
+ switch (entry_data_list->entry_data.type) {
+ case MMDB_DATA_TYPE_POINTER:
+ {
+ uint32_t next_offset = entry_data_list->entry_data.offset_to_next;
+ uint32_t last_offset;
+ CHECKED_DECODE_ONE(mmdb, last_offset =
+ entry_data_list->entry_data.pointer,
+ &entry_data_list->entry_data);
+
+ /* Pointers to pointers are illegal under the spec */
+ if (entry_data_list->entry_data.type == MMDB_DATA_TYPE_POINTER) {
+ DEBUG_MSG("pointer points to another pointer");
+ return MMDB_INVALID_DATA_ERROR;
+ }
+
+ if (entry_data_list->entry_data.type == MMDB_DATA_TYPE_ARRAY
+ || entry_data_list->entry_data.type == MMDB_DATA_TYPE_MAP) {
+
+ int status =
+ get_entry_data_list(mmdb, last_offset, entry_data_list,
+ pool, depth);
+ if (MMDB_SUCCESS != status) {
+ DEBUG_MSG("get_entry_data_list on pointer failed.");
+ return status;
+ }
+ }
+ entry_data_list->entry_data.offset_to_next = next_offset;
+ }
+ break;
+ case MMDB_DATA_TYPE_ARRAY:
+ {
+ uint32_t array_size = entry_data_list->entry_data.data_size;
+ uint32_t array_offset = entry_data_list->entry_data.offset_to_next;
+ while (array_size-- > 0) {
+ MMDB_entry_data_list_s *entry_data_list_to =
+ data_pool_alloc(pool);
+ if (!entry_data_list_to) {
+ return MMDB_OUT_OF_MEMORY_ERROR;
+ }
+
+ int status =
+ get_entry_data_list(mmdb, array_offset, entry_data_list_to,
+ pool, depth);
+ if (MMDB_SUCCESS != status) {
+ DEBUG_MSG("get_entry_data_list on array element failed.");
+ return status;
+ }
+
+ array_offset = entry_data_list_to->entry_data.offset_to_next;
+ }
+ entry_data_list->entry_data.offset_to_next = array_offset;
+
+ }
+ break;
+ case MMDB_DATA_TYPE_MAP:
+ {
+ uint32_t size = entry_data_list->entry_data.data_size;
+
+ offset = entry_data_list->entry_data.offset_to_next;
+ while (size-- > 0) {
+ MMDB_entry_data_list_s *list_key = data_pool_alloc(pool);
+ if (!list_key) {
+ return MMDB_OUT_OF_MEMORY_ERROR;
+ }
+
+ int status =
+ get_entry_data_list(mmdb, offset, list_key, pool, depth);
+ if (MMDB_SUCCESS != status) {
+ DEBUG_MSG("get_entry_data_list on map key failed.");
+ return status;
+ }
+
+ offset = list_key->entry_data.offset_to_next;
+
+ MMDB_entry_data_list_s *list_value = data_pool_alloc(pool);
+ if (!list_value) {
+ return MMDB_OUT_OF_MEMORY_ERROR;
+ }
+
+ status = get_entry_data_list(mmdb, offset, list_value, pool,
+ depth);
+ if (MMDB_SUCCESS != status) {
+ DEBUG_MSG("get_entry_data_list on map element failed.");
+ return status;
+ }
+ offset = list_value->entry_data.offset_to_next;
+ }
+ entry_data_list->entry_data.offset_to_next = offset;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return MMDB_SUCCESS;
+}
+
+LOCAL float get_ieee754_float(const uint8_t *restrict p)
+{
+ volatile float f;
+ uint8_t *q = (void *)&f;
+/* Windows builds don't use autoconf but we can assume they're all
+ * little-endian. */
+#if MMDB_LITTLE_ENDIAN || _WIN32
+ q[3] = p[0];
+ q[2] = p[1];
+ q[1] = p[2];
+ q[0] = p[3];
+#else
+ memcpy(q, p, 4);
+#endif
+ return f;
+}
+
+LOCAL double get_ieee754_double(const uint8_t *restrict p)
+{
+ volatile double d;
+ uint8_t *q = (void *)&d;
+#if MMDB_LITTLE_ENDIAN || _WIN32
+ q[7] = p[0];
+ q[6] = p[1];
+ q[5] = p[2];
+ q[4] = p[3];
+ q[3] = p[4];
+ q[2] = p[5];
+ q[1] = p[6];
+ q[0] = p[7];
+#else
+ memcpy(q, p, 8);
+#endif
+
+ return d;
+}
+
+LOCAL uint32_t get_uint32(const uint8_t *p)
+{
+ return p[0] * 16777216U + p[1] * 65536 + p[2] * 256 + p[3];
+}
+
+LOCAL uint32_t get_uint24(const uint8_t *p)
+{
+ return p[0] * 65536U + p[1] * 256 + p[2];
+}
+
+LOCAL uint32_t get_uint16(const uint8_t *p)
+{
+ return p[0] * 256U + p[1];
+}
+
+LOCAL uint64_t get_uintX(const uint8_t *p, int length)
+{
+ uint64_t value = 0;
+ while (length-- > 0) {
+ value <<= 8;
+ value += *p++;
+ }
+ return value;
+}
+
+LOCAL int32_t get_sintX(const uint8_t *p, int length)
+{
+ return (int32_t)get_uintX(p, length);
+}
+
+void MMDB_free_entry_data_list(MMDB_entry_data_list_s *const entry_data_list)
+{
+ if (entry_data_list == NULL) {
+ return;
+ }
+ data_pool_destroy(entry_data_list->pool);
+}
+
+void MMDB_close(MMDB_s *const mmdb)
+{
+ free_mmdb_struct(mmdb);
+}
+
+LOCAL void free_mmdb_struct(MMDB_s *const mmdb)
+{
+ if (!mmdb) {
+ return;
+ }
+
+ if (NULL != mmdb->filename) {
+ FREE_AND_SET_NULL(mmdb->filename);
+ }
+ if (NULL != mmdb->file_content) {
+#ifdef _WIN32
+ UnmapViewOfFile(mmdb->file_content);
+ /* Winsock is only initialized if open was successful so we only have
+ * to cleanup then. */
+ WSACleanup();
+#else
+ munmap((void *)mmdb->file_content, mmdb->file_size);
+#endif
+ }
+
+ if (NULL != mmdb->metadata.database_type) {
+ FREE_AND_SET_NULL(mmdb->metadata.database_type);
+ }
+
+ free_languages_metadata(mmdb);
+ free_descriptions_metadata(mmdb);
+}
+
+LOCAL void free_languages_metadata(MMDB_s *mmdb)
+{
+ if (!mmdb->metadata.languages.names) {
+ return;
+ }
+
+ for (size_t i = 0; i < mmdb->metadata.languages.count; i++) {
+ FREE_AND_SET_NULL(mmdb->metadata.languages.names[i]);
+ }
+ FREE_AND_SET_NULL(mmdb->metadata.languages.names);
+}
+
+LOCAL void free_descriptions_metadata(MMDB_s *mmdb)
+{
+ if (!mmdb->metadata.description.count) {
+ return;
+ }
+
+ for (size_t i = 0; i < mmdb->metadata.description.count; i++) {
+ if (NULL != mmdb->metadata.description.descriptions[i]) {
+ if (NULL !=
+ mmdb->metadata.description.descriptions[i]->language) {
+ FREE_AND_SET_NULL(
+ mmdb->metadata.description.descriptions[i]->language);
+ }
+
+ if (NULL !=
+ mmdb->metadata.description.descriptions[i]->description) {
+ FREE_AND_SET_NULL(
+ mmdb->metadata.description.descriptions[i]->description);
+ }
+ FREE_AND_SET_NULL(mmdb->metadata.description.descriptions[i]);
+ }
+ }
+
+ FREE_AND_SET_NULL(mmdb->metadata.description.descriptions);
+}
+
+const char *MMDB_lib_version(void)
+{
+ return PACKAGE_VERSION;
+}
+
+int MMDB_dump_entry_data_list(FILE *const stream,
+ MMDB_entry_data_list_s *const entry_data_list,
+ int indent)
+{
+ int status;
+ dump_entry_data_list(stream, entry_data_list, indent, &status);
+ return status;
+}
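+
+/* Editorial illustration (not part of upstream libmaxminddb): a sketch of
+ * how the entry-data-list API defined in this file is typically combined,
+ * assuming "result" is a successful MMDB_lookup_result_s obtained as shown
+ * earlier. Kept under "#if 0" so it is never compiled. */
+#if 0
+static void example_dump(MMDB_lookup_result_s *result)
+{
+    MMDB_entry_data_list_s *entry_data_list = NULL;
+    int status = MMDB_get_entry_data_list(&result->entry, &entry_data_list);
+    if (MMDB_SUCCESS == status && NULL != entry_data_list) {
+        /* Pretty-print the whole record to stdout with no extra indent */
+        MMDB_dump_entry_data_list(stdout, entry_data_list, 0);
+    }
+    /* Safe to call with NULL; it simply returns */
+    MMDB_free_entry_data_list(entry_data_list);
+}
+#endif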
+
+LOCAL MMDB_entry_data_list_s *dump_entry_data_list(
+ FILE *stream, MMDB_entry_data_list_s *entry_data_list, int indent,
+ int *status)
+{
+ switch (entry_data_list->entry_data.type) {
+ case MMDB_DATA_TYPE_MAP:
+ {
+ uint32_t size = entry_data_list->entry_data.data_size;
+
+ print_indentation(stream, indent);
+ fprintf(stream, "{\n");
+ indent += 2;
+
+ for (entry_data_list = entry_data_list->next;
+ size && entry_data_list; size--) {
+
+ if (MMDB_DATA_TYPE_UTF8_STRING !=
+ entry_data_list->entry_data.type) {
+ *status = MMDB_INVALID_DATA_ERROR;
+ return NULL;
+ }
+ char *key =
+ mmdb_strndup(
+ (char *)entry_data_list->entry_data.utf8_string,
+ entry_data_list->entry_data.data_size);
+ if (NULL == key) {
+ *status = MMDB_OUT_OF_MEMORY_ERROR;
+ return NULL;
+ }
+
+ print_indentation(stream, indent);
+ fprintf(stream, "\"%s\": \n", key);
+ free(key);
+
+ entry_data_list = entry_data_list->next;
+ entry_data_list =
+ dump_entry_data_list(stream, entry_data_list, indent + 2,
+ status);
+
+ if (MMDB_SUCCESS != *status) {
+ return NULL;
+ }
+ }
+
+ indent -= 2;
+ print_indentation(stream, indent);
+ fprintf(stream, "}\n");
+ }
+ break;
+ case MMDB_DATA_TYPE_ARRAY:
+ {
+ uint32_t size = entry_data_list->entry_data.data_size;
+
+ print_indentation(stream, indent);
+ fprintf(stream, "[\n");
+ indent += 2;
+
+ for (entry_data_list = entry_data_list->next;
+ size && entry_data_list; size--) {
+ entry_data_list =
+ dump_entry_data_list(stream, entry_data_list, indent,
+ status);
+ if (MMDB_SUCCESS != *status) {
+ return NULL;
+ }
+ }
+
+ indent -= 2;
+ print_indentation(stream, indent);
+ fprintf(stream, "]\n");
+ }
+ break;
+ case MMDB_DATA_TYPE_UTF8_STRING:
+ {
+ char *string =
+ mmdb_strndup((char *)entry_data_list->entry_data.utf8_string,
+ entry_data_list->entry_data.data_size);
+ if (NULL == string) {
+ *status = MMDB_OUT_OF_MEMORY_ERROR;
+ return NULL;
+ }
+ print_indentation(stream, indent);
+ fprintf(stream, "\"%s\" <utf8_string>\n", string);
+ free(string);
+ entry_data_list = entry_data_list->next;
+ }
+ break;
+ case MMDB_DATA_TYPE_BYTES:
+ {
+ char *hex_string =
+ bytes_to_hex((uint8_t *)entry_data_list->entry_data.bytes,
+ entry_data_list->entry_data.data_size);
+
+ if (NULL == hex_string) {
+ *status = MMDB_OUT_OF_MEMORY_ERROR;
+ return NULL;
+ }
+
+ print_indentation(stream, indent);
+ fprintf(stream, "%s <bytes>\n", hex_string);
+ free(hex_string);
+
+ entry_data_list = entry_data_list->next;
+ }
+ break;
+ case MMDB_DATA_TYPE_DOUBLE:
+ print_indentation(stream, indent);
+ fprintf(stream, "%f <double>\n",
+ entry_data_list->entry_data.double_value);
+ entry_data_list = entry_data_list->next;
+ break;
+ case MMDB_DATA_TYPE_FLOAT:
+ print_indentation(stream, indent);
+ fprintf(stream, "%f <float>\n",
+ entry_data_list->entry_data.float_value);
+ entry_data_list = entry_data_list->next;
+ break;
+ case MMDB_DATA_TYPE_UINT16:
+ print_indentation(stream, indent);
+ fprintf(stream, "%u <uint16>\n", entry_data_list->entry_data.uint16);
+ entry_data_list = entry_data_list->next;
+ break;
+ case MMDB_DATA_TYPE_UINT32:
+ print_indentation(stream, indent);
+ fprintf(stream, "%u <uint32>\n", entry_data_list->entry_data.uint32);
+ entry_data_list = entry_data_list->next;
+ break;
+ case MMDB_DATA_TYPE_BOOLEAN:
+ print_indentation(stream, indent);
+ fprintf(stream, "%s <boolean>\n",
+ entry_data_list->entry_data.boolean ? "true" : "false");
+ entry_data_list = entry_data_list->next;
+ break;
+ case MMDB_DATA_TYPE_UINT64:
+ print_indentation(stream, indent);
+ fprintf(stream, "%" PRIu64 " <uint64>\n",
+ entry_data_list->entry_data.uint64);
+ entry_data_list = entry_data_list->next;
+ break;
+ case MMDB_DATA_TYPE_UINT128:
+ print_indentation(stream, indent);
+#if MMDB_UINT128_IS_BYTE_ARRAY
+ char *hex_string =
+ bytes_to_hex((uint8_t *)entry_data_list->entry_data.uint128, 16);
+ if (NULL == hex_string) {
+ *status = MMDB_OUT_OF_MEMORY_ERROR;
+ return NULL;
+ }
+ fprintf(stream, "0x%s <uint128>\n", hex_string);
+ free(hex_string);
+#else
+ uint64_t high = entry_data_list->entry_data.uint128 >> 64;
+ uint64_t low = (uint64_t)entry_data_list->entry_data.uint128;
+ fprintf(stream, "0x%016" PRIX64 "%016" PRIX64 " <uint128>\n", high,
+ low);
+#endif
+ entry_data_list = entry_data_list->next;
+ break;
+ case MMDB_DATA_TYPE_INT32:
+ print_indentation(stream, indent);
+ fprintf(stream, "%d <int32>\n", entry_data_list->entry_data.int32);
+ entry_data_list = entry_data_list->next;
+ break;
+ default:
+ *status = MMDB_INVALID_DATA_ERROR;
+ return NULL;
+ }
+
+ *status = MMDB_SUCCESS;
+ return entry_data_list;
+}
+
+LOCAL void print_indentation(FILE *stream, int i)
+{
+ char buffer[1024];
+ int size = i >= 1024 ? 1023 : i;
+ memset(buffer, 32, size);
+ buffer[size] = '\0';
+ fputs(buffer, stream);
+}
+
+LOCAL char *bytes_to_hex(uint8_t *bytes, uint32_t size)
+{
+ char *hex_string;
+ MAYBE_CHECK_SIZE_OVERFLOW(size, SIZE_MAX / 2 - 1, NULL);
+
+ hex_string = calloc((size * 2) + 1, sizeof(char));
+ if (NULL == hex_string) {
+ return NULL;
+ }
+
+ for (uint32_t i = 0; i < size; i++) {
+ sprintf(hex_string + (2 * i), "%02X", bytes[i]);
+ }
+
+ return hex_string;
+}
+
+const char *MMDB_strerror(int error_code)
+{
+ switch (error_code) {
+ case MMDB_SUCCESS:
+ return "Success (not an error)";
+ case MMDB_FILE_OPEN_ERROR:
+ return "Error opening the specified MaxMind DB file";
+ case MMDB_CORRUPT_SEARCH_TREE_ERROR:
+ return "The MaxMind DB file's search tree is corrupt";
+ case MMDB_INVALID_METADATA_ERROR:
+ return "The MaxMind DB file contains invalid metadata";
+ case MMDB_IO_ERROR:
+ return "An attempt to read data from the MaxMind DB file failed";
+ case MMDB_OUT_OF_MEMORY_ERROR:
+ return "A memory allocation call failed";
+ case MMDB_UNKNOWN_DATABASE_FORMAT_ERROR:
+ return
+ "The MaxMind DB file is in a format this library can't handle (unknown record size or binary format version)";
+ case MMDB_INVALID_DATA_ERROR:
+ return
+ "The MaxMind DB file's data section contains bad data (unknown data type or corrupt data)";
+ case MMDB_INVALID_LOOKUP_PATH_ERROR:
+ return
+ "The lookup path contained an invalid value (like a negative integer for an array index)";
+ case MMDB_LOOKUP_PATH_DOES_NOT_MATCH_DATA_ERROR:
+ return
+ "The lookup path does not match the data (key that doesn't exist, array index bigger than the array, expected array or map where none exists)";
+ case MMDB_INVALID_NODE_NUMBER_ERROR:
+ return
+ "The MMDB_read_node function was called with a node number that does not exist in the search tree";
+ case MMDB_IPV6_LOOKUP_IN_IPV4_DATABASE_ERROR:
+ return
+ "You attempted to look up an IPv6 address in an IPv4-only database";
+ default:
+ return "Unknown error code";
+ }
+}
diff --git a/src/fluent-bit/plugins/filter_grep/CMakeLists.txt b/src/fluent-bit/plugins/filter_grep/CMakeLists.txt
new file mode 100644
index 000000000..056b94092
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_grep/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ grep.c)
+
+FLB_PLUGIN(filter_grep "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_grep/grep.c b/src/fluent-bit/plugins/filter_grep/grep.c
new file mode 100644
index 000000000..7a4657093
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_grep/grep.c
@@ -0,0 +1,434 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <sys/types.h>
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_regex.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+#include "grep.h"
+
+static void delete_rules(struct grep_ctx *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct grep_rule *rule;
+
+ mk_list_foreach_safe(head, tmp, &ctx->rules) {
+ rule = mk_list_entry(head, struct grep_rule, _head);
+ flb_sds_destroy(rule->field);
+ flb_free(rule->regex_pattern);
+ flb_ra_destroy(rule->ra);
+ flb_regex_destroy(rule->regex);
+ mk_list_del(&rule->_head);
+ flb_free(rule);
+ }
+}
+
+static int set_rules(struct grep_ctx *ctx, struct flb_filter_instance *f_ins)
+{
+ int first_rule = GREP_NO_RULE;
+ flb_sds_t tmp;
+ struct mk_list *head;
+ struct mk_list *split;
+ struct flb_split_entry *sentry;
+ struct flb_kv *kv;
+ struct grep_rule *rule;
+
+ /* Iterate all filter properties */
+ mk_list_foreach(head, &f_ins->properties) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+
+ /* Create a new rule */
+ rule = flb_malloc(sizeof(struct grep_rule));
+ if (!rule) {
+ flb_errno();
+ return -1;
+ }
+
+ /* Get the type */
+ if (strcasecmp(kv->key, "regex") == 0) {
+ rule->type = GREP_REGEX;
+ }
+ else if (strcasecmp(kv->key, "exclude") == 0) {
+ rule->type = GREP_EXCLUDE;
+ }
+ else {
+ /* Other property. Skip */
+ flb_free(rule);
+ continue;
+ }
+
+ if (ctx->logical_op != GREP_LOGICAL_OP_LEGACY && first_rule != GREP_NO_RULE) {
+ /* 'AND'/'OR' case */
+ if (first_rule != rule->type) {
+ flb_plg_error(ctx->ins, "Both 'regex' and 'exclude' are set.");
+ delete_rules(ctx);
+ flb_free(rule);
+ return -1;
+ }
+ }
+ first_rule = rule->type;
+
+ /* As a value we expect a pair of field name and a regular expression */
+ split = flb_utils_split(kv->val, ' ', 1);
+ if (mk_list_size(split) != 2) {
+ flb_plg_error(ctx->ins,
+ "invalid regex, expected field and regular expression");
+ delete_rules(ctx);
+ flb_free(rule);
+ flb_utils_split_free(split);
+ return -1;
+ }
+
+ /* Get first value (field) */
+ sentry = mk_list_entry_first(split, struct flb_split_entry, _head);
+ if (*sentry->value == '$') {
+ rule->field = flb_sds_create_len(sentry->value, sentry->len);
+ }
+ else {
+ rule->field = flb_sds_create_size(sentry->len + 2);
+ tmp = flb_sds_cat(rule->field, "$", 1);
+ rule->field = tmp;
+
+ tmp = flb_sds_cat(rule->field, sentry->value, sentry->len);
+ rule->field = tmp;
+ }
+
+ /* Get remaining content (regular expression) */
+ sentry = mk_list_entry_last(split, struct flb_split_entry, _head);
+ rule->regex_pattern = flb_strndup(sentry->value, sentry->len);
+ if (rule->regex_pattern == NULL) {
+ flb_errno();
+ delete_rules(ctx);
+ flb_free(rule);
+ flb_utils_split_free(split);
+ return -1;
+ }
+
+ /* Release split */
+ flb_utils_split_free(split);
+
+ /* Create a record accessor context for this rule */
+ rule->ra = flb_ra_create(rule->field, FLB_FALSE);
+ if (!rule->ra) {
+ flb_plg_error(ctx->ins, "invalid record accessor? '%s'", rule->field);
+ delete_rules(ctx);
+ flb_free(rule);
+ return -1;
+ }
+
+ /* Convert string to regex pattern */
+ rule->regex = flb_regex_create(rule->regex_pattern);
+ if (!rule->regex) {
+ flb_plg_error(ctx->ins, "could not compile regex pattern '%s'",
+ rule->regex_pattern);
+ delete_rules(ctx);
+ flb_free(rule);
+ return -1;
+ }
+
+ /* Link to parent list */
+ mk_list_add(&rule->_head, &ctx->rules);
+ }
+
+ return 0;
+}
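+
+/*
+ * For example (hypothetical key and pattern), a property such as
+ *
+ *   Regex log ^ERROR
+ *
+ * is split on the first space: rule->field becomes "$log" (a '$' prefix is
+ * prepended when missing so the value is a valid record accessor pattern)
+ * and rule->regex_pattern becomes "^ERROR". Record accessor syntax also
+ * allows nested keys, e.g. "Regex $kubernetes['labels']['app'] backend".
+ */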
+
+/* Given a msgpack record, do some filter action based on the defined rules */
+static inline int grep_filter_data(msgpack_object map, struct grep_ctx *ctx)
+{
+ ssize_t ret;
+ struct mk_list *head;
+ struct grep_rule *rule;
+
+ /* For each rule, validate against map fields */
+ mk_list_foreach(head, &ctx->rules) {
+ rule = mk_list_entry(head, struct grep_rule, _head);
+
+ ret = flb_ra_regex_match(rule->ra, map, rule->regex, NULL);
+ if (ret <= 0) { /* no match */
+ if (rule->type == GREP_REGEX) {
+ return GREP_RET_EXCLUDE;
+ }
+ }
+ else {
+ if (rule->type == GREP_EXCLUDE) {
+ return GREP_RET_EXCLUDE;
+ }
+ else {
+ return GREP_RET_KEEP;
+ }
+ }
+ }
+
+ return GREP_RET_KEEP;
+}
+
+static int cb_grep_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config,
+ void *data)
+{
+ int ret;
+ size_t len;
+ const char* val;
+ struct grep_ctx *ctx;
+
+ /* Create context */
+ ctx = flb_malloc(sizeof(struct grep_ctx));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ if (flb_filter_config_map_set(f_ins, ctx) < 0) {
+ flb_errno();
+ flb_plg_error(f_ins, "configuration error");
+ flb_free(ctx);
+ return -1;
+ }
+ mk_list_init(&ctx->rules);
+ ctx->ins = f_ins;
+
+ ctx->logical_op = GREP_LOGICAL_OP_LEGACY;
+ val = flb_filter_get_property("logical_op", f_ins);
+ if (val != NULL) {
+ len = strlen(val);
+ if (len == 3 && strncasecmp("AND", val, len) == 0) {
+ flb_plg_info(ctx->ins, "AND mode");
+ ctx->logical_op = GREP_LOGICAL_OP_AND;
+ }
+ else if (len == 2 && strncasecmp("OR", val, len) == 0) {
+ flb_plg_info(ctx->ins, "OR mode");
+ ctx->logical_op = GREP_LOGICAL_OP_OR;
+ }
+ else if (len == 6 && strncasecmp("legacy", val, len) == 0) {
+ flb_plg_info(ctx->ins, "legacy mode");
+ ctx->logical_op = GREP_LOGICAL_OP_LEGACY;
+ }
+ }
+
+ /* Load rules */
+ ret = set_rules(ctx, f_ins);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Set our context */
+ flb_filter_set_context(f_ins, ctx);
+ return 0;
+}
+
+static inline int grep_filter_data_and_or(msgpack_object map, struct grep_ctx *ctx)
+{
+ ssize_t ra_ret;
+ int found = FLB_FALSE;
+ struct mk_list *head;
+ struct grep_rule *rule;
+
+ /* For each rule, validate against map fields */
+ mk_list_foreach(head, &ctx->rules) {
+ found = FLB_FALSE;
+ rule = mk_list_entry(head, struct grep_rule, _head);
+
+ ra_ret = flb_ra_regex_match(rule->ra, map, rule->regex, NULL);
+ if (ra_ret > 0) {
+ found = FLB_TRUE;
+ }
+
+ if (ctx->logical_op == GREP_LOGICAL_OP_OR && found == FLB_TRUE) {
+ /* OR case: One rule is matched. */
+ goto grep_filter_data_and_or_end;
+ }
+ else if (ctx->logical_op == GREP_LOGICAL_OP_AND && found == FLB_FALSE) {
+ /* AND case: One rule is not matched */
+ goto grep_filter_data_and_or_end;
+ }
+ }
+
+ grep_filter_data_and_or_end:
+ if (rule->type == GREP_REGEX) {
+ return found ? GREP_RET_KEEP : GREP_RET_EXCLUDE;
+ }
+
+ /* rule is exclude */
+ return found ? GREP_RET_EXCLUDE : GREP_RET_KEEP;
+}
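+
+/*
+ * Behaviour sketch for the AND/OR modes (assuming two 'regex' rules on
+ * hypothetical keys "level" and "service"):
+ *
+ *   logical_op AND: the record is kept only if BOTH patterns match; the
+ *                   first non-matching rule short-circuits the loop.
+ *   logical_op OR:  the record is kept as soon as ONE pattern matches.
+ *
+ * With 'exclude' rules the outcome is inverted: any match (OR) or a match
+ * of every rule (AND) drops the record. Note that set_rules() rejects
+ * configurations that mix 'regex' and 'exclude' in these modes.
+ */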
+
+static int cb_grep_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_size,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *context,
+ struct flb_config *config)
+{
+ int ret;
+ int old_size = 0;
+ int new_size = 0;
+ msgpack_object map;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ struct grep_ctx *ctx;
+
+ (void) f_ins;
+ (void) i_ins;
+ (void) config;
+
+ ctx = (struct grep_ctx *) context;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ old_size++;
+
+ /* get time and map */
+ map = *log_event.body;
+
+ if (ctx->logical_op == GREP_LOGICAL_OP_LEGACY) {
+ ret = grep_filter_data(map, ctx);
+ }
+ else {
+ ret = grep_filter_data_and_or(map, ctx);
+ }
+
+ if (ret == GREP_RET_KEEP) {
+ ret = flb_log_event_encoder_emit_raw_record(
+ &log_encoder,
+ log_decoder.record_base,
+ log_decoder.record_length);
+
+ new_size++;
+ }
+ else if (ret == GREP_RET_EXCLUDE) {
+ /* Do nothing */
+ }
+ }
+
+ if (ret == FLB_EVENT_DECODER_ERROR_INSUFFICIENT_DATA &&
+ log_decoder.offset == bytes) {
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+    /* did we keep everything? */
+ if (old_size == new_size) {
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ /* Destroy the buffer to avoid more overhead */
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ *out_buf = log_encoder.output_buffer;
+ *out_size = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", ret);
+
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+static int cb_grep_exit(void *data, struct flb_config *config)
+{
+ struct grep_ctx *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ delete_rules(ctx);
+ flb_free(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "regex", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Keep records in which the content of KEY matches the regular expression."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "exclude", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Exclude records in which the content of KEY matches the regular expression."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "logical_op", "legacy",
+ 0, FLB_FALSE, 0,
+     "Specify whether to use logical conjunction or disjunction. legacy, AND and OR are allowed."
+ },
+ {0}
+};
+
+struct flb_filter_plugin filter_grep_plugin = {
+ .name = "grep",
+ .description = "grep events by specified field values",
+ .cb_init = cb_grep_init,
+ .cb_filter = cb_grep_filter,
+ .cb_exit = cb_grep_exit,
+ .config_map = config_map,
+ .flags = 0
+};
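+
+/*
+ * A minimal configuration sketch for this plugin (classic config syntax;
+ * keys and patterns are illustrative):
+ *
+ *   [FILTER]
+ *       Name        grep
+ *       Match       *
+ *       Regex       log ^ERROR
+ *
+ *   [FILTER]
+ *       Name        grep
+ *       Match       kube.*
+ *       Logical_Op  AND
+ *       Regex       level (error|warn)
+ *       Regex       service payments
+ */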
diff --git a/src/fluent-bit/plugins/filter_grep/grep.h b/src/fluent-bit/plugins/filter_grep/grep.h
new file mode 100644
index 000000000..dc48c8f61
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_grep/grep.h
@@ -0,0 +1,58 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_GREP_H
+#define FLB_FILTER_GREP_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_record_accessor.h>
+
+/* rule types */
+#define GREP_NO_RULE 0
+#define GREP_REGEX 1
+#define GREP_EXCLUDE 2
+
+/* actions */
+#define GREP_RET_KEEP 0
+#define GREP_RET_EXCLUDE 1
+
+enum _logical_op{
+ GREP_LOGICAL_OP_LEGACY,
+ GREP_LOGICAL_OP_OR,
+ GREP_LOGICAL_OP_AND
+} logical_op;
+
+struct grep_ctx {
+ struct mk_list rules;
+ int logical_op;
+ struct flb_filter_instance *ins;
+};
+
+struct grep_rule {
+ int type;
+ flb_sds_t field;
+ char *regex_pattern;
+ struct flb_regex *regex;
+ struct flb_record_accessor *ra;
+ struct mk_list _head;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_kubernetes/CMakeLists.txt b/src/fluent-bit/plugins/filter_kubernetes/CMakeLists.txt
new file mode 100644
index 000000000..d3ebac7ef
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_kubernetes/CMakeLists.txt
@@ -0,0 +1,14 @@
+set(src
+ kube_conf.c
+ kube_meta.c
+ kube_regex.c
+ kube_property.c
+ kubernetes.c
+ )
+
+FLB_PLUGIN(filter_kubernetes "${src}" "")
+
+# K8s token command is currently Linux-only.
+if (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
+ FLB_DEFINITION(FLB_HAVE_KUBE_TOKEN_COMMAND)
+endif()
\ No newline at end of file
diff --git a/src/fluent-bit/plugins/filter_kubernetes/kube_conf.c b/src/fluent-bit/plugins/filter_kubernetes/kube_conf.c
new file mode 100644
index 000000000..9dc360b48
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_kubernetes/kube_conf.c
@@ -0,0 +1,232 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_hash_table.h>
+
+#ifndef FLB_HAVE_TLS
+#error "Fluent Bit was built without TLS support"
+#endif
+
+#include "kube_meta.h"
+#include "kube_conf.h"
+
+struct flb_kube *flb_kube_conf_create(struct flb_filter_instance *ins,
+ struct flb_config *config)
+{
+ int off;
+ int ret;
+ const char *url;
+ const char *tmp;
+ const char *p;
+ const char *cmd;
+ struct flb_kube *ctx;
+
+ ctx = flb_calloc(1, sizeof(struct flb_kube));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->config = config;
+ ctx->ins = ins;
+
+ /* Set config_map properties in our local context */
+ ret = flb_filter_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* K8s Token Command */
+ cmd = flb_filter_get_property("kube_token_command", ins);
+ if (cmd) {
+ ctx->kube_token_command = cmd;
+ }
+ else {
+ ctx->kube_token_command = NULL;
+ }
+ ctx->kube_token_create = 0;
+
+ /* Merge Parser */
+ tmp = flb_filter_get_property("merge_parser", ins);
+ if (tmp) {
+ ctx->merge_parser = flb_parser_get(tmp, config);
+ if (!ctx->merge_parser) {
+ flb_plg_error(ctx->ins, "parser '%s' is not registered", tmp);
+ }
+ }
+ else {
+ ctx->merge_parser = NULL;
+ }
+
+ /* Get Kubernetes API server */
+ url = flb_filter_get_property("kube_url", ins);
+
+ if (ctx->use_tag_for_meta) {
+ ctx->api_https = FLB_FALSE;
+ }
+ else if (ctx->use_kubelet) {
+ ctx->api_host = flb_strdup(ctx->kubelet_host);
+ ctx->api_port = ctx->kubelet_port;
+ ctx->api_https = FLB_TRUE;
+
+ /* This is for unit test diagnostic purposes */
+ if (ctx->meta_preload_cache_dir) {
+ ctx->api_https = FLB_FALSE;
+ }
+
+ }
+ else if (!url) {
+ ctx->api_host = flb_strdup(FLB_API_HOST);
+ ctx->api_port = FLB_API_PORT;
+ ctx->api_https = FLB_API_TLS;
+ }
+ else {
+ tmp = url;
+
+ /* Check the protocol */
+ if (strncmp(tmp, "http://", 7) == 0) {
+ off = 7;
+ ctx->api_https = FLB_FALSE;
+ }
+ else if (strncmp(tmp, "https://", 8) == 0) {
+ off = 8;
+ ctx->api_https = FLB_TRUE;
+ }
+ else {
+ flb_kube_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* Get hostname and TCP port */
+ p = url + off;
+ tmp = strchr(p, ':');
+ if (tmp) {
+ ctx->api_host = flb_strndup(p, tmp - p);
+ tmp++;
+ ctx->api_port = atoi(tmp);
+ }
+ else {
+ ctx->api_host = flb_strdup(p);
+ ctx->api_port = FLB_API_PORT;
+ }
+ }
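+
+    /*
+     * For example (illustrative values): kube_url "https://10.96.0.1:6443"
+     * yields api_https = FLB_TRUE, api_host = "10.96.0.1" and
+     * api_port = 6443, while leaving kube_url unset falls back to the
+     * defaults "kubernetes.default.svc", port 443 over TLS.
+     */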
+
+ snprintf(ctx->kube_url, sizeof(ctx->kube_url) - 1,
+ "%s://%s:%i",
+ ctx->api_https ? "https" : "http",
+ ctx->api_host, ctx->api_port);
+
+ if (ctx->kube_meta_cache_ttl > 0) {
+ ctx->hash_table = flb_hash_table_create_with_ttl(ctx->kube_meta_cache_ttl,
+ FLB_HASH_TABLE_EVICT_OLDER,
+ FLB_HASH_TABLE_SIZE,
+ FLB_HASH_TABLE_SIZE);
+ }
+ else {
+ ctx->hash_table = flb_hash_table_create(FLB_HASH_TABLE_EVICT_RANDOM,
+ FLB_HASH_TABLE_SIZE,
+ FLB_HASH_TABLE_SIZE);
+ }
+
+ if (!ctx->hash_table) {
+ flb_kube_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* Merge log buffer */
+ if (ctx->merge_log == FLB_TRUE) {
+ ctx->unesc_buf = flb_malloc(FLB_MERGE_BUF_SIZE);
+ ctx->unesc_buf_size = FLB_MERGE_BUF_SIZE;
+ }
+
+ /* Custom Regex */
+ tmp = flb_filter_get_property("regex_parser", ins);
+ if (tmp) {
+ /* Get custom parser */
+ ctx->parser = flb_parser_get(tmp, config);
+ if (!ctx->parser) {
+ flb_plg_error(ctx->ins, "invalid parser '%s'", tmp);
+ flb_kube_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* Force to regex parser */
+ if (ctx->parser->type != FLB_PARSER_REGEX) {
+ flb_plg_error(ctx->ins, "invalid parser type '%s'", tmp);
+ flb_kube_conf_destroy(ctx);
+ return NULL;
+ }
+ else {
+ ctx->regex = ctx->parser->regex;
+ }
+ }
+
+ if (!ctx->use_tag_for_meta) {
+ flb_plg_info(ctx->ins, "https=%i host=%s port=%i",
+ ctx->api_https, ctx->api_host, ctx->api_port);
+ }
+ return ctx;
+}
+
+void flb_kube_conf_destroy(struct flb_kube *ctx)
+{
+ if (ctx == NULL) {
+ return;
+ }
+
+ if (ctx->hash_table) {
+ flb_hash_table_destroy(ctx->hash_table);
+ }
+
+ if (ctx->merge_log == FLB_TRUE) {
+ flb_free(ctx->unesc_buf);
+ }
+
+ /* Destroy regex content only if a parser was not defined */
+ if (ctx->parser == NULL && ctx->regex) {
+ flb_regex_destroy(ctx->regex);
+ }
+
+ flb_free(ctx->api_host);
+ flb_free(ctx->token);
+ flb_free(ctx->namespace);
+ flb_free(ctx->podname);
+ flb_free(ctx->auth);
+
+ if (ctx->upstream) {
+ flb_upstream_destroy(ctx->upstream);
+ }
+
+#ifdef FLB_HAVE_TLS
+ if (ctx->tls) {
+ flb_tls_destroy(ctx->tls);
+ }
+#endif
+
+ flb_free(ctx);
+}
diff --git a/src/fluent-bit/plugins/filter_kubernetes/kube_conf.h b/src/fluent-bit/plugins/filter_kubernetes/kube_conf.h
new file mode 100644
index 000000000..f12b6dc27
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_kubernetes/kube_conf.h
@@ -0,0 +1,174 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_KUBE_CONF_H
+#define FLB_FILTER_KUBE_CONF_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_macros.h>
+#include <fluent-bit/flb_io.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_regex.h>
+#include <fluent-bit/flb_hash_table.h>
+
+/*
+ * Since this filter might get a high number of requests per second,
+ * we need to keep some cached data to perform filtering, e.g:
+ *
+ * tag -> regex: pod name, container ID, container name, etc
+ *
+ * By default, we define a hash table for 256 entries.
+ */
+#define FLB_HASH_TABLE_SIZE 256
+
+/*
+ * When merging nested JSON strings from Docker logs, we need a temporary
+ * buffer to perform the conversion. To optimize the process, we pre-allocate
+ * a buffer for that purpose. FLB_MERGE_BUF_SIZE defines the buffer size.
+ *
+ * Note: this is only the initial buffer size; it can grow as needed for
+ * every incoming JSON string.
+ */
+#define FLB_MERGE_BUF_SIZE 2048 /* 2KB */
+
+/* Kubernetes API server info */
+#define FLB_API_HOST "kubernetes.default.svc"
+#define FLB_API_PORT 443
+#define FLB_API_TLS FLB_TRUE
+
+/*
+ * Default expected Kubernetes tag prefix, this is used mostly when source
+ * data comes from in_tail with custom tags like: kube.service.*
+ */
+#ifdef FLB_SYSTEM_WINDOWS
+#define FLB_KUBE_TAG_PREFIX "kube.c.var.log.containers."
+#else
+#define FLB_KUBE_TAG_PREFIX "kube.var.log.containers."
+#endif
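+
+/*
+ * For example, with a hypothetical tail input configured with
+ * "Path /var/log/containers/*.log" and "Tag kube.*", records end up tagged
+ * like
+ *
+ *   kube.var.log.containers.myapp-1234_default_myapp-<container id>.log
+ *
+ * i.e. <pod>_<namespace>_<container>-<container id>.log follows the prefix,
+ * which is what the filter's regex extracts after stripping the prefix.
+ */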
+
+struct kube_meta;
+
+/* Filter context */
+struct flb_kube {
+ /* Configuration parameters */
+ char *api_host;
+ int api_port;
+ int api_https;
+ int use_journal;
+ int cache_use_docker_id;
+ int labels;
+ int annotations;
+ int dummy_meta;
+ int tls_debug;
+ int tls_verify;
+ int kube_token_ttl;
+ flb_sds_t meta_preload_cache_dir;
+
+ /* Configuration proposed through Annotations (boolean) */
+    int k8s_logging_parser;   /* allow processing of a suggested parser ? */
+    int k8s_logging_exclude;  /* allow suggesting that logs be excluded ? */
+
+ /* HTTP Client Setup */
+ size_t buffer_size;
+
+ /* Merge Log feature */
+ int merge_log; /* old merge_json_log */
+
+ struct flb_parser *merge_parser;
+
+    /* Temporary buffer to unescape strings */
+ size_t unesc_buf_size;
+ size_t unesc_buf_len;
+ char *unesc_buf;
+
+ /*
+     * Merge Log Trim: if merge_log is enabled, this flag allows trimming
+     * the value and removing any trailing \n or \r.
+ */
+ int merge_log_trim;
+
+ /* Log key, old merge_json_key (default 'log') */
+ flb_sds_t merge_log_key;
+
+ /* Keep original log key after successful parsing */
+ int keep_log;
+
+ /* API Server end point */
+ char kube_url[1024];
+
+ /* Kubernetes tag prefix */
+ flb_sds_t kube_tag_prefix;
+
+ /* Regex context to parse records */
+ struct flb_regex *regex;
+ struct flb_parser *parser;
+
+ /* TLS CA certificate file */
+ char *tls_ca_path;
+ char *tls_ca_file;
+
+ /* TLS virtual host (optional), set by configmap */
+ flb_sds_t tls_vhost;
+
+ /* Kubernetes Namespace */
+ char *namespace;
+ size_t namespace_len;
+
+ /* POD Name where Fluent Bit is running */
+ char *podname;
+ size_t podname_len;
+
+ /* Kubernetes Token from FLB_KUBE_TOKEN file */
+ char *token_file;
+ char *token;
+ size_t token_len;
+ /* Command to get Kubernetes Authorization Token */
+ const char *kube_token_command;
+ int kube_token_create;
+
+ /* Pre-formatted HTTP Authorization header value */
+ char *auth;
+ size_t auth_len;
+
+ int dns_retries;
+ int dns_wait_time;
+
+ int use_tag_for_meta;
+ int use_kubelet;
+ char *kubelet_host;
+ int kubelet_port;
+
+ int kube_meta_cache_ttl;
+
+ struct flb_tls *tls;
+
+ struct flb_config *config;
+ struct flb_hash_table *hash_table;
+ struct flb_upstream *upstream;
+ struct flb_filter_instance *ins;
+};
+
+struct flb_kube *flb_kube_conf_create(struct flb_filter_instance *i,
+ struct flb_config *config);
+void flb_kube_conf_destroy(struct flb_kube *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_kubernetes/kube_meta.c b/src/fluent-bit/plugins/filter_kubernetes/kube_meta.c
new file mode 100644
index 000000000..fbad2bb02
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_kubernetes/kube_meta.c
@@ -0,0 +1,1650 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_hash_table.h>
+#include <fluent-bit/flb_regex.h>
+#include <fluent-bit/flb_io.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_env.h>
+#include <fluent-bit/tls/flb_tls.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <msgpack.h>
+
+#include "kube_conf.h"
+#include "kube_meta.h"
+#include "kube_property.h"
+
+#define FLB_KUBE_META_CONTAINER_STATUSES_KEY "containerStatuses"
+#define FLB_KUBE_META_CONTAINER_STATUSES_KEY_LEN \
+ (sizeof(FLB_KUBE_META_CONTAINER_STATUSES_KEY) - 1)
+#define FLB_KUBE_META_INIT_CONTAINER_STATUSES_KEY "initContainerStatuses"
+#define FLB_KUBE_META_INIT_CONTAINER_STATUSES_KEY_LEN \
+ (sizeof(FLB_KUBE_META_INIT_CONTAINER_STATUSES_KEY) - 1)
+#define FLB_KUBE_TOKEN_BUF_SIZE 8192 /* 8KB */
+
+static int file_to_buffer(const char *path,
+ char **out_buf, size_t *out_size)
+{
+ int ret;
+ char *buf;
+ ssize_t bytes;
+ FILE *fp;
+ struct stat st;
+
+ if (!(fp = fopen(path, "r"))) {
+ return -1;
+ }
+
+ ret = stat(path, &st);
+ if (ret == -1) {
+ flb_errno();
+ fclose(fp);
+ return -1;
+ }
+
+ buf = flb_calloc(1, (st.st_size + 1));
+ if (!buf) {
+ flb_errno();
+ fclose(fp);
+ return -1;
+ }
+
+ bytes = fread(buf, st.st_size, 1, fp);
+ if (bytes < 1) {
+ flb_free(buf);
+ fclose(fp);
+ return -1;
+ }
+
+ fclose(fp);
+
+ *out_buf = buf;
+ *out_size = st.st_size;
+
+ return 0;
+}
+
+#ifdef FLB_HAVE_KUBE_TOKEN_COMMAND
+/* Run command to get Kubernetes authorization token */
+static int get_token_with_command(const char *command,
+ char **out_buf, size_t *out_size)
+{
+ FILE *fp;
+ char buf[FLB_KUBE_TOKEN_BUF_SIZE];
+ char *temp;
+ char *res;
+ size_t size = 0;
+ size_t len = 0;
+
+ fp = popen(command, "r");
+ if (fp == NULL) {
+ return -1;
+ }
+
+ res = flb_calloc(1, FLB_KUBE_TOKEN_BUF_SIZE);
+ if (!res) {
+ flb_errno();
+ pclose(fp);
+ return -1;
+ }
+
+ while (fgets(buf, sizeof(buf), fp) != NULL) {
+ len = strlen(buf);
+ if (len >= FLB_KUBE_TOKEN_BUF_SIZE - 1) {
+ temp = flb_realloc(res, (FLB_KUBE_TOKEN_BUF_SIZE + size) * 2);
+ if (temp == NULL) {
+ flb_errno();
+ flb_free(res);
+ pclose(fp);
+ return -1;
+ }
+ res = temp;
+ }
+ strcpy(res + size, buf);
+ size += len;
+ }
+
+ if (strlen(res) < 1) {
+ flb_free(res);
+ pclose(fp);
+ return -1;
+ }
+
+ pclose(fp);
+
+ *out_buf = res;
+ *out_size = strlen(res);
+
+ return 0;
+}
+#endif
+
+/* Set K8s Authorization Token and get HTTP Auth Header */
+static int get_http_auth_header(struct flb_kube *ctx)
+{
+ int ret;
+ char *temp;
+ char *tk = NULL;
+ size_t tk_size = 0;
+
+ if (ctx->kube_token_command != NULL) {
+#ifdef FLB_HAVE_KUBE_TOKEN_COMMAND
+ ret = get_token_with_command(ctx->kube_token_command, &tk, &tk_size);
+#else
+ ret = -1;
+#endif
+ if (ret == -1) {
+ flb_plg_warn(ctx->ins, "failed to run command %s", ctx->kube_token_command);
+ }
+ }
+ else {
+ ret = file_to_buffer(ctx->token_file, &tk, &tk_size);
+ if (ret == -1) {
+ flb_plg_warn(ctx->ins, "cannot open %s", FLB_KUBE_TOKEN);
+ }
+        flb_plg_info(ctx->ins, "token updated");
+ }
+ ctx->kube_token_create = time(NULL);
+
+ /* Token */
+ if (ctx->token != NULL) {
+ flb_free(ctx->token);
+ }
+ ctx->token = tk;
+ ctx->token_len = tk_size;
+
+ /* HTTP Auth Header */
+ if (ctx->auth == NULL) {
+ ctx->auth = flb_malloc(tk_size + 32);
+ }
+ else if (ctx->auth_len < tk_size + 32) {
+ temp = flb_realloc(ctx->auth, tk_size + 32);
+ if (temp == NULL) {
+ flb_free(ctx->auth);
+ ctx->auth = NULL;
+ return -1;
+ }
+ ctx->auth = temp;
+ }
+
+ if (!ctx->auth) {
+ return -1;
+ }
+ ctx->auth_len = snprintf(ctx->auth, tk_size + 32,
+ "Bearer %s",
+ tk);
+
+ return 0;
+}
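+
+/*
+ * The resulting ctx->auth buffer holds the pre-formatted header value,
+ * e.g. "Bearer eyJhbGciOi..." (token value illustrative); it is attached
+ * later to API server / Kubelet requests as the "Authorization" header.
+ */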
+
+/* Refresh HTTP Auth Header if K8s Authorization Token is expired */
+static int refresh_token_if_needed(struct flb_kube *ctx)
+{
+ int expired = 0;
+ int ret;
+
+ if (ctx->kube_token_create > 0) {
+ if (time(NULL) > ctx->kube_token_create + ctx->kube_token_ttl) {
+ expired = FLB_TRUE;
+ }
+ }
+
+ if (expired || ctx->kube_token_create == 0) {
+ ret = get_http_auth_header(ctx);
+ if (ret == -1) {
+ flb_plg_warn(ctx->ins, "failed to set http auth header");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void expose_k8s_meta(struct flb_kube *ctx)
+{
+ char *tmp;
+ struct flb_env *env;
+
+ env = ctx->config->env;
+
+ flb_env_set(env, "k8s", "enabled");
+ flb_env_set(env, "k8s.namespace", ctx->namespace);
+ flb_env_set(env, "k8s.pod_name", ctx->podname);
+
+ tmp = (char *) flb_env_get(env, "NODE_NAME");
+ if (tmp) {
+ flb_env_set(env, "k8s.node_name", tmp);
+ }
+}
+
+/* Load local information from a POD context */
+static int get_local_pod_info(struct flb_kube *ctx)
+{
+ int ret;
+ char *ns;
+ size_t ns_size;
+ char *hostname;
+
+ /* Get the namespace name */
+ ret = file_to_buffer(FLB_KUBE_NAMESPACE, &ns, &ns_size);
+ if (ret == -1) {
+ /*
+         * If this fails it is only informational: most likely the caller
+         * intends to connect through the Proxy rather than from inside a POD.
+ */
+ flb_plg_warn(ctx->ins, "cannot open %s", FLB_KUBE_NAMESPACE);
+ return FLB_FALSE;
+ }
+
+ /* Namespace */
+ ctx->namespace = ns;
+ ctx->namespace_len = ns_size;
+
+ /* POD Name */
+ hostname = getenv("HOSTNAME");
+ if (hostname) {
+ ctx->podname = flb_strdup(hostname);
+ ctx->podname_len = strlen(ctx->podname);
+ }
+ else {
+ char tmp[256];
+ gethostname(tmp, 256);
+ ctx->podname = flb_strdup(tmp);
+ ctx->podname_len = strlen(ctx->podname);
+ }
+
+ /* If a namespace was recognized, a token is mandatory */
+ /* Use the token to get HTTP Auth Header*/
+ ret = get_http_auth_header(ctx);
+ if (ret == -1) {
+ flb_plg_warn(ctx->ins, "failed to set http auth header");
+ return FLB_FALSE;
+ }
+
+ expose_k8s_meta(ctx);
+ return FLB_TRUE;
+}
+
+/*
+ * If a file named <namespace>_<podname>.meta exists, load and use it.
+ * If not, fall back to the API. This is primarily for diagnostic purposes,
+ * e.g. debugging new parsers.
+ */
+static int get_meta_file_info(struct flb_kube *ctx, const char *namespace,
+ const char *podname, char **buffer, size_t *size,
+ int *root_type) {
+
+ int fd = -1;
+ char *payload = NULL;
+ size_t payload_size = 0;
+ struct stat sb;
+ int packed = -1;
+ int ret;
+ char uri[1024];
+
+ if (ctx->meta_preload_cache_dir && namespace && podname) {
+
+ ret = snprintf(uri, sizeof(uri) - 1, "%s/%s_%s.meta",
+ ctx->meta_preload_cache_dir, namespace, podname);
+ if (ret > 0) {
+ fd = open(uri, O_RDONLY, 0);
+ if (fd != -1) {
+ if (fstat(fd, &sb) == 0) {
+ payload = flb_malloc(sb.st_size);
+ if (!payload) {
+ flb_errno();
+ }
+ else {
+ ret = read(fd, payload, sb.st_size);
+ if (ret == sb.st_size) {
+ payload_size = ret;
+ }
+ }
+ }
+ close(fd);
+ }
+ }
+
+ if (payload_size) {
+ packed = flb_pack_json(payload, payload_size,
+ buffer, size, root_type,
+ NULL);
+ }
+
+ if (payload) {
+ flb_free(payload);
+ }
+ }
+
+ return packed;
+}
+
+/* Gather metadata via an HTTP request, sent either to the Kubernetes
+ * API server or to the Kubelet.
+ */
+static int get_meta_info_from_request(struct flb_kube *ctx,
+ const char *namespace,
+ const char *podname,
+ char **buffer, size_t *size,
+ int *root_type,
+ char* uri)
+{
+ struct flb_http_client *c;
+ struct flb_connection *u_conn;
+ int ret;
+ size_t b_sent;
+ int packed;
+
+ if (!ctx->upstream) {
+ return -1;
+ }
+
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "kubelet upstream connection error");
+ return -1;
+ }
+
+ ret = refresh_token_if_needed(ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "failed to refresh token");
+ flb_upstream_conn_release(u_conn);
+ return -1;
+ }
+
+ /* Compose HTTP Client request*/
+ c = flb_http_client(u_conn, FLB_HTTP_GET,
+ uri,
+ NULL, 0, NULL, 0, NULL, 0);
+ flb_http_buffer_size(c, ctx->buffer_size);
+
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ flb_http_add_header(c, "Connection", 10, "close", 5);
+ if (ctx->auth_len > 0) {
+ flb_http_add_header(c, "Authorization", 13, ctx->auth, ctx->auth_len);
+ }
+
+ ret = flb_http_do(c, &b_sent);
+ flb_plg_debug(ctx->ins, "Request (ns=%s, pod=%s) http_do=%i, "
+ "HTTP Status: %i",
+ namespace, podname, ret, c->resp.status);
+
+ if (ret != 0 || c->resp.status != 200) {
+ if (c->resp.payload_size > 0) {
+ flb_plg_debug(ctx->ins, "HTTP response\n%s",
+ c->resp.payload);
+ }
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ return -1;
+ }
+
+ packed = flb_pack_json(c->resp.payload, c->resp.payload_size,
+ buffer, size, root_type, NULL);
+
+ /* release resources */
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+
+ return packed;
+
+}
+
+/* Gather pods list information from Kubelet */
+static int get_pods_from_kubelet(struct flb_kube *ctx,
+ const char *namespace, const char *podname,
+ char **out_buf, size_t *out_size)
+{
+ int ret;
+ int packed = -1;
+ int root_type;
+ char uri[1024];
+ char *buf;
+ size_t size;
+
+ *out_buf = NULL;
+ *out_size = 0;
+
+    /* used for unit test purposes */
+ packed = get_meta_file_info(ctx, namespace, podname, &buf, &size,
+ &root_type);
+
+ if (packed == -1) {
+
+ ret = snprintf(uri, sizeof(uri) - 1, FLB_KUBELET_PODS);
+ if (ret == -1) {
+ return -1;
+ }
+ flb_plg_debug(ctx->ins,
+ "Send out request to Kubelet for pods information.");
+ packed = get_meta_info_from_request(ctx, namespace, podname,
+ &buf, &size, &root_type, uri);
+ }
+
+ /* validate pack */
+ if (packed == -1) {
+ return -1;
+ }
+
+ *out_buf = buf;
+ *out_size = size;
+
+ return 0;
+}
+
+/* Gather metadata from API Server */
+static int get_api_server_info(struct flb_kube *ctx,
+ const char *namespace, const char *podname,
+ char **out_buf, size_t *out_size)
+{
+ int ret;
+ int packed = -1;
+ int root_type;
+ char uri[1024];
+ char *buf;
+ size_t size;
+
+ *out_buf = NULL;
+ *out_size = 0;
+
+    /* used for unit test purposes */
+ packed = get_meta_file_info(ctx, namespace, podname,
+ &buf, &size, &root_type);
+
+ if (packed == -1) {
+
+ ret = snprintf(uri, sizeof(uri) - 1, FLB_KUBE_API_FMT, namespace,
+ podname);
+
+ if (ret == -1) {
+ return -1;
+ }
+ flb_plg_debug(ctx->ins,
+ "Send out request to API Server for pods information");
+ packed = get_meta_info_from_request(ctx, namespace, podname,
+ &buf, &size, &root_type, uri);
+ }
+
+ /* validate pack */
+ if (packed == -1) {
+ return -1;
+ }
+
+ *out_buf = buf;
+ *out_size = size;
+
+ return 0;
+}
+
+static void cb_results(const char *name, const char *value,
+ size_t vlen, void *data)
+{
+ struct flb_kube_meta *meta = data;
+
+ if (vlen == 0) {
+ return;
+ }
+
+ if (meta->podname == NULL && strcmp(name, "pod_name") == 0) {
+ meta->podname = flb_strndup(value, vlen);
+ meta->podname_len = vlen;
+ meta->fields++;
+ }
+ else if (meta->namespace == NULL &&
+ strcmp(name, "namespace_name") == 0) {
+ meta->namespace = flb_strndup(value, vlen);
+ meta->namespace_len = vlen;
+ meta->fields++;
+ }
+ else if (meta->container_name == NULL &&
+ strcmp(name, "container_name") == 0) {
+ meta->container_name = flb_strndup(value, vlen);
+ meta->container_name_len = vlen;
+ meta->fields++;
+ }
+ else if (meta->docker_id == NULL &&
+ strcmp(name, "docker_id") == 0) {
+ meta->docker_id = flb_strndup(value, vlen);
+ meta->docker_id_len = vlen;
+ meta->fields++;
+ }
+ else if (meta->container_hash == NULL &&
+ strcmp(name, "container_hash") == 0) {
+ meta->container_hash = flb_strndup(value, vlen);
+ meta->container_hash_len = vlen;
+ meta->fields++;
+ }
+
+ return;
+}
+
+static int extract_hash(const char * im, int sz, const char ** out, int * outsz)
+{
+ char * colon = NULL;
+ char * slash = NULL;
+
+ *out = NULL;
+ *outsz = 0;
+
+ if (sz <= 1) {
+ return -1;
+ }
+
+ colon = memchr(im, ':', sz);
+
+ if (colon == NULL) {
+ return -1;
+ } else {
+ slash = colon;
+ while ((im + sz - slash + 1) > 0 && *(slash + 1) == '/') {
+ slash++;
+ }
+ if (slash == colon) {
+ slash = NULL;
+ }
+ }
+
+ if (slash == NULL && (im + sz - colon) > 0) {
+ *out = im;
+ }
+
+ if (slash != NULL && (colon - slash) < 0 && (im + sz - slash) > 0) {
+ *out = slash + 1;
+ }
+
+ if (*out) {
+ *outsz = im + sz - *out;
+ return 0;
+ }
+ return -1;
+}
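+
+/*
+ * Example behaviour of extract_hash() (values illustrative): a containerID
+ * such as "containerd://3a9c1b..." or "docker://3a9c1b..." yields the bare
+ * id "3a9c1b..." (everything after the "://" separator), while an imageID
+ * like "sha256:4bb61a..." has no slashes after the colon, so the whole
+ * string is returned unchanged.
+ */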
+
+/*
+ * As per Kubernetes Pod spec,
+ * https://kubernetes.io/docs/concepts/workloads/pods/pod/, we look
+ * for status.{initContainerStatuses, containerStatuses}.{containerID, imageID, image}
+ * where status.{initContainerStatuses, containerStatus}.name == our container
+ * name
+ * status:
+ * ...
+ * containerStatuses:
+ * - containerID: XXX
+ * image: YYY
+ * imageID: ZZZ
+ * ...
+ * name: nginx-ingress-microk8s
+*/
+static void extract_container_hash(struct flb_kube_meta *meta,
+ msgpack_object status)
+{
+ int i;
+ msgpack_object k, v;
+ int docker_id_len = 0;
+ int container_hash_len = 0;
+ int container_image_len = 0;
+ const char *container_hash;
+ const char *docker_id;
+ const char *container_image;
+ const char *tmp;
+ int tmp_len = 0;
+ int name_found = FLB_FALSE;
+ /* Process status/containerStatus map for docker_id, container_hash, container_image */
+ for (i = 0;
+ (meta->docker_id_len == 0 || meta->container_hash_len == 0 ||
+ meta->container_image_len == 0) &&
+ i < status.via.map.size; i++) {
+ k = status.via.map.ptr[i].key;
+ if ((k.via.str.size == FLB_KUBE_META_CONTAINER_STATUSES_KEY_LEN &&
+ strncmp(k.via.str.ptr,
+ FLB_KUBE_META_CONTAINER_STATUSES_KEY,
+ FLB_KUBE_META_CONTAINER_STATUSES_KEY_LEN) == 0) ||
+ (k.via.str.size == FLB_KUBE_META_INIT_CONTAINER_STATUSES_KEY_LEN &&
+ strncmp(k.via.str.ptr,
+ FLB_KUBE_META_INIT_CONTAINER_STATUSES_KEY,
+ FLB_KUBE_META_INIT_CONTAINER_STATUSES_KEY_LEN) == 0)) {
+ int j;
+ v = status.via.map.ptr[i].val;
+ for (j = 0;
+ (meta->docker_id_len == 0 ||
+ meta->container_hash_len == 0 ||
+ meta->container_image_len == 0) && j < v.via.array.size;
+ j++) {
+ int l;
+ msgpack_object k1, k2;
+ msgpack_object_str v2;
+ k1 = v.via.array.ptr[j];
+ for (l = 0;
+ (meta->docker_id_len == 0 ||
+ meta->container_hash_len == 0 ||
+ meta->container_image_len == 0) &&
+ l < k1.via.map.size; l++) {
+ k2 = k1.via.map.ptr[l].key;
+ v2 = k1.via.map.ptr[l].val.via.str;
+ if (k2.via.str.size == sizeof("name") - 1 &&
+ !strncmp(k2.via.str.ptr, "name", k2.via.str.size)) {
+ if (v2.size == meta->container_name_len &&
+ !strncmp(v2.ptr,
+ meta->container_name,
+ meta->container_name_len)) {
+ name_found = FLB_TRUE;
+ }
+ else {
+ break;
+ }
+ }
+ else if (k2.via.str.size == sizeof("containerID") - 1 &&
+ !strncmp(k2.via.str.ptr,
+ "containerID",
+ k2.via.str.size)) {
+ if (extract_hash(v2.ptr, v2.size, &tmp, &tmp_len) == 0) {
+ docker_id = tmp;
+ docker_id_len = tmp_len;
+ }
+ }
+ else if (k2.via.str.size == sizeof("imageID") - 1 &&
+ !strncmp(k2.via.str.ptr,
+ "imageID",
+ k2.via.str.size)) {
+ if (extract_hash(v2.ptr, v2.size, &tmp, &tmp_len) == 0) {
+ container_hash = tmp;
+ container_hash_len = tmp_len;
+ }
+ }
+ else if (k2.via.str.size == sizeof("image") - 1 &&
+ !strncmp(k2.via.str.ptr,
+ "image",
+ k2.via.str.size)) {
+ container_image = v2.ptr;
+ container_image_len = v2.size;
+ }
+ }
+ if (name_found) {
+ if (container_hash_len && !meta->container_hash_len) {
+ meta->container_hash_len = container_hash_len;
+ meta->container_hash = flb_strndup(container_hash,
+ container_hash_len);
+ meta->fields++;
+ }
+ if (docker_id_len && !meta->docker_id_len) {
+ meta->docker_id_len = docker_id_len;
+ meta->docker_id = flb_strndup(docker_id, docker_id_len);
+ meta->fields++;
+ }
+ if (container_image_len && !meta->container_image_len) {
+ meta->container_image_len = container_image_len;
+ meta->container_image = flb_strndup(container_image, container_image_len);
+ meta->fields++;
+ }
+ return;
+ }
+ }
+ }
+ }
+}
+
+static int search_podname_and_namespace(struct flb_kube_meta *meta,
+ struct flb_kube *ctx,
+ msgpack_object map)
+{
+ int i;
+ int podname_found = FLB_FALSE;
+ int namespace_found = FLB_FALSE;
+ int target_podname_found = FLB_FALSE;
+ int target_namespace_found = FLB_FALSE;
+
+ msgpack_object k;
+ msgpack_object v;
+
+ for (i = 0; (!podname_found || !namespace_found) &&
+ i < map.via.map.size; i++) {
+
+ k = map.via.map.ptr[i].key;
+ v = map.via.map.ptr[i].val;
+ if (k.via.str.size == 4 && !strncmp(k.via.str.ptr, "name", 4)) {
+
+ podname_found = FLB_TRUE;
+ if (!strncmp(v.via.str.ptr, meta->podname, meta->podname_len)) {
+ target_podname_found = FLB_TRUE;
+ }
+
+ }
+ else if (k.via.str.size == 9 && !strncmp(k.via.str.ptr,
+ "namespace", 9)) {
+
+ namespace_found = FLB_TRUE;
+ if (!strncmp((char *)v.via.str.ptr,
+ meta->namespace,
+ meta->namespace_len)) {
+ target_namespace_found = FLB_TRUE;
+ }
+ }
+ }
+
+ if (!target_podname_found || !target_namespace_found) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static int search_metadata_in_items(struct flb_kube_meta *meta,
+ struct flb_kube *ctx,
+ msgpack_object items_array,
+ msgpack_object *target_item_map)
+{
+ int i, j;
+
+ int target_found = FLB_FALSE;
+ msgpack_object item_info_map;
+ msgpack_object k;
+ msgpack_object v;
+
+ for (i = 0; !target_found && i < items_array.via.array.size; i++) {
+
+ item_info_map = items_array.via.array.ptr[i];
+ if (item_info_map.type != MSGPACK_OBJECT_MAP) {
+ continue;
+ }
+
+ for (j = 0; j < item_info_map.via.map.size; j++) {
+
+ k = item_info_map.via.map.ptr[j].key;
+ if (k.via.str.size == 8 &&
+ !strncmp(k.via.str.ptr, "metadata", 8)) {
+
+ v = item_info_map.via.map.ptr[j].val;
+ if (search_podname_and_namespace(meta, ctx, v) == 0) {
+ target_found = FLB_TRUE;
+ *target_item_map = item_info_map;
+ flb_plg_debug(ctx->ins,
+ "kubelet find pod: %s and ns: %s match",
+ meta->podname, meta->namespace);
+ }
+ break;
+ }
+ }
+ }
+
+ if (!target_found) {
+ flb_plg_debug(ctx->ins,
+ "kubelet didn't find pod: %s, ns: %s match",
+ meta->podname, meta->namespace);
+ return -1;
+ }
+ return 0;
+}
+
+/* At this point map points to the ROOT map, eg:
+ *
+ * {
+ * "kind": "PodList",
+ * "apiVersion": "v1",
+ * "metadata": {},
+ * "items": [{
+ * "metadata": {
+ * "name": "fluent-bit-rz47v",
+ * "generateName": "fluent-bit-",
+ * "namespace": "kube-system",
+ * "selfLink": "/api/v1/namespaces/kube-system/pods/fluent-bit-rz47v",
+ * ....
+ * }
+ * }]
+ *
+ */
+static int search_item_in_items(struct flb_kube_meta *meta,
+ struct flb_kube *ctx,
+ msgpack_object api_map,
+ msgpack_object *target_item_map)
+{
+
+ int i;
+ int items_array_found = FLB_FALSE;
+
+ msgpack_object k;
+ msgpack_object v;
+ msgpack_object items_array;
+
+ for (i = 0; !items_array_found && i < api_map.via.map.size; i++) {
+
+ k = api_map.via.map.ptr[i].key;
+ if (k.via.str.size == 5 && !strncmp(k.via.str.ptr, "items", 5)) {
+
+ v = api_map.via.map.ptr[i].val;
+ if (v.type == MSGPACK_OBJECT_ARRAY) {
+ items_array = v;
+ items_array_found = FLB_TRUE;
+ }
+ }
+ }
+
+ int ret = search_metadata_in_items(meta, ctx, items_array,
+ target_item_map);
+
+ return ret;
+}
+
+
+static int merge_meta_from_tag(struct flb_kube *ctx, struct flb_kube_meta *meta,
+ char **out_buf, size_t *out_size)
+{
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ struct flb_mp_map_header mh;
+
+ /* Initialize output msgpack buffer */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ flb_mp_map_header_init(&mh, &mp_pck);
+
+ if (meta->podname != NULL) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "pod_name", 8);
+ msgpack_pack_str(&mp_pck, meta->podname_len);
+ msgpack_pack_str_body(&mp_pck, meta->podname, meta->podname_len);
+ }
+
+ if (meta->namespace != NULL) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 14);
+ msgpack_pack_str_body(&mp_pck, "namespace_name", 14);
+ msgpack_pack_str(&mp_pck, meta->namespace_len);
+ msgpack_pack_str_body(&mp_pck, meta->namespace, meta->namespace_len);
+ }
+
+ if (meta->container_name != NULL) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 14);
+ msgpack_pack_str_body(&mp_pck, "container_name", 14);
+ msgpack_pack_str(&mp_pck, meta->container_name_len);
+ msgpack_pack_str_body(&mp_pck, meta->container_name,
+ meta->container_name_len);
+ }
+ if (meta->docker_id != NULL) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 9);
+ msgpack_pack_str_body(&mp_pck, "docker_id", 9);
+ msgpack_pack_str(&mp_pck, meta->docker_id_len);
+ msgpack_pack_str_body(&mp_pck, meta->docker_id,
+ meta->docker_id_len);
+ }
+
+ flb_mp_map_header_end(&mh);
+
+ /* Set outgoing msgpack buffer */
+ *out_buf = mp_sbuf.data;
+ *out_size = mp_sbuf.size;
+
+ return 0;
+}
+
+static int merge_meta(struct flb_kube_meta *meta, struct flb_kube *ctx,
+ const char *api_buf, size_t api_size,
+ char **out_buf, size_t *out_size)
+{
+ int i;
+ int ret;
+ int map_size = 0;
+ int meta_found = FLB_FALSE;
+ int spec_found = FLB_FALSE;
+ int status_found = FLB_FALSE;
+ int target_found = FLB_TRUE;
+ int have_uid = -1;
+ int have_labels = -1;
+ int have_annotations = -1;
+ int have_nodename = -1;
+ size_t off = 0;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+
+ msgpack_unpacked api_result;
+ msgpack_unpacked meta_result;
+ msgpack_object item_result;
+ msgpack_object k;
+ msgpack_object v;
+ msgpack_object meta_val;
+ msgpack_object spec_val;
+ msgpack_object status_val;
+ msgpack_object api_map;
+ msgpack_object ann_map;
+ struct flb_kube_props props = {0};
+
+ /*
+ * - reg_buf: is a msgpack Map containing meta captured using Regex
+ *
+ * - api_buf: metadata associated to namespace and POD Name coming from
+ * the API server.
+ *
+ * When merging data we aim to add the following keys from the API server:
+ *
+ * - pod_id
+ * - labels
+ * - annotations
+ */
+
+ /* Initialize output msgpack buffer */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ /* Iterate API server msgpack and lookup specific fields */
+ if (api_buf != NULL) {
+ msgpack_unpacked_init(&api_result);
+ ret = msgpack_unpack_next(&api_result, api_buf, api_size, &off);
+ if (ret == MSGPACK_UNPACK_SUCCESS) {
+
+ if (ctx->use_kubelet) {
+ ret = search_item_in_items(meta, ctx, api_result.data, &item_result);
+ if (ret == -1) {
+ target_found = FLB_FALSE;
+ }
+ api_map = target_found ? item_result : api_result.data;
+ } else {
+ api_map = api_result.data;
+ }
+
+ /* At this point map points to the ROOT map, eg:
+ *
+ * {
+ * "kind": "Pod",
+ * "apiVersion": "v1",
+ * "metadata": {
+ * "name": "fluent-bit-rz47v",
+ * "generateName": "fluent-bit-",
+ * "namespace": "kube-system",
+ * "selfLink": "/api/v1/namespaces/kube-system/pods/fluent-bit-rz47v",
+ * ....
+ * }
+ *
+ * We are interested into the 'metadata' map value.
+ * We are also interested in the spec.nodeName.
+ * We are also interested in the status.containerStatuses.
+ */
+ for (i = 0; target_found && !(meta_found && spec_found && status_found) &&
+ i < api_map.via.map.size; i++) {
+ k = api_map.via.map.ptr[i].key;
+ if (k.via.str.size == 8 && !strncmp(k.via.str.ptr, "metadata", 8)) {
+ meta_val = api_map.via.map.ptr[i].val;
+ if (meta_val.type == MSGPACK_OBJECT_MAP) {
+ meta_found = FLB_TRUE;
+ }
+ }
+ else if (k.via.str.size == 4 && !strncmp(k.via.str.ptr, "spec", 4)) {
+ spec_val = api_map.via.map.ptr[i].val;
+ spec_found = FLB_TRUE;
+ }
+ else if (k.via.str.size == 6 && !strncmp(k.via.str.ptr, "status", 6)) {
+ status_val = api_map.via.map.ptr[i].val;
+ status_found = FLB_TRUE;
+ }
+ }
+
+ if (meta_found == FLB_TRUE) {
+ /* Process metadata map value */
+ msgpack_unpacked_init(&meta_result);
+ for (i = 0; i < meta_val.via.map.size; i++) {
+ k = meta_val.via.map.ptr[i].key;
+
+ char *ptr = (char *) k.via.str.ptr;
+ size_t size = k.via.str.size;
+
+ if (size == 3 && strncmp(ptr, "uid", 3) == 0) {
+ have_uid = i;
+ map_size++;
+ }
+ else if (size == 6 && strncmp(ptr, "labels", 6) == 0) {
+ have_labels = i;
+ if (ctx->labels == FLB_TRUE) {
+ map_size++;
+ }
+ }
+
+ else if (size == 11 && strncmp(ptr, "annotations", 11) == 0) {
+ have_annotations = i;
+ if (ctx->annotations == FLB_TRUE) {
+ map_size++;
+ }
+ }
+
+ if (have_uid >= 0 && have_labels >= 0 && have_annotations >= 0) {
+ break;
+ }
+ }
+ }
+
+ /* Process spec map value for nodeName */
+ if (spec_found == FLB_TRUE) {
+ for (i = 0; i < spec_val.via.map.size; i++) {
+ k = spec_val.via.map.ptr[i].key;
+ if (k.via.str.size == 8 &&
+ strncmp(k.via.str.ptr, "nodeName", 8) == 0) {
+ have_nodename = i;
+ map_size++;
+ break;
+ }
+ }
+ }
+
+ if ((!meta->container_hash || !meta->docker_id || !meta->container_image) && status_found) {
+ extract_container_hash(meta, status_val);
+ }
+ }
+ }
+
+ /* Set map size: current + pod_id, labels and annotations */
+ map_size += meta->fields;
+
+ /* Append Regex fields */
+ msgpack_pack_map(&mp_pck, map_size);
+ if (meta->podname != NULL) {
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "pod_name", 8);
+ msgpack_pack_str(&mp_pck, meta->podname_len);
+ msgpack_pack_str_body(&mp_pck, meta->podname, meta->podname_len);
+ }
+ if (meta->namespace != NULL) {
+ msgpack_pack_str(&mp_pck, 14);
+ msgpack_pack_str_body(&mp_pck, "namespace_name", 14);
+ msgpack_pack_str(&mp_pck, meta->namespace_len);
+ msgpack_pack_str_body(&mp_pck, meta->namespace, meta->namespace_len);
+ }
+
+ /* Append API Server content */
+ if (have_uid >= 0) {
+ v = meta_val.via.map.ptr[have_uid].val;
+
+ msgpack_pack_str(&mp_pck, 6);
+ msgpack_pack_str_body(&mp_pck, "pod_id", 6);
+ msgpack_pack_object(&mp_pck, v);
+ }
+
+ if (have_labels >= 0 && ctx->labels == FLB_TRUE) {
+ k = meta_val.via.map.ptr[have_labels].key;
+ v = meta_val.via.map.ptr[have_labels].val;
+
+ msgpack_pack_object(&mp_pck, k);
+ msgpack_pack_object(&mp_pck, v);
+ }
+
+ if (have_annotations >= 0 && ctx->annotations == FLB_TRUE) {
+ k = meta_val.via.map.ptr[have_annotations].key;
+ v = meta_val.via.map.ptr[have_annotations].val;
+
+ msgpack_pack_object(&mp_pck, k);
+ msgpack_pack_object(&mp_pck, v);
+ }
+
+ if (have_nodename >= 0) {
+ v = spec_val.via.map.ptr[have_nodename].val;
+
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "host", 4);
+ msgpack_pack_object(&mp_pck, v);
+ }
+
+ if (meta->container_name != NULL) {
+ msgpack_pack_str(&mp_pck, 14);
+ msgpack_pack_str_body(&mp_pck, "container_name", 14);
+ msgpack_pack_str(&mp_pck, meta->container_name_len);
+ msgpack_pack_str_body(&mp_pck, meta->container_name,
+ meta->container_name_len);
+ }
+ if (meta->docker_id != NULL) {
+ msgpack_pack_str(&mp_pck, 9);
+ msgpack_pack_str_body(&mp_pck, "docker_id", 9);
+ msgpack_pack_str(&mp_pck, meta->docker_id_len);
+ msgpack_pack_str_body(&mp_pck, meta->docker_id,
+ meta->docker_id_len);
+ }
+ if (meta->container_hash != NULL) {
+ msgpack_pack_str(&mp_pck, 14);
+ msgpack_pack_str_body(&mp_pck, "container_hash", 14);
+ msgpack_pack_str(&mp_pck, meta->container_hash_len);
+ msgpack_pack_str_body(&mp_pck, meta->container_hash,
+ meta->container_hash_len);
+ }
+ if (meta->container_image != NULL) {
+ msgpack_pack_str(&mp_pck, 15);
+ msgpack_pack_str_body(&mp_pck, "container_image", 15);
+ msgpack_pack_str(&mp_pck, meta->container_image_len);
+ msgpack_pack_str_body(&mp_pck, meta->container_image,
+ meta->container_image_len);
+ }
+
+ /* Process configuration suggested through Annotations */
+ if (have_annotations >= 0) {
+ ann_map = meta_val.via.map.ptr[have_annotations].val;
+
+ /* Iterate annotations keys and look for 'logging' key */
+ if (ann_map.type == MSGPACK_OBJECT_MAP) {
+ for (i = 0; i < ann_map.via.map.size; i++) {
+ k = ann_map.via.map.ptr[i].key;
+ v = ann_map.via.map.ptr[i].val;
+
+ if (k.via.str.size > 13 && /* >= 'fluentbit.io/' */
+ strncmp(k.via.str.ptr, "fluentbit.io/", 13) == 0) {
+
+ /* Validate and set the property */
+ flb_kube_prop_set(ctx, meta,
+ k.via.str.ptr + 13,
+ k.via.str.size - 13,
+ v.via.str.ptr,
+ v.via.str.size,
+ &props);
+ }
+ }
+ }
+
+ /* Pack Annotation properties */
+ void *prop_buf;
+ size_t prop_size;
+ flb_kube_prop_pack(&props, &prop_buf, &prop_size);
+ msgpack_sbuffer_write(&mp_sbuf, prop_buf, prop_size);
+ flb_kube_prop_destroy(&props);
+ flb_free(prop_buf);
+ }
+
+ if (api_buf != NULL) {
+ msgpack_unpacked_destroy(&api_result);
+ if (meta_found == FLB_TRUE) {
+ msgpack_unpacked_destroy(&meta_result);
+ }
+ }
+
+ /* Set outgoing msgpack buffer */
+ *out_buf = mp_sbuf.data;
+ *out_size = mp_sbuf.size;
+
+ return 0;
+}
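+
+/*
+ * A sketch of a merged record produced above (the key set depends on what
+ * the regex and the API server provided and on the 'labels'/'annotations'
+ * options; values illustrative):
+ *
+ *   {"pod_name": "fluent-bit-rz47v", "namespace_name": "kube-system",
+ *    "pod_id": "...", "labels": {...}, "annotations": {...},
+ *    "host": "node-1", "container_name": "fluent-bit",
+ *    "docker_id": "...", "container_hash": "...", "container_image": "..."}
+ */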
+
+static inline int extract_meta(struct flb_kube *ctx,
+ const char *tag, int tag_len,
+ const char *data, size_t data_size,
+ struct flb_kube_meta *meta)
+{
+ int i;
+ size_t off = 0;
+ ssize_t n;
+ int kube_tag_len;
+ const char *kube_tag_str;
+ const char *container = NULL;
+ int container_found = FLB_FALSE;
+ int container_length = 0;
+ struct flb_regex_search result;
+ msgpack_unpacked mp_result;
+ msgpack_object root;
+ msgpack_object map;
+ msgpack_object key;
+ msgpack_object val;
+
+ /* Reset meta context */
+ memset(meta, '\0', sizeof(struct flb_kube_meta));
+
+ /* Journald */
+ if (ctx->use_journal == FLB_TRUE) {
+ off = 0;
+ msgpack_unpacked_init(&mp_result);
+ while (msgpack_unpack_next(&mp_result, data, data_size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ root = mp_result.data;
+ if (root.type != MSGPACK_OBJECT_ARRAY) {
+ continue;
+ }
+
+ /* Lookup the CONTAINER_NAME key/value */
+ map = root.via.array.ptr[1];
+ for (i = 0; i < map.via.map.size; i++) {
+ key = map.via.map.ptr[i].key;
+ if (key.via.str.size != 14) {
+ continue;
+ }
+
+ if (strncmp(key.via.str.ptr, "CONTAINER_NAME", 14) == 0) {
+ val = map.via.map.ptr[i].val;
+ container = val.via.str.ptr;
+ container_length = val.via.str.size;
+ container_found = FLB_TRUE;
+ break;
+ }
+ }
+
+ if (container_found == FLB_TRUE) {
+ break;
+ }
+ }
+
+ if (container_found == FLB_FALSE) {
+ msgpack_unpacked_destroy(&mp_result);
+ return -1;
+ }
+ n = flb_regex_do(ctx->regex,
+ container, container_length,
+ &result);
+ msgpack_unpacked_destroy(&mp_result);
+ }
+ else {
+ /*
+         * Lookup metadata using the regular expression. For the regex to
+         * work we need to know beforehand which Tag prefix is set and make
+         * sure the adjustment can be done.
+ */
+ kube_tag_len = flb_sds_len(ctx->kube_tag_prefix);
+ if (kube_tag_len + 1 >= tag_len) {
+ flb_plg_error(ctx->ins, "incoming record tag (%s) is shorter "
+ "than kube_tag_prefix value (%s), skip filter",
+ tag, ctx->kube_tag_prefix);
+ return -1;
+ }
+ kube_tag_str = tag + kube_tag_len;
+ kube_tag_len = tag_len - kube_tag_len;
+
+ n = flb_regex_do(ctx->regex, kube_tag_str, kube_tag_len, &result);
+ }
+
+ if (n <= 0) {
+ flb_plg_warn(ctx->ins, "invalid pattern for given tag %s", tag);
+ return -1;
+ }
+
+ /* Parse the regex results */
+ flb_regex_parse(ctx->regex, &result, cb_results, meta);
+
+ /* Compose API server cache key */
+ if (meta->podname && meta->namespace) {
+ /* calculate estimated buffer size */
+ n = meta->namespace_len + 1 + meta->podname_len + 1;
+ if (meta->container_name) {
+ n += meta->container_name_len + 1;
+ }
+ if (ctx->cache_use_docker_id && meta->docker_id) {
+ n += meta->docker_id_len + 1;
+ }
+ meta->cache_key = flb_malloc(n);
+ if (!meta->cache_key) {
+ flb_errno();
+ return -1;
+ }
+
+ /* Copy namespace */
+ memcpy(meta->cache_key, meta->namespace, meta->namespace_len);
+ off = meta->namespace_len;
+
+ /* Separator */
+ meta->cache_key[off++] = ':';
+
+ /* Copy podname */
+ memcpy(meta->cache_key + off, meta->podname, meta->podname_len);
+ off += meta->podname_len;
+
+ if (meta->container_name) {
+ /* Separator */
+ meta->cache_key[off++] = ':';
+ memcpy(meta->cache_key + off, meta->container_name, meta->container_name_len);
+ off += meta->container_name_len;
+ }
+
+ if (ctx->cache_use_docker_id && meta->docker_id) {
+ /* Separator */
+ meta->cache_key[off++] = ':';
+ memcpy(meta->cache_key + off, meta->docker_id, meta->docker_id_len);
+ off += meta->docker_id_len;
+ }
+
+ meta->cache_key[off] = '\0';
+ meta->cache_key_len = off;
+ }
+ else {
+ meta->cache_key = NULL;
+ meta->cache_key_len = 0;
+ }
+
+ return 0;
+}
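+
+/*
+ * The cache key composed by extract_meta() has the form
+ *
+ *   <namespace>:<podname>[:<container name>][:<docker id>]
+ *
+ * e.g. "kube-system:fluent-bit-rz47v:fluent-bit" (illustrative), and is
+ * used as the lookup key into ctx->hash_table by flb_kube_meta_get().
+ */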
+
+/*
+ * Given fixed metadata (namespace and podname), get the API server
+ * information and merge the buffers.
+ */
+static int get_and_merge_meta(struct flb_kube *ctx, struct flb_kube_meta *meta,
+ char **out_buf, size_t *out_size)
+{
+ int ret;
+ char *api_buf;
+ size_t api_size;
+
+ if (ctx->use_tag_for_meta) {
+ ret = merge_meta_from_tag(ctx, meta, out_buf, out_size);
+ return ret;
+ }
+ else if (ctx->use_kubelet) {
+ ret = get_pods_from_kubelet(ctx, meta->namespace, meta->podname,
+ &api_buf, &api_size);
+ }
+ else {
+ ret = get_api_server_info(ctx, meta->namespace, meta->podname,
+ &api_buf, &api_size);
+ }
+ if (ret == -1) {
+ return -1;
+ }
+
+ ret = merge_meta(meta, ctx,
+ api_buf, api_size,
+ out_buf, out_size);
+
+ if (api_buf != NULL) {
+ flb_free(api_buf);
+ }
+
+ return ret;
+}
+
+/*
+ * Work around kubernetes/kubernetes/issues/78479 by waiting
+ * for DNS to start up.
+ */
+static int wait_for_dns(struct flb_kube *ctx)
+{
+ int i;
+ struct addrinfo *res;
+ struct addrinfo hints;
+
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+
+ for (i = 0; i < ctx->dns_retries; i++) {
+ if (getaddrinfo(ctx->api_host, NULL, &hints, &res) == 0) {
+ freeaddrinfo(res);
+ return 0;
+ }
+ flb_plg_info(ctx->ins, "host: %s Wait %i secs until DNS starts up (%i/%i)",
+ ctx->api_host, ctx->dns_wait_time, i + 1, ctx->dns_retries);
+ sleep(ctx->dns_wait_time);
+ }
+ return -1;
+}
+
+static int flb_kube_network_init(struct flb_kube *ctx, struct flb_config *config)
+{
+ int io_type = FLB_IO_TCP;
+
+ ctx->upstream = NULL;
+
+ if (ctx->api_https == FLB_TRUE) {
+ if (!ctx->tls_ca_path && !ctx->tls_ca_file) {
+ ctx->tls_ca_file = flb_strdup(FLB_KUBE_CA);
+ }
+ ctx->tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ ctx->tls_verify,
+ ctx->tls_debug,
+ ctx->tls_vhost,
+ ctx->tls_ca_path,
+ ctx->tls_ca_file,
+ NULL, NULL, NULL);
+ if (!ctx->tls) {
+ return -1;
+ }
+
+ io_type = FLB_IO_TLS;
+ }
+
+ /* Create an Upstream context */
+ ctx->upstream = flb_upstream_create(config,
+ ctx->api_host,
+ ctx->api_port,
+ io_type,
+ ctx->tls);
+ if (!ctx->upstream) {
+ /* note: if ctx->tls.context is set, it's destroyed upon context exit */
+ flb_plg_debug(ctx->ins, "kube network init create upstream failed");
+ return -1;
+ }
+
+ /* Remove async flag from upstream */
+ flb_stream_disable_async_mode(&ctx->upstream->base);
+
+ return 0;
+}
+
+/* Initialize local context */
+int flb_kube_meta_init(struct flb_kube *ctx, struct flb_config *config)
+{
+ int ret;
+ char *meta_buf;
+ size_t meta_size;
+
+ if (ctx->dummy_meta == FLB_TRUE) {
+ flb_plg_warn(ctx->ins, "using Dummy Metadata");
+ return 0;
+ }
+
+ if (ctx->use_tag_for_meta) {
+ flb_plg_info(ctx->ins, "no network access required (OK)");
+ return 0;
+ }
+
+ /* Init network */
+ flb_kube_network_init(ctx, config);
+
+ /* Gather local info */
+ ret = get_local_pod_info(ctx);
+ if (ret == FLB_TRUE && !ctx->use_tag_for_meta) {
+ flb_plg_info(ctx->ins, "local POD info OK");
+
+ ret = wait_for_dns(ctx);
+ if (ret == -1) {
+ flb_plg_warn(ctx->ins, "could not resolve %s", ctx->api_host);
+ return -1;
+ }
+
+ if (ctx->use_kubelet) {
+ /* Gather info from Kubelet */
+ flb_plg_info(ctx->ins, "testing connectivity with Kubelet...");
+ ret = get_pods_from_kubelet(ctx, ctx->namespace, ctx->podname,
+ &meta_buf, &meta_size);
+ }
+ else {
+ /* Gather info from API server */
+ flb_plg_info(ctx->ins, "testing connectivity with API server...");
+ ret = get_api_server_info(ctx, ctx->namespace, ctx->podname,
+ &meta_buf, &meta_size);
+ }
+ if (ret == -1) {
+ if (!ctx->podname) {
+ flb_plg_warn(ctx->ins, "could not get meta for local POD");
+ }
+ else {
+ flb_plg_warn(ctx->ins, "could not get meta for POD %s",
+ ctx->podname);
+ }
+ return -1;
+ }
+ flb_plg_info(ctx->ins, "connectivity OK");
+ flb_free(meta_buf);
+ }
+ else {
+ flb_plg_info(ctx->ins, "Fluent Bit not running in a POD");
+ }
+
+ return 0;
+}
+
+int flb_kube_dummy_meta_get(char **out_buf, size_t *out_size)
+{
+ int len;
+ time_t t;
+ char stime[32];
+ struct tm result;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+
+ t = time(NULL);
+ localtime_r(&t, &result);
+#ifdef FLB_SYSTEM_WINDOWS
+ asctime_s(stime, sizeof(stime), &result);
+#else
+ asctime_r(&result, stime);
+#endif
+ len = strlen(stime) - 1;
+
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ msgpack_pack_map(&mp_pck, 1);
+ msgpack_pack_str(&mp_pck, 5 /* dummy */ );
+ msgpack_pack_str_body(&mp_pck, "dummy", 5);
+ msgpack_pack_str(&mp_pck, len);
+ msgpack_pack_str_body(&mp_pck, stime, len);
+
+ *out_buf = mp_sbuf.data;
+ *out_size = mp_sbuf.size;
+
+ return 0;
+}
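+
+/*
+ * For reference, the dummy metadata generated above is a single-entry map,
+ * e.g. (hypothetical timestamp):
+ *
+ *   {"dummy": "Sun May  5 12:08:03 2024"}
+ */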
+
+int flb_kube_meta_get(struct flb_kube *ctx,
+ const char *tag, int tag_len,
+ const char *data, size_t data_size,
+ const char **out_buf, size_t *out_size,
+ struct flb_kube_meta *meta,
+ struct flb_kube_props *props)
+{
+ int id;
+ int ret;
+ const char *hash_meta_buf;
+ char *tmp_hash_meta_buf;
+ size_t off = 0;
+ size_t hash_meta_size;
+ msgpack_unpacked result;
+
+ /* Get metadata from tag or record (cache key is the important one) */
+ ret = extract_meta(ctx, tag, tag_len, data, data_size, meta);
+ if (ret != 0) {
+ return -1;
+ }
+
+ /* Check if we have some data associated to the cache key */
+ ret = flb_hash_table_get(ctx->hash_table,
+ meta->cache_key, meta->cache_key_len,
+ (void *) &hash_meta_buf, &hash_meta_size);
+ if (ret == -1) {
+ /* Retrieve API server meta and merge with local meta */
+ ret = get_and_merge_meta(ctx, meta,
+ &tmp_hash_meta_buf, &hash_meta_size);
+ if (ret == -1) {
+ *out_buf = NULL;
+ *out_size = 0;
+ return 0;
+ }
+
+ id = flb_hash_table_add(ctx->hash_table,
+ meta->cache_key, meta->cache_key_len,
+ tmp_hash_meta_buf, hash_meta_size);
+ if (id >= 0) {
+ /*
+             * Release the original buffer created by get_and_merge_meta(), as
+             * a new copy has been generated into the hash table; then re-set
+             * the outgoing buffer and size.
+ */
+ flb_free(tmp_hash_meta_buf);
+ flb_hash_table_get_by_id(ctx->hash_table, id, meta->cache_key,
+ &hash_meta_buf, &hash_meta_size);
+ }
+ }
+
+ /*
+ * The retrieved buffer may have two serialized items:
+ *
+ * [0] = kubernetes metadata (annotations, labels)
+ * [1] = Annotation properties
+ *
+ * note: annotation properties are optional.
+ */
+ msgpack_unpacked_init(&result);
+
+ /* Unpack to get the offset/bytes of the first item */
+ msgpack_unpack_next(&result, hash_meta_buf, hash_meta_size, &off);
+
+ /* Set the pointer and proper size for the caller */
+ *out_buf = hash_meta_buf;
+ *out_size = off;
+
+    /* A new unpack_next() call will succeed if annotation properties exist */
+ ret = msgpack_unpack_next(&result, hash_meta_buf, hash_meta_size, &off);
+ if (ret == MSGPACK_UNPACK_SUCCESS) {
+ /* Unpack the remaining data into properties structure */
+ flb_kube_prop_unpack(props,
+ hash_meta_buf + *out_size,
+ hash_meta_size - *out_size);
+ }
+ msgpack_unpacked_destroy(&result);
+
+ return 0;
+}
+
+int flb_kube_meta_release(struct flb_kube_meta *meta)
+{
+ int r = 0;
+
+ if (meta->namespace) {
+ flb_free(meta->namespace);
+ r++;
+ }
+
+ if (meta->podname) {
+ flb_free(meta->podname);
+ r++;
+ }
+
+ if (meta->container_name) {
+ flb_free(meta->container_name);
+ r++;
+ }
+
+ if (meta->docker_id) {
+ flb_free(meta->docker_id);
+ r++;
+ }
+
+ if (meta->container_hash) {
+ flb_free(meta->container_hash);
+ r++;
+ }
+
+ if (meta->container_image) {
+ flb_free(meta->container_image);
+ r++;
+ }
+
+ if (meta->cache_key) {
+ flb_free(meta->cache_key);
+ }
+
+ return r;
+}
diff --git a/src/fluent-bit/plugins/filter_kubernetes/kube_meta.h b/src/fluent-bit/plugins/filter_kubernetes/kube_meta.h
new file mode 100644
index 000000000..fb0278afc
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_kubernetes/kube_meta.h
@@ -0,0 +1,69 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_KUBE_META_H
+#define FLB_FILTER_KUBE_META_H
+
+#include "kube_props.h"
+
+struct flb_kube;
+
+struct flb_kube_meta {
+ int fields;
+
+ int namespace_len;
+ int podname_len;
+ int cache_key_len;
+ int container_name_len;
+ int docker_id_len;
+ int container_hash_len;
+ int container_image_len;
+
+ char *namespace;
+ char *podname;
+ char *container_name;
+ char *container_image;
+ char *docker_id;
+
+ char *container_hash; /* set only on Systemd mode */
+
+ char *cache_key;
+};
+
+/* Constant Kubernetes paths */
+#define FLB_KUBE_NAMESPACE "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
+#define FLB_KUBE_TOKEN "/var/run/secrets/kubernetes.io/serviceaccount/token"
+#define FLB_KUBE_CA "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+#define FLB_KUBE_API_HOST "kubernetes.default.svc"
+#define FLB_KUBE_API_PORT 443
+#define FLB_KUBE_API_FMT "/api/v1/namespaces/%s/pods/%s"
+#define FLB_KUBELET_PODS "/pods"
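+
+/*
+ * Example only (hypothetical names): FLB_KUBE_API_FMT expands to a path
+ * such as /api/v1/namespaces/default/pods/nginx-7c5ddbdf54-abcde
+ */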
+
+int flb_kube_meta_init(struct flb_kube *ctx, struct flb_config *config);
+int flb_kube_meta_fetch(struct flb_kube *ctx);
+int flb_kube_dummy_meta_get(char **out_buf, size_t *out_size);
+int flb_kube_meta_get(struct flb_kube *ctx,
+ const char *tag, int tag_len,
+ const char *data, size_t data_size,
+ const char **out_buf, size_t *out_size,
+ struct flb_kube_meta *meta,
+ struct flb_kube_props *props);
+int flb_kube_meta_release(struct flb_kube_meta *meta);
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_kubernetes/kube_property.c b/src/fluent-bit/plugins/filter_kubernetes/kube_property.c
new file mode 100644
index 000000000..4399d692c
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_kubernetes/kube_property.c
@@ -0,0 +1,360 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_parser.h>
+
+#include <msgpack.h>
+
+#include "kube_conf.h"
+#include "kube_meta.h"
+#include "kube_property.h"
+
+#define FLB_KUBE_PROP_PARSER "parser"
+#define FLB_KUBE_PROP_PARSER_LEN (sizeof(FLB_KUBE_PROP_PARSER) - 1)
+#define FLB_KUBE_PROP_EXCLUDE "exclude"
+#define FLB_KUBE_PROP_EXCLUDE_LEN (sizeof(FLB_KUBE_PROP_EXCLUDE) - 1)
+
+static inline int prop_cmp(const char *key, size_t keylen,
+ const char *property, size_t proplen)
+{
+ return proplen >= keylen && strncmp(key, property, keylen) == 0;
+}
+
+static inline void prop_not_allowed(const char *prop, struct flb_kube_meta *meta,
+ struct flb_kube *ctx)
+{
+ flb_plg_warn(ctx->ins, "annotation '%s' not allowed "
+ "(ns='%s' pod_name='%s')",
+ prop, meta->namespace, meta->podname);
+}
+
+/* Property: parser */
+static int prop_set_parser(struct flb_kube *ctx, struct flb_kube_meta *meta,
+ int is_container_specific, int stream,
+ const char *val_buf, size_t val_len,
+ struct flb_kube_props *props)
+{
+ char *tmp;
+ struct flb_parser *parser;
+
+ /* Parser property must be allowed by k8s-logging.parser */
+ if (ctx->k8s_logging_parser == FLB_FALSE) {
+ prop_not_allowed("fluentbit.io/parser", meta, ctx);
+ return -1;
+ }
+
+ /* Check the parser exists */
+ tmp = flb_strndup(val_buf, val_len);
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+
+ /* Get parser context */
+ parser = flb_parser_get(tmp, ctx->config);
+ if (!parser) {
+ flb_plg_warn(ctx->ins, "annotation parser '%s' not found "
+ "(ns='%s' pod_name='%s', container_name='%s')",
+ tmp, meta->namespace, meta->podname, meta->container_name);
+ flb_free(tmp);
+ return -1;
+ }
+
+ /* Save the parser in the properties context */
+ if ((stream == FLB_KUBE_PROP_NO_STREAM ||
+ stream == FLB_KUBE_PROP_STREAM_STDOUT) &&
+ (is_container_specific == FLB_TRUE ||
+ props->stdout_parser == FLB_KUBE_PROP_UNDEF)) {
+ props->stdout_parser = flb_sds_create(tmp);
+ }
+ if ((stream == FLB_KUBE_PROP_NO_STREAM ||
+ stream == FLB_KUBE_PROP_STREAM_STDERR) &&
+ (is_container_specific == FLB_TRUE ||
+ props->stderr_parser == FLB_KUBE_PROP_UNDEF)) {
+ props->stderr_parser = flb_sds_create(tmp);
+ }
+
+ flb_free(tmp);
+
+ return 0;
+}
+
+static int prop_set_exclude(struct flb_kube *ctx, struct flb_kube_meta *meta,
+ int is_container_specific, int stream,
+ const char *val_buf, size_t val_len,
+ struct flb_kube_props *props)
+{
+ char *tmp;
+ int exclude;
+
+ /* Exclude property must be allowed by k8s-logging.exclude */
+ if (ctx->k8s_logging_exclude == FLB_FALSE) {
+ prop_not_allowed("fluentbit.io/exclude", meta, ctx);
+ return -1;
+ }
+
+ /* Get the bool value */
+ tmp = flb_strndup(val_buf, val_len);
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+
+ exclude = flb_utils_bool(tmp) == FLB_TRUE ?
+ FLB_KUBE_PROP_TRUE : FLB_KUBE_PROP_FALSE;
+
+ /* Save the exclude property in the context */
+ if ((stream == FLB_KUBE_PROP_NO_STREAM ||
+ stream == FLB_KUBE_PROP_STREAM_STDOUT) &&
+ (is_container_specific == FLB_TRUE ||
+ props->stdout_exclude == FLB_KUBE_PROP_UNDEF)) {
+ props->stdout_exclude = exclude;
+ }
+ if ((stream == FLB_KUBE_PROP_NO_STREAM ||
+ stream == FLB_KUBE_PROP_STREAM_STDERR) &&
+ (is_container_specific == FLB_TRUE ||
+ props->stderr_exclude == FLB_KUBE_PROP_UNDEF)) {
+ props->stderr_exclude = exclude;
+ }
+
+ flb_free(tmp);
+
+ return 0;
+}
+
+int flb_kube_prop_set(struct flb_kube *ctx, struct flb_kube_meta *meta,
+ const char *prop, int prop_len,
+ const char *val_buf, size_t val_len,
+ struct flb_kube_props *props)
+{
+ /*
+ * Property can take the following forms:
+ * <property> applies to streams stdout and stderr of every pod's containers
+ * <property>-<container> applies to streams stdout and stderr of a specific pod's container
+ * <property>_stdout applies to stream stdout of every pod's containers
+ * <property>_stderr applies to stream stderr of every pod's containers
+ * <property>_stdout-<container> applies to stream stdout of a specific pod's container
+ * <property>_stderr-<container> applies to stream stderr of a specific pod's container
+ */
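+
+    /*
+     * Illustrative (hypothetical) examples of the forms above, as received
+     * here without the 'fluentbit.io/' prefix:
+     *
+     *   parser               -> parser for stdout and stderr of all containers
+     *   exclude_stderr       -> exclude the stderr stream of all containers
+     *   parser_stdout-nginx  -> parser for stdout of container 'nginx' only
+     */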
+ const char *cur = prop;
+ size_t len = prop_len;
+ const char *container = NULL;
+ size_t container_len = 0;
+ int stream = FLB_KUBE_PROP_NO_STREAM;
+ int (*function)(struct flb_kube *ctx, struct flb_kube_meta *meta,
+ int is_container_specific, int stream,
+ const char *val_buf, size_t val_len,
+ struct flb_kube_props *props);
+
+ if (prop_cmp(FLB_KUBE_PROP_PARSER, FLB_KUBE_PROP_PARSER_LEN, prop, prop_len)) {
+ function = prop_set_parser;
+ cur += FLB_KUBE_PROP_PARSER_LEN;
+ }
+ else if (prop_cmp(FLB_KUBE_PROP_EXCLUDE, FLB_KUBE_PROP_EXCLUDE_LEN, prop, prop_len)) {
+ function = prop_set_exclude;
+ cur += FLB_KUBE_PROP_EXCLUDE_LEN;
+ }
+ else {
+ flb_plg_warn(ctx->ins, "unknown annotation 'fluentbit.io/%.*s' "
+ "(ns='%s' pod_name='%s')",
+ prop_len, prop, meta->namespace, meta->podname);
+ return -1;
+ }
+
+ len = prop_len - (cur - prop);
+
+ if (prop_cmp("_", 1, cur, len)) {
+ cur++;
+ len--;
+
+ if (prop_cmp("stdout", sizeof("stdout") - 1, cur, len)) {
+ stream = FLB_KUBE_PROP_STREAM_STDOUT;
+ cur += sizeof("stdout") - 1;
+ }
+ else if (prop_cmp("stderr", sizeof("stderr") - 1, cur, len)) {
+ stream = FLB_KUBE_PROP_STREAM_STDERR;
+ cur += sizeof("stderr") - 1;
+ }
+ else {
+ flb_plg_warn(ctx->ins, "invalid stream in annotation "
+ "'fluentbit.io/%.*s' (ns='%s' pod_name='%s')",
+ prop_len, prop, meta->namespace, meta->podname);
+ return -1;
+ }
+
+ len = prop_len - (cur - prop);
+ }
+
+ if (prop_cmp("-", 1, cur, len)) {
+ cur++;
+ len--;
+
+ if (len == 0) {
+ flb_plg_warn(ctx->ins, "invalid container in annotation "
+ "'fluentbit.io/%.*s' (ns='%s' pod_name='%s')",
+ prop_len, prop, meta->namespace, meta->podname);
+ return -1;
+ }
+
+ container = cur;
+ container_len = len;
+ len = 0;
+ }
+
+ if (len > 0) {
+ flb_plg_warn(ctx->ins, "invalid annotation 'fluentbit.io/%.*s' "
+ "(ns='%s' pod_name='%s')",
+ prop_len, prop, meta->namespace, meta->podname);
+ return -1;
+ }
+
+ /* If the property is for a specific container, and this is not
+ * that container, bail out
+ */
+ if (container && strncmp(container, meta->container_name, container_len)) {
+ return 0;
+ }
+
+ return function(ctx, meta,
+ (container ? FLB_TRUE : FLB_FALSE), stream,
+ val_buf, val_len, props);
+}
+
+int flb_kube_prop_pack(struct flb_kube_props *props,
+ void **out_buf, size_t *out_size)
+{
+ int size;
+ msgpack_packer pck;
+ msgpack_sbuffer sbuf;
+
+ /* Number of fields in props structure */
+ size = FLB_KUBE_NUMBER_OF_PROPS;
+
+ /* Create msgpack buffer */
+ msgpack_sbuffer_init(&sbuf);
+ msgpack_packer_init(&pck, &sbuf, msgpack_sbuffer_write);
+
+ /* Main array */
+ msgpack_pack_array(&pck, size);
+
+ /* Index 0: FLB_KUBE_PROPS_STDOUT_PARSER */
+ if (props->stdout_parser) {
+ msgpack_pack_str(&pck, flb_sds_len(props->stdout_parser));
+ msgpack_pack_str_body(&pck, props->stdout_parser, flb_sds_len(props->stdout_parser));
+ }
+ else {
+ msgpack_pack_nil(&pck);
+ }
+
+ /* Index 1: FLB_KUBE_PROPS_STDERR_PARSER */
+ if (props->stderr_parser) {
+ msgpack_pack_str(&pck, flb_sds_len(props->stderr_parser));
+ msgpack_pack_str_body(&pck, props->stderr_parser, flb_sds_len(props->stderr_parser));
+ }
+ else {
+ msgpack_pack_nil(&pck);
+ }
+
+ /* Index 2: FLB_KUBE_PROPS_STDOUT_EXCLUDE */
+ if (props->stdout_exclude == FLB_KUBE_PROP_TRUE) {
+ msgpack_pack_true(&pck);
+ }
+ else {
+ msgpack_pack_false(&pck);
+ }
+
+ /* Index 3: FLB_KUBE_PROPS_STDERR_EXCLUDE */
+ if (props->stderr_exclude == FLB_KUBE_PROP_TRUE) {
+ msgpack_pack_true(&pck);
+ }
+ else {
+ msgpack_pack_false(&pck);
+ }
+
+ /* Set outgoing msgpack buffer */
+ *out_buf = sbuf.data;
+ *out_size = sbuf.size;
+
+ return 0;
+}
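+
+/*
+ * For reference, a packed properties array produced above could look like
+ * (hypothetical values): ["apache", nil, false, true], i.e. stdout parser
+ * 'apache', no stderr parser, stdout kept and stderr excluded.
+ */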
+
+int flb_kube_prop_unpack(struct flb_kube_props *props,
+ const char *buf, size_t size)
+{
+ int ret;
+ size_t off = 0;
+ msgpack_object o;
+ msgpack_object root;
+ msgpack_unpacked result;
+
+ memset(props, '\0', sizeof(struct flb_kube_props));
+
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, buf, size, &off);
+ if (ret == MSGPACK_UNPACK_PARSE_ERROR) {
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+ root = result.data;
+
+ /* Index 0: stdout_parser */
+ o = root.via.array.ptr[FLB_KUBE_PROPS_STDOUT_PARSER];
+ if (o.type == MSGPACK_OBJECT_NIL) {
+ props->stdout_parser = NULL;
+ }
+ else {
+ props->stdout_parser = flb_sds_create_len(o.via.str.ptr, o.via.str.size);
+ }
+
+ /* Index 1: stderr_parser */
+ o = root.via.array.ptr[FLB_KUBE_PROPS_STDERR_PARSER];
+ if (o.type == MSGPACK_OBJECT_NIL) {
+ props->stderr_parser = NULL;
+ }
+ else {
+ props->stderr_parser = flb_sds_create_len(o.via.str.ptr, o.via.str.size);
+ }
+
+ /* Index 2: stdout_exclude */
+ o = root.via.array.ptr[FLB_KUBE_PROPS_STDOUT_EXCLUDE];
+ props->stdout_exclude = o.via.boolean;
+
+ /* Index 3: stderr_exclude */
+ o = root.via.array.ptr[FLB_KUBE_PROPS_STDERR_EXCLUDE];
+ props->stderr_exclude = o.via.boolean;
+
+ msgpack_unpacked_destroy(&result);
+ return 0;
+}
+
+/* Destroy any resource held by a props element */
+void flb_kube_prop_destroy(struct flb_kube_props *props)
+{
+ if (props->stdout_parser) {
+ flb_sds_destroy(props->stdout_parser);
+ props->stdout_parser = NULL;
+ }
+ if (props->stderr_parser) {
+ flb_sds_destroy(props->stderr_parser);
+ props->stderr_parser = NULL;
+ }
+}
diff --git a/src/fluent-bit/plugins/filter_kubernetes/kube_property.h b/src/fluent-bit/plugins/filter_kubernetes/kube_property.h
new file mode 100644
index 000000000..c2c8503b6
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_kubernetes/kube_property.h
@@ -0,0 +1,40 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_KUBE_PROP_H
+#define FLB_FILTER_KUBE_PROP_H
+
+#include "kube_meta.h"
+#include "kube_props.h"
+
+#define FLB_KUBE_PROP_NO_STREAM 0
+#define FLB_KUBE_PROP_STREAM_STDOUT 1
+#define FLB_KUBE_PROP_STREAM_STDERR 2
+#define FLB_KUBE_PROP_STREAM_UNKNOWN 3
+
+int flb_kube_prop_set(struct flb_kube *ctx, struct flb_kube_meta *meta,
+ const char *prop, int prop_len,
+ const char *val_buf, size_t val_len,
+ struct flb_kube_props *props);
+int flb_kube_prop_pack(struct flb_kube_props *props,
+ void **out_buf, size_t *out_size);
+int flb_kube_prop_unpack(struct flb_kube_props *props, const char *buf, size_t size);
+void flb_kube_prop_destroy(struct flb_kube_props *props);
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_kubernetes/kube_props.h b/src/fluent-bit/plugins/filter_kubernetes/kube_props.h
new file mode 100644
index 000000000..79388e43c
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_kubernetes/kube_props.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_KUBE_PROPS_H
+#define FLB_FILTER_KUBE_PROPS_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sds.h>
+
+/* Property structure/array index */
+#define FLB_KUBE_PROPS_STDOUT_PARSER 0
+#define FLB_KUBE_PROPS_STDERR_PARSER 1
+#define FLB_KUBE_PROPS_STDOUT_EXCLUDE 2
+#define FLB_KUBE_PROPS_STDERR_EXCLUDE 3
+#define FLB_KUBE_NUMBER_OF_PROPS 4
+
+#define FLB_KUBE_PROP_UNDEF 0
+#define FLB_KUBE_PROP_FALSE 1
+#define FLB_KUBE_PROP_TRUE 2
+
+struct flb_kube_props {
+ flb_sds_t stdout_parser; /* suggested parser for stdout */
+ flb_sds_t stderr_parser; /* suggested parser for stderr */
+ int stdout_exclude; /* bool: exclude stdout logs ? */
+ int stderr_exclude; /* bool: exclude stderr logs ? */
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_kubernetes/kube_regex.c b/src/fluent-bit/plugins/filter_kubernetes/kube_regex.c
new file mode 100644
index 000000000..e530ecf02
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_kubernetes/kube_regex.c
@@ -0,0 +1,43 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_regex.h>
+
+#include "kube_conf.h"
+#include "kube_regex.h"
+
+int flb_kube_regex_init(struct flb_kube *ctx)
+{
+ /* If a custom parser is not set, use the defaults */
+ if (!ctx->parser) {
+ if (ctx->use_journal == FLB_TRUE) {
+ ctx->regex = flb_regex_create(KUBE_JOURNAL_TO_REGEX);
+ }
+ else {
+ ctx->regex = flb_regex_create(KUBE_TAG_TO_REGEX);
+ }
+ }
+
+ if (!ctx->regex) {
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/filter_kubernetes/kube_regex.h b/src/fluent-bit/plugins/filter_kubernetes/kube_regex.h
new file mode 100644
index 000000000..ae648fd74
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_kubernetes/kube_regex.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_KUBE_REGEX_H
+#define FLB_FILTER_KUBE_REGEX_H
+
+#include "kube_conf.h"
+
+#define KUBE_TAG_TO_REGEX "(?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?<namespace_name>[^_]+)_(?<container_name>.+)-(?<docker_id>[a-z0-9]{64})\\.log$"
+
+#define KUBE_JOURNAL_TO_REGEX "^(?<name_prefix>[^_]+)_(?<container_name>[^\\._]+)(\\.(?<container_hash>[^_]+))?_(?<pod_name>[^_]+)_(?<namespace_name>[^_]+)_[^_]+_[^_]+$"
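+
+/*
+ * Illustrative example (hypothetical names): a tail file name / tag suffix
+ * such as
+ *
+ *   apache-logs_default_apache-0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef.log
+ *
+ * matched against KUBE_TAG_TO_REGEX yields pod_name=apache-logs,
+ * namespace_name=default, container_name=apache and the 64-character
+ * docker_id.
+ */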
+
+int flb_kube_regex_init(struct flb_kube *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_kubernetes/kubernetes.c b/src/fluent-bit/plugins/filter_kubernetes/kubernetes.c
new file mode 100644
index 000000000..f54e08483
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_kubernetes/kubernetes.c
@@ -0,0 +1,1000 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_unescape.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include "kube_conf.h"
+#include "kube_meta.h"
+#include "kube_regex.h"
+#include "kube_property.h"
+
+#include <stdio.h>
+#include <msgpack.h>
+
+/* Merge status used by merge_log_handler() */
+#define MERGE_NONE 0 /* merge unescaped string in temporary buffer */
+#define MERGE_PARSED 1 /* merge parsed string (log_buf) */
+#define MERGE_MAP 2 /* merge direct binary object (v) */
+
+static int get_stream(msgpack_object_map map)
+{
+ int i;
+ msgpack_object k;
+ msgpack_object v;
+
+ for (i = 0; i < map.size; i++) {
+ k = map.ptr[i].key;
+ v = map.ptr[i].val;
+
+ if (k.type == MSGPACK_OBJECT_STR &&
+ strncmp(k.via.str.ptr, "stream", k.via.str.size) == 0) {
+ if (strncmp(v.via.str.ptr, "stdout", v.via.str.size) == 0) {
+ return FLB_KUBE_PROP_STREAM_STDOUT;
+ }
+ else if (strncmp(v.via.str.ptr, "stderr", v.via.str.size) == 0) {
+ return FLB_KUBE_PROP_STREAM_STDERR;
+ }
+ else {
+ return FLB_KUBE_PROP_STREAM_UNKNOWN;
+ }
+ }
+ }
+
+ return FLB_KUBE_PROP_NO_STREAM;
+}
+
+static int value_trim_size(msgpack_object o)
+{
+ int i;
+ int size = o.via.str.size;
+
+ for (i = size - 1; i > 0; i--) {
+ if (o.via.str.ptr[i] == '\n') {
+ size -= 1;
+ continue;
+ }
+
+ if (o.via.str.ptr[i - 1] == '\\' &&
+ (o.via.str.ptr[i] == 'n' || o.via.str.ptr[i] == 'r')) {
+ size -= 2;
+ i--;
+ }
+ else {
+ break;
+ }
+ }
+
+ return size;
+}
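+
+/*
+ * Illustrative examples (hypothetical values) of the trimming above:
+ *
+ *   "hello\n"     (trailing literal newline)         -> trimmed size 5
+ *   "hello\\r\\n" (trailing escaped CR/LF sequences) -> trimmed size 5
+ */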
+
+static int merge_log_handler(msgpack_object o,
+ struct flb_parser *parser,
+ void **out_buf, size_t *out_size,
+ struct flb_time *log_time,
+ struct flb_kube *ctx)
+{
+ int ret;
+ int new_size;
+ int root_type;
+ int records = 0;
+ char *tmp;
+
+ /* Reset vars */
+ *out_buf = NULL;
+ *out_size = 0;
+
+ /* Allocate more space if required */
+ if (o.via.str.size >= ctx->unesc_buf_size) {
+ new_size = o.via.str.size + 1;
+ tmp = flb_realloc(ctx->unesc_buf, new_size);
+ if (tmp) {
+ ctx->unesc_buf = tmp;
+ ctx->unesc_buf_size = new_size;
+ }
+ else {
+ flb_errno();
+ return -1;
+ }
+ }
+
+ /* Copy the string value and append the required NULL byte */
+ ctx->unesc_buf_len = (int) o.via.str.size;
+ memcpy(ctx->unesc_buf, o.via.str.ptr, o.via.str.size);
+ ctx->unesc_buf[ctx->unesc_buf_len] = '\0';
+
+ ret = -1;
+
+ /* Parser set by Annotation */
+ if (parser) {
+ ret = flb_parser_do(parser, ctx->unesc_buf, ctx->unesc_buf_len,
+ out_buf, out_size, log_time);
+ if (ret >= 0) {
+ if (flb_time_to_nanosec(log_time) == 0L) {
+ flb_time_get(log_time);
+ }
+ return MERGE_PARSED;
+ }
+ }
+ else if (ctx->merge_parser) { /* Custom parser 'merge_parser' option */
+ ret = flb_parser_do(ctx->merge_parser,
+ ctx->unesc_buf, ctx->unesc_buf_len,
+ out_buf, out_size, log_time);
+ if (ret >= 0) {
+ if (flb_time_to_nanosec(log_time) == 0L) {
+ flb_time_get(log_time);
+ }
+ return MERGE_PARSED;
+ }
+ }
+ else { /* Default JSON parser */
+ ret = flb_pack_json_recs(ctx->unesc_buf, ctx->unesc_buf_len,
+ (char **) out_buf, out_size, &root_type,
+ &records, NULL);
+ if (ret == 0 && root_type != FLB_PACK_JSON_OBJECT) {
+ flb_plg_debug(ctx->ins, "could not merge JSON, root_type=%i",
+ root_type);
+ flb_free(*out_buf);
+ return MERGE_NONE;
+ }
+
+ if (ret == 0 && records != 1) {
+ flb_plg_debug(ctx->ins,
+ "could not merge JSON, invalid number of records: %i",
+ records);
+ flb_free(*out_buf);
+ return MERGE_NONE;
+ }
+ }
+
+ if (ret == -1) {
+ return MERGE_NONE;
+ }
+
+ return MERGE_PARSED;
+}
+
+static int cb_kube_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config,
+ void *data)
+{
+ int ret;
+ struct flb_kube *ctx;
+ (void) data;
+
+ /* Create configuration context */
+ ctx = flb_kube_conf_create(f_ins, config);
+ if (!ctx) {
+ return -1;
+ }
+
+ /* Initialize regex context */
+ ret = flb_kube_regex_init(ctx);
+ if (ret == -1) {
+ flb_kube_conf_destroy(ctx);
+ return -1;
+ }
+
+ /* Set context */
+ flb_filter_set_context(f_ins, ctx);
+
+ /*
+ * Get Kubernetes Metadata: we gather this at the beginning
+ * as we need this information to process logs in Kubernetes
+ * environment, otherwise the service should not start.
+ */
+ flb_kube_meta_init(ctx, config);
+
+ return 0;
+}
+
+static int pack_map_content(struct flb_log_event_encoder *log_encoder,
+ msgpack_object source_map,
+ const char *kube_buf, size_t kube_size,
+ struct flb_kube_meta *meta,
+ struct flb_time *time_lookup,
+ struct flb_parser *parser,
+ struct flb_kube *ctx)
+{
+ int append_original_objects;
+ int scope_opened;
+ int ret;
+ int i;
+ int map_size = 0;
+ int merge_status = -1;
+ int log_index = -1;
+ int log_buf_entries = 0;
+ size_t off = 0;
+ void *log_buf = NULL;
+ size_t log_size = 0;
+ msgpack_unpacked result;
+ msgpack_object k;
+ msgpack_object v;
+ msgpack_object root;
+ struct flb_time log_time;
+
+ /* Original map size */
+ map_size = source_map.via.map.size;
+
+ /* If merge_log is enabled, we need to lookup the 'log' field */
+ if (ctx->merge_log == FLB_TRUE) {
+ for (i = 0; i < map_size; i++) {
+ k = source_map.via.map.ptr[i].key;
+
+ /* Validate 'log' field */
+ if (k.via.str.size == 3 &&
+ strncmp(k.via.str.ptr, "log", 3) == 0) {
+ log_index = i;
+ break;
+ }
+ }
+ }
+
+ /* reset */
+ flb_time_zero(&log_time);
+
+ /*
+ * If a log_index exists, the application log content inside the
+     * Docker JSON map is an escaped string. Proceed to reserve a temporary
+ * buffer and create an unescaped version.
+ */
+ if (log_index != -1) {
+ v = source_map.via.map.ptr[log_index].val;
+ if (v.type == MSGPACK_OBJECT_MAP) {
+ /* This is the easiest way, no extra processing required */
+ merge_status = MERGE_MAP;
+ }
+ else if (v.type == MSGPACK_OBJECT_STR) {
+ merge_status = merge_log_handler(v, parser,
+ &log_buf, &log_size,
+ &log_time, ctx);
+ }
+ }
+
+ /* Append record timestamp */
+ if (merge_status == MERGE_PARSED) {
+ if (flb_time_to_nanosec(&log_time) == 0L) {
+ ret = flb_log_event_encoder_set_timestamp(
+ log_encoder, time_lookup);
+ }
+ else {
+ ret = flb_log_event_encoder_set_timestamp(
+ log_encoder, &log_time);
+ }
+ }
+ else {
+ ret = flb_log_event_encoder_set_timestamp(
+ log_encoder, time_lookup);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -1;
+ }
+
+ /* If a merged status exists, check the number of entries to merge */
+ if (log_index != -1) {
+ if (merge_status == MERGE_PARSED) {
+ off = 0;
+ msgpack_unpacked_init(&result);
+ msgpack_unpack_next(&result, log_buf, log_size, &off);
+ root = result.data;
+ if (root.type == MSGPACK_OBJECT_MAP) {
+ log_buf_entries = root.via.map.size;
+ }
+ msgpack_unpacked_destroy(&result);
+ }
+ else if (merge_status == MERGE_MAP) {
+ /* object 'v' represents the original binary log */
+ log_buf_entries = v.via.map.size;
+ }
+ }
+
+ if ((merge_status == MERGE_PARSED || merge_status == MERGE_MAP) &&
+ ctx->keep_log == FLB_FALSE) {
+ }
+
+ /* Original map */
+ for (i = 0;
+ i < map_size &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ i++) {
+ k = source_map.via.map.ptr[i].key;
+ v = source_map.via.map.ptr[i].val;
+
+ /*
+         * If log_index is set, it means a log merge was requested, but the
+         * outcome depends on merge_status: if the parsing failed we cannot
+         * merge, so we keep the original 'log' key/value.
+ */
+ append_original_objects = FLB_FALSE;
+
+ if (log_index == i) {
+ if (ctx->keep_log == FLB_TRUE) {
+ if (merge_status == MERGE_NONE || merge_status == MERGE_PARSED){
+ ret = flb_log_event_encoder_append_body_values(
+ log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&k),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->unesc_buf,
+ ctx->unesc_buf_len));
+ }
+ else {
+ append_original_objects = FLB_TRUE;
+ }
+ }
+ else if (merge_status == MERGE_NONE) {
+ append_original_objects = FLB_TRUE;
+ }
+ }
+ else {
+ append_original_objects = FLB_TRUE;
+ }
+
+ if (append_original_objects) {
+ ret = flb_log_event_encoder_append_body_values(
+ log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&k),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&v));
+ }
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -2;
+ }
+
+ scope_opened = FLB_FALSE;
+ /* Merge Log */
+ if (log_index != -1) {
+ if (merge_status == MERGE_PARSED) {
+ if (ctx->merge_log_key && log_buf_entries > 0) {
+ ret = flb_log_event_encoder_append_body_string(
+ log_encoder,
+ ctx->merge_log_key,
+ flb_sds_len(ctx->merge_log_key));
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_body_begin_map(log_encoder);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -3;
+ }
+
+ scope_opened = FLB_TRUE;
+ }
+
+ off = 0;
+ msgpack_unpacked_init(&result);
+ msgpack_unpack_next(&result, log_buf, log_size, &off);
+ root = result.data;
+
+ for (i = 0;
+ i < log_buf_entries &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ i++) {
+ k = root.via.map.ptr[i].key;
+
+ ret = flb_log_event_encoder_append_body_msgpack_object(
+ log_encoder, &k);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -4;
+ }
+
+ v = root.via.map.ptr[i].val;
+
+ /*
+                 * If this is a string value, trim any trailing
+                 * line break or carriage return characters.
+ */
+ if (v.type == MSGPACK_OBJECT_STR &&
+ ctx->merge_log_trim == FLB_TRUE) {
+ ret = flb_log_event_encoder_append_body_string(
+ log_encoder,
+ (char *) v.via.str.ptr,
+ value_trim_size(v));
+ }
+ else {
+ ret = flb_log_event_encoder_append_body_msgpack_object(
+ log_encoder, &v);
+ }
+ }
+
+ msgpack_unpacked_destroy(&result);
+
+ flb_free(log_buf);
+
+ if (scope_opened && ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_body_commit_map(log_encoder);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -5;
+ }
+ }
+ else if (merge_status == MERGE_MAP) {
+ msgpack_object map;
+
+ if (ctx->merge_log_key && log_buf_entries > 0) {
+ ret = flb_log_event_encoder_append_body_string(
+ log_encoder,
+ ctx->merge_log_key,
+ flb_sds_len(ctx->merge_log_key));
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_body_begin_map(log_encoder);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -6;
+ }
+
+ scope_opened = FLB_TRUE;
+ }
+
+ map = source_map.via.map.ptr[log_index].val;
+ for (i = 0;
+ i < map.via.map.size &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ i++) {
+ k = map.via.map.ptr[i].key;
+ v = map.via.map.ptr[i].val;
+
+ ret = flb_log_event_encoder_append_body_values(
+ log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&k),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&v));
+ }
+
+ if (scope_opened && ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_body_commit_map(log_encoder);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -7;
+ }
+ }
+ }
+
+ /* Kubernetes */
+ if (kube_buf && kube_size > 0) {
+ ret = flb_log_event_encoder_append_body_cstring(
+ log_encoder,
+ "kubernetes");
+
+ off = 0;
+ msgpack_unpacked_init(&result);
+ msgpack_unpack_next(&result, kube_buf, kube_size, &off);
+
+ if (kube_size != off) {
+            /* This buffer should contain a single map, so we should not have
+             * to unpack it just to ensure we are appending a single map.
+             * However, since the current code only appends the first entry
+             * without taking any further action, we should warn the user when
+             * more than one entry is present; in the future the unpack code
+             * can then be removed and
+             * flb_log_event_encoder_append_body_raw_msgpack() used directly
+             * with kube_size.
+ */
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_raw_msgpack(log_encoder,
+ (char *) kube_buf, off);
+ }
+
+ msgpack_unpacked_destroy(&result);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -8;
+ }
+
+ return 0;
+}
+
+static int cb_kube_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_bytes,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *filter_context,
+ struct flb_config *config)
+{
+ int ret;
+ size_t pre = 0;
+ size_t off = 0;
+ char *dummy_cache_buf = NULL;
+ const char *cache_buf = NULL;
+ size_t cache_size = 0;
+ msgpack_object map;
+ struct flb_parser *parser = NULL;
+ struct flb_kube *ctx = filter_context;
+ struct flb_kube_meta meta = {0};
+ struct flb_kube_props props = {0};
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ (void) f_ins;
+ (void) i_ins;
+ (void) config;
+
+ if (ctx->use_journal == FLB_FALSE || ctx->dummy_meta == FLB_TRUE) {
+ if (ctx->dummy_meta == FLB_TRUE) {
+ ret = flb_kube_dummy_meta_get(&dummy_cache_buf, &cache_size);
+ cache_buf = dummy_cache_buf;
+ }
+ else {
+ /* Check if we have some cached metadata for the incoming events */
+ ret = flb_kube_meta_get(ctx,
+ tag, tag_len,
+ data, bytes,
+ &cache_buf, &cache_size, &meta, &props);
+ }
+ if (ret == -1) {
+ return FLB_FILTER_NOTOUCH;
+ }
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ flb_kube_meta_release(&meta);
+ flb_kube_prop_destroy(&props);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_kube_meta_release(&meta);
+ flb_kube_prop_destroy(&props);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ off = log_decoder.offset;
+ /*
+         * Journal entries can originate from different Pods, so we are forced
+         * to parse and check their metadata.
+ *
+ * note: when the source is in_tail the situation is different since all
+ * records passed to the filter have a unique source log file.
+ */
+ if (ctx->use_journal == FLB_TRUE && ctx->dummy_meta == FLB_FALSE) {
+ ret = flb_kube_meta_get(ctx,
+ tag, tag_len,
+ (char *) data + pre, off - pre,
+ &cache_buf, &cache_size, &meta, &props);
+ if (ret == -1) {
+ continue;
+ }
+
+ pre = off;
+ }
+
+ parser = NULL;
+
+ switch (get_stream(log_event.body->via.map)) {
+ case FLB_KUBE_PROP_STREAM_STDOUT:
+ {
+ if (props.stdout_exclude == FLB_TRUE) {
+ /* Skip this record */
+ if (ctx->use_journal == FLB_TRUE) {
+ flb_kube_meta_release(&meta);
+ flb_kube_prop_destroy(&props);
+ }
+ continue;
+ }
+ if (props.stdout_parser != NULL) {
+ parser = flb_parser_get(props.stdout_parser, config);
+ }
+ }
+ break;
+ case FLB_KUBE_PROP_STREAM_STDERR:
+ {
+ if (props.stderr_exclude == FLB_TRUE) {
+ /* Skip this record */
+ if (ctx->use_journal == FLB_TRUE) {
+ flb_kube_meta_release(&meta);
+ flb_kube_prop_destroy(&props);
+ }
+ continue;
+ }
+ if (props.stderr_parser != NULL) {
+ parser = flb_parser_get(props.stderr_parser, config);
+ }
+ }
+ break;
+ default:
+ {
+ if (props.stdout_exclude == props.stderr_exclude &&
+ props.stderr_exclude == FLB_TRUE) {
+ continue;
+ }
+ if (props.stdout_parser == props.stderr_parser &&
+ props.stderr_parser != NULL) {
+ parser = flb_parser_get(props.stdout_parser, config);
+ }
+ }
+ break;
+ }
+
+ /* get records map */
+ map = *log_event.body;
+
+ ret = flb_log_event_encoder_begin_record(&log_encoder);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ break;
+ }
+
+ ret = pack_map_content(&log_encoder,
+ map,
+ cache_buf, cache_size,
+ &meta, &log_event.timestamp, parser, ctx);
+ if (ret != 0) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ if (ctx->dummy_meta == FLB_TRUE) {
+ flb_free(dummy_cache_buf);
+ }
+
+ flb_kube_meta_release(&meta);
+ flb_kube_prop_destroy(&props);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_commit_record(&log_encoder);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_log_event_encoder_rollback_record(&log_encoder);
+
+ break;
+ }
+
+ if (ctx->use_journal == FLB_TRUE) {
+ flb_kube_meta_release(&meta);
+ flb_kube_prop_destroy(&props);
+ }
+ }
+
+ /* Release meta fields */
+ if (ctx->use_journal == FLB_FALSE) {
+ flb_kube_meta_release(&meta);
+ flb_kube_prop_destroy(&props);
+ }
+
+ if (ctx->dummy_meta == FLB_TRUE) {
+ flb_free(dummy_cache_buf);
+ }
+
+ *out_buf = log_encoder.output_buffer;
+ *out_bytes = log_encoder.output_length;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return FLB_FILTER_MODIFIED;
+}
+
+static int cb_kube_exit(void *data, struct flb_config *config)
+{
+ struct flb_kube *ctx;
+
+ ctx = data;
+ flb_kube_conf_destroy(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+
+ /* Buffer size for HTTP Client when reading responses from API Server */
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_size", "32K",
+ 0, FLB_TRUE, offsetof(struct flb_kube, buffer_size),
+ "buffer size to receive response from API server",
+ },
+
+ /* TLS: set debug 'level' */
+ {
+ FLB_CONFIG_MAP_INT, "tls.debug", "0",
+ 0, FLB_TRUE, offsetof(struct flb_kube, tls_debug),
+ "set TLS debug level: 0 (no debug), 1 (error), "
+ "2 (state change), 3 (info) and 4 (verbose)"
+ },
+
+ /* TLS: enable verification */
+ {
+ FLB_CONFIG_MAP_BOOL, "tls.verify", "true",
+ 0, FLB_TRUE, offsetof(struct flb_kube, tls_verify),
+ "enable or disable verification of TLS peer certificate"
+ },
+
+ /* TLS: set tls.vhost feature */
+ {
+ FLB_CONFIG_MAP_STR, "tls.vhost", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kube, tls_vhost),
+ "set optional TLS virtual host"
+ },
+
+ /* Merge structured record as independent keys */
+ {
+ FLB_CONFIG_MAP_BOOL, "merge_log", "false",
+ 0, FLB_TRUE, offsetof(struct flb_kube, merge_log),
+ "merge 'log' key content as individual keys"
+ },
+
+ /* Optional parser for 'log' key content */
+ {
+ FLB_CONFIG_MAP_STR, "merge_parser", NULL,
+ 0, FLB_FALSE, 0,
+ "specify a 'parser' name to parse the 'log' key content"
+ },
+
+ /* New key name to merge the structured content of 'log' */
+ {
+ FLB_CONFIG_MAP_STR, "merge_log_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kube, merge_log_key),
+     "set the key name under which the structured content of 'log' will be placed. Only "
+ "used if the option 'merge_log' is enabled"
+ },
+
+ /* On merge, trim field values (remove possible ending \n or \r) */
+ {
+ FLB_CONFIG_MAP_BOOL, "merge_log_trim", "true",
+ 0, FLB_TRUE, offsetof(struct flb_kube, merge_log_trim),
+ "remove ending '\\n' or '\\r' characters from the log content"
+ },
+
+ /* Keep original log key after successful merging/parsing */
+ {
+ FLB_CONFIG_MAP_BOOL, "keep_log", "true",
+ 0, FLB_TRUE, offsetof(struct flb_kube, keep_log),
+ "keep original log content if it was successfully parsed and merged"
+ },
+
+ /* Full Kubernetes API server URL */
+ {
+ FLB_CONFIG_MAP_STR, "kube_url", "https://kubernetes.default.svc",
+ 0, FLB_FALSE, 0,
+ "Kubernetes API server URL"
+ },
+
+ /*
+     * If set, metadata will be loaded from files in this directory,
+     * falling back to the API server when a file does not exist.
+ */
+ {
+ FLB_CONFIG_MAP_STR, "kube_meta_preload_cache_dir", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kube, meta_preload_cache_dir),
+ "set directory with metadata files"
+ },
+
+ /* Kubernetes TLS: CA file */
+ {
+ FLB_CONFIG_MAP_STR, "kube_ca_file", FLB_KUBE_CA,
+ 0, FLB_TRUE, offsetof(struct flb_kube, tls_ca_file),
+ "Kubernetes TLS CA file"
+ },
+
+ /* Kubernetes TLS: CA certs path */
+ {
+ FLB_CONFIG_MAP_STR, "kube_ca_path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kube, tls_ca_path),
+ "Kubernetes TLS ca path"
+ },
+
+ /* Kubernetes Tag prefix */
+ {
+ FLB_CONFIG_MAP_STR, "kube_tag_prefix", FLB_KUBE_TAG_PREFIX,
+ 0, FLB_TRUE, offsetof(struct flb_kube, kube_tag_prefix),
+ "prefix used in tag by the input plugin"
+ },
+
+ /* Kubernetes Token file */
+ {
+ FLB_CONFIG_MAP_STR, "kube_token_file", FLB_KUBE_TOKEN,
+ 0, FLB_TRUE, offsetof(struct flb_kube, token_file),
+ "Kubernetes authorization token file"
+ },
+
+ /* Kubernetes Token command */
+ {
+ FLB_CONFIG_MAP_STR, "kube_token_command", NULL,
+ 0, FLB_FALSE, 0,
+ "command to get Kubernetes authorization token"
+ },
+
+ /* Include Kubernetes Labels in the final record ? */
+ {
+ FLB_CONFIG_MAP_BOOL, "labels", "true",
+ 0, FLB_TRUE, offsetof(struct flb_kube, labels),
+ "include Kubernetes labels on every record"
+ },
+
+ /* Include Kubernetes Annotations in the final record ? */
+ {
+ FLB_CONFIG_MAP_BOOL, "annotations", "true",
+ 0, FLB_TRUE, offsetof(struct flb_kube, annotations),
+ "include Kubernetes annotations on every record"
+ },
+
+ /*
+ * The Application may 'propose' special configuration keys
+ * to the logging agent (Fluent Bit) through the annotations
+ * set in the Pod definition, e.g:
+ *
+ * "annotations": {
+ * "logging": {"parser": "apache"}
+ * }
+ *
+ * As of now, Fluent Bit/filter_kubernetes supports the following
+ * options under the 'logging' map value:
+ *
+     * - k8s-logging.parser:  suggest that Fluent Bit parse the content
+     *                        using the pre-defined parser named in the
+     *                        value (e.g. apache).
+     * - k8s-logging.exclude: allow Pods to exclude themselves from the
+     *                        logging pipeline
+ *
+ * By default all options are disabled, so each option needs to
+ * be enabled manually.
+ */
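+
+    /*
+     * Illustrative (hypothetical) Pod annotations using the keys handled by
+     * kube_property.c when the options below are enabled:
+     *
+     *   metadata:
+     *     annotations:
+     *       fluentbit.io/parser: apache
+     *       fluentbit.io/exclude: "true"
+     */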
+ {
+ FLB_CONFIG_MAP_BOOL, "k8s-logging.parser", "false",
+ 0, FLB_TRUE, offsetof(struct flb_kube, k8s_logging_parser),
+ "allow Pods to suggest a parser"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "k8s-logging.exclude", "false",
+ 0, FLB_TRUE, offsetof(struct flb_kube, k8s_logging_exclude),
+ "allow Pods to exclude themselves from the logging pipeline"
+ },
+
+ /* Use Systemd Journal mode ? */
+ {
+ FLB_CONFIG_MAP_BOOL, "use_journal", "false",
+ 0, FLB_TRUE, offsetof(struct flb_kube, use_journal),
+ "use Journald (Systemd) mode"
+ },
+
+ /* Custom Tag Regex */
+ {
+ FLB_CONFIG_MAP_STR, "regex_parser", NULL,
+ 0, FLB_FALSE, 0,
+ "optional regex parser to extract metadata from container name or container log file name"
+ },
+
+ /* Generate dummy metadata (only for test/dev purposes) */
+ {
+ FLB_CONFIG_MAP_BOOL, "dummy_meta", "false",
+ 0, FLB_TRUE, offsetof(struct flb_kube, dummy_meta),
+ "use 'dummy' metadata, do not talk to API server"
+ },
+
+ /*
+ * Poll DNS status to mitigate unreliable network issues.
+ * See fluent/fluent-bit/2144.
+ */
+ {
+ FLB_CONFIG_MAP_INT, "dns_retries", "6",
+ 0, FLB_TRUE, offsetof(struct flb_kube, dns_retries),
+     "DNS lookup retries N times until the network starts working"
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "dns_wait_time", "30",
+ 0, FLB_TRUE, offsetof(struct flb_kube, dns_wait_time),
+     "DNS wait time between network status checks"
+ },
+ /* Fetch K8s meta when docker_id has changed */
+ {
+ FLB_CONFIG_MAP_BOOL, "cache_use_docker_id", "false",
+ 0, FLB_TRUE, offsetof(struct flb_kube, cache_use_docker_id),
+ "fetch K8s meta when docker_id is changed"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "use_tag_for_meta", "false",
+ 0, FLB_TRUE, offsetof(struct flb_kube, use_tag_for_meta),
+     "use the tag associated with the record to retrieve metadata instead of the kube-server"
+ },
+
+ /*
+ * Enable the feature for using kubelet to get pods information
+ */
+ {
+ FLB_CONFIG_MAP_BOOL, "use_kubelet", "false",
+ 0, FLB_TRUE, offsetof(struct flb_kube, use_kubelet),
+ "use kubelet to get metadata instead of kube-server"
+ },
+ /*
+ * The kubelet host for /pods endpoint, default is 127.0.0.1
+ * Will only check when "use_kubelet" config is set to true
+ */
+ {
+ FLB_CONFIG_MAP_STR, "kubelet_host", "127.0.0.1",
+ 0, FLB_TRUE, offsetof(struct flb_kube, kubelet_host),
+ "kubelet host to connect with when using kubelet"
+ },
+ /*
+ * The kubelet port for /pods endpoint, default is 10250
+ * Will only check when "use_kubelet" config is set to true
+ */
+ {
+ FLB_CONFIG_MAP_INT, "kubelet_port", "10250",
+ 0, FLB_TRUE, offsetof(struct flb_kube, kubelet_port),
+ "kubelet port to connect with when using kubelet"
+ },
+ {
+ FLB_CONFIG_MAP_TIME, "kube_token_ttl", "10m",
+ 0, FLB_TRUE, offsetof(struct flb_kube, kube_token_ttl),
+     "Kubernetes token TTL; the token is re-read from the token file after this interval. Default: 10m"
+ },
+ /*
+ * Set TTL for K8s cached metadata
+ */
+ {
+ FLB_CONFIG_MAP_TIME, "kube_meta_cache_ttl", "0",
+ 0, FLB_TRUE, offsetof(struct flb_kube, kube_meta_cache_ttl),
+ "configurable TTL for K8s cached metadata. "
+ "By default, it is set to 0 which means TTL for cache entries is disabled and "
+ "cache entries are evicted at random when capacity is reached. "
+     "In order to enable this option, set the value to a time interval. "
+     "For example, set this value to 60 or 60s so that cache entries "
+     "created more than 60s ago will be evicted"
+ },
+ /* EOF */
+ {0}
+};
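+
+/*
+ * Minimal illustrative configuration for this filter (hypothetical match
+ * pattern), using options defined in the config map above:
+ *
+ *   [FILTER]
+ *       Name                kubernetes
+ *       Match               kube.*
+ *       Merge_Log           On
+ *       Keep_Log            Off
+ *       K8S-Logging.Parser  On
+ *       K8S-Logging.Exclude On
+ */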
+
+struct flb_filter_plugin filter_kubernetes_plugin = {
+ .name = "kubernetes",
+ .description = "Filter to append Kubernetes metadata",
+ .cb_init = cb_kube_init,
+ .cb_filter = cb_kube_filter,
+ .cb_exit = cb_kube_exit,
+ .config_map = config_map,
+ .flags = 0
+};
diff --git a/src/fluent-bit/plugins/filter_log_to_metrics/CMakeLists.txt b/src/fluent-bit/plugins/filter_log_to_metrics/CMakeLists.txt
new file mode 100644
index 000000000..bc52b4c7f
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_log_to_metrics/CMakeLists.txt
@@ -0,0 +1,8 @@
+if(NOT FLB_METRICS)
+ message(FATAL_ERROR "Log Metrics filter plugin requires FLB_METRICS=On.")
+endif()
+
+set(src
+ log_to_metrics.c)
+
+FLB_PLUGIN(filter_log_to_metrics "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_log_to_metrics/log_to_metrics.c b/src/fluent-bit/plugins/filter_log_to_metrics/log_to_metrics.c
new file mode 100644
index 000000000..a61e4827f
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_log_to_metrics/log_to_metrics.c
@@ -0,0 +1,965 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "log_to_metrics.h"
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_ra_key.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_regex.h>
+#include <fluent-bit/flb_storage.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_utils.h>
+#include <cmetrics/cmetrics.h>
+#include <cmetrics/cmt_gauge.h>
+#include <cmetrics/cmt_counter.h>
+#include <cmetrics/cmt_histogram.h>
+#include <msgpack.h>
+#include <stdio.h>
+#include <sys/types.h>
+
+
+static char kubernetes_label_keys[NUMBER_OF_KUBERNETES_LABELS][16] =
+ { "namespace_name",
+ "pod_name",
+ "container_name",
+ "docker_id",
+ "pod_id"
+ };
+
+static void delete_rules(struct log_to_metrics_ctx *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct grep_rule *rule;
+
+ mk_list_foreach_safe(head, tmp, &ctx->rules) {
+ rule = mk_list_entry(head, struct grep_rule, _head);
+ flb_sds_destroy(rule->field);
+ flb_free(rule->regex_pattern);
+ flb_ra_destroy(rule->ra);
+ flb_regex_destroy(rule->regex);
+ mk_list_del(&rule->_head);
+ flb_free(rule);
+ }
+}
+
+static int log_to_metrics_destroy(struct log_to_metrics_ctx *ctx)
+{
+ int i;
+ if (!ctx) {
+ return 0;
+ }
+    if (ctx->histogram_buckets) {
+ cmt_histogram_buckets_destroy(ctx->histogram_buckets);
+ }
+
+ if (ctx->cmt) {
+ cmt_destroy(ctx->cmt);
+ }
+
+ delete_rules(ctx);
+
+ if (ctx->label_accessors != NULL) {
+ for (i = 0; i < MAX_LABEL_COUNT; i++) {
+ flb_free(ctx->label_accessors[i]);
+ }
+ flb_free(ctx->label_accessors);
+ }
+ if (ctx->label_keys != NULL) {
+ for (i = 0; i < MAX_LABEL_COUNT; i++) {
+ flb_free(ctx->label_keys[i]);
+ }
+ flb_free(ctx->label_keys);
+ }
+ flb_free(ctx->buckets);
+ flb_free(ctx->bucket_counter);
+ flb_free(ctx->label_counter);
+ flb_free(ctx);
+ return 0;
+}
+
+static int set_rules(struct log_to_metrics_ctx *ctx,
+ struct flb_filter_instance *f_ins)
+{
+ flb_sds_t tmp;
+ struct mk_list *head;
+ struct mk_list *split;
+ struct flb_split_entry *sentry;
+ struct flb_kv *kv;
+ struct grep_rule *rule;
+
+ /* Iterate all filter properties */
+ mk_list_foreach(head, &f_ins->properties) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+
+ /* Create a new rule */
+ rule = flb_malloc(sizeof(struct grep_rule));
+ if (!rule) {
+ flb_errno();
+ return -1;
+ }
+
+ /* Get the type */
+ if (strcasecmp(kv->key, "regex") == 0) {
+ rule->type = GREP_REGEX;
+ }
+ else if (strcasecmp(kv->key, "exclude") == 0) {
+ rule->type = GREP_EXCLUDE;
+ }
+ else {
+ flb_free(rule);
+ continue;
+ }
+
+ /* As a value we expect a pair of field name and a regular expression */
+ split = flb_utils_split(kv->val, ' ', 1);
+ if (mk_list_size(split) != 2) {
+ flb_plg_error(ctx->ins,
+ "invalid regex, expected field and regular expression");
+ delete_rules(ctx);
+ flb_free(rule);
+ flb_utils_split_free(split);
+ return -1;
+ }
+
+ /* Get first value (field) */
+ sentry = mk_list_entry_first(split, struct flb_split_entry, _head);
+ if (*sentry->value == '$') {
+ rule->field = flb_sds_create_len(sentry->value, sentry->len);
+ }
+ else {
+ rule->field = flb_sds_create_size(sentry->len + 2);
+ tmp = flb_sds_cat(rule->field, "$", 1);
+ rule->field = tmp;
+
+ tmp = flb_sds_cat(rule->field, sentry->value, sentry->len);
+ rule->field = tmp;
+ }
+
+ /* Get remaining content (regular expression) */
+ sentry = mk_list_entry_last(split, struct flb_split_entry, _head);
+ rule->regex_pattern = flb_strndup(sentry->value, sentry->len);
+ if (rule->regex_pattern == NULL) {
+ flb_errno();
+ delete_rules(ctx);
+ flb_free(rule);
+ flb_utils_split_free(split);
+ return -1;
+ }
+
+ /* Release split */
+ flb_utils_split_free(split);
+
+ /* Create a record accessor context for this rule */
+ rule->ra = flb_ra_create(rule->field, FLB_FALSE);
+ if (!rule->ra) {
+ flb_plg_error(ctx->ins, "invalid record accessor? '%s'",
+ rule->field);
+ delete_rules(ctx);
+ flb_free(rule);
+ return -1;
+ }
+
+ /* Convert string to regex pattern */
+ rule->regex = flb_regex_create(rule->regex_pattern);
+ if (!rule->regex) {
+ flb_plg_error(ctx->ins, "could not compile regex pattern '%s'",
+ rule->regex_pattern);
+ delete_rules(ctx);
+ flb_free(rule);
+ return -1;
+ }
+
+ /* Link to parent list */
+ mk_list_add(&rule->_head, &ctx->rules);
+ }
+
+ return 0;
+}
+
+/* Given a msgpack record, do some filter action based on the defined rules */
+static inline int grep_filter_data(msgpack_object map,
+ struct log_to_metrics_ctx *ctx)
+{
+ ssize_t ret;
+ struct mk_list *head;
+ struct grep_rule *rule;
+
+ /* For each rule, validate against map fields */
+ mk_list_foreach(head, &ctx->rules) {
+ rule = mk_list_entry(head, struct grep_rule, _head);
+
+ ret = flb_ra_regex_match(rule->ra, map, rule->regex, NULL);
+ if (ret <= 0) { /* no match */
+ if (rule->type == GREP_REGEX) {
+ return GREP_RET_EXCLUDE;
+ }
+ }
+ else {
+ if (rule->type == GREP_EXCLUDE) {
+ return GREP_RET_EXCLUDE;
+ }
+ else {
+ return GREP_RET_KEEP;
+ }
+ }
+ }
+
+ return GREP_RET_KEEP;
+}
+
+static int set_labels(struct log_to_metrics_ctx *ctx,
+ char **label_accessors,
+ char **label_keys,
+ int *label_counter,
+ struct flb_filter_instance *f_ins)
+{
+
+ struct mk_list *head;
+ struct mk_list *split;
+ flb_sds_t tmp;
+ struct flb_kv *kv;
+ struct flb_split_entry *sentry;
+ int counter = 0;
+ int i;
+ if (MAX_LABEL_COUNT < NUMBER_OF_KUBERNETES_LABELS){
+ flb_errno();
+ return -1;
+ }
+ if (ctx->kubernetes_mode){
+ for (i = 0; i < NUMBER_OF_KUBERNETES_LABELS; i++){
+ snprintf(label_keys[i], MAX_LABEL_LENGTH - 1, "%s",
+ kubernetes_label_keys[i]);
+ }
+ counter = NUMBER_OF_KUBERNETES_LABELS;
+ }
+
+ /* Iterate all filter properties */
+ mk_list_foreach(head, &f_ins->properties) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+
+        if (counter >= MAX_LABEL_COUNT) {
+            *label_counter = counter;
+            return MAX_LABEL_COUNT;
+        }
+
+ if (strcasecmp(kv->key, "label_field") == 0) {
+ snprintf(label_accessors[counter], MAX_LABEL_LENGTH - 1, "%s", kv->val);
+ snprintf(label_keys[counter], MAX_LABEL_LENGTH - 1, "%s", kv->val);
+ counter++;
+ }
+ else if (strcasecmp(kv->key, "add_label") == 0) {
+ split = flb_utils_split(kv->val, ' ', 1);
+ if (mk_list_size(split) != 2) {
+ flb_plg_error(ctx->ins, "invalid label, expected name and key");
+ flb_utils_split_free(split);
+ return -1;
+ }
+
+ sentry = mk_list_entry_first(split, struct flb_split_entry, _head);
+ tmp = flb_sds_create_len(sentry->value, sentry->len);
+ snprintf(label_keys[counter], MAX_LABEL_LENGTH - 1, "%s", tmp);
+ flb_sds_destroy(tmp);
+
+ sentry = mk_list_entry_last(split, struct flb_split_entry, _head);
+ tmp = flb_sds_create_len(sentry->value, sentry->len);
+ snprintf(label_accessors[counter], MAX_LABEL_LENGTH - 1, "%s", tmp);
+ flb_sds_destroy(tmp);
+ counter++;
+
+ flb_utils_split_free(split);
+ }
+ else {
+ continue;
+ }
+ }
+ *label_counter = counter;
+ return counter;
+}
+
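+/* Validate that 'str' contains only characters that may appear in a decimal
+ * number and convert it with strtod(). Returns 1 on success, 0 otherwise. */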
+static int convert_double(char *str, double *value)
+{
+ char *endptr = str;
+ int valid = 1;
+ int i = 0;
+ /* input validation */
+ for (i = 0; str[i] != '\0'; i++) {
+        if (!(str[i] >= '0' && str[i] <= '9') && str[i] != '.'
+            && str[i] != '-' && str[i] != '+') {
+ valid = 0;
+ break;
+ }
+ }
+ /* convert to double */
+ if (valid) {
+ *value = strtod(str, &endptr);
+ if (str == endptr) {
+ valid = 0;
+ }
+ }
+ return valid;
+}
+
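+/* Simple in-place bubble sort; bucket lists are small, so the quadratic
+ * complexity is not a concern. */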
+static void sort_doubles_ascending(double *arr, int size)
+{
+ int i, j;
+ double tmp;
+
+ for (i = 0; i < size - 1; i++) {
+ for (j = 0; j < size - i - 1; j++) {
+ if (arr[j] > arr[j + 1]) {
+ tmp = arr[j];
+ arr[j] = arr[j + 1];
+ arr[j + 1] = tmp;
+ }
+ }
+ }
+}
+
+static int set_buckets(struct log_to_metrics_ctx *ctx,
+ struct flb_filter_instance *f_ins)
+{
+
+ struct mk_list *head;
+ struct flb_kv *kv;
+ double parsed_double = 0.0;
+ int counter = 0;
+ int valid = 1;
+
+ /* Iterate filter properties to get count of buckets to allocate memory */
+ mk_list_foreach(head, &f_ins->properties) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+
+ if (strcasecmp(kv->key, "bucket") != 0) {
+ continue;
+ }
+ counter++;
+ }
+ /* Allocate the memory for buckets */
+ ctx->buckets = (double *) flb_malloc(counter * sizeof(double));
+ /* Set the buckets */
+ counter = 0;
+ mk_list_foreach(head, &f_ins->properties) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+
+ if (strcasecmp(kv->key, "bucket") != 0) {
+ continue;
+ }
+ valid = convert_double(kv->val, &parsed_double);
+ if(!valid){
+ flb_error("Error during conversion");
+ return -1;
+ }
+ else{
+ ctx->buckets[counter++] = parsed_double;
+ }
+ }
+ *ctx->bucket_counter = counter;
+ sort_doubles_ascending(ctx->buckets, counter);
+ return 0;
+}
+
+static int fill_labels(struct log_to_metrics_ctx *ctx, char **label_values,
+ char kubernetes_label_values
+ [NUMBER_OF_KUBERNETES_LABELS][MAX_LABEL_LENGTH],
+ char **label_accessors, int label_counter, msgpack_object map)
+{
+ int label_iterator_start = 0;
+ int i;
+ struct flb_record_accessor *ra = NULL;
+ struct flb_ra_value *rval = NULL;
+
+ if (label_counter == 0 && !ctx->kubernetes_mode){
+ return 0;
+ }
+ if (MAX_LABEL_COUNT < NUMBER_OF_KUBERNETES_LABELS){
+ flb_errno();
+ return -1;
+ }
+ if (ctx->kubernetes_mode){
+ for (i = 0; i < NUMBER_OF_KUBERNETES_LABELS; i++){
+ if (kubernetes_label_keys[i] == NULL){
+ return -1;
+ }
+ snprintf(label_values[i], MAX_LABEL_LENGTH - 1, "%s",
+ kubernetes_label_values[i]);
+ }
+ label_iterator_start = NUMBER_OF_KUBERNETES_LABELS;
+ }
+
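+    /* Resolve the remaining (custom) labels through record accessors; the
+     * Kubernetes labels, if enabled, already occupy the first slots. */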
+ for (i = label_iterator_start; i < label_counter; i++){
+ ra = flb_ra_create(label_accessors[i], FLB_TRUE);
+ if (!ra) {
+ flb_warn("invalid record accessor key, aborting");
+ break;
+ }
+
+ rval = flb_ra_get_value_object(ra, map);
+ if (!rval) {
+            /* Set value to empty string, so the value will be dropped in CMetrics */
+ label_values[i][0] = '\0';
+ } else if (rval->type == FLB_RA_STRING) {
+ snprintf(label_values[i], MAX_LABEL_LENGTH - 1, "%s",
+ rval->val.string);
+ }
+ else if (rval->type == FLB_RA_FLOAT) {
+ snprintf(label_values[i], MAX_LABEL_LENGTH - 1, "%f",
+ rval->val.f64);
+ }
+ else if (rval->type == FLB_RA_INT) {
+ snprintf(label_values[i], MAX_LABEL_LENGTH - 1, "%ld",
+ (long)rval->val.i64);
+ }
+ else {
+ flb_warn("cannot convert given value to metric");
+ break;
+ }
+ if (rval){
+ flb_ra_key_value_destroy(rval);
+ rval = NULL;
+ }
+ if (ra){
+ flb_ra_destroy(ra);
+ ra = NULL;
+ }
+ }
+ return label_counter;
+}
+
+static int cb_log_to_metrics_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct log_to_metrics_ctx *ctx;
+ flb_sds_t tmp;
+ char metric_description[MAX_METRIC_LENGTH];
+ char metric_name[MAX_METRIC_LENGTH];
+ char value_field[MAX_METRIC_LENGTH];
+ struct flb_input_instance *input_ins;
+ int label_count;
+ int i;
+ /* Create context */
+ ctx = flb_malloc(sizeof(struct log_to_metrics_ctx));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ if (flb_filter_config_map_set(f_ins, ctx) < 0) {
+ flb_errno();
+ flb_plg_error(f_ins, "configuration error");
+ flb_free(ctx);
+ return -1;
+ }
+ mk_list_init(&ctx->rules);
+
+ ctx->ins = f_ins;
+
+ /* Load rules */
+ ret = set_rules(ctx, f_ins);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Set the context */
+ flb_filter_set_context(f_ins, ctx);
+
+ /* Set buckets for histogram */
+ ctx->bucket_counter = NULL;
+ ctx->histogram_buckets = NULL;
+ ctx->buckets = NULL;
+ ctx->bucket_counter = flb_malloc(sizeof(int));
+ if(set_buckets(ctx, f_ins) != 0)
+ {
+ flb_plg_error(f_ins, "Setting buckets failed");
+ log_to_metrics_destroy(ctx);
+ return -1;
+ }
+
+ ctx->label_accessors = NULL;
+ ctx->label_accessors = (char **) flb_malloc(MAX_LABEL_COUNT * sizeof(char *));
+ for (i = 0; i < MAX_LABEL_COUNT; i++) {
+ ctx->label_accessors[i] = flb_malloc(MAX_LABEL_LENGTH * sizeof(char));
+ }
+ /* Set label keys */
+ ctx->label_keys = NULL;
+ ctx->label_keys = (char **) flb_malloc(MAX_LABEL_COUNT * sizeof(char *));
+ for (i = 0; i < MAX_LABEL_COUNT; i++) {
+ ctx->label_keys[i] = flb_malloc(MAX_LABEL_LENGTH * sizeof(char));
+ }
+ ctx->label_counter = NULL;
+ ctx->label_counter = flb_malloc(sizeof(int));
+ label_count = set_labels(ctx, ctx->label_accessors, ctx->label_keys, ctx->label_counter, f_ins);
+ if (label_count < 0){
+ log_to_metrics_destroy(ctx);
+ return -1;
+ }
+
+ /* Check metric tag */
+ if (ctx->tag == NULL || strlen(ctx->tag) == 0) {
+ flb_plg_error(f_ins, "Metric tag is not set");
+ log_to_metrics_destroy(ctx);
+ return -1;
+ }
+
+ /* Check property metric mode */
+ ctx->mode = 0;
+ tmp = (char *)flb_filter_get_property("metric_mode", f_ins);
+ if (tmp != NULL) {
+ if (strcasecmp(tmp, FLB_LOG_TO_METRICS_COUNTER_STR) == 0) {
+ ctx->mode = FLB_LOG_TO_METRICS_COUNTER;
+ }
+ else if (strcasecmp(tmp, FLB_LOG_TO_METRICS_GAUGE_STR) == 0) {
+ ctx->mode = FLB_LOG_TO_METRICS_GAUGE;
+ }
+ else if (strcasecmp(tmp, FLB_LOG_TO_METRICS_HISTOGRAM_STR) == 0) {
+ ctx->mode = FLB_LOG_TO_METRICS_HISTOGRAM;
+ }
+ else {
+            flb_plg_error(f_ins,
+                          "invalid 'metric_mode' value. Only "
+                          "'counter', 'gauge' or "
+                          "'histogram' types are allowed");
+ log_to_metrics_destroy(ctx);
+ return -1;
+ }
+ }
+ else {
+ flb_plg_error(f_ins, "configuration property not set");
+ log_to_metrics_destroy(ctx);
+ return -1;
+ }
+
+ /* Check property metric name */
+ if (ctx->metric_name == NULL || strlen(ctx->metric_name) == 0) {
+ flb_plg_error(f_ins, "metric_name is not set");
+ log_to_metrics_destroy(ctx);
+ return -1;
+ }
+ snprintf(metric_name, sizeof(metric_name) - 1, "%s", ctx->metric_name);
+
+ /* Check property metric description */
+ if (ctx->metric_description == NULL ||
+ strlen(ctx->metric_description) == 0) {
+ flb_plg_error(f_ins, "metric_description is not set");
+ log_to_metrics_destroy(ctx);
+ return -1;
+ }
+ snprintf(metric_description, sizeof(metric_description) - 1, "%s",
+ ctx->metric_description);
+
+ /* Value field only needed for modes gauge and histogram */
+ if (ctx->mode > 0) {
+ if (ctx->value_field == NULL || strlen(ctx->value_field) == 0) {
+ flb_plg_error(f_ins, "value_field is not set");
+ log_to_metrics_destroy(ctx);
+ return -1;
+ }
+ snprintf(value_field, sizeof(value_field) - 1, "%s",
+ ctx->value_field);
+ }
+
+
+ /* Check if buckets are defined for histogram, if not assume defaults */
+    if (ctx->mode == FLB_LOG_TO_METRICS_HISTOGRAM) {
+        if (*ctx->bucket_counter == 0) {
+            flb_plg_error(f_ins,
+                        "buckets are not set for histogram. "
+                        "Will use defaults: 0.005, 0.01, 0.025, "
+                        "0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0");
+ ctx->histogram_buckets = cmt_histogram_buckets_default_create();
+ }
+ else{
+ ctx->histogram_buckets = cmt_histogram_buckets_create_size(
+ ctx->buckets, *ctx->bucket_counter);
+ }
+ }
+
+
+ /* create the metric */
+ ctx->cmt = NULL;
+ ctx->cmt = cmt_create();
+
+ /* Depending on mode create different types of cmetrics metrics */
+ switch (ctx->mode) {
+ case FLB_LOG_TO_METRICS_COUNTER:
+ ctx->c = cmt_counter_create(ctx->cmt, "log_metric", "counter",
+ metric_name, metric_description,
+ label_count, ctx->label_keys);
+ break;
+ case FLB_LOG_TO_METRICS_GAUGE:
+ ctx->g = cmt_gauge_create(ctx->cmt, "log_metric", "gauge",
+ metric_name, metric_description,
+ label_count, ctx->label_keys);
+ break;
+ case FLB_LOG_TO_METRICS_HISTOGRAM:
+ ctx->h = cmt_histogram_create(ctx->cmt, "log_metric", "histogram",
+ metric_name, metric_description,
+ ctx->histogram_buckets,
+ label_count, ctx->label_keys);
+ break;
+ default:
+ flb_plg_error(f_ins, "unsupported mode");
+ log_to_metrics_destroy(ctx);
+ return -1;
+ }
+
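+    /* Create an in-memory 'emitter' input instance; the generated metrics are
+     * appended to it so they re-enter the pipeline as a metrics stream. */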
+ input_ins = flb_input_new(config, "emitter", NULL, FLB_FALSE);
+ if (!input_ins) {
+ flb_plg_error(f_ins, "cannot create metrics emitter instance");
+ log_to_metrics_destroy(ctx);
+ return -1;
+ }
+
+ /* Set the storage type for emitter */
+ ret = flb_input_set_property(input_ins, "storage.type", "memory");
+ if (ret == -1) {
+ flb_plg_error(f_ins, "cannot set storage type for emitter instance");
+ log_to_metrics_destroy(ctx);
+ return -1;
+ }
+
+ /* Initialize emitter plugin */
+ ret = flb_input_instance_init(input_ins, config);
+ if (ret == -1) {
+ flb_errno();
+ flb_plg_error(f_ins, "cannot initialize metrics emitter instance.");
+ log_to_metrics_destroy(ctx);
+ return -1;
+ }
+
+ ret = flb_storage_input_create(config->cio, input_ins);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "cannot initialize storage for metrics stream");
+ log_to_metrics_destroy(ctx);
+ return -1;
+ }
+ ctx->input_ins = input_ins;
+
+ return 0;
+}
+
+static int cb_log_to_metrics_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_size,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins, void *context,
+ struct flb_config *config)
+{
+ int ret;
+ msgpack_unpacked result;
+ msgpack_object map;
+ msgpack_object root;
+ size_t off = 0;
+ msgpack_sbuffer tmp_sbuf;
+ msgpack_packer tmp_pck;
+ uint64_t ts;
+ struct log_to_metrics_ctx *ctx = context;
+ struct flb_ra_value *rval = NULL;
+ struct flb_record_accessor *ra = NULL;
+ char fmt[MAX_LABEL_LENGTH];
+ char **label_values = NULL;
+ int label_count = 0;
+ int i;
+ double gauge_value = 0;
+ double histogram_value = 0;
+ char kubernetes_label_values
+ [NUMBER_OF_KUBERNETES_LABELS][MAX_LABEL_LENGTH];
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&tmp_sbuf);
+ msgpack_packer_init(&tmp_pck, &tmp_sbuf, msgpack_sbuffer_write);
+
+ /* Iterate each item array and apply rules and generate metric values */
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, data, bytes, &off) ==
+ MSGPACK_UNPACK_SUCCESS) {
+ root = result.data;
+ if (root.type != MSGPACK_OBJECT_ARRAY) {
+ continue;
+ }
+
+ /* get time and map */
+ map = root.via.array.ptr[1];
+
+ ret = grep_filter_data(map, context);
+ if (ret == GREP_RET_KEEP) {
+ ts = cfl_time_now();
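+            /* In Kubernetes mode, resolve the well-known $kubernetes[...] keys
+             * from the record to fill the first label values. */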
+ if(ctx->kubernetes_mode){
+ for(i = 0; i < NUMBER_OF_KUBERNETES_LABELS; i++){
+ if (kubernetes_label_keys[i] == NULL){
+ flb_error("error during kubernetes label processing. "
+ "Skipping labels.");
+                        *ctx->label_counter = 0;
+ break;
+ }
+ snprintf(fmt, MAX_LABEL_LENGTH - 1, "$kubernetes['%s']",
+ kubernetes_label_keys[i]);
+ ra = flb_ra_create(fmt, FLB_TRUE);
+ if (!ra) {
+ flb_error("invalid record accessor key, aborting");
+ break;
+ }
+ rval = flb_ra_get_value_object(ra, map);
+ if (!rval) {
+ flb_error("given value field is empty or not "
+ "existent: %s. Skipping labels.", fmt);
+                        *ctx->label_counter = 0;
+ }
+ else if (rval->type != FLB_RA_STRING) {
+ flb_plg_error(f_ins,
+ "cannot access label %s", kubernetes_label_keys[i]);
+ break;
+ }
+ else {
+ snprintf(kubernetes_label_values[i],
+ MAX_LABEL_LENGTH - 1, "%s", rval->val.string);
+ }
+ if (rval){
+ flb_ra_key_value_destroy(rval);
+ rval = NULL;
+ }
+ if (ra){
+ flb_ra_destroy(ra);
+ ra = NULL;
+ }
+ }
+ }
+            if (*ctx->label_counter > 0) {
+ /* Fill optional labels */
+ label_values = flb_malloc(MAX_LABEL_COUNT * sizeof(char *));
+ for (i = 0; i < MAX_LABEL_COUNT; i++) {
+ label_values[i] = flb_malloc(MAX_LABEL_LENGTH *
+ sizeof(char));
+ }
+
+ label_count = fill_labels(ctx, label_values,
+ kubernetes_label_values, ctx->label_accessors,
+ *ctx->label_counter, map);
+ if (label_count != *ctx->label_counter){
+ label_count = 0;
+ }
+ }
+
+ /* Calculating and setting metric depending on the mode */
+ switch (ctx->mode) {
+ case FLB_LOG_TO_METRICS_COUNTER:
+ ret = cmt_counter_inc(ctx->c, ts, label_count,
+ label_values);
+ break;
+
+ case FLB_LOG_TO_METRICS_GAUGE:
+ ra = flb_ra_create(ctx->value_field, FLB_TRUE);
+ if (!ra) {
+ flb_error("invalid record accessor key, aborting");
+ break;
+ }
+
+ rval = flb_ra_get_value_object(ra, map);
+
+ if (!rval) {
+ flb_warn("given value field is empty or not existent");
+ break;
+ }
+ if (rval->type == FLB_RA_STRING) {
+ sscanf(rval->val.string, "%lf", &gauge_value);
+ }
+ else if (rval->type == FLB_RA_FLOAT) {
+ gauge_value = rval->val.f64;
+ }
+ else if (rval->type == FLB_RA_INT) {
+ gauge_value = (double)rval->val.i64;
+ }
+ else {
+ flb_plg_error(f_ins,
+ "cannot convert given value to metric");
+ break;
+ }
+
+ ret = cmt_gauge_set(ctx->g, ts, gauge_value,
+ label_count, label_values);
+ if (rval) {
+ flb_ra_key_value_destroy(rval);
+ rval = NULL;
+ }
+ if (ra) {
+ flb_ra_destroy(ra);
+ ra = NULL;
+ }
+ break;
+
+ case FLB_LOG_TO_METRICS_HISTOGRAM:
+ ra = flb_ra_create(ctx->value_field, FLB_TRUE);
+ if (!ra) {
+ flb_error("invalid record accessor key, aborting");
+ break;
+ }
+
+ rval = flb_ra_get_value_object(ra, map);
+
+ if (!rval) {
+ flb_warn("given value field is empty or not existent");
+ break;
+ }
+ if (rval->type == FLB_RA_STRING) {
+ sscanf(rval->val.string, "%lf", &histogram_value);
+ }
+ else if (rval->type == FLB_RA_FLOAT) {
+ histogram_value = rval->val.f64;
+ }
+ else if (rval->type == FLB_RA_INT) {
+ histogram_value = (double)rval->val.i64;
+ }
+ else {
+ flb_plg_error(f_ins,
+ "cannot convert given value to metric");
+ break;
+ }
+
+ ret = cmt_histogram_observe(ctx->h, ts, histogram_value,
+ label_count, label_values);
+ if (rval) {
+ flb_ra_key_value_destroy(rval);
+ rval = NULL;
+ }
+ if (ra) {
+ flb_ra_destroy(ra);
+ ra = NULL;
+ }
+ break;
+ default:
+ flb_plg_error(f_ins, "unsupported mode");
+ log_to_metrics_destroy(ctx);
+ return -1;
+ }
+
+ ret = flb_input_metrics_append(ctx->input_ins, ctx->tag, strlen(ctx->tag), ctx->cmt);
+
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "could not append metrics");
+ }
+
+ /* Cleanup */
+ msgpack_unpacked_destroy(&result);
+ if (label_values != NULL){
+ for (i = 0; i < MAX_LABEL_COUNT; i++) {
+ if (label_values[i] != NULL){
+ flb_free(label_values[i]);
+ }
+ }
+                flb_free(label_values);
+                label_values = NULL;
+            }
+ }
+ else if (ret == GREP_RET_EXCLUDE) {
+ /* Do nothing */
+ }
+ }
+ /* Cleanup */
+ msgpack_unpacked_destroy(&result);
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+
+ /* Do not modify message stream */
+ return FLB_FILTER_NOTOUCH;
+}
+
+static int cb_log_to_metrics_exit(void *data, struct flb_config *config)
+{
+ struct log_to_metrics_ctx *ctx = data;
+
+ return log_to_metrics_destroy(ctx);
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "regex", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Optional filter for records in which the content of KEY "
+ "matches the regular expression."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "exclude", NULL,
+     FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+     "Optional filter for records in which the content of KEY "
+     "does not match the regular expression."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "metric_mode", "counter",
+ FLB_FALSE, FLB_TRUE,
+     offsetof(struct log_to_metrics_ctx, mode),
+     "Mode selector. Allowed values are counter, gauge,"
+     " or histogram. Summary is not supported."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "value_field", NULL,
+ FLB_FALSE, FLB_TRUE,
+ offsetof(struct log_to_metrics_ctx, value_field),
+ "Numeric field to use for gauge or histogram"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "metric_name", NULL,
+ FLB_FALSE, FLB_TRUE,
+ offsetof(struct log_to_metrics_ctx, metric_name),
+ "Name of metric"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "metric_description", NULL,
+ FLB_FALSE, FLB_TRUE,
+ offsetof(struct log_to_metrics_ctx, metric_description),
+ "Help text for metric"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "kubernetes_mode", "false",
+ 0, FLB_TRUE, offsetof(struct log_to_metrics_ctx, kubernetes_mode),
+ "Enable kubernetes log metric fields"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "add_label", NULL,
+     FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+     "Add a label to the metric; the value supports the record accessor pattern"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "label_field", NULL,
+     FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+     "Specify a message field that should be included as a label in the metric"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "bucket", NULL,
+     FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+     "Specify a bucket boundary for the histogram metric"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "tag", NULL,
+ FLB_FALSE, FLB_TRUE,
+ offsetof(struct log_to_metrics_ctx, tag),
+ "Metric Tag"
+ },
+ {0}
+};
+
+struct flb_filter_plugin filter_log_to_metrics_plugin = {
+ .name = "log_to_metrics",
+ .description = "generate log derived metrics",
+ .cb_init = cb_log_to_metrics_init,
+ .cb_filter = cb_log_to_metrics_filter,
+ .cb_exit = cb_log_to_metrics_exit,
+ .config_map = config_map,
+ .flags = 0};
diff --git a/src/fluent-bit/plugins/filter_log_to_metrics/log_to_metrics.h b/src/fluent-bit/plugins/filter_log_to_metrics/log_to_metrics.h
new file mode 100644
index 000000000..6edb5ab30
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_log_to_metrics/log_to_metrics.h
@@ -0,0 +1,85 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_LOG_TO_METRICS_H
+#define FLB_FILTER_LOG_TO_METRICS_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_record_accessor.h>
+
+/* rule types */
+#define GREP_REGEX 1
+#define GREP_EXCLUDE 2
+
+/* actions */
+#define GREP_RET_KEEP 0
+#define GREP_RET_EXCLUDE 1
+
+/* modes */
+#define FLB_LOG_TO_METRICS_COUNTER_STR "counter"
+#define FLB_LOG_TO_METRICS_GAUGE_STR "gauge"
+#define FLB_LOG_TO_METRICS_HISTOGRAM_STR "histogram"
+
+
+#define FLB_LOG_TO_METRICS_COUNTER 0
+#define FLB_LOG_TO_METRICS_GAUGE 1
+#define FLB_LOG_TO_METRICS_HISTOGRAM 2
+
+#define NUMBER_OF_KUBERNETES_LABELS 5
+#define MAX_LABEL_LENGTH 253
+#define MAX_METRIC_LENGTH 253
+#define MAX_LABEL_COUNT 32
+
+
+struct log_to_metrics_ctx
+{
+ struct mk_list rules;
+ struct flb_filter_instance *ins;
+ int mode;
+ flb_sds_t metric_name;
+ flb_sds_t metric_description;
+ struct cmt *cmt;
+ struct flb_input_instance *input_ins;
+ flb_sds_t value_field;
+ struct cmt_counter *c;
+ struct cmt_gauge *g;
+ struct cmt_histogram *h;
+ struct cmt_histogram_buckets *histogram_buckets;
+ char **label_accessors;
+ char **label_keys;
+ int *label_counter;
+ bool kubernetes_mode;
+ flb_sds_t tag;
+ int *bucket_counter;
+ double *buckets;
+};
+
+struct grep_rule
+{
+ int type;
+ flb_sds_t field;
+ char *regex_pattern;
+ struct flb_regex *regex;
+ struct flb_record_accessor *ra;
+ struct mk_list _head;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_lua/CMakeLists.txt b/src/fluent-bit/plugins/filter_lua/CMakeLists.txt
new file mode 100644
index 000000000..2812e3622
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_lua/CMakeLists.txt
@@ -0,0 +1,13 @@
+set(src
+ lua_config.c
+ lua.c)
+
+if(MSVC)
+ FLB_PLUGIN(filter_lua "${src}" "")
+else()
+ FLB_PLUGIN(filter_lua "${src}" "m")
+endif()
+
+if(FLB_FILTER_LUA_USE_MPACK)
+ add_definitions(-DFLB_FILTER_LUA_USE_MPACK)
+endif()
diff --git a/src/fluent-bit/plugins/filter_lua/lua.c b/src/fluent-bit/plugins/filter_lua/lua.c
new file mode 100644
index 000000000..bb7bb566a
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_lua/lua.c
@@ -0,0 +1,713 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_luajit.h>
+#include <fluent-bit/flb_lua.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+#include "fluent-bit/flb_mem.h"
+#include "lua.h"
+#include "lua_config.h"
+#include "mpack/mpack.h"
+
+static int cb_lua_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config,
+ void *data)
+{
+ int err;
+ int ret;
+ (void) data;
+ struct lua_filter *ctx;
+ struct flb_luajit *lj;
+
+ /* Create context */
+ ctx = lua_config_create(f_ins, config);
+ if (!ctx) {
+ flb_error("[filter_lua] filter cannot be loaded");
+ return -1;
+ }
+
+ /* Create LuaJIT state/vm */
+ lj = flb_luajit_create(config);
+ if (!lj) {
+ lua_config_destroy(ctx);
+ return -1;
+ }
+ ctx->lua = lj;
+
+ if (ctx->enable_flb_null) {
+ flb_lua_enable_flb_null(lj->state);
+ }
+
+ /* Lua script source code */
+ if (ctx->code) {
+ ret = flb_luajit_load_buffer(ctx->lua,
+ ctx->code, flb_sds_len(ctx->code),
+ "fluentbit.lua");
+ }
+ else {
+ /* Load Script / file path*/
+ ret = flb_luajit_load_script(ctx->lua, ctx->script);
+ }
+
+ if (ret == -1) {
+ lua_config_destroy(ctx);
+ return -1;
+ }
+
+ err = lua_pcall(ctx->lua->state, 0, 0, 0);
+ if (err != 0) {
+ flb_error("[luajit] invalid lua content, error=%d: %s",
+ err, lua_tostring(lj->state, -1));
+ lua_pop(lj->state, 1);
+ lua_config_destroy(ctx);
+ return -1;
+ }
+
+
+ if (flb_lua_is_valid_func(ctx->lua->state, ctx->call) != FLB_TRUE) {
+ flb_plg_error(ctx->ins, "function %s is not found", ctx->call);
+ lua_config_destroy(ctx);
+ return -1;
+ }
+
+ /* Initialize packing buffer */
+ ctx->packbuf = flb_sds_create_size(1024);
+    if (!ctx->packbuf) {
+        flb_error("[filter_lua] failed to allocate packbuf");
+        flb_luajit_destroy(ctx->lua);
+        lua_config_destroy(ctx);
+        return -1;
+    }
+
+ /* Set context */
+ flb_filter_set_context(f_ins, ctx);
+
+ return 0;
+}
+
+#ifdef FLB_FILTER_LUA_USE_MPACK
+
+#pragma message "This code does not support the new log event encoding format"
+
+static void mpack_buffer_flush(mpack_writer_t* writer, const char* buffer, size_t count)
+{
+ struct lua_filter *ctx = writer->context;
+ flb_sds_cat_safe(&ctx->packbuf, buffer, count);
+}
+
+static void pack_result_mpack(lua_State *l,
+ mpack_writer_t *writer,
+ struct flb_lua_l2c_config *l2cc,
+ struct flb_time *t)
+{
+ int i;
+ int len;
+
+ if (lua_type(l, -1) != LUA_TTABLE) {
+ return;
+ }
+
+ len = flb_lua_arraylength(l);
+ if (len > 0) {
+ /* record split */
+ for (i = 1; i <= len; i++) {
+ /* write array tag */
+ mpack_write_tag(writer, mpack_tag_array(2));
+ /* write header tag */
+ mpack_write_tag(writer, mpack_tag_array(2));
+ /* write timestamp */
+ flb_time_append_to_mpack(writer, t, 0);
+ /* write metadata */
+ mpack_write_tag(writer, mpack_tag_map(0));
+ /* get the subrecord */
+ lua_rawgeti(l, -1, i);
+ /* convert */
+ flb_lua_tompack(l, writer, 0, l2cc);
+ lua_pop(l, 1);
+ }
+ }
+ else {
+ /* write array tag */
+ mpack_write_tag(writer, mpack_tag_array(2));
+ /* write header tag */
+ mpack_write_tag(writer, mpack_tag_array(2));
+ /* write timestamp */
+ flb_time_append_to_mpack(writer, t, 0);
+ /* write metadata */
+ mpack_write_tag(writer, mpack_tag_map(0));
+ /* convert */
+ flb_lua_tompack(l, writer, 0, l2cc);
+ }
+ /* pop */
+ lua_pop(l, 1);
+}
+
+static int cb_lua_filter_mpack(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_bytes,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *filter_context,
+ struct flb_config *config)
+{
+ (void) i_ins;
+ int ret;
+ struct flb_time t_orig;
+ struct flb_time t;
+ struct lua_filter *ctx = filter_context;
+ double ts = 0;
+ int l_code;
+ double l_timestamp;
+ char *outbuf;
+ char writebuf[1024];
+    mpack_writer_t writer;
+    mpack_reader_t reader;
+
+    flb_sds_len_set(ctx->packbuf, 0);
+    mpack_reader_init_data(&reader, data, bytes);
+
+ while (bytes > 0) {
+ /* Save record start */
+ const char *record_start = reader.data;
+ size_t record_size = 0;
+
+ /* This is a hack, in order to have this thing work
+ * we rely on flb_time_pop_from_mpack skipping the
+ * metadata map.
+ */
+
+ /* Get timestamp */
+ if (flb_time_pop_from_mpack(&t, &reader)) {
+ /* failed to parse */
+ return FLB_FILTER_NOTOUCH;
+ }
+ t_orig = t;
+
+ /* Prepare function call, pass 3 arguments, expect 3 return values */
+ lua_getglobal(ctx->lua->state, ctx->call);
+ lua_pushstring(ctx->lua->state, tag);
+
+ /* Timestamp */
+ if (ctx->time_as_table == FLB_TRUE) {
+ flb_lua_pushtimetable(ctx->lua->state, &t);
+ }
+ else {
+ ts = flb_time_to_double(&t);
+ lua_pushnumber(ctx->lua->state, ts);
+ }
+
+ if (flb_lua_pushmpack(ctx->lua->state, &reader)) {
+ return FLB_FILTER_NOTOUCH;
+ }
+ record_size = reader.data - record_start;
+ bytes -= record_size;
+
+ if (ctx->protected_mode) {
+ ret = lua_pcall(ctx->lua->state, 3, 3, 0);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "error code %d: %s",
+ ret, lua_tostring(ctx->lua->state, -1));
+ lua_pop(ctx->lua->state, 1);
+ return FLB_FILTER_NOTOUCH;
+ }
+ }
+ else {
+ lua_call(ctx->lua->state, 3, 3);
+ }
+
+ /* Returned values are on the stack in the following order:
+ * -1: table/record
+ * -2: timestamp
+ * -3: code
+ * since we will process code first, then timestamp then record,
+ * we need to swap
+ *
+ * use lua_insert to put the table/record on the bottom */
+ lua_insert(ctx->lua->state, -3);
+ /* now swap timestamp with code */
+ lua_insert(ctx->lua->state, -2);
+
+ /* check code */
+ l_code = (int) lua_tointeger(ctx->lua->state, -1);
+ lua_pop(ctx->lua->state, 1);
+
+ if (l_code == -1) { /* Skip record */
+ lua_pop(ctx->lua->state, 2);
+ continue;
+ }
+ else if (l_code == 0) { /* Keep record, copy original to packbuf */
+ flb_sds_cat_safe(&ctx->packbuf, record_start, record_size);
+ lua_pop(ctx->lua->state, 2);
+ continue;
+ }
+ else if (l_code != 1 && l_code != 2) {/* Unexpected return code, keep original content */
+ flb_sds_cat_safe(&ctx->packbuf, record_start, record_size);
+ lua_pop(ctx->lua->state, 2);
+ flb_plg_error(ctx->ins, "unexpected Lua script return code %i, "
+ "original record will be kept." , l_code);
+ continue;
+ }
+
+ /* process record timestamp */
+ l_timestamp = ts;
+ if (ctx->time_as_table == FLB_TRUE) {
+ if (lua_type(ctx->lua->state, -1) == LUA_TTABLE) {
+ /* Retrieve seconds */
+ lua_getfield(ctx->lua->state, -1, "sec");
+ t.tm.tv_sec = lua_tointeger(ctx->lua->state, -1);
+ lua_pop(ctx->lua->state, 1);
+
+ /* Retrieve nanoseconds */
+ lua_getfield(ctx->lua->state, -1, "nsec");
+ t.tm.tv_nsec = lua_tointeger(ctx->lua->state, -1);
+ lua_pop(ctx->lua->state, 2);
+ }
+ else {
+ flb_plg_error(ctx->ins, "invalid lua timestamp type returned");
+ t = t_orig;
+ }
+ }
+ else {
+ l_timestamp = (double) lua_tonumber(ctx->lua->state, -1);
+ lua_pop(ctx->lua->state, 1);
+ }
+
+ if (l_code == 1) {
+ if (ctx->time_as_table == FLB_FALSE) {
+ flb_time_from_double(&t, l_timestamp);
+ }
+ }
+ else if (l_code == 2) {
+ /* Keep the timestamp */
+ t = t_orig;
+ }
+
+ /* process the record table */
+ /* initialize writer and set packbuf as context */
+ mpack_writer_init(&writer, writebuf, sizeof(writebuf));
+ mpack_writer_set_context(&writer, ctx);
+ mpack_writer_set_flush(&writer, mpack_buffer_flush);
+ /* write the result */
+ pack_result_mpack(ctx->lua->state, &writer, &ctx->l2cc, &t);
+ /* flush the writer */
+ mpack_writer_flush_message(&writer);
+ mpack_writer_destroy(&writer);
+ }
+
+ if (flb_sds_len(ctx->packbuf) == 0) {
+ /* All records are removed */
+ *out_buf = NULL;
+ *out_bytes = 0;
+ return FLB_FILTER_MODIFIED;
+ }
+
+ /* allocate outbuf that contains the modified chunks */
+ outbuf = flb_malloc(flb_sds_len(ctx->packbuf));
+ if (!outbuf) {
+ flb_plg_error(ctx->ins, "failed to allocate outbuf");
+ return FLB_FILTER_NOTOUCH;
+ }
+ memcpy(outbuf, ctx->packbuf, flb_sds_len(ctx->packbuf));
+ /* link new buffer */
+ *out_buf = outbuf;
+ *out_bytes = flb_sds_len(ctx->packbuf);
+
+ return FLB_FILTER_MODIFIED;
+}
+
+#else
+
+static int pack_record(struct lua_filter *ctx,
+ struct flb_log_event_encoder *log_encoder,
+ struct flb_time *ts,
+ msgpack_object *metadata,
+ msgpack_object *body)
+{
+ int ret;
+
+ ret = flb_log_event_encoder_begin_record(log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(log_encoder, ts);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS && metadata != NULL) {
+ ret = flb_log_event_encoder_set_metadata_from_msgpack_object(
+ log_encoder, metadata);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_body_from_msgpack_object(
+ log_encoder, body);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(log_encoder);
+ }
+
+ return ret;
+}
+
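+/*
+ * Pack the value returned by the Lua script: a single map becomes one record,
+ * while an array of maps is split into multiple records that share the same
+ * timestamp and metadata.
+ */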
+static int pack_result (struct lua_filter *ctx, struct flb_time *ts,
+ msgpack_object *metadata,
+ struct flb_log_event_encoder *log_encoder,
+ char *data, size_t bytes)
+{
+ int ret;
+ size_t index = 0;
+ size_t off = 0;
+ msgpack_object *entry;
+ msgpack_unpacked result;
+
+ msgpack_unpacked_init(&result);
+
+ ret = msgpack_unpack_next(&result, data, bytes, &off);
+
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ msgpack_unpacked_destroy(&result);
+
+ return FLB_FALSE;
+ }
+
+ if (result.data.type == MSGPACK_OBJECT_MAP) {
+ ret = pack_record(ctx, log_encoder,
+ ts, metadata, &result.data);
+
+ msgpack_unpacked_destroy(&result);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return FLB_FALSE;
+ }
+
+ return FLB_TRUE;
+ }
+ else if (result.data.type == MSGPACK_OBJECT_ARRAY) {
+ for (index = 0 ; index < result.data.via.array.size ; index++) {
+ entry = &result.data.via.array.ptr[index];
+
+ if (entry->type == MSGPACK_OBJECT_MAP) {
+ ret = pack_record(ctx, log_encoder,
+ ts, metadata, entry);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ msgpack_unpacked_destroy(&result);
+
+ return FLB_FALSE;
+ }
+ }
+ else {
+ msgpack_unpacked_destroy(&result);
+
+ return FLB_FALSE;
+ }
+ }
+
+ msgpack_unpacked_destroy(&result);
+
+ return FLB_TRUE;
+ }
+
+ msgpack_unpacked_destroy(&result);
+
+ return FLB_FALSE;
+}
+
+static int cb_lua_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_bytes,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *filter_context,
+ struct flb_config *config)
+{
+ int ret;
+ double ts = 0;
+ struct flb_time t_orig;
+ struct flb_time t;
+ struct lua_filter *ctx = filter_context;
+ /* Lua return values */
+ int l_code;
+ double l_timestamp;
+ msgpack_packer data_pck;
+ msgpack_sbuffer data_sbuf;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ (void) f_ins;
+ (void) i_ins;
+ (void) config;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ msgpack_sbuffer_init(&data_sbuf);
+ msgpack_packer_init(&data_pck, &data_sbuf, msgpack_sbuffer_write);
+
+ /* Get timestamp */
+ flb_time_copy(&t, &log_event.timestamp);
+ flb_time_copy(&t_orig, &log_event.timestamp);
+
+ /* Prepare function call, pass 3 arguments, expect 3 return values */
+ lua_getglobal(ctx->lua->state, ctx->call);
+ lua_pushstring(ctx->lua->state, tag);
+
+ /* Timestamp */
+ if (ctx->time_as_table == FLB_TRUE) {
+ flb_lua_pushtimetable(ctx->lua->state, &t);
+ }
+ else {
+ ts = flb_time_to_double(&t);
+ lua_pushnumber(ctx->lua->state, ts);
+ }
+
+ flb_lua_pushmsgpack(ctx->lua->state, log_event.body);
+ if (ctx->protected_mode) {
+ ret = lua_pcall(ctx->lua->state, 3, 3, 0);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "error code %d: %s",
+ ret, lua_tostring(ctx->lua->state, -1));
+ lua_pop(ctx->lua->state, 1);
+
+ msgpack_sbuffer_destroy(&data_sbuf);
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+ }
+ else {
+ lua_call(ctx->lua->state, 3, 3);
+ }
+
+ /* Initialize Return values */
+ l_code = 0;
+ l_timestamp = ts;
+
+ flb_lua_tomsgpack(ctx->lua->state, &data_pck, 0, &ctx->l2cc);
+ lua_pop(ctx->lua->state, 1);
+
+ /* Lua table */
+ if (ctx->time_as_table == FLB_TRUE) {
+ if (lua_type(ctx->lua->state, -1) == LUA_TTABLE) {
+ /* Retrieve seconds */
+ lua_getfield(ctx->lua->state, -1, "sec");
+ t.tm.tv_sec = lua_tointeger(ctx->lua->state, -1);
+ lua_pop(ctx->lua->state, 1);
+
+ /* Retrieve nanoseconds */
+ lua_getfield(ctx->lua->state, -1, "nsec");
+ t.tm.tv_nsec = lua_tointeger(ctx->lua->state, -1);
+ lua_pop(ctx->lua->state, 2);
+ }
+ else {
+ flb_plg_error(ctx->ins, "invalid lua timestamp type returned");
+ t = t_orig;
+ }
+ }
+ else {
+ l_timestamp = (double) lua_tonumber(ctx->lua->state, -1);
+ lua_pop(ctx->lua->state, 1);
+ }
+
+ l_code = (int) lua_tointeger(ctx->lua->state, -1);
+ lua_pop(ctx->lua->state, 1);
+
+ if (l_code == -1) { /* Skip record */
+ msgpack_sbuffer_destroy(&data_sbuf);
+ continue;
+ }
+ else if (l_code == 1 || l_code == 2) { /* Modified, pack new data */
+ if (l_code == 1) {
+ if (ctx->time_as_table == FLB_FALSE) {
+ flb_time_from_double(&t, l_timestamp);
+ }
+ }
+ else if (l_code == 2) {
+ /* Keep the timestamp */
+ t = t_orig;
+ }
+
+ ret = pack_result(ctx, &t, log_event.metadata, &log_encoder,
+ data_sbuf.data, data_sbuf.size);
+
+ if (ret == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "invalid table returned at %s(), %s",
+ ctx->call, ctx->script);
+ msgpack_sbuffer_destroy(&data_sbuf);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+ }
+ else { /* Unexpected return code, keep original content */
+ /* Code 0 means Keep record, so we don't emit the warning */
+ if (l_code != 0) {
+ flb_plg_error(ctx->ins,
+ "unexpected Lua script return code %i, "
+ "original record will be kept." , l_code);
+ }
+
+ ret = flb_log_event_encoder_emit_raw_record(
+ &log_encoder,
+ log_decoder.record_base,
+ log_decoder.record_length);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", ret);
+ }
+ }
+
+ msgpack_sbuffer_destroy(&data_sbuf);
+ }
+
+ if (ret == FLB_EVENT_DECODER_ERROR_INSUFFICIENT_DATA) {
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ *out_buf = log_encoder.output_buffer;
+ *out_bytes = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", ret);
+
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+#endif
+
+static int cb_lua_exit(void *data, struct flb_config *config)
+{
+ struct lua_filter *ctx;
+
+ ctx = data;
+ flb_luajit_destroy(ctx->lua);
+ lua_config_destroy(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "script", NULL,
+     0, FLB_FALSE, 0,
+     "Path of the Lua script."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "code", NULL,
+ 0, FLB_FALSE, 0,
+ "String that contains the Lua script source code"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "call", NULL,
+     0, FLB_TRUE, offsetof(struct lua_filter, call),
+     "Name of the Lua function that will be invoked to filter each record."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "type_int_key", NULL,
+     0, FLB_FALSE, 0,
+     "If these keys are matched, the fields are converted to integers. "
+     "Separate multiple keys with spaces."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "type_array_key", NULL,
+     0, FLB_FALSE, 0,
+     "If these keys are matched, the fields are converted to arrays. "
+     "Separate multiple keys with spaces."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "protected_mode", "true",
+     0, FLB_TRUE, offsetof(struct lua_filter, protected_mode),
+     "If enabled, the Lua script is executed in protected mode. "
+     "This prevents Fluent Bit from crashing when an invalid Lua script is executed."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "time_as_table", "false",
+ 0, FLB_TRUE, offsetof(struct lua_filter, time_as_table),
+ "If enabled, Fluent-bit will pass the timestamp as a Lua table "
+ "with keys \"sec\" for seconds since epoch and \"nsec\" for nanoseconds."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "enable_flb_null", "false",
+     0, FLB_TRUE, offsetof(struct lua_filter, enable_flb_null),
+     "If enabled, null is converted to flb_null in Lua. "
+     "This is useful to prevent key/value pairs from being removed, "
+     "since nil is a special value that removes a key from a map in Lua."
+ },
+
+ {0}
+};
+
+struct flb_filter_plugin filter_lua_plugin = {
+ .name = "lua",
+ .description = "Lua Scripting Filter",
+ .cb_init = cb_lua_init,
+#ifdef FLB_FILTER_LUA_USE_MPACK
+ .cb_filter = cb_lua_filter_mpack,
+#else
+ .cb_filter = cb_lua_filter,
+#endif
+ .cb_exit = cb_lua_exit,
+ .config_map = config_map,
+ .flags = 0
+};
diff --git a/src/fluent-bit/plugins/filter_lua/lua_config.c b/src/fluent-bit/plugins/filter_lua/lua_config.c
new file mode 100644
index 000000000..f0c154196
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_lua/lua_config.c
@@ -0,0 +1,206 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_utils.h>
+
+#include "lua_config.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+struct lua_filter *lua_config_create(struct flb_filter_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ char *tmp_key;
+ char buf[PATH_MAX];
+ const char *script = NULL;
+ const char *tmp = NULL;
+ (void) config;
+ struct stat st;
+ struct lua_filter *lf;
+ struct mk_list *split = NULL;
+ struct mk_list *head = NULL;
+ struct mk_list *tmp_list= NULL;
+ struct flb_lua_l2c_type *l2c = NULL;
+ struct flb_split_entry *sentry = NULL;
+
+ /* Allocate context */
+ lf = flb_calloc(1, sizeof(struct lua_filter));
+ if (!lf) {
+ flb_errno();
+ return NULL;
+ }
+ ret = flb_filter_config_map_set(ins, (void*)lf);
+ if (ret < 0) {
+ flb_errno();
+ flb_plg_error(ins, "configuration error");
+ flb_free(lf);
+ return NULL;
+ }
+
+ mk_list_init(&lf->l2cc.l2c_types);
+ lf->ins = ins;
+ lf->script = NULL;
+
+ /* config: code */
+ tmp = flb_filter_get_property("code", ins);
+ if (tmp) {
+ lf->code = flb_sds_create(tmp);
+ }
+ else {
+ /* Config: script */
+ script = flb_filter_get_property("script", ins);
+ if (!script) {
+ flb_plg_error(lf->ins, "no script path defined");
+ flb_free(lf);
+ return NULL;
+ }
+
+        /* Compose the path: fall back to the main config directory for relative paths */
+ ret = stat(script, &st);
+ if (ret == -1 && errno == ENOENT) {
+ if (script[0] == '/') {
+ flb_plg_error(lf->ins, "cannot access script '%s'", script);
+ flb_free(lf);
+ return NULL;
+ }
+
+ if (config->conf_path) {
+ snprintf(buf, sizeof(buf) - 1, "%s%s",
+ config->conf_path, script);
+ script = buf;
+ }
+ }
+
+ /* Validate script path */
+ ret = access(script, R_OK);
+ if (ret == -1) {
+ flb_plg_error(lf->ins, "cannot access script '%s'", script);
+ flb_free(lf);
+ return NULL;
+ }
+
+ lf->script = flb_sds_create(script);
+ if (!lf->script) {
+ flb_plg_error(lf->ins, "could not allocate string");
+ flb_free(lf);
+ return NULL;
+ }
+ }
+
+ if (!lf->call) {
+ flb_plg_error(lf->ins, "function name defined by 'call' is not set");
+ lua_config_destroy(lf);
+ return NULL;
+ }
+
+ lf->buffer = flb_sds_create_size(LUA_BUFFER_CHUNK);
+ if (!lf->buffer) {
+ flb_plg_error(lf->ins, "could not allocate decode buffer");
+ lua_config_destroy(lf);
+ return NULL;
+ }
+
+ lf->l2cc.l2c_types_num = 0;
+ tmp = flb_filter_get_property("type_int_key", ins);
+ if (tmp) {
+ split = flb_utils_split(tmp, ' ', FLB_LUA_L2C_TYPES_NUM_MAX);
+ mk_list_foreach_safe(head, tmp_list, split) {
+ l2c = flb_malloc(sizeof(struct flb_lua_l2c_type));
+
+ sentry = mk_list_entry(head, struct flb_split_entry, _head);
+
+ tmp_key = flb_strndup(sentry->value, sentry->len);
+ l2c->key = flb_sds_create(tmp_key);
+ l2c->type = FLB_LUA_L2C_TYPE_INT;
+ flb_free(tmp_key);
+
+ mk_list_add(&l2c->_head, &lf->l2cc.l2c_types);
+ lf->l2cc.l2c_types_num++;
+ }
+ flb_utils_split_free(split);
+ }
+
+ tmp = flb_filter_get_property("type_array_key", ins);
+ if (tmp) {
+ split = flb_utils_split(tmp, ' ', FLB_LUA_L2C_TYPES_NUM_MAX);
+ mk_list_foreach_safe(head, tmp_list, split) {
+ l2c = flb_malloc(sizeof(struct flb_lua_l2c_type));
+
+ sentry = mk_list_entry(head, struct flb_split_entry, _head);
+
+ tmp_key = flb_strndup(sentry->value, sentry->len);
+ l2c->key = flb_sds_create(tmp_key);
+ l2c->type = FLB_LUA_L2C_TYPE_ARRAY;
+ flb_free(tmp_key);
+
+ mk_list_add(&l2c->_head, &lf->l2cc.l2c_types);
+ lf->l2cc.l2c_types_num++;
+ }
+ flb_utils_split_free(split);
+ }
+
+ return lf;
+}
+
+void lua_config_destroy(struct lua_filter *lf)
+{
+ struct mk_list *tmp_list = NULL;
+ struct mk_list *head = NULL;
+ struct flb_lua_l2c_type *l2c = NULL;
+
+ if (!lf) {
+ return;
+ }
+
+ if (lf->code) {
+ flb_sds_destroy(lf->code);
+ }
+
+ if (lf->script) {
+ flb_sds_destroy(lf->script);
+ }
+
+ if (lf->buffer) {
+ flb_sds_destroy(lf->buffer);
+ }
+
+ mk_list_foreach_safe(head, tmp_list, &lf->l2cc.l2c_types) {
+ l2c = mk_list_entry(head, struct flb_lua_l2c_type, _head);
+ if (l2c) {
+ if (l2c->key) {
+ flb_sds_destroy(l2c->key);
+ }
+ mk_list_del(&l2c->_head);
+ flb_free(l2c);
+ }
+ }
+
+ flb_sds_destroy(lf->packbuf);
+ flb_free(lf);
+}
diff --git a/src/fluent-bit/plugins/filter_lua/lua_config.h b/src/fluent-bit/plugins/filter_lua/lua_config.h
new file mode 100644
index 000000000..af8d6f128
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_lua/lua_config.h
@@ -0,0 +1,49 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_LUA_CONFIG_H
+#define FLB_LUA_CONFIG_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_luajit.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_lua.h>
+
+#define LUA_BUFFER_CHUNK 1024 * 8 /* 8K should be enough to get started */
+
+struct lua_filter {
+ flb_sds_t code; /* lua script source code */
+ flb_sds_t script; /* lua script path */
+ flb_sds_t call; /* function name */
+ flb_sds_t buffer; /* json dec buffer */
+ int protected_mode; /* exec lua function in protected mode */
+ int time_as_table; /* timestamp as a Lua table */
+ int enable_flb_null; /* Use flb_null in Lua */
+ struct flb_lua_l2c_config l2cc; /* lua -> C config */
+ struct flb_luajit *lua; /* state context */
+ struct flb_filter_instance *ins; /* filter instance */
+ flb_sds_t packbuf; /* dynamic buffer used for mpack write */
+};
+
+struct lua_filter *lua_config_create(struct flb_filter_instance *ins,
+ struct flb_config *config);
+void lua_config_destroy(struct lua_filter *lf);
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_modify/CMakeLists.txt b/src/fluent-bit/plugins/filter_modify/CMakeLists.txt
new file mode 100644
index 000000000..e63bf25b2
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_modify/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ modify.c)
+
+FLB_PLUGIN(filter_modify "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_modify/modify.c b/src/fluent-bit/plugins/filter_modify/modify.c
new file mode 100644
index 000000000..22d2e21c0
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_modify/modify.c
@@ -0,0 +1,1659 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_regex.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_ra_key.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+#include "modify.h"
+
+#include <stdio.h>
+#include <sys/types.h>
+
+static void condition_free(struct modify_condition *condition)
+{
+ if (condition == NULL) {
+ return;
+ }
+
+ if (condition->a) {
+ flb_sds_destroy(condition->a);
+ }
+ if (condition->b) {
+ flb_free(condition->b);
+ }
+ if (condition->raw_k) {
+ flb_free(condition->raw_k);
+ }
+ if (condition->raw_v) {
+ flb_free(condition->raw_v);
+ }
+
+ if (condition->a_regex) {
+ flb_regex_destroy(condition->a_regex);
+ }
+ if (condition->b_regex) {
+ flb_regex_destroy(condition->b_regex);
+ }
+ if (condition->ra_a) {
+ flb_ra_destroy(condition->ra_a);
+ condition->ra_a = NULL;
+ }
+ if (!mk_list_entry_is_orphan(&condition->_head)) {
+ mk_list_del(&condition->_head);
+ }
+ flb_free(condition);
+}
+
+static void rule_free(struct modify_rule *rule)
+{
+ if (rule == NULL) {
+ return;
+ }
+
+ if (rule->key) {
+ flb_free(rule->key);
+ }
+ if (rule->val) {
+ flb_free(rule->val);
+ }
+ if (rule->raw_k) {
+ flb_free(rule->raw_k);
+ }
+ if (rule->raw_v) {
+ flb_free(rule->raw_v);
+ }
+ if (rule->key_regex) {
+ flb_regex_destroy(rule->key_regex);
+ }
+ if (rule->val_regex) {
+ flb_regex_destroy(rule->val_regex);
+ }
+ if (!mk_list_entry_is_orphan(&rule->_head)) {
+ mk_list_del(&rule->_head);
+ }
+ flb_free(rule);
+}
+
+static void teardown(struct filter_modify_ctx *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+
+ struct modify_rule *rule;
+ struct modify_condition *condition;
+
+ mk_list_foreach_safe(head, tmp, &ctx->conditions) {
+ condition = mk_list_entry(head, struct modify_condition, _head);
+ condition_free(condition);
+ }
+
+ mk_list_foreach_safe(head, tmp, &ctx->rules) {
+ rule = mk_list_entry(head, struct modify_rule, _head);
+ rule_free(rule);
+ }
+}
+
+static void helper_pack_string(struct filter_modify_ctx *ctx,
+ msgpack_packer *packer, const char *str,
+ int len)
+{
+
+ if (str == NULL) {
+ flb_plg_error(ctx->ins, "helper_pack_string : NULL passed");
+ msgpack_pack_nil(packer);
+ }
+ else {
+ msgpack_pack_str(packer, len);
+ msgpack_pack_str_body(packer, str, len);
+ }
+}
+
+static int setup(struct filter_modify_ctx *ctx,
+ struct flb_filter_instance *f_ins, struct flb_config *config)
+{
+ struct mk_list *head;
+ struct mk_list *split;
+ struct flb_kv *kv;
+ struct flb_split_entry *sentry;
+ struct modify_rule *rule = NULL;
+ struct modify_condition *condition;
+
+ int list_size;
+
+ // Split list
+ // - Arg 1 is condition?
+ // --> Setup Condition
+ // - Malloc Condition
+ // - Switch list size
+ // --> Setup Rule
+ // - Malloc Rule
+ // - Switch list size
+
+ if (flb_filter_config_map_set(f_ins, ctx) < 0) {
+ flb_errno();
+ flb_plg_error(f_ins, "configuration error");
+ return -1;
+ }
+
+ mk_list_foreach(head, &f_ins->properties) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+
+ split = flb_utils_split_quoted(kv->val, ' ', 3);
+ list_size = mk_list_size(split);
+
+ // Conditions are,
+ // CONDITION CONDITIONTYPE VAL_A VAL_B
+
+ if (list_size == 0 || list_size > 3) {
+ flb_plg_error(ctx->ins, "Invalid config for %s", kv->key);
+ teardown(ctx);
+ flb_utils_split_free(split);
+ return -1;
+ }
+ else if (strcasecmp(kv->key, "condition") == 0) {
+
+ //
+ // Build a condition
+ //
+
+ condition = flb_calloc(1, sizeof(struct modify_condition));
+ if (!condition) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "Unable to allocate memory for "
+ "condition");
+ teardown(ctx);
+ flb_utils_split_free(split);
+ return -1;
+ }
+
+ condition->a_is_regex = false;
+ condition->b_is_regex = false;
+ condition->ra_a = NULL;
+ condition->raw_k = flb_strndup(kv->key, flb_sds_len(kv->key));
+ if (condition->raw_k == NULL) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "Unable to allocate memory for "
+ "condition->raw_k");
+ teardown(ctx);
+ condition_free(condition);
+ flb_utils_split_free(split);
+ return -1;
+ }
+ condition->raw_v = flb_strndup(kv->val, flb_sds_len(kv->val));
+ if (condition->raw_v == NULL) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "Unable to allocate memory for "
+ "condition->raw_v");
+ teardown(ctx);
+ condition_free(condition);
+ flb_utils_split_free(split);
+ return -1;
+ }
+
+ sentry =
+ mk_list_entry_first(split, struct flb_split_entry, _head);
+
+ if (strcasecmp(sentry->value, "key_exists") == 0) {
+ condition->conditiontype = KEY_EXISTS;
+ }
+ else if (strcasecmp(sentry->value, "key_does_not_exist") == 0) {
+ condition->conditiontype = KEY_DOES_NOT_EXIST;
+ }
+ else if (strcasecmp(sentry->value, "a_key_matches") == 0) {
+ condition->conditiontype = A_KEY_MATCHES;
+ condition->a_is_regex = true;
+ }
+ else if (strcasecmp(sentry->value, "no_key_matches") == 0) {
+ condition->conditiontype = NO_KEY_MATCHES;
+ condition->a_is_regex = true;
+ }
+ else if (strcasecmp(sentry->value, "key_value_equals") == 0) {
+ condition->conditiontype = KEY_VALUE_EQUALS;
+ }
+ else if (strcasecmp(sentry->value, "key_value_does_not_equal") ==
+ 0) {
+ condition->conditiontype = KEY_VALUE_DOES_NOT_EQUAL;
+ }
+ else if (strcasecmp(sentry->value, "key_value_matches") == 0) {
+ condition->conditiontype = KEY_VALUE_MATCHES;
+ condition->b_is_regex = true;
+ }
+ else if (strcasecmp(sentry->value, "key_value_does_not_match") ==
+ 0) {
+ condition->conditiontype = KEY_VALUE_DOES_NOT_MATCH;
+ condition->b_is_regex = true;
+ }
+ else if (strcasecmp
+ (sentry->value,
+ "matching_keys_have_matching_values") == 0) {
+ condition->conditiontype = MATCHING_KEYS_HAVE_MATCHING_VALUES;
+ condition->a_is_regex = true;
+ condition->b_is_regex = true;
+ }
+ else if (strcasecmp
+ (sentry->value,
+ "matching_keys_do_not_have_matching_values") == 0) {
+ condition->conditiontype =
+ MATCHING_KEYS_DO_NOT_HAVE_MATCHING_VALUES;
+ condition->a_is_regex = true;
+ condition->b_is_regex = true;
+ }
+ else {
+ flb_plg_error(ctx->ins, "Invalid config for %s : %s",
+ kv->key, kv->val);
+ teardown(ctx);
+ condition_free(condition);
+ flb_utils_split_free(split);
+ return -1;
+ }
+
+ sentry =
+ mk_list_entry_next(&sentry->_head, struct flb_split_entry,
+ _head, split);
+ condition->a = flb_sds_create_len(sentry->value, sentry->len);
+ condition->a_len = sentry->len;
+ condition->ra_a = flb_ra_create(condition->a, FLB_FALSE);
+ if (list_size == 3) {
+ sentry =
+ mk_list_entry_last(split, struct flb_split_entry, _head);
+ condition->b = flb_strndup(sentry->value, sentry->len);
+ if (condition->b == NULL) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "Unable to allocate memory for "
+ "condition->b");
+ teardown(ctx);
+ condition_free(condition);
+ flb_utils_split_free(split);
+ return -1;
+ }
+ condition->b_len = sentry->len;
+ }
+ else {
+ condition->b = NULL;
+ condition->b_len = 0;
+ }
+
+ if (condition->a_is_regex) {
+ if (condition->a_len < 1) {
+ flb_plg_error(ctx->ins, "Unable to create regex for "
+ "condition %s %s",
+ condition->raw_k, condition->raw_v);
+ teardown(ctx);
+ condition_free(condition);
+ flb_utils_split_free(split);
+ return -1;
+ }
+ else {
+ flb_plg_debug(ctx->ins, "Creating regex for condition A : "
+ "%s %s : %s",
+ condition->raw_k, condition->raw_v,
+ condition->a);
+ condition->a_regex =
+ flb_regex_create(condition->a);
+ }
+ }
+
+ if (condition->b_is_regex) {
+ if (condition->b_len < 1) {
+ flb_plg_error(ctx->ins, "Unable to create regex "
+ "for condition %s %s",
+ condition->raw_k, condition->raw_v);
+ teardown(ctx);
+ condition_free(condition);
+ flb_utils_split_free(split);
+ return -1;
+ }
+ else {
+ flb_plg_debug(ctx->ins, "Creating regex for condition B : %s "
+ "%s : %s",
+ condition->raw_k, condition->raw_v, condition->b);
+ condition->b_regex =
+ flb_regex_create(condition->b);
+ }
+ }
+
+ flb_utils_split_free(split);
+
+ mk_list_add(&condition->_head, &ctx->conditions);
+ ctx->conditions_cnt++;
+ }
+ else {
+
+ //
+ // Build a rule
+ //
+
+ rule = flb_calloc(1, sizeof(struct modify_rule));
+ if (!rule) {
+ flb_plg_error(ctx->ins, "Unable to allocate memory for rule");
+ teardown(ctx);
+ flb_utils_split_free(split);
+ return -1;
+ }
+
+ rule->key_is_regex = false;
+ rule->val_is_regex = false;
+ rule->raw_k = flb_strndup(kv->key, flb_sds_len(kv->key));
+ if (rule->raw_k == NULL) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "Unable to allocate memory for rule->raw_k");
+ teardown(ctx);
+ rule_free(rule);
+ flb_utils_split_free(split);
+ return -1;
+ }
+ rule->raw_v = flb_strndup(kv->val, flb_sds_len(kv->val));
+ if (rule->raw_v == NULL) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "Unable to allocate memory for rule->raw_v");
+ teardown(ctx);
+ rule_free(rule);
+ flb_utils_split_free(split);
+ return -1;
+ }
+
+ sentry =
+ mk_list_entry_first(split, struct flb_split_entry, _head);
+ rule->key = flb_strndup(sentry->value, sentry->len);
+ if (rule->key == NULL) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "Unable to allocate memory for rule->key");
+ teardown(ctx);
+ rule_free(rule);
+ flb_utils_split_free(split);
+ return -1;
+ }
+ rule->key_len = sentry->len;
+
+ sentry = mk_list_entry_last(split, struct flb_split_entry, _head);
+ rule->val = flb_strndup(sentry->value, sentry->len);
+ if (rule->val == NULL) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "Unable to allocate memory for rule->val");
+ teardown(ctx);
+ rule_free(rule);
+ flb_utils_split_free(split);
+ return -1;
+ }
+ rule->val_len = sentry->len;
+
+ flb_utils_split_free(split);
+
+ if (list_size == 1) {
+ if (strcasecmp(kv->key, "remove") == 0) {
+ rule->ruletype = REMOVE;
+ }
+ else if (strcasecmp(kv->key, "remove_wildcard") == 0) {
+ rule->ruletype = REMOVE_WILDCARD;
+ }
+ else if (strcasecmp(kv->key, "remove_regex") == 0) {
+ rule->ruletype = REMOVE_REGEX;
+ rule->key_is_regex = true;
+ }
+ else if (strcasecmp(kv->key, "move_to_start") == 0) {
+ rule->ruletype = MOVE_TO_START;
+ }
+ else if (strcasecmp(kv->key, "move_to_end") == 0) {
+ rule->ruletype = MOVE_TO_END;
+ }
+ else {
+ flb_plg_error(ctx->ins, "Invalid operation %s : %s in "
+ "configuration", kv->key, kv->val);
+ teardown(ctx);
+ rule_free(rule);
+ return -1;
+ }
+ }
+ else if (list_size == 2) {
+ if (strcasecmp(kv->key, "rename") == 0) {
+ rule->ruletype = RENAME;
+ }
+ else if (strcasecmp(kv->key, "hard_rename") == 0) {
+ rule->ruletype = HARD_RENAME;
+ }
+ else if (strcasecmp(kv->key, "add") == 0) {
+ rule->ruletype = ADD;
+ }
+ else if (strcasecmp(kv->key, "add_if_not_present") == 0) {
+ flb_plg_info(ctx->ins, "DEPRECATED : Operation "
+ "'add_if_not_present' has been replaced "
+ "by 'add'.");
+ rule->ruletype = ADD;
+ }
+ else if (strcasecmp(kv->key, "set") == 0) {
+ rule->ruletype = SET;
+ }
+ else if (strcasecmp(kv->key, "copy") == 0) {
+ rule->ruletype = COPY;
+ }
+ else if (strcasecmp(kv->key, "hard_copy") == 0) {
+ rule->ruletype = HARD_COPY;
+ }
+ else {
+ flb_plg_error(ctx->ins, "Invalid operation %s : %s in "
+ "configuration", kv->key, kv->val);
+ teardown(ctx);
+ rule_free(rule);
+ return -1;
+ }
+ }
+
+ if (rule->key_is_regex && rule->key_len == 0) {
+ flb_plg_error(ctx->ins, "Unable to create regex for rule %s %s",
+ rule->raw_k, rule->raw_v);
+ teardown(ctx);
+ rule_free(rule);
+ return -1;
+ }
+ else {
+ rule->key_regex =
+ flb_regex_create(rule->key);
+ if (rule->key_regex == NULL) {
+ flb_plg_error(ctx->ins, "Unable to create regex(key) from %s",
+ rule->key);
+ teardown(ctx);
+ rule_free(rule);
+ return -1;
+ }
+ }
+
+ if (rule->val_is_regex && rule->val_len == 0) {
+ flb_plg_error(ctx->ins, "Unable to create regex for rule %s %s",
+ rule->raw_k, rule->raw_v);
+ teardown(ctx);
+ rule_free(rule);
+ return -1;
+ }
+ else {
+ rule->val_regex =
+ flb_regex_create(rule->val);
+ if (rule->val_regex == NULL) {
+ flb_plg_error(ctx->ins, "Unable to create regex(val) from %s",
+ rule->val);
+ teardown(ctx);
+ rule_free(rule);
+ return -1;
+ }
+ }
+
+ mk_list_add(&rule->_head, &ctx->rules);
+ ctx->rules_cnt++;
+ }
+
+ }
+
+ flb_plg_debug(ctx->ins, "Initialized modify filter with %d conditions "
+ "and %d rules",
+ ctx->conditions_cnt, ctx->rules_cnt);
+ return 0;
+}
+
+
+/* Regex matchers */
+static inline bool helper_msgpack_object_matches_regex(msgpack_object * obj,
+ struct flb_regex
+ *regex)
+{
+ int len;
+ const char *key;
+
+ if (obj->type == MSGPACK_OBJECT_BIN) {
+ return false;
+ }
+ else if (obj->type == MSGPACK_OBJECT_STR) {
+ key = obj->via.str.ptr;
+ len = obj->via.str.size;
+ }
+ else if (obj->type == MSGPACK_OBJECT_BOOLEAN) {
+ if (obj->via.boolean) {
+ key = "true";
+ len = 4;
+ }
+ else {
+ key = "false";
+ len = 5;
+ }
+ }
+ else {
+ return false;
+ }
+
+ return flb_regex_match(regex, (unsigned char *) key, len) > 0;
+}
+
+static inline bool kv_key_matches_regex(msgpack_object_kv * kv,
+ struct flb_regex *regex)
+{
+ return helper_msgpack_object_matches_regex(&kv->key, regex);
+}
+
+static inline bool kv_val_matches_regex(msgpack_object_kv * kv,
+ struct flb_regex *regex)
+{
+ return helper_msgpack_object_matches_regex(&kv->val, regex);
+}
+
+static inline bool kv_key_matches_regex_rule_key(msgpack_object_kv * kv,
+ struct modify_rule *rule)
+{
+ return kv_key_matches_regex(kv, rule->key_regex);
+}
+
+static inline bool kv_key_does_not_match_regex_rule_key(msgpack_object_kv *
+ kv,
+ struct modify_rule
+ *rule)
+{
+ return !kv_key_matches_regex_rule_key(kv, rule);
+}
+
+static inline int map_count_keys_matching_regex(msgpack_object * map,
+ struct flb_regex *regex)
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < map->via.map.size; i++) {
+ if (kv_key_matches_regex(&map->via.map.ptr[i], regex)) {
+ count++;
+ }
+ }
+ return count;
+}
+
+
+/*
+ * Wildcard matchers
+ */
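+/*
+ * Note: "wildcard" matching below is a prefix comparison: a key matches when
+ * its leading bytes equal the configured string.
+ */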
+
+static inline bool helper_msgpack_object_matches_wildcard(msgpack_object *
+ obj, char *str,
+ int len)
+{
+ const char *key;
+
+ if (obj->type == MSGPACK_OBJECT_BIN) {
+ key = obj->via.bin.ptr;
+ }
+ else if (obj->type == MSGPACK_OBJECT_STR) {
+ key = obj->via.str.ptr;
+ }
+ else {
+ return false;
+ }
+
+ return (strncmp(str, key, len) == 0);
+}
+
+static inline bool kv_key_matches_wildcard(msgpack_object_kv * kv,
+ char *str, int len)
+{
+ return helper_msgpack_object_matches_wildcard(&kv->key, str, len);
+}
+
+static inline bool kv_key_matches_wildcard_rule_key(msgpack_object_kv * kv,
+ struct modify_rule *rule)
+{
+ return kv_key_matches_wildcard(kv, rule->key, rule->key_len);
+}
+
+static inline bool kv_key_does_not_match_wildcard_rule_key(msgpack_object_kv *
+ kv,
+ struct modify_rule
+ *rule)
+{
+ return !kv_key_matches_wildcard_rule_key(kv, rule);
+}
+
+static inline int map_count_keys_matching_wildcard(msgpack_object * map,
+ char *str, int len)
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < map->via.map.size; i++) {
+ if (kv_key_matches_wildcard(&map->via.map.ptr[i], str, len)) {
+ count++;
+ }
+ }
+ return count;
+}
+
+//
+// String matchers
+//
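+// Note: string matching is an exact match: both the length and the content
+// of the key must be equal (unlike the wildcard prefix match above).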
+
+static inline bool helper_msgpack_object_matches_str(msgpack_object * obj,
+ char *str, int len)
+{
+
+ const char *key;
+ int klen;
+
+ if (obj->type == MSGPACK_OBJECT_BIN) {
+ key = obj->via.bin.ptr;
+ klen = obj->via.bin.size;
+ }
+ else if (obj->type == MSGPACK_OBJECT_STR) {
+ key = obj->via.str.ptr;
+ klen = obj->via.str.size;
+ }
+ else {
+ return false;
+ }
+
+    return ((len == klen) && (strncmp(str, key, klen) == 0));
+}
+
+static inline bool kv_key_matches_str(msgpack_object_kv * kv,
+ char *str, int len)
+{
+ return helper_msgpack_object_matches_str(&kv->key, str, len);
+}
+
+static inline bool kv_key_matches_str_rule_key(msgpack_object_kv * kv,
+ struct modify_rule *rule)
+{
+ return kv_key_matches_str(kv, rule->key, rule->key_len);
+}
+
+static inline bool kv_key_does_not_match_str_rule_key(msgpack_object_kv * kv,
+ struct modify_rule
+ *rule)
+{
+ return !kv_key_matches_str_rule_key(kv, rule);
+}
+
+static inline bool kv_key_matches_str_rule_val(msgpack_object_kv * kv,
+ struct modify_rule *rule)
+{
+ return kv_key_matches_str(kv, rule->val, rule->val_len);
+}
+
+static inline int map_count_keys_matching_str(msgpack_object * map,
+ char *str, int len)
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < map->via.map.size; i++) {
+ if (kv_key_matches_str(&map->via.map.ptr[i], str, len)) {
+ count++;
+ }
+ }
+ return count;
+}
+
+static inline void map_pack_each(msgpack_packer * packer,
+ msgpack_object * map)
+{
+ int i;
+
+ for (i = 0; i < map->via.map.size; i++) {
+ msgpack_pack_object(packer, map->via.map.ptr[i].key);
+ msgpack_pack_object(packer, map->via.map.ptr[i].val);
+ }
+}
+
+static inline void map_pack_each_fn(msgpack_packer * packer,
+ msgpack_object * map,
+ struct modify_rule *rule,
+ bool(*f) (msgpack_object_kv * kv,
+ struct modify_rule * rule)
+ )
+{
+ int i;
+
+ for (i = 0; i < map->via.map.size; i++) {
+ if ((*f) (&map->via.map.ptr[i], rule)) {
+ msgpack_pack_object(packer, map->via.map.ptr[i].key);
+ msgpack_pack_object(packer, map->via.map.ptr[i].val);
+ }
+ }
+}
+
+static inline bool evaluate_condition_KEY_EXISTS(msgpack_object * map,
+ struct modify_condition
+ *condition)
+{
+ msgpack_object *skey = NULL;
+ msgpack_object *okey = NULL;
+ msgpack_object *oval = NULL;
+
+ flb_ra_get_kv_pair(condition->ra_a, *map, &skey, &okey, &oval);
+ if (skey == NULL || okey == NULL || oval == NULL) {
+ return false;
+ }
+ return true;
+}
+
+static inline bool evaluate_condition_KEY_DOES_NOT_EXIST(msgpack_object * map,
+ struct
+ modify_condition
+ *condition)
+{
+ return !evaluate_condition_KEY_EXISTS(map, condition);
+}
+
+static inline bool evaluate_condition_A_KEY_MATCHES(msgpack_object * map,
+ struct modify_condition
+ *condition)
+{
+ return (map_count_keys_matching_regex(map, condition->a_regex) > 0);
+}
+
+static inline bool evaluate_condition_NO_KEY_MATCHES(msgpack_object * map,
+ struct
+ modify_condition
+ *condition)
+{
+ return !evaluate_condition_A_KEY_MATCHES(map, condition);
+}
+
+static inline bool evaluate_condition_KEY_VALUE_EQUALS(struct filter_modify_ctx *ctx,
+ msgpack_object * map,
+ struct
+ modify_condition
+ *condition)
+{
+ msgpack_object *skey = NULL;
+ msgpack_object *okey = NULL;
+ msgpack_object *oval = NULL;
+ bool ret = false;
+
+ flb_ra_get_kv_pair(condition->ra_a, *map, &skey, &okey, &oval);
+ if (skey == NULL || okey == NULL || oval == NULL) {
+ return false;
+ }
+ ret = helper_msgpack_object_matches_str(oval, condition->b, condition->b_len);
+ if (ret) {
+ flb_plg_debug(ctx->ins, "Match for condition KEY_VALUE_EQUALS %s",
+ condition->b);
+ }
+ return ret;
+}
+
+static inline
+bool evaluate_condition_KEY_VALUE_DOES_NOT_EQUAL(struct filter_modify_ctx *ctx,
+ msgpack_object
+ *map,
+ struct
+ modify_condition
+ *condition)
+{
+ if (!evaluate_condition_KEY_EXISTS(map, condition)) {
+ return false;
+ }
+ return !evaluate_condition_KEY_VALUE_EQUALS(ctx, map, condition);
+}
+
+static inline bool evaluate_condition_KEY_VALUE_MATCHES(struct filter_modify_ctx *ctx,
+ msgpack_object *map,
+ struct
+ modify_condition
+ *condition)
+{
+ msgpack_object *skey = NULL;
+ msgpack_object *okey = NULL;
+ msgpack_object *oval = NULL;
+ bool ret = false;
+
+ flb_ra_get_kv_pair(condition->ra_a, *map, &skey, &okey, &oval);
+ if (skey == NULL || okey == NULL || oval == NULL) {
+ return false;
+ }
+ ret = helper_msgpack_object_matches_regex(oval, condition->b_regex);
+ if (ret) {
+ flb_plg_debug(ctx->ins, "Match for condition KEY_VALUE_MATCHES "
+ "%s", condition->b);
+ }
+ return ret;
+}
+
+static inline
+bool evaluate_condition_KEY_VALUE_DOES_NOT_MATCH(struct filter_modify_ctx *ctx,
+ msgpack_object
+ * map,
+ struct
+ modify_condition
+ *condition)
+{
+ if (!evaluate_condition_KEY_EXISTS(map, condition)) {
+ return false;
+ }
+ return !evaluate_condition_KEY_VALUE_MATCHES(ctx, map, condition);
+}
+
+static inline bool
+evaluate_condition_MATCHING_KEYS_HAVE_MATCHING_VALUES(struct filter_modify_ctx *ctx,
+ msgpack_object *map,
+ struct modify_condition
+ *condition)
+{
+ int i;
+ bool match = true;
+ msgpack_object_kv *kv;
+
+ for (i = 0; i < map->via.map.size; i++) {
+ kv = &map->via.map.ptr[i];
+ if (kv_key_matches_regex(kv, condition->a_regex)) {
+ if (!kv_val_matches_regex(kv, condition->b_regex)) {
+ flb_plg_debug(ctx->ins, "Match MISSED for condition "
+ "MATCHING_KEYS_HAVE_MATCHING_VALUES %s",
+ condition->b);
+ match = false;
+ break;
+ }
+ }
+ }
+ return match;
+}
+
+static inline bool
+evaluate_condition_MATCHING_KEYS_DO_NOT_HAVE_MATCHING_VALUES(struct filter_modify_ctx *ctx,
+ msgpack_object *
+ map,
+ struct
+ modify_condition
+ *condition)
+{
+ return !evaluate_condition_MATCHING_KEYS_HAVE_MATCHING_VALUES(ctx,
+ map,
+ condition);
+}
+
+static inline bool evaluate_condition(struct filter_modify_ctx *ctx,
+ msgpack_object * map,
+ struct modify_condition *condition)
+{
+ switch (condition->conditiontype) {
+ case KEY_EXISTS:
+ return evaluate_condition_KEY_EXISTS(map, condition);
+ case KEY_DOES_NOT_EXIST:
+ return evaluate_condition_KEY_DOES_NOT_EXIST(map, condition);
+ case A_KEY_MATCHES:
+ return evaluate_condition_A_KEY_MATCHES(map, condition);
+ case NO_KEY_MATCHES:
+ return evaluate_condition_NO_KEY_MATCHES(map, condition);
+ case KEY_VALUE_EQUALS:
+ return evaluate_condition_KEY_VALUE_EQUALS(ctx, map, condition);
+ case KEY_VALUE_DOES_NOT_EQUAL:
+ return evaluate_condition_KEY_VALUE_DOES_NOT_EQUAL(ctx, map, condition);
+ case KEY_VALUE_MATCHES:
+ return evaluate_condition_KEY_VALUE_MATCHES(ctx, map, condition);
+ case KEY_VALUE_DOES_NOT_MATCH:
+ return evaluate_condition_KEY_VALUE_DOES_NOT_MATCH(ctx, map, condition);
+ case MATCHING_KEYS_HAVE_MATCHING_VALUES:
+ return evaluate_condition_MATCHING_KEYS_HAVE_MATCHING_VALUES(ctx,
+ map,
+ condition);
+ case MATCHING_KEYS_DO_NOT_HAVE_MATCHING_VALUES:
+ return
+ evaluate_condition_MATCHING_KEYS_DO_NOT_HAVE_MATCHING_VALUES(ctx,
+ map,
+ condition);
+ default:
+ flb_plg_warn(ctx->ins, "Unknown conditiontype for condition %s : %s, "
+ "assuming result FAILED TO MEET CONDITION",
+ condition->raw_k, condition->raw_v);
+ }
+ return false;
+}
+
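+/*
+ * All configured conditions are AND-ed: every condition must hold for the
+ * record, otherwise no rules are applied to it.
+ */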
+static inline bool evaluate_conditions(msgpack_object * map,
+ struct filter_modify_ctx *ctx)
+{
+ bool ok = true;
+
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct modify_condition *condition;
+
+ mk_list_foreach_safe(head, tmp, &ctx->conditions) {
+ condition = mk_list_entry(head, struct modify_condition, _head);
+ if (!evaluate_condition(ctx, map, condition)) {
+ flb_plg_debug(ctx->ins, "Condition not met : %s",
+ condition->raw_v);
+ ok = false;
+ }
+ }
+
+ return ok;
+}
+
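+/*
+ * Rule appliers: each apply_rule_* function either packs a complete
+ * replacement map into 'packer' and returns FLB_FILTER_MODIFIED, or packs
+ * nothing and returns FLB_FILTER_NOTOUCH.
+ */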
+static inline int apply_rule_RENAME(struct filter_modify_ctx *ctx,
+ msgpack_packer *packer,
+ msgpack_object *map,
+ struct modify_rule *rule)
+{
+ int i;
+
+ int match_keys =
+ map_count_keys_matching_str(map, rule->key, rule->key_len);
+ int conflict_keys =
+ map_count_keys_matching_str(map, rule->val, rule->val_len);
+
+ if (match_keys == 0) {
+ flb_plg_debug(ctx->ins, "Rule RENAME %s TO %s : No keys matching %s "
+ "found, not applying rule",
+ rule->key, rule->val, rule->key);
+ return FLB_FILTER_NOTOUCH;
+ }
+ else if (conflict_keys > 0) {
+        flb_plg_debug(ctx->ins, "Rule RENAME %s TO %s : Existing key %s found, "
+                      "not applying rule",
+                      rule->key, rule->val, rule->val);
+ return FLB_FILTER_NOTOUCH;
+ }
+ else {
+ msgpack_pack_map(packer, map->via.map.size);
+ for (i = 0; i < map->via.map.size; i++) {
+ if (kv_key_matches_str_rule_key(&map->via.map.ptr[i], rule)) {
+ helper_pack_string(ctx, packer, rule->val, rule->val_len);
+ }
+ else {
+ msgpack_pack_object(packer, map->via.map.ptr[i].key);
+ }
+ msgpack_pack_object(packer, map->via.map.ptr[i].val);
+ }
+ return FLB_FILTER_MODIFIED;
+ }
+}
+
+static inline int apply_rule_HARD_RENAME(struct filter_modify_ctx *ctx,
+ msgpack_packer *packer,
+ msgpack_object *map,
+ struct modify_rule *rule)
+{
+ int i;
+
+ int match_keys =
+ map_count_keys_matching_str(map, rule->key, rule->key_len);
+ int conflict_keys =
+ map_count_keys_matching_str(map, rule->val, rule->val_len);
+ msgpack_object_kv *kv;
+
+ if (match_keys == 0) {
+ flb_plg_debug(ctx->ins, "Rule HARD_RENAME %s TO %s : No keys matching "
+ "%s found, not applying rule",
+ rule->key, rule->val, rule->key);
+ return FLB_FILTER_NOTOUCH;
+ }
+ else if (conflict_keys == 0) {
+ msgpack_pack_map(packer, map->via.map.size);
+ for (i = 0; i < map->via.map.size; i++) {
+ kv = &map->via.map.ptr[i];
+ if (kv_key_matches_str_rule_key(kv, rule)) {
+ helper_pack_string(ctx, packer, rule->val, rule->val_len);
+ }
+ else {
+ msgpack_pack_object(packer, kv->key);
+ }
+ msgpack_pack_object(packer, kv->val);
+ }
+ return FLB_FILTER_MODIFIED;
+ }
+ else {
+ msgpack_pack_map(packer, map->via.map.size - conflict_keys);
+
+ for (i = 0; i < map->via.map.size; i++) {
+ kv = &map->via.map.ptr[i];
+            // If this kv->key matches rule->val it is the conflicting target
+            // key and will be skipped
+ if (!kv_key_matches_str_rule_val(kv, rule)) {
+ if (kv_key_matches_str_rule_key(kv, rule)) {
+ helper_pack_string(ctx, packer, rule->val, rule->val_len);
+ }
+ else {
+ msgpack_pack_object(packer, kv->key);
+ }
+
+ msgpack_pack_object(packer, kv->val);
+ }
+ }
+ return FLB_FILTER_MODIFIED;
+ }
+}
+
+static inline int apply_rule_COPY(struct filter_modify_ctx *ctx,
+ msgpack_packer *packer,
+ msgpack_object *map,
+ struct modify_rule *rule)
+{
+ int match_keys =
+ map_count_keys_matching_str(map, rule->key, rule->key_len);
+ int conflict_keys =
+ map_count_keys_matching_str(map, rule->val, rule->val_len);
+ int i;
+ msgpack_object_kv *kv;
+
+ if (match_keys < 1) {
+ flb_plg_debug(ctx->ins, "Rule COPY %s TO %s : No keys matching %s "
+ "found, not applying rule",
+ rule->key, rule->val, rule->key);
+ return FLB_FILTER_NOTOUCH;
+ }
+ else if (match_keys > 1) {
+ flb_plg_debug(ctx->ins, "Rule COPY %s TO %s : Multiple keys matching "
+ "%s found, not applying rule",
+ rule->key, rule->val, rule->key);
+ return FLB_FILTER_NOTOUCH;
+ }
+ else if (conflict_keys > 0) {
+        flb_plg_debug(ctx->ins, "Rule COPY %s TO %s : Existing keys matching "
+                      "target %s found, not applying rule",
+                      rule->key, rule->val, rule->val);
+ return FLB_FILTER_NOTOUCH;
+ }
+ else {
+ msgpack_pack_map(packer, map->via.map.size + 1);
+ for (i = 0; i < map->via.map.size; i++) {
+ kv = &map->via.map.ptr[i];
+
+ msgpack_pack_object(packer, kv->key);
+ msgpack_pack_object(packer, kv->val);
+
+ if (kv_key_matches_str_rule_key(kv, rule)) {
+ helper_pack_string(ctx, packer, rule->val, rule->val_len);
+ msgpack_pack_object(packer, kv->val);
+ }
+ }
+ return FLB_FILTER_MODIFIED;
+ }
+}
+
+static inline int apply_rule_HARD_COPY(struct filter_modify_ctx *ctx,
+ msgpack_packer *packer,
+ msgpack_object *map,
+ struct modify_rule *rule)
+{
+ int i;
+
+ int match_keys =
+ map_count_keys_matching_str(map, rule->key, rule->key_len);
+ int conflict_keys =
+ map_count_keys_matching_str(map, rule->val, rule->val_len);
+ msgpack_object_kv *kv;
+
+ if (match_keys < 1) {
+ flb_plg_debug(ctx->ins, "Rule HARD_COPY %s TO %s : No keys matching %s "
+ "found, not applying rule",
+ rule->key, rule->val, rule->key);
+ return FLB_FILTER_NOTOUCH;
+ }
+ else if (match_keys > 1) {
+ flb_plg_warn(ctx->ins, "Rule HARD_COPY %s TO %s : Multiple keys "
+ "matching %s found, not applying rule",
+ rule->key, rule->val, rule->key);
+ return FLB_FILTER_NOTOUCH;
+ }
+ else if (conflict_keys > 1) {
+ flb_plg_warn(ctx->ins, "Rule HARD_COPY %s TO %s : Multiple target keys "
+ "matching %s found, not applying rule",
+ rule->key, rule->val, rule->val);
+ return FLB_FILTER_NOTOUCH;
+ }
+ else if (conflict_keys == 0) {
+ msgpack_pack_map(packer, map->via.map.size + 1);
+ for (i = 0; i < map->via.map.size; i++) {
+ kv = &map->via.map.ptr[i];
+ msgpack_pack_object(packer, kv->key);
+ msgpack_pack_object(packer, kv->val);
+
+ // This is our copy
+ if (kv_key_matches_str_rule_key(kv, rule)) {
+ helper_pack_string(ctx, packer, rule->val, rule->val_len);
+ msgpack_pack_object(packer, kv->val);
+ }
+ }
+ return FLB_FILTER_MODIFIED;
+ }
+ else {
+ msgpack_pack_map(packer, map->via.map.size);
+
+ for (i = 0; i < map->via.map.size; i++) {
+ kv = &map->via.map.ptr[i];
+
+ // Skip the conflict key, we will create a new one
+ if (!kv_key_matches_str_rule_val(kv, rule)) {
+ msgpack_pack_object(packer, kv->key);
+ msgpack_pack_object(packer, kv->val);
+
+ // This is our copy
+ if (kv_key_matches_str_rule_key(kv, rule)) {
+ helper_pack_string(ctx, packer, rule->val, rule->val_len);
+ msgpack_pack_object(packer, kv->val);
+ }
+ }
+ }
+
+ return FLB_FILTER_MODIFIED;
+ }
+}
+
+static inline int apply_rule_ADD(struct filter_modify_ctx *ctx,
+ msgpack_packer *packer,
+ msgpack_object *map,
+ struct modify_rule *rule)
+{
+ if (map_count_keys_matching_str(map, rule->key, rule->key_len) == 0) {
+ msgpack_pack_map(packer, map->via.map.size + 1);
+ map_pack_each(packer, map);
+ helper_pack_string(ctx, packer, rule->key, rule->key_len);
+ helper_pack_string(ctx, packer, rule->val, rule->val_len);
+ return FLB_FILTER_MODIFIED;
+ }
+ else {
+ flb_plg_debug(ctx->ins, "Rule ADD %s : this key already exists, "
+ "skipping", rule->key);
+ return FLB_FILTER_NOTOUCH;
+ }
+}
+
+static inline int apply_rule_SET(struct filter_modify_ctx *ctx,
+ msgpack_packer * packer,
+ msgpack_object * map,
+ struct modify_rule *rule)
+{
+ int matches = map_count_keys_matching_str(map, rule->key, rule->key_len);
+
+ msgpack_pack_map(packer, map->via.map.size - matches + 1);
+
+ if (matches == 0) {
+ map_pack_each(packer, map);
+ helper_pack_string(ctx, packer, rule->key, rule->key_len);
+ helper_pack_string(ctx, packer, rule->val, rule->val_len);
+ }
+ else {
+ map_pack_each_fn(packer, map, rule,
+ kv_key_does_not_match_str_rule_key);
+ helper_pack_string(ctx, packer, rule->key, rule->key_len);
+ helper_pack_string(ctx, packer, rule->val, rule->val_len);
+ }
+
+ return FLB_FILTER_MODIFIED;
+}
+
+static inline int apply_rule_REMOVE(msgpack_packer *packer,
+ msgpack_object *map,
+ struct modify_rule *rule)
+{
+ int matches = map_count_keys_matching_str(map, rule->key, rule->key_len);
+
+ if (matches == 0) {
+ return FLB_FILTER_NOTOUCH;
+ }
+ else {
+ msgpack_pack_map(packer, map->via.map.size - matches);
+ map_pack_each_fn(packer, map, rule,
+ kv_key_does_not_match_str_rule_key);
+ return FLB_FILTER_MODIFIED;
+ }
+}
+
+static inline int apply_rule_REMOVE_WILDCARD(msgpack_packer * packer,
+ msgpack_object * map,
+ struct modify_rule *rule)
+{
+ int matches =
+ map_count_keys_matching_wildcard(map, rule->key, rule->key_len);
+
+ if (matches == 0) {
+ return FLB_FILTER_NOTOUCH;
+ }
+ else {
+ msgpack_pack_map(packer, map->via.map.size - matches);
+ map_pack_each_fn(packer, map, rule,
+ kv_key_does_not_match_wildcard_rule_key);
+ return FLB_FILTER_MODIFIED;
+ }
+}
+
+static inline int apply_rule_REMOVE_REGEX(msgpack_packer * packer,
+ msgpack_object * map,
+ struct modify_rule *rule)
+{
+ int matches = map_count_keys_matching_regex(map, rule->key_regex);
+
+ if (matches == 0) {
+ return FLB_FILTER_NOTOUCH;
+ }
+ else {
+ msgpack_pack_map(packer, map->via.map.size - matches);
+ map_pack_each_fn(packer, map, rule,
+ kv_key_does_not_match_regex_rule_key);
+ return FLB_FILTER_MODIFIED;
+ }
+}
+
+static inline int apply_rule_MOVE_TO_END(struct filter_modify_ctx *ctx,
+ msgpack_packer *packer,
+ msgpack_object *map,
+ struct modify_rule *rule)
+{
+
+ int match_keys =
+ map_count_keys_matching_wildcard(map, rule->key, rule->key_len);
+
+ if (match_keys == 0) {
+ return FLB_FILTER_NOTOUCH;
+ }
+ else {
+ msgpack_pack_map(packer, map->via.map.size);
+ map_pack_each_fn(packer, map, rule,
+ kv_key_does_not_match_wildcard_rule_key);
+ map_pack_each_fn(packer, map, rule,
+ kv_key_matches_wildcard_rule_key);
+ return FLB_FILTER_MODIFIED;
+ }
+}
+
+static inline int apply_rule_MOVE_TO_START(struct filter_modify_ctx *ctx,
+ msgpack_packer *packer,
+ msgpack_object *map,
+ struct modify_rule *rule)
+{
+
+ int match_keys =
+ map_count_keys_matching_wildcard(map, rule->key, rule->key_len);
+
+ if (match_keys == 0) {
+ return FLB_FILTER_NOTOUCH;
+ }
+ else {
+ msgpack_pack_map(packer, map->via.map.size);
+ map_pack_each_fn(packer, map, rule,
+ kv_key_matches_wildcard_rule_key);
+ map_pack_each_fn(packer, map, rule,
+ kv_key_does_not_match_wildcard_rule_key);
+ return FLB_FILTER_MODIFIED;
+ }
+}
+
+static inline int apply_modifying_rule(struct filter_modify_ctx *ctx,
+ msgpack_packer *packer,
+ msgpack_object *map,
+ struct modify_rule *rule)
+{
+ switch (rule->ruletype) {
+ case RENAME:
+ return apply_rule_RENAME(ctx, packer, map, rule);
+ case HARD_RENAME:
+ return apply_rule_HARD_RENAME(ctx, packer, map, rule);
+ case ADD:
+ return apply_rule_ADD(ctx, packer, map, rule);
+ case SET:
+ return apply_rule_SET(ctx, packer, map, rule);
+ case REMOVE:
+ return apply_rule_REMOVE(packer, map, rule);
+ case REMOVE_WILDCARD:
+ return apply_rule_REMOVE_WILDCARD(packer, map, rule);
+ case REMOVE_REGEX:
+ return apply_rule_REMOVE_REGEX(packer, map, rule);
+ case COPY:
+ return apply_rule_COPY(ctx, packer, map, rule);
+ case HARD_COPY:
+ return apply_rule_HARD_COPY(ctx, packer, map, rule);
+ case MOVE_TO_START:
+ return apply_rule_MOVE_TO_START(ctx, packer, map, rule);
+ case MOVE_TO_END:
+ return apply_rule_MOVE_TO_END(ctx, packer, map, rule);
+ default:
+ flb_plg_warn(ctx->ins, "Unknown ruletype for rule with key %s, ignoring",
+ rule->key);
+ }
+ return FLB_FILTER_NOTOUCH;
+}
+
+
+
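+/*
+ * Apply every configured rule, in order, to a single record: each rule that
+ * modifies the record packs a replacement map, which is unpacked again so
+ * that the next rule operates on the updated map.
+ */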
+static inline int apply_modifying_rules(
+ struct flb_log_event_encoder *log_encoder,
+ struct flb_log_event *log_event,
+ struct filter_modify_ctx *ctx)
+{
+ int ret;
+ int records_in;
+ msgpack_object map;
+ struct modify_rule *rule;
+ msgpack_sbuffer sbuffer;
+ msgpack_packer in_packer;
+ msgpack_unpacker unpacker;
+ msgpack_unpacked unpacked;
+ int initial_buffer_size = 1024 * 8;
+ int new_buffer_size = 0;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ bool has_modifications = false;
+
+ map = *log_event->body;
+ records_in = map.via.map.size;
+
+ if (!evaluate_conditions(&map, ctx)) {
+ flb_plg_debug(ctx->ins, "Conditions not met, not touching record");
+ return 0;
+ }
+
+ msgpack_sbuffer_init(&sbuffer);
+ msgpack_packer_init(&in_packer, &sbuffer, msgpack_sbuffer_write);
+ msgpack_unpacked_init(&unpacked);
+
+ if (!msgpack_unpacker_init(&unpacker, initial_buffer_size)) {
+ flb_plg_error(ctx->ins, "Unable to allocate memory for unpacker, aborting");
+ return -1;
+ }
+
+ mk_list_foreach_safe(head, tmp, &ctx->rules) {
+ rule = mk_list_entry(head, struct modify_rule, _head);
+
+ msgpack_sbuffer_clear(&sbuffer);
+
+ if (apply_modifying_rule(ctx, &in_packer, &map, rule) !=
+ FLB_FILTER_NOTOUCH) {
+
+ has_modifications = true;
+ new_buffer_size = sbuffer.size * 2;
+
+ if (msgpack_unpacker_buffer_capacity(&unpacker) < new_buffer_size) {
+ if (!msgpack_unpacker_reserve_buffer
+ (&unpacker, new_buffer_size)) {
+ flb_plg_error(ctx->ins, "Unable to re-allocate memory for "
+ "unpacker, aborting");
+ return -1;
+ }
+ }
+
+ memcpy(msgpack_unpacker_buffer(&unpacker), sbuffer.data,
+ sbuffer.size);
+ msgpack_unpacker_buffer_consumed(&unpacker, sbuffer.size);
+
+ msgpack_unpacker_next(&unpacker, &unpacked);
+
+ if (unpacked.data.type == MSGPACK_OBJECT_MAP) {
+ map = unpacked.data;
+ }
+ else {
+ flb_plg_error(ctx->ins, "Expected MSGPACK_MAP, this is not a "
+ "valid return value, skipping");
+ }
+ }
+ }
+
+ if (has_modifications) {
+ ret = flb_log_event_encoder_begin_record(log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ log_encoder, &log_event->timestamp);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_metadata_from_msgpack_object(
+ log_encoder, log_event->metadata);
+ }
+
+ flb_plg_trace(ctx->ins, "Input map size %d elements, output map size "
+ "%d elements", records_in, map.via.map.size);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_body_from_msgpack_object(
+ log_encoder, &map);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(log_encoder);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "log event encoding error : %d", ret);
+
+ flb_log_event_encoder_rollback_record(log_encoder);
+
+ has_modifications = FLB_FALSE;
+ }
+ }
+
+ msgpack_unpacked_destroy(&unpacked);
+ msgpack_unpacker_destroy(&unpacker);
+ msgpack_sbuffer_destroy(&sbuffer);
+
+ return has_modifications ? 1 : 0;
+
+}
+
+static int cb_modify_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config, void *data)
+{
+ struct filter_modify_ctx *ctx;
+
+ // Create context
+ ctx = flb_malloc(sizeof(struct filter_modify_ctx));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ mk_list_init(&ctx->conditions);
+ mk_list_init(&ctx->rules);
+ ctx->ins = f_ins;
+ ctx->rules_cnt = 0;
+ ctx->conditions_cnt = 0;
+
+ if (setup(ctx, f_ins, config) < 0) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ // Set context
+ flb_filter_set_context(f_ins, ctx);
+ return 0;
+}
+
+static int cb_modify_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t * out_size,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *context, struct flb_config *config)
+{
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ struct filter_modify_ctx *ctx = context;
+ int modifications = 0;
+ int total_modifications = 0;
+ int ret;
+
+ (void) f_ins;
+ (void) i_ins;
+ (void) config;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ modifications =
+ apply_modifying_rules(&log_encoder, &log_event, ctx);
+
+ if (modifications == 0) {
+ /* not matched, so copy original event. */
+ ret = flb_log_event_encoder_emit_raw_record(
+ &log_encoder,
+ log_decoder.record_base,
+ log_decoder.record_length);
+ }
+
+ total_modifications += modifications;
+ }
+
+ if(total_modifications > 0) {
+ if (ret == FLB_EVENT_DECODER_ERROR_INSUFFICIENT_DATA &&
+ log_decoder.offset == bytes) {
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ *out_buf = log_encoder.output_buffer;
+ *out_size = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", ret);
+
+ ret = FLB_FILTER_NOTOUCH;
+ }
+ }
+ else {
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+static int cb_modify_exit(void *data, struct flb_config *config)
+{
+ struct filter_modify_ctx *ctx = data;
+
+ teardown(ctx);
+ flb_free(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "Set", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Add a key/value pair with key KEY and value VALUE. "
+ "If KEY already exists, this field is overwritten."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Add", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Add a key/value pair with key KEY and value VALUE if KEY does not exist"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Remove", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Remove a key/value pair with key KEY if it exists"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Remove_wildcard", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Remove all key/value pairs with key matching wildcard KEY"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Remove_regex", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Remove all key/value pairs with key matching regexp KEY"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Move_To_Start", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Move key/value pairs with keys matching KEY to the start of the message"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Move_To_End", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Move key/value pairs with keys matching KEY to the end of the message"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Rename", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Rename a key/value pair with key KEY to RENAMED_KEY "
+ "if KEY exists AND RENAMED_KEY does not exist"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Hard_Rename", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Rename a key/value pair with key KEY to RENAMED_KEY if KEY exists. "
+ "If RENAMED_KEY already exists, this field is overwritten"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Copy", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Copy a key/value pair with key KEY to COPIED_KEY "
+ "if KEY exists AND COPIED_KEY does not exist"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Hard_copy", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Copy a key/value pair with key KEY to COPIED_KEY if KEY exists. "
+ "If COPIED_KEY already exists, this field is overwritten"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Condition", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Set the condition to modify. Key_exists, Key_does_not_exist, A_key_matches, "
+ "No_key_matches, Key_value_equals, Key_value_does_not_equal, Key_value_matches, "
+ "Key_value_does_not_match, Matching_keys_have_matching_values "
+ "and Matching_keys_do_not_have_matching_values are supported."
+ },
+ {0}
+};
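+
+/*
+ * Illustrative configuration sketch; the key names used here ('hostname',
+ * 'host', 'source') are examples only:
+ *
+ *   [FILTER]
+ *       Name      modify
+ *       Match     *
+ *       Condition Key_exists hostname
+ *       Rename    hostname   host
+ *       Add       source     example
+ */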
+
+struct flb_filter_plugin filter_modify_plugin = {
+ .name = "modify",
+ .description = "modify records by applying rules",
+ .cb_init = cb_modify_init,
+ .cb_filter = cb_modify_filter,
+ .cb_exit = cb_modify_exit,
+ .config_map = config_map,
+ .flags = 0
+};
diff --git a/src/fluent-bit/plugins/filter_modify/modify.h b/src/fluent-bit/plugins/filter_modify/modify.h
new file mode 100644
index 000000000..92c590e01
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_modify/modify.h
@@ -0,0 +1,96 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_MODIFY_H
+#define FLB_FILTER_MODIFY_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_sds.h>
+
+enum FLB_FILTER_MODIFY_RULETYPE {
+ RENAME,
+ HARD_RENAME,
+ ADD,
+ SET,
+ REMOVE,
+ REMOVE_WILDCARD,
+ REMOVE_REGEX,
+ COPY,
+ HARD_COPY,
+ MOVE_TO_START,
+ MOVE_TO_END
+};
+
+enum FLB_FILTER_MODIFY_CONDITIONTYPE {
+ KEY_EXISTS,
+ KEY_DOES_NOT_EXIST,
+ A_KEY_MATCHES,
+ NO_KEY_MATCHES,
+ KEY_VALUE_EQUALS,
+ KEY_VALUE_DOES_NOT_EQUAL,
+ KEY_VALUE_MATCHES,
+ KEY_VALUE_DOES_NOT_MATCH,
+ MATCHING_KEYS_HAVE_MATCHING_VALUES,
+ MATCHING_KEYS_DO_NOT_HAVE_MATCHING_VALUES
+};
+
+struct filter_modify_ctx
+{
+ int rules_cnt;
+ struct mk_list rules;
+ int conditions_cnt;
+ struct mk_list conditions;
+ struct flb_filter_instance *ins;
+};
+
+struct modify_rule
+{
+ enum FLB_FILTER_MODIFY_RULETYPE ruletype;
+ int key_len;
+ int val_len;
+ char *key;
+ char *val;
+ bool key_is_regex;
+ bool val_is_regex;
+ struct flb_regex *key_regex;
+ struct flb_regex *val_regex;
+ char *raw_k;
+ char *raw_v;
+ struct mk_list _head;
+};
+
+struct modify_condition
+{
+ enum FLB_FILTER_MODIFY_CONDITIONTYPE conditiontype;
+ int a_len;
+ int b_len;
+ flb_sds_t a;
+ char *b;
+ bool a_is_regex;
+ bool b_is_regex;
+ struct flb_regex *a_regex;
+ struct flb_regex *b_regex;
+ struct flb_record_accessor *ra_a;
+ char *raw_k;
+ char *raw_v;
+ struct mk_list _head;
+};
+#endif
diff --git a/src/fluent-bit/plugins/filter_multiline/CMakeLists.txt b/src/fluent-bit/plugins/filter_multiline/CMakeLists.txt
new file mode 100644
index 000000000..5b677c50f
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_multiline/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ ml.c
+ ml_concat.c)
+
+FLB_PLUGIN(filter_multiline "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_multiline/ml.c b/src/fluent-bit/plugins/filter_multiline/ml.c
new file mode 100644
index 000000000..b63282628
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_multiline/ml.c
@@ -0,0 +1,931 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_metrics.h>
+#include <fluent-bit/flb_storage.h>
+#include <fluent-bit/multiline/flb_ml.h>
+#include <fluent-bit/multiline/flb_ml_parser.h>
+#include <fluent-bit/flb_scheduler.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include "ml.h"
+#include "ml_concat.h"
+
+static struct ml_stream *get_by_id(struct ml_ctx *ctx, uint64_t stream_id);
+
+/* Create an emitter input instance */
+static int emitter_create(struct ml_ctx *ctx)
+{
+ int ret;
+ struct flb_input_instance *ins;
+
+ ret = flb_input_name_exists(ctx->emitter_name, ctx->config);
+ if (ret == FLB_TRUE) {
+ flb_plg_error(ctx->ins, "emitter_name '%s' already exists",
+ ctx->emitter_name);
+ return -1;
+ }
+
+ ins = flb_input_new(ctx->config, "emitter", NULL, FLB_FALSE);
+ if (!ins) {
+ flb_plg_error(ctx->ins, "cannot create emitter instance");
+ return -1;
+ }
+
+ /* Set the alias name */
+ ret = flb_input_set_property(ins, "alias", ctx->emitter_name);
+ if (ret == -1) {
+ flb_plg_warn(ctx->ins,
+ "cannot set emitter_name, using fallback name '%s'",
+ ins->name);
+ }
+
+ /* Set the emitter_mem_buf_limit */
+ if(ctx->emitter_mem_buf_limit > 0) {
+ ins->mem_buf_limit = ctx->emitter_mem_buf_limit;
+ }
+
+ /* Set the storage type */
+ ret = flb_input_set_property(ins, "storage.type",
+ ctx->emitter_storage_type);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "cannot set storage.type");
+ }
+
+ /* Initialize emitter plugin */
+ ret = flb_input_instance_init(ins, ctx->config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "cannot initialize emitter instance '%s'",
+ ins->name);
+ flb_input_instance_exit(ins, ctx->config);
+ flb_input_instance_destroy(ins);
+ return -1;
+ }
+
+#ifdef FLB_HAVE_METRICS
+ /* Override Metrics title */
+ ret = flb_metrics_title(ctx->emitter_name, ins->metrics);
+ if (ret == -1) {
+ flb_plg_warn(ctx->ins, "cannot set metrics title, using fallback name %s",
+ ins->name);
+ }
+#endif
+
+ /* Storage context */
+ ret = flb_storage_input_create(ctx->config->cio, ins);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "cannot initialize storage for stream '%s'",
+ ctx->emitter_name);
+ flb_input_instance_exit(ins, ctx->config);
+ flb_input_instance_destroy(ins);
+ return -1;
+ }
+ ctx->ins_emitter = ins;
+ return 0;
+}
+
+static int multiline_load_parsers(struct ml_ctx *ctx)
+{
+ int ret;
+ struct mk_list *head;
+ struct mk_list *head_p;
+ struct flb_config_map_val *mv;
+ struct flb_slist_entry *val = NULL;
+ struct flb_ml_parser_ins *parser_i;
+
+ if (!ctx->multiline_parsers) {
+ return -1;
+ }
+
+ /*
+ * Iterate all 'multiline.parser' entries. Every entry is considered
+ * a group which can have multiple multiline parser instances.
+ */
+ flb_config_map_foreach(head, mv, ctx->multiline_parsers) {
+ mk_list_foreach(head_p, mv->val.list) {
+ val = mk_list_entry(head_p, struct flb_slist_entry, _head);
+
+ /* Create an instance of the defined parser */
+ parser_i = flb_ml_parser_instance_create(ctx->m, val->str);
+ if (!parser_i) {
+ return -1;
+ }
+
+ /* Always override parent parser values */
+ if (ctx->key_content) {
+ ret = flb_ml_parser_instance_set(parser_i,
+ "key_content",
+ ctx->key_content);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not override 'key_content'");
+ return -1;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
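+/*
+ * Called by the multiline engine when a concatenated record is ready: in
+ * unbuffered mode the record is appended to the local msgpack buffer, in
+ * buffered mode it is re-emitted through the emitter input plugin using the
+ * stream's original tag.
+ */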
+static int flush_callback(struct flb_ml_parser *parser,
+ struct flb_ml_stream *mst,
+ void *data, char *buf_data, size_t buf_size)
+{
+ int ret;
+ struct ml_ctx *ctx = data;
+ struct ml_stream *stream;
+
+ if (ctx->debug_flush) {
+ flb_ml_flush_stdout(parser, mst, data, buf_data, buf_size);
+ }
+
+ if (ctx->use_buffer == FLB_FALSE) {
+ /* Append incoming record to our msgpack context buffer */
+ msgpack_sbuffer_write(&ctx->mp_sbuf, buf_data, buf_size);
+ return 0;
+
+ } else { /* buffered mode */
+ stream = get_by_id(ctx, mst->id);
+ if (!stream) {
+ flb_plg_error(ctx->ins, "Could not find tag to re-emit from stream %s",
+ mst->name);
+ return -1;
+ }
+
+ /* Emit record with original tag */
+ flb_plg_trace(ctx->ins, "emitting from %s to %s", stream->input_name, stream->tag);
+ ret = in_emitter_add_record(stream->tag, flb_sds_len(stream->tag), buf_data, buf_size,
+ ctx->ins_emitter);
+
+ return ret;
+ }
+}
+
+static int cb_ml_init(struct flb_filter_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ int ret;
+ struct ml_ctx *ctx;
+ flb_sds_t tmp;
+ flb_sds_t emitter_name = NULL;
+ int len;
+ uint64_t stream_id;
+ (void) config;
+
+ ctx = flb_calloc(1, sizeof(struct ml_ctx));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+ ctx->debug_flush = FLB_FALSE;
+ ctx->config = config;
+ ctx->timer_created = FLB_FALSE;
+
+ /*
+     * The config map is not loaded yet at this point in the code;
+     * the user must explicitly set 'buffer' to false to turn it off.
+ */
+ ctx->use_buffer = FLB_TRUE;
+ tmp = (char *) flb_filter_get_property("buffer", ins);
+ if (tmp) {
+ ctx->use_buffer = flb_utils_bool(tmp);
+ }
+ ctx->partial_mode = FLB_FALSE;
+ tmp = (char *) flb_filter_get_property("mode", ins);
+ if (tmp != NULL) {
+ if (strcasecmp(tmp, FLB_MULTILINE_MODE_PARTIAL_MESSAGE) == 0) {
+ ctx->partial_mode = FLB_TRUE;
+ } else if (strcasecmp(tmp, FLB_MULTILINE_MODE_PARSER) == 0) {
+ ctx->partial_mode = FLB_FALSE;
+ } else {
+ flb_plg_error(ins, "'Mode' must be '%s' or '%s'",
+ FLB_MULTILINE_MODE_PARTIAL_MESSAGE,
+ FLB_MULTILINE_MODE_PARSER);
+ return -1;
+ }
+ }
+
+ if (ctx->partial_mode == FLB_TRUE && ctx->use_buffer == FLB_FALSE) {
+ flb_plg_error(ins, "'%s' 'Mode' requires 'Buffer' to be 'On'",
+ FLB_MULTILINE_MODE_PARTIAL_MESSAGE);
+ }
+
+ if (ctx->use_buffer == FLB_FALSE) {
+ /* Init buffers */
+ msgpack_sbuffer_init(&ctx->mp_sbuf);
+ msgpack_packer_init(&ctx->mp_pck, &ctx->mp_sbuf, msgpack_sbuffer_write);
+ } else {
+ /*
+         * Emitter name: every buffered multiline instance needs an emitter
+         * input plugin through which it can emit records. We use a unique
+         * instance so we can use the metrics interface.
+         *
+         * If it is not set, we generate an emitter name here.
+         *
+         * Check whether 'emitter_name' was set before loading the config map.
+         * If it is not set, set the property manually so that the config map
+         * handles the memory allocation.
+ */
+ tmp = (char *) flb_filter_get_property("emitter_name", ins);
+ if (!tmp) {
+ emitter_name = flb_sds_create_size(64);
+ if (!emitter_name) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ tmp = flb_sds_printf(&emitter_name, "emitter_for_%s",
+ flb_filter_name(ins));
+ if (!tmp) {
+ flb_plg_error(ins, "cannot compose emitter_name");
+ flb_sds_destroy(emitter_name);
+ flb_free(ctx);
+ return -1;
+ }
+
+ flb_filter_set_property(ins, "emitter_name", emitter_name);
+ flb_plg_info(ins, "created emitter: %s", emitter_name);
+ flb_sds_destroy(emitter_name);
+ }
+ }
+
+ /* Load the config map */
+ ret = flb_filter_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Set plugin context */
+ flb_filter_set_context(ins, ctx);
+
+ if (ctx->key_content == NULL && ctx->partial_mode == FLB_TRUE) {
+ flb_plg_error(ins, "'Mode' '%s' requires 'multiline.key_content'",
+ FLB_MULTILINE_MODE_PARTIAL_MESSAGE);
+ flb_free(ctx);
+ return -1;
+ }
+
+ if (ctx->partial_mode == FLB_FALSE && mk_list_size(ctx->multiline_parsers) == 0) {
+ flb_plg_error(ins, "The default 'Mode' '%s' requires at least one 'multiline.parser'",
+ FLB_MULTILINE_MODE_PARSER);
+ flb_free(ctx);
+ return -1;
+ }
+
+
+ if (ctx->use_buffer == FLB_TRUE) {
+ /*
+         * Emitter Storage Type: the emitter input plugin created by default
+         * uses a memory buffer; this option allows defining a filesystem
+         * mechanism for new records (only if the main service is also
+         * filesystem enabled).
+         *
+         * Here we just validate the input type: 'memory' or 'filesystem'.
+ */
+ tmp = ctx->emitter_storage_type;
+ if (strcasecmp(tmp, "memory") != 0 && strcasecmp(tmp, "filesystem") != 0) {
+ flb_plg_error(ins, "invalid 'emitter_storage.type' value. Only "
+ "'memory' or 'filesystem' types are allowed");
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Create the emitter context */
+ ret = emitter_create(ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Register a metric to count the number of emitted records */
+#ifdef FLB_HAVE_METRICS
+ ctx->cmt_emitted = cmt_counter_create(ins->cmt,
+ "fluentbit", "filter", "emit_records_total",
+ "Total number of emitted records",
+ 1, (char *[]) {"name"});
+
+ /* OLD api */
+ flb_metrics_add(FLB_MULTILINE_METRIC_EMITTED,
+ "emit_records", ctx->ins->metrics);
+#endif
+ }
+
+ mk_list_init(&ctx->ml_streams);
+ mk_list_init(&ctx->split_message_packers);
+
+ if (ctx->partial_mode == FLB_FALSE) {
+ /* Create multiline context */
+ ctx->m = flb_ml_create(config, ctx->ins->name);
+ if (!ctx->m) {
+ /*
+ * we don't free the context since upon init failure, the exit
+ * callback will be triggered with our context set above.
+ */
+ return -1;
+ }
+
+ /* Load the parsers/config */
+ ret = multiline_load_parsers(ctx);
+ if (ret == -1) {
+ return -1;
+ }
+
+ if (ctx->use_buffer == FLB_TRUE) {
+
+ ctx->m->flush_ms = ctx->flush_ms;
+ ret = flb_ml_auto_flush_init(ctx->m);
+ if (ret == -1) {
+ return -1;
+ }
+ } else {
+ /* Create a stream for this file */
+ len = strlen(ins->name);
+ ret = flb_ml_stream_create(ctx->m,
+ ins->name, len,
+ flush_callback, ctx,
+ &stream_id);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "could not create multiline stream");
+ return -1;
+ }
+ ctx->stream_id = stream_id;
+ }
+ }
+
+ return 0;
+}
+
+void ml_stream_destroy(struct ml_stream *stream)
+{
+ if (!stream) {
+ return;
+ }
+
+ if (stream->input_name) {
+ flb_sds_destroy(stream->input_name);
+ }
+ if (stream->tag) {
+ flb_sds_destroy(stream->tag);
+ }
+ flb_free(stream);
+ return;
+}
+
+static struct ml_stream *get_by_id(struct ml_ctx *ctx, uint64_t stream_id)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct ml_stream *stream;
+
+ mk_list_foreach_safe(head, tmp, &ctx->ml_streams) {
+ stream = mk_list_entry(head, struct ml_stream, _head);
+ if (stream->stream_id == stream_id) {
+ return stream;
+ }
+ }
+
+ return NULL;
+}
+
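+/*
+ * Streams are looked up by (input instance name, tag); when no matching
+ * entry exists, a new multiline stream is created and registered so that
+ * later records from the same source reuse it.
+ */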
+static struct ml_stream *get_or_create_stream(struct ml_ctx *ctx,
+ struct flb_input_instance *i_ins,
+ const char *tag, int tag_len)
+{
+ uint64_t stream_id;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct ml_stream *stream;
+ flb_sds_t stream_name;
+ flb_sds_t tmp_sds;
+ int name_check;
+ int tag_check;
+ int len;
+ int ret;
+
+ mk_list_foreach_safe(head, tmp, &ctx->ml_streams) {
+ stream = mk_list_entry(head, struct ml_stream, _head);
+ name_check = strcmp(stream->input_name, i_ins->name);
+ tag_check = strcmp(stream->tag, tag);
+ if (tag_check == 0 && name_check == 0) {
+ flb_plg_trace(ctx->ins, "using stream %s_%s", stream->input_name, stream->tag);
+ return stream;
+ }
+ }
+
+ /* create a new stream */
+
+ stream_name = flb_sds_create_size(64);
+
+ tmp_sds = flb_sds_printf(&stream_name, "%s_%s", i_ins->name, tag);
+ if (!tmp_sds) {
+ flb_errno();
+ flb_sds_destroy(stream_name);
+ return NULL;
+ }
+ stream_name = tmp_sds;
+
+ stream = flb_calloc(1, sizeof(struct ml_stream));
+ if (!stream) {
+ flb_errno();
+ flb_sds_destroy(stream_name);
+ return NULL;
+ }
+
+ tmp_sds = flb_sds_create(tag);
+    if (!tmp_sds) {
+ flb_errno();
+ flb_sds_destroy(stream_name);
+ ml_stream_destroy(stream);
+ return NULL;
+ }
+ stream->tag = tmp_sds;
+
+ tmp_sds = flb_sds_create(i_ins->name);
+ if (!tmp_sds) {
+ flb_errno();
+ flb_sds_destroy(stream_name);
+ ml_stream_destroy(stream);
+ return NULL;
+ }
+ stream->input_name = tmp_sds;
+
+ /* Create an flb_ml_stream for this stream */
+ flb_plg_info(ctx->ins, "created new multiline stream for %s", stream_name);
+ len = flb_sds_len(stream_name);
+ ret = flb_ml_stream_create(ctx->m,
+ stream_name, len,
+ flush_callback, ctx,
+ &stream_id);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "could not create multiline stream for %s",
+ stream_name);
+ flb_sds_destroy(stream_name);
+ ml_stream_destroy(stream);
+ return NULL;
+ }
+ stream->stream_id = stream_id;
+ mk_list_add(&stream->_head, &ctx->ml_streams);
+ flb_plg_debug(ctx->ins, "Created new ML stream for %s", stream_name);
+ flb_sds_destroy(stream_name);
+
+ return stream;
+}
+
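+/*
+ * Periodic timer callback for 'partial_message' mode: any packer that has
+ * not been written to for longer than flush_ms is completed and its
+ * concatenated record is re-emitted with the original tag.
+ */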
+static void partial_timer_cb(struct flb_config *config, void *data)
+{
+ struct ml_ctx *ctx = data;
+ (void) config;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct split_message_packer *packer;
+ unsigned long long now;
+ unsigned long long diff;
+ int ret;
+
+ now = ml_current_timestamp();
+
+ mk_list_foreach_safe(head, tmp, &ctx->split_message_packers) {
+ packer = mk_list_entry(head, struct split_message_packer, _head);
+
+ diff = now - packer->last_write_time;
+ if (diff <= ctx->flush_ms) {
+ continue;
+ }
+
+ mk_list_del(&packer->_head);
+ ml_split_message_packer_complete(packer);
+ /* re-emit record with original tag */
+ if (packer->log_encoder.output_buffer != NULL &&
+ packer->log_encoder.output_length > 0) {
+
+ flb_plg_trace(ctx->ins, "emitting from %s to %s", packer->input_name, packer->tag);
+ ret = in_emitter_add_record(packer->tag, flb_sds_len(packer->tag),
+ packer->log_encoder.output_buffer,
+ packer->log_encoder.output_length,
+ ctx->ins_emitter);
+ if (ret < 0) {
+ /* this shouldn't happen in normal execution */
+ flb_plg_warn(ctx->ins,
+ "Couldn't send concatenated record of size %zu "
+ "bytes to in_emitter %s",
+ packer->log_encoder.output_length,
+ ctx->ins_emitter->name);
+ }
+ }
+ ml_split_message_packer_destroy(packer);
+ }
+}
+
+static int ml_filter_partial(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_bytes,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *filter_context,
+ struct flb_config *config)
+{
+ int ret;
+ struct ml_ctx *ctx = filter_context;
+ msgpack_sbuffer tmp_sbuf;
+ msgpack_packer tmp_pck;
+ int partial_records = 0;
+ int return_records = 0;
+ int partial = FLB_FALSE;
+ int is_last_partial = FLB_FALSE;
+ struct split_message_packer *packer;
+ char *partial_id_str = NULL;
+ size_t partial_id_size = 0;
+ struct flb_sched *sched;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ (void) f_ins;
+ (void) config;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ /*
+     * Create a timer that runs periodically and checks whether pending
+     * buffers have expired. It is created once, on the first flush.
+ */
+ if (ctx->timer_created == FLB_FALSE) {
+ flb_plg_debug(ctx->ins,
+ "Creating flush timer with frequency %dms",
+ ctx->flush_ms);
+
+ sched = flb_sched_ctx_get();
+
+ ret = flb_sched_timer_cb_create(sched, FLB_SCHED_TIMER_CB_PERM,
+ ctx->flush_ms / 2, partial_timer_cb,
+ ctx, NULL);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to create flush timer");
+ } else {
+ ctx->timer_created = FLB_TRUE;
+ }
+ }
+
+ /*
+ * Create temporary msgpack buffer
+ * for non-partial messages which are passed on as-is
+ */
+ msgpack_sbuffer_init(&tmp_sbuf);
+ msgpack_packer_init(&tmp_pck, &tmp_sbuf, msgpack_sbuffer_write);
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ partial = ml_is_partial(log_event.body);
+ if (partial == FLB_TRUE) {
+ partial_records++;
+ ret = ml_get_partial_id(log_event.body, &partial_id_str, &partial_id_size);
+ if (ret == -1) {
+ flb_plg_warn(ctx->ins, "Could not find partial_id but partial_message key is FLB_TRUE for record with tag %s", tag);
+ /* handle this record as non-partial */
+ partial_records--;
+ goto pack_non_partial;
+ }
+ packer = ml_get_packer(&ctx->split_message_packers, tag,
+ i_ins->name, partial_id_str, partial_id_size);
+ if (packer == NULL) {
+ flb_plg_trace(ctx->ins, "Found new partial record with tag %s", tag);
+ packer = ml_create_packer(tag, i_ins->name, partial_id_str, partial_id_size,
+ log_event.body, ctx->key_content, &log_event.timestamp);
+ if (packer == NULL) {
+ flb_plg_warn(ctx->ins, "Could not create packer for partial record with tag %s", tag);
+ /* handle this record as non-partial */
+ partial_records--;
+ goto pack_non_partial;
+ }
+ mk_list_add(&packer->_head, &ctx->split_message_packers);
+ }
+ ret = ml_split_message_packer_write(packer, log_event.body, ctx->key_content);
+ if (ret < 0) {
+ flb_plg_warn(ctx->ins, "Could not append content for partial record with tag %s", tag);
+ /* handle this record as non-partial */
+ partial_records--;
+ goto pack_non_partial;
+ }
+ is_last_partial = ml_is_partial_last(log_event.body);
+ if (is_last_partial == FLB_TRUE) {
+ /* emit the record in this filter invocation */
+ return_records++;
+ ml_split_message_packer_complete(packer);
+ ml_append_complete_record(packer, &log_encoder);
+ mk_list_del(&packer->_head);
+ ml_split_message_packer_destroy(packer);
+ }
+ } else {
+
+pack_non_partial:
+ return_records++;
+ /* record passed from filter as-is */
+
+ ret = flb_log_event_encoder_emit_raw_record(
+ &log_encoder,
+ log_decoder.record_base,
+ log_decoder.record_length);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+                              "Log event encoder error : %d", ret);
+ }
+ }
+ }
+
+ if (partial_records == 0) {
+ /* if no records were partial, we didn't modify the chunk */
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+
+ return FLB_FILTER_NOTOUCH;
+ } else if (return_records > 0) {
+ /* some new records can be returned now, return a new buffer */
+ if (log_encoder.output_length > 0) {
+ *out_buf = log_encoder.output_buffer;
+ *out_bytes = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+ } else {
+ /* no records to return right now, free buffer */
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ }
+
+ return FLB_FILTER_MODIFIED;
+}
+
+static int cb_ml_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_bytes,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *filter_context,
+ struct flb_config *config)
+{
+ size_t tmp_size;
+ char *tmp_buf;
+ struct flb_log_event_decoder decoder;
+ struct ml_stream *stream;
+ struct flb_log_event event;
+ int ret;
+ struct ml_ctx *ctx;
+
+ (void) f_ins;
+ (void) config;
+
+ ctx = (struct ml_ctx *) filter_context;
+
+ if (i_ins == ctx->ins_emitter) {
+ flb_plg_trace(ctx->ins, "not processing records from the emitter");
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ /* 'partial_message' mode */
+ if (ctx->partial_mode == FLB_TRUE) {
+ return ml_filter_partial(data, bytes, tag, tag_len,
+ out_buf, out_bytes,
+ f_ins, i_ins,
+ filter_context, config);
+ }
+
+ /* 'parser' mode */
+ if (ctx->use_buffer == FLB_FALSE) {
+        /* reset msgpack buffer content */
+ ctx->mp_sbuf.size = 0;
+
+ /* process records */
+ flb_log_event_decoder_init(&decoder, (char *) data, bytes);
+
+ while (flb_log_event_decoder_next(&decoder, &event) ==
+ FLB_EVENT_DECODER_SUCCESS) {
+ ret = flb_ml_append_event(ctx->m, ctx->stream_id, &event);
+
+ if (ret != 0) {
+ flb_plg_debug(ctx->ins,
+ "could not append object from tag: %s", tag);
+ }
+ }
+
+ flb_log_event_decoder_destroy(&decoder);
+
+ /* flush all pending data (there is no auto-flush when unbuffered) */
+ flb_ml_flush_pending_now(ctx->m);
+
+ if (ctx->mp_sbuf.size > 0) {
+ /*
+ * If the filter will report a new set of records because the
+ * original data was modified, we make a copy to a new memory
+ * area, since the buffer might be invalidated in the filter
+ * chain.
+ */
+
+ tmp_buf = flb_malloc(ctx->mp_sbuf.size);
+ if (!tmp_buf) {
+ flb_errno();
+ return FLB_FILTER_NOTOUCH;
+ }
+ tmp_size = ctx->mp_sbuf.size;
+ memcpy(tmp_buf, ctx->mp_sbuf.data, tmp_size);
+ *out_buf = tmp_buf;
+ *out_bytes = tmp_size;
+ ctx->mp_sbuf.size = 0;
+
+ return FLB_FILTER_MODIFIED;
+ }
+
+ /* unlikely to happen.. but just in case */
+ return FLB_FILTER_NOTOUCH;
+
+ } else { /* buffered mode */
+ stream = get_or_create_stream(ctx, i_ins, tag, tag_len);
+
+ if (!stream) {
+ flb_plg_error(ctx->ins, "Could not find or create ML stream for %s", tag);
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ /* process records */
+ flb_log_event_decoder_init(&decoder, (char *) data, bytes);
+
+ while (flb_log_event_decoder_next(&decoder, &event) ==
+ FLB_EVENT_DECODER_SUCCESS) {
+ ret = flb_ml_append_event(ctx->m, stream->stream_id, &event);
+
+ if (ret != 0) {
+ flb_plg_debug(ctx->ins,
+ "could not append object from tag: %s", tag);
+ }
+ }
+
+ flb_log_event_decoder_destroy(&decoder);
+
+ /*
+         * Always return FLB_FILTER_MODIFIED, which will contain zero records,
+         * since the emitter ingests and re-emits all records.
+ */
+ return FLB_FILTER_MODIFIED;
+ }
+}
+
+static int cb_ml_exit(void *data, struct flb_config *config)
+{
+ struct ml_ctx *ctx = data;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct ml_stream *stream;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->m) {
+ flb_ml_destroy(ctx->m);
+ }
+
+ mk_list_foreach_safe(head, tmp, &ctx->ml_streams) {
+ stream = mk_list_entry(head, struct ml_stream, _head);
+ mk_list_del(&stream->_head);
+ ml_stream_destroy(stream);
+ }
+
+ msgpack_sbuffer_destroy(&ctx->mp_sbuf);
+ flb_free(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_BOOL, "debug_flush", "false",
+ 0, FLB_TRUE, offsetof(struct ml_ctx, debug_flush),
+ "enable debugging for concatenation flush to stdout"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "buffer", "true",
+ 0, FLB_TRUE, offsetof(struct ml_ctx, use_buffer),
+ "Enable buffered mode. In buffered mode, the filter can concatenate "
+     "multiline messages from inputs that ingest records one by one (ex: Forward), "
+     "rather than in chunks, re-emitting them into the beginning of the "
+ "pipeline using the in_emitter instance. "
+ "With buffer off, this filter will not work with most inputs, except tail."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "mode", "parser",
+ 0, FLB_TRUE, offsetof(struct ml_ctx, mode),
+ "Mode can be 'parser' for regex concat, or 'partial_message' to "
+ "concat split docker logs."
+ },
+
+ {
+ FLB_CONFIG_MAP_INT, "flush_ms", "2000",
+ 0, FLB_TRUE, offsetof(struct ml_ctx, flush_ms),
+ "Flush time for pending multiline records"
+ },
+
+ /* Multiline Core Engine based API */
+ {
+ FLB_CONFIG_MAP_CLIST, "multiline.parser", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct ml_ctx, multiline_parsers),
+ "specify one or multiple multiline parsers: docker, cri, go, java, etc."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "multiline.key_content", NULL,
+ 0, FLB_TRUE, offsetof(struct ml_ctx, key_content),
+ "specify the key name that holds the content to process."
+ },
+
+ /* emitter config */
+ {
+ FLB_CONFIG_MAP_STR, "emitter_name", NULL,
+ FLB_FALSE, FLB_TRUE, offsetof(struct ml_ctx, emitter_name),
+ NULL
+ },
+ {
+ FLB_CONFIG_MAP_STR, "emitter_storage.type", "memory",
+ FLB_FALSE, FLB_TRUE, offsetof(struct ml_ctx, emitter_storage_type),
+ NULL
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "emitter_mem_buf_limit", FLB_MULTILINE_MEM_BUF_LIMIT_DEFAULT,
+ FLB_FALSE, FLB_TRUE, offsetof(struct ml_ctx, emitter_mem_buf_limit),
+ "set a memory buffer limit to restrict memory usage of emitter"
+ },
+
+ /* EOF */
+ {0}
+};
+
+struct flb_filter_plugin filter_multiline_plugin = {
+ .name = "multiline",
+ .description = "Concatenate multiline messages",
+ .cb_init = cb_ml_init,
+ .cb_filter = cb_ml_filter,
+ .cb_exit = cb_ml_exit,
+ .config_map = config_map,
+ .flags = 0
+};
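
Editor's note: in 'partial_message' mode the filter above reassembles records that the container runtime split (Docker cuts log lines at 16KB), grouping fragments by tag, input instance and partial_id, and re-emitting the concatenated record once partial_last arrives. Below is a minimal, illustrative sketch of that classification step using the ml_concat.h helpers added later in this patch; the classify_record() wrapper is hypothetical and not part of the source.

#include <msgpack.h>
#include "ml_concat.h"

/* assumes "body" was obtained from flb_log_event_decoder_next() */
static void classify_record(msgpack_object *body)
{
    char *id = NULL;
    size_t id_size = 0;

    if (ml_is_partial(body) != FLB_TRUE) {
        return;                      /* plain record, passed through as-is */
    }

    if (ml_get_partial_id(body, &id, &id_size) == -1) {
        return;                      /* no partial_id: treated as non-partial */
    }

    if (ml_is_partial_last(body) == FLB_TRUE) {
        /* last fragment: the filter completes the packer and re-emits here */
    }
}
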
diff --git a/src/fluent-bit/plugins/filter_multiline/ml.h b/src/fluent-bit/plugins/filter_multiline/ml.h
new file mode 100644
index 000000000..973346007
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_multiline/ml.h
@@ -0,0 +1,87 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_MULTILINE_H
+#define FLB_FILTER_MULTILINE_H
+
+#include <fluent-bit/flb_filter_plugin.h>
+#include "ml_concat.h"
+
+#define FLB_MULTILINE_MEM_BUF_LIMIT_DEFAULT "10M"
+#define FLB_MULTILINE_METRIC_EMITTED 200
+#define FLB_MULTILINE_MODE_PARTIAL_MESSAGE "partial_message"
+#define FLB_MULTILINE_MODE_PARSER "parser"
+
+/*
+ * input instance + tag is the unique identifier
+ * for a multiline stream
+ * TODO: implement clean up of streams that haven't been used recently
+ */
+struct ml_stream {
+ flb_sds_t tag;
+ flb_sds_t input_name;
+ uint64_t stream_id;
+
+ struct mk_list _head;
+};
+
+struct ml_ctx {
+ int debug_flush;
+ int use_buffer;
+ flb_sds_t key_content;
+ flb_sds_t mode;
+
+ /* packaging buffers */
+ msgpack_sbuffer mp_sbuf; /* temporary msgpack buffer */
+ msgpack_packer mp_pck; /* temporary msgpack packer */
+
+ /* Multiline core engine */
+ uint64_t stream_id;
+ struct flb_ml *m;
+ struct mk_list *multiline_parsers;
+ int flush_ms;
+
+ int timer_created;
+
+ int partial_mode;
+
+ struct mk_list ml_streams;
+
+ struct mk_list split_message_packers;
+
+ struct flb_filter_instance *ins;
+
+ /* emitter */
+ flb_sds_t emitter_name; /* emitter input plugin name */
+ flb_sds_t emitter_storage_type; /* emitter storage type */
+ size_t emitter_mem_buf_limit; /* Emitter buffer limit */
+ struct flb_input_instance *ins_emitter; /* emitter input plugin instance */
+ struct flb_config *config; /* Fluent Bit context */
+
+#ifdef FLB_HAVE_METRICS
+ struct cmt_counter *cmt_emitted;
+#endif
+};
+
+/* Register external function to emit records, check 'plugins/in_emitter' */
+int in_emitter_add_record(const char *tag, int tag_len,
+ const char *buf_data, size_t buf_size,
+ struct flb_input_instance *in);
+
+#endif
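
Editor's note: as the comment above states, a multiline stream is identified by the input instance plus the tag. The sketch below shows only that matching rule; the real helper, get_or_create_stream() in ml.c, also creates the stream and its flb_ml stream id on a miss, and find_stream() here is a hypothetical, read-only illustration.

#include <string.h>
#include "ml.h"

static struct ml_stream *find_stream(struct ml_ctx *ctx,
                                     const char *input_name, const char *tag)
{
    struct mk_list *head;
    struct ml_stream *stream;

    mk_list_foreach(head, &ctx->ml_streams) {
        stream = mk_list_entry(head, struct ml_stream, _head);
        if (strcmp(stream->input_name, input_name) == 0 &&
            strcmp(stream->tag, tag) == 0) {
            return stream;
        }
    }

    return NULL;   /* caller would create and register a new stream */
}
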
diff --git a/src/fluent-bit/plugins/filter_multiline/ml_concat.c b/src/fluent-bit/plugins/filter_multiline/ml_concat.c
new file mode 100644
index 000000000..27121f48d
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_multiline/ml_concat.c
@@ -0,0 +1,473 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_metrics.h>
+#include <fluent-bit/flb_storage.h>
+#include <fluent-bit/multiline/flb_ml.h>
+#include <fluent-bit/multiline/flb_ml_parser.h>
+#include <fluent-bit/flb_compat.h>
+#include <stdio.h>
+
+#include "ml_concat.h"
+
+msgpack_object_kv *ml_get_key(msgpack_object *map, char *check_for_key)
+{
+ int i;
+ char *key_str = NULL;
+ size_t key_str_size = 0;
+ msgpack_object_kv *kv;
+ msgpack_object key;
+ int check_key = FLB_FALSE;
+
+ kv = map->via.map.ptr;
+
+ for(i=0; i < map->via.map.size; i++) {
+ check_key = FLB_FALSE;
+
+ key = (kv+i)->key;
+ if (key.type == MSGPACK_OBJECT_BIN) {
+ key_str = (char *) key.via.bin.ptr;
+ key_str_size = key.via.bin.size;
+ check_key = FLB_TRUE;
+ }
+ if (key.type == MSGPACK_OBJECT_STR) {
+ key_str = (char *) key.via.str.ptr;
+ key_str_size = key.via.str.size;
+ check_key = FLB_TRUE;
+ }
+
+ if (check_key == FLB_TRUE) {
+ if (strncmp(check_for_key, key_str, key_str_size) == 0) {
+ return (kv+i);
+ }
+ }
+ }
+ return NULL;
+}
+
+int ml_is_partial(msgpack_object *map)
+{
+ char *val_str = NULL;
+ msgpack_object_kv *kv;
+ msgpack_object val;
+
+ kv = ml_get_key(map, FLB_MULTILINE_PARTIAL_MESSAGE_KEY);
+
+ if (kv == NULL) {
+ return FLB_FALSE;
+ }
+
+ val = kv->val;
+ if (val.type == MSGPACK_OBJECT_BIN) {
+ val_str = (char *) val.via.bin.ptr;
+ }
+ if (val.type == MSGPACK_OBJECT_STR) {
+ val_str = (char *) val.via.str.ptr;
+ }
+
+ if (strncasecmp("true", val_str, 4) == 0) {
+ return FLB_TRUE;
+ }
+ return FLB_FALSE;
+}
+
+int ml_is_partial_last(msgpack_object *map)
+{
+ char *val_str = NULL;
+ msgpack_object_kv *kv;
+ msgpack_object val;
+
+ kv = ml_get_key(map, FLB_MULTILINE_PARTIAL_LAST_KEY);
+
+ if (kv == NULL) {
+ return FLB_FALSE;
+ }
+
+ val = kv->val;
+ if (val.type == MSGPACK_OBJECT_BIN) {
+ val_str = (char *) val.via.bin.ptr;
+ }
+ if (val.type == MSGPACK_OBJECT_STR) {
+ val_str = (char *) val.via.str.ptr;
+ }
+
+ if (strncasecmp("true", val_str, 4) == 0) {
+ return FLB_TRUE;
+ }
+ return FLB_FALSE;
+}
+
+int ml_get_partial_id(msgpack_object *map,
+ char **partial_id_str,
+ size_t *partial_id_size)
+{
+ char *val_str = NULL;
+ size_t val_str_size = 0;
+ msgpack_object_kv *kv;
+ msgpack_object val;
+
+ kv = ml_get_key(map, FLB_MULTILINE_PARTIAL_ID_KEY);
+
+ if (kv == NULL) {
+ return -1;
+ }
+
+ val = kv->val;
+ if (val.type == MSGPACK_OBJECT_BIN) {
+ val_str = (char *) val.via.bin.ptr;
+ val_str_size = val.via.bin.size;
+ }
+ if (val.type == MSGPACK_OBJECT_STR) {
+ val_str = (char *) val.via.str.ptr;
+ val_str_size = val.via.str.size;
+ }
+
+ *partial_id_str = val_str;
+ *partial_id_size = val_str_size;
+
+ return 0;
+}
+
+struct split_message_packer *ml_get_packer(struct mk_list *packers, const char *tag,
+ char *input_name,
+ char *partial_id_str, size_t partial_id_size)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct split_message_packer *packer;
+ int name_check;
+ int tag_check;
+ int id_check;
+
+
+ mk_list_foreach_safe(head, tmp, packers) {
+ packer = mk_list_entry(head, struct split_message_packer, _head);
+ id_check = strncmp(packer->partial_id, partial_id_str, partial_id_size);
+ if (id_check != 0) {
+ continue;
+ }
+ name_check = strcmp(packer->input_name, input_name);
+ if (name_check != 0) {
+ continue;
+ }
+ tag_check = strcmp(packer->tag, tag);
+ if (tag_check == 0) {
+ return packer;
+ }
+ }
+
+ return NULL;
+}
+
+struct split_message_packer *ml_create_packer(const char *tag, char *input_name,
+ char *partial_id_str, size_t partial_id_size,
+ msgpack_object *map, char *multiline_key_content,
+ struct flb_time *tm)
+{
+ struct split_message_packer *packer;
+ msgpack_object_kv *kv;
+ msgpack_object_kv *split_kv;
+ flb_sds_t tmp;
+ int i;
+ char *key_str = NULL;
+ size_t key_str_size = 0;
+ msgpack_object key;
+ int check_key = FLB_FALSE;
+ size_t len;
+ int ret;
+
+ packer = flb_calloc(1, sizeof(struct split_message_packer));
+ if (!packer) {
+ flb_errno();
+ return NULL;
+ }
+
+ tmp = flb_sds_create(input_name);
+ if (!tmp) {
+ flb_errno();
+ flb_free(packer);
+ return NULL;
+ }
+ packer->input_name = tmp;
+
+ tmp = flb_sds_create(tag);
+ if (!tmp) {
+ flb_errno();
+ ml_split_message_packer_destroy(packer);
+ return NULL;
+ }
+ packer->tag = tmp;
+
+ tmp = flb_sds_create_len(partial_id_str, partial_id_size);
+ if (!tmp) {
+ flb_errno();
+ ml_split_message_packer_destroy(packer);
+ return NULL;
+ }
+ packer->partial_id = tmp;
+
+ packer->buf = flb_sds_create_size(FLB_MULTILINE_PARTIAL_BUF_SIZE);
+ if (!packer->buf) {
+ flb_errno();
+ ml_split_message_packer_destroy(packer);
+ return NULL;
+ }
+
+ ret = flb_log_event_encoder_init(&packer->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_error("[partial message concat] Log event encoder initialization error : %d", ret);
+
+ ml_split_message_packer_destroy(packer);
+
+ return NULL;
+ }
+
+ /* get the key that is split */
+ split_kv = ml_get_key(map, multiline_key_content);
+ if (split_kv == NULL) {
+ flb_error("[partial message concat] Could not find key %s in record", multiline_key_content);
+ ml_split_message_packer_destroy(packer);
+ return NULL;
+ }
+
+ ret = flb_log_event_encoder_begin_record(&packer->log_encoder);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_error("[partial message concat] Log event encoder error : %d", ret);
+
+ ml_split_message_packer_destroy(packer);
+
+ return NULL;
+ }
+
+ /* write all of the keys except the split one and the partial metadata */
+ ret = flb_log_event_encoder_set_timestamp(
+ &packer->log_encoder, tm);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_error("[partial message concat] Log event encoder error : %d", ret);
+
+ ml_split_message_packer_destroy(packer);
+
+ return NULL;
+ }
+
+ kv = map->via.map.ptr;
+
+ for(i=0;
+ i < map->via.map.size &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ i++) {
+ if ((kv+i) == split_kv) {
+ continue;
+ }
+
+ key = (kv+i)->key;
+ if (key.type == MSGPACK_OBJECT_BIN) {
+ key_str = (char *) key.via.bin.ptr;
+ key_str_size = key.via.bin.size;
+ check_key = FLB_TRUE;
+ }
+ if (key.type == MSGPACK_OBJECT_STR) {
+ key_str = (char *) key.via.str.ptr;
+ key_str_size = key.via.str.size;
+ check_key = FLB_TRUE;
+ }
+
+ len = FLB_MULTILINE_PARTIAL_PREFIX_LEN;
+ if (key_str_size < len) {
+ len = key_str_size;
+ }
+
+ if (check_key == FLB_TRUE) {
+ if (strncmp(FLB_MULTILINE_PARTIAL_PREFIX, key_str, len) == 0) {
+ /* don't pack the partial keys */
+ continue;
+ }
+ }
+
+ ret = flb_log_event_encoder_append_body_values(
+ &packer->log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&kv[i].key),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&kv[i].val));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ /* write split kv last, so we can append to it later as needed */
+ ret = flb_log_event_encoder_append_body_msgpack_object(
+ &packer->log_encoder,
+ &split_kv->key);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_error("[partial message concat] Log event encoder error : %d", ret);
+
+ ml_split_message_packer_destroy(packer);
+
+ return NULL;
+ }
+
+ return packer;
+}
+
+unsigned long long ml_current_timestamp() {
+ struct flb_time te;
+ flb_time_get(&te);
+ return flb_time_to_nanosec(&te) / 1000000LL;
+}
+
+int ml_split_message_packer_write(struct split_message_packer *packer,
+ msgpack_object *map, char *multiline_key_content)
+{
+ char *val_str = NULL;
+ size_t val_str_size = 0;
+ msgpack_object_kv *kv;
+ msgpack_object val;
+
+ kv = ml_get_key(map, multiline_key_content);
+
+ if (kv == NULL) {
+ flb_error("[partial message concat] Could not find key %s in record", multiline_key_content);
+ return -1;
+ }
+
+ val = kv->val;
+ if (val.type == MSGPACK_OBJECT_BIN) {
+ val_str = (char *) val.via.bin.ptr;
+ val_str_size = val.via.bin.size;
+ } else if (val.type == MSGPACK_OBJECT_STR) {
+ val_str = (char *) val.via.str.ptr;
+ val_str_size = val.via.str.size;
+ } else {
+ return -1;
+ }
+
+ flb_sds_cat_safe(&packer->buf, val_str, val_str_size);
+ packer->last_write_time = ml_current_timestamp();
+
+ return 0;
+}
+
+void ml_split_message_packer_complete(struct split_message_packer *packer)
+{
+ flb_log_event_encoder_append_body_string(&packer->log_encoder,
+ packer->buf,
+ flb_sds_len(packer->buf));
+
+ flb_log_event_encoder_commit_record(&packer->log_encoder);
+}
+
+void ml_append_complete_record(struct split_message_packer *packer,
+ struct flb_log_event_encoder *log_encoder)
+{
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ ret = flb_log_event_decoder_init(
+ &log_decoder,
+ packer->log_encoder.output_buffer,
+ packer->log_encoder.output_length);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_error("[partial message concat] Log event decoder error : %d",
+ ret);
+
+ return;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ ret = flb_log_event_encoder_begin_record(log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ log_encoder,
+ &log_event.timestamp);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_metadata_from_msgpack_object(
+ log_encoder,
+ log_event.metadata);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_body_from_msgpack_object(
+ log_encoder,
+ log_event.body);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(log_encoder);
+ }
+ else {
+ flb_log_event_encoder_rollback_record(log_encoder);
+
+ break;
+ }
+ }
+
+ if (ret == FLB_EVENT_DECODER_ERROR_INSUFFICIENT_DATA &&
+ log_decoder.offset == packer->log_encoder.output_length) {
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_error("[partial message concat] Log event encoder error : %d",
+ ret);
+
+ return;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+}
+
+void ml_split_message_packer_destroy(struct split_message_packer *packer)
+{
+ if (!packer) {
+ return;
+ }
+
+ if (packer->tag) {
+ flb_sds_destroy(packer->tag);
+ }
+ if (packer->buf) {
+ flb_sds_destroy(packer->buf);
+ }
+ if (packer->input_name) {
+ flb_sds_destroy(packer->input_name);
+ }
+ if (packer->partial_id) {
+ flb_sds_destroy(packer->partial_id);
+ }
+
+ flb_log_event_encoder_destroy(&packer->log_encoder);
+
+ flb_free(packer);
+}
+
diff --git a/src/fluent-bit/plugins/filter_multiline/ml_concat.h b/src/fluent-bit/plugins/filter_multiline/ml_concat.h
new file mode 100644
index 000000000..a2d3a79e1
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_multiline/ml_concat.h
@@ -0,0 +1,84 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_MULTILINE_CONCAT_H
+#define FLB_FILTER_MULTILINE_CONCAT_H
+
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#define FLB_MULTILINE_MEM_BUF_LIMIT_DEFAULT "10M"
+#define FLB_MULTILINE_METRIC_EMITTED 200
+/* docker logs are split at 16KB */
+#define FLB_MULTILINE_PARTIAL_BUF_SIZE 24000
+
+/*
+ * Long term these keys could be made user configurable,
+ * but everyone asking for this right now wants it for split
+ * Docker logs, which use a fixed set of keys.
+ */
+#define FLB_MULTILINE_PARTIAL_PREFIX "partial_"
+#define FLB_MULTILINE_PARTIAL_PREFIX_LEN 8
+#define FLB_MULTILINE_PARTIAL_MESSAGE_KEY "partial_message"
+#define FLB_MULTILINE_PARTIAL_ID_KEY "partial_id"
+#define FLB_MULTILINE_PARTIAL_LAST_KEY "partial_last"
+
+struct split_message_packer {
+ flb_sds_t tag;
+ flb_sds_t input_name;
+ flb_sds_t partial_id;
+
+ /* packaging buffers */
+ // msgpack_sbuffer mp_sbuf; /* temporary msgpack buffer */
+ // msgpack_packer mp_pck; /* temporary msgpack packer */
+ struct flb_log_event_encoder log_encoder;
+
+ flb_sds_t buf;
+
+ /* used to flush buffers that have been pending for more than flush_ms */
+ unsigned long long last_write_time;
+
+ struct mk_list _head;
+};
+
+msgpack_object_kv *ml_get_key(msgpack_object *map, char *check_for_key);
+int ml_is_partial(msgpack_object *map);
+int ml_is_partial_last(msgpack_object *map);
+int ml_get_partial_id(msgpack_object *map,
+ char **partial_id_str,
+ size_t *partial_id_size);
+struct split_message_packer *ml_get_packer(struct mk_list *packers, const char *tag,
+ char *input_name,
+ char *partial_id_str, size_t partial_id_size);
+struct split_message_packer *ml_create_packer(const char *tag, char *input_name,
+ char *partial_id_str, size_t partial_id_size,
+ msgpack_object *map, char *multiline_key_content,
+ struct flb_time *tm);
+int ml_split_message_packer_write(struct split_message_packer *packer,
+ msgpack_object *map, char *multiline_key_content);
+void ml_split_message_packer_complete(struct split_message_packer *packer);
+void ml_split_message_packer_destroy(struct split_message_packer *packer);
+void ml_append_complete_record(struct split_message_packer *packer,
+ struct flb_log_event_encoder *log_encoder);
+unsigned long long ml_current_timestamp();
+
+
+#endif
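
Editor's note: taken together, the API above implements a per-fragment lifecycle: look up (or create) the packer for a (tag, input, partial_id) triple, append the fragment's key_content, and once the last fragment arrives, complete the buffered record, append it to the outgoing encoder and destroy the packer. A condensed sketch of that flow, mirroring what ml_filter_partial() in ml.c does (process_fragment() itself is hypothetical):

#include <msgpack.h>
#include <fluent-bit/flb_time.h>
#include "ml_concat.h"

static int process_fragment(struct mk_list *packers, const char *tag,
                            char *input_name, char *id, size_t id_size,
                            msgpack_object *body, struct flb_time *ts,
                            char *key_content,
                            struct flb_log_event_encoder *enc)
{
    struct split_message_packer *packer;

    /* one packer per (tag, input, partial_id) */
    packer = ml_get_packer(packers, tag, input_name, id, id_size);
    if (packer == NULL) {
        packer = ml_create_packer(tag, input_name, id, id_size,
                                  body, key_content, ts);
        if (packer == NULL) {
            return -1;
        }
        mk_list_add(&packer->_head, packers);
    }

    /* append this fragment's key_content to the packer buffer */
    if (ml_split_message_packer_write(packer, body, key_content) < 0) {
        return -1;
    }

    if (ml_is_partial_last(body) == FLB_TRUE) {
        /* last fragment: emit the concatenated record and drop the packer */
        ml_split_message_packer_complete(packer);
        ml_append_complete_record(packer, enc);
        mk_list_del(&packer->_head);
        ml_split_message_packer_destroy(packer);
    }

    return 0;
}
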
diff --git a/src/fluent-bit/plugins/filter_nest/CMakeLists.txt b/src/fluent-bit/plugins/filter_nest/CMakeLists.txt
new file mode 100644
index 000000000..a78f591b2
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_nest/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ nest.c)
+
+FLB_PLUGIN(filter_nest "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_nest/nest.c b/src/fluent-bit/plugins/filter_nest/nest.c
new file mode 100644
index 000000000..96bcc2974
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_nest/nest.c
@@ -0,0 +1,761 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+#include "nest.h"
+
+#include <stdio.h>
+#include <sys/types.h>
+
+
+static void teardown(struct filter_nest_ctx *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+
+ struct filter_nest_wildcard *wildcard;
+
+ flb_free(ctx->prefix);
+ flb_free(ctx->key);
+
+ mk_list_foreach_safe(head, tmp, &ctx->wildcards) {
+ wildcard = mk_list_entry(head, struct filter_nest_wildcard, _head);
+ flb_free(wildcard->key);
+ mk_list_del(&wildcard->_head);
+ flb_free(wildcard);
+ }
+
+}
+
+static int configure(struct filter_nest_ctx *ctx,
+ struct flb_filter_instance *f_ins,
+ struct flb_config *config)
+{
+
+ struct mk_list *head;
+ struct flb_kv *kv;
+ struct filter_nest_wildcard *wildcard;
+
+ char *operation_nest = "nest";
+ char *operation_lift = "lift";
+
+ ctx->key = NULL;
+ ctx->key_len = 0;
+ ctx->prefix = NULL;
+ ctx->prefix_len = 0;
+ ctx->remove_prefix = false;
+ ctx->add_prefix = false;
+
+ if (flb_filter_config_map_set(f_ins, ctx) < 0) {
+ flb_plg_error(f_ins, "unable to load configuration");
+ return -1;
+ }
+
+ mk_list_foreach(head, &f_ins->properties) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+
+ if (strcasecmp(kv->key, "operation") == 0) {
+ if (strncmp(kv->val, operation_nest, 4) == 0) {
+ ctx->operation = NEST;
+ }
+ else if (strncmp(kv->val, operation_lift, 4) == 0) {
+ ctx->operation = LIFT;
+ }
+ else {
+ flb_plg_error(ctx->ins, "Key \"operation\" has invalid value "
+ "'%s'. Expected 'nest' or 'lift'\n",
+ kv->val);
+ return -1;
+ }
+ }
+ else if (strcasecmp(kv->key, "wildcard") == 0) {
+ wildcard = flb_malloc(sizeof(struct filter_nest_wildcard));
+ if (!wildcard) {
+ flb_plg_error(ctx->ins, "Unable to allocate memory for "
+ "wildcard");
+ flb_free(wildcard);
+ return -1;
+ }
+
+ wildcard->key = flb_strndup(kv->val, flb_sds_len(kv->val));
+ if (wildcard->key == NULL) {
+ flb_errno();
+ flb_free(wildcard);
+ return -1;
+ }
+ wildcard->key_len = flb_sds_len(kv->val);
+
+ if (wildcard->key[wildcard->key_len - 1] == '*') {
+ wildcard->key_is_dynamic = true;
+ wildcard->key_len--;
+ }
+ else {
+ wildcard->key_is_dynamic = false;
+ }
+
+ mk_list_add(&wildcard->_head, &ctx->wildcards);
+ ctx->wildcards_cnt++;
+
+ }
+ else if (strcasecmp(kv->key, "nest_under") == 0) {
+ ctx->key = flb_strdup(kv->val);
+ ctx->key_len = flb_sds_len(kv->val);
+ }
+ else if (strcasecmp(kv->key, "nested_under") == 0) {
+ ctx->key = flb_strdup(kv->val);
+ ctx->key_len = flb_sds_len(kv->val);
+ }
+ else if (strcasecmp(kv->key, "prefix_with") == 0) {
+ ctx->prefix = flb_strdup(kv->val);
+ ctx->prefix_len = flb_sds_len(kv->val);
+ ctx->add_prefix = true;
+ }
+ else if (strcasecmp(kv->key, "add_prefix") == 0) {
+ ctx->prefix = flb_strdup(kv->val);
+ ctx->prefix_len = flb_sds_len(kv->val);
+ ctx->add_prefix = true;
+ }
+ else if (strcasecmp(kv->key, "remove_prefix") == 0) {
+ ctx->prefix = flb_strdup(kv->val);
+ ctx->prefix_len = flb_sds_len(kv->val);
+ ctx->remove_prefix = true;
+ } else {
+ flb_plg_error(ctx->ins, "Invalid configuration key '%s'", kv->key);
+ return -1;
+ }
+ }
+
+ /* Sanity checks */
+ if (ctx->remove_prefix && ctx->add_prefix) {
+ flb_plg_error(ctx->ins, "Add_prefix and Remove_prefix are exclusive");
+ return -1;
+ }
+
+ if ((ctx->operation != NEST) &&
+ (ctx->operation != LIFT)) {
+ flb_plg_error(ctx->ins, "Operation can only be NEST or LIFT");
+ return -1;
+ }
+
+ if ((ctx->remove_prefix || ctx->add_prefix) && ctx->prefix == 0) {
+ flb_plg_error(ctx->ins, "A prefix has to be specified for prefix add "
+ "or remove operations");
+ return -1;
+ }
+
+ return 0;
+
+}
+
+static void helper_pack_string_remove_prefix(
+ struct flb_log_event_encoder *log_encoder,
+ struct filter_nest_ctx *ctx,
+ const char *str,
+ int len)
+{
+ if (strncmp(str, ctx->prefix, ctx->prefix_len) == 0) {
+ flb_log_event_encoder_append_body_string(
+ log_encoder,
+ (char *) &str[ctx->prefix_len],
+ len - ctx->prefix_len);
+ }
+ else {
+ /* Key does not contain specified prefix */
+ flb_log_event_encoder_append_body_string(
+ log_encoder, (char *) str, len);
+ }
+}
+
+static void helper_pack_string_add_prefix(struct flb_log_event_encoder *log_encoder,
+ struct filter_nest_ctx *ctx,
+ const char *str,
+ int len)
+{
+ flb_log_event_encoder_append_body_values(
+ log_encoder,
+ FLB_LOG_EVENT_STRING_LENGTH_VALUE(ctx->prefix_len + len),
+ FLB_LOG_EVENT_STRING_BODY_VALUE(ctx->prefix, ctx->prefix_len),
+ FLB_LOG_EVENT_STRING_BODY_VALUE(str, len));
+}
+
+static inline void map_pack_each_fn(struct flb_log_event_encoder *log_encoder,
+ msgpack_object * map,
+ struct filter_nest_ctx *ctx,
+ bool(*f) (msgpack_object_kv * kv,
+ struct filter_nest_ctx * ctx))
+{
+ int i;
+ int ret;
+
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+ for (i = 0;
+ i < map->via.map.size &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ i++) {
+ if ((*f) (&map->via.map.ptr[i], ctx)) {
+ ret = flb_log_event_encoder_append_body_values(
+ log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(
+ &map->via.map.ptr[i].key),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(
+ &map->via.map.ptr[i].val));
+ }
+ }
+}
+
+static inline void map_transform_and_pack_each_fn(struct flb_log_event_encoder *log_encoder,
+ msgpack_object * map,
+ struct filter_nest_ctx *ctx,
+ bool(*f) (msgpack_object_kv * kv,
+ struct filter_nest_ctx * ctx)
+ )
+{
+ int i;
+ int ret;
+ msgpack_object *key;
+
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+ for (i = 0;
+ i < map->via.map.size &&
+ ret == FLB_EVENT_ENCODER_SUCCESS ;
+ i++) {
+ if ((*f) (&map->via.map.ptr[i], ctx)) {
+ key = &map->via.map.ptr[i].key;
+
+ if (ctx->add_prefix) {
+ helper_pack_string_add_prefix(log_encoder, ctx, key->via.str.ptr, key->via.str.size);
+ }
+ else if (ctx->remove_prefix) {
+ helper_pack_string_remove_prefix(log_encoder, ctx, key->via.str.ptr, key->via.str.size);
+ }
+ else {
+ ret = flb_log_event_encoder_append_body_msgpack_object(
+ log_encoder, key);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_msgpack_object(
+ log_encoder, &map->via.map.ptr[i].val);
+ }
+ }
+ }
+}
+
+static inline int map_count_fn(msgpack_object * map,
+ struct filter_nest_ctx *ctx,
+ bool(*f) (msgpack_object_kv * kv,
+ struct filter_nest_ctx * ctx)
+ )
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < map->via.map.size; i++) {
+ if ((*f) (&map->via.map.ptr[i], ctx)) {
+ count++;
+ }
+ }
+ return count;
+}
+
+static inline bool is_kv_to_nest(msgpack_object_kv * kv,
+ struct filter_nest_ctx *ctx)
+{
+
+ const char *key;
+ int klen;
+
+ msgpack_object *obj = &kv->key;
+
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct filter_nest_wildcard *wildcard;
+
+ if (obj->type == MSGPACK_OBJECT_BIN) {
+ key = obj->via.bin.ptr;
+ klen = obj->via.bin.size;
+ }
+ else if (obj->type == MSGPACK_OBJECT_STR) {
+ key = obj->via.str.ptr;
+ klen = obj->via.str.size;
+ }
+ else {
+ /* If the key is not something we can match on, leave it alone */
+ return false;
+ }
+
+ mk_list_foreach_safe(head, tmp, &ctx->wildcards) {
+ wildcard = mk_list_entry(head, struct filter_nest_wildcard, _head);
+
+ if (wildcard->key_is_dynamic) {
+ /* This will positively match "ABC123" with prefix "ABC*" */
+ if (strncmp(key, wildcard->key, wildcard->key_len) == 0) {
+ return true;
+ }
+ }
+ else {
+ /* This will positively match "ABC" with prefix "ABC" */
+ if ((wildcard->key_len == klen) &&
+ (strncmp(key, wildcard->key, klen) == 0)
+ ) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+
+}
+
+static inline bool is_not_kv_to_nest(msgpack_object_kv * kv,
+ struct filter_nest_ctx *ctx)
+{
+ return !is_kv_to_nest(kv, ctx);
+}
+
+static inline bool is_kv_to_lift(msgpack_object_kv * kv,
+ struct filter_nest_ctx *ctx)
+{
+
+ const char *key;
+ char *tmp;
+ int klen;
+ bool match;
+
+ msgpack_object *obj = &kv->key;
+
+ if (obj->type == MSGPACK_OBJECT_BIN) {
+ key = obj->via.bin.ptr;
+ klen = obj->via.bin.size;
+ }
+ else if (obj->type == MSGPACK_OBJECT_STR) {
+ key = obj->via.str.ptr;
+ klen = obj->via.str.size;
+ }
+ else {
+ /* If the key is not something we can match on, leave it alone */
+ return false;
+ }
+
+ match = ((ctx->key_len == klen) &&
+ (strncmp(key, ctx->key, klen) == 0));
+
+ if (match && (kv->val.type != MSGPACK_OBJECT_MAP)) {
+ tmp = flb_malloc(klen + 1);
+ if (!tmp) {
+ flb_errno();
+ return false;
+ }
+ memcpy(tmp, key, klen);
+ tmp[klen] = '\0';
+ flb_plg_warn(ctx->ins, "Value of key '%s' is not a map. "
+ "Will not attempt to lift from here",
+ tmp);
+ flb_free(tmp);
+ return false;
+ }
+ else {
+ return match;
+ }
+}
+
+static inline bool is_not_kv_to_lift(msgpack_object_kv * kv,
+ struct filter_nest_ctx *ctx)
+{
+ return !is_kv_to_lift(kv, ctx);
+}
+
+static inline int count_items_to_lift(msgpack_object * map,
+ struct filter_nest_ctx *ctx)
+{
+ int i;
+ int count = 0;
+ msgpack_object_kv *kv;
+
+ for (i = 0; i < map->via.map.size; i++) {
+ kv = &map->via.map.ptr[i];
+ if (is_kv_to_lift(kv, ctx)) {
+ count = count + kv->val.via.map.size;
+ }
+ }
+ return count;
+}
+
+static inline void pack_map(
+ struct flb_log_event_encoder *log_encoder,
+ msgpack_object * map,
+ struct filter_nest_ctx *ctx)
+{
+ int i;
+ int ret;
+ msgpack_object *key;
+
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+
+ for (i = 0;
+ i < map->via.map.size &&
+ ret == FLB_EVENT_ENCODER_SUCCESS ;
+ i++) {
+ key = &map->via.map.ptr[i].key;
+
+ if (ctx->add_prefix) {
+ helper_pack_string_add_prefix(log_encoder, ctx, key->via.str.ptr, key->via.str.size);
+ }
+ else if (ctx->remove_prefix) {
+ helper_pack_string_remove_prefix(log_encoder, ctx, key->via.str.ptr, key->via.str.size);
+ }
+ else {
+ ret = flb_log_event_encoder_append_body_msgpack_object(log_encoder, key);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_msgpack_object(log_encoder,
+ &map->via.map.ptr[i].val);
+ }
+ }
+}
+
+static inline void map_lift_each_fn(struct flb_log_event_encoder *log_encoder,
+ msgpack_object * map,
+ struct filter_nest_ctx *ctx,
+ bool(*f) (msgpack_object_kv * kv,
+ struct filter_nest_ctx * ctx)
+ )
+{
+ int i;
+ msgpack_object_kv *kv;
+
+ for (i = 0; i < map->via.map.size; i++) {
+ kv = &map->via.map.ptr[i];
+ if ((*f) (kv, ctx)) {
+ pack_map(log_encoder, &kv->val, ctx);
+ }
+ }
+}
+
+static inline int apply_lifting_rules(struct flb_log_event_encoder *log_encoder,
+ struct flb_log_event *log_event,
+ struct filter_nest_ctx *ctx)
+{
+ int ret;
+ msgpack_object map = *log_event->body;
+
+ int items_to_lift = map_count_fn(&map, ctx, &is_kv_to_lift);
+
+ if (items_to_lift == 0) {
+ flb_plg_debug(ctx->ins, "Lift : No match found for %s", ctx->key);
+ return 0;
+ }
+
+ /*
+ * New items at top level =
+ * current size
+ * - number of maps to lift
+     *     + number of elements inside maps to lift
+ */
+ int toplevel_items =
+ (map.via.map.size - items_to_lift) + count_items_to_lift(&map, ctx);
+
+ flb_plg_debug(ctx->ins, "Lift : Outer map size is %d, will be %d, "
+ "lifting %d record(s)",
+ map.via.map.size, toplevel_items, items_to_lift);
+
+ ret = flb_log_event_encoder_begin_record(log_encoder);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -1;
+ }
+
+ ret = flb_log_event_encoder_set_timestamp(
+ log_encoder, &log_event->timestamp);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -2;
+ }
+
+ ret = flb_log_event_encoder_set_metadata_from_msgpack_object(
+ log_encoder, log_event->metadata);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -3;
+ }
+
+    /* Pack all current top-level items, excluding the keys to be lifted */
+ map_pack_each_fn(log_encoder, &map, ctx, &is_not_kv_to_lift);
+
+    /* Lift and pack all elements inside the keys to be lifted */
+ map_lift_each_fn(log_encoder, &map, ctx, &is_kv_to_lift);
+
+ ret = flb_log_event_encoder_commit_record(log_encoder);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -2;
+ }
+
+ return 1;
+}
+
+static inline int apply_nesting_rules(struct flb_log_event_encoder *log_encoder,
+ struct flb_log_event *log_event,
+ struct filter_nest_ctx *ctx)
+{
+ int ret;
+ msgpack_object map = *log_event->body;
+
+ size_t items_to_nest = map_count_fn(&map, ctx, &is_kv_to_nest);
+
+ if (items_to_nest == 0) {
+ flb_plg_debug(ctx->ins, "no match found for %s", ctx->prefix);
+ return 0;
+ }
+
+ size_t toplevel_items = (map.via.map.size - items_to_nest + 1);
+
+ flb_plg_trace(ctx->ins, "outer map size is %d, will be %lu, nested "
+ "map size will be %lu",
+ map.via.map.size, toplevel_items, items_to_nest);
+
+ ret = flb_log_event_encoder_begin_record(log_encoder);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -1;
+ }
+
+ ret = flb_log_event_encoder_set_timestamp(
+ log_encoder, &log_event->timestamp);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -2;
+ }
+
+ ret = flb_log_event_encoder_set_metadata_from_msgpack_object(
+ log_encoder, log_event->metadata);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -3;
+ }
+
+ /*
+ * Record array item 2/2
+ * Create a new map with toplevel items +1 for nested map
+ */
+ map_pack_each_fn(log_encoder, &map, ctx, &is_not_kv_to_nest);
+
+ /* Pack the nested map key */
+ ret = flb_log_event_encoder_append_body_string(
+ log_encoder, ctx->key, ctx->key_len);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -4;
+ }
+
+ /* Create the nest map value */
+ ret = flb_log_event_encoder_body_begin_map(log_encoder);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -5;
+ }
+
+ /* Pack the nested items */
+ map_transform_and_pack_each_fn(log_encoder, &map, ctx, &is_kv_to_nest);
+
+ ret = flb_log_event_encoder_commit_record(log_encoder);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -6;
+ }
+
+ return 1;
+}
+
+static int cb_nest_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config, void *data)
+{
+ struct filter_nest_ctx *ctx;
+
+ ctx = flb_malloc(sizeof(struct filter_nest_ctx));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ mk_list_init(&ctx->wildcards);
+ ctx->ins = f_ins;
+ ctx->wildcards_cnt = 0;
+
+ if (configure(ctx, f_ins, config) < 0) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ flb_filter_set_context(f_ins, ctx);
+ return 0;
+}
+
+static int cb_nest_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t * out_size,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *context, struct flb_config *config)
+{
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ struct filter_nest_ctx *ctx = context;
+ int modified_records = 0;
+ int ret;
+
+ (void) f_ins;
+ (void) i_ins;
+ (void) config;
+
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ modified_records = 0;
+
+ if (ctx->operation == NEST) {
+ modified_records =
+ apply_nesting_rules(&log_encoder, &log_event, ctx);
+ }
+ else {
+ modified_records =
+ apply_lifting_rules(&log_encoder, &log_event, ctx);
+ }
+
+ if (modified_records == 0) {
+ ret = flb_log_event_encoder_emit_raw_record(
+ &log_encoder,
+ log_decoder.record_base,
+ log_decoder.record_length);
+ }
+ }
+
+ if (ret == FLB_EVENT_DECODER_ERROR_INSUFFICIENT_DATA &&
+ log_decoder.offset == bytes) {
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+ }
+
+ if (log_encoder.output_length > 0) {
+ *out_buf = log_encoder.output_buffer;
+ *out_size = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", ret);
+
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+static int cb_nest_exit(void *data, struct flb_config *config)
+{
+ struct filter_nest_ctx *ctx = data;
+
+ teardown(ctx);
+ flb_free(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "Operation", NULL,
+ 0, FLB_FALSE, 0,
+     "Select the operation: 'nest' or 'lift'"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Wildcard", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+     "Nest the record fields whose keys match the wildcard"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Nest_under", NULL,
+ 0, FLB_FALSE, 0,
+ "Nest records matching the Wildcard under this key"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Nested_under", NULL,
+ 0, FLB_FALSE, 0,
+ "Lift records nested under the Nested_under key"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Add_prefix", NULL,
+ 0, FLB_FALSE, 0,
+ "Prefix affected keys with this string"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Remove_prefix", NULL,
+ 0, FLB_FALSE, 0,
+ "Remove prefix from affected keys if it matches this string"
+ },
+ {0}
+};
+
+struct flb_filter_plugin filter_nest_plugin = {
+ .name = "nest",
+ .description = "nest events by specified field values",
+ .cb_init = cb_nest_init,
+ .cb_filter = cb_nest_filter,
+ .cb_exit = cb_nest_exit,
+ .config_map = config_map,
+ .flags = 0
+};
diff --git a/src/fluent-bit/plugins/filter_nest/nest.h b/src/fluent-bit/plugins/filter_nest/nest.h
new file mode 100644
index 000000000..47c35ee1e
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_nest/nest.h
@@ -0,0 +1,55 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_NEST_H
+#define FLB_FILTER_NEST_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+
+enum FILTER_NEST_OPERATION {
+ NEST,
+ LIFT
+};
+
+struct filter_nest_ctx
+{
+ enum FILTER_NEST_OPERATION operation;
+ char *key;
+ int key_len;
+ char *prefix;
+ int prefix_len;
+ // nest
+ struct mk_list wildcards;
+ int wildcards_cnt;
+ bool remove_prefix;
+ // lift
+ bool add_prefix;
+ struct flb_filter_instance *ins;
+};
+
+struct filter_nest_wildcard
+{
+ char *key;
+ int key_len;
+ bool key_is_dynamic;
+ struct mk_list _head;
+};
+
+#endif
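
Editor's note: the key_is_dynamic flag above encodes the wildcard rule applied by is_kv_to_nest() in nest.c: a trailing '*' turns the value into a prefix match, anything else must match the key exactly and in full. A small illustrative helper (wildcard_matches() is not part of the source):

#include <stdbool.h>
#include <string.h>
#include "nest.h"

static bool wildcard_matches(const struct filter_nest_wildcard *w,
                             const char *key, int klen)
{
    if (w->key_is_dynamic) {
        /* "ABC*" matches "ABC123"; key_len already excludes the '*' */
        return strncmp(key, w->key, w->key_len) == 0;
    }

    /* exact match: same length and same bytes */
    return (w->key_len == klen) && (strncmp(key, w->key, klen) == 0);
}
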
diff --git a/src/fluent-bit/plugins/filter_nightfall/CMakeLists.txt b/src/fluent-bit/plugins/filter_nightfall/CMakeLists.txt
new file mode 100644
index 000000000..d3535d15a
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_nightfall/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ nightfall.c
+ nightfall_api.c
+ )
+
+FLB_PLUGIN(filter_nightfall "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_nightfall/nightfall.c b/src/fluent-bit/plugins/filter_nightfall/nightfall.c
new file mode 100644
index 000000000..3e37d2a0b
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_nightfall/nightfall.c
@@ -0,0 +1,654 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/tls/flb_tls.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include "nightfall.h"
+#include "nightfall_api.h"
+
+static int redact_array_fields(msgpack_packer *new_rec_pk, int *to_redact_index,
+ msgpack_object_array *to_redact, struct nested_obj *cur,
+ struct mk_list *stack, char *should_pop);
+static int redact_map_fields(msgpack_packer *new_rec_pk, int *to_redact_index,
+ msgpack_object_array *to_redact, struct nested_obj *cur,
+ struct mk_list *stack, char *should_pop);
+static void maybe_redact_field(msgpack_packer *new_rec_pk, msgpack_object *field,
+ msgpack_object_array *to_redact, int *to_redact_i,
+ int byte_offset);
+
+static int cb_nightfall_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config,
+ void *data)
+{
+ struct flb_filter_nightfall *ctx = NULL;
+ int ret;
+
+ /* Create context */
+ ctx = flb_calloc(1, sizeof(struct flb_filter_nightfall));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->ins = f_ins;
+
+ /* Populate context with config map defaults and incoming properties */
+ ret = flb_filter_config_map_set(f_ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(f_ins, "configuration error");
+ flb_free(ctx);
+ return -1;
+ }
+
+ if (ctx->sampling_rate <= 0 || ctx->sampling_rate > 1) {
+ flb_plg_error(f_ins, "invalid sampling rate, must be (0,1]");
+ flb_free(ctx);
+ return -1;
+ }
+
+ if (ctx->nightfall_api_key == NULL) {
+ flb_plg_error(f_ins, "invalid Nightfall API key");
+ flb_free(ctx);
+ return -1;
+ }
+
+ if (ctx->policy_id == NULL) {
+ flb_plg_error(f_ins, "invalid Nightfall policy ID");
+ flb_free(ctx);
+ return -1;
+ }
+
+ ctx->auth_header = flb_sds_create_size(42);
+ flb_sds_printf(&ctx->auth_header,
+ "Bearer %s",
+ ctx->nightfall_api_key);
+
+ ctx->tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ ctx->tls_verify,
+ ctx->tls_debug,
+ ctx->tls_vhost,
+ ctx->tls_ca_path,
+ NULL,
+ NULL, NULL, NULL);
+ if (!ctx->tls) {
+ flb_plg_error(f_ins, "tls initialization error");
+ flb_free(ctx);
+ return -1;
+ }
+
+ ctx->upstream = flb_upstream_create_url(config,
+ FLB_FILTER_NIGHTFALL_API_URL,
+ FLB_IO_TLS,
+ ctx->tls);
+ if (!ctx->upstream) {
+ flb_plg_error(ctx->ins, "connection initialization error");
+ flb_free(ctx);
+ return -1;
+ }
+
+ flb_stream_disable_async_mode(&ctx->upstream->base);
+
+ flb_filter_set_context(f_ins, ctx);
+
+ srand((unsigned int)time(NULL));
+ return 0;
+}
+
+static int redact_record(msgpack_object *data, char **to_redact_data, size_t *to_redact_size,
+ struct flb_time t, msgpack_sbuffer *new_rec)
+{
+ int ret;
+ struct mk_list stack;
+ struct nested_obj *cur;
+ struct nested_obj *new_obj;
+ struct mk_list *head;
+ struct mk_list *tmp;
+
+ msgpack_sbuffer new_rec_sbuf;
+ msgpack_packer new_rec_pk;
+
+ char should_pop = FLB_TRUE;
+
+ int to_redact_index = 0;
+ msgpack_unpacked finding_list_unpacked;
+ size_t finding_list_off = 0;
+ msgpack_object_array to_redact;
+
+ /* Convert to_redact_data to a msgpack_object_array */
+ msgpack_unpacked_init(&finding_list_unpacked);
+ ret = msgpack_unpack_next(&finding_list_unpacked, *to_redact_data, *to_redact_size,
+ &finding_list_off);
+ if (ret == MSGPACK_UNPACK_SUCCESS) {
+ to_redact = finding_list_unpacked.data.via.array;
+ }
+
+ mk_list_init(&stack);
+
+ msgpack_sbuffer_init(&new_rec_sbuf);
+ msgpack_packer_init(&new_rec_pk, &new_rec_sbuf, msgpack_sbuffer_write);
+
+ new_obj = flb_calloc(1, sizeof(struct nested_obj));
+ new_obj->obj = data;
+ new_obj->cur_index = 0;
+ new_obj->start_at_val = FLB_FALSE;
+ mk_list_add(&new_obj->_head, &stack);
+
+ if (data->type == MSGPACK_OBJECT_ARRAY) {
+ msgpack_pack_array(&new_rec_pk, data->via.array.size);
+ }
+ else if (data->type == MSGPACK_OBJECT_MAP) {
+ msgpack_pack_map(&new_rec_pk, data->via.map.size);
+ }
+
+ /*
+ * Since logs can contain many levels of nested objects, use stack-based DFS here
+ * to build back and redact log.
+ */
+ while (mk_list_is_empty(&stack) == -1) {
+ cur = mk_list_entry_last(&stack, struct nested_obj, _head);
+ should_pop = FLB_TRUE;
+
+ switch(cur->obj->type) {
+ case MSGPACK_OBJECT_ARRAY:
+ ret = redact_array_fields(&new_rec_pk, &to_redact_index, &to_redact, cur,
+ &stack, &should_pop);
+ if (ret != 0) {
+ msgpack_unpacked_destroy(&finding_list_unpacked);
+ mk_list_foreach_safe(head, tmp, &stack) {
+ cur = mk_list_entry(head, struct nested_obj, _head);
+ mk_list_del(&cur->_head);
+ flb_free(cur);
+ }
+ return -1;
+ }
+ break;
+ case MSGPACK_OBJECT_MAP:
+ ret = redact_map_fields(&new_rec_pk, &to_redact_index, &to_redact, cur,
+ &stack, &should_pop);
+ if (ret != 0) {
+ msgpack_unpacked_destroy(&finding_list_unpacked);
+ mk_list_foreach_safe(head, tmp, &stack) {
+ cur = mk_list_entry(head, struct nested_obj, _head);
+ mk_list_del(&cur->_head);
+ flb_free(cur);
+ }
+ return -1;
+ }
+ break;
+ case MSGPACK_OBJECT_STR:
+ maybe_redact_field(&new_rec_pk, cur->obj, &to_redact, &to_redact_index, 0);
+ break;
+ case MSGPACK_OBJECT_POSITIVE_INTEGER:
+ maybe_redact_field(&new_rec_pk, cur->obj, &to_redact, &to_redact_index, 0);
+ break;
+ case MSGPACK_OBJECT_NEGATIVE_INTEGER:
+ maybe_redact_field(&new_rec_pk, cur->obj, &to_redact, &to_redact_index, 0);
+ break;
+ default:
+ msgpack_pack_object(&new_rec_pk, *cur->obj);
+ }
+
+ if (should_pop) {
+ mk_list_del(&cur->_head);
+ flb_free(cur);
+ }
+ }
+ msgpack_unpacked_destroy(&finding_list_unpacked);
+
+ *new_rec = new_rec_sbuf;
+ return 0;
+}
+
+static int redact_array_fields(msgpack_packer *new_rec_pk, int *to_redact_index,
+ msgpack_object_array *to_redact, struct nested_obj *cur,
+ struct mk_list *stack, char *should_pop)
+{
+ msgpack_object *item;
+ struct nested_obj *new_obj;
+ int i;
+
+ for (i = cur->cur_index; i < cur->obj->via.array.size; i++) {
+ item = &cur->obj->via.array.ptr[i];
+ if (item->type == MSGPACK_OBJECT_MAP || item->type == MSGPACK_OBJECT_ARRAY) {
+ /* A nested object, so add to stack and return to DFS to process immediately */
+ new_obj = flb_malloc(sizeof(struct nested_obj));
+ if (!new_obj) {
+ flb_errno();
+ return -1;
+ }
+ new_obj->obj = item;
+ new_obj->cur_index = 0;
+ new_obj->start_at_val = FLB_FALSE;
+ mk_list_add(&new_obj->_head, stack);
+
+ if (item->type == MSGPACK_OBJECT_ARRAY) {
+ msgpack_pack_array(new_rec_pk, item->via.array.size);
+ }
+ else {
+ msgpack_pack_map(new_rec_pk, item->via.map.size);
+ }
+
+ /*
+ * Since we are not done yet with the current array, increment the index that
+ * keeps track of progress and don't pop the current array so we can come
+ * back later.
+ */
+ cur->cur_index = i + 1;
+ *should_pop = FLB_FALSE;
+ break;
+ }
+ else if (item->type == MSGPACK_OBJECT_STR ||
+ item->type == MSGPACK_OBJECT_POSITIVE_INTEGER ||
+ item->type == MSGPACK_OBJECT_NEGATIVE_INTEGER) {
+ /*
+ * A field that could potentially contain sensitive content, so we check
+ * if there were any findings associated with it
+ */
+ maybe_redact_field(new_rec_pk, item, to_redact, to_redact_index, 0);
+ }
+ else {
+ /* Non scannable type, so just append as is. */
+ msgpack_pack_object(new_rec_pk, *item);
+ }
+ }
+
+ return 0;
+}
+
+static int redact_map_fields(msgpack_packer *new_rec_pk, int *to_redact_index,
+ msgpack_object_array *to_redact, struct nested_obj *cur,
+ struct mk_list *stack, char *should_pop)
+{
+ msgpack_object *k;
+ msgpack_object *v;
+ struct nested_obj *new_obj;
+ int i;
+
+ for (i = cur->cur_index; i < cur->obj->via.map.size; i++) {
+ k = &cur->obj->via.map.ptr[i].key;
+ if (!cur->start_at_val) {
+ /* Handle the key of this kv pair */
+ if (k->type == MSGPACK_OBJECT_MAP || k->type == MSGPACK_OBJECT_ARRAY) {
+ /* A nested object, so add to stack and return to DFS to process immediately */
+ new_obj = flb_malloc(sizeof(struct nested_obj));
+ if (!new_obj) {
+ flb_errno();
+ return -1;
+ }
+ new_obj->obj = k;
+ new_obj->cur_index = 0;
+ new_obj->start_at_val = FLB_FALSE;
+ mk_list_add(&new_obj->_head, stack);
+
+ if (k->type == MSGPACK_OBJECT_ARRAY) {
+ msgpack_pack_array(new_rec_pk, k->via.array.size);
+ }
+ else {
+ msgpack_pack_map(new_rec_pk, k->via.map.size);
+ }
+
+ /*
+ * Since we are not done yet with the current kv pair, don't increment
+ * the progress index and set flag so we know to start at the value later
+ */
+ cur->cur_index = i;
+ cur->start_at_val = FLB_TRUE;
+ /* Set should_pop to false because we are not done with the current map */
+ *should_pop = FLB_FALSE;
+ break;
+ }
+ else if (k->type == MSGPACK_OBJECT_STR ||
+ k->type == MSGPACK_OBJECT_POSITIVE_INTEGER ||
+ k->type == MSGPACK_OBJECT_NEGATIVE_INTEGER) {
+ /*
+ * A field that could potentially contain sensitive content, so we check
+ * if there were any findings associated with it
+ */
+ maybe_redact_field(new_rec_pk, k, to_redact, to_redact_index, 0);
+ }
+ else {
+ /* Non scannable type, so just append as is. */
+ msgpack_pack_object(new_rec_pk, *k);
+ }
+ }
+
+ /* Handle the value of this kv pair */
+ v = &cur->obj->via.map.ptr[i].val;
+ if (v->type == MSGPACK_OBJECT_MAP || v->type == MSGPACK_OBJECT_ARRAY) {
+ /* A nested object, so add to stack and return to DFS to process immediately */
+ new_obj = flb_malloc(sizeof(struct nested_obj));
+ if (!new_obj) {
+ flb_errno();
+ return -1;
+ }
+ new_obj->obj = v;
+ new_obj->cur_index = 0;
+ new_obj->start_at_val = FLB_FALSE;
+ mk_list_add(&new_obj->_head, stack);
+
+ if (v->type == MSGPACK_OBJECT_ARRAY) {
+ msgpack_pack_array(new_rec_pk, v->via.array.size);
+ }
+ else {
+ msgpack_pack_map(new_rec_pk, v->via.map.size);
+ }
+
+ /* Increment here because we are done with this kv pair */
+ cur->cur_index = i + 1;
+ cur->start_at_val = FLB_FALSE;
+ /* Set should_pop to false because we are not done with the current map */
+ *should_pop = FLB_FALSE;
+ break;
+ }
+ else if (v->type == MSGPACK_OBJECT_STR ||
+ v->type == MSGPACK_OBJECT_POSITIVE_INTEGER ||
+ v->type == MSGPACK_OBJECT_NEGATIVE_INTEGER) {
+ if (k->type == MSGPACK_OBJECT_STR) {
+ /*
+ * When building the request to scan the log, keys that are strings are
+ * appended to the beginning of the value to provide more context when
+ * scanning in the format of "<key> <val>", which is why we need to
+ * offset the length of the key plus a space when we do redaction on the
+ * value on its own.
+ */
+ maybe_redact_field(new_rec_pk, v, to_redact, to_redact_index,
+ k->via.str.size + 1);
+ }
+ else {
+ maybe_redact_field(new_rec_pk, v, to_redact, to_redact_index, 0);
+ }
+ }
+ else {
+ msgpack_pack_object(new_rec_pk, *v);
+ }
+ }
+
+ return 0;
+}
+
+static void maybe_redact_field(msgpack_packer *new_rec_pk, msgpack_object *field,
+ msgpack_object_array *to_redact, int *to_redact_i,
+ int byte_offset)
+{
+ flb_sds_t cur_str;
+ msgpack_object_array content_range;
+ int64_t content_start;
+ int64_t content_end;
+ int i;
+ int64_t replace_i;
+
+ /*
+     * Should not happen under normal circumstances, as the length of to_redact should
+     * match the number of scannable fields (positive/negative ints, strings) in the
+     * event; if it does happen, just append the remaining fields unmodified.
+ */
+ if (*to_redact_i >= to_redact->size) {
+ msgpack_pack_object(new_rec_pk, *field);
+ return;
+ }
+
+ /*
+ * Check if there was anything sensitive found for this field, if there wasn't we
+ * can leave it as is
+ */
+ if (to_redact->ptr[*to_redact_i].via.array.size == 0) {
+ msgpack_pack_object(new_rec_pk, *field);
+ *to_redact_i = *to_redact_i + 1;
+ return;
+ }
+
+ /* If field is an integer redact entire field */
+ if (field->type == MSGPACK_OBJECT_POSITIVE_INTEGER ||
+ field->type == MSGPACK_OBJECT_NEGATIVE_INTEGER) {
+        /* six-character mask; length excludes the NUL terminator */
+        msgpack_pack_str_with_body(new_rec_pk, "******", 6);
+ *to_redact_i = *to_redact_i + 1;
+ return;
+ }
+
+ /* If field is a string redact only the sensitive parts */
+ cur_str = flb_sds_create_len(field->via.str.ptr, field->via.str.size);
+ for (i = 0; i < to_redact->ptr[*to_redact_i].via.array.size; i++) {
+ content_range = to_redact->ptr[*to_redact_i].via.array.ptr[i].via.array;
+ content_start = content_range.ptr[0].via.i64 - byte_offset;
+ if (content_start < 0) {
+ content_start = 0;
+ }
+ content_end = content_range.ptr[1].via.i64 - byte_offset;
+ for (replace_i = content_start; replace_i < content_end &&
+ replace_i < flb_sds_len(cur_str); replace_i++) {
+ cur_str[replace_i] = '*';
+ }
+ }
+ msgpack_pack_str_with_body(new_rec_pk, cur_str, flb_sds_len(cur_str));
+ *to_redact_i = *to_redact_i + 1;
+
+ flb_sds_destroy(cur_str);
+}
+
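+/*
+ * Filter callback: for each sampled record, send its scannable fields to the
+ * Nightfall scan API and, if anything sensitive is reported, re-encode the
+ * record with those portions masked.
+ */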
+static int cb_nightfall_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_size,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *context,
+ struct flb_config *config)
+{
+ struct flb_filter_nightfall *ctx = context;
+ int ret;
+ char is_modified = FLB_FALSE;
+
+ struct flb_time tmp = {0};
+
+ char *to_redact;
+ size_t to_redact_size;
+ char is_sensitive = FLB_FALSE;
+
+ msgpack_sbuffer new_rec_sbuf;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ (void) f_ins;
+ (void) i_ins;
+ (void) config;
+
+ /*
+     * Generate a random double between 0 and 1; if it is over the configured
+     * sampling rate, don't scan this log.
+ */
+ if ((double)rand()/(double)RAND_MAX > ctx->sampling_rate) {
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ ret = scan_log(ctx, log_event.body, &to_redact, &to_redact_size, &is_sensitive);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "scanning error");
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ if (is_sensitive == FLB_TRUE) {
+ ret = redact_record(log_event.body, &to_redact, &to_redact_size, tmp, &new_rec_sbuf);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "redaction error");
+ flb_free(to_redact);
+ msgpack_sbuffer_destroy(&new_rec_sbuf);
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+ return FLB_FILTER_NOTOUCH;
+ }
+ is_modified = FLB_TRUE;
+ }
+
+ if (is_modified) {
+ ret = flb_log_event_encoder_begin_record(&log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ &log_encoder, &log_event.timestamp);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_metadata_from_msgpack_object(
+ &log_encoder, log_event.metadata);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_body_from_raw_msgpack(
+ &log_encoder, new_rec_sbuf.data, new_rec_sbuf.size);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(
+ &log_encoder);
+ }
+ }
+ }
+ flb_free(to_redact);
+
+ if (log_encoder.output_length > 0) {
+ *out_buf = log_encoder.output_buffer;
+ *out_size = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", ret);
+
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+static int cb_nightfall_exit(void *data, struct flb_config *config)
+{
+ struct flb_filter_nightfall *ctx = data;
+
+ if (ctx == NULL) {
+ return 0;
+ }
+ if (ctx->upstream) {
+ flb_upstream_destroy(ctx->upstream);
+ }
+ if (ctx->tls) {
+ flb_tls_destroy(ctx->tls);
+ }
+ if (ctx->auth_header) {
+ flb_sds_destroy(ctx->auth_header);
+ }
+ flb_free(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "nightfall_api_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_filter_nightfall, nightfall_api_key),
+ "The Nightfall API key to scan your logs with."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "policy_id", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_filter_nightfall, policy_id),
+ "The Nightfall policy ID to scan your logs with."
+ },
+ {
+ FLB_CONFIG_MAP_DOUBLE, "sampling_rate", "1",
+ 0, FLB_TRUE, offsetof(struct flb_filter_nightfall, sampling_rate),
+ "The sampling rate for scanning, must be (0,1]. 1 means all logs will be scanned."
+ },
+ {
+ FLB_CONFIG_MAP_INT, "tls.debug", "0",
+ 0, FLB_TRUE, offsetof(struct flb_filter_nightfall, tls_debug),
+ "Set TLS debug level: 0 (no debug), 1 (error), "
+ "2 (state change), 3 (info) and 4 (verbose)"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "tls.verify", "true",
+ 0, FLB_TRUE, offsetof(struct flb_filter_nightfall, tls_verify),
+ "Enable or disable verification of TLS peer certificate"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "tls.vhost", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_filter_nightfall, tls_vhost),
+ "Set optional TLS virtual host"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "tls.ca_path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_filter_nightfall, tls_ca_path),
+ "Path to root certificates on the system"
+ },
+ {0}
+};
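+
+/*
+ * Example configuration (illustrative values):
+ *
+ *   [FILTER]
+ *       Name              nightfall
+ *       Match             *
+ *       nightfall_api_key <api key>
+ *       policy_id         <policy uuid>
+ *       sampling_rate     0.5
+ */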
+
+struct flb_filter_plugin filter_nightfall_plugin = {
+ .name = "nightfall",
+ .description = "scans records for sensitive content",
+ .cb_init = cb_nightfall_init,
+ .cb_filter = cb_nightfall_filter,
+ .cb_exit = cb_nightfall_exit,
+ .config_map = config_map,
+ .flags = 0
+};
diff --git a/src/fluent-bit/plugins/filter_nightfall/nightfall.h b/src/fluent-bit/plugins/filter_nightfall/nightfall.h
new file mode 100644
index 000000000..e190c1d51
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_nightfall/nightfall.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_NIGHTFALL_H
+#define FLB_FILTER_NIGHTFALL_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+
+struct nested_obj {
+ msgpack_object *obj;
+ int cur_index;
+ char start_at_val;
+
+ struct mk_list _head;
+};
+
+struct payload {
+ msgpack_object *obj;
+ msgpack_object *key_to_scan_with;
+
+ struct mk_list _head;
+};
+
+struct flb_filter_nightfall {
+ /* Config values */
+ flb_sds_t nightfall_api_key;
+ flb_sds_t policy_id;
+ double sampling_rate;
+ int tls_debug;
+ int tls_verify;
+ char *tls_ca_path;
+ flb_sds_t tls_vhost;
+
+ struct flb_tls *tls;
+ struct flb_upstream *upstream;
+ struct flb_filter_instance *ins;
+ flb_sds_t auth_header;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_nightfall/nightfall_api.c b/src/fluent-bit/plugins/filter_nightfall/nightfall_api.c
new file mode 100644
index 000000000..91ecf7948
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_nightfall/nightfall_api.c
@@ -0,0 +1,536 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/tls/flb_tls.h>
+
+#include "nightfall_api.h"
+
+static int extract_array_fields(struct nested_obj *cur, struct mk_list *stack,
+ struct mk_list *payload_list, char *should_pop);
+static int extract_map_fields(struct nested_obj *cur, struct mk_list *stack,
+ struct mk_list *payload_list, char *should_pop);
+
+static flb_sds_t build_request_body(struct flb_filter_nightfall *ctx,
+ msgpack_object *data)
+{
+ int ret;
+ struct mk_list stack;
+ struct nested_obj *cur;
+ struct nested_obj *new_obj;
+
+ struct mk_list payload_list;
+ struct mk_list *head;
+ struct mk_list *tmp;
+ struct payload *pl;
+
+ msgpack_sbuffer req_sbuf;
+ msgpack_packer req_pk;
+ flb_sds_t num_str;
+ int num_str_len;
+ flb_sds_t key_str;
+ flb_sds_t val_str;
+ flb_sds_t key_val_str;
+ int key_val_str_len;
+ flb_sds_t request_body;
+
+ char should_pop = FLB_TRUE;
+
+ new_obj = flb_malloc(sizeof(struct nested_obj));
+ if (!new_obj) {
+ flb_errno();
+ return NULL;
+ }
+ new_obj->obj = data;
+ new_obj->cur_index = 0;
+ new_obj->start_at_val = FLB_FALSE;
+ mk_list_init(&stack);
+ mk_list_add(&new_obj->_head, &stack);
+
+ mk_list_init(&payload_list);
+
+ /*
+ * Since logs can contain many levels of nested objects, use stack-based DFS here
+ * to extract scannable fields (positive/negative ints, strings)
+ */
+ while (mk_list_is_empty(&stack) == -1) {
+ cur = mk_list_entry_last(&stack, struct nested_obj, _head);
+ should_pop = FLB_TRUE;
+
+ switch (cur->obj->type) {
+ case MSGPACK_OBJECT_ARRAY:
+ ret = extract_array_fields(cur, &stack, &payload_list, &should_pop);
+ if (ret != 0) {
+ mk_list_foreach_safe(head, tmp, &stack) {
+ cur = mk_list_entry(head, struct nested_obj, _head);
+ mk_list_del(&cur->_head);
+ flb_free(cur);
+ }
+ mk_list_foreach_safe(head, tmp, &payload_list) {
+ pl = mk_list_entry(head, struct payload, _head);
+ mk_list_del(&pl->_head);
+ flb_free(pl);
+ }
+ return NULL;
+ }
+ break;
+ case MSGPACK_OBJECT_MAP:
+ ret = extract_map_fields(cur, &stack, &payload_list, &should_pop);
+ if (ret != 0) {
+ mk_list_foreach_safe(head, tmp, &stack) {
+ cur = mk_list_entry(head, struct nested_obj, _head);
+ mk_list_del(&cur->_head);
+ flb_free(cur);
+ }
+ mk_list_foreach_safe(head, tmp, &payload_list) {
+ pl = mk_list_entry(head, struct payload, _head);
+ mk_list_del(&pl->_head);
+ flb_free(pl);
+ }
+ return NULL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (should_pop) {
+ mk_list_del(&cur->_head);
+ flb_free(cur);
+ }
+ }
+
+ msgpack_sbuffer_init(&req_sbuf);
+ msgpack_packer_init(&req_pk, &req_sbuf, msgpack_sbuffer_write);
+
+ /*
+ * Build request according to schema at
+ * https://docs.nightfall.ai/reference/scanpayloadv3
+ */
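+    /*
+     * The resulting JSON body has the following shape (illustrative):
+     *
+     *   {"payload": ["<field>", "<key> <value>", ...],
+     *    "policyUUIDs": ["<policy uuid>"]}
+     */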
+ msgpack_pack_map(&req_pk, 2);
+ msgpack_pack_str_with_body(&req_pk, "payload", 7);
+ msgpack_pack_array(&req_pk, mk_list_size(&payload_list));
+ /* Initialize buf to hold string representation of numbers */
+ num_str = flb_sds_create_size(21);
+ mk_list_foreach_safe(head, tmp, &payload_list) {
+ pl = mk_list_entry(head, struct payload, _head);
+ if (pl->obj->type == MSGPACK_OBJECT_STR) {
+ if (pl->key_to_scan_with != NULL) {
+ /*
+ * Payload is the value of a keyval pair with a string key that could
+ * provide context when scanning, so join them together and scan.
+ */
+ val_str = flb_sds_create_len(pl->obj->via.str.ptr,
+ pl->obj->via.str.size);
+ key_str = flb_sds_create_len(pl->key_to_scan_with->via.str.ptr,
+ pl->key_to_scan_with->via.str.size);
+ key_val_str = flb_sds_create_size(pl->key_to_scan_with->via.str.size +
+ pl->obj->via.str.size + 2);
+ key_val_str_len = flb_sds_snprintf(&key_val_str,
+ flb_sds_alloc(key_val_str),
+ "%s %s", key_str, val_str);
+ msgpack_pack_str_with_body(&req_pk, key_val_str, key_val_str_len);
+ flb_sds_destroy(val_str);
+ flb_sds_destroy(key_str);
+ flb_sds_destroy(key_val_str);
+ }
+ else {
+ msgpack_pack_str_with_body(&req_pk, pl->obj->via.str.ptr,
+ pl->obj->via.str.size);
+ }
+ }
+ else {
+ if (pl->key_to_scan_with != NULL) {
+ /*
+ * Payload is the value of a keyval pair with a string key that could
+ * provide context when scanning, so join them together and scan.
+ */
+ key_str = flb_sds_create_len(pl->key_to_scan_with->via.str.ptr,
+ pl->key_to_scan_with->via.str.size);
+                num_str_len = flb_sds_snprintf(&num_str, flb_sds_alloc(num_str),
+                                               "%"PRIi64, pl->obj->via.i64);
+                key_val_str = flb_sds_create_size(pl->key_to_scan_with->via.str.size +
+                                                  num_str_len + 2);
+ key_val_str_len = flb_sds_snprintf(&key_val_str,
+ flb_sds_alloc(key_val_str),
+ "%s %s", key_str, num_str);
+ msgpack_pack_str_with_body(&req_pk, key_val_str, key_val_str_len);
+ flb_sds_destroy(key_str);
+ flb_sds_destroy(key_val_str);
+ }
+ else {
+ num_str_len = flb_sds_snprintf(&num_str, flb_sds_alloc(num_str),
+ "%"PRIi64, pl->obj->via.i64);
+ msgpack_pack_str_with_body(&req_pk, num_str, num_str_len);
+ }
+ }
+ mk_list_del(&pl->_head);
+ flb_free(pl);
+ }
+ msgpack_pack_str_with_body(&req_pk, "policyUUIDs", 11);
+ msgpack_pack_array(&req_pk, 1);
+ msgpack_pack_str_with_body(&req_pk, ctx->policy_id, 36);
+
+ request_body = flb_msgpack_raw_to_json_sds(req_sbuf.data, req_sbuf.size);
+
+ msgpack_sbuffer_destroy(&req_sbuf);
+ flb_sds_destroy(num_str);
+
+ return request_body;
+}
+
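+/*
+ * DFS helper: walks one array, pushing nested maps/arrays onto the stack and
+ * collecting scannable scalars (strings, positive/negative ints) into
+ * payload_list. '*should_pop' is cleared when a nested object is found so the
+ * array is revisited after that object has been processed.
+ */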
+static int extract_array_fields(struct nested_obj *cur, struct mk_list *stack,
+ struct mk_list *payload_list, char *should_pop)
+{
+ msgpack_object *item;
+ struct nested_obj *new_obj;
+ struct payload *pl;
+ int i;
+
+ for (i = cur->cur_index; i < cur->obj->via.array.size; i++) {
+ item = &cur->obj->via.array.ptr[i];
+ if (item->type == MSGPACK_OBJECT_MAP || item->type == MSGPACK_OBJECT_ARRAY) {
+ /* A nested object, so add to stack and return to DFS to process immediately */
+ new_obj = flb_malloc(sizeof(struct nested_obj));
+ if (!new_obj) {
+ flb_errno();
+ return -1;
+ }
+ new_obj->obj = item;
+ new_obj->cur_index = 0;
+ new_obj->start_at_val = FLB_FALSE;
+ mk_list_add(&new_obj->_head, stack);
+
+ /*
+ * Since we are not done yet with the current array, increment the index that
+ * keeps track of progress and don't pop the current array so we can come
+ * back later.
+ */
+ cur->cur_index = i + 1;
+ *should_pop = FLB_FALSE;
+ break;
+ }
+ else if (item->type == MSGPACK_OBJECT_STR ||
+ item->type == MSGPACK_OBJECT_POSITIVE_INTEGER ||
+ item->type == MSGPACK_OBJECT_NEGATIVE_INTEGER) {
+ /* Field is a scannable type, so add to payload list to build request later */
+ pl = flb_calloc(1, sizeof(struct payload));
+ if (!pl) {
+ flb_errno();
+ return -1;
+ }
+ pl->obj = item;
+ mk_list_add(&pl->_head, payload_list);
+ }
+ }
+
+ return 0;
+}
+
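+/*
+ * DFS helper: like extract_array_fields() but for maps. Both keys and values
+ * are examined, and a string key is remembered alongside its value so the two
+ * can be scanned together for additional context.
+ */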
+static int extract_map_fields(struct nested_obj *cur, struct mk_list *stack,
+ struct mk_list *payload_list, char *should_pop)
+{
+ struct nested_obj *new_obj;
+ msgpack_object *k;
+ msgpack_object *v;
+ struct payload *pl;
+ int i;
+
+ for (i = cur->cur_index; i < cur->obj->via.map.size; i++) {
+ k = &cur->obj->via.map.ptr[i].key;
+ if (!cur->start_at_val) {
+ /* Handle the key of this kv pair */
+ if (k->type == MSGPACK_OBJECT_MAP || k->type == MSGPACK_OBJECT_ARRAY) {
+ /* A nested object, so add to stack and return to DFS to process immediately */
+ new_obj = flb_malloc(sizeof(struct nested_obj));
+ if (!new_obj) {
+ flb_errno();
+ return -1;
+ }
+ new_obj->obj = k;
+ new_obj->cur_index = 0;
+ new_obj->start_at_val = FLB_FALSE;
+ mk_list_add(&new_obj->_head, stack);
+
+ /*
+ * Since we are not done yet with the current kv pair, don't increment
+ * the progress index and set flag so we know to start at the value later
+ */
+ cur->cur_index = i;
+ cur->start_at_val = FLB_TRUE;
+ /* Set should_pop to false because we are not done with the current map */
+ *should_pop = FLB_FALSE;
+ break;
+ }
+ else if (k->type == MSGPACK_OBJECT_STR ||
+ k->type == MSGPACK_OBJECT_POSITIVE_INTEGER ||
+ k->type == MSGPACK_OBJECT_NEGATIVE_INTEGER) {
+ /* Field is a scannable type, so add to payload list to build request later */
+ pl = flb_calloc(1, sizeof(struct payload));
+ if (!pl) {
+ flb_errno();
+ return -1;
+ }
+ pl->obj = k;
+ mk_list_add(&pl->_head, payload_list);
+ }
+ }
+
+ /* Handle the value of this kv pair */
+ v = &cur->obj->via.map.ptr[i].val;
+ if (v->type == MSGPACK_OBJECT_MAP || v->type == MSGPACK_OBJECT_ARRAY) {
+ /* A nested object, so add to stack and return to DFS to process immediately */
+ new_obj = flb_malloc(sizeof(struct nested_obj));
+ if (!new_obj) {
+ flb_errno();
+ return -1;
+ }
+ new_obj->obj = v;
+ new_obj->cur_index = 0;
+ new_obj->start_at_val = FLB_FALSE;
+ mk_list_add(&new_obj->_head, stack);
+
+ /* Increment here because we are done with this kv pair */
+ cur->cur_index = i + 1;
+ cur->start_at_val = FLB_FALSE;
+ /* Set should_pop to false because we are not done with the current map */
+ *should_pop = FLB_FALSE;
+ break;
+ }
+ else if (v->type == MSGPACK_OBJECT_STR ||
+ v->type == MSGPACK_OBJECT_POSITIVE_INTEGER ||
+ v->type == MSGPACK_OBJECT_NEGATIVE_INTEGER) {
+ /* Field is a scannable type, so add to payload list to build request later */
+ pl = flb_calloc(1, sizeof(struct payload));
+ if (!pl) {
+ flb_errno();
+ return -1;
+ }
+ if (k->type == MSGPACK_OBJECT_STR) {
+ /*
+                 * The key could provide more context for scanning, so save it to be
+                 * scanned together with the value.
+ */
+ pl->key_to_scan_with = k;
+ }
+ pl->obj = v;
+ mk_list_add(&pl->_head, payload_list);
+ }
+ }
+
+ return 0;
+}
+
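+/* Looks up the string key 'key' in map 'm'; on success stores its value in 'ret'. */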
+static int get_map_val(msgpack_object m, char *key, msgpack_object *ret)
+{
+ msgpack_object_kv kv;
+ int i;
+
+ if (m.type != MSGPACK_OBJECT_MAP) {
+ return -1;
+ }
+ for (i = 0; i < m.via.map.size; i++) {
+ kv = m.via.map.ptr[i];
+ if (kv.key.via.str.size == strlen(key) &&
+ !strncmp(kv.key.via.str.ptr, key, strlen(key))) {
+ *ret = kv.val;
+ return 0;
+ }
+ }
+ return -1;
+}
+
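+/*
+ * Converts the JSON scan response into a packed msgpack array with one entry
+ * per scanned field, listing the sensitive byte ranges that are later used
+ * for redaction, and flags whether anything sensitive was found at all.
+ */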
+static int process_response(const char *resp, size_t resp_size,
+ char **to_redact, size_t *to_redact_size,
+ char *is_sensitive)
+{
+ int root_type;
+ char *buf;
+ size_t size;
+ msgpack_unpacked resp_unpacked;
+ size_t off = 0;
+ int ret;
+ int i, j, k;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+
+ msgpack_object resp_map;
+ msgpack_object findings_list;
+ msgpack_object findings;
+ msgpack_object finding;
+ msgpack_object location;
+ msgpack_object byteRange;
+
+ /* Convert json response body to msgpack */
+ ret = flb_pack_json(resp, resp_size, &buf, &size, &root_type, NULL);
+ if (ret != 0) {
+ flb_errno();
+ return -1;
+ }
+
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+ msgpack_unpacked_init(&resp_unpacked);
+
+ /*
+ * For every scannable field (positive/negative ints, strings) we sent to
+     * scan, Nightfall returns an array of finding objects that indicate
+ * which portions of the field may be sensitive. We return those byte
+ * ranges here so we can do redaction later.
+ */
+ ret = msgpack_unpack_next(&resp_unpacked, buf, size, &off);
+ if (ret == MSGPACK_UNPACK_SUCCESS) {
+ resp_map = resp_unpacked.data;
+ ret = get_map_val(resp_map, "findings", &findings_list);
+ if (ret != 0) {
+ msgpack_unpacked_destroy(&resp_unpacked);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ flb_free(buf);
+ flb_errno();
+ return -1;
+ }
+ msgpack_pack_array(&mp_pck, findings_list.via.array.size);
+
+ for (i = 0; i < findings_list.via.array.size; i++) {
+ findings = findings_list.via.array.ptr[i];
+ msgpack_pack_array(&mp_pck, findings.via.array.size);
+
+ if (!*is_sensitive && findings.via.array.size > 0) {
+ *is_sensitive = FLB_TRUE;
+ }
+
+ for (j = 0; j < findings.via.array.size; j++) {
+ finding = findings.via.array.ptr[j];
+ ret = get_map_val(finding, "location", &location);
+ if (ret != 0) {
+ msgpack_unpacked_destroy(&resp_unpacked);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ flb_free(buf);
+ flb_errno();
+ return -1;
+ }
+
+ ret = get_map_val(location, "byteRange", &byteRange);
+ if (ret != 0) {
+ msgpack_unpacked_destroy(&resp_unpacked);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ flb_free(buf);
+ flb_errno();
+ return -1;
+ }
+
+ msgpack_pack_array(&mp_pck, byteRange.via.map.size);
+ for (k = 0; k < byteRange.via.map.size; k++) {
+ msgpack_pack_int64(&mp_pck, byteRange.via.map.ptr[k].val.via.i64);
+ }
+ }
+ }
+ }
+ msgpack_unpacked_destroy(&resp_unpacked);
+ flb_free(buf);
+
+ *to_redact = mp_sbuf.data;
+ *to_redact_size = mp_sbuf.size;
+
+ return 0;
+}
+
+/* Scans log for sensitive content and returns the locations of such content */
+int scan_log(struct flb_filter_nightfall *ctx, msgpack_object *data,
+ char **to_redact, size_t *to_redact_size, char *is_sensitive)
+{
+ struct flb_http_client *client;
+ struct flb_connection *u_conn;
+
+ flb_sds_t body;
+ int ret;
+ size_t b_sent;
+
+ body = build_request_body(ctx, data);
+ if (body == NULL) {
+ flb_plg_error(ctx->ins, "could not build request");
+ return -1;
+ }
+
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "connection initialization error");
+ flb_sds_destroy(body);
+ return -1;
+ }
+
+ /* Compose HTTP Client request */
+ client = flb_http_client(u_conn,
+ FLB_HTTP_POST, "/v3/scan",
+ body, flb_sds_len(body),
+ FLB_FILTER_NIGHTFALL_API_HOST, 443,
+ NULL, 0);
+
+ if (!client) {
+ flb_plg_error(ctx->ins, "could not create http client");
+ flb_sds_destroy(body);
+ flb_upstream_conn_release(u_conn);
+ return -1;
+ }
+
+ flb_http_buffer_size(client, 0);
+
+ flb_http_add_header(client, "Authorization", 13, ctx->auth_header, 42);
+ flb_http_add_header(client, "User-Agent", 10, "Fluent-Bit", 10);
+ flb_http_add_header(client, "Content-Type", 12, "application/json", 16);
+
+ /* Perform request */
+ ret = flb_http_do(client, &b_sent);
+ flb_plg_info(ctx->ins, "Nightfall request http_do=%i, HTTP Status: %i",
+ ret, client->resp.status);
+ flb_sds_destroy(body);
+
+ if (ret != 0 || client->resp.status != 200) {
+ if (client->resp.payload_size > 0) {
+ flb_plg_info(ctx->ins, "Nightfall request\n%s",
+ client->resp.payload);
+ }
+ flb_http_client_destroy(client);
+ flb_upstream_conn_release(u_conn);
+ return -1;
+ }
+
+ ret = process_response(client->resp.payload, client->resp.payload_size,
+ to_redact, to_redact_size, is_sensitive);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "could not process response");
+ flb_http_client_destroy(client);
+ flb_upstream_conn_release(u_conn);
+ return -1;
+ }
+
+ flb_http_client_destroy(client);
+ flb_upstream_conn_release(u_conn);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/filter_nightfall/nightfall_api.h b/src/fluent-bit/plugins/filter_nightfall/nightfall_api.h
new file mode 100644
index 000000000..0d1e6b647
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_nightfall/nightfall_api.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_NIGHTFALL_API_H
+#define FLB_FILTER_NIGHTFALL_API_H
+
+#define FLB_FILTER_NIGHTFALL_API_URL "https://api.nightfall.ai/"
+#define FLB_FILTER_NIGHTFALL_API_HOST "api.nightfall.ai"
+
+#include "nightfall.h"
+
+int scan_log(struct flb_filter_nightfall *ctx, msgpack_object *data,
+ char **to_redact, size_t *to_redact_size, char *is_sensitive);
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_parser/CMakeLists.txt b/src/fluent-bit/plugins/filter_parser/CMakeLists.txt
new file mode 100644
index 000000000..e9a2d8a7f
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_parser/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ filter_parser.c)
+
+FLB_PLUGIN(filter_parser "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_parser/filter_parser.c b/src/fluent-bit/plugins/filter_parser/filter_parser.c
new file mode 100644
index 000000000..0f2dc039d
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_parser/filter_parser.c
@@ -0,0 +1,452 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+#include <string.h>
+#include <fluent-bit.h>
+
+#include "filter_parser.h"
+
+static int msgpackobj2char(msgpack_object *obj,
+ const char **ret_char, int *ret_char_size)
+{
+ int ret = -1;
+
+ if (obj->type == MSGPACK_OBJECT_STR) {
+ *ret_char = obj->via.str.ptr;
+ *ret_char_size = obj->via.str.size;
+ ret = 0;
+ }
+ else if (obj->type == MSGPACK_OBJECT_BIN) {
+ *ret_char = obj->via.bin.ptr;
+ *ret_char_size = obj->via.bin.size;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int add_parser(const char *parser, struct filter_parser_ctx *ctx,
+ struct flb_config *config)
+{
+ struct flb_parser *p;
+ struct filter_parser *fp;
+
+ p = flb_parser_get(parser, config);
+ if (!p) {
+ return -1;
+ }
+
+ fp = flb_malloc(sizeof(struct filter_parser));
+ if (!fp) {
+ flb_errno();
+ return -1;
+ }
+
+ fp->parser = p;
+ mk_list_add(&fp->_head, &ctx->parsers);
+ return 0;
+}
+
+static int delete_parsers(struct filter_parser_ctx *ctx)
+{
+ int c = 0;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct filter_parser *fp;
+
+ mk_list_foreach_safe(head, tmp, &ctx->parsers) {
+ fp = mk_list_entry(head, struct filter_parser, _head);
+ mk_list_del(&fp->_head);
+ flb_free(fp);
+ c++;
+ }
+
+ return c;
+}
+
+static int configure(struct filter_parser_ctx *ctx,
+ struct flb_filter_instance *f_ins,
+ struct flb_config *config)
+{
+ int ret;
+ struct mk_list *head;
+ struct flb_kv *kv;
+
+ ctx->key_name = NULL;
+ ctx->reserve_data = FLB_FALSE;
+ ctx->preserve_key = FLB_FALSE;
+ mk_list_init(&ctx->parsers);
+
+ if (flb_filter_config_map_set(f_ins, ctx) < 0) {
+ flb_errno();
+ flb_plg_error(f_ins, "configuration error");
+ return -1;
+ }
+
+ if (ctx->key_name == NULL) {
+ flb_plg_error(ctx->ins, "missing 'key_name'");
+ return -1;
+ }
+ ctx->key_name_len = flb_sds_len(ctx->key_name);
+
+ /* Read all Parsers */
+ mk_list_foreach(head, &f_ins->properties) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+ if (strcasecmp("parser", kv->key) != 0) {
+ continue;
+ }
+ ret = add_parser(kv->val, ctx, config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "requested parser '%s' not found", kv->val);
+ }
+ }
+
+ if (mk_list_size(&ctx->parsers) == 0) {
+ flb_plg_error(ctx->ins, "Invalid 'parser'");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int cb_parser_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config,
+ void *data)
+{
+ (void) f_ins;
+ (void) config;
+ (void) data;
+
+ struct filter_parser_ctx *ctx = NULL;
+
+ /* Create context */
+ ctx = flb_malloc(sizeof(struct filter_parser_ctx));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = f_ins;
+
+ if ( configure(ctx, f_ins, config) < 0 ){
+ flb_free(ctx);
+ return -1;
+ }
+
+ flb_filter_set_context(f_ins, ctx);
+
+ return 0;
+}
+
+static int cb_parser_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **ret_buf, size_t *ret_bytes,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *context,
+ struct flb_config *config)
+{
+ int continue_parsing;
+ struct filter_parser_ctx *ctx = context;
+ struct flb_time tm;
+ msgpack_object *obj;
+
+ msgpack_object_kv *kv;
+ int i;
+ int ret = FLB_FILTER_NOTOUCH;
+ int parse_ret = -1;
+ int map_num;
+ const char *key_str;
+ int key_len;
+ const char *val_str;
+ int val_len;
+ char *out_buf;
+ size_t out_size;
+ struct flb_time parsed_time;
+
+ msgpack_object_kv **append_arr = NULL;
+ size_t append_arr_len = 0;
+ int append_arr_i;
+ struct mk_list *head;
+ struct filter_parser *fp;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int encoder_result;
+
+ (void) f_ins;
+ (void) i_ins;
+ (void) config;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ out_buf = NULL;
+ append_arr_i = 0;
+
+ flb_time_copy(&tm, &log_event.timestamp);
+ obj = log_event.body;
+
+ if (obj->type == MSGPACK_OBJECT_MAP) {
+ map_num = obj->via.map.size;
+ if (ctx->reserve_data) {
+ append_arr_len = obj->via.map.size;
+ append_arr = flb_calloc(append_arr_len, sizeof(msgpack_object_kv *));
+
+ if (append_arr == NULL) {
+ flb_errno();
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+ }
+
+ continue_parsing = FLB_TRUE;
+ for (i = 0; i < map_num && continue_parsing; i++) {
+ kv = &obj->via.map.ptr[i];
+ if (ctx->reserve_data) {
+ append_arr[append_arr_i] = kv;
+ append_arr_i++;
+ }
+ if ( msgpackobj2char(&kv->key, &key_str, &key_len) < 0 ) {
+ /* key is not string */
+ continue;
+ }
+ if (key_len == ctx->key_name_len &&
+ !strncmp(key_str, ctx->key_name, key_len)) {
+ if ( msgpackobj2char(&kv->val, &val_str, &val_len) < 0 ) {
+ /* val is not string */
+ continue;
+ }
+
+ /* Lookup parser */
+ mk_list_foreach(head, &ctx->parsers) {
+ fp = mk_list_entry(head, struct filter_parser, _head);
+
+ /* Reset time */
+ flb_time_zero(&parsed_time);
+
+ parse_ret = flb_parser_do(fp->parser, val_str, val_len,
+ (void **) &out_buf, &out_size,
+ &parsed_time);
+ if (parse_ret >= 0) {
+ /*
+ * If the parser succeeded we need to check the
+ * status of the parsed time. If the time was
+ * parsed successfully 'parsed_time' will be
+ * different than zero, if so, override the time
+ * holder with the new value, otherwise keep the
+ * original.
+ */
+ if (flb_time_to_nanosec(&parsed_time) != 0L) {
+ flb_time_copy(&tm, &parsed_time);
+ }
+
+ if (ctx->reserve_data) {
+ if (!ctx->preserve_key) {
+ append_arr_i--;
+ append_arr_len--;
+ append_arr[append_arr_i] = NULL;
+ }
+ }
+ else {
+ continue_parsing = FLB_FALSE;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ encoder_result = flb_log_event_encoder_begin_record(&log_encoder);
+
+ if (encoder_result == FLB_EVENT_ENCODER_SUCCESS) {
+ encoder_result = flb_log_event_encoder_set_timestamp(
+ &log_encoder, &tm);
+ }
+
+ if (encoder_result == FLB_EVENT_ENCODER_SUCCESS) {
+ encoder_result = \
+ flb_log_event_encoder_set_metadata_from_msgpack_object(
+ &log_encoder, log_event.metadata);
+ }
+
+ if (out_buf != NULL) {
+ if (ctx->reserve_data) {
+ char *new_buf = NULL;
+ int new_size;
+ int ret;
+ ret = flb_msgpack_expand_map(out_buf, out_size,
+ append_arr, append_arr_len,
+ &new_buf, &new_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "cannot expand map");
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+ flb_free(append_arr);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ flb_free(out_buf);
+ out_buf = new_buf;
+ out_size = new_size;
+ }
+
+ if (encoder_result == FLB_EVENT_ENCODER_SUCCESS) {
+ encoder_result = \
+ flb_log_event_encoder_set_body_from_raw_msgpack(
+ &log_encoder, out_buf, out_size);
+ }
+
+ flb_free(out_buf);
+ ret = FLB_FILTER_MODIFIED;
+ }
+ else {
+            /* re-use original data */
+ if (encoder_result == FLB_EVENT_ENCODER_SUCCESS) {
+ encoder_result = \
+ flb_log_event_encoder_set_body_from_msgpack_object(
+ &log_encoder, log_event.body);
+ }
+ }
+
+ if (encoder_result == FLB_EVENT_ENCODER_SUCCESS) {
+ encoder_result = flb_log_event_encoder_commit_record(&log_encoder);
+ }
+
+ flb_free(append_arr);
+ append_arr = NULL;
+ }
+ else {
+ continue;
+ }
+ }
+
+ if (log_encoder.output_length > 0) {
+ *ret_buf = log_encoder.output_buffer;
+ *ret_bytes = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", ret);
+
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+
+static int cb_parser_exit(void *data, struct flb_config *config)
+{
+ struct filter_parser_ctx *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ delete_parsers(ctx);
+ flb_free(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "Key_Name", NULL,
+ 0, FLB_TRUE, offsetof(struct filter_parser_ctx, key_name),
+ "Specify field name in record to parse."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Parser", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_FALSE, 0,
+ "Specify the parser name to interpret the field. "
+ "Multiple Parser entries are allowed (one per line)."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "Preserve_Key", "false",
+ 0, FLB_TRUE, offsetof(struct filter_parser_ctx, preserve_key),
+ "Keep original Key_Name field in the parsed result. If false, the field will be removed."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "Reserve_Data", "false",
+ 0, FLB_TRUE, offsetof(struct filter_parser_ctx, reserve_data),
+ "Keep all other original fields in the parsed result. "
+ "If false, all other original fields will be removed."
+ },
+ {
+ FLB_CONFIG_MAP_DEPRECATED, "Unescape_key", NULL,
+ 0, FLB_FALSE, 0,
+ "(deprecated)"
+ },
+ {0}
+};
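+
+/*
+ * Example configuration (illustrative values; 'apache' refers to a parser
+ * defined in the parsers file):
+ *
+ *   [FILTER]
+ *       Name          parser
+ *       Match         *
+ *       Key_Name      log
+ *       Parser        apache
+ *       Reserve_Data  On
+ */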
+
+struct flb_filter_plugin filter_parser_plugin = {
+ .name = "parser",
+ .description = "Parse events",
+ .cb_init = cb_parser_init,
+ .cb_filter = cb_parser_filter,
+ .cb_exit = cb_parser_exit,
+ .config_map = config_map,
+ .flags = 0
+};
diff --git a/src/fluent-bit/plugins/filter_parser/filter_parser.h b/src/fluent-bit/plugins/filter_parser/filter_parser.h
new file mode 100644
index 000000000..72749310e
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_parser/filter_parser.h
@@ -0,0 +1,42 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_PARSER_H
+#define FLB_FILTER_PARSER_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_sds.h>
+
+struct filter_parser {
+ struct flb_parser *parser;
+ struct mk_list _head;
+};
+
+struct filter_parser_ctx {
+ flb_sds_t key_name;
+ int key_name_len;
+ int reserve_data;
+ int preserve_key;
+ struct mk_list parsers;
+ struct flb_filter_instance *ins;
+};
+
+#endif /* FLB_FILTER_PARSER_H */
diff --git a/src/fluent-bit/plugins/filter_record_modifier/CMakeLists.txt b/src/fluent-bit/plugins/filter_record_modifier/CMakeLists.txt
new file mode 100644
index 000000000..d971fa203
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_record_modifier/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ filter_modifier.c)
+
+FLB_PLUGIN(filter_record_modifier "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_record_modifier/filter_modifier.c b/src/fluent-bit/plugins/filter_record_modifier/filter_modifier.c
new file mode 100644
index 000000000..d95066a19
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_record_modifier/filter_modifier.c
@@ -0,0 +1,531 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <msgpack.h>
+#include "filter_modifier.h"
+
+#define PLUGIN_NAME "filter_record_modifier"
+
+static int config_allowlist_key(struct record_modifier_ctx *ctx,
+ struct mk_list *list)
+{
+ struct modifier_key *mod_key = NULL;
+ struct mk_list *head = NULL;
+ struct flb_config_map_val *mv = NULL;
+
+ if (ctx == NULL || list == NULL) {
+ return -1;
+ }
+
+ flb_config_map_foreach(head, mv, list) {
+ mod_key = flb_malloc(sizeof(struct modifier_key));
+ if (!mod_key) {
+ flb_errno();
+ continue;
+ }
+ mod_key->key = mv->val.str;
+ mod_key->key_len = flb_sds_len(mv->val.str);
+ if (mod_key->key[mod_key->key_len - 1] == '*') {
+ mod_key->dynamic_key = FLB_TRUE;
+ mod_key->key_len--;
+ }
+ else {
+ mod_key->dynamic_key = FLB_FALSE;
+ }
+ mk_list_add(&mod_key->_head, &ctx->allowlist_keys);
+ ctx->allowlist_keys_num++;
+ }
+ return 0;
+}
+
+static int configure(struct record_modifier_ctx *ctx,
+ struct flb_filter_instance *f_ins)
+{
+ struct mk_list *head = NULL;
+ struct modifier_key *mod_key;
+ struct modifier_record *mod_record;
+ struct flb_config_map_val *mv;
+ struct flb_slist_entry *sentry = NULL;
+
+ ctx->records_num = 0;
+ ctx->remove_keys_num = 0;
+ ctx->allowlist_keys_num = 0;
+
+ if (flb_filter_config_map_set(f_ins, ctx) < 0) {
+ flb_errno();
+ flb_plg_error(f_ins, "configuration error");
+ return -1;
+ }
+
+ /* Check 'Record' properties */
+ flb_config_map_foreach(head, mv, ctx->records_map) {
+ mod_record = flb_malloc(sizeof(struct modifier_record));
+ if (!mod_record) {
+ flb_errno();
+ continue;
+ }
+
+ if (mk_list_size(mv->val.list) != 2) {
+ flb_plg_error(ctx->ins, "invalid record parameters, "
+ "expects 'KEY VALUE'");
+ flb_free(mod_record);
+ continue;
+ }
+ /* Get first value (field) */
+ sentry = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ mod_record->key_len = flb_sds_len(sentry->str);
+ mod_record->key = flb_strndup(sentry->str, mod_record->key_len);
+ if (mod_record->key == NULL) {
+ flb_errno();
+ flb_free(mod_record);
+ continue;
+ }
+
+ sentry = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+ mod_record->val_len = flb_sds_len(sentry->str);
+ mod_record->val = flb_strndup(sentry->str, mod_record->val_len);
+ if (mod_record->val == NULL) {
+ flb_errno();
+ flb_free(mod_record->key);
+ flb_free(mod_record);
+ continue;
+ }
+
+ mk_list_add(&mod_record->_head, &ctx->records);
+ ctx->records_num++;
+ }
+ /* Check "Remove_Key" properties */
+ flb_config_map_foreach(head, mv, ctx->remove_keys_map) {
+ mod_key = flb_malloc(sizeof(struct modifier_key));
+ if (!mod_key) {
+ flb_errno();
+ continue;
+ }
+ mod_key->key = mv->val.str;
+ mod_key->key_len = flb_sds_len(mv->val.str);
+ if (mod_key->key[mod_key->key_len - 1] == '*') {
+ mod_key->dynamic_key = FLB_TRUE;
+ mod_key->key_len--;
+ }
+ else {
+ mod_key->dynamic_key = FLB_FALSE;
+ }
+ mk_list_add(&mod_key->_head, &ctx->remove_keys);
+ ctx->remove_keys_num++;
+ }
+
+ /* Check "Allowlist_key" and "Whitelist_key" properties */
+ config_allowlist_key(ctx, ctx->allowlist_keys_map);
+ config_allowlist_key(ctx, ctx->whitelist_keys_map);
+
+ if (ctx->remove_keys_num > 0 && ctx->allowlist_keys_num > 0) {
+ flb_plg_error(ctx->ins, "remove_keys and allowlist_keys are exclusive "
+ "with each other.");
+ return -1;
+ }
+ return 0;
+}
+
+static int delete_list(struct record_modifier_ctx *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct modifier_key *key;
+ struct modifier_record *record;
+
+ mk_list_foreach_safe(head, tmp, &ctx->remove_keys) {
+ key = mk_list_entry(head, struct modifier_key, _head);
+ mk_list_del(&key->_head);
+ flb_free(key);
+ }
+ mk_list_foreach_safe(head, tmp, &ctx->allowlist_keys) {
+ key = mk_list_entry(head, struct modifier_key, _head);
+ mk_list_del(&key->_head);
+ flb_free(key);
+ }
+ mk_list_foreach_safe(head, tmp, &ctx->records) {
+ record = mk_list_entry(head, struct modifier_record, _head);
+ flb_free(record->key);
+ flb_free(record->val);
+ mk_list_del(&record->_head);
+ flb_free(record);
+ }
+ return 0;
+}
+
+
+static int cb_modifier_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config,
+ void *data)
+{
+ struct record_modifier_ctx *ctx = NULL;
+
+ /* Create context */
+ ctx = flb_calloc(1, sizeof(struct record_modifier_ctx));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ mk_list_init(&ctx->records);
+ mk_list_init(&ctx->remove_keys);
+ mk_list_init(&ctx->allowlist_keys);
+ ctx->ins = f_ins;
+
+ if ( configure(ctx, f_ins) < 0 ){
+ delete_list(ctx);
+ flb_free(ctx);
+ return -1;
+ }
+
+ flb_filter_set_context(f_ins, ctx);
+
+ return 0;
+}
+
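+/*
+ * Fills bool_map with a keep/remove decision for every key in the record,
+ * based on remove_keys or allowlist_keys, and returns how many keys remain.
+ */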
+static int make_bool_map(struct record_modifier_ctx *ctx, msgpack_object *map,
+ bool_map_t *bool_map, int map_num)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct mk_list *check = NULL;
+ msgpack_object_kv *kv;
+ struct modifier_key *mod_key;
+
+ char result;
+ char is_to_delete;
+ msgpack_object *key;
+ int ret = map_num;
+ int i;
+
+ for (i=0; i<map_num; i++) {
+ bool_map[i] = TO_BE_REMAINED;
+ }
+    bool_map[map_num] = TAIL_OF_ARRAY; /* tail of map */
+
+ if (ctx->remove_keys_num > 0) {
+ check = &(ctx->remove_keys);
+ is_to_delete = FLB_TRUE;
+ }
+ else if(ctx->allowlist_keys_num > 0) {
+ check = &(ctx->allowlist_keys);
+ is_to_delete = FLB_FALSE;
+ }
+
+ if (check != NULL){
+ kv = map->via.map.ptr;
+ for(i=0; i<map_num; i++){
+ key = &(kv+i)->key;
+ result = FLB_FALSE;
+
+ mk_list_foreach_safe(head, tmp, check) {
+ mod_key = mk_list_entry(head, struct modifier_key, _head);
+ if (key->via.bin.size != mod_key->key_len &&
+ key->via.str.size != mod_key->key_len &&
+ mod_key->dynamic_key == FLB_FALSE) {
+ continue;
+ }
+ if (key->via.bin.size < mod_key->key_len &&
+ key->via.str.size < mod_key->key_len &&
+ mod_key->dynamic_key == FLB_TRUE) {
+ continue;
+ }
+ if ((key->type == MSGPACK_OBJECT_BIN &&
+ !strncasecmp(key->via.bin.ptr, mod_key->key,
+ mod_key->key_len)) ||
+ (key->type == MSGPACK_OBJECT_STR &&
+ !strncasecmp(key->via.str.ptr, mod_key->key,
+ mod_key->key_len))
+ ) {
+ result = FLB_TRUE;
+ break;
+ }
+ }
+ if (result == is_to_delete) {
+ bool_map[i] = TO_BE_REMOVED;
+ ret--;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int create_uuid(struct record_modifier_ctx *ctx, char *uuid)
+{
+ int ret;
+
+ if (uuid == NULL) {
+ return -1;
+ }
+
+ ret = flb_utils_uuid_v4_gen(uuid);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "failed to append uuid");
+ return -1;
+ }
+ return 0;
+}
+
+#define BOOL_MAP_LIMIT 65535
+static int cb_modifier_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_size,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *context,
+ struct flb_config *config)
+{
+ struct record_modifier_ctx *ctx = context;
+ char is_modified = FLB_FALSE;
+ int i;
+ int removed_map_num = 0;
+ int map_num = 0;
+ int ret;
+ char uuid[40] = {0};
+ size_t uuid_len = 0;
+ bool_map_t *bool_map = NULL;
+ struct flb_time tm;
+ struct modifier_record *mod_rec;
+ msgpack_object *obj;
+ msgpack_object_kv *kv;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ (void) f_ins;
+ (void) i_ins;
+ (void) config;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ /* Iterate each item to know map number */
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ map_num = 0;
+ removed_map_num = 0;
+ uuid_len = 0;
+
+ if (bool_map != NULL) {
+ flb_free(bool_map);
+ bool_map = NULL;
+ }
+
+ flb_time_copy(&tm, &log_event.timestamp);
+ obj = log_event.body;
+
+ /* grep keys */
+ if (obj->type == MSGPACK_OBJECT_MAP) {
+ map_num = obj->via.map.size;
+ if (map_num > BOOL_MAP_LIMIT) {
+ flb_plg_error(ctx->ins, "The number of elements exceeds limit %d",
+ BOOL_MAP_LIMIT);
+                flb_log_event_decoder_destroy(&log_decoder);
+                flb_log_event_encoder_destroy(&log_encoder);
+                return FLB_FILTER_NOTOUCH;
+ }
+ /* allocate map_num + guard byte */
+ bool_map = flb_calloc(map_num+1, sizeof(bool_map_t));
+ if (bool_map == NULL) {
+ flb_errno();
+                flb_log_event_decoder_destroy(&log_decoder);
+                flb_log_event_encoder_destroy(&log_encoder);
+                return FLB_FILTER_NOTOUCH;
+ }
+ removed_map_num = make_bool_map(ctx, obj,
+ bool_map, obj->via.map.size);
+ }
+ else {
+ continue;
+ }
+
+ if (removed_map_num != map_num) {
+ is_modified = FLB_TRUE;
+ }
+
+ removed_map_num += ctx->records_num;
+ if (ctx->uuid_key) {
+ memset(&uuid[0], 0, sizeof(uuid));
+ ret = create_uuid(ctx, &uuid[0]);
+ if (ret == 0) {
+ removed_map_num++;
+ uuid_len = strlen(&uuid[0]);
+ }
+ }
+ if (removed_map_num <= 0) {
+ continue;
+ }
+
+ ret = flb_log_event_encoder_begin_record(&log_encoder);
+
+ ret = flb_log_event_encoder_set_timestamp(&log_encoder, &tm);
+
+ ret = flb_log_event_encoder_set_metadata_from_msgpack_object(
+ &log_encoder, log_event.metadata);
+
+ kv = obj->via.map.ptr;
+ for(i=0;
+ bool_map[i] != TAIL_OF_ARRAY &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ i++) {
+ if (bool_map[i] == TO_BE_REMAINED) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&kv[i].key),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&kv[i].val));
+ }
+ }
+
+ flb_free(bool_map);
+ bool_map = NULL;
+
+ /* append record */
+ if (ctx->records_num > 0) {
+ is_modified = FLB_TRUE;
+
+ mk_list_foreach_safe(head, tmp, &ctx->records) {
+ mod_rec = mk_list_entry(head, struct modifier_record, _head);
+
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_STRING_VALUE(mod_rec->key, mod_rec->key_len),
+ FLB_LOG_EVENT_STRING_VALUE(mod_rec->val, mod_rec->val_len));
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ break;
+ }
+ }
+ }
+
+ if (uuid_len > 0) {
+ is_modified = FLB_TRUE;
+
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_STRING_VALUE(ctx->uuid_key, flb_sds_len(ctx->uuid_key)),
+ FLB_LOG_EVENT_STRING_VALUE(&uuid[0], uuid_len));
+ }
+
+ flb_log_event_encoder_commit_record(&log_encoder);
+ }
+
+ if (bool_map != NULL) {
+ flb_free(bool_map);
+ }
+
+ if (is_modified &&
+ log_encoder.output_length > 0) {
+ *out_buf = log_encoder.output_buffer;
+ *out_size = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+static int cb_modifier_exit(void *data, struct flb_config *config)
+{
+ struct record_modifier_ctx *ctx = data;
+
+ if (ctx != NULL) {
+ delete_list(ctx);
+ flb_free(ctx);
+ }
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_SLIST_2, "record", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct record_modifier_ctx, records_map),
+ "Append fields. This parameter needs key and value pair."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "remove_key", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct record_modifier_ctx, remove_keys_map),
+ "If the key is matched, that field is removed."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "allowlist_key", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct record_modifier_ctx, allowlist_keys_map),
+ "If the key is not matched, that field is removed."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "whitelist_key", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct record_modifier_ctx, whitelist_keys_map),
+ "(Alias of allowlist_key)"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "uuid_key", NULL,
+ 0, FLB_TRUE, offsetof(struct record_modifier_ctx, uuid_key),
+ "If set, the plugin generates uuid per record."
+ },
+
+ {0}
+};
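+
+/*
+ * Example configuration (illustrative values):
+ *
+ *   [FILTER]
+ *       Name          record_modifier
+ *       Match         *
+ *       Record        hostname web01
+ *       Remove_key    password
+ */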
+
+struct flb_filter_plugin filter_record_modifier_plugin = {
+ .name = "record_modifier",
+ .description = "modify record",
+ .cb_init = cb_modifier_init,
+ .cb_filter = cb_modifier_filter,
+ .cb_exit = cb_modifier_exit,
+ .config_map = config_map,
+ .flags = 0
+};
diff --git a/src/fluent-bit/plugins/filter_record_modifier/filter_modifier.h b/src/fluent-bit/plugins/filter_record_modifier/filter_modifier.h
new file mode 100644
index 000000000..b9d0818ef
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_record_modifier/filter_modifier.h
@@ -0,0 +1,68 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_RECORD_MODIFIER_H
+#define FLB_FILTER_RECORD_MODIFIER_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_filter.h>
+
+struct modifier_record {
+ char *key;
+ char *val;
+ int key_len;
+ int val_len;
+ struct mk_list _head;
+};
+
+struct modifier_key {
+ char *key;
+ int key_len;
+ int dynamic_key;
+ struct mk_list _head;
+};
+
+struct record_modifier_ctx {
+ int records_num;
+ int remove_keys_num;
+ int allowlist_keys_num;
+
+ flb_sds_t uuid_key;
+
+ /* config map */
+ struct mk_list *records_map;
+ struct mk_list *remove_keys_map;
+ struct mk_list *allowlist_keys_map;
+ struct mk_list *whitelist_keys_map;
+
+ struct mk_list records;
+ struct mk_list remove_keys;
+ struct mk_list allowlist_keys;
+ struct flb_filter_instance *ins;
+};
+
+typedef enum {
+ TO_BE_REMOVED = 0,
+ TO_BE_REMAINED = 1,
+ TAIL_OF_ARRAY = 2
+} bool_map_t;
+
+
+#endif /* FLB_FILTER_RECORD_MODIFIER_H */
diff --git a/src/fluent-bit/plugins/filter_rewrite_tag/CMakeLists.txt b/src/fluent-bit/plugins/filter_rewrite_tag/CMakeLists.txt
new file mode 100644
index 000000000..7b98aa218
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_rewrite_tag/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ rewrite_tag.c)
+
+FLB_PLUGIN(filter_rewrite_tag "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_rewrite_tag/rewrite_tag.c b/src/fluent-bit/plugins/filter_rewrite_tag/rewrite_tag.c
new file mode 100644
index 000000000..5969d1582
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_rewrite_tag/rewrite_tag.c
@@ -0,0 +1,621 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_plugin.h>
+#include <fluent-bit/flb_processor.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_metrics.h>
+#include <fluent-bit/flb_storage.h>
+#include <fluent-bit/flb_regex.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+#include "rewrite_tag.h"
+
+/* Create an emitter input instance */
+static int emitter_create(struct flb_rewrite_tag *ctx)
+{
+ int ret;
+ struct flb_input_instance *ins;
+
+ ret = flb_input_name_exists(ctx->emitter_name, ctx->config);
+ if (ret == FLB_TRUE) {
+ flb_plg_error(ctx->ins, "emitter_name '%s' already exists",
+ ctx->emitter_name);
+ return -1;
+ }
+
+ ins = flb_input_new(ctx->config, "emitter", NULL, FLB_FALSE);
+ if (!ins) {
+ flb_plg_error(ctx->ins, "cannot create emitter instance");
+ return -1;
+ }
+
+ /* Set the alias name */
+ ret = flb_input_set_property(ins, "alias", ctx->emitter_name);
+ if (ret == -1) {
+ flb_plg_warn(ctx->ins,
+ "cannot set emitter_name, using fallback name '%s'",
+ ins->name);
+ }
+
+ /* Set the emitter_mem_buf_limit */
+ if(ctx->emitter_mem_buf_limit > 0) {
+ ins->mem_buf_limit = ctx->emitter_mem_buf_limit;
+ }
+
+ /* Set the storage type */
+ ret = flb_input_set_property(ins, "storage.type",
+ ctx->emitter_storage_type);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "cannot set storage.type");
+ }
+
+ /* Initialize emitter plugin */
+ ret = flb_input_instance_init(ins, ctx->config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "cannot initialize emitter instance '%s'",
+ ins->name);
+ flb_input_instance_exit(ins, ctx->config);
+ flb_input_instance_destroy(ins);
+ return -1;
+ }
+
+#ifdef FLB_HAVE_METRICS
+ /* Override Metrics title */
+ ret = flb_metrics_title(ctx->emitter_name, ins->metrics);
+ if (ret == -1) {
+ flb_plg_warn(ctx->ins, "cannot set metrics title, using fallback name %s",
+ ins->name);
+ }
+#endif
+
+ /* Storage context */
+ ret = flb_storage_input_create(ctx->config->cio, ins);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "cannot initialize storage for stream '%s'",
+ ctx->emitter_name);
+ flb_input_instance_exit(ins, ctx->config);
+ flb_input_instance_destroy(ins);
+ return -1;
+ }
+ ctx->ins_emitter = ins;
+ return 0;
+}
+
+/*
+ * Validate and prepare internal contexts based on the received
+ * config_map values.
+ */
+static int process_config(struct flb_rewrite_tag *ctx)
+{
+ struct mk_list *head;
+ struct flb_slist_entry *entry;
+ struct rewrite_rule *rule;
+ struct flb_config_map_val *val;
+
+ if (!ctx->cm_rules) {
+ return -1;
+ }
+
+ mk_list_foreach(head, ctx->cm_rules) {
+ /*
+ * When multiple entries are allowed in a config map, this becomes
+ * a list of struct flb_config_map_val. Every entry is linked in the
+ * 'mult' field
+ */
+ val = mk_list_entry(head, struct flb_config_map_val, _head);
+
+ /* Allocate a rule */
+ rule = flb_malloc(sizeof(struct rewrite_rule));
+ if (!rule) {
+ flb_errno();
+ return -1;
+ }
+
+ /* key */
+ entry = flb_slist_entry_get(val->val.list, 0);
+ if (entry == NULL) {
+ flb_plg_error(ctx->ins, "failed to get entry");
+ flb_free(rule);
+ return -1;
+ }
+ rule->ra_key = flb_ra_create(entry->str, FLB_FALSE);
+ if (!rule->ra_key) {
+            flb_plg_error(ctx->ins, "invalid record accessor key '%s'",
+ entry->str);
+ flb_free(rule);
+ return -1;
+ }
+
+ /* regex */
+ entry = flb_slist_entry_get(val->val.list, 1);
+ rule->regex = flb_regex_create(entry->str);
+ if (!rule->regex) {
+ flb_plg_error(ctx->ins, "could not compile regex pattern '%s'",
+ entry->str);
+ flb_ra_destroy(rule->ra_key);
+ flb_free(rule);
+ return -1;
+ }
+
+ /* tag */
+ entry = flb_slist_entry_get(val->val.list, 2);
+ rule->ra_tag = flb_ra_create(entry->str, FLB_FALSE);
+
+ if (!rule->ra_tag) {
+ flb_plg_error(ctx->ins, "could not compose tag: %s", entry->str);
+ flb_ra_destroy(rule->ra_key);
+ flb_regex_destroy(rule->regex);
+ flb_free(rule);
+ return -1;
+ }
+
+ /* keep record ? */
+ entry = flb_slist_entry_get(val->val.list, 3);
+ rule->keep_record = flb_utils_bool(entry->str);
+
+ /* Link new rule */
+ mk_list_add(&rule->_head, &ctx->rules);
+ }
+
+ if (mk_list_size(&ctx->rules) == 0) {
+        flb_plg_warn(ctx->ins, "no rules have been defined");
+ return 0;
+ }
+
+ return 0;
+}
+
+static int is_wildcard(char* match)
+{
+ size_t len;
+ size_t i;
+
+ if (match == NULL) {
+ return 0;
+ }
+ len = strlen(match);
+
+    /* Values like '***' count as a wildcard too, so check every char. */
+ for (i=0; i<len; i++) {
+ if (match[i] != '*') {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static int cb_rewrite_tag_init(struct flb_filter_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ int ret;
+ flb_sds_t tmp;
+ flb_sds_t emitter_name = NULL;
+ struct flb_rewrite_tag *ctx;
+ (void) data;
+
+ /* Create context */
+ ctx = flb_calloc(1, sizeof(struct flb_rewrite_tag));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ if (is_wildcard(ins->match)) {
+        flb_plg_warn(ins, "a wildcard 'Match' pattern may cause an infinite loop.");
+ }
+ ctx->ins = ins;
+ ctx->config = config;
+ mk_list_init(&ctx->rules);
+
+    /*
+     * Emitter name: every rewrite_tag instance needs an emitter input plugin,
+     * which is used to emit the re-tagged records. We use a unique instance
+     * per filter so we can use the metrics interface.
+     *
+     * If 'emitter_name' is not set, we generate one.
+     *
+     * Check whether the emitter_name property was set before consulting the
+     * config map. If it is not set, set the property manually so the config
+     * map handles the memory allocation.
+     */
+ tmp = (char *) flb_filter_get_property("emitter_name", ins);
+ if (!tmp) {
+ emitter_name = flb_sds_create_size(64);
+ if (!emitter_name) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ tmp = flb_sds_printf(&emitter_name, "emitter_for_%s",
+ flb_filter_name(ins));
+ if (!tmp) {
+ flb_error("[filter rewrite_tag] cannot compose emitter_name");
+ flb_sds_destroy(emitter_name);
+ flb_free(ctx);
+ return -1;
+ }
+
+ flb_filter_set_property(ins, "emitter_name", emitter_name);
+ flb_sds_destroy(emitter_name);
+ }
+
+ /* Set config_map properties in our local context */
+ ret = flb_filter_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+    /*
+     * Emitter Storage Type: by default the emitter input plugin to be created
+     * uses a memory buffer; this option allows defining a filesystem buffering
+     * mechanism for the newly created records (only if the main service has
+     * filesystem storage enabled as well).
+     *
+     * Here we just validate the input type: 'memory' or 'filesystem'.
+     */
+ tmp = ctx->emitter_storage_type;
+ if (strcasecmp(tmp, "memory") != 0 && strcasecmp(tmp, "filesystem") != 0) {
+ flb_plg_error(ins, "invalid 'emitter_storage.type' value. Only "
+ "'memory' or 'filesystem' types are allowed");
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Set plugin context */
+ flb_filter_set_context(ins, ctx);
+
+ /* Process the configuration */
+ ret = process_config(ctx);
+ if (ret == -1) {
+ return -1;
+ }
+
+ /* Create the emitter context */
+ ret = emitter_create(ctx);
+ if (ret == -1) {
+ return -1;
+ }
+
+ /* Register a metric to count the number of emitted records */
+#ifdef FLB_HAVE_METRICS
+ ctx->cmt_emitted = cmt_counter_create(ins->cmt,
+ "fluentbit", "filter", "emit_records_total",
+ "Total number of emitted records",
+ 1, (char *[]) {"name"});
+
+ /* OLD api */
+ flb_metrics_add(FLB_RTAG_METRIC_EMITTED,
+ "emit_records", ctx->ins->metrics);
+#endif
+
+ return 0;
+}
+
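+/*
+ * Note (descriptive comment, derived from the code below): when this filter
+ * runs as a processor attached directly to an input plugin, ingest_inline()
+ * appends the re-tagged payload straight back to the owning input, skipping
+ * the processor stages that already ran, instead of going through the
+ * emitter instance. It returns FLB_TRUE on success and FLB_FALSE to fall
+ * back to the emitter path.
+ */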
+static int ingest_inline(struct flb_rewrite_tag *ctx,
+ flb_sds_t out_tag,
+ const void *buf, size_t buf_size)
+{
+ struct flb_input_instance *input_instance;
+ struct flb_processor_unit *processor_unit;
+ struct flb_processor *processor;
+ int result;
+
+ if (ctx->ins->parent_processor != NULL) {
+ processor_unit = (struct flb_processor_unit *) \
+ ctx->ins->parent_processor;
+ processor = (struct flb_processor *) processor_unit->parent;
+ input_instance = (struct flb_input_instance *) processor->data;
+
+ if (processor->source_plugin_type == FLB_PLUGIN_INPUT) {
+ result = flb_input_log_append_skip_processor_stages(
+ input_instance,
+ processor_unit->stage + 1,
+ out_tag, flb_sds_len(out_tag),
+ buf, buf_size);
+
+ if (result == 0) {
+ return FLB_TRUE;
+ }
+ }
+ }
+
+ return FLB_FALSE;
+}
+
+
+/*
+ * On given record, check if a rule applies or not to the map, if so, compose
+ * the new tag, emit the record and return FLB_TRUE, otherwise just return
+ * FLB_FALSE and the original record will remain.
+ */
+static int process_record(const char *tag, int tag_len, msgpack_object map,
+ const void *buf, size_t buf_size, int *keep,
+ struct flb_rewrite_tag *ctx, int *matched)
+{
+ int ret;
+ flb_sds_t out_tag;
+ struct mk_list *head;
+ struct rewrite_rule *rule = NULL;
+ struct flb_regex_search result = {0};
+
+ if (matched == NULL) {
+ return FLB_FALSE;
+ }
+ *matched = FLB_FALSE;
+
+ mk_list_foreach(head, &ctx->rules) {
+ rule = mk_list_entry(head, struct rewrite_rule, _head);
+ if (rule) {
+ *keep = rule->keep_record;
+ }
+ ret = flb_ra_regex_match(rule->ra_key, map, rule->regex, &result);
+ if (ret < 0) { /* no match */
+ rule = NULL;
+ continue;
+ }
+
+ /* A record matched, just break and check 'rule' */
+ break;
+ }
+
+ if (!rule) {
+ return FLB_FALSE;
+ }
+ *matched = FLB_TRUE;
+
+ /* Compose new tag */
+ out_tag = flb_ra_translate(rule->ra_tag, (char *) tag, tag_len, map, &result);
+
+ /* Release any capture info from 'results' */
+ flb_regex_results_release(&result);
+
+ /* Validate new outgoing tag */
+ if (!out_tag) {
+ return FLB_FALSE;
+ }
+
+ ret = ingest_inline(ctx, out_tag, buf, buf_size);
+
+ if (!ret) {
+ /* Emit record with new tag */
+ ret = in_emitter_add_record(out_tag, flb_sds_len(out_tag), buf, buf_size,
+ ctx->ins_emitter);
+ }
+ else {
+ ret = 0;
+ }
+
+ /* Release the tag */
+ flb_sds_destroy(out_tag);
+
+ if (ret == -1) {
+ return FLB_FALSE;
+ }
+
+ return FLB_TRUE;
+}
+
+static int cb_rewrite_tag_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_bytes,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *filter_context,
+ struct flb_config *config)
+{
+ int keep;
+ int emitted_num = 0;
+ int is_matched = FLB_FALSE;
+ int is_emitted = FLB_FALSE;
+ size_t pre = 0;
+ size_t off = 0;
+#ifdef FLB_HAVE_METRICS
+ uint64_t ts;
+ char *name;
+#endif
+ msgpack_object map;
+ struct flb_rewrite_tag *ctx;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ (void) config;
+ (void) i_ins;
+
+ ctx = (struct flb_rewrite_tag *) filter_context;
+
+#ifdef FLB_HAVE_METRICS
+ ts = cfl_time_now();
+ name = (char *) flb_filter_name(f_ins);
+#endif
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ off = log_decoder.offset;
+ map = *log_event.body;
+ is_matched = FLB_FALSE;
+        /*
+         * Process the record according to the defined rules. If it returns
+         * FLB_TRUE, the record was emitted with a different tag.
+         *
+         * If a record was emitted, the variable 'keep' defines whether the
+         * original record must be preserved or not.
+         */
+ is_emitted = process_record(tag, tag_len, map, (char *) data + pre, off - pre, &keep, ctx, &is_matched);
+ if (is_emitted == FLB_TRUE) {
+ /* A record with the new tag was emitted */
+ emitted_num++;
+ }
+
+        /*
+         * Here we decide if the original record must be preserved or not. It is
+         * kept when either:
+         *
+         * - a record with the new tag was emitted and the rule says the original
+         *   must be preserved, or
+         * - no rule matched, so nothing was emitted
+         */
+ if (keep == FLB_TRUE || is_matched != FLB_TRUE) {
+ ret = flb_log_event_encoder_emit_raw_record(
+ &log_encoder,
+ log_decoder.record_base,
+ log_decoder.record_length);
+ }
+
+ /* Adjust previous offset */
+ pre = off;
+ }
+
+ if (emitted_num == 0) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+#ifdef FLB_HAVE_METRICS
+ else if (emitted_num > 0) {
+ cmt_counter_add(ctx->cmt_emitted, ts, emitted_num,
+ 1, (char *[]) {name});
+
+ /* OLD api */
+ flb_metrics_sum(FLB_RTAG_METRIC_EMITTED, emitted_num, ctx->ins->metrics);
+ }
+#endif
+
+ if (ret == FLB_EVENT_DECODER_ERROR_INSUFFICIENT_DATA &&
+ log_decoder.offset == bytes) {
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ *out_buf = log_encoder.output_buffer;
+ *out_bytes = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", ret);
+
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+/* Destroy rules from context */
+static void destroy_rules(struct flb_rewrite_tag *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct rewrite_rule *rule;
+
+ mk_list_foreach_safe(head, tmp, &ctx->rules) {
+ rule = mk_list_entry(head, struct rewrite_rule, _head);
+ flb_regex_destroy(rule->regex);
+ flb_ra_destroy(rule->ra_key);
+ flb_ra_destroy(rule->ra_tag);
+ mk_list_del(&rule->_head);
+ flb_free(rule);
+ }
+}
+
+static int cb_rewrite_tag_exit(void *data, struct flb_config *config)
+{
+ struct flb_rewrite_tag *ctx = (struct flb_rewrite_tag *) data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ destroy_rules(ctx);
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_SLIST_4, "rule", NULL,
+ FLB_TRUE, FLB_TRUE, offsetof(struct flb_rewrite_tag, cm_rules),
+ NULL
+ },
+ {
+ FLB_CONFIG_MAP_STR, "emitter_name", NULL,
+ FLB_FALSE, FLB_TRUE, offsetof(struct flb_rewrite_tag, emitter_name),
+ NULL
+ },
+ {
+ FLB_CONFIG_MAP_STR, "emitter_storage.type", "memory",
+ FLB_FALSE, FLB_TRUE, offsetof(struct flb_rewrite_tag, emitter_storage_type),
+ NULL
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "emitter_mem_buf_limit", FLB_RTAG_MEM_BUF_LIMIT_DEFAULT,
+ FLB_FALSE, FLB_TRUE, offsetof(struct flb_rewrite_tag, emitter_mem_buf_limit),
+ "set a memory buffer limit to restrict memory usage of emitter"
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_filter_plugin filter_rewrite_tag_plugin = {
+ .name = "rewrite_tag",
+    .description  = "Rewrite record tags",
+ .cb_init = cb_rewrite_tag_init,
+ .cb_filter = cb_rewrite_tag_filter,
+ .cb_exit = cb_rewrite_tag_exit,
+ .config_map = config_map,
+ .flags = 0
+};
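+
+/*
+ * Illustrative usage (hypothetical values, not part of the plugin code): a
+ * classic-mode configuration wiring a single rule could look like
+ *
+ *   [FILTER]
+ *       Name          rewrite_tag
+ *       Match         app.log
+ *       Rule          $log ^(ERROR) errors.$TAG false
+ *       Emitter_Name  re_emitted
+ *
+ * where the four space-separated values of 'Rule' map to the key, regex,
+ * new tag and keep flag parsed in process_config() above.
+ */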
diff --git a/src/fluent-bit/plugins/filter_rewrite_tag/rewrite_tag.h b/src/fluent-bit/plugins/filter_rewrite_tag/rewrite_tag.h
new file mode 100644
index 000000000..1edcffd5c
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_rewrite_tag/rewrite_tag.h
@@ -0,0 +1,64 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_REWRITE_TAG_H
+#define FLB_REWRITE_TAG_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_regex.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_metrics.h>
+
+#define FLB_RTAG_METRIC_EMITTED 200
+#define FLB_RTAG_MEM_BUF_LIMIT_DEFAULT "10M"
+
+/* Rewrite rule */
+struct rewrite_rule {
+ int keep_record; /* keep original record ? */
+ struct flb_regex *regex; /* matching regex */
+ struct flb_record_accessor *ra_key; /* key record accessor */
+ struct flb_record_accessor *ra_tag; /* tag record accessor */
+ struct mk_list _head; /* link to flb_rewrite_tag->rules */
+};
+
+/* Plugin context */
+struct flb_rewrite_tag {
+ flb_sds_t emitter_name; /* emitter input plugin name */
+ flb_sds_t emitter_storage_type; /* emitter storage type */
+ size_t emitter_mem_buf_limit; /* Emitter buffer limit */
+ struct mk_list rules; /* processed rules */
+ struct mk_list *cm_rules; /* config_map rules (only strings) */
+ struct flb_input_instance *ins_emitter; /* emitter input plugin instance */
+ struct flb_filter_instance *ins; /* self-filter instance */
+ struct flb_config *config; /* Fluent Bit context */
+
+#ifdef FLB_HAVE_METRICS
+ struct cmt_counter *cmt_emitted;
+#endif
+};
+
+/* Register external function to emit records, check 'plugins/in_emitter' */
+int in_emitter_add_record(const char *tag, int tag_len,
+ const char *buf_data, size_t buf_size,
+ struct flb_input_instance *in);
+int in_emitter_get_collector_id(struct flb_input_instance *in);
+
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_stdout/CMakeLists.txt b/src/fluent-bit/plugins/filter_stdout/CMakeLists.txt
new file mode 100644
index 000000000..7c7794d72
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_stdout/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ stdout.c)
+
+FLB_PLUGIN(filter_stdout "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_stdout/stdout.c b/src/fluent-bit/plugins/filter_stdout/stdout.c
new file mode 100644
index 000000000..1fb3fe040
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_stdout/stdout.c
@@ -0,0 +1,107 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+static int cb_stdout_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config,
+ void *data)
+{
+ (void) f_ins;
+ (void) config;
+ (void) data;
+
+ if (flb_filter_config_map_set(f_ins, (void *)config) == -1) {
+ flb_plg_error(f_ins, "unable to load configuration");
+ return -1;
+ }
+ return 0;
+}
+
+static int cb_stdout_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_bytes,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *filter_context,
+ struct flb_config *config)
+{
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ size_t cnt;
+ int ret;
+
+ (void) out_buf;
+ (void) out_bytes;
+ (void) f_ins;
+ (void) i_ins;
+ (void) filter_context;
+ (void) config;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(f_ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ cnt = 0;
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+        printf("[%zu] %s: [", cnt++, tag);
+ printf("%"PRIu32".%09lu, ",
+ (uint32_t) log_event.timestamp.tm.tv_sec,
+ log_event.timestamp.tm.tv_nsec);
+ msgpack_object_print(stdout, *log_event.metadata);
+ printf(", ");
+ msgpack_object_print(stdout, *log_event.body);
+ printf("]\n");
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+}
+
+static struct flb_config_map config_map[] = {
+ /* EOF */
+ {0}
+};
+
+struct flb_filter_plugin filter_stdout_plugin = {
+ .name = "stdout",
+ .description = "Filter events to STDOUT",
+ .cb_init = cb_stdout_init,
+ .cb_filter = cb_stdout_filter,
+ .cb_exit = NULL,
+ .config_map = config_map,
+ .flags = 0
+};
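+
+/*
+ * For reference (approximate output, assuming a simple log record): each
+ * record passing through this filter is printed roughly as
+ *
+ *   [0] app.log: [1700000000.000000000, {}, {"msg"=>"hello"}]
+ *
+ * i.e. index, tag, timestamp, metadata map and body map, while the record
+ * stream itself is returned untouched (FLB_FILTER_NOTOUCH).
+ */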
diff --git a/src/fluent-bit/plugins/filter_tensorflow/CMakeLists.txt b/src/fluent-bit/plugins/filter_tensorflow/CMakeLists.txt
new file mode 100644
index 000000000..23bffe573
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_tensorflow/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ tensorflow.c)
+
+FLB_PLUGIN(filter_tensorflow "${src}" "")
+target_include_directories(flb-plugin-filter_tensorflow PRIVATE ${Tensorflow_DIR})
+target_link_libraries(flb-plugin-filter_tensorflow -ltensorflowlite_c)
diff --git a/src/fluent-bit/plugins/filter_tensorflow/tensorflow.c b/src/fluent-bit/plugins/filter_tensorflow/tensorflow.c
new file mode 100644
index 000000000..3adf52ed0
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_tensorflow/tensorflow.c
@@ -0,0 +1,540 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include "tensorflow/lite/c/c_api.h"
+#include "tensorflow/lite/c/common.h"
+
+#include <msgpack.h>
+#include <time.h>
+#include "tensorflow.h"
+
+#define MSGPACK_INTEGER(x) (x == MSGPACK_OBJECT_POSITIVE_INTEGER || \
+ x == MSGPACK_OBJECT_NEGATIVE_INTEGER)
+#define MSGPACK_FLOAT(x) (x == MSGPACK_OBJECT_FLOAT || \
+ x == MSGPACK_OBJECT_FLOAT32)
+#define MSGPACK_NUMBER(x) (MSGPACK_INTEGER(x) || MSGPACK_FLOAT(x))
+
+void print_tensor_info(struct flb_tensorflow *ctx, const TfLiteTensor* tensor)
+{
+ int i;
+ TfLiteType type;
+ char tensor_info[128] = "";
+ char tensor_dim[8];
+
+ type = TfLiteTensorType(tensor);
+ sprintf(tensor_info, "type: %s dimensions: {", TfLiteTypeGetName(type));
+ for (i = 0; i < TfLiteTensorNumDims(tensor) - 1; i++) {
+ sprintf(tensor_dim, "%d, ", TfLiteTensorDim(tensor, i));
+ strcat(tensor_info, tensor_dim);
+ }
+ sprintf(tensor_dim, "%d}", TfLiteTensorDim(tensor, i));
+ strcat(tensor_info, tensor_dim);
+
+ flb_plg_info(ctx->ins, "%s", tensor_info);
+}
+
+void print_model_io(struct flb_tensorflow *ctx)
+{
+ int i;
+ int num;
+ const TfLiteTensor* tensor;
+
+ /* Input information */
+ num = TfLiteInterpreterGetInputTensorCount(ctx->interpreter);
+ for (i = 0; i < num; i++) {
+ tensor = TfLiteInterpreterGetInputTensor(ctx->interpreter, i);
+ flb_plg_info(ctx->ins, "===== input #%d =====", i + 1);
+ print_tensor_info(ctx, tensor);
+ }
+
+ /* Output information */
+ num = TfLiteInterpreterGetOutputTensorCount(ctx->interpreter);
+ for (i = 0; i < num; i++) {
+ tensor = TfLiteInterpreterGetOutputTensor(ctx->interpreter, i);
+ flb_plg_info(ctx->ins, "===== output #%d ====", i + 1);
+ print_tensor_info(ctx, tensor);
+ }
+}
+
+void build_interpreter(struct flb_tensorflow *ctx, char* model_path)
+{
+ /* from c_api.h */
+ ctx->model = TfLiteModelCreateFromFile(model_path);
+ ctx->interpreter_options = TfLiteInterpreterOptionsCreate();
+ ctx->interpreter = TfLiteInterpreterCreate(ctx->model, ctx->interpreter_options);
+ TfLiteInterpreterAllocateTensors(ctx->interpreter);
+
+ flb_plg_info(ctx->ins, "TensorFlow Lite interpreter created!");
+ print_model_io(ctx);
+}
+
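+/*
+ * Note (descriptive comment): this helper assumes a single input and a single
+ * output tensor (index 0); models exposing multiple input or output tensors
+ * are not handled here.
+ */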
+void inference(TfLiteInterpreter* interpreter, void* input_data, void* output_data, int input_buf_size, int output_buf_size) {
+ /* from c_api.h */
+ TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
+ TfLiteTensorCopyFromBuffer(input_tensor, input_data, input_buf_size);
+
+ TfLiteInterpreterInvoke(interpreter);
+
+ const TfLiteTensor* output_tensor = TfLiteInterpreterGetOutputTensor(interpreter, 0);
+ TfLiteTensorCopyToBuffer(output_tensor, output_data, output_buf_size);
+}
+
+int allocateIOBuffer(struct flb_tensorflow *ctx, void** buf, TfLiteType type, int size)
+{
+    if (type == kTfLiteFloat32) {
+        *buf = (void *) flb_malloc(size * sizeof(float));
+        if (*buf == NULL) {
+            flb_errno();
+            return -1;
+        }
+    }
+    else {
+        flb_plg_error(ctx->ins, "Tensor type (%d) is not currently supported!", type);
+        return -1;
+    }
+
+ return 0;
+}
+
+void flb_tensorflow_conf_destroy(struct flb_tensorflow *ctx)
+{
+ flb_sds_destroy(ctx->input_field);
+
+ if (ctx->input) {
+ flb_free(ctx->input);
+ }
+
+ if (ctx->output) {
+ flb_free(ctx->output);
+ }
+
+ if (ctx->normalization_value) {
+ flb_free(ctx->normalization_value);
+ }
+
+ /* delete TensorFlow model and interpreter */
+ if (ctx->model) {
+ TfLiteModelDelete(ctx->model);
+ }
+
+ TfLiteInterpreterOptionsDelete(ctx->interpreter_options);
+ TfLiteInterpreterDelete(ctx->interpreter);
+
+ flb_free(ctx);
+}
+
+static int cb_tensorflow_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config,
+ void *data)
+{
+ int i;
+ int ret;
+ struct flb_tensorflow *ctx = NULL;
+ const char *tmp;
+ const TfLiteTensor* tensor;
+
+ ctx = flb_calloc(1, sizeof(struct flb_tensorflow));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ ret = flb_filter_config_map_set(f_ins, (void *) ctx);
+ if (ret == -1) {
+ flb_tensorflow_conf_destroy(ctx);
+ return -1;
+ }
+
+ ctx->ins = f_ins;
+
+ tmp = flb_filter_get_property("input_field", f_ins);
+ if (!tmp) {
+ flb_plg_error(ctx->ins, "input field is not defined!");
+ flb_tensorflow_conf_destroy(ctx);
+ return -1;
+ }
+
+ ctx->input_field = flb_sds_create(tmp);
+
+ tmp = flb_filter_get_property("model_file", f_ins);
+ if (!tmp) {
+ flb_plg_error(ctx->ins, "TensorFlow Lite model file is not provided!");
+ flb_tensorflow_conf_destroy(ctx);
+ return -1;
+ }
+
+ if(access(tmp, F_OK) == -1) {
+ flb_plg_error(ctx->ins, "TensorFlow Lite model file %s not found!", tmp);
+ flb_tensorflow_conf_destroy(ctx);
+ return -1;
+ }
+
+ build_interpreter(ctx, (char *) tmp);
+
+ if (!ctx->interpreter) {
+ flb_plg_error(ctx->ins, "Error creating the interpreter");
+ flb_tensorflow_conf_destroy(ctx);
+ return -1;
+ }
+
+ /* calculate input information */
+ ctx->input_size = 1;
+ tensor = TfLiteInterpreterGetInputTensor(ctx->interpreter, 0);
+ for (i = 0; i < TfLiteTensorNumDims(tensor); i++) {
+ ctx->input_size *= TfLiteTensorDim(tensor, i);
+ }
+ ctx->input_tensor_type = TfLiteTensorType(tensor);
+ if (allocateIOBuffer(ctx, &ctx->input, ctx->input_tensor_type, ctx->input_size) == -1) {
+ flb_tensorflow_conf_destroy(ctx);
+ return -1;
+ }
+ ctx->input_byte_size = TfLiteTensorByteSize(tensor);
+
+ /* calculate output information */
+ ctx->output_size = 1;
+ tensor = TfLiteInterpreterGetOutputTensor(ctx->interpreter, 0);
+ for (i = 0; i < TfLiteTensorNumDims(tensor); i++) {
+ ctx->output_size *= TfLiteTensorDim(tensor, i);
+ }
+ ctx->output_tensor_type = TfLiteTensorType(tensor);
+ if (allocateIOBuffer(ctx, &ctx->output, ctx->output_tensor_type, ctx->output_size) == -1) {
+ flb_tensorflow_conf_destroy(ctx);
+ return -1;
+ }
+ ctx->output_byte_size = TfLiteTensorByteSize(tensor);
+
+ tmp = flb_filter_get_property("include_input_fields", f_ins);
+ if (!tmp) {
+ ctx->include_input_fields = FLB_TRUE;
+ }
+ else {
+ ctx->include_input_fields = flb_utils_bool(tmp);
+ }
+
+ tmp = flb_filter_get_property("normalization_value", f_ins);
+ if (tmp) {
+ ctx->normalization_value = flb_malloc(sizeof(float));
+ *ctx->normalization_value = atof(tmp);
+ }
+
+ flb_filter_set_context(f_ins, ctx);
+ return 0;
+}
+
+static int cb_tensorflow_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_bytes,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *filter_context,
+ struct flb_config *config)
+{
+ int i;
+ int j;
+ int input_data_type;
+ int map_size;
+
+ msgpack_object map;
+ msgpack_object key;
+ msgpack_object value;
+
+ struct flb_tensorflow* ctx;
+
+ /* data pointers */
+ float* dfloat;
+
+ /* calculate inference time */
+ clock_t start, end;
+ double inference_time;
+
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ (void) out_buf;
+ (void) out_bytes;
+ (void) f_ins;
+ (void) i_ins;
+
+ /* initializations */
+ ctx = filter_context;
+ inference_time = 0;
+ start = clock();
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ /* TODO check if msgpack type is map */
+ map = *log_event.body;
+ map_size = map.via.map.size;
+
+ for (i = 0; i < map_size; i++) {
+ key = map.via.map.ptr[i].key;
+
+ if (flb_sds_cmp(ctx->input_field, (char *) key.via.str.ptr, key.via.str.size) != 0) {
+ continue;
+ }
+
+ value = map.via.map.ptr[i].val;
+ if (value.type == MSGPACK_OBJECT_ARRAY)
+ {
+ int size = value.via.array.size;
+ if (size == 0) {
+ flb_plg_error(ctx->ins, "input data size has to be non-zero!");
+ break;
+ }
+
+ if (size != ctx->input_size) {
+ flb_plg_error(ctx->ins, "input data size doesn't match model's input size!");
+ break;
+ }
+
+ /* we only accept numbers inside input array */
+ input_data_type = value.via.array.ptr[0].type;
+ if (!MSGPACK_NUMBER(input_data_type)) {
+ flb_plg_error(ctx->ins, "input data has to be of numerical type!");
+ break;
+ }
+
+ /* copy data from messagepack into the input buffer */
+ /* tensor type: kTfLiteFloat32 */
+ if (ctx->input_tensor_type == kTfLiteFloat32) {
+ if (sizeof(float) != sizeof(kTfLiteFloat32)) {
+ flb_plg_error(ctx->ins, "input tensor type (kTfLiteFloat32) doesn't match float size!");
+ break;
+ }
+
+ dfloat = (float *) ctx->input;
+
+ if (MSGPACK_FLOAT(input_data_type)) {
+ for (i = 0; i < value.via.array.size; i++) {
+ dfloat[i] = value.via.array.ptr[i].via.f64;
+ }
+ }
+ else if (MSGPACK_INTEGER(input_data_type)) {
+ for (i = 0; i < value.via.array.size; i++) {
+ dfloat[i] = ((float) value.via.array.ptr[i].via.i64);
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "input record type is not supported for a float32 input tensor!");
+ break;
+ }
+
+ if (ctx->normalization_value) {
+ for (i = 0; i < value.via.array.size; i++) {
+ dfloat[i] /= *ctx->normalization_value;
+ }
+ }
+ }
+ else {
+                    flb_plg_error(ctx->ins, "input tensor type is currently not supported!");
+ break;
+ }
+ }
+ else if (value.type == MSGPACK_OBJECT_BIN) {
+ if (ctx->input_tensor_type == kTfLiteFloat32) {
+ dfloat = (float *) ctx->input;
+
+                    /*
+                     * Note: an IEEE 754 float is 32 bits (4 bytes) wide.
+                     * TODO: currently, the following assumes that the binary
+                     * string is the serialization of a string of characters
+                     * (uint8_t). Other primitive data type encodings, such as
+                     * floating point numbers, still need to be added.
+                     */
+ if (ctx->input_byte_size != (value.via.bin.size << 2)) {
+                        flb_plg_error(ctx->ins, "input data size (%d bytes * 4) doesn't "
+ "match model's input size (%d bytes)!",
+ value.via.bin.size, ctx->input_byte_size);
+ break;
+ }
+
+ for (i = 0; i < value.via.bin.size; i++) {
+ dfloat[i] = ((float) value.via.bin.ptr[i]);
+ }
+
+ if (ctx->normalization_value) {
+ for (i = 0; i < value.via.bin.size; i++) {
+ dfloat[i] /= *ctx->normalization_value;
+ }
+ }
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "input data format is not currently supported!");
+ break;
+ }
+
+ /* run the inference */
+ inference(ctx->interpreter, ctx->input, ctx->output, ctx->input_byte_size, ctx->output_byte_size);
+
+ /* create output messagepack */
+ end = clock();
+ inference_time = ((double) (end - start)) / CLOCKS_PER_SEC;
+
+ ret = flb_log_event_encoder_begin_record(&log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ &log_encoder, &log_event.timestamp);
+ }
+
+ if (ctx->include_input_fields) {
+ for (j = 0;
+ j < map_size &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ j++) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&map.via.map.ptr[j].key),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&map.via.map.ptr[j].val));
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("inference_time"),
+ FLB_LOG_EVENT_DOUBLE_VALUE(inference_time),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("output"));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_body_begin_array(&log_encoder);
+ }
+
+ for (i=0;
+ i < ctx->output_size &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ i++) {
+ if (ctx->output_tensor_type == kTfLiteFloat32) {
+ ret = flb_log_event_encoder_append_body_double(
+ &log_encoder, ((float*) ctx->output)[i]);
+ }
+ /* TODO: work out other types */
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_body_commit_array(&log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_body_commit_record(&log_encoder);
+ }
+ else {
+ flb_log_event_encoder_body_rollback_record(&log_encoder);
+ }
+
+ break;
+ }
+ }
+
+ if (log_encoder.output_length > 0) {
+ *out_buf = log_encoder.output_buffer;
+ *out_bytes = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", ret);
+
+ ret = FLB_FILTER_NOTOUCH;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+static int cb_tensorflow_exit(void *data, struct flb_config *config)
+{
+ struct flb_tensorflow *ctx = data;
+
+ flb_tensorflow_conf_destroy(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "model_file", NULL,
+ 0, FLB_FALSE, 0,
+ "Address of the TensorFlow Lite model file (.tflite)"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "input_field", NULL,
+ 0, FLB_FALSE, 0,
+ "Input field name to use for inference."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "include_input_fields", "true",
+ 0, FLB_TRUE, offsetof(struct flb_tensorflow, include_input_fields),
+ "Include input field in the output of the filter."
+ },
+    {
+     FLB_CONFIG_MAP_DOUBLE, "normalization_value", NULL,
+     0, FLB_FALSE, 0,
+     "Divide input feature values by this value (e.g. divide image pixels by 255)."
+    },
+ /* EOF */
+ {0}
+};
+
+struct flb_filter_plugin filter_tensorflow_plugin = {
+ .name = "tensorflow",
+ .description = "TensorFlow Lite inference engine",
+ .cb_init = cb_tensorflow_init,
+ .cb_filter = cb_tensorflow_filter,
+ .cb_exit = cb_tensorflow_exit,
+ .config_map = config_map,
+ .flags = 0
+};
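+
+/*
+ * Illustrative usage (hypothetical paths and field names): feed a numeric
+ * array field through a TensorFlow Lite model
+ *
+ *   [FILTER]
+ *       Name                 tensorflow
+ *       Match                sensors.*
+ *       Model_File           /etc/fluent-bit/model.tflite
+ *       Input_Field          readings
+ *       Normalization_Value  255
+ *
+ * The filter appends an 'inference_time' value and an 'output' array (the
+ * first output tensor) to each matching record.
+ */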
diff --git a/src/fluent-bit/plugins/filter_tensorflow/tensorflow.h b/src/fluent-bit/plugins/filter_tensorflow/tensorflow.h
new file mode 100644
index 000000000..4c923cc5c
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_tensorflow/tensorflow.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_TENSORFLOW_H
+#define FLB_FILTER_TENSORFLOW_H
+
+struct flb_tensorflow {
+ TfLiteModel* model;
+ TfLiteInterpreterOptions* interpreter_options;
+ TfLiteInterpreter* interpreter;
+ flb_sds_t input_field;
+ TfLiteType input_tensor_type;
+ TfLiteType output_tensor_type;
+
+ /* IO buffer */
+ void* input;
+ void* output;
+ int input_size;
+ int input_byte_size;
+ int output_size;
+ int output_byte_size;
+
+ /* feature scaling/normalization */
+ bool include_input_fields;
+ float* normalization_value;
+
+ struct flb_filter_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_throttle/CMakeLists.txt b/src/fluent-bit/plugins/filter_throttle/CMakeLists.txt
new file mode 100644
index 000000000..adc7b8f4c
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_throttle/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ window.c
+ throttle.c
+ )
+
+FLB_PLUGIN(filter_throttle "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_throttle/throttle.c b/src/fluent-bit/plugins/filter_throttle/throttle.c
new file mode 100644
index 000000000..2a5ce29a4
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_throttle/throttle.c
@@ -0,0 +1,337 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+#include "stdlib.h"
+
+#include "throttle.h"
+#include "window.h"
+
+#include <stdio.h>
+#include <sys/types.h>
+
+pthread_mutex_t throttle_mut;
+
+
+static bool apply_suffix (double *x, char suffix_char)
+{
+ int multiplier;
+
+ switch (suffix_char)
+ {
+ case 0:
+ case 's':
+ multiplier = 1;
+ break;
+ case 'm':
+ multiplier = 60;
+ break;
+ case 'h':
+ multiplier = 60 * 60;
+ break;
+ case 'd':
+ multiplier = 60 * 60 * 24;
+ break;
+ default:
+ return false;
+ }
+
+ *x *= multiplier;
+
+ return true;
+}
+
+
+void *time_ticker(void *args)
+{
+ struct flb_time ftm;
+ long timestamp;
+ struct flb_filter_throttle_ctx *ctx = args;
+
+ while (1) {
+ flb_time_get(&ftm);
+ timestamp = flb_time_to_double(&ftm);
+ pthread_mutex_lock(&throttle_mut);
+ window_add(ctx->hash, timestamp, 0);
+
+ ctx->hash->current_timestamp = timestamp;
+
+ if (ctx->print_status) {
+ flb_plg_info(ctx->ins,
+ "%ld: limit is %0.2f per %s with window size of %i, "
+ "current rate is: %i per interval",
+ timestamp, ctx->max_rate, ctx->slide_interval,
+ ctx->window_size,
+ ctx->hash->total / ctx->hash->size);
+ }
+ pthread_mutex_unlock(&throttle_mut);
+ /* sleep is a cancelable function */
+ sleep(ctx->ticker_data.seconds);
+ }
+}
+
+/* Given a msgpack record, do some filter action based on the defined rules */
+static inline int throttle_data(struct flb_filter_throttle_ctx *ctx)
+{
+ if ((ctx->hash->total / (double) ctx->hash->size) >= ctx->max_rate) {
+ return THROTTLE_RET_DROP;
+ }
+
+ window_add(ctx->hash, ctx->hash->current_timestamp, 1);
+
+ return THROTTLE_RET_KEEP;
+}
+
+static int configure(struct flb_filter_throttle_ctx *ctx, struct flb_filter_instance *f_ins)
+{
+ int ret;
+
+ ret = flb_filter_config_map_set(f_ins, ctx);
+ if (ret == -1) {
+ flb_plg_error(f_ins, "unable to load configuration");
+ return -1;
+ }
+ if (ctx->max_rate <= 1.0) {
+ ctx->max_rate = strtod(THROTTLE_DEFAULT_RATE, NULL);
+ }
+ if (ctx->window_size <= 1) {
+ ctx->window_size = strtoul(THROTTLE_DEFAULT_WINDOW, NULL, 10);
+ }
+
+ return 0;
+}
+
+static int parse_duration(struct flb_filter_throttle_ctx *ctx,
+ const char *interval)
+{
+ double seconds = 0.0;
+ double s;
+ char *p;
+
+ s = strtod(interval, &p);
+ if ( 0 >= s
+ /* No extra chars after the number and an optional s,m,h,d char. */
+ || (*p && *(p+1))
+ /* Check any suffix char and update S based on the suffix. */
+ || ! apply_suffix (&s, *p))
+ {
+        flb_plg_warn(ctx->ins,
+                     "invalid time interval %s, falling back to default: 1 "
+                     "second",
+                     interval);
+        /* make the fallback explicit so the caller never ends up with a zero
+         * or negative sleep interval */
+        s = 1.0;
+    }
+
+ seconds += s;
+ return seconds;
+}
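+
+/*
+ * For illustration, the suffix handling above maps interval strings to
+ * seconds roughly as: "1" or "1s" -> 1, "2m" -> 120, "1h" -> 3600,
+ * "1d" -> 86400. Since the return type is int, fractional results such as
+ * "1.5s" are truncated to whole seconds.
+ */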
+
+static int cb_throttle_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config,
+ void *data)
+{
+ int ret;
+ struct flb_filter_throttle_ctx *ctx;
+
+ pthread_mutex_init(&throttle_mut, NULL);
+
+ /* Create context */
+ ctx = flb_calloc(1, sizeof(struct flb_filter_throttle_ctx));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = f_ins;
+
+ /* parse plugin configuration */
+ ret = configure(ctx, f_ins);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Set our context */
+ flb_filter_set_context(f_ins, ctx);
+
+ ctx->hash = window_create(ctx->window_size);
+
+ ctx->ticker_data.seconds = parse_duration(ctx, ctx->slide_interval);
+ pthread_create(&ctx->ticker_data.thr, NULL, &time_ticker, ctx);
+ return 0;
+}
+
+static int cb_throttle_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_size,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *context,
+ struct flb_config *config)
+{
+ int ret;
+ int old_size = 0;
+ int new_size = 0;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ (void) f_ins;
+ (void) i_ins;
+ (void) config;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(f_ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(f_ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ /* Iterate each item array and apply rules */
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ old_size++;
+ pthread_mutex_lock(&throttle_mut);
+ ret = throttle_data(context);
+ pthread_mutex_unlock(&throttle_mut);
+
+ if (ret == THROTTLE_RET_KEEP) {
+ ret = flb_log_event_encoder_emit_raw_record(
+ &log_encoder,
+ &((char *) data)[log_decoder.previous_offset],
+ log_decoder.record_length);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ new_size++;
+ }
+ }
+ else if (ret == THROTTLE_RET_DROP) {
+ /* Do nothing */
+ }
+ }
+
+ /* we keep everything ? */
+ if (old_size == new_size) {
+ /* Destroy the buffer to avoid more overhead */
+ ret = FLB_FILTER_NOTOUCH;
+ }
+ else {
+ *out_buf = log_encoder.output_buffer;
+ *out_size = log_encoder.output_length;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+
+ ret = FLB_FILTER_MODIFIED;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+static int cb_throttle_exit(void *data, struct flb_config *config)
+{
+ void *thr_res;
+ struct flb_filter_throttle_ctx *ctx = data;
+
+ int s = pthread_cancel(ctx->ticker_data.thr);
+ if (s != 0) {
+ flb_plg_error(ctx->ins, "Unable to cancel ticker. Leaking context to avoid memory corruption.");
+ return 1;
+ }
+
+ s = pthread_join(ctx->ticker_data.thr, &thr_res);
+ if (s != 0) {
+ flb_plg_error(ctx->ins, "Unable to join ticker. Leaking context to avoid memory corruption.");
+ return 1;
+ }
+
+ if (thr_res != PTHREAD_CANCELED) {
+ flb_plg_error(ctx->ins, "Thread joined but was not canceled which is impossible.");
+ }
+
+ flb_free(ctx->hash->table);
+ flb_free(ctx->hash);
+ flb_free(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ // rate
+ // window
+ // print_status
+ // interval
+ {
+ FLB_CONFIG_MAP_DOUBLE, "rate", THROTTLE_DEFAULT_RATE,
+ 0, FLB_TRUE, offsetof(struct flb_filter_throttle_ctx, max_rate),
+ "Set throttle rate"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "window", THROTTLE_DEFAULT_WINDOW,
+ 0, FLB_TRUE, offsetof(struct flb_filter_throttle_ctx, window_size),
+ "Set throttle window"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "print_status", THROTTLE_DEFAULT_STATUS,
+ 0, FLB_TRUE, offsetof(struct flb_filter_throttle_ctx, print_status),
+ "Set whether or not to print status information"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "interval", THROTTLE_DEFAULT_INTERVAL,
+ 0, FLB_TRUE, offsetof(struct flb_filter_throttle_ctx, slide_interval),
+ "Set the slide interval"
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_filter_plugin filter_throttle_plugin = {
+ .name = "throttle",
+ .description = "Throttle messages using sliding window algorithm",
+ .cb_init = cb_throttle_init,
+ .cb_filter = cb_throttle_filter,
+ .cb_exit = cb_throttle_exit,
+ .config_map = config_map,
+ .flags = 0
+};
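+
+/*
+ * Illustrative usage (hypothetical values): keep roughly 100 records per
+ * second on average over a sliding window of 5 intervals
+ *
+ *   [FILTER]
+ *       Name          throttle
+ *       Match         *
+ *       Rate          100
+ *       Window        5
+ *       Interval      1s
+ *       Print_Status  true
+ */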
diff --git a/src/fluent-bit/plugins/filter_throttle/throttle.h b/src/fluent-bit/plugins/filter_throttle/throttle.h
new file mode 100644
index 000000000..30ca318c1
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_throttle/throttle.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit Throttling
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_THROTTLE_H
+#define FLB_FILTER_THROTTLE_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_pthread.h>
+
+/* actions */
+#define THROTTLE_RET_KEEP 0
+#define THROTTLE_RET_DROP 1
+
+/* defaults */
+#define THROTTLE_DEFAULT_RATE "1"
+#define THROTTLE_DEFAULT_WINDOW "5"
+#define THROTTLE_DEFAULT_INTERVAL "1"
+#define THROTTLE_DEFAULT_STATUS "false"
+
+struct ticker {
+ pthread_t thr;
+ double seconds;
+};
+
+struct flb_filter_throttle_ctx {
+ double max_rate;
+ unsigned int window_size;
+ const char *slide_interval;
+ int print_status;
+
+ /* internal */
+ struct throttle_window *hash;
+ struct flb_filter_instance *ins;
+ struct ticker ticker_data;
+};
+
+
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_throttle/window.c b/src/fluent-bit/plugins/filter_throttle/window.c
new file mode 100644
index 000000000..75fcb492e
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_throttle/window.c
@@ -0,0 +1,97 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_log.h>
+
+#include "window.h"
+#include "throttle.h"
+
+
+struct throttle_window *window_create(size_t size) {
+ struct throttle_window *tw;
+
+ if (size <= 0) {
+ return NULL;
+ }
+
+ tw = flb_malloc(sizeof(struct throttle_window));
+ if (!tw) {
+ flb_errno();
+ return NULL;
+ }
+
+ tw->size = size;
+ tw->total = 0;
+ tw->current_timestamp = 0;
+ tw->max_index = -1;
+ tw->table = flb_calloc(size, sizeof(struct throttle_pane));
+ if (!tw->table) {
+ flb_errno();
+ flb_free(tw);
+ return NULL;
+ }
+
+ return tw;
+}
+
+
+int window_get(struct throttle_window *tw, long timestamp) {
+ int i;
+ for (i=0; i< tw->size; i++ ) {
+ if (tw->table[i].timestamp == timestamp) {
+ return i;
+ }
+ }
+ return NOT_FOUND;
+}
+
+
+int window_add(struct throttle_window *tw, long timestamp, int val) {
+ int i, index, size;
+ int sum = 0;
+ tw->current_timestamp = timestamp;
+
+ size = tw->size;
+ index = window_get(tw, timestamp);
+
+ if (index == NOT_FOUND) {
+ if (size - 1 == tw->max_index) {
+ /* window must be shifted */
+ tw->max_index = -1;
+ }
+ tw->max_index += 1;
+ tw->table[tw->max_index].timestamp= timestamp;
+ tw->table[tw->max_index].counter = val;
+ } else {
+ tw->table[index].counter += val;
+ }
+
+ for (i=0; i < tw->size; i++ ) {
+ sum += tw->table[i].counter;
+ flb_debug("timestamp: %ld, value: %ld",
+ tw->table[i].timestamp, tw->table[i].counter);
+ }
+ tw->total = sum;
+ flb_debug("total: %i", tw->total);
+ return 0;
+}
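+
+/*
+ * Worked example (assuming a window of size 5): if the per-pane counters
+ * currently hold {3, 2, 0, 4, 1}, then total = 10 and the throttle filter
+ * checks total / size = 2 records per slide interval against the configured
+ * 'rate' limit.
+ */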
diff --git a/src/fluent-bit/plugins/filter_throttle/window.h b/src/fluent-bit/plugins/filter_throttle/window.h
new file mode 100644
index 000000000..c7f392e07
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_throttle/window.h
@@ -0,0 +1,37 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define NOT_FOUND -1
+
+struct throttle_pane {
+ long timestamp;
+ long counter;
+};
+
+struct throttle_window {
+ long current_timestamp;
+ unsigned size;
+ unsigned total;
+ pthread_mutex_t result_mutex;
+ int max_index;
+ struct throttle_pane *table;
+};
+
+struct throttle_window *window_create(size_t size);
+int window_add(struct throttle_window *tw, long timestamp, int val);
diff --git a/src/fluent-bit/plugins/filter_throttle_size/CMakeLists.txt b/src/fluent-bit/plugins/filter_throttle_size/CMakeLists.txt
new file mode 100644
index 000000000..1cdf42b9d
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_throttle_size/CMakeLists.txt
@@ -0,0 +1,3 @@
+set(src size_window.c throttle_size.c)
+
+ FLB_PLUGIN(filter_throttle_size "${src}" "")
diff --git a/src/fluent-bit/plugins/filter_throttle_size/size_window.c b/src/fluent-bit/plugins/filter_throttle_size/size_window.c
new file mode 100644
index 000000000..c3cfeb2e1
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_throttle_size/size_window.c
@@ -0,0 +1,226 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_str.h>
+
+#include "size_window.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#elif _M_X64
+#include <windows.h>
+#else
+#include <pthread.h>
+#endif
+
+/* This function creates a new size throttling window named @name with @size panes.
+   The total amount of entries is 0 and the timestamp is set to the current system time.
+   The name of the window is null terminated. The length @name_length is taken as an
+   explicit parameter so callers holding a longer string can avoid an extra strlen();
+   otherwise simply pass strlen(@name). */
+struct throttle_size_window *size_window_create(const char *name,
+ unsigned name_length,
+ unsigned int size)
+{
+ struct throttle_size_window *stw;
+ struct flb_time ftm;
+ int i;
+
+ if (size <= 0) {
+ return NULL;
+ }
+
+ stw = flb_malloc(sizeof(struct throttle_size_window));
+ if (!stw) {
+ flb_errno();
+ return NULL;
+ }
+
+ stw->size = size;
+ stw->total = 0;
+ stw->head = size - 1;
+ stw->tail = 0;
+ stw->table = flb_calloc(size, sizeof(struct throttle_size_pane));
+ if (!stw->table) {
+ flb_errno();
+ flb_free(stw);
+ return NULL;
+ }
+
+ stw->name = flb_strndup(name, name_length);
+
+ if (!stw->name) {
+ flb_errno();
+ flb_free(stw->table);
+ flb_free(stw);
+ return NULL;
+ }
+
+ flb_time_get(&ftm);
+ stw->timestamp = flb_time_to_double(&ftm);
+
+ for (i = 0; i < size; i++) {
+ stw->table[i].timestamp = stw->timestamp;
+ stw->table[i].size = 0;
+ }
+ flb_debug
+ ("[filter_throttle_size] New size throttling window named \"%s\" was created.",
+ stw->name);
+ return stw;
+}
+
+static inline void *create_lock()
+{
+#ifdef _WIN32
+ HANDLE lock = CreateMutex(NULL, // default security attributes
+ FALSE, // initially not owned
+ NULL); // unnamed mutex
+ if (lock == NULL) {
+ flb_error("CreateMutex error: %d\n", GetLastError());
+ return NULL;
+ }
+ return lock;
+#elif _M_X64
+ HANDLE lock = CreateMutex(NULL, // default security attributes
+ FALSE, // initially not owned
+ NULL); // unnamed mutex
+ if (lock == NULL) {
+ flb_error("CreateMutex error: %d\n", GetLastError());
+ return NULL;
+ }
+ return lock;
+#else
+ pthread_mutex_t *lock = flb_malloc(sizeof(pthread_mutex_t));
+ if (!lock) {
+ return NULL;
+ }
+ if (pthread_mutex_init(lock, NULL) != 0) {
+ flb_errno();
+ return NULL;
+ }
+ return lock;
+#endif
+}
+
+void lock_throttle_size_table(struct throttle_size_table *ht)
+{
+#ifdef _WIN32
+ DWORD dwWaitResult = WaitForSingleObject(ht->lock, // handle to mutex
+ INFINITE); // no time-out interval
+ if (WAIT_ABANDONED == dwWaitResult) {
+ flb_warn
+ ("[filter_throttle_size]The thread got ownership of an abandoned mutex\nThe throttle_size_table is in an indeterminate state");
+ }
+#elif _M_X64
+ DWORD dwWaitResult = WaitForSingleObject(ht->lock, // handle to mutex
+ INFINITE); // no time-out interval
+ if (WAIT_ABANDONED == dwWaitResult) {
+ flb_warn
+ ("[filter_throttle_size]The thread got ownership of an abandoned mutex\nThe throttle_size_table is in an indeterminate state");
+ }
+#else
+ pthread_mutex_lock(ht->lock);
+#endif
+}
+
+void unlock_throttle_size_table(struct throttle_size_table *ht)
+{
+#ifdef _WIN32
+ if (!ReleaseMutex(ht->lock)) {
+ flb_warn
+ ("[filter_throttle_size]Unable to release the ownership of throttle_size_table mutex!");
+ }
+#elif _M_X64
+ if (!ReleaseMutex(ht->lock)) {
+ flb_warn
+ ("[filter_throttle_size]Unable to release the ownership of throttle_size_table mutex!");
+ }
+#else
+ pthread_mutex_unlock(ht->lock);
+#endif
+}
+
+static inline void destroy_throttle_size_table_lock(struct throttle_size_table
+ *ht)
+{
+#ifdef _WIN32
+ CloseHandle(ht->lock);
+#elif _M_X64
+ CloseHandle(ht->lock);
+#else
+ pthread_mutex_destroy(ht->lock);
+ flb_free(ht->lock);
+#endif
+}
+
+struct throttle_size_table *create_throttle_size_table(size_t size)
+{
+ struct throttle_size_table *table;
+ table = flb_malloc(sizeof(struct throttle_size_table));
+ if (!table) {
+ return NULL;
+ }
+ table->windows =
+ flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE, size,
+ FLB_SIZE_WINDOW_HASH_MAX_ENTRIES);
+ if (!table->windows) {
+ flb_errno();
+ flb_free(table);
+ return NULL;
+ }
+ table->lock = create_lock();
+ if (!table->lock) {
+ flb_free(table);
+ return NULL;
+ }
+ return table;
+}
+
+void destroy_throttle_size_table(struct throttle_size_table *ht)
+{
+ int i;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_hash_table_entry *entry;
+ struct flb_hash_table_chain *table;
+
+ for (i = 0; i < ht->windows->size; i++) {
+ table = &ht->windows->table[i];
+ mk_list_foreach_safe(head, tmp, &table->chains) {
+ entry = mk_list_entry(head, struct flb_hash_table_entry, _head);
+ free_stw_content((struct throttle_size_window *) entry->val);
+ mk_list_del(&entry->_head);
+ mk_list_del(&entry->_head_parent);
+ entry->table->count--;
+ ht->windows->total_count--;
+ flb_free(entry->key);
+ flb_free(entry->val);
+ flb_free(entry);
+ }
+ }
+ destroy_throttle_size_table_lock(ht);
+ flb_free(ht->windows->table);
+ flb_free(ht->windows);
+ flb_free(ht);
+}
diff --git a/src/fluent-bit/plugins/filter_throttle_size/size_window.h b/src/fluent-bit/plugins/filter_throttle_size/size_window.h
new file mode 100644
index 000000000..be661d6ec
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_throttle_size/size_window.h
@@ -0,0 +1,140 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_SIZE_WINDOW_H
+#define FLB_SIZE_WINDOW_H
+
+#include <fluent-bit/flb_hash_table.h>
+
+#define FLB_SIZE_WINDOW_HASH_MAX_ENTRIES 100000
+
+struct throttle_size_pane
+{
+ long timestamp;
+ unsigned long size;
+};
+
+struct throttle_size_window
+{
+ char *name;
+ unsigned size;
+ unsigned long total;
+ long timestamp;
+ int head;
+ int tail;
+ struct throttle_size_pane *table;
+};
+
+struct throttle_size_table
+{
+ struct flb_hash_table *windows;
+ void *lock;
+};
+
+struct throttle_size_table *create_throttle_size_table(size_t size);
+
+struct throttle_size_window *size_window_create(const char *name,
+ unsigned name_length,
+ unsigned int size);
+
+/* This function adds a new pane on top of the pane stack by overwriting the oldest
+   one with @timestamp and a load size of 0 bytes. The oldest pane's load size
+   is subtracted from the total amount. */
+inline static void add_new_pane(struct throttle_size_window *stw,
+ long timestamp)
+{
+ unsigned long tail_size = 0;
+ tail_size = stw->table[stw->tail].size;
+ if (stw->size - 1 == stw->head) {
+        /* the head would exceed the end of the inner array and must be wrapped back to the beginning. */
+ stw->head = -1;
+ }
+ stw->head += 1;
+ stw->table[stw->head].timestamp = timestamp;
+ stw->table[stw->head].size = 0;
+ stw->total -= tail_size;
+ if (stw->size - 1 == stw->tail) {
+        /* the tail would exceed the end of the inner array and must be wrapped back to the beginning. */
+ stw->tail = -1;
+ }
+ stw->tail += 1;
+}
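+
+/* Worked example (illustrative): with size = 3, head = 2 and tail = 0, a call
+ * to add_new_pane() wraps head back to slot 0, overwrites that (oldest) pane
+ * with the new timestamp and a size of 0, subtracts its previous size from the
+ * total, and advances tail to slot 1. Head and tail therefore rotate together
+ * through the circular pane array, one step per slide interval. */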
+
+/* This function adds @load to the latest pane, which is on top of the pane stack.
+   @load is added to the total amount of the size throttling window.
+   If @load is not 0, the size throttling window's timestamp is updated to the
+   one on top of the pane stack (the latest). */
+inline static void add_load(struct throttle_size_window *stw,
+ unsigned long load)
+{
+ stw->table[stw->head].size += load;
+ stw->total += load;
+ if (load) {
+ stw->timestamp = stw->table[stw->head].timestamp;
+ }
+}
+
+inline static void free_stw_content(struct throttle_size_window *stw)
+{
+ flb_free(stw->name);
+ flb_free(stw->table);
+}
+
+inline static void free_stw(struct throttle_size_window *stw)
+{
+ free_stw_content(stw);
+ flb_free(stw);
+}
+
+inline static struct throttle_size_window *
+find_throttle_size_window(struct throttle_size_table *table,
+                          char *name, unsigned name_length)
+{
+ char *window = NULL;
+ size_t out_size;
+ if (flb_hash_table_get(table->windows, name, name_length,
+ (const char **)&window, &out_size) >= 0) {
+ if (out_size < sizeof(struct throttle_size_window)) {
+ flb_error("Malformed data in size window hashtable");
+ return NULL;
+ }
+ return (struct throttle_size_window *) window;
+ }
+ return NULL;
+}
+
+inline static void add_throttle_size_window(struct throttle_size_table *table,
+                                             struct throttle_size_window *window)
+{
+ flb_hash_table_add(table->windows, window->name, strlen(window->name),
+ (char *) window, sizeof(struct throttle_size_window));
+}
+
+void destroy_throttle_size_table(struct throttle_size_table *table);
+
+void lock_throttle_size_table(struct throttle_size_table *ht);
+void unlock_throttle_size_table(struct throttle_size_table *ht);
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_throttle_size/throttle_size.c b/src/fluent-bit/plugins/filter_throttle_size/throttle_size.c
new file mode 100644
index 000000000..31abc3df3
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_throttle_size/throttle_size.c
@@ -0,0 +1,774 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/types.h>
+
+
+#include "throttle_size.h"
+
+#undef PLUGIN_NAME
+#define PLUGIN_NAME "filter_throttle_size"
+#define RELATIVE_ERROR 0.001
+#define KEY_DEPTH 20
+#define SPLIT_DELIMITER '|'
+
+struct field_key
+{
+ char *key;
+ int key_len;
+ struct mk_list _head;
+};
+
+static bool apply_suffix(double *x, char suffix_char)
+{
+ int multiplier;
+
+ switch (suffix_char) {
+ case 0:
+ case 's':
+ multiplier = 1;
+ break;
+ case 'm':
+ multiplier = 60;
+ break;
+ case 'h':
+ multiplier = 60 * 60;
+ break;
+ case 'd':
+ multiplier = 60 * 60 * 24;
+ break;
+ default:
+ return false;
+ }
+
+ *x *= multiplier;
+
+ return true;
+}
+
+/*
+ * add_new_pane_to_each overrides the oldest window pane in every window with a
+ * zero load and a new timestamp, making it the newest pane.
+ */
+inline static void add_new_pane_to_each(struct throttle_size_table *ht,
+ double timestamp)
+{
+ struct mk_list *head;
+ struct flb_hash_table_entry *entry;
+ struct throttle_size_window *current_window;
+ struct flb_time ftm;
+
+ if (!timestamp) {
+ flb_time_get(&ftm);
+ timestamp = flb_time_to_double(&ftm);
+ }
+
+ mk_list_foreach(head, &ht->windows->entries) {
+ entry = mk_list_entry(head, struct flb_hash_table_entry, _head_parent);
+ current_window = (struct throttle_size_window *) (entry->val);
+ add_new_pane(current_window, timestamp);
+ flb_debug
+ ("[%s] Add new pane to \"%s\" window: timestamp: %ld, total %lu",
+ PLUGIN_NAME, current_window->name,
+ current_window->table[current_window->head].timestamp,
+ current_window->total);
+ }
+}
+
+inline static void delete_older_than_n_seconds(struct throttle_size_table *ht,
+ long seconds,
+ double current_timestamp)
+{
+ int i;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_hash_table_entry *entry;
+ struct flb_hash_table_chain *table;
+ struct throttle_size_window *current_window;
+ struct flb_time ftm;
+    long time_threshold;
+
+ if (!current_timestamp) {
+ flb_time_get(&ftm);
+ current_timestamp = flb_time_to_double(&ftm);
+ }
+
+    time_threshold = current_timestamp - seconds;
+ for (i = 0; i < ht->windows->size; i++) {
+ table = &ht->windows->table[i];
+ mk_list_foreach_safe(head, tmp, &table->chains) {
+ entry = mk_list_entry(head, struct flb_hash_table_entry, _head);
+ current_window = (struct throttle_size_window *) entry->val;
+
+            if (time_threshold > current_window->timestamp) {
+                flb_info
+                    ("[%s] Window \"%s\" was deleted. CT%ld TT%ld T%ld ",
+                     PLUGIN_NAME, current_window->name,
+                     (long) current_timestamp, time_threshold,
+                     current_window->timestamp);
+                free_stw_content(current_window);
+                mk_list_del(&entry->_head);
+                mk_list_del(&entry->_head_parent);
+                entry->table->count--;
+                ht->windows->total_count--;
+                flb_free(entry->key);
+                flb_free(entry->val);
+                flb_free(entry);
+            }
+ }
+ }
+}
+
+inline static void print_all(struct throttle_size_table *ht)
+{
+ struct mk_list *head;
+ struct flb_hash_table_entry *entry;
+ struct throttle_size_window *current_window;
+
+ mk_list_foreach(head, &ht->windows->entries) {
+ entry = mk_list_entry(head, struct flb_hash_table_entry, _head_parent);
+ current_window = (struct throttle_size_window *) entry->val;
+ printf("[%s] Name %s\n", PLUGIN_NAME, current_window->name);
+ printf("[%s] Timestamp %ld\n", PLUGIN_NAME,
+ current_window->timestamp);
+ printf("[%s] Total %lu\n", PLUGIN_NAME, current_window->total);
+ printf("[%s] Rate %f\n", PLUGIN_NAME,
+ current_window->total / (double) current_window->size);
+ }
+}
+
+void *size_time_ticker(void *args)
+{
+ struct flb_filter_throttle_size_ctx *ctx = args;
+ struct flb_time ftm;
+ long timestamp;
+
+ while (!ctx->done) {
+ flb_time_get(&ftm);
+ timestamp = flb_time_to_double(&ftm);
+
+ lock_throttle_size_table(ctx->hash);
+ add_new_pane_to_each(ctx->hash, timestamp);
+ delete_older_than_n_seconds(ctx->hash,
+ ctx->window_time_duration, timestamp);
+ if (ctx->print_status) {
+ print_all(ctx->hash);
+ }
+ unlock_throttle_size_table(ctx->hash);
+
+ sleep(ctx->slide_interval);
+ }
+
+ return NULL;
+}
+
+/* Check if a msgpack type is either binary or string */
+static inline int is_valid_key(const msgpack_object key_as_msgpack)
+{
+ return key_as_msgpack.type == MSGPACK_OBJECT_BIN ||
+ key_as_msgpack.type == MSGPACK_OBJECT_STR;
+}
+
+/*
+ * If the msgpack object can be represented as a string, get_msgobject_as_str
+ * returns that representation.
+ */
+static inline uint32_t get_msgobject_as_str(const msgpack_object msg,
+ char **out)
+{
+ if (msg.type == MSGPACK_OBJECT_STR) {
+ *out = (char *) msg.via.str.ptr;
+ return (uint32_t) msg.via.str.size;
+ }
+ if (msg.type == MSGPACK_OBJECT_BIN) {
+ *out = (char *) msg.via.bin.ptr;
+ return (uint32_t) msg.via.bin.size;
+ }
+ *out = NULL;
+ return (uint32_t) 0;
+}
+
+static inline unsigned long get_msgpack_object_size(msgpack_object msg)
+{
+ int i;
+ unsigned long size = 0;
+
+ switch (msg.type) {
+ case MSGPACK_OBJECT_STR:
+ return msg.via.str.size;
+ case MSGPACK_OBJECT_BIN:
+ return msg.via.bin.size;
+ case MSGPACK_OBJECT_MAP:
+ for (i = 0; i < msg.via.map.size; i++) {
+ size += get_msgpack_object_size(msg.via.map.ptr[i].key);
+ size += get_msgpack_object_size(msg.via.map.ptr[i].val);
+ }
+ return size;
+ case MSGPACK_OBJECT_ARRAY:
+ for (i = 0; i < msg.via.array.size; i++) {
+ size += get_msgpack_object_size(msg.via.array.ptr[i]);
+ }
+ return size;
+ default:
+ return 0;
+ };
+
+ return 0;
+}
+
+/*
+ * get_value_of_msgpack_object_map_ searches the msgpack_object map for @key
+ * and returns the value as a msgpack_object if the key is found, or NULL
+ * if not. This is a helper function for get_value_of_msgpack_object_map.
+ */
+static inline const msgpack_object *get_value_of_msgpack_object_map_(msgpack_object map,
+ struct field_key *key)
+{
+ int i;
+ int current_field_size;
+ char *current_field = NULL;
+
+ /* Lookup target key/value */
+ for (i = 0; i < map.via.map.size; i++) {
+ if (!is_valid_key(map.via.map.ptr[i].key)) {
+ continue;
+ }
+
+ current_field_size = get_msgobject_as_str(map.via.map.ptr[i].key, &current_field);
+ if (key->key_len != current_field_size) {
+ continue;
+ }
+
+ if (strncmp(key->key, current_field, current_field_size) != 0) {
+ continue;
+ }
+
+ return &map.via.map.ptr[i].val;
+ }
+
+ return NULL;
+}
+
+/*
+ * get_value_of_msgpack_object_map searches the msgpack_object map for @key and
+ * returns the value as a msgpack_object if the key is found, or NULL if
+ * not. @key is a list of strings representing the nested key. Each
+ * element in the list represents the next level in depth.
+ */
+const msgpack_object *get_value_of_msgpack_object_map(msgpack_object map,
+ const struct mk_list *fields_name)
+{
+ struct mk_list *head = NULL;
+ struct field_key *field;
+ const msgpack_object *msg = &map;
+
+ mk_list_foreach(head, fields_name) {
+ field = mk_list_entry(head, struct field_key, _head);
+ msg = get_value_of_msgpack_object_map_(*msg, field);
+ if (msg == NULL) {
+ /* not found */
+            flb_debug("Could not find field named %s", field->key);
+ return NULL;
+ }
+ }
+
+ return msg;
+}
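+
+/* Example (illustrative): for a record {"kubernetes": {"labels": {"app": "web"}}}
+ * and a fields_name list built from the configuration value "kubernetes|labels|app"
+ * (split on SPLIT_DELIMITER), this returns the msgpack value for "web". */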
+
+/* Given a msgpack record, do some filter action based on the defined rules */
+static inline int throttle_data_by_size(msgpack_object map,
+ struct flb_filter_throttle_size_ctx *ctx)
+{
+ char *name_field_str = NULL;
+ uint32_t name_field_size;
+ unsigned long load_size;
+ double current_rate;
+ struct throttle_size_window *window;
+ const msgpack_object *log_field;
+ const msgpack_object *name_field;
+
+ if (ctx->name_fields_depth > 0) {
+        /*
+         * We are looking for a message with a specific field. The others will
+         * not be taken into account.
+         */
+ name_field = get_value_of_msgpack_object_map(map, &ctx->name_fields);
+ if (name_field == NULL) {
+ /* We don't have such field so we keep the log */
+ flb_plg_debug(ctx->ins, "the name field is missing, so we are keeping "
+ "the log");
+ return throttle_size_RET_KEEP;
+ }
+ name_field_size = get_msgobject_as_str(*name_field, &name_field_str);
+ if (name_field_str == NULL) {
+ /* We don't have such field so we keep the log */
+            flb_plg_info(ctx->ins, "the value of the name field is neither string "
+                         "nor binary format. The log will not be throttled");
+ return throttle_size_RET_KEEP;
+ }
+        flb_plg_debug(ctx->ins, "name field found");
+ }
+ else {
+        flb_plg_debug(ctx->ins, "using default name field. All logs will be taken "
+                      "into account");
+
+ /* take all logs into account */
+ name_field_str = throttle_size_DEFAULT_NAME_FIELD;
+ name_field_size = strlen(throttle_size_DEFAULT_NAME_FIELD);
+ }
+
+ if (ctx->log_fields_depth > 0) {
+ /* we are looking for specific field and we will take only its size */
+ log_field = get_value_of_msgpack_object_map(map, &ctx->log_fields);
+ if (log_field == NULL) {
+ flb_plg_debug(ctx->ins,
+ "the log field is missing so we are keeping this log");
+ return throttle_size_RET_KEEP;
+ }
+ flb_plg_debug(ctx->ins, "log field found");
+ load_size = get_msgpack_object_size(*log_field);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "using default log field name. All fields will be "
+ "taken into account");
+ load_size = get_msgpack_object_size(map);
+ }
+ flb_plg_debug(ctx->ins, "load size is %lu", load_size);
+
+ lock_throttle_size_table(ctx->hash);
+
+ window = find_throttle_size_window(ctx->hash, name_field_str, name_field_size);
+ if (window == NULL) {
+        /*
+         * Since Fluent Bit works on one thread, there is no chance that anyone
+         * else creates the same window, so we can unlock the mutex and give it
+         * to the ticker.
+         */
+ unlock_throttle_size_table(ctx->hash);
+ current_rate = load_size / (double) ctx->window_size;
+ if (current_rate - ctx->max_size_rate > RELATIVE_ERROR) {
+            flb_plg_info(ctx->ins, "load is too much for window \"%.*s\". "
+                         "The log record will be dropped",
+                         (int) name_field_size, name_field_str);
+ return throttle_size_RET_DROP;
+ }
+
+ window = size_window_create(name_field_str, name_field_size,
+ ctx->window_size);
+ if (window == NULL) {
+            flb_plg_warn(ctx->ins, "not enough memory. Log will be kept.");
+ return throttle_size_RET_KEEP;
+ }
+
+ add_load(window, load_size);
+ flb_plg_debug(ctx->ins, "add %lu bytes to \"%s\" window: "
+ "timestamp: %ld, total %lu",
+ load_size, window->name,
+ window->table[window->head].timestamp, window->total);
+ lock_throttle_size_table(ctx->hash);
+ add_throttle_size_window(ctx->hash, window);
+ unlock_throttle_size_table(ctx->hash);
+ flb_plg_debug(ctx->ins, "new window named \"%s\" was added with load %lu.",
+ window->name, load_size);
+ flb_free(window);
+ }
+ else {
+ /*
+ * We found the wanted window and now we are going to make check and
+ * modify it if needed
+ */
+        flb_plg_debug(ctx->ins, "current rate is %.2f for window \"%s\"",
+ ((window->total + load_size) / (double) window->size),
+ window->name);
+
+ current_rate = (window->total + load_size) / (double) ctx->window_size;
+
+ if (current_rate - ctx->max_size_rate > RELATIVE_ERROR) {
+ unlock_throttle_size_table(ctx->hash);
+            flb_plg_info(ctx->ins, "load is too much. The log record for \"%.*s\" "
+                         "will be dropped.",
+                         (int) name_field_size, name_field_str);
+ return throttle_size_RET_DROP;
+ }
+ add_load(window, load_size);
+ flb_plg_debug(ctx->ins, "add %lu bytes to \"%s\" window: "
+ "timestamp: %ld, total %lu", load_size, window->name,
+ window->table[window->head].timestamp, window->total);
+ unlock_throttle_size_table(ctx->hash);
+ flb_plg_debug(ctx->ins, "load of %lu was added and the message was kept",
+ load_size);
+ }
+
+ return throttle_size_RET_KEEP;
+}
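+
+/* Example (illustrative): with rate = 1024 bytes and window = 5, a window may
+ * accumulate roughly 5 * 1024 bytes; a record that pushes
+ * (total + load) / window above the rate by more than RELATIVE_ERROR is dropped. */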
+
+/*
+ * load_field_key_list splits @str into a list of strings representing the depth
+ * of a nested key.
+ *
+ * The split is based on SPLIT_DELIMITER.
+ */
+static inline int load_field_key_list(char *str, struct mk_list *the_list,
+ size_t *list_size)
+{
+ struct mk_list *split;
+ struct mk_list *head = NULL;
+ struct field_key *fk;
+ struct flb_split_entry *entry;
+
+ *list_size = 0;
+ mk_list_init(the_list);
+
+ if (str != NULL) {
+ split = flb_utils_split(str, SPLIT_DELIMITER, KEY_DEPTH);
+ if (mk_list_size(split) < 1) {
+ return 0;
+ }
+ mk_list_foreach(head, split) {
+ fk = flb_malloc(sizeof(struct field_key));
+ if (!fk) {
+ flb_errno();
+ flb_utils_split_free(split);
+ return -1;
+ }
+
+ entry = mk_list_entry(head, struct flb_split_entry, _head);
+
+ fk->key = strndup(entry->value, entry->len);
+ fk->key_len = entry->len;
+ mk_list_add(&fk->_head, the_list);
+ (*list_size)++;
+ }
+
+ flb_utils_split_free(split);
+ }
+ return 0;
+}
+
+static int parse_duration(char *interval, int default_seconds,
+ struct flb_filter_throttle_size_ctx *ctx)
+{
+ double seconds = 0.0;
+ double s;
+ char *p;
+
+ s = strtod(interval, &p);
+ if (0 >= s
+ /* No extra chars after the number and an optional s,m,h,d char. */
+ || (*p && *(p + 1))
+ /* Check any suffix char and update S based on the suffix. */
+ || !apply_suffix(&s, *p)) {
+        flb_plg_warn(ctx->ins, "invalid time interval %s, falling back to "
+                     "default: %d seconds",
+                     interval, default_seconds);
+ return default_seconds;
+ }
+
+ seconds += s;
+ return seconds;
+}
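+
+/* Examples (illustrative): "30" or "30s" -> 30 seconds, "5m" -> 300, "2h" -> 7200,
+ * "1d" -> 86400. Zero, negative or unparsable values fall back to the default. */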
+
+static inline int configure(struct flb_filter_throttle_size_ctx *ctx,
+ struct flb_filter_instance *ins)
+{
+ const char *str = NULL;
+ double val = 0;
+ char *endp;
+ ssize_t bytes;
+
+ ctx->name_fields_depth = 0;
+
+ /* rate per second */
+ str = flb_filter_get_property("rate", ins);
+ if (str) {
+ bytes = flb_utils_size_to_bytes(str);
+ if (bytes > 0) {
+ ctx->max_size_rate = (double) bytes;
+ }
+ else {
+ ctx->max_size_rate = throttle_size_DEFAULT_RATE;
+ }
+ }
+ else {
+ ctx->max_size_rate = throttle_size_DEFAULT_RATE;
+ }
+
+    /* window size */
+ str = flb_filter_get_property("window", ins);
+ if (str != NULL && (val = strtoul(str, &endp, 10)) > 1) {
+ ctx->window_size = val;
+ }
+ else {
+ ctx->window_size = throttle_size_DEFAULT_WINDOW;
+ }
+
+ /* print informational status */
+ str = flb_filter_get_property("print_status", ins);
+ if (str != NULL) {
+ ctx->print_status = flb_utils_bool(str);
+ }
+ else {
+ ctx->print_status = throttle_size_DEFAULT_STATUS;
+ }
+
+ /* sliding interval */
+ str = flb_filter_get_property("interval", ins);
+ if (str != NULL) {
+ ctx->slide_interval =
+ parse_duration((char *) str, throttle_size_DEFAULT_INTERVAL, ctx);
+ }
+ else {
+ ctx->slide_interval = throttle_size_DEFAULT_INTERVAL;
+ }
+
+    /* the field whose size will be taken into account */
+ str = flb_filter_get_property("log_field", ins);
+ if (load_field_key_list((char *) str, &ctx->log_fields, &ctx->log_fields_depth)) {
+ return -1;
+ }
+
+ str = NULL;
+
+    /* the field based on which a new throttling window will be created */
+ str = flb_filter_get_property("name_field", ins);
+ if (load_field_key_list((char *) str, &ctx->name_fields, &ctx->name_fields_depth)) {
+ return -1;
+ }
+
+    /*
+     * The time after which the window will be deleted when no log size has
+     * been recorded to it.
+     */
+ str = flb_filter_get_property("window_time_duration", ins);
+ if (str != NULL) {
+ ctx->window_time_duration =
+ parse_duration((char *) str, throttle_size_DEFAULT_WINDOW_DURATION, ctx);
+ }
+ else {
+ ctx->window_time_duration = throttle_size_DEFAULT_WINDOW_DURATION;
+ }
+
+ /* Create the hash table of windows */
+ str = flb_filter_get_property("hash_table_size", ins);
+ if (str != NULL && (val = strtoul(str, &endp, 10)) > 0) {
+ ctx->hash = create_throttle_size_table(val);
+ }
+ else {
+ ctx->hash =
+ create_throttle_size_table
+ (throttle_size_WINDOW_TABLE_DEFAULT_SIZE);
+ }
+ if (ctx->hash == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ return 0;
+}
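+
+/*
+ * Illustrative configuration sketch; the property values below are hypothetical,
+ * not defaults:
+ *
+ *   [FILTER]
+ *       Name                 throttle_size
+ *       Match                *
+ *       Rate                 1M
+ *       Window               10
+ *       Interval             1s
+ *       Print_Status         true
+ *       Name_Field           kubernetes|pod_name
+ *       Log_Field            log
+ *       Window_Time_Duration 5m
+ *       Hash_Table_Size      512
+ */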
+
+static int cb_throttle_size_init(struct flb_filter_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct flb_filter_throttle_size_ctx *ctx;
+ struct throttle_size_window *window;
+
+ /* Create context */
+ ctx = flb_calloc(1, sizeof(struct flb_filter_throttle_size_ctx));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+
+ /* parse plugin configuration */
+ ret = configure(ctx, ins);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ if (ctx->window_time_duration < ctx->slide_interval * ctx->window_size) {
+ ctx->window_time_duration = ctx->slide_interval * ctx->window_size;
+ }
+
+    /*
+     * If "*" is specified as the name field, all logs fall under the same
+     * window, which we create at init time to save some checks later.
+     */
+ if (ctx->name_fields_depth == 0) {
+ window = size_window_create(throttle_size_DEFAULT_NAME_FIELD,
+ strlen(throttle_size_DEFAULT_NAME_FIELD),
+ ctx->window_size);
+ if (window == NULL) {
+ flb_free(ctx);
+ flb_errno();
+ return -1;
+ }
+ add_throttle_size_window(ctx->hash, window);
+ flb_free(window);
+ }
+
+ ctx->ticker_id = flb_malloc(sizeof(pthread_t));
+ if (!ctx->ticker_id) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->done = false;
+ pthread_create((pthread_t *) ctx->ticker_id, NULL, &size_time_ticker,
+ ctx);
+
+ /* Set our context */
+ flb_filter_set_context(ins, ctx);
+
+ return 0;
+}
+
+static int cb_throttle_size_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t * out_size,
+ struct flb_filter_instance *ins,
+ struct flb_input_instance *i_ins,
+ void *context, struct flb_config *config)
+{
+ int ret;
+ int old_size = 0;
+ int new_size = 0;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ (void) ins;
+ (void) i_ins;
+ (void) config;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ old_size++;
+
+ ret = throttle_data_by_size(*log_event.body, context);
+
+ if (ret == throttle_size_RET_KEEP) {
+ ret = flb_log_event_encoder_emit_raw_record(
+ &log_encoder,
+ log_decoder.record_base,
+ log_decoder.record_length);
+
+ new_size++;
+ }
+ else if (ret == throttle_size_RET_DROP) {
+ /* Do nothing */
+ }
+ }
+
+    /* did we keep everything? */
+ if (old_size == new_size) {
+ /* Destroy the buffer to avoid more overhead */
+ ret = FLB_FILTER_NOTOUCH;
+ }
+ else {
+ *out_buf = log_encoder.output_buffer;
+ *out_size = log_encoder.output_length;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+
+ ret = FLB_FILTER_MODIFIED;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+static void delete_field_key(struct mk_list *head)
+{
+ struct mk_list *curr = NULL, *n = NULL;
+ struct field_key *field;
+
+ mk_list_foreach_safe(curr, n, head) {
+ field = mk_list_entry(curr, struct field_key, _head);
+ mk_list_del(curr);
+ flb_free(field->key);
+ flb_free(field);
+ }
+}
+
+static int cb_throttle_size_exit(void *data, struct flb_config *config)
+{
+ struct flb_filter_throttle_size_ctx *ctx = data;
+
+ ctx->done = true;
+ pthread_join(*(pthread_t *) ctx->ticker_id, NULL);
+
+ flb_free(ctx->ticker_id);
+ destroy_throttle_size_table(ctx->hash);
+ delete_field_key(&ctx->log_fields);
+ delete_field_key(&ctx->name_fields);
+ flb_free(ctx);
+
+ return 0;
+}
+
+struct flb_filter_plugin filter_throttle_size_plugin = {
+ .name = "throttle_size",
+ .description = "Throttle messages by size using sliding window algorithm",
+ .cb_init = cb_throttle_size_init,
+ .cb_filter = cb_throttle_size_filter,
+ .cb_exit = cb_throttle_size_exit,
+ .flags = 0
+};
diff --git a/src/fluent-bit/plugins/filter_throttle_size/throttle_size.h b/src/fluent-bit/plugins/filter_throttle_size/throttle_size.h
new file mode 100644
index 000000000..6a28e4613
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_throttle_size/throttle_size.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_SIZE_FILTER_THROTTLE_H
+#define FLB_SIZE_FILTER_THROTTLE_H
+
+/* actions */
+#define throttle_size_RET_KEEP 0
+#define throttle_size_RET_DROP 1
+
+/* defaults */
+#define throttle_size_DEFAULT_RATE 1024*1024 /* bytes */
+#define throttle_size_DEFAULT_WINDOW 5
+#define throttle_size_DEFAULT_INTERVAL 1
+#define throttle_size_DEFAULT_STATUS FLB_FALSE;
+#define throttle_size_DEFAULT_LOG_FIELD "*"
+#define throttle_size_DEFAULT_NAME_FIELD "*"
+#define throttle_size_DEFAULT_WINDOW_DURATION 60
+#define throttle_size_WINDOW_TABLE_DEFAULT_SIZE 256
+
+#include "size_window.h"
+
+struct flb_filter_throttle_size_ctx
+{
+ int slide_interval;
+ int window_time_duration;
+ double max_size_rate;
+ unsigned int window_size;
+ size_t log_fields_depth;
+ size_t name_fields_depth;
+ void *ticker_id;
+ int print_status;
+
+ volatile bool done;
+
+ struct mk_list name_fields;
+ struct mk_list log_fields;
+
+ /* internal */
+ struct throttle_size_table *hash;
+ struct flb_filter_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_type_converter/CMakeLists.txt b/src/fluent-bit/plugins/filter_type_converter/CMakeLists.txt
new file mode 100644
index 000000000..5938e1b6e
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_type_converter/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ type_converter.c)
+
+FLB_PLUGIN(filter_type_converter "${src}" "")
+
diff --git a/src/fluent-bit/plugins/filter_type_converter/type_converter.c b/src/fluent-bit/plugins/filter_type_converter/type_converter.c
new file mode 100644
index 000000000..36422002c
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_type_converter/type_converter.c
@@ -0,0 +1,399 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_mp.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+#include "type_converter.h"
+
+static int delete_conv_entry(struct conv_entry *conv)
+{
+ if (conv == NULL) {
+ return 0;
+ }
+
+ if (conv->from_key != NULL) {
+ flb_sds_destroy(conv->from_key);
+ conv->from_key = NULL;
+ }
+ if (conv->to_key != NULL) {
+ flb_sds_destroy(conv->to_key);
+ conv->to_key = NULL;
+ }
+ if (conv->rule != NULL) {
+ flb_typecast_rule_destroy(conv->rule);
+ }
+ if (conv->from_ra != NULL) {
+ flb_ra_destroy(conv->from_ra);
+ }
+ mk_list_del(&conv->_head);
+ flb_free(conv);
+ return 0;
+}
+
+static int config_rule(struct type_converter_ctx *ctx, char* type_name,
+ struct flb_config_map_val *mv)
+{
+ struct conv_entry *entry = NULL;
+ struct flb_slist_entry *sentry = NULL;
+
+ if (ctx == NULL || mv == NULL) {
+ return -1;
+ }
+
+ entry = flb_calloc(1, sizeof(struct conv_entry));
+ if (entry == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ entry->rule = NULL;
+ if (mk_list_size(mv->val.list) != 3) {
+        flb_plg_error(ctx->ins, "invalid record parameters, "
+                      "expected 'from_key to_key type', got %d values", mk_list_size(mv->val.list));
+ flb_free(entry);
+ return -1;
+ }
+
+ /* from_key name */
+ sentry = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ entry->from_key = flb_sds_create_len(sentry->str, flb_sds_len(sentry->str));
+
+ /* to_key name */
+ sentry = mk_list_entry_next(&sentry->_head, struct flb_slist_entry,
+ _head, mv->val.list);
+ entry->to_key = flb_sds_create_len(sentry->str, flb_sds_len(sentry->str));
+
+ sentry = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+ entry->rule = flb_typecast_rule_create(type_name, strlen(type_name),
+ sentry->str,
+ flb_sds_len(sentry->str));
+ entry->from_ra = flb_ra_create(entry->from_key, FLB_FALSE);
+ if (entry->rule == NULL || entry->from_ra == NULL) {
+        flb_plg_error(ctx->ins,
+                      "configuration error. Ignoring key=%s",
+ entry->from_key);
+ delete_conv_entry(entry);
+ return -1;
+ }
+
+ mk_list_add(&entry->_head, &ctx->conv_entries);
+
+ return 0;
+}
+
+static int configure(struct type_converter_ctx *ctx,
+ struct flb_filter_instance *f_ins)
+{
+ struct mk_list *head = NULL;
+ struct flb_config_map_val *mv = NULL;
+
+ if (flb_filter_config_map_set(f_ins, ctx) < 0) {
+ flb_errno();
+ flb_plg_error(f_ins, "configuration error");
+ return -1;
+ }
+
+ /* Create rules for each type */
+ flb_config_map_foreach(head, mv, ctx->str_keys) {
+ config_rule(ctx, "string", mv);
+ }
+ flb_config_map_foreach(head, mv, ctx->int_keys) {
+ config_rule(ctx, "int", mv);
+ }
+ flb_config_map_foreach(head, mv, ctx->uint_keys) {
+ config_rule(ctx, "uint", mv);
+ }
+ flb_config_map_foreach(head, mv, ctx->float_keys) {
+ config_rule(ctx, "float", mv);
+ }
+
+ if (mk_list_size(&ctx->conv_entries) == 0) {
+ flb_plg_error(ctx->ins, "no rules");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int delete_list(struct type_converter_ctx *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct conv_entry *conv;
+
+ mk_list_foreach_safe(head, tmp, &ctx->conv_entries) {
+ conv = mk_list_entry(head, struct conv_entry, _head);
+ delete_conv_entry(conv);
+ }
+ return 0;
+}
+
+static int cb_type_converter_init(struct flb_filter_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ struct type_converter_ctx *ctx = NULL;
+ int ret = 0;
+
+ ctx = flb_calloc(1, sizeof(struct type_converter_ctx));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+ mk_list_init(&ctx->conv_entries);
+
+ ret = configure(ctx, ins);
+ if (ret < 0) {
+ flb_plg_error(ins, "configuration error");
+ flb_free(ctx);
+ return -1;
+ }
+ /* set context */
+ flb_filter_set_context(ins, ctx);
+
+ return 0;
+}
+
+static int cb_type_converter_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_bytes,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *filter_context,
+ struct flb_config *config)
+{
+ struct type_converter_ctx *ctx = filter_context;
+ struct flb_time tm;
+ int i;
+ int map_num;
+ int is_record_modified = FLB_FALSE;
+ int ret;
+ msgpack_sbuffer tmp_sbuf;
+ msgpack_packer tmp_pck;
+ msgpack_object *obj;
+ struct conv_entry *entry;
+ struct mk_list *tmp;
+ struct mk_list *head;
+
+ msgpack_object *start_key;
+ msgpack_object *out_key;
+ msgpack_object *out_val;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ (void) f_ins;
+ (void) i_ins;
+ (void) config;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(f_ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(f_ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&tmp_sbuf);
+ msgpack_packer_init(&tmp_pck, &tmp_sbuf, msgpack_sbuffer_write);
+
+ /* Iterate each item to know map number */
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+
+ flb_time_copy(&tm, &log_event.timestamp);
+ obj = log_event.body;
+
+ map_num = obj->via.map.size;
+
+ ret = flb_log_event_encoder_begin_record(&log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(&log_encoder, &tm);
+ }
+
+ ret = flb_log_event_encoder_set_metadata_from_msgpack_object(
+ &log_encoder,
+ log_event.metadata);
+
+ /* write original k/v */
+ for (i = 0;
+ i < map_num &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ i++) {
+ ret = flb_log_event_encoder_append_body_values(
+ &log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&obj->via.map.ptr[i].key),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&obj->via.map.ptr[i].val));
+ }
+
+ mk_list_foreach_safe(head, tmp, &ctx->conv_entries) {
+ start_key = NULL;
+ out_key = NULL;
+ out_val = NULL;
+
+ entry = mk_list_entry(head, struct conv_entry, _head);
+ ret = flb_ra_get_kv_pair(entry->from_ra, *obj, &start_key, &out_key, &out_val);
+ if (start_key == NULL || out_key == NULL || out_val == NULL) {
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+
+ continue;
+ }
+
+ /* key is found. try to convert. */
+ ret = flb_log_event_encoder_append_body_string(
+ &log_encoder,
+ entry->to_key,
+ flb_sds_len(entry->to_key));
+
+ ret = flb_typecast_pack(*out_val, entry->rule, &tmp_pck);
+ if (ret < 0) {
+ /* failed. try to write original val... */
+ flb_plg_error(ctx->ins, "failed to convert. key=%s", entry->from_key);
+
+ ret = flb_log_event_encoder_append_body_msgpack_object(
+ &log_encoder,
+ out_val);
+
+ continue;
+ }
+ else {
+ ret = flb_log_event_encoder_append_body_raw_msgpack(
+ &log_encoder,
+ tmp_sbuf.data, tmp_sbuf.size);
+
+ msgpack_sbuffer_clear(&tmp_sbuf);
+ }
+
+ is_record_modified = FLB_TRUE;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_log_event_encoder_commit_record(&log_encoder);
+ }
+ else {
+ flb_log_event_encoder_rollback_record(&log_encoder);
+ }
+
+ }
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+
+ if (is_record_modified != FLB_TRUE) {
+ /* Destroy the buffer to avoid more overhead */
+ flb_plg_trace(ctx->ins, "no touch");
+
+ ret = FLB_FILTER_NOTOUCH;
+ }
+ else {
+ if (ret == FLB_EVENT_DECODER_ERROR_INSUFFICIENT_DATA &&
+ log_decoder.offset == bytes) {
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ *out_buf = log_encoder.output_buffer;
+ *out_bytes = log_encoder.output_length;
+
+ ret = FLB_FILTER_MODIFIED;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", ret);
+
+ ret = FLB_FILTER_NOTOUCH;
+ }
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return ret;
+}
+
+static int cb_type_converter_exit(void *data, struct flb_config *config) {
+ struct type_converter_ctx *ctx = data;
+
+ if (ctx == NULL) {
+ return 0;
+ }
+ delete_list(ctx);
+ flb_free(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_SLIST_3, "int_key", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct type_converter_ctx, int_keys),
+ "Convert integer to other type. e.g. int_key id id_str string"
+ },
+ {
+     FLB_CONFIG_MAP_SLIST_3, "uint_key", NULL,
+     FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct type_converter_ctx, uint_keys),
+     "Convert unsigned integer to other type. e.g. uint_key id id_str string"
+ },
+ {
+ FLB_CONFIG_MAP_SLIST_3, "float_key", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct type_converter_ctx, float_keys),
+ "Convert float to other type. e.g. float_key ratio id_str string"
+ },
+ {
+ FLB_CONFIG_MAP_SLIST_3, "str_key", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct type_converter_ctx, str_keys),
+ "Convert string to other type. e.g. str_key id id_val integer"
+ },
+ {0}
+};
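+
+/*
+ * Illustrative usage sketch, mirroring the examples in the option descriptions
+ * above (the key names are hypothetical):
+ *
+ *   [FILTER]
+ *       Name     type_converter
+ *       Match    *
+ *       int_key  id  id_str  string
+ *       str_key  id  id_val  integer
+ *
+ * Each rule takes 'from_key to_key type' and appends the converted value under
+ * to_key while keeping the original record fields.
+ */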
+
+
+struct flb_filter_plugin filter_type_converter_plugin = {
+ .name = "type_converter",
+ .description = "Data type converter",
+ .cb_init = cb_type_converter_init,
+ .cb_filter = cb_type_converter_filter,
+ .cb_exit = cb_type_converter_exit,
+ .config_map = config_map,
+ .flags = 0,
+};
diff --git a/src/fluent-bit/plugins/filter_type_converter/type_converter.h b/src/fluent-bit/plugins/filter_type_converter/type_converter.h
new file mode 100644
index 000000000..cd7540351
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_type_converter/type_converter.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_FILTER_TYPE_CONVERTER_H
+#define FLB_FILTER_TYPE_CONVERTER_H
+
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_typecast.h>
+#include <fluent-bit/flb_record_accessor.h>
+
+struct conv_entry {
+ flb_sds_t from_key;
+ struct flb_record_accessor *from_ra;
+ flb_sds_t to_key;
+ struct flb_typecast_rule *rule;
+ struct mk_list _head;
+};
+
+struct type_converter_ctx {
+ struct mk_list conv_entries;
+ struct flb_filter_instance *ins;
+ /* config maps */
+ struct mk_list *int_keys;
+ struct mk_list *uint_keys;
+ struct mk_list *float_keys;
+ struct mk_list *str_keys;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/filter_wasm/CMakeLists.txt b/src/fluent-bit/plugins/filter_wasm/CMakeLists.txt
new file mode 100644
index 000000000..e8e1f5ad4
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_wasm/CMakeLists.txt
@@ -0,0 +1,11 @@
+set(WAMR_ROOT_DIR ../../${FLB_PATH_LIB_WASM_MICRO_RUNTIME})
+set(WASM_INCLUDE_DIRS
+ ${WAMR_ROOT_DIR}/core/iwasm/include
+ )
+
+set(src
+ filter_wasm.c)
+
+FLB_PLUGIN(filter_wasm "${src}" "")
+target_include_directories(flb-plugin-filter_wasm PRIVATE ${WASM_INCLUDE_DIRS})
+target_link_libraries(flb-plugin-filter_wasm flb-wasm-static vmlib-static)
diff --git a/src/fluent-bit/plugins/filter_wasm/filter_wasm.c b/src/fluent-bit/plugins/filter_wasm/filter_wasm.c
new file mode 100644
index 000000000..be3adccd3
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_wasm/filter_wasm.c
@@ -0,0 +1,318 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_filter_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "filter_wasm.h"
+
+/* cb_filter callback */
+static int cb_wasm_filter(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ void **out_buf, size_t *out_bytes,
+ struct flb_filter_instance *f_ins,
+ struct flb_input_instance *i_ins,
+ void *filter_context,
+ struct flb_config *config)
+{
+ int ret;
+ char *ret_val = NULL;
+ char *buf = NULL;
+
+ size_t off = 0;
+ size_t last_off = 0;
+ size_t alloc_size = 0;
+ char *json_buf = NULL;
+ size_t json_size;
+ int root_type;
+ struct flb_wasm *wasm = NULL;
+
+ struct flb_filter_wasm *ctx = filter_context;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ (void) f_ins;
+ (void) i_ins;
+ (void) config;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", ret);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_FILTER_NOTOUCH;
+ }
+
+ wasm = flb_wasm_instantiate(config, ctx->wasm_path, ctx->accessible_dir_list, -1, -1, -1);
+ if (wasm == NULL) {
+        flb_plg_debug(ctx->ins, "failed to instantiate wasm [%s]", ctx->wasm_path);
+ goto on_error;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ off = log_decoder.offset;
+ alloc_size = (off - last_off) + 128; /* JSON is larger than msgpack */
+ last_off = off;
+
+ /* Encode as JSON from msgpack */
+ buf = flb_msgpack_to_json_str(alloc_size, log_event.body);
+
+ if (buf) {
+ /* Execute WASM program */
+ ret_val = flb_wasm_call_function_format_json(wasm, ctx->wasm_function_name,
+ tag, tag_len,
+ log_event.timestamp,
+ buf, strlen(buf));
+
+ flb_free(buf);
+ }
+ else {
+            flb_plg_error(ctx->ins, "failed to encode msgpack as JSON");
+
+ goto on_error;
+ }
+
+ if (ret_val == NULL) { /* Skip record */
+            flb_plg_debug(ctx->ins, "WASM function call failed or returned NULL. Skip record.");
+ continue;
+ }
+
+
+ if (strlen(ret_val) == 0) { /* Skip record */
+ flb_plg_debug(ctx->ins, "WASM function returned empty string. Skip.");
+ flb_free(ret_val);
+ continue;
+ }
+
+ ret = flb_log_event_encoder_begin_record(&log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ &log_encoder, &log_event.timestamp);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_metadata_from_msgpack_object(
+ &log_encoder,
+ log_event.metadata);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ /* Convert JSON payload to msgpack */
+ ret = flb_pack_json(ret_val, strlen(ret_val),
+ &json_buf, &json_size, &root_type, NULL);
+
+ if (ret == 0 && root_type == JSMN_OBJECT) {
+ /* JSON found, pack it msgpack representation */
+ ret = flb_log_event_encoder_set_body_from_raw_msgpack(
+ &log_encoder,
+ json_buf,
+ json_size);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&log_encoder);
+ }
+ else {
+ flb_log_event_encoder_rollback_record(&log_encoder);
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "invalid JSON format. ret: %d, buf: %s", ret, ret_val);
+
+ flb_log_event_encoder_rollback_record(&log_encoder);
+ }
+ }
+ else {
+ flb_log_event_encoder_rollback_record(&log_encoder);
+ }
+
+ /* release 'ret_val' if it was allocated */
+ if (ret_val != NULL) {
+ flb_free(ret_val);
+ }
+
+ /* release 'json_buf' if it was allocated */
+ if (json_buf != NULL) {
+ flb_free(json_buf);
+ }
+ }
+
+ /* Teardown WASM context */
+ flb_wasm_destroy(wasm);
+
+ *out_buf = log_encoder.output_buffer;
+ *out_bytes = log_encoder.output_length;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return FLB_FILTER_MODIFIED;
+
+on_error:
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ if (wasm != NULL) {
+ flb_wasm_destroy(wasm);
+ }
+
+ return FLB_FILTER_NOTOUCH;
+}
+
+/* Read the configuration and validate the required settings */
+static int filter_wasm_config_read(struct flb_filter_wasm *ctx,
+ struct flb_filter_instance *f_ins,
+ struct flb_config *config)
+{
+ int ret;
+
+ ctx->ins = f_ins;
+
+ /* Load the config map */
+ ret = flb_filter_config_map_set(f_ins, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(f_ins, "unable to load configuration");
+ return -1;
+ }
+
+ /* filepath setting */
+ if (ctx->wasm_path == NULL) {
+ flb_plg_error(f_ins, "no WASM 'program path' was given");
+ return -1;
+ }
+
+ /* function_name setting */
+ if (ctx->wasm_function_name == NULL) {
+ flb_plg_error(f_ins, "no WASM 'function name' was given");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void delete_wasm_config(struct flb_filter_wasm *ctx)
+{
+ if (!ctx) {
+ return;
+ }
+
+ flb_free(ctx);
+}
+
+/* Initialize plugin */
+static int cb_wasm_init(struct flb_filter_instance *f_ins,
+ struct flb_config *config, void *data)
+{
+ struct flb_filter_wasm *ctx = NULL;
+ int ret = -1;
+
+ /* Allocate space for the configuration */
+ ctx = flb_calloc(1, sizeof(struct flb_filter_wasm));
+ if (!ctx) {
+ return -1;
+ }
+
+ /* Initialize exec config */
+ ret = filter_wasm_config_read(ctx, f_ins, config);
+ if (ret < 0) {
+ goto init_error;
+ }
+
+ flb_wasm_init(config);
+
+ /* Set context */
+ flb_filter_set_context(f_ins, ctx);
+ return 0;
+
+init_error:
+ delete_wasm_config(ctx);
+
+ return -1;
+}
+
+static int cb_wasm_exit(void *data, struct flb_config *config)
+{
+ struct flb_filter_wasm *ctx = data;
+
+ flb_wasm_destroy_all(config);
+ delete_wasm_config(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "wasm_path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_filter_wasm, wasm_path),
+ "Set the wasm path to execute"
+ },
+ {
+     FLB_CONFIG_MAP_CLIST, "accessible_paths", ".",
+     0, FLB_TRUE, offsetof(struct flb_filter_wasm, accessible_dir_list),
+     "Specify paths to be accessible from a WASM program. "
+     "Default value is the current working directory"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "function_name", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_filter_wasm, wasm_function_name),
+ "Set the function name in wasm to execute"
+ },
+ /* EOF */
+ {0}
+};
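+
+/*
+ * Illustrative configuration sketch; the path, function name and directories
+ * are hypothetical:
+ *
+ *   [FILTER]
+ *       Name              wasm
+ *       Match             *
+ *       WASM_Path         /path/to/filter.wasm
+ *       Function_Name     filter_logs
+ *       Accessible_Paths  /tmp,/var/log
+ */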
+
+struct flb_filter_plugin filter_wasm_plugin = {
+ .name = "wasm",
+ .description = "WASM program filter",
+ .cb_init = cb_wasm_init,
+ .cb_filter = cb_wasm_filter,
+ .cb_exit = cb_wasm_exit,
+ .config_map = config_map
+};
diff --git a/src/fluent-bit/plugins/filter_wasm/filter_wasm.h b/src/fluent-bit/plugins/filter_wasm/filter_wasm.h
new file mode 100644
index 000000000..4aed048fb
--- /dev/null
+++ b/src/fluent-bit/plugins/filter_wasm/filter_wasm.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef FLB_FILTER_WASM_H
+#define FLB_FILTER_WASM_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/wasm/flb_wasm.h>
+
+#include <msgpack.h>
+
+struct flb_filter_wasm {
+ flb_sds_t wasm_path;
+    struct mk_list *accessible_dir_list; /* list of directories to be
+                                          * accessible from WASM */
+ flb_sds_t wasm_function_name;
+ struct flb_filter_instance *ins;
+ struct flb_wasm *wasm;
+};
+
+#endif /* FLB_FILTER_WASM_H */
diff --git a/src/fluent-bit/plugins/in_calyptia_fleet/CMakeLists.txt b/src/fluent-bit/plugins/in_calyptia_fleet/CMakeLists.txt
new file mode 100644
index 000000000..593514d26
--- /dev/null
+++ b/src/fluent-bit/plugins/in_calyptia_fleet/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ in_calyptia_fleet.c)
+
+FLB_PLUGIN(in_calyptia_fleet "${src}" "")
diff --git a/src/fluent-bit/plugins/in_calyptia_fleet/in_calyptia_fleet.c b/src/fluent-bit/plugins/in_calyptia_fleet/in_calyptia_fleet.c
new file mode 100644
index 000000000..3175b5645
--- /dev/null
+++ b/src/fluent-bit/plugins/in_calyptia_fleet/in_calyptia_fleet.c
@@ -0,0 +1,1269 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <signal.h>
+#include <sys/stat.h>
+
+#include <msgpack.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_strptime.h>
+#include <fluent-bit/flb_reload.h>
+#include <fluent-bit/flb_lib.h>
+#include <fluent-bit/config_format/flb_cf_fluentbit.h>
+#include <fluent-bit/flb_base64.h>
+
+
+#define CALYPTIA_H_PROJECT "X-Project-Token"
+#define CALYPTIA_H_CTYPE "Content-Type"
+#define CALYPTIA_H_CTYPE_JSON "application/json"
+
+#define DEFAULT_INTERVAL_SEC "15"
+#define DEFAULT_INTERVAL_NSEC "0"
+
+#define CALYPTIA_HOST "cloud-api.calyptia.com"
+#define CALYPTIA_PORT "443"
+
+#ifndef _WIN32
+#define PATH_SEPARATOR "/"
+#define DEFAULT_CONFIG_DIR "/tmp/calyptia-fleet"
+#else
+#define DEFAULT_CONFIG_DIR NULL
+#define PATH_SEPARATOR "\\"
+#endif
+
+struct flb_in_calyptia_fleet_config {
+ /* Time interval check */
+ int interval_sec;
+ int interval_nsec;
+
+    /* Grabbed from the cfg_path, used to check if the configuration
+     * has been updated.
+     */
+ long config_timestamp;
+
+ flb_sds_t api_key;
+ flb_sds_t fleet_id;
+ flb_sds_t fleet_name;
+ flb_sds_t machine_id;
+ flb_sds_t config_dir;
+ flb_sds_t cloud_host;
+ flb_sds_t cloud_port;
+
+ flb_sds_t fleet_url;
+
+ struct flb_input_instance *ins; /* plugin instance */
+ struct flb_config *config; /* Fluent Bit context */
+
+ /* Networking */
+ struct flb_upstream *u;
+
+ int event_fd;
+
+ int collect_fd;
+};
+
+static char *find_case_header(struct flb_http_client *cli, const char *header)
+{
+ char *ptr;
+ char *headstart;
+
+
+ headstart = strstr(cli->resp.data, "\r\n");
+
+ if (headstart == NULL) {
+ return NULL;
+ }
+
+ /* Lookup the beginning of the header */
+ for (ptr = headstart; ptr != NULL && ptr+2 < cli->resp.payload; ptr = strstr(ptr, "\r\n")) {
+
+ if (ptr + 4 < cli->resp.payload && strcmp(ptr, "\r\n\r\n") == 0) {
+ return NULL;
+ }
+
+ ptr+=2;
+
+ /* no space left for header */
+ if (ptr + strlen(header)+2 >= cli->resp.payload) {
+ return NULL;
+ }
+
+ /* matched header and the delimiter */
+ if (strncasecmp(ptr, header, strlen(header)) == 0) {
+
+ if (ptr[strlen(header)] == ':' && ptr[strlen(header)+1] == ' ') {
+ return ptr;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/* Try to find a header value in the buffer. Copied from flb_http_client.c. */
+static int case_header_lookup(struct flb_http_client *cli,
+ const char *header, int header_len,
+ const char **out_val, int *out_len)
+{
+ char *ptr;
+ char *crlf;
+ char *end;
+
+ if (!cli->resp.data) {
+ return -1;
+ }
+
+ ptr = find_case_header(cli, header);
+ end = strstr(cli->resp.data, "\r\n\r\n");
+
+ if (!ptr) {
+
+ if (end) {
+ /* The headers are complete but the header is not there */
+ return -1;
+ }
+
+ /* We need more data */
+ return -1;
+ }
+
+ /* Exclude matches in the body */
+ if (end && ptr > end) {
+ return -1;
+ }
+
+ /* Lookup CRLF (end of line \r\n) */
+ crlf = strstr(ptr, "\r\n");
+
+ if (!crlf) {
+ return -1;
+ }
+
+ /* sanity check that the header_len does not exceed the headers. */
+ if (ptr + header_len + 2 > end) {
+ return -1;
+ }
+
+ ptr += header_len + 2;
+
+ *out_val = ptr;
+ *out_len = (crlf - ptr);
+
+ return 0;
+}
+
+struct reload_ctx {
+ flb_ctx_t *flb;
+ flb_sds_t cfg_path;
+};
+
+static flb_sds_t fleet_config_filename(struct flb_in_calyptia_fleet_config *ctx, char *fname)
+{
+ flb_sds_t cfgname;
+
+ cfgname = flb_sds_create_size(4096);
+
+ if (ctx->fleet_name != NULL) {
+ flb_sds_printf(&cfgname,
+ "%s" PATH_SEPARATOR "%s" PATH_SEPARATOR "%s" PATH_SEPARATOR "%s.ini",
+ ctx->config_dir, ctx->machine_id, ctx->fleet_name, fname);
+ }
+ else {
+ flb_sds_printf(&cfgname,
+ "%s" PATH_SEPARATOR "%s" PATH_SEPARATOR "%s" PATH_SEPARATOR "%s.ini",
+ ctx->config_dir, ctx->machine_id, ctx->fleet_id, fname);
+ }
+
+ return cfgname;
+}
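+
+/* Example (illustrative): with config_dir "/tmp/calyptia-fleet", machine_id
+ * "abcd1234" and fleet_name "prod", fleet_config_filename(ctx, "cur") yields
+ * "/tmp/calyptia-fleet/abcd1234/prod/cur.ini" (backslash separators on Windows). */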
+
+static flb_sds_t new_fleet_config_filename(struct flb_in_calyptia_fleet_config *ctx)
+{
+ return fleet_config_filename(ctx, "new");
+}
+
+static flb_sds_t cur_fleet_config_filename(struct flb_in_calyptia_fleet_config *ctx)
+{
+ return fleet_config_filename(ctx, "cur");
+}
+
+static flb_sds_t old_fleet_config_filename(struct flb_in_calyptia_fleet_config *ctx)
+{
+ return fleet_config_filename(ctx, "old");
+}
+
+static flb_sds_t time_fleet_config_filename(struct flb_in_calyptia_fleet_config *ctx, time_t t)
+{
+ char s_last_modified[32];
+
+ snprintf(s_last_modified, sizeof(s_last_modified)-1, "%d", (int)t);
+ return fleet_config_filename(ctx, s_last_modified);
+}
+
+static int is_new_fleet_config(struct flb_in_calyptia_fleet_config *ctx, struct flb_config *cfg)
+{
+ flb_sds_t cfgnewname;
+ int ret = FLB_FALSE;
+
+
+ if (cfg->conf_path_file == NULL) {
+ return FLB_FALSE;
+ }
+
+ cfgnewname = new_fleet_config_filename(ctx);
+
+ if (strcmp(cfgnewname, cfg->conf_path_file) == 0) {
+ ret = FLB_TRUE;
+ }
+
+ flb_sds_destroy(cfgnewname);
+
+ return ret;
+}
+
+static int is_cur_fleet_config(struct flb_in_calyptia_fleet_config *ctx, struct flb_config *cfg)
+{
+ flb_sds_t cfgcurname;
+ int ret = FLB_FALSE;
+
+
+ if (cfg->conf_path_file == NULL) {
+ return FLB_FALSE;
+ }
+
+ cfgcurname = cur_fleet_config_filename(ctx);
+
+ if (strcmp(cfgcurname, cfg->conf_path_file) == 0) {
+ ret = FLB_TRUE;
+ }
+
+ flb_sds_destroy(cfgcurname);
+
+ return ret;
+}
+
+static int is_timestamped_fleet_config(struct flb_in_calyptia_fleet_config *ctx, struct flb_config *cfg)
+{
+ char *fname;
+ char *end;
+ long val;
+
+ if (cfg->conf_path_file == NULL) {
+ return FLB_FALSE;
+ }
+
+ fname = strrchr(cfg->conf_path_file, PATH_SEPARATOR[0]);
+
+ if (fname == NULL) {
+ return FLB_FALSE;
+ }
+
+ fname++;
+
+ errno = 0;
+ val = strtol(fname, &end, 10);
+
+ if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN)) ||
+ (errno != 0 && val == 0)) {
+ flb_errno();
+ return FLB_FALSE;
+ }
+
+ if (strcmp(end, ".ini") == 0) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+static int is_fleet_config(struct flb_in_calyptia_fleet_config *ctx, struct flb_config *cfg)
+{
+ if (cfg->conf_path_file == NULL) {
+ return FLB_FALSE;
+ }
+
+ return is_new_fleet_config(ctx, cfg) ||
+ is_cur_fleet_config(ctx, cfg) ||
+ is_timestamped_fleet_config(ctx, cfg);
+}
+
+static int exists_new_fleet_config(struct flb_in_calyptia_fleet_config *ctx)
+{
+ flb_sds_t cfgnewname;
+ int ret = FLB_FALSE;
+
+
+ cfgnewname = new_fleet_config_filename(ctx);
+ ret = access(cfgnewname, F_OK) == 0 ? FLB_TRUE : FLB_FALSE;
+
+ flb_sds_destroy(cfgnewname);
+ return ret;
+}
+
+static int exists_cur_fleet_config(struct flb_in_calyptia_fleet_config *ctx)
+{
+ flb_sds_t cfgcurname;
+ int ret = FLB_FALSE;
+
+
+ cfgcurname = cur_fleet_config_filename(ctx);
+ ret = access(cfgcurname, F_OK) == 0 ? FLB_TRUE : FLB_FALSE;
+
+ flb_sds_destroy(cfgcurname);
+ return ret;
+}
+
+static void *do_reload(void *data)
+{
+ struct reload_ctx *reload = (struct reload_ctx *)data;
+
+ /* avoid reloading the current configuration... just use our new one! */
+ flb_context_set(reload->flb);
+ reload->flb->config->enable_hot_reload = FLB_TRUE;
+ reload->flb->config->conf_path_file = reload->cfg_path;
+
+ sleep(5);
+#ifndef FLB_SYSTEM_WINDOWS
+ kill(getpid(), SIGHUP);
+#else
+ GenerateConsoleCtrlEvent(1 /* CTRL_BREAK_EVENT_1 */, 0);
+#endif
+ return NULL;
+}
+
+static int test_config_is_valid(flb_sds_t cfgpath)
+{
+ struct flb_config *config;
+ struct flb_cf *conf;
+ int ret = FLB_FALSE;
+
+
+ config = flb_config_init();
+
+ if (config == NULL) {
+ goto config_init_error;
+ }
+
+ conf = flb_cf_create();
+
+ if (conf == NULL) {
+ goto cf_create_error;
+ }
+
+ conf = flb_cf_create_from_file(conf, cfgpath);
+
+ if (conf == NULL) {
+ goto cf_create_from_file_error;
+ }
+
+ if (flb_config_load_config_format(config, conf)) {
+ goto cf_load_config_format_error;
+ }
+
+ if (flb_reload_property_check_all(config)) {
+ goto cf_property_check_error;
+ }
+
+ ret = FLB_TRUE;
+
+cf_property_check_error:
+cf_load_config_format_error:
+cf_create_from_file_error:
+ flb_cf_destroy(conf);
+cf_create_error:
+ flb_config_exit(config);
+config_init_error:
+ return ret;
+}
+
+static int execute_reload(struct flb_in_calyptia_fleet_config *ctx, flb_sds_t cfgpath)
+{
+ struct reload_ctx *reload;
+ pthread_t pth;
+ pthread_attr_t ptha;
+ flb_ctx_t *flb = flb_context_get();
+
+ if (ctx->collect_fd > 0) {
+ flb_input_collector_pause(ctx->collect_fd, ctx->ins);
+ }
+
+ if (flb == NULL) {
+ flb_plg_error(ctx->ins, "unable to get fluent-bit context.");
+
+ if (ctx->collect_fd > 0) {
+ flb_input_collector_resume(ctx->collect_fd, ctx->ins);
+ }
+
+ return FLB_FALSE;
+ }
+
+ /* fix execution in valgrind...
+ * otherwise flb_reload errors out with:
+ * [error] [reload] given flb context is NULL
+ */
+ flb_plg_info(ctx->ins, "loading configuration from %s.", cfgpath);
+
+ if (test_config_is_valid(cfgpath) == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "unable to load configuration.");
+
+ if (ctx->collect_fd > 0) {
+ flb_input_collector_resume(ctx->collect_fd, ctx->ins);
+ }
+
+ return FLB_FALSE;
+ }
+
+ reload = flb_calloc(1, sizeof(struct reload_ctx));
+ reload->flb = flb;
+ reload->cfg_path = cfgpath;
+
+ pthread_attr_init(&ptha);
+ pthread_attr_setdetachstate(&ptha, PTHREAD_CREATE_DETACHED);
+ pthread_create(&pth, &ptha, do_reload, reload);
+
+ return FLB_TRUE;
+}
+
+static char *tls_setting_string(int use_tls)
+{
+ if (use_tls) {
+ return "On";
+ }
+
+ return "Off";
+}
+
+static flb_sds_t parse_api_key_json(struct flb_in_calyptia_fleet_config *ctx,
+ char *payload, size_t size)
+{
+ int ret;
+ int out_size;
+ char *pack;
+ struct flb_pack_state pack_state;
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object_kv *cur;
+ msgpack_object_str *key;
+ flb_sds_t project_id;
+ int idx = 0;
+
+ /* Initialize packer */
+ flb_pack_state_init(&pack_state);
+
+ /* Pack JSON as msgpack */
+ ret = flb_pack_json_state(payload, size,
+ &pack, &out_size, &pack_state);
+ flb_pack_state_reset(&pack_state);
+
+ /* Handle exceptions */
+ if (ret == FLB_ERR_JSON_PART) {
+ flb_plg_warn(ctx->ins, "JSON data is incomplete, skipping");
+ return NULL;
+ }
+ else if (ret == FLB_ERR_JSON_INVAL) {
+ flb_plg_warn(ctx->ins, "invalid JSON message, skipping");
+ return NULL;
+ }
+ else if (ret == -1) {
+ return NULL;
+ }
+
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, pack, out_size, &off) == MSGPACK_UNPACK_SUCCESS) {
+
+ if (result.data.type == MSGPACK_OBJECT_MAP) {
+ for (idx = 0; idx < result.data.via.map.size; idx++) {
+ cur = &result.data.via.map.ptr[idx];
+ key = &cur->key.via.str;
+
+ if (strncmp(key->ptr, "ProjectID", key->size) == 0) {
+
+ if (cur->val.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "ProjectID in API key is not a string");
+ msgpack_unpacked_destroy(&result);
+ return NULL;
+ }
+
+ project_id = flb_sds_create_len(cur->val.via.str.ptr,
+ cur->val.via.str.size);
+ msgpack_unpacked_destroy(&result);
+ flb_free(pack);
+
+ return project_id;
+ }
+ }
+ }
+ }
+
+ msgpack_unpacked_destroy(&result);
+ flb_free(pack);
+
+ return NULL;
+}
+
+static ssize_t parse_fleet_search_json(struct flb_in_calyptia_fleet_config *ctx,
+ char *payload, size_t size)
+{
+ int ret;
+ int out_size;
+ char *pack;
+ struct flb_pack_state pack_state;
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object_array *results;
+ msgpack_object_kv *cur;
+ msgpack_object_str *key;
+ int idx = 0;
+
+ /* Initialize packer */
+ flb_pack_state_init(&pack_state);
+
+ /* Pack JSON as msgpack */
+ ret = flb_pack_json_state(payload, size,
+ &pack, &out_size, &pack_state);
+ flb_pack_state_reset(&pack_state);
+
+ /* Handle exceptions */
+ if (ret == FLB_ERR_JSON_PART) {
+ flb_plg_warn(ctx->ins, "JSON data is incomplete, skipping");
+ return -1;
+ }
+ else if (ret == FLB_ERR_JSON_INVAL) {
+ flb_plg_warn(ctx->ins, "invalid JSON message, skipping");
+ return -1;
+ }
+ else if (ret == -1) {
+ return -1;
+ }
+
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, pack, out_size, &off) == MSGPACK_UNPACK_SUCCESS) {
+
+ if (result.data.type == MSGPACK_OBJECT_ARRAY) {
+ results = &result.data.via.array;
+
+ if (results->ptr[0].type == MSGPACK_OBJECT_MAP) {
+
+ for (idx = 0; idx < results->ptr[0].via.map.size; idx++) {
+ cur = &results->ptr[0].via.map.ptr[idx];
+ key = &cur->key.via.str;
+
+ if (strncasecmp(key->ptr, "id", key->size) == 0) {
+
+ if (cur->val.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "unable to find fleet by name");
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ ctx->fleet_id = flb_sds_create_len(cur->val.via.str.ptr,
+ cur->val.via.str.size);
+ break;
+ }
+ break;
+ }
+ break;
+ }
+ }
+ }
+
+ msgpack_unpacked_destroy(&result);
+ flb_free(pack);
+
+ if (ctx->fleet_id == NULL) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static int get_calyptia_fleet_id_by_name(struct flb_in_calyptia_fleet_config *ctx,
+ struct flb_connection *u_conn,
+ struct flb_config *config)
+{
+ struct flb_http_client *client;
+ flb_sds_t url;
+ flb_sds_t project_id;
+ unsigned char token[512] = {0};
+ unsigned char encoded[256];
+ size_t elen;
+ size_t tlen;
+ char *api_token_sep;
+ size_t b_sent;
+ int ret;
+
+ api_token_sep = strchr(ctx->api_key, '.');
+
+ if (api_token_sep == NULL) {
+ return -1;
+ }
+
+ elen = api_token_sep-ctx->api_key;
+ elen = elen + (4 - (elen % 4));
+
+ if (elen > sizeof(encoded)) {
+ flb_plg_error(ctx->ins, "API Token is too large");
+ return -1;
+ }
+
+ memset(encoded, '=', sizeof(encoded));
+ memcpy(encoded, ctx->api_key, api_token_sep-ctx->api_key);
+
+ ret = flb_base64_decode(token, sizeof(token)-1, &tlen,
+ encoded, elen);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ project_id = parse_api_key_json(ctx, (char *)token, tlen);
+
+ if (project_id == NULL) {
+ return -1;
+ }
+
+ url = flb_sds_create_size(4096);
+ flb_sds_printf(&url, "/v1/search?project_id=%s&resource=fleet&term=%s",
+ project_id, ctx->fleet_name);
+
+ client = flb_http_client(u_conn, FLB_HTTP_GET, url, NULL, 0,
+ ctx->ins->host.name, ctx->ins->host.port, NULL, 0);
+
+ if (!client) {
+ flb_plg_error(ctx->ins, "unable to create http client");
+ return -1;
+ }
+
+ flb_http_buffer_size(client, 8192);
+
+ flb_http_add_header(client,
+ CALYPTIA_H_PROJECT, sizeof(CALYPTIA_H_PROJECT) - 1,
+ ctx->api_key, flb_sds_len(ctx->api_key));
+
+ ret = flb_http_do(client, &b_sent);
+
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "http do error");
+ return -1;
+ }
+
+ if (client->resp.status != 200) {
+ flb_plg_error(ctx->ins, "search http status code error: %d", client->resp.status);
+ return -1;
+ }
+
+ if (client->resp.payload_size <= 0) {
+ flb_plg_error(ctx->ins, "empty response");
+ return -1;
+ }
+
+ if (parse_fleet_search_json(ctx, client->resp.payload, client->resp.payload_size) == -1) {
+ flb_plg_error(ctx->ins, "unable to find fleet: %s", ctx->fleet_name);
+ return -1;
+ }
+
+ if (ctx->fleet_id == NULL) {
+ return -1;
+ }
+ return 0;
+}
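+
+/*
+ * Sketch of the API key handling above (values are hypothetical): for an
+ * api_key of the form "<base64 segment>.<secret>", the text before the
+ * first '.' is padded with '=' to a multiple of four and base64-decoded,
+ * yielding a small JSON document such as {"ProjectID":"p-123", ...}.
+ * parse_api_key_json() extracts ProjectID, which is then used to query
+ * GET /v1/search?project_id=p-123&resource=fleet&term=<fleet_name>.
+ */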
+
+#ifdef FLB_SYSTEM_WINDOWS
+#define link(a, b) CreateHardLinkA(b, a, 0)
+
+ssize_t readlink(const char *path, char *realpath, size_t srealpath) {
+ HANDLE hFile;
+ DWORD ret;
+
+ hFile = CreateFile(path, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL, NULL);
+
+ if (hFile == INVALID_HANDLE_VALUE) {
+ return -1;
+ }
+
+ ret = GetFinalPathNameByHandleA(hFile, realpath, srealpath, VOLUME_NAME_NT);
+
+ if (ret < srealpath) {
+ CloseHandle(hFile);
+ return -1;
+ }
+
+ CloseHandle(hFile);
+ return ret;
+}
+
+#endif
+
+/* cb_collect callback */
+static int in_calyptia_fleet_collect(struct flb_input_instance *ins,
+ struct flb_config *config,
+ void *in_context)
+{
+ struct flb_in_calyptia_fleet_config *ctx = in_context;
+ struct flb_connection *u_conn;
+ struct flb_http_client *client;
+ flb_sds_t cfgname;
+ flb_sds_t cfgnewname;
+ flb_sds_t cfgoldname;
+ flb_sds_t cfgcurname;
+ flb_sds_t header;
+ flb_sds_t hdr;
+ FILE *cfgfp;
+ const char *fbit_last_modified;
+ int fbit_last_modified_len;
+ struct flb_tm tm_last_modified = { 0 };
+ time_t time_last_modified;
+ char *data;
+ size_t b_sent;
+ int ret = -1;
+#ifdef FLB_SYSTEM_WINDOWS
+ DWORD err;
+ LPSTR lpMsg;
+#endif
+
+ u_conn = flb_upstream_conn_get(ctx->u);
+
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "could not get an upstream connection to %s:%u",
+ ctx->ins->host.name, ctx->ins->host.port);
+ goto conn_error;
+ }
+
+ if (ctx->fleet_id == NULL) {
+
+ if (get_calyptia_fleet_id_by_name(ctx, u_conn, config) == -1) {
+ flb_plg_error(ctx->ins, "unable to find fleet: %s", ctx->fleet_name);
+ goto conn_error;
+ }
+ }
+
+ if (ctx->fleet_url == NULL) {
+ ctx->fleet_url = flb_sds_create_size(4096);
+ flb_sds_printf(&ctx->fleet_url, "/v1/fleets/%s/config?format=ini", ctx->fleet_id);
+ }
+
+ client = flb_http_client(u_conn, FLB_HTTP_GET, ctx->fleet_url,
+ NULL, 0,
+ ctx->ins->host.name, ctx->ins->host.port, NULL, 0);
+
+ if (!client) {
+ flb_plg_error(ins, "unable to create http client");
+ goto client_error;
+ }
+
+ flb_http_buffer_size(client, 8192);
+
+ flb_http_add_header(client,
+ CALYPTIA_H_PROJECT, sizeof(CALYPTIA_H_PROJECT) - 1,
+ ctx->api_key, flb_sds_len(ctx->api_key));
+
+ ret = flb_http_do(client, &b_sent);
+
+ if (ret != 0) {
+ flb_plg_error(ins, "http do error");
+ goto http_error;
+ }
+
+ if (client->resp.status != 200) {
+ flb_plg_error(ins, "http status code error: %d", client->resp.status);
+ goto http_error;
+ }
+
+ if (client->resp.payload_size <= 0) {
+ flb_plg_error(ins, "empty response");
+ goto http_error;
+ }
+
+ /* copy and NULL terminate the payload */
+ data = flb_sds_create_size(client->resp.payload_size + 1);
+
+ if (!data) {
+ goto http_error;
+ }
+ memcpy(data, client->resp.payload, client->resp.payload_size);
+ data[client->resp.payload_size] = '\0';
+
+ ret = case_header_lookup(client, "Last-modified", strlen("Last-modified"),
+ &fbit_last_modified, &fbit_last_modified_len);
+
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "unable to get last-modified header");
+ goto http_error;
+ }
+
+ flb_strptime(fbit_last_modified, "%a, %d %B %Y %H:%M:%S GMT", &tm_last_modified);
+ time_last_modified = mktime(&tm_last_modified.tm);
+
+ cfgname = time_fleet_config_filename(ctx, time_last_modified);
+
+ if (access(cfgname, F_OK) == -1 && errno == ENOENT) {
+ cfgfp = fopen(cfgname, "w+");
+
+ if (cfgfp == NULL) {
+ flb_plg_error(ctx->ins, "unable to open configuration file: %s", cfgname);
+ goto http_error;
+ }
+
+ header = flb_sds_create_size(4096);
+
+ if (ctx->fleet_name == NULL) {
+ hdr = flb_sds_printf(&header,
+ "[CUSTOM]\n"
+ " Name calyptia\n"
+ " api_key %s\n"
+ " fleet_id %s\n"
+ " add_label fleet_id %s\n"
+ " fleet.config_dir %s\n"
+ " calyptia_host %s\n"
+ " calyptia_port %d\n"
+ " calyptia_tls %s\n",
+ ctx->api_key,
+ ctx->fleet_id,
+ ctx->fleet_id,
+ ctx->config_dir,
+ ctx->ins->host.name,
+ ctx->ins->host.port,
+ tls_setting_string(ctx->ins->use_tls)
+ );
+ }
+ else {
+ hdr = flb_sds_printf(&header,
+ "[CUSTOM]\n"
+ " Name calyptia\n"
+ " api_key %s\n"
+ " fleet_name %s\n"
+ " fleet_id %s\n"
+ " add_label fleet_id %s\n"
+ " fleet.config_dir %s\n"
+ " calyptia_host %s\n"
+ " calyptia_port %d\n"
+ " calyptia_tls %s\n",
+ ctx->api_key,
+ ctx->fleet_name,
+ ctx->fleet_id,
+ ctx->fleet_id,
+ ctx->config_dir,
+ ctx->ins->host.name,
+ ctx->ins->host.port,
+ tls_setting_string(ctx->ins->use_tls)
+ );
+ }
+ if (hdr == NULL) {
+ fclose(cfgfp);
+ goto http_error;
+ }
+ if (ctx->machine_id) {
+ hdr = flb_sds_printf(&header, " machine_id %s\n", ctx->machine_id);
+ if (hdr == NULL) {
+ fclose(cfgfp);
+ goto http_error;
+ }
+ }
+ fwrite(header, strlen(header), 1, cfgfp);
+ flb_sds_destroy(header);
+ fwrite(data, client->resp.payload_size, 1, cfgfp);
+ fclose(cfgfp);
+
+ cfgnewname = new_fleet_config_filename(ctx);
+
+ if (exists_new_fleet_config(ctx) == FLB_TRUE) {
+ cfgoldname = old_fleet_config_filename(ctx);
+ rename(cfgnewname, cfgoldname);
+ unlink(cfgnewname);
+ flb_sds_destroy(cfgoldname);
+ }
+
+ if (!link(cfgname, cfgnewname)) {
+#ifdef FLB_SYSTEM_WINDOWS
+ err = GetLastError();
+ FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
+ NULL, err, 0, &lpMsg, 0, NULL);
+ flb_plg_error(ctx->ins, "unable to create hard link: %s", lpMsg);
+#else
+ flb_errno();
+#endif
+ }
+ }
+
+ if (ctx->config_timestamp < time_last_modified) {
+ flb_plg_debug(ctx->ins, "new configuration is newer than current: %ld < %ld",
+ ctx->config_timestamp, time_last_modified);
+ flb_plg_info(ctx->ins, "force the reloading of the configuration file=%d.", ctx->event_fd);
+ flb_sds_destroy(data);
+
+ if (execute_reload(ctx, cfgname) == FLB_FALSE) {
+ cfgoldname = old_fleet_config_filename(ctx);
+ cfgcurname = cur_fleet_config_filename(ctx);
+ rename(cfgoldname, cfgcurname);
+ flb_sds_destroy(cfgcurname);
+ flb_sds_destroy(cfgoldname);
+ goto reload_error;
+ }
+ else {
+ FLB_INPUT_RETURN(0);
+ }
+ }
+
+ ret = 0;
+
+reload_error:
+http_error:
+ flb_http_client_destroy(client);
+client_error:
+ flb_upstream_conn_release(u_conn);
+conn_error:
+ FLB_INPUT_RETURN(ret);
+}
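+
+/*
+ * For reference, the [CUSTOM] header written above renders roughly as
+ * follows (hypothetical values):
+ *
+ * [CUSTOM]
+ * Name calyptia
+ * api_key <api key>
+ * fleet_id fleet-1
+ * add_label fleet_id fleet-1
+ * fleet.config_dir /var/lib/calyptia-fleet
+ * calyptia_host <host>
+ * calyptia_port 443
+ * calyptia_tls On
+ *
+ * followed by the INI payload returned by the fleet configuration
+ * endpoint.
+ */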
+
+#ifdef FLB_SYSTEM_WINDOWS
+#define _mkdir(a, b) mkdir(a)
+#else
+#define _mkdir(a, b) mkdir(a, b)
+#endif
+
+/* recursively create directories, based on:
+ * https://stackoverflow.com/a/2336245
+ * who found it at:
+ * http://nion.modprobe.de/blog/archives/357-Recursive-directory-creation.html
+ */
+static int __mkdir(const char *dir, int perms) {
+ char tmp[255];
+ char *ptr = NULL;
+ size_t len;
+ int ret;
+
+ ret = snprintf(tmp, sizeof(tmp),"%s",dir);
+ if (ret >= sizeof(tmp)) {
+ return -1;
+ }
+
+ len = strlen(tmp);
+ if (tmp[len - 1] == '/') {
+ tmp[len - 1] = 0;
+ }
+
+ for (ptr = tmp + 1; *ptr; ptr++) {
+ if (*ptr == '/') {
+ *ptr = 0;
+ if (access(tmp, F_OK) != 0) {
+ ret = _mkdir(tmp, perms);
+ if (ret != 0) {
+ return ret;
+ }
+ }
+ *ptr = '/';
+ }
+ }
+ return _mkdir(tmp, perms);
+}
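+
+/*
+ * Usage sketch (illustrative path): __mkdir("/var/lib/calyptia-fleet/abc/fleet-1", 0700)
+ * creates each missing path component in turn, similar to `mkdir -p`, and
+ * returns the result of creating the final component.
+ */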
+
+static int create_fleet_directory(struct flb_in_calyptia_fleet_config *ctx)
+{
+ flb_sds_t myfleetdir;
+
+ if (access(ctx->config_dir, F_OK) != 0) {
+ if (__mkdir(ctx->config_dir, 0700) != 0) {
+ return -1;
+ }
+ }
+
+ myfleetdir = flb_sds_create_size(256);
+
+ if (ctx->fleet_name != NULL) {
+ flb_sds_printf(&myfleetdir, "%s" PATH_SEPARATOR "%s" PATH_SEPARATOR "%s",
+ ctx->config_dir, ctx->machine_id, ctx->fleet_name);
+ }
+ else {
+ flb_sds_printf(&myfleetdir, "%s" PATH_SEPARATOR "%s" PATH_SEPARATOR "%s",
+ ctx->config_dir, ctx->machine_id, ctx->fleet_id);
+ }
+
+ if (access(myfleetdir, F_OK) != 0) {
+ if (__mkdir(myfleetdir, 0700) !=0) {
+ return -1;
+ }
+ }
+
+ flb_sds_destroy(myfleetdir);
+ return 0;
+}
+
+static int load_fleet_config(struct flb_in_calyptia_fleet_config *ctx)
+{
+ flb_ctx_t *flb_ctx = flb_context_get();
+ char *fname;
+ char *ext;
+ long timestamp;
+ char realname[4096];
+ ssize_t len;
+
+ if (create_fleet_directory(ctx) != 0) {
+ return -1;
+ }
+
+ /* check if we are already using the fleet configuration file. */
+ if (is_fleet_config(ctx, flb_ctx->config) == FLB_FALSE) {
+ /* check which one and load it */
+ if (exists_cur_fleet_config(ctx) == FLB_TRUE) {
+ return execute_reload(ctx, cur_fleet_config_filename(ctx));
+ }
+ else if (exists_new_fleet_config(ctx) == FLB_TRUE) {
+ return execute_reload(ctx, new_fleet_config_filename(ctx));
+ }
+ }
+ else {
+ if (is_new_fleet_config(ctx, flb_ctx->config) || is_cur_fleet_config(ctx, flb_ctx->config)) {
+ len = readlink(flb_ctx->config->conf_path_file, realname, sizeof(realname));
+
+ if (len > sizeof(realname)) {
+ return FLB_FALSE;
+ }
+
+ fname = basename(realname);
+ }
+ else {
+ fname = basename(flb_ctx->config->conf_path_file);
+ }
+
+ if (fname == NULL) {
+ return FLB_FALSE;
+ }
+
+ errno = 0;
+ timestamp = strtol(fname, &ext, 10);
+
+ if ((errno == ERANGE && (timestamp == LONG_MAX || timestamp == LONG_MIN)) ||
+ (errno != 0 && timestamp == 0)) {
+ flb_errno();
+ return FLB_FALSE;
+ }
+
+ /* unable to parse the timestamp */
+ if (errno == ERANGE) {
+ return FLB_FALSE;
+ }
+
+ ctx->config_timestamp = timestamp;
+ }
+
+ return FLB_FALSE;
+}
+
+static int in_calyptia_fleet_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ int upstream_flags;
+ struct flb_in_calyptia_fleet_config *ctx;
+ (void) data;
+
+#ifdef _WIN32
+ char *tmpdir;
+#endif
+
+ flb_plg_info(in, "initializing calyptia fleet input.");
+
+ if (in->host.name == NULL) {
+ flb_plg_error(in, "no input 'Host' provided");
+ return -1;
+ }
+
+ /* Allocate space for the configuration */
+ ctx = flb_calloc(1, sizeof(struct flb_in_calyptia_fleet_config));
+
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = in;
+ ctx->collect_fd = -1;
+
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *)ctx);
+
+ if (ret == -1) {
+ flb_free(ctx);
+ flb_plg_error(in, "unable to load configuration");
+ return -1;
+ }
+
+#ifdef _WIN32
+ if (ctx->config_dir == NULL) {
+ tmpdir = getenv("TEMP");
+
+ if (tmpdir == NULL) {
+ flb_plg_error(in, "unable to find temporary directory (%%TEMP%%).");
+ return -1;
+ }
+
+ ctx->config_dir = flb_sds_create_size(4096);
+
+ if (ctx->config_dir == NULL) {
+ flb_plg_error(in, "unable to allocate config-dir.");
+ return -1;
+ }
+ flb_sds_printf(&ctx->config_dir, "%s" PATH_SEPARATOR "%s", tmpdir, "calyptia-fleet");
+ }
+#endif
+
+ upstream_flags = FLB_IO_TCP;
+
+ if (in->use_tls) {
+ upstream_flags |= FLB_IO_TLS;
+ }
+
+ ctx->u = flb_upstream_create(config, in->host.name, in->host.port,
+ upstream_flags, in->tls);
+
+ if (!ctx->u) {
+ flb_plg_error(ctx->ins, "could not initialize upstream");
+ flb_free(ctx);
+ return -1;
+ }
+
+ if (ctx->interval_sec <= 0 && ctx->interval_nsec <= 0) {
+ /* Illegal settings. Override them. */
+ ctx->interval_sec = atoi(DEFAULT_INTERVAL_SEC);
+ ctx->interval_nsec = atoi(DEFAULT_INTERVAL_NSEC);
+ }
+
+ if (ctx->interval_sec < atoi(DEFAULT_INTERVAL_SEC)) {
+ ctx->interval_sec = atoi(DEFAULT_INTERVAL_SEC);
+ }
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ /* if we load a new configuration then we will be reloaded anyway */
+ if (load_fleet_config(ctx) == FLB_TRUE) {
+ return 0;
+ }
+
+ /* Set our collector based on time */
+ ret = flb_input_set_collector_time(in,
+ in_calyptia_fleet_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec,
+ config);
+
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not initialize collector for fleet input plugin");
+ flb_free(ctx);
+ return -1;
+ }
+
+ ctx->collect_fd = ret;
+
+ return 0;
+}
+
+static void cb_in_calyptia_fleet_pause(void *data, struct flb_config *config)
+{
+ struct flb_in_calyptia_fleet_config *ctx = data;
+ flb_input_collector_pause(ctx->collect_fd, ctx->ins);
+}
+
+static void cb_in_calyptia_fleet_resume(void *data, struct flb_config *config)
+{
+ struct flb_in_calyptia_fleet_config *ctx = data;
+ flb_input_collector_resume(ctx->collect_fd, ctx->ins);
+}
+
+static int in_calyptia_fleet_exit(void *data, struct flb_config *config)
+{
+ (void) config;
+ struct flb_in_calyptia_fleet_config *ctx = (struct flb_in_calyptia_fleet_config *)data;
+
+ flb_input_collector_delete(ctx->collect_fd, ctx->ins);
+ flb_upstream_destroy(ctx->u);
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "api_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_calyptia_fleet_config, api_key),
+ "Calyptia Cloud API Key."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "config_dir", DEFAULT_CONFIG_DIR,
+ 0, FLB_TRUE, offsetof(struct flb_in_calyptia_fleet_config, config_dir),
+ "Base path for the configuration directory."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "fleet_id", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_calyptia_fleet_config, fleet_id),
+ "Calyptia Fleet ID."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "fleet_name", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_calyptia_fleet_config, fleet_name),
+ "Calyptia Fleet Name (used to lookup the fleet ID via the cloud API)."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "machine_id", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_calyptia_fleet_config, machine_id),
+ "Agent Machine ID."
+ },
+ {
+ FLB_CONFIG_MAP_INT, "event_fd", "-1",
+ 0, FLB_TRUE, offsetof(struct flb_in_calyptia_fleet_config, event_fd),
+ "Used internally to set the event fd."
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_calyptia_fleet_config, interval_sec),
+ "Set the collector interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_calyptia_fleet_config, interval_nsec),
+ "Set the collector interval (nanoseconds)"
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_calyptia_fleet_plugin = {
+ .name = "calyptia_fleet",
+ .description = "Calyptia Fleet Input",
+ .cb_init = in_calyptia_fleet_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_calyptia_fleet_collect,
+ .cb_resume = cb_in_calyptia_fleet_resume,
+ .cb_pause = cb_in_calyptia_fleet_pause,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_calyptia_fleet_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET|FLB_INPUT_CORO|FLB_IO_OPT_TLS|FLB_INPUT_PRIVATE
+};
diff --git a/src/fluent-bit/plugins/in_collectd/CMakeLists.txt b/src/fluent-bit/plugins/in_collectd/CMakeLists.txt
new file mode 100644
index 000000000..1f8d64074
--- /dev/null
+++ b/src/fluent-bit/plugins/in_collectd/CMakeLists.txt
@@ -0,0 +1,7 @@
+set(src
+ typesdb.c
+ typesdb_parser.c
+ netprot.c
+ in_collectd.c)
+
+FLB_PLUGIN(in_collectd "${src}" "")
diff --git a/src/fluent-bit/plugins/in_collectd/in_collectd.c b/src/fluent-bit/plugins/in_collectd/in_collectd.c
new file mode 100644
index 000000000..06ef5ae8a
--- /dev/null
+++ b/src/fluent-bit/plugins/in_collectd/in_collectd.c
@@ -0,0 +1,226 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <msgpack.h>
+
+#include "in_collectd.h"
+#include "netprot.h"
+#include "typesdb.h"
+
+/*
+ * Max payload size. By default, Collectd sends up to 1452 bytes
+ * per UDP packet, but the limit can be increased up to 65535
+ * bytes through a configuration parameter.
+ *
+ * See network_config_set_buffer_size() in collectd/src/network.c.
+ */
+#define BUFFER_SIZE 65535
+
+#define DEFAULT_LISTEN "0.0.0.0"
+#define DEFAULT_PORT 25826
+
+/* This is where most Linux systems place the default TypesDB */
+#define DEFAULT_TYPESDB "/usr/share/collectd/types.db"
+
+static int in_collectd_callback(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context);
+
+static int in_collectd_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct flb_in_collectd_config *ctx;
+ struct mk_list *tdb;
+ char *listen = DEFAULT_LISTEN;
+ int port = DEFAULT_PORT;
+
+ /* Initialize context */
+ ctx = flb_calloc(1, sizeof(struct flb_in_collectd_config));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = in;
+
+ ctx->bufsize = BUFFER_SIZE;
+ ctx->buf = flb_malloc(ctx->bufsize);
+ if (!ctx->buf) {
+ flb_errno();
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(in, "unable to load configuration");
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Listening address */
+ if (in->host.listen) {
+ listen = in->host.listen;
+ }
+
+ if (strlen(listen) > sizeof(ctx->listen) - 1) {
+ flb_plg_error(ctx->ins, "too long address '%s'", listen);
+ flb_free(ctx);
+ return -1;
+ }
+ strcpy(ctx->listen, listen);
+
+ /* Listening port */
+ if (in->host.port) {
+ port = in->host.port;
+ }
+ snprintf(ctx->port, sizeof(ctx->port), "%hu", (unsigned short) port);
+
+ flb_plg_debug(ctx->ins, "Loading TypesDB from %s", ctx->types_db);
+
+ tdb = typesdb_load_all(ctx, ctx->types_db);
+ if (!tdb) {
+ flb_plg_error(ctx->ins, "failed to load '%s'", ctx->types_db);
+ flb_free(ctx->buf);
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->tdb = tdb;
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ ctx->server_fd = flb_net_server_udp(ctx->port, ctx->listen);
+ if (ctx->server_fd < 0) {
+ flb_plg_error(ctx->ins, "failed to bind to %s:%s", ctx->listen,
+ ctx->port);
+ typesdb_destroy(ctx->tdb);
+ flb_free(ctx->buf);
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Set the collector */
+ ret = flb_input_set_collector_socket(in,
+ in_collectd_callback,
+ ctx->server_fd,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "failed to set up a collector");
+ flb_socket_close(ctx->server_fd);
+ typesdb_destroy(ctx->tdb);
+ flb_free(ctx->buf);
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->coll_fd = ret;
+
+ ret = flb_log_event_encoder_init(&ctx->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "error initializing event encoder : %d", ret);
+
+ flb_socket_close(ctx->server_fd);
+ typesdb_destroy(ctx->tdb);
+ flb_free(ctx->buf);
+ flb_free(ctx);
+
+ return -1;
+ }
+
+ flb_plg_info(ctx->ins, "start listening to %s:%s",
+ ctx->listen, ctx->port);
+
+ return 0;
+}
+
+static int in_collectd_callback(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context)
+{
+ int len;
+ struct flb_in_collectd_config *ctx = in_context;
+
+ len = recv(ctx->server_fd, ctx->buf, ctx->bufsize, 0);
+ if (len < 0) {
+ flb_errno();
+ return -1;
+ }
+ if (len == 0) {
+ return 0;
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+
+ if (netprot_to_msgpack(ctx->buf, len, ctx->tdb, &ctx->log_encoder)) {
+ flb_plg_error(ctx->ins, "netprot_to_msgpack fails");
+
+ return -1;
+ }
+
+ if (ctx->log_encoder.output_length > 0) {
+ flb_input_log_append(i_ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+ }
+
+ return 0;
+}
+
+static int in_collectd_exit(void *data, struct flb_config *config)
+{
+ struct flb_in_collectd_config *ctx = data;
+
+ flb_log_event_encoder_destroy(&ctx->log_encoder);
+ flb_socket_close(ctx->server_fd);
+ flb_pipe_close(ctx->coll_fd);
+ typesdb_destroy(ctx->tdb);
+ flb_free(ctx->buf);
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "typesdb", DEFAULT_TYPESDB,
+ 0, FLB_TRUE, offsetof(struct flb_in_collectd_config, types_db),
+ "Set the types database filename"
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_input_plugin in_collectd_plugin = {
+ .name = "collectd",
+ .description = "collectd input plugin",
+ .cb_init = in_collectd_init,
+ .cb_pre_run = NULL,
+ .cb_collect = NULL,
+ .cb_flush_buf = NULL,
+ .cb_pause = NULL,
+ .cb_resume = NULL,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET_SERVER,
+ .cb_exit = in_collectd_exit
+};
diff --git a/src/fluent-bit/plugins/in_collectd/in_collectd.h b/src/fluent-bit/plugins/in_collectd/in_collectd.h
new file mode 100644
index 000000000..8750c7bf5
--- /dev/null
+++ b/src/fluent-bit/plugins/in_collectd/in_collectd.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_COLLECTD_H
+#define FLB_IN_COLLECTD_H
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+struct flb_in_collectd_config {
+ char *buf;
+ int bufsize;
+
+ /* Server */
+ char listen[256]; /* RFC-2181 */
+ char port[6]; /* RFC-793 */
+
+ /* Sockets */
+ flb_sockfd_t server_fd;
+ flb_pipefd_t coll_fd;
+
+ flb_sds_t types_db;
+ struct mk_list *tdb;
+ struct flb_log_event_encoder log_encoder;
+
+ /* Plugin input instance */
+ struct flb_input_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_collectd/netprot.c b/src/fluent-bit/plugins/in_collectd/netprot.c
new file mode 100644
index 000000000..005db2edb
--- /dev/null
+++ b/src/fluent-bit/plugins/in_collectd/netprot.c
@@ -0,0 +1,308 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This module implements the binary network protocol of collectd.
+ * (https://collectd.org/wiki/index.php/Binary_protocol)
+ *
+ * The only interface you need to care about is netprot_to_msgpack(),
+ * which parses a UDP packet and converts it into MessagePack format.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_endian.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+#include "netprot.h"
+#include "typesdb.h"
+
+#define be16read(x) (be16toh(*(uint16_t *) (x)))
+#define be32read(x) (be32toh(*(uint32_t *) (x)))
+#define be64read(x) (be64toh(*(uint64_t *) (x)))
+
+#define le16read(x) (le16toh(*(uint16_t *) (x)))
+#define le32read(x) (le32toh(*(uint32_t *) (x)))
+#define le64read(x) (le64toh(*(uint64_t *) (x)))
+
+/* Convert a high-resolution time into a normal UNIX time. */
+#define hr2time(x) ((double) (x) / 1073741824)
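+
+/*
+ * Example (illustrative): collectd encodes high-resolution time as
+ * seconds multiplied by 2^30, so hr2time((uint64_t) 1609459200 << 30)
+ * yields 1609459200.0, i.e. 2021-01-01T00:00:00Z.
+ */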
+
+/* Basic data field definitions for collectd */
+#define PART_HOST 0x0000
+#define PART_TIME 0x0001
+#define PART_PLUGIN 0x0002
+#define PART_PLUGIN_INSTANCE 0x0003
+#define PART_TYPE 0x0004
+#define PART_TYPE_INSTANCE 0x0005
+#define PART_VALUE 0x0006
+#define PART_INTERVAL 0x0007
+
+#define PART_TIME_HR 0x0008
+#define PART_INTERVAL_HR 0x0009
+
+/*
+ * The "DS_TYPE_*" are value types for PART_VALUE fields.
+ *
+ * Read https://collectd.org/wiki/index.php/Data_source for what
+ * these types mean.
+ */
+#define DS_TYPE_COUNTER 0
+#define DS_TYPE_GAUGE 1
+#define DS_TYPE_DERIVE 2
+#define DS_TYPE_ABSOLUTE 3
+
+struct netprot_header
+{
+ double time;
+ double interval;
+ char *host;
+ char *plugin;
+ char *plugin_instance;
+ char *type;
+ char *type_instance;
+};
+
+static int netprot_pack_value(char *ptr, int size, struct netprot_header *hdr,
+ struct mk_list *tdb,
+ struct flb_log_event_encoder *encoder)
+{
+ int i;
+ char type;
+ char *pval;
+ uint16_t count;
+ struct typesdb_node *node;
+ int result;
+
+ if (hdr->type == NULL) {
+ flb_error("[in_collectd] invalid data (type is NULL)");
+ return -1;
+ }
+
+ /*
+ * Since each value uses (1 + 8) bytes, the total buffer size must
+ * be the 2-byte header plus <count * 9> bytes.
+ */
+ count = be16read(ptr);
+ if (size != 2 + count * 9) {
+ flb_error("[in_collectd] data corrupted (size=%i, count=%i)",
+ size, count);
+ return -1;
+ }
+
+ /*
+ * We need to query against TypesDB in order to get field names
+ * for the data set values.
+ */
+ node = typesdb_find_node(tdb, hdr->type);
+ if (!node) {
+ flb_error("[in_collectd] no such type found '%s'", hdr->type);
+ return -1;
+ }
+ if (node->count != count) {
+ flb_error("[in_collectd] invalid value for '%s' (%i != %i)",
+ hdr->type, node->count, count);
+ return -1;
+ }
+
+ result = flb_log_event_encoder_begin_record(encoder);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_current_timestamp(encoder);
+ }
+
+ if (hdr->type != NULL &&
+ result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_values(
+ encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("type"),
+ FLB_LOG_EVENT_CSTRING_VALUE(hdr->type));
+ }
+
+ if (hdr->type_instance != NULL &&
+ result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_values(
+ encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("type_instance"),
+ FLB_LOG_EVENT_CSTRING_VALUE(hdr->type_instance));
+ }
+
+ if (hdr->time > 0 &&
+ result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_values(
+ encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("time"),
+ FLB_LOG_EVENT_DOUBLE_VALUE(hdr->time));
+ }
+
+ if (hdr->interval > 0 &&
+ result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_values(
+ encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("interval"),
+ FLB_LOG_EVENT_DOUBLE_VALUE(hdr->interval));
+ }
+
+ if (hdr->plugin != NULL &&
+ result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_values(
+ encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("plugin"),
+ FLB_LOG_EVENT_CSTRING_VALUE(hdr->plugin));
+ }
+
+ if (hdr->plugin_instance != NULL &&
+ result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_values(
+ encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("plugin_instance"),
+ FLB_LOG_EVENT_CSTRING_VALUE(hdr->plugin_instance));
+ }
+
+ if (hdr->host != NULL &&
+ result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_values(
+ encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("host"),
+ FLB_LOG_EVENT_CSTRING_VALUE(hdr->host));
+ }
+
+ for (i = 0; i < count && result == FLB_EVENT_ENCODER_SUCCESS ; i++) {
+ pval = ptr + 2 + count + 8 * i;
+ type = ptr[2 + i];
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_cstring(
+ encoder, node->fields[i]);
+ }
+
+ switch (type) {
+ case DS_TYPE_COUNTER:
+ result = flb_log_event_encoder_append_body_uint64(
+ encoder, be64read(pval));
+ break;
+ case DS_TYPE_GAUGE:
+ result = flb_log_event_encoder_append_body_double(
+ encoder, *((double *) pval));
+ break;
+ case DS_TYPE_DERIVE:
+ result = flb_log_event_encoder_append_body_int64(
+ encoder, (int64_t) be64read(pval));
+ break;
+ case DS_TYPE_ABSOLUTE:
+ result = flb_log_event_encoder_append_body_uint64(
+ encoder, be64read(pval));
+ break;
+ default:
+ flb_error("[in_collectd] unknown data type %i", type);
+
+ result = FLB_EVENT_ENCODER_ERROR_INVALID_ARGUMENT;
+ }
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_commit_record(encoder);
+ }
+ else {
+ flb_log_event_encoder_rollback_record(encoder);
+ }
+
+ if (result != FLB_EVENT_ENCODER_SUCCESS) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Entry point function
+ */
+int netprot_to_msgpack(char *buf, int len, struct mk_list *tdb,
+ struct flb_log_event_encoder *encoder)
+{
+ uint16_t part_type;
+ uint16_t part_len;
+ int size;
+ char *ptr;
+ struct netprot_header hdr = {0};
+
+ while (len >= 4) {
+ part_type = be16read(buf);
+ part_len = be16read(buf + 2);
+
+ if (len < part_len) {
+ flb_error("[in_collectd] data truncated (%i < %i)", len, part_len);
+ return -1;
+ }
+ ptr = buf + 4;
+ size = part_len - 4;
+
+ switch (part_type) {
+ case PART_HOST:
+ if (ptr[size] == '\0') {
+ hdr.host = ptr;
+ }
+ break;
+ case PART_TIME:
+ hdr.time = (double) be64read(ptr);
+ break;
+ case PART_TIME_HR:
+ hdr.time = hr2time(be64read(ptr));
+ break;
+ case PART_PLUGIN:
+ if (ptr[size] == '\0') {
+ hdr.plugin = ptr;
+ }
+ break;
+ case PART_PLUGIN_INSTANCE:
+ if (ptr[size] == '\0') {
+ hdr.plugin_instance = ptr;
+ }
+ break;
+ case PART_TYPE:
+ if (ptr[size] == '\0') {
+ hdr.type = ptr;
+ }
+ break;
+ case PART_TYPE_INSTANCE:
+ if (ptr[size] == '\0') {
+ hdr.type_instance = ptr;
+ }
+ break;
+ case PART_VALUE:
+ if (netprot_pack_value(ptr, size, &hdr, tdb, encoder)) {
+ return -1;
+ }
+ break;
+ case PART_INTERVAL:
+ hdr.interval = (double) be64read(ptr);
+ break;
+ case PART_INTERVAL_HR:
+ hdr.interval = hr2time(be64read(ptr));
+ break;
+ default:
+ flb_debug("[in_collectd] skip unknown type %x", part_type);
+ break;
+ }
+ len -= part_len;
+ buf += part_len;
+ }
+ return 0;
+}
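+
+/*
+ * Worked example (hypothetical datagram): a packet carrying the parts
+ * PART_HOST "web01", PART_TIME_HR, PART_PLUGIN "cpu", PART_TYPE "cpu",
+ * PART_TYPE_INSTANCE "idle" and one PART_VALUE produces a single record
+ * roughly equivalent to
+ *
+ * {"type":"cpu", "type_instance":"idle", "time":..., "plugin":"cpu",
+ * "host":"web01", "value":...}
+ *
+ * where the name of the value field ("value" here) is taken from the
+ * TypesDB entry for the "cpu" type.
+ */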
diff --git a/src/fluent-bit/plugins/in_collectd/netprot.h b/src/fluent-bit/plugins/in_collectd/netprot.h
new file mode 100644
index 000000000..c9292640c
--- /dev/null
+++ b/src/fluent-bit/plugins/in_collectd/netprot.h
@@ -0,0 +1,22 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Convert a binary buffer into MessagePack format */
+int netprot_to_msgpack(char *buf, int len, struct mk_list *tdb,
+ struct flb_log_event_encoder *encoder);
diff --git a/src/fluent-bit/plugins/in_collectd/typesdb.c b/src/fluent-bit/plugins/in_collectd/typesdb.c
new file mode 100644
index 000000000..ef579583c
--- /dev/null
+++ b/src/fluent-bit/plugins/in_collectd/typesdb.c
@@ -0,0 +1,223 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_str.h>
+
+#include "in_collectd.h"
+#include "typesdb.h"
+#include "typesdb_parser.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+
+/* Internal function to load from a single TypesDB */
+static int typesdb_load(struct flb_in_collectd_config *ctx,
+ struct mk_list *tdb, const char *path)
+{
+ int fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "failed to open '%s'", path);
+ return -1;
+ }
+
+ if (typesdb_parse(tdb, fd)) {
+ flb_plg_error(ctx->ins, "failed to parse '%s'", path);
+ close(fd);
+ return -1;
+ }
+ close(fd);
+ return 0;
+}
+
+/*
+ * Load multiple TypesDB files at once. The return value is
+ * a linked list of typesdb_node objects.
+ *
+ * "paths" is a comma-separated list of file names.
+ */
+struct mk_list *typesdb_load_all(struct flb_in_collectd_config *ctx,
+ const char *paths)
+{
+ char *buf;
+ char *state;
+ char *path;
+ struct mk_list *tdb;
+
+ buf = flb_strdup(paths);
+ if (!buf) {
+ flb_errno();
+ return NULL;
+ }
+
+ tdb = flb_malloc(sizeof(struct mk_list));
+ if (!tdb) {
+ flb_errno();
+ flb_free(buf);
+ return NULL;
+ }
+ mk_list_init(tdb);
+
+ path = strtok_r(buf, ",", &state);
+ while (path) {
+ if (typesdb_load(ctx, tdb, path)) {
+ typesdb_destroy(tdb);
+ flb_free(buf);
+ return NULL;
+ }
+ path = strtok_r(NULL, ",", &state);
+ }
+ flb_free(buf);
+ return tdb;
+}
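+
+/*
+ * Usage sketch (illustrative paths): loading two databases at once; the
+ * second one overrides entries of the first because typesdb_find_node()
+ * searches the list from the tail:
+ *
+ * struct mk_list *tdb;
+ * tdb = typesdb_load_all(ctx, "/usr/share/collectd/types.db,/etc/collectd/custom.db");
+ * if (tdb) {
+ *     ...
+ *     typesdb_destroy(tdb);
+ * }
+ */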
+
+void typesdb_destroy(struct mk_list *tdb)
+{
+ struct typesdb_node *node;
+ struct mk_list *head;
+ struct mk_list *tmp;
+
+ mk_list_foreach_safe(head, tmp, tdb) {
+ node = mk_list_entry(head, struct typesdb_node, _head);
+ typesdb_destroy_node(node);
+ }
+ flb_free(tdb);
+}
+
+struct typesdb_node *typesdb_find_node(struct mk_list *tdb, const char *type)
+{
+ struct typesdb_node *node;
+ struct mk_list *head;
+
+ if (type == NULL) {
+ return NULL;
+ }
+
+ /*
+ * Search the linked list from the tail so that later entries
+ * take precedence over earlier ones.
+ */
+ mk_list_foreach_r(head, tdb) {
+ node = mk_list_entry(head, struct typesdb_node, _head);
+ if (strcmp(node->type, type) == 0) {
+ return node;
+ }
+ }
+ return NULL;
+}
+
+struct typesdb_node *typesdb_last_node(struct mk_list *tdb)
+{
+ return mk_list_entry_last(tdb, struct typesdb_node, _head);
+}
+
+/*
+ * The following are API functions to modify a TypesDB instance.
+ */
+int typesdb_add_node(struct mk_list *tdb, const char *type)
+{
+ struct typesdb_node *node;
+
+ node = flb_calloc(1, sizeof(struct typesdb_node));
+ if (!node) {
+ flb_errno();
+ return -1;
+ }
+
+ node->type = flb_strdup(type);
+ if (!node->type) {
+ flb_errno();
+ flb_free(node);
+ return -1;
+ }
+
+ mk_list_add(&node->_head, tdb);
+ return 0;
+}
+
+void typesdb_destroy_node(struct typesdb_node *node)
+{
+ int i;
+
+ flb_free(node->type);
+
+ if (node->fields) {
+ for (i = 0; i < node->count; i++) {
+ flb_free(node->fields[i]);
+ }
+ flb_free(node->fields);
+ }
+ mk_list_del(&node->_head);
+ flb_free(node);
+}
+
+int typesdb_add_field(struct typesdb_node *node, const char *field)
+{
+ char *end;
+ int alloc;
+ char **fields;
+
+ end = strchr(field, ':');
+ if (!end) {
+ return -1;
+ }
+
+ if (node->count >= node->alloc) {
+ alloc = node->alloc > 0 ? node->alloc * 2 : 1;
+ fields = flb_realloc(node->fields, alloc * sizeof(char *));
+ if (!fields) {
+ flb_errno();
+ return -1;
+ }
+ node->alloc = alloc;
+ node->fields = fields;
+ }
+
+ node->fields[node->count] = flb_strndup(field, end - field);
+ if (!node->fields[node->count]) {
+ flb_errno();
+ return -1;
+ }
+ node->count++;
+ return 0;
+}
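+
+/*
+ * Note: the fields array above grows geometrically (1, 2, 4, 8, ...), so
+ * adding N fields triggers O(log N) reallocations.
+ */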
+
+/* A debug function to see the content of TypesDB */
+void typesdb_dump(struct mk_list *tdb)
+{
+ struct mk_list *head;
+ struct typesdb_node *node;
+ int i;
+
+ mk_list_foreach(head, tdb) {
+ node = mk_list_entry(head, struct typesdb_node, _head);
+
+ printf("%s", node->type);
+ for (i = 0; i < node->count; i++) {
+ printf("\t%s", node->fields[i]);
+ }
+ putchar('\n');
+ }
+}
diff --git a/src/fluent-bit/plugins/in_collectd/typesdb.h b/src/fluent-bit/plugins/in_collectd/typesdb.h
new file mode 100644
index 000000000..7da131be0
--- /dev/null
+++ b/src/fluent-bit/plugins/in_collectd/typesdb.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "in_collectd.h"
+
+struct typesdb_node {
+ char *type;
+ int alloc;
+ int count;
+ char **fields;
+ struct mk_list _head;
+};
+
+/* Load and destroy TypesDB */
+struct mk_list *typesdb_load_all(struct flb_in_collectd_config *ctx,
+ const char *paths);
+void typesdb_destroy(struct mk_list *tdb);
+
+/* Find a node in TypesDB */
+struct typesdb_node *typesdb_find_node(struct mk_list *tdb, const char *type);
+struct typesdb_node *typesdb_last_node(struct mk_list *tdb);
+
+/* Modify a TypesDB instance (used in typesdb_parser.c) */
+int typesdb_add_node(struct mk_list *tdb, const char *type);
+void typesdb_destroy_node(struct typesdb_node *node);
+int typesdb_add_field(struct typesdb_node *node, const char *field);
+
+/* For debugging */
+void typesdb_dump(struct mk_list *tdb);
diff --git a/src/fluent-bit/plugins/in_collectd/typesdb_parser.c b/src/fluent-bit/plugins/in_collectd/typesdb_parser.c
new file mode 100644
index 000000000..5e237ffaf
--- /dev/null
+++ b/src/fluent-bit/plugins/in_collectd/typesdb_parser.c
@@ -0,0 +1,214 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file implements a collectd 5.x compatible parser for types.db(5).
+ *
+ * Note: it internally implements a finite state machine that consumes a
+ * single char at once, and pushes parsed tokens via typesdb_* methods.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_str.h>
+
+#include "typesdb.h"
+#include "typesdb_parser.h"
+
+#define TDB_INVALID -1
+#define TDB_INIT 0
+#define TDB_LEFT 1
+#define TDB_SEP 2
+#define TDB_RIGHT 3
+#define TDB_RIGHT_SEP 4
+#define TDB_COMMENT 5
+
+/* See collectd/src/daemon/types_list.c */
+#define MAX_LINE_SIZE 4096
+
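+/*
+ * For illustration, a types.db(5) line such as
+ *
+ * if_octets rx:DERIVE:0:U, tx:DERIVE:0:U
+ *
+ * is tokenized as the type name "if_octets" (TDB_LEFT) followed by the
+ * data-source fields "rx" and "tx" (TDB_RIGHT); typesdb_add_field() keeps
+ * only the text before the first ':' of each field.
+ */
+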
+/*
+ * tdb_* are state functions that take a single character as input.
+ * Each performs an action based on the input and returns the next state.
+ */
+static int tdb_init(char c, struct mk_list *tdb, char *buf)
+{
+ switch (c) {
+ case '#':
+ return TDB_COMMENT;
+ case '\r':
+ case '\n':
+ return TDB_INIT;
+ default:
+ buf[0] = c;
+ buf[1] = '\0';
+ return TDB_LEFT;
+ }
+}
+
+static int tdb_left(char c, struct mk_list *tdb, char *buf)
+{
+ int len;
+
+ switch (c) {
+ case ' ':
+ if (typesdb_add_node(tdb, buf)) {
+ return TDB_INVALID;
+ }
+ return TDB_SEP;
+ case '\r':
+ case '\n':
+ return TDB_INVALID;
+ default:
+ len = strlen(buf);
+ if (len >= MAX_LINE_SIZE - 1) {
+ return TDB_INVALID;
+ }
+ buf[len] = c;
+ buf[++len] = '\0';
+ return TDB_LEFT;
+ }
+}
+
+static int tdb_sep(char c, struct mk_list *tdb, char *buf)
+{
+ switch (c) {
+ case ' ':
+ return TDB_SEP;
+ case '\r':
+ case '\n':
+ return TDB_INVALID;
+ default:
+ buf[0] = c;
+ buf[1] = '\0';
+ return TDB_RIGHT;
+ }
+}
+
+static int tdb_right(char c, struct mk_list *tdb, char *buf)
+{
+ int len;
+ struct typesdb_node *node = typesdb_last_node(tdb);
+
+ switch (c) {
+ case ' ':
+ case ',':
+ if (typesdb_add_field(node, buf)) {
+ flb_error("[in_collectd] cannot add value '%s'", buf);
+ return TDB_INVALID;
+ }
+ return TDB_RIGHT_SEP;
+ case '\r':
+ case '\n':
+ if (typesdb_add_field(node, buf)) {
+ flb_error("[in_collectd] cannot add value '%s'", buf);
+ return TDB_INVALID;
+ }
+ return TDB_INIT;
+ default:
+ len = strlen(buf);
+ if (len >= MAX_LINE_SIZE - 1) {
+ flb_error("[in_collectd] line too long > %i", MAX_LINE_SIZE);
+ return TDB_INVALID;
+ }
+ buf[len] = c;
+ buf[++len] = '\0';
+ return TDB_RIGHT;
+ }
+}
+
+static int tdb_right_sep(char c, struct mk_list *tdb, char *buf)
+{
+ switch (c) {
+ case ' ':
+ case ',':
+ return TDB_RIGHT_SEP;
+ case '\r':
+ case '\n':
+ return TDB_INIT;
+ default:
+ buf[0] = c;
+ buf[1] = '\0';
+ return TDB_RIGHT;
+ }
+}
+
+static int tdb_comment(char c, struct mk_list *tdb, char *buf)
+{
+ switch (c) {
+ case '\r':
+ case '\n':
+ return TDB_INIT;
+ default:
+ return TDB_COMMENT;
+ }
+}
+
+/*
+ * Entry point function
+ */
+int typesdb_parse(struct mk_list *tdb, int fp)
+{
+ char tmp[1024];
+ char buf[MAX_LINE_SIZE];
+ char c;
+ int i;
+ int bytes;
+ int state = TDB_INIT;
+
+ while (1) {
+ bytes = read(fp, tmp, 1024);
+ if (bytes < 0) {
+ flb_errno();
+ return bytes;
+ }
+ if (bytes == 0) {
+ return 0;
+ }
+ for (i = 0; i < bytes; i++) {
+ c = tmp[i];
+ switch (state) {
+ case TDB_INVALID:
+ return -1;
+ case TDB_INIT:
+ state = tdb_init(c, tdb, buf);
+ break;
+ case TDB_LEFT:
+ state = tdb_left(c, tdb, buf);
+ break;
+ case TDB_SEP:
+ state = tdb_sep(c, tdb, buf);
+ break;
+ case TDB_RIGHT:
+ state = tdb_right(c, tdb, buf);
+ break;
+ case TDB_RIGHT_SEP:
+ state = tdb_right_sep(c, tdb, buf);
+ break;
+ case TDB_COMMENT:
+ state = tdb_comment(c, tdb, buf);
+ break;
+ default:
+ flb_error("[in_collectd] unknown state %i", state);
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_collectd/typesdb_parser.h b/src/fluent-bit/plugins/in_collectd/typesdb_parser.h
new file mode 100644
index 000000000..985570134
--- /dev/null
+++ b/src/fluent-bit/plugins/in_collectd/typesdb_parser.h
@@ -0,0 +1,20 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+int typesdb_parse(struct mk_list *tdb, int fp);
diff --git a/src/fluent-bit/plugins/in_cpu/CMakeLists.txt b/src/fluent-bit/plugins/in_cpu/CMakeLists.txt
new file mode 100644
index 000000000..4ed82c5d7
--- /dev/null
+++ b/src/fluent-bit/plugins/in_cpu/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ cpu.c)
+
+FLB_PLUGIN(in_cpu "${src}" "")
diff --git a/src/fluent-bit/plugins/in_cpu/cpu.c b/src/fluent-bit/plugins/in_cpu/cpu.c
new file mode 100644
index 000000000..5d049eb92
--- /dev/null
+++ b/src/fluent-bit/plugins/in_cpu/cpu.c
@@ -0,0 +1,672 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_pack.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <math.h>
+
+#include "cpu.h"
+
+static inline void snapshot_key_format(int cpus, struct cpu_snapshot *snap_arr)
+{
+ int i;
+ struct cpu_snapshot *snap;
+
+ snap = &snap_arr[0];
+ memcpy(snap->k_cpu.name, "cpu", 3);
+ snap->k_cpu.name[3] = '\0';
+
+ for (i = 1; i <= cpus; i++) {
+ snap = (struct cpu_snapshot *) &snap_arr[i];
+ CPU_KEY_FORMAT(snap, cpu, i);
+ CPU_KEY_FORMAT(snap, user, i);
+ CPU_KEY_FORMAT(snap, system, i);
+ }
+}
+
+static int snapshots_init(int cpus, struct cpu_stats *cstats)
+{
+ cstats->snap_a = flb_calloc(1, sizeof(struct cpu_snapshot) * (cpus + 1));
+ if (!cstats->snap_a) {
+ flb_errno();
+ return -1;
+ }
+
+ cstats->snap_b = flb_malloc(sizeof(struct cpu_snapshot) * (cpus + 1));
+ if (!cstats->snap_b) {
+ flb_errno();
+ return -1;
+ }
+
+ /* Initialize each array */
+ snapshot_key_format(cpus, cstats->snap_a);
+ snapshot_key_format(cpus, cstats->snap_b);
+ cstats->snap_active = CPU_SNAP_ACTIVE_A;
+ return 0;
+}
+
+static inline void snapshots_switch(struct cpu_stats *cstats)
+{
+ if (cstats->snap_active == CPU_SNAP_ACTIVE_A) {
+ cstats->snap_active = CPU_SNAP_ACTIVE_B;
+ }
+ else {
+ cstats->snap_active = CPU_SNAP_ACTIVE_A;
+ }
+}
+
+/* Retrieve CPU load from the system (through ProcFS) */
+static inline double proc_cpu_load(int cpus, struct cpu_stats *cstats)
+{
+ int i;
+ int ret;
+ char line[255];
+ size_t len = 0;
+ char *fmt;
+ FILE *f;
+ struct cpu_snapshot *s;
+ struct cpu_snapshot *snap_arr;
+
+ f = fopen("/proc/stat", "r");
+ if (f == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ if (cstats->snap_active == CPU_SNAP_ACTIVE_A) {
+ snap_arr = cstats->snap_a;
+ }
+ else {
+ snap_arr = cstats->snap_b;
+ }
+
+ /* Always read (n_cpus + 1) lines */
+ for (i = 0; i <= cpus; i++) {
+ if (fgets(line, sizeof(line) - 1, f)) {
+ len = strlen(line);
+ if (line[len - 1] == '\n') {
+ line[--len] = 0;
+ if (len && line[len - 1] == '\r') {
+ line[--len] = 0;
+ }
+ }
+
+ s = &snap_arr[i];
+ if (i == 0) {
+ fmt = " cpu %lu %lu %lu %lu %lu";
+ ret = sscanf(line,
+ fmt,
+ &s->v_user,
+ &s->v_nice,
+ &s->v_system,
+ &s->v_idle,
+ &s->v_iowait);
+ if (ret < 5) {
+ fclose(f);
+ return -1;
+ }
+ }
+ else {
+ fmt = " %s %lu %lu %lu %lu %lu";
+ ret = sscanf(line,
+ fmt,
+ s->v_cpuid,
+ &s->v_user,
+ &s->v_nice,
+ &s->v_system,
+ &s->v_idle,
+ &s->v_iowait);
+ if (ret <= 5) {
+ fclose(f);
+ return -1;
+ }
+ }
+ }
+ else {
+ break;
+ }
+ }
+
+ fclose(f);
+ return 0;
+}
+
+/* Retrieve CPU stats for a given PID */
+static inline double proc_cpu_pid_load(struct flb_cpu *ctx,
+ pid_t pid, struct cpu_stats *cstats)
+{
+ int ret;
+ char *p;
+ char line[255];
+ char *fmt = ") %c %d %d %d %d %d %u %lu %lu %lu %lu %lu %lu ";
+ FILE *f;
+ /* sscanf variables (ss_N) to perform scanning */
+ unsigned char ss_state;
+ unsigned int ss_ppid;
+ unsigned int ss_pgrp;
+ unsigned int ss_session;
+ unsigned int ss_tty_nr;
+ unsigned int ss_tpgid;
+ unsigned int ss_flags;
+ unsigned long ss_minflt;
+ unsigned long ss_cmdinflt;
+ unsigned long ss_majflt;
+ unsigned long ss_cmajflt;
+ struct cpu_snapshot *s;
+
+ /* Read the process stats */
+ snprintf(line, sizeof(line) - 1, "/proc/%d/stat", pid);
+ f = fopen(line, "r");
+ if (f == NULL) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "error opening stats file %s", line);
+ return -1;
+ }
+
+ if (cstats->snap_active == CPU_SNAP_ACTIVE_A) {
+ s = cstats->snap_a;
+ }
+ else {
+ s = cstats->snap_b;
+ }
+
+ if (fgets(line, sizeof(line) - 1, f) == NULL) {
+ flb_plg_error(ctx->ins, "cannot read process %ld stats", (long) pid);
+ fclose(f);
+ return -1;
+ }
+
+ errno = 0;
+
+ /* skip first two values (after process name) */
+ p = line;
+ while (*p != ')') p++;
+
+ errno = 0;
+ ret = sscanf(p,
+ fmt,
+ &ss_state,
+ &ss_ppid,
+ &ss_pgrp,
+ &ss_session,
+ &ss_tty_nr,
+ &ss_tpgid,
+ &ss_flags,
+ &ss_minflt,
+ &ss_cmdinflt,
+ &ss_majflt,
+ &ss_cmajflt,
+ &s->v_user,
+ &s->v_system);
+ if (errno != 0) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "pid sscanf failed ret=%i", ret);
+ }
+
+ fclose(f);
+ return 0;
+}
+
+/*
+ * Given the two snapshots, calculate the percentage used in user and
+ * kernel space, and return the active snapshot.
+ */
+struct cpu_snapshot *snapshot_percent(struct cpu_stats *cstats,
+ struct flb_cpu *ctx)
+{
+ int i;
+ unsigned long sum_pre;
+ unsigned long sum_now;
+ struct cpu_snapshot *arr_pre = cstats->snap_b;
+ struct cpu_snapshot *arr_now = cstats->snap_a;
+ struct cpu_snapshot *snap_pre = NULL;
+ struct cpu_snapshot *snap_now = NULL;
+
+ if (cstats->snap_active == CPU_SNAP_ACTIVE_A) {
+ arr_now = cstats->snap_a;
+ arr_pre = cstats->snap_b;
+ }
+ else if (cstats->snap_active == CPU_SNAP_ACTIVE_B) {
+ arr_now = cstats->snap_b;
+ arr_pre = cstats->snap_a;
+ }
+
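+    /* entry 0 is the system-wide aggregate; entries 1..n map to cpu0..cpu(n-1) */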
+ for (i = 0; i <= ctx->n_processors; i++) {
+ snap_pre = &arr_pre[i];
+ snap_now = &arr_now[i];
+
+        /* Calculate overall CPU usage (user space + kernel space) */
+ sum_pre = (snap_pre->v_user + snap_pre->v_nice + snap_pre->v_system);
+ sum_now = (snap_now->v_user + snap_now->v_nice + snap_now->v_system);
+
+ if (i == 0) {
+ snap_now->p_cpu = CPU_METRIC_SYS_AVERAGE(sum_pre, sum_now, ctx);
+ }
+ else {
+ snap_now->p_cpu = CPU_METRIC_USAGE(sum_pre, sum_now, ctx);
+ }
+
+ /* User space CPU% */
+ sum_pre = (snap_pre->v_user + snap_pre->v_nice);
+ sum_now = (snap_now->v_user + snap_now->v_nice);
+ if (i == 0) {
+ snap_now->p_user = CPU_METRIC_SYS_AVERAGE(sum_pre, sum_now, ctx);
+ }
+ else {
+ snap_now->p_user = CPU_METRIC_USAGE(sum_pre, sum_now, ctx);
+ }
+
+ /* Kernel space CPU% */
+ if (i == 0) {
+ snap_now->p_system = CPU_METRIC_SYS_AVERAGE(snap_pre->v_system,
+ snap_now->v_system,
+ ctx);
+ }
+ else {
+ snap_now->p_system = CPU_METRIC_USAGE(snap_pre->v_system,
+ snap_now->v_system,
+ ctx);
+ }
+
+#ifdef FLB_TRACE
+ if (i == 0) {
+ flb_trace("cpu[all] all=%s%f%s user=%s%f%s system=%s%f%s",
+ ANSI_BOLD, snap_now->p_cpu, ANSI_RESET,
+ ANSI_BOLD, snap_now->p_user, ANSI_RESET,
+ ANSI_BOLD, snap_now->p_system, ANSI_RESET);
+ }
+ else {
+ flb_trace("cpu[i=%i] all=%f user=%f system=%f",
+ i-1, snap_now->p_cpu,
+ snap_now->p_user, snap_now->p_system);
+ }
+#endif
+ }
+
+ return arr_now;
+}
+
+struct cpu_snapshot *snapshot_pid_percent(struct cpu_stats *cstats,
+ struct flb_cpu *ctx)
+{
+ unsigned long sum_pre;
+ unsigned long sum_now;
+ struct cpu_snapshot *snap_pre = NULL;
+ struct cpu_snapshot *snap_now = NULL;
+
+ if (cstats->snap_active == CPU_SNAP_ACTIVE_A) {
+ snap_now = cstats->snap_a;
+ snap_pre = cstats->snap_b;
+ }
+ else if (cstats->snap_active == CPU_SNAP_ACTIVE_B) {
+ snap_now = cstats->snap_b;
+ snap_pre = cstats->snap_a;
+ }
+
+    /* Calculate overall CPU usage (user space + kernel space) */
+ sum_pre = (snap_pre->v_user + snap_pre->v_system);
+ sum_now = (snap_now->v_user + snap_now->v_system);
+
+ snap_now->p_cpu = CPU_METRIC_SYS_AVERAGE(sum_pre, sum_now, ctx);
+
+ /* User space CPU% */
+ snap_now->p_user = CPU_METRIC_SYS_AVERAGE(snap_pre->v_user,
+ snap_now->v_user,
+ ctx);
+
+ /* Kernel space CPU% */
+ snap_now->p_system = CPU_METRIC_SYS_AVERAGE(snap_pre->v_system,
+ snap_now->v_system,
+ ctx);
+
+#ifdef FLB_TRACE
+ flb_trace("cpu[pid=%i] all=%s%f%s user=%s%f%s system=%s%f%s",
+ ctx->pid,
+ ANSI_BOLD, snap_now->p_cpu, ANSI_RESET,
+ ANSI_BOLD, snap_now->p_user, ANSI_RESET,
+ ANSI_BOLD, snap_now->p_system, ANSI_RESET);
+#endif
+
+ return snap_now;
+}
+
+static int cpu_collect_system(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int i;
+ int ret;
+ struct flb_cpu *ctx = in_context;
+ struct cpu_stats *cstats = &ctx->cstats;
+ struct cpu_snapshot *s;
+
+ (void) config;
+
+ /* Get overall system CPU usage */
+ ret = proc_cpu_load(ctx->n_processors, cstats);
+ if (ret != 0) {
+ flb_plg_error(ins, "error retrieving overall system CPU stats");
+ return -1;
+ }
+
+ s = snapshot_percent(cstats, ctx);
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(
+ &ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("cpu_p"),
+ FLB_LOG_EVENT_DOUBLE_VALUE(s[0].p_cpu),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("user_p"),
+ FLB_LOG_EVENT_DOUBLE_VALUE(s[0].p_user),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("system_p"),
+ FLB_LOG_EVENT_DOUBLE_VALUE(s[0].p_system));
+ }
+
+ for (i = 1;
+ i < ctx->n_processors + 1 &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ i++) {
+ struct cpu_snapshot *e = &s[i];
+
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(e->k_cpu.name),
+ FLB_LOG_EVENT_DOUBLE_VALUE(e->p_cpu),
+
+ FLB_LOG_EVENT_CSTRING_VALUE(e->k_user.name),
+ FLB_LOG_EVENT_DOUBLE_VALUE(e->p_user),
+
+ FLB_LOG_EVENT_CSTRING_VALUE(e->k_system.name),
+ FLB_LOG_EVENT_DOUBLE_VALUE(e->p_system));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ snapshots_switch(cstats);
+
+ flb_plg_trace(ins, "CPU %0.2f%%", s->p_cpu);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "Error encoding record : %d", ret);
+
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+
+ return ret;
+}
+
+static int cpu_collect_pid(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ struct flb_cpu *ctx = in_context;
+ struct cpu_stats *cstats = &ctx->cstats;
+ struct cpu_snapshot *s;
+
+ (void) config;
+
+    /* Get CPU usage for the configured PID */
+ ret = proc_cpu_pid_load(ctx, ctx->pid, cstats);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "error retrieving PID CPU stats");
+ return -1;
+ }
+
+ s = snapshot_pid_percent(cstats, ctx);
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(
+ &ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("cpu_p"),
+ FLB_LOG_EVENT_DOUBLE_VALUE(s->p_cpu),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("user_p"),
+ FLB_LOG_EVENT_DOUBLE_VALUE(s->p_user),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("system_p"),
+ FLB_LOG_EVENT_DOUBLE_VALUE(s->p_system));
+ }
+
+ snapshots_switch(cstats);
+ flb_plg_trace(ctx->ins, "PID %i CPU %0.2f%%", ctx->pid, s->p_cpu);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "Error encoding record : %d", ret);
+
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+
+ return ret;
+}
+
+/* Callback to gather CPU usage between now and previous snapshot */
+static int cb_cpu_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_cpu *ctx = in_context;
+
+    /* if a PID is set, get CPU stats only for that process */
+ if (ctx->pid >= 0) {
+ return cpu_collect_pid(ins, config, in_context);
+ }
+ else {
+ /* Get all system CPU stats */
+ return cpu_collect_system(ins, config, in_context);
+ }
+}
+
+/* Init CPU input */
+static int cb_cpu_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct flb_cpu *ctx;
+ (void) data;
+
+ /* Allocate space for the configuration */
+ ctx = flb_calloc(1, sizeof(struct flb_cpu));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = in;
+
+ ret = flb_input_config_map_set(in, (void *)ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Gather number of processors and CPU ticks */
+ ctx->n_processors = sysconf(_SC_NPROCESSORS_ONLN);
+ ctx->cpu_ticks = sysconf(_SC_CLK_TCK);
+
+ /* Collection time setting */
+ if (ctx->interval_sec <= 0 && ctx->interval_nsec <= 0) {
+ /* Illegal settings. Override them. */
+ ctx->interval_sec = atoi(DEFAULT_INTERVAL_SEC);
+ ctx->interval_nsec = atoi(DEFAULT_INTERVAL_NSEC);
+ }
+
+ /* Initialize buffers for CPU stats */
+ ret = snapshots_init(ctx->n_processors, &ctx->cstats);
+ if (ret != 0) {
+ flb_free(ctx);
+ return -1;
+ }
+
+    /* Take an initial CPU reading, ready to be updated once the collector callback fires */
+ if (ctx->pid > 0) {
+ ret = proc_cpu_pid_load(ctx, ctx->pid, &ctx->cstats);
+ }
+ else {
+ ret = proc_cpu_load(ctx->n_processors, &ctx->cstats);
+ }
+ if (ret != 0) {
+ flb_error("[cpu] Could not obtain CPU data");
+ flb_free(ctx);
+ return -1;
+ }
+
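+    /*
+     * The initial reading above was stored in snapshot A; marking B as the
+     * active slot makes the first collector pass fill B and compute its
+     * deltas against this baseline.
+     */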
+ ctx->cstats.snap_active = CPU_SNAP_ACTIVE_B;
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ /* Set our collector based on time, CPU usage every 1 second */
+ ret = flb_input_set_collector_time(in,
+ cb_cpu_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not set collector for CPU input plugin");
+ return -1;
+ }
+ ctx->coll_fd = ret;
+
+ ret = flb_log_event_encoder_init(&ctx->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "error initializing event encoder : %d", ret);
+
+ /* Release snapshots */
+ flb_free(ctx->cstats.snap_a);
+ flb_free(ctx->cstats.snap_b);
+
+ /* done */
+ flb_free(ctx);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static void cb_cpu_pause(void *data, struct flb_config *config)
+{
+ struct flb_cpu *ctx = data;
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+}
+
+static void cb_cpu_resume(void *data, struct flb_config *config)
+{
+ struct flb_cpu *ctx = data;
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+}
+
+static int cb_cpu_exit(void *data, struct flb_config *config)
+{
+ (void) *config;
+ struct flb_cpu *ctx = data;
+ struct cpu_stats *cs;
+
+ flb_log_event_encoder_destroy(&ctx->log_encoder);
+
+ /* Release snapshots */
+ cs = &ctx->cstats;
+ flb_free(cs->snap_a);
+ flb_free(cs->snap_b);
+
+ /* done */
+ flb_free(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_INT, "pid", "-1",
+ 0, FLB_TRUE, offsetof(struct flb_cpu, pid),
+     "Configure a single process to measure usage via its PID"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct flb_cpu, interval_sec),
+ "Set the collector interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct flb_cpu, interval_nsec),
+ "Set the collector interval (sub seconds)"
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_cpu_plugin = {
+ .name = "cpu",
+ .description = "CPU Usage",
+ .cb_init = cb_cpu_init,
+ .cb_pre_run = NULL,
+ .cb_collect = cb_cpu_collect,
+ .cb_flush_buf = NULL,
+ .config_map = config_map,
+ .cb_pause = cb_cpu_pause,
+ .cb_resume = cb_cpu_resume,
+ .cb_exit = cb_cpu_exit
+};
diff --git a/src/fluent-bit/plugins/in_cpu/cpu.h b/src/fluent-bit/plugins/in_cpu/cpu.h
new file mode 100644
index 000000000..93cbd88c1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_cpu/cpu.h
@@ -0,0 +1,129 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_CPU_H
+#define FLB_IN_CPU_H
+
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+/* Default collection time: every 1 second (0 nanoseconds) */
+#define DEFAULT_INTERVAL_SEC "1"
+#define DEFAULT_INTERVAL_NSEC "0"
+#define IN_CPU_KEY_LEN 16
+
+struct cpu_key {
+ uint8_t length;
+ char name[IN_CPU_KEY_LEN];
+};
+
+struct cpu_snapshot {
+ /* data snapshots */
+ char v_cpuid[8];
+ unsigned long v_user;
+ unsigned long v_nice;
+ unsigned long v_system;
+ unsigned long v_idle;
+ unsigned long v_iowait;
+
+ /* percent values */
+ double p_cpu; /* Overall CPU usage */
+ double p_user; /* user space (user + nice) */
+ double p_system; /* kernel space percent */
+
+ /* necessary... */
+ struct cpu_key k_cpu;
+ struct cpu_key k_user;
+ struct cpu_key k_system;
+};
+
+#define CPU_SNAP_ACTIVE_A 0
+#define CPU_SNAP_ACTIVE_B 1
+
+struct cpu_stats {
+ uint8_t snap_active;
+
+ /* CPU snapshots, we always keep two snapshots */
+ struct cpu_snapshot *snap_a;
+ struct cpu_snapshot *snap_b;
+};
+
+/* CPU Input configuration & context */
+struct flb_cpu {
+ /* setup */
+ pid_t pid; /* optional PID */
+ int n_processors; /* number of core processors */
+ int cpu_ticks; /* CPU ticks (Kernel setting) */
+ int coll_fd; /* collector id/fd */
+ int interval_sec; /* interval collection time (Second) */
+ int interval_nsec; /* interval collection time (Nanosecond) */
+ struct cpu_stats cstats;
+ struct flb_input_instance *ins;
+ struct flb_log_event_encoder log_encoder;
+};
+
+#define CPU_KEY_FORMAT(s, key, i) \
+ s->k_##key.length = snprintf(s->k_##key.name, \
+ IN_CPU_KEY_LEN, \
+ "cpu%i.p_%s", i - 1, #key)
+
+#define ULL_ABS(a, b) (a > b) ? a - b : b - a
+
+/*
+ * This routine calculates the average CPU utilization of the system. It
+ * takes into consideration the number of CPU cores, so it returns a value
+ * between 0 and 100 based on 'capacity'.
+ */
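+/*
+ * For example, with cpu_ticks = 100, two cores, a 1 second interval and a
+ * delta of 50 ticks, the result is ((50 / 100) * 100) / 2 = 25%.
+ */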
+static inline double CPU_METRIC_SYS_AVERAGE(unsigned long pre,
+ unsigned long now,
+ struct flb_cpu *ctx)
+{
+ double diff;
+ double total = 0;
+
+ if (pre == now) {
+ return 0.0;
+ }
+
+ diff = ULL_ABS(now, pre);
+ total = (((diff / ctx->cpu_ticks) * 100) / ctx->n_processors) / (ctx->interval_sec + 1e-9*ctx->interval_nsec);
+
+ return total;
+}
+
+/* Returns the CPU % utilization of a given CPU core */
+static inline double CPU_METRIC_USAGE(unsigned long pre, unsigned long now,
+ struct flb_cpu *ctx)
+{
+ double diff;
+ double total = 0;
+
+ if (pre == now) {
+ return 0.0;
+ }
+
+ diff = ULL_ABS(now, pre);
+
+ total = ((diff * 100) / ctx->cpu_ticks) / (ctx->interval_sec + 1e-9*ctx->interval_nsec);
+ return total;
+}
+
+#endif
diff --git a/src/fluent-bit/plugins/in_disk/CMakeLists.txt b/src/fluent-bit/plugins/in_disk/CMakeLists.txt
new file mode 100644
index 000000000..a969ad47e
--- /dev/null
+++ b/src/fluent-bit/plugins/in_disk/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ in_disk.c)
+
+FLB_PLUGIN(in_disk "${src}" "")
diff --git a/src/fluent-bit/plugins/in_disk/in_disk.c b/src/fluent-bit/plugins/in_disk/in_disk.c
new file mode 100644
index 000000000..607d030b1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_disk/in_disk.c
@@ -0,0 +1,387 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_pack.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+
+#include "in_disk.h"
+
+#define LINE_SIZE 256
+#define BUF_SIZE 32
+
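+/*
+ * Copy into 'buf' the next token of 'line' delimited by 'separator',
+ * starting at *idx. Consecutive separators are skipped; NULL is returned
+ * at end of line or when the token does not fit into 'buf'.
+ */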
+static char *shift_line(const char *line, char separator, int *idx,
+ char *buf, int buf_size)
+{
+ char pack_mode = FLB_FALSE;
+ int idx_buf = 0;
+
+ while (1) {
+ if (line[*idx] == '\0') {
+ /* end of line */
+ return NULL;
+ }
+ else if (line[*idx] != separator) {
+ pack_mode = FLB_TRUE;
+ buf[idx_buf] = line[*idx];
+ idx_buf++;
+
+ if (idx_buf >= buf_size) {
+ buf[idx_buf-1] = '\0';
+ return NULL;
+ }
+ }
+ else if (pack_mode == FLB_TRUE) {
+ buf[idx_buf] = '\0';
+ return buf;
+ }
+ *idx += 1;
+ }
+}
+
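+/*
+ * Parse /proc/diskstats: on each line, field 3 is the device name, field 6
+ * the number of sectors read and field 10 the number of sectors written.
+ * The previous totals are preserved so the collector can report deltas.
+ */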
+static int update_disk_stats(struct flb_in_disk_config *ctx)
+{
+ char line[LINE_SIZE] = {0};
+ char buf[BUF_SIZE] = {0};
+ char skip_line = FLB_FALSE;
+ uint64_t temp_total = 0;
+ FILE *fp = NULL;
+ int i_line = 0;
+ int i_entry = 0;
+ int i_field = 0;
+
+ fp = fopen("/proc/diskstats", "r");
+ if (fp == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ while (fgets(line, LINE_SIZE-1, fp) != NULL) {
+ i_line = 0;
+ i_field = 0;
+ skip_line = FLB_FALSE;
+ while (skip_line != FLB_TRUE &&
+ shift_line(line, ' ', &i_line, buf, BUF_SIZE-1) != NULL) {
+ i_field++;
+ switch(i_field) {
+ case 3: /* device name */
+ if (ctx->dev_name != NULL && strstr(buf, ctx->dev_name) == NULL) {
+ skip_line = FLB_TRUE;
+ }
+ break;
+ case 6: /* sectors read */
+ temp_total = strtoull(buf, NULL, 10);
+ ctx->prev_read_total[i_entry] = ctx->read_total[i_entry];
+ ctx->read_total[i_entry] = temp_total;
+ break;
+ case 10: /* sectors written */
+ temp_total = strtoull(buf, NULL, 10);
+ ctx->prev_write_total[i_entry] = ctx->write_total[i_entry];
+ ctx->write_total[i_entry] = temp_total;
+
+ skip_line = FLB_TRUE;
+ break;
+ default:
+ continue;
+ }
+ }
+ i_entry++;
+ }
+
+ fclose(fp);
+ return 0;
+}
+
+
+/* cb_collect callback */
+static int in_disk_collect(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context)
+{
+ unsigned long write_total;
+ unsigned long read_total;
+ int entry;
+ struct flb_in_disk_config *ctx;
+ int ret;
+ int i;
+
+ (void) *config;
+
+ ret = 0;
+ ctx = (struct flb_in_disk_config *) in_context;
+ entry = ctx->entry;
+
+ /* The type of sector size is unsigned long in kernel source */
+ read_total = 0;
+ write_total = 0;
+
+ update_disk_stats(ctx);
+
+ if (ctx->first_snapshot == FLB_TRUE) {
+        ctx->first_snapshot = FLB_FALSE; /* no previous totals to diff against yet, skip reporting */
+ }
+ else {
+ for (i = 0; i < entry; i++) {
+ if (ctx->read_total[i] >= ctx->prev_read_total[i]) {
+ read_total += ctx->read_total[i] - ctx->prev_read_total[i];
+ }
+ else {
+ /* Overflow */
+ read_total += ctx->read_total[i] +
+ (ULONG_MAX - ctx->prev_read_total[i]);
+ }
+
+ if (ctx->write_total[i] >= ctx->prev_write_total[i]) {
+ write_total += ctx->write_total[i] - ctx->prev_write_total[i];
+ }
+ else {
+ /* Overflow */
+ write_total += ctx->write_total[i] +
+ (ULONG_MAX - ctx->prev_write_total[i]);
+ }
+ }
+
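+        /*
+         * /proc/diskstats accounts sectors in fixed 512-byte units,
+         * regardless of the device's physical sector size.
+         */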
+ read_total *= 512;
+ write_total *= 512;
+
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(
+ &ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(STR_KEY_READ),
+ FLB_LOG_EVENT_UINT64_VALUE(read_total),
+
+ FLB_LOG_EVENT_CSTRING_VALUE(STR_KEY_WRITE),
+ FLB_LOG_EVENT_UINT64_VALUE(write_total));
+ }
+
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(i_ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ ret = 0;
+ }
+ else {
+ flb_plg_error(i_ins, "Error encoding record : %d", ret);
+
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+ }
+
+ return 0;
+}
+
+static int get_diskstats_entries(void)
+{
+ char line[LINE_SIZE] = {0};
+ int ret = 0;
+ FILE *fp = NULL;
+
+ fp = fopen("/proc/diskstats", "r");
+ if (fp == NULL) {
+ perror("fopen");
+ return 0;
+ }
+ while(fgets(line, LINE_SIZE-1, fp) != NULL) {
+ ret++;
+ }
+
+ fclose(fp);
+ return ret;
+}
+
+static int configure(struct flb_in_disk_config *disk_config,
+ struct flb_input_instance *in)
+{
+ (void) *in;
+ int entry = 0;
+ int i;
+ int ret;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *)disk_config);
+ if (ret == -1) {
+ flb_plg_error(in, "unable to load configuration.");
+ return -1;
+ }
+
+ /* interval settings */
+ if (disk_config->interval_sec <= 0 && disk_config->interval_nsec <= 0) {
+ /* Illegal settings. Override them. */
+ disk_config->interval_sec = atoi(DEFAULT_INTERVAL_SEC);
+ disk_config->interval_nsec = atoi(DEFAULT_INTERVAL_NSEC);
+ }
+
+ entry = get_diskstats_entries();
+ if (entry == 0) {
+ /* no entry to count */
+ return -1;
+ }
+
+ disk_config->read_total = (uint64_t*)flb_malloc(sizeof(uint64_t)*entry);
+ disk_config->write_total = (uint64_t*)flb_malloc(sizeof(uint64_t)*entry);
+ disk_config->prev_read_total = (uint64_t*)flb_malloc(sizeof(uint64_t)*entry);
+ disk_config->prev_write_total = (uint64_t*)flb_malloc(sizeof(uint64_t)*entry);
+ disk_config->entry = entry;
+
+ if ( disk_config->read_total == NULL ||
+ disk_config->write_total == NULL ||
+ disk_config->prev_read_total == NULL ||
+ disk_config->prev_write_total == NULL) {
+ flb_plg_error(in, "could not allocate memory");
+ return -1;
+ }
+
+ /* initialize */
+ for (i=0; i<entry; i++) {
+ disk_config->read_total[i] = 0;
+ disk_config->write_total[i] = 0;
+ disk_config->prev_read_total[i] = 0;
+ disk_config->prev_write_total[i] = 0;
+ }
+ update_disk_stats(disk_config);
+
+    disk_config->first_snapshot = FLB_TRUE; /* the next collect will be the first snapshot */
+
+ return 0;
+}
+
+/* Initialize plugin */
+static int in_disk_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ struct flb_in_disk_config *disk_config = NULL;
+ int ret = -1;
+
+ /* Allocate space for the configuration */
+ disk_config = flb_calloc(1, sizeof(struct flb_in_disk_config));
+ if (disk_config == NULL) {
+ return -1;
+ }
+
+ disk_config->read_total = NULL;
+ disk_config->write_total = NULL;
+ disk_config->prev_read_total = NULL;
+ disk_config->prev_write_total = NULL;
+
+ /* Initialize head config */
+ ret = configure(disk_config, in);
+ if (ret < 0) {
+ goto init_error;
+ }
+
+ flb_input_set_context(in, disk_config);
+
+ ret = flb_input_set_collector_time(in,
+ in_disk_collect,
+ disk_config->interval_sec,
+ disk_config->interval_nsec, config);
+ if (ret < 0) {
+ flb_plg_error(in, "could not set collector for disk input plugin");
+ goto init_error;
+ }
+
+ ret = flb_log_event_encoder_init(&disk_config->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(in, "error initializing event encoder : %d", ret);
+
+ goto init_error;
+ }
+
+ return 0;
+
+ init_error:
+ flb_free(disk_config->read_total);
+ flb_free(disk_config->write_total);
+ flb_free(disk_config->prev_read_total);
+ flb_free(disk_config->prev_write_total);
+ flb_free(disk_config);
+ return -1;
+}
+
+static int in_disk_exit(void *data, struct flb_config *config)
+{
+ (void) *config;
+ struct flb_in_disk_config *disk_config = data;
+
+ flb_log_event_encoder_destroy(&disk_config->log_encoder);
+
+ flb_free(disk_config->read_total);
+ flb_free(disk_config->write_total);
+ flb_free(disk_config->prev_read_total);
+ flb_free(disk_config->prev_write_total);
+ flb_free(disk_config);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_disk_config, interval_sec),
+ "Set the collector interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_disk_config, interval_nsec),
+ "Set the collector interval (nanoseconds)"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "dev_name", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_disk_config, dev_name),
+ "Set the device name"
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_input_plugin in_disk_plugin = {
+ .name = "disk",
+ .description = "Diskstats",
+ .cb_init = in_disk_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_disk_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_disk_exit,
+ .config_map = config_map
+};
diff --git a/src/fluent-bit/plugins/in_disk/in_disk.h b/src/fluent-bit/plugins/in_disk/in_disk.h
new file mode 100644
index 000000000..4f4506c7c
--- /dev/null
+++ b/src/fluent-bit/plugins/in_disk/in_disk.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef FLB_IN_DISK_H
+#define FLB_IN_DISK_H
+
+#include <stdint.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#define DEFAULT_INTERVAL_SEC "1"
+#define DEFAULT_INTERVAL_NSEC "0"
+
+#define STR_KEY_WRITE "write_size"
+#define STR_KEY_READ "read_size"
+
+struct flb_in_disk_config {
+ uint64_t *read_total;
+ uint64_t *write_total;
+ uint64_t *prev_read_total;
+ uint64_t *prev_write_total;
+ flb_sds_t dev_name;
+ int entry;
+ int interval_sec;
+ int interval_nsec;
+    int first_snapshot; /* a field to indicate whether or not this is the first collect */
+ struct flb_log_event_encoder log_encoder;
+};
+
+extern struct flb_input_plugin in_disk_plugin;
+
+#endif /* FLB_IN_DISK_H */
diff --git a/src/fluent-bit/plugins/in_docker/CMakeLists.txt b/src/fluent-bit/plugins/in_docker/CMakeLists.txt
new file mode 100644
index 000000000..01cf3a848
--- /dev/null
+++ b/src/fluent-bit/plugins/in_docker/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ docker.c
+ cgroup_v1.c
+ )
+
+FLB_PLUGIN(in_docker "${src}" "")
diff --git a/src/fluent-bit/plugins/in_docker/cgroup_v1.c b/src/fluent-bit/plugins/in_docker/cgroup_v1.c
new file mode 100644
index 000000000..a6fe355e2
--- /dev/null
+++ b/src/fluent-bit/plugins/in_docker/cgroup_v1.c
@@ -0,0 +1,397 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#include <dirent.h>
+#include <string.h>
+#include "docker.h"
+
+/* This method returns a list of currently running Docker container ids. */
+static struct mk_list *get_active_dockers()
+{
+ DIR *dp;
+ struct dirent *ep;
+ struct mk_list *list;
+
+ list = flb_malloc(sizeof(struct mk_list));
+ if (!list) {
+ flb_errno();
+ return NULL;
+ }
+ mk_list_init(list);
+
+ dp = opendir(DOCKER_CGROUP_V1_CPU_DIR);
+ if (dp != NULL) {
+ ep = readdir(dp);
+
+ while(ep != NULL) {
+ if (ep->d_type == OS_DIR_TYPE) {
+ if (strcmp(ep->d_name, CURRENT_DIR) != 0
+ && strcmp(ep->d_name, PREV_DIR) != 0
+ && strlen(ep->d_name) == DOCKER_LONG_ID_LEN) { /* precautionary check */
+
+ docker_info *docker = in_docker_init_docker_info(ep->d_name);
+ mk_list_add(&docker->_head, list);
+ }
+ }
+ ep = readdir(dp);
+ }
+ closedir(dp);
+ }
+
+ return list;
+}
+
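+/*
+ * Read one full line from 'fin', growing the buffer as needed so lines of
+ * arbitrary length can be handled; the caller must flb_free() the result.
+ */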
+static char *read_line(FILE *fin)
+{
+ char *buffer;
+ char *tmp;
+ int read_chars = 0;
+ int bufsize = 1215;
+ char *line;
+
+ line = (char *) flb_calloc(bufsize, sizeof(char));
+ if (!line) {
+ flb_errno();
+ return NULL;
+ }
+
+ buffer = line;
+
+ while (fgets(buffer, bufsize - read_chars, fin)) {
+ read_chars = strlen(line);
+
+ if (line[read_chars - 1] == '\n') {
+ line[read_chars - 1] = '\0';
+ return line;
+ }
+ else {
+ bufsize = 2 * bufsize;
+ tmp = flb_realloc(line, bufsize);
+ if (!tmp) {
+ flb_errno();
+ return NULL;
+ }
+ else {
+ line = tmp;
+ buffer = line + read_chars;
+ }
+ }
+ }
+
+ flb_free(line);
+ return NULL;
+}
+
+/* This routine returns the path to a docker's cgroup CPU usage file. */
+static char *get_cpu_used_file(char *id)
+{
+ char *path;
+
+ if (!id) {
+ return NULL;
+ }
+
+ path = (char *) flb_calloc(105, sizeof(char));
+ if (!path) {
+ flb_errno();
+ return NULL;
+ }
+
+ strcat(path, DOCKER_CGROUP_V1_CPU_DIR);
+ strcat(path, "/");
+ strcat(path, id);
+ strcat(path, "/");
+ strcat(path, DOCKER_CGROUP_V1_CPU_USAGE_FILE);
+
+ return path;
+}
+
+/* This routine returns the path to a docker's cgroup memory limit file. */
+static char *get_mem_limit_file(char *id)
+{
+ char *path;
+
+ if (!id) {
+ return NULL;
+ }
+
+ path = (char *) flb_calloc(116, sizeof(char));
+ if (!path) {
+ flb_errno();
+ return NULL;
+ }
+ strcat(path, DOCKER_CGROUP_V1_MEM_DIR);
+ strcat(path, "/");
+ strcat(path, id);
+ strcat(path, "/");
+ strcat(path, DOCKER_CGROUP_V1_MEM_LIMIT_FILE);
+
+ return path;
+}
+
+/* This routine returns the path to a docker's cgroup memory used file. */
+static char *get_mem_used_file(char *id)
+{
+ char *path;
+
+ if (!id) {
+ return NULL;
+ }
+
+ path = (char *) flb_calloc(116, sizeof(char));
+ if (!path) {
+ flb_errno();
+ return NULL;
+ }
+ strcat(path, DOCKER_CGROUP_V1_MEM_DIR);
+ strcat(path, "/");
+ strcat(path, id);
+ strcat(path, "/");
+ strcat(path, DOCKER_CGROUP_V1_MEM_USAGE_FILE);
+
+ return path;
+}
+
+static char *get_config_file(char *id)
+{
+ char *path;
+
+ if (!id) {
+ return NULL;
+ }
+
+ path = (char *) flb_calloc(107, sizeof(char));
+ if (!path) {
+ flb_errno();
+ return NULL;
+ }
+ strcat(path, DOCKER_LIB_ROOT);
+ strcat(path, "/");
+ strcat(path, id);
+ strcat(path, "/");
+ strcat(path, DOCKER_CONFIG_JSON);
+
+ return path;
+}
+
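+/*
+ * Extract the container name that follows the "Name" key in config.v2.json.
+ * The 9 skipped characters presumably cover the '"Name":"/' prefix, since
+ * Docker stores the name with a leading slash.
+ */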
+static char *extract_name(char *line, char *start)
+{
+ int skip = 9;
+ int len = 0;
+ char *name;
+ char buff[256];
+ char *curr;
+
+ if (start != NULL) {
+ curr = start + skip;
+ while (*curr != '"') {
+ buff[len++] = *curr;
+ curr++;
+ }
+
+ if (len > 0) {
+ name = (char *) flb_calloc(len + 1, sizeof(char));
+ if (!name) {
+ flb_errno();
+ return NULL;
+ }
+ memcpy(name, buff, len);
+
+ return name;
+ }
+ }
+
+ return NULL;
+}
+
+static char *get_container_name(struct flb_docker *ctx, char *id)
+{
+ char *container_name = NULL;
+ char *config_file;
+ FILE *f = NULL;
+ char *line;
+
+ config_file = get_config_file(id);
+ if (!config_file) {
+ return NULL;
+ }
+
+ f = fopen(config_file, "r");
+ if (!f) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "cannot open %s", config_file);
+ flb_free(config_file);
+ return NULL;
+ }
+
+ while ((line = read_line(f))) {
+ char *index = strstr(line, DOCKER_NAME_ARG);
+ if (index != NULL) {
+ container_name = extract_name(line, index);
+ flb_free(line);
+ break;
+ }
+ flb_free(line);
+ }
+
+ flb_free(config_file);
+ fclose(f);
+
+ return container_name;
+}
+
+/* Returns CPU metrics for docker id. */
+static cpu_snapshot *get_docker_cpu_snapshot(struct flb_docker *ctx, char *id)
+{
+ int c;
+ unsigned long cpu_used = 0;
+ char *usage_file;
+ cpu_snapshot *snapshot = NULL;
+ FILE *f;
+
+ usage_file = get_cpu_used_file(id);
+ if (!usage_file) {
+ return NULL;
+ }
+
+ f = fopen(usage_file, "r");
+ if (!f) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "error gathering CPU data from %s",
+ usage_file);
+ flb_free(usage_file);
+ return NULL;
+ }
+
+    c = fscanf(f, "%lu", &cpu_used);
+ if (c != 1) {
+ flb_plg_error(ctx->ins, "error scanning used CPU value from %s",
+ usage_file);
+ flb_free(usage_file);
+ fclose(f);
+ return NULL;
+ }
+
+ snapshot = (cpu_snapshot *) flb_calloc(1, sizeof(cpu_snapshot));
+ if (!snapshot) {
+ flb_errno();
+ fclose(f);
+ flb_free(usage_file);
+ return NULL;
+ }
+
+ snapshot->used = cpu_used;
+
+ flb_free(usage_file);
+ fclose(f);
+ return snapshot;
+}
+
+/* Returns memory used by a docker in bytes. */
+static uint64_t get_docker_mem_used(struct flb_docker *ctx, char *id)
+{
+ int c;
+ char *usage_file = NULL;
+ uint64_t mem_used = 0;
+ FILE *f;
+
+ usage_file = get_mem_used_file(id);
+ if (!usage_file) {
+ return 0;
+ }
+
+ f = fopen(usage_file, "r");
+ if (!f) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "cannot retrieve memory used from %s",
+ usage_file);
+ flb_free(usage_file);
+ return 0;
+ }
+
+ c = fscanf(f, "%ld", &mem_used);
+ if (c != 1) {
+ flb_plg_error(ctx->ins, "cannot scan memory usage value from %s",
+ usage_file);
+ flb_free(usage_file);
+ fclose(f);
+ return 0;
+ }
+
+ flb_free(usage_file);
+ fclose(f);
+
+ return mem_used;
+}
+
+/* Returns memory limit for a docker in bytes. */
+static uint64_t get_docker_mem_limit(char *id)
+{
+ char *limit_file = get_mem_limit_file(id);
+ uint64_t mem_limit = 0;
+ FILE *f;
+
+ if (!limit_file) {
+ return 0;
+ }
+
+ f = fopen(limit_file, "r");
+ if (!f) {
+ flb_errno();
+ flb_free(limit_file);
+ return 0;
+ }
+
+ fscanf(f, "%ld", &mem_limit);
+ flb_free(limit_file);
+ fclose(f);
+
+ return mem_limit;
+}
+
+/* Get memory snapshot for a docker id. */
+static mem_snapshot *get_docker_mem_snapshot(struct flb_docker *ctx, char *id)
+{
+ mem_snapshot *snapshot = NULL;
+
+ snapshot = (mem_snapshot *) flb_calloc(1, sizeof(mem_snapshot));
+ if (!snapshot) {
+ flb_errno();
+ return NULL;
+ }
+
+ snapshot->used = get_docker_mem_used(ctx, id);
+ snapshot->limit = get_docker_mem_limit(id);
+
+ return snapshot;
+}
+
+int in_docker_set_cgroup_api_v1(struct cgroup_api *api)
+{
+ api->cgroup_version = 1;
+ api->get_active_docker_ids = get_active_dockers;
+ api->get_container_name = get_container_name;
+ api->get_cpu_snapshot = get_docker_cpu_snapshot;
+ api->get_mem_snapshot = get_docker_mem_snapshot;
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_docker/docker.c b/src/fluent-bit/plugins/in_docker/docker.c
new file mode 100644
index 000000000..135c9f6b4
--- /dev/null
+++ b/src/fluent-bit/plugins/in_docker/docker.c
@@ -0,0 +1,560 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_kv.h>
+
+#include <stdio.h>
+#include <dirent.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+
+#include "docker.h"
+
+static int cb_docker_collect(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context);
+
+docker_info *in_docker_init_docker_info(char *id)
+{
+ int len;
+ docker_info *docker;
+
+ docker = flb_malloc(sizeof(docker_info));
+ if (!docker) {
+ flb_errno();
+ return NULL;
+ }
+
+ len = strlen(id);
+ docker->id = flb_malloc(sizeof(char)*(len + 1));
+ if (!docker->id) {
+ flb_errno();
+ flb_free(docker);
+ return NULL;
+ }
+ strcpy(docker->id, id);
+ docker->id[len] = '\0';
+
+ return docker;
+}
+
+static docker_snapshot *init_snapshot(char *id)
+{
+ int id_len;
+ docker_snapshot *snapshot;
+
+ snapshot = (docker_snapshot *) flb_malloc(sizeof(docker_snapshot));
+ if (!snapshot) {
+ flb_errno();
+ return NULL;
+ }
+
+ id_len = strlen(id) + 1;
+ snapshot->id = (char *) flb_malloc((id_len)*sizeof(char));
+ if (!snapshot->id) {
+ flb_errno();
+ flb_free(snapshot);
+ return NULL;
+ }
+ strcpy(snapshot->id, id);
+
+ return snapshot;
+}
+
+static bool is_exists(struct mk_list *list, char *id)
+{
+ int id_len;
+ char *cmp;
+ docker_info *item;
+ bool result = false;
+ struct mk_list *head;
+
+ if (!list || !id) {
+ return result;
+ }
+
+ mk_list_foreach(head, list) {
+ item = mk_list_entry(head, docker_info, _head);
+
+ /* id could be of length 12 or 64 */
+ id_len = strlen(item->id);
+ cmp = flb_calloc(id_len + 1, sizeof(char));
+ if (!cmp) {
+ flb_errno();
+            return false;
+ }
+ memcpy(cmp, id, id_len);
+ if (strcmp(item->id, cmp) == 0) {
+ result = true;
+ }
+ flb_free(cmp);
+ }
+
+ return result;
+}
+
+static void free_snapshots(struct mk_list *snaps);
+/* Returns the dockers' CPU/memory metrics. */
+static struct mk_list *get_docker_stats(struct flb_docker *ctx, struct mk_list *dockers)
+{
+ docker_snapshot *snapshot;
+ struct docker_info *docker;
+ struct mk_list *head;
+ struct mk_list *snapshots;
+
+ if (!dockers) {
+ return NULL;
+ }
+
+ snapshots = flb_malloc(sizeof(struct mk_list));
+ if (!snapshots) {
+ flb_errno();
+ return NULL;
+ }
+
+ mk_list_init(snapshots);
+ mk_list_foreach(head, dockers) {
+ docker = mk_list_entry(head, docker_info, _head);
+ snapshot = init_snapshot(docker->id);
+ if (snapshot == NULL) {
+ free_snapshots(snapshots);
+ return NULL;
+ }
+ snapshot->name = ctx->cgroup_api.get_container_name(ctx, docker->id);
+ if (snapshot->name == NULL) {
+ free_snapshots(snapshots);
+ flb_free(snapshot->id);
+ flb_free(snapshot);
+ return NULL;
+ }
+ snapshot->cpu = ctx->cgroup_api.get_cpu_snapshot(ctx, docker->id);
+ if (snapshot->cpu == NULL) {
+ free_snapshots(snapshots);
+ flb_free(snapshot->name);
+ flb_free(snapshot->id);
+ flb_free(snapshot);
+ return NULL;
+ }
+ snapshot->mem = ctx->cgroup_api.get_mem_snapshot(ctx, docker->id);
+ if (snapshot->mem == NULL) {
+ free_snapshots(snapshots);
+ flb_free(snapshot->cpu);
+ flb_free(snapshot->name);
+ flb_free(snapshot->id);
+ flb_free(snapshot);
+ return NULL;
+ }
+
+ mk_list_add(&snapshot->_head, snapshots);
+ }
+
+ return snapshots;
+}
+
+/* Returns a list of docker ids from a space-delimited string. */
+static struct mk_list *get_ids_from_str(char *space_delimited_str)
+{
+ struct mk_list *str_parts;
+ struct mk_list *parts_head;
+ struct mk_list *tmp;
+ struct flb_split_entry *part;
+ struct mk_list *dockers;
+ docker_info *docker;
+
+ dockers = flb_malloc(sizeof(struct mk_list));
+ if (!dockers) {
+ flb_errno();
+ return NULL;
+ }
+
+ mk_list_init(dockers);
+ str_parts = flb_utils_split(space_delimited_str, ' ', 256);
+ mk_list_foreach_safe(parts_head, tmp, str_parts) {
+ part = mk_list_entry(parts_head, struct flb_split_entry, _head);
+ if (part->len == DOCKER_LONG_ID_LEN
+ || part->len == DOCKER_SHORT_ID_LEN) {
+ docker = in_docker_init_docker_info(part->value);
+ mk_list_add(&docker->_head, dockers);
+ }
+ }
+
+ flb_utils_split_free(str_parts);
+ return dockers;
+}
+
+/* Initializes blacklist/whitelist. */
+static void init_filter_lists(struct flb_input_instance *f_ins,
+ struct flb_docker *ctx)
+{
+ struct mk_list *head;
+ struct flb_kv *kv;
+
+ ctx->whitelist = NULL;
+ ctx->blacklist = NULL;
+
+ /* Iterate all filter properties */
+ mk_list_foreach(head, &f_ins->properties) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+
+ if (strcasecmp(kv->key, "include") == 0) {
+ ctx->whitelist = get_ids_from_str(kv->val);
+ }
+ else if (strcasecmp(kv->key, "exclude") == 0) {
+ ctx->blacklist = get_ids_from_str(kv->val);
+ }
+ }
+}
+
+/* Filters the list of active dockers as per config. This returns a new list. */
+static struct mk_list *apply_filters(struct flb_docker *ctx,
+ struct mk_list *dockers)
+{
+ struct mk_list *head;
+ struct mk_list *tmp;
+ docker_info *new;
+ docker_info *docker;
+ struct mk_list *filtered;
+
+ if (ctx->whitelist == NULL && ctx->blacklist == NULL) {
+ return dockers;
+ }
+
+ filtered = flb_malloc(sizeof(struct mk_list));
+ if (!filtered) {
+ flb_errno();
+ return NULL;
+ }
+
+ mk_list_init(filtered);
+
+ /* whitelist */
+ mk_list_foreach_safe(head, tmp, dockers) {
+ docker = mk_list_entry(head, docker_info, _head);
+ if (ctx->whitelist == NULL) {
+ new = in_docker_init_docker_info(docker->id);
+ mk_list_add(&new->_head, filtered);
+ }
+ else {
+ if (is_exists(ctx->whitelist, docker->id)) {
+ new = in_docker_init_docker_info(docker->id);
+ mk_list_add(&new->_head, filtered);
+ }
+ }
+ }
+
+ /* blacklist */
+ if (ctx->blacklist != NULL) {
+ mk_list_foreach_safe(head, tmp, filtered) {
+ docker = mk_list_entry(head, docker_info, _head);
+ if (is_exists(ctx->blacklist, docker->id)) {
+ mk_list_del(&docker->_head);
+ flb_free(docker->id);
+ flb_free(docker);
+ }
+ }
+ }
+
+ return filtered;
+}
+
+/* Init Docker input */
+static int cb_docker_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct flb_docker *ctx;
+
+ /* Allocate space for the configuration */
+ ctx = flb_calloc(1, sizeof(struct flb_docker));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = in;
+ in_docker_set_cgroup_api_v1(&ctx->cgroup_api); /* TODO: support cgroup v2*/
+
+ init_filter_lists(in, ctx);
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *)ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ flb_plg_error(in, "unable to load configuration.");
+ return -1;
+ }
+
+ if (ctx->interval_sec <= 0 && ctx->interval_nsec <= 0) {
+ ctx->interval_sec = atoi(DEFAULT_INTERVAL_SEC);
+ ctx->interval_nsec = atoi(DEFAULT_INTERVAL_NSEC);
+ }
+
+ /* Set our collector based on time, CPU usage every 1 second */
+ ret = flb_input_set_collector_time(in,
+ cb_docker_collect, ctx->interval_sec,
+ ctx->interval_nsec, config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Could not set collector for Docker input plugin");
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->coll_fd = ret;
+
+ ret = flb_log_event_encoder_init(&ctx->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "error initializing event encoder : %d", ret);
+ flb_free(ctx);
+ return -1;
+ }
+
+ return ret;
+}
+
+/* Flush snapshot as a message for output. */
+static void flush_snapshot(struct flb_docker *ctx,
+ struct flb_input_instance *i_ins,
+ docker_snapshot *snapshot)
+{
+ int result;
+
+ if (!snapshot) {
+ return;
+ }
+
+ result = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_current_timestamp(
+ &ctx->log_encoder);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ /* Docker ID [12 chars] */
+ FLB_LOG_EVENT_CSTRING_VALUE("id"),
+ FLB_LOG_EVENT_STRING_VALUE(snapshot->id, DOCKER_SHORT_ID_LEN),
+
+ /* Docker Name */
+ FLB_LOG_EVENT_CSTRING_VALUE("name"),
+ FLB_LOG_EVENT_CSTRING_VALUE(snapshot->name),
+
+ /* CPU used [nanoseconds] */
+ FLB_LOG_EVENT_CSTRING_VALUE("cpu_used"),
+ FLB_LOG_EVENT_UINT32_VALUE(snapshot->cpu->used),
+
+ /* Memory used [bytes] */
+ FLB_LOG_EVENT_CSTRING_VALUE("mem_used"),
+ FLB_LOG_EVENT_UINT32_VALUE(snapshot->mem->used),
+
+ /* Memory limit [bytes] */
+ FLB_LOG_EVENT_CSTRING_VALUE("mem_limit"),
+ FLB_LOG_EVENT_UINT64_VALUE(snapshot->mem->limit));
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ flb_trace("[in_docker] ID %s CPU %lu MEMORY %ld", snapshot->id,
+ snapshot->cpu->used, snapshot->mem->used);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(i_ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ }
+ else {
+ flb_plg_error(i_ins, "Error encoding record : %d", result);
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+}
+
+static void flush_snapshots(struct flb_docker *ctx,
+ struct flb_input_instance *i_ins,
+ struct mk_list *snapshots)
+{
+ struct mk_list *head;
+ docker_snapshot *snapshot;
+
+ mk_list_foreach(head, snapshots) {
+ snapshot = mk_list_entry(head, docker_snapshot, _head);
+ flush_snapshot(ctx, i_ins, snapshot);
+ }
+}
+
+static void free_snapshots(struct mk_list *snaps)
+{
+ struct docker_snapshot *snap;
+ struct mk_list *tmp;
+ struct mk_list *head;
+
+ if (snaps == NULL) {
+ return;
+ }
+
+ mk_list_foreach_safe(head, tmp, snaps) {
+ snap = mk_list_entry(head, docker_snapshot, _head);
+ flb_free(snap->id);
+ flb_free(snap->name);
+ flb_free(snap->cpu);
+ flb_free(snap->mem);
+ flb_free(snap);
+ }
+ flb_free(snaps);
+}
+
+static void free_docker_list(struct mk_list *dockers)
+{
+ struct mk_list *head;
+ struct mk_list *tmp;
+ struct docker_info *docker;
+
+ if (dockers == NULL) {
+ return;
+ }
+
+ mk_list_foreach_safe(head, tmp, dockers) {
+ docker = mk_list_entry(head, docker_info, _head);
+ flb_free(docker->id);
+ flb_free(docker);
+ }
+ flb_free(dockers);
+}
+
+/* Callback to gather Docker CPU/Memory usage. */
+static int cb_docker_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct mk_list *active;
+ struct mk_list *filtered;
+ struct mk_list *snaps;
+ struct flb_docker *ctx = in_context;
+ (void) config;
+
+ /* Get current active dockers. */
+ active = ctx->cgroup_api.get_active_docker_ids();
+
+ filtered = apply_filters(ctx, active);
+ if (!filtered) {
+ free_docker_list(active);
+ return 0;
+ }
+
+ /* Get Mem/CPU stats of dockers. */
+ snaps = get_docker_stats(ctx, filtered);
+ if (!snaps) {
+ free_docker_list(active);
+ if (active != filtered) {
+            /* apply_filters can return the address of 'active'.
+ * In that case, filtered is already freed.
+ */
+ free_docker_list(filtered);
+ }
+ return 0;
+ }
+
+ flush_snapshots(ctx, ins, snaps);
+
+ free_snapshots(snaps);
+ free_docker_list(active);
+
+ if (ctx->whitelist != NULL || ctx->blacklist != NULL) {
+ free_docker_list(filtered);
+ }
+
+ return 0;
+}
+
+static void cb_docker_pause(void *data, struct flb_config *config)
+{
+ struct flb_docker *ctx = data;
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+}
+
+static void cb_docker_resume(void *data, struct flb_config *config)
+{
+ struct flb_docker *ctx = data;
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+}
+
+static int cb_docker_exit(void *data, struct flb_config *config)
+{
+ (void) *config;
+ struct flb_docker *ctx = data;
+
+ /* done */
+ flb_log_event_encoder_destroy(&ctx->log_encoder);
+
+ free_docker_list(ctx->whitelist);
+ free_docker_list(ctx->blacklist);
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct flb_docker, interval_sec),
+ "Set the collector interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct flb_docker, interval_nsec),
+ "Set the collector interval (nanoseconds)"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "include", NULL,
+ 0, FLB_FALSE, 0,
+ "A space-separated list of containers to include"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "exclude", NULL,
+ 0, FLB_FALSE, 0,
+ "A space-separated list of containers to exclude"
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_docker_plugin = {
+ .name = "docker",
+ .description = "Docker containers metrics",
+ .cb_init = cb_docker_init,
+ .cb_pre_run = NULL,
+ .cb_collect = cb_docker_collect,
+ .cb_flush_buf = NULL,
+ .cb_pause = cb_docker_pause,
+ .cb_resume = cb_docker_resume,
+ .cb_exit = cb_docker_exit,
+ .config_map = config_map
+};
diff --git a/src/fluent-bit/plugins/in_docker/docker.h b/src/fluent-bit/plugins/in_docker/docker.h
new file mode 100644
index 000000000..d3814c390
--- /dev/null
+++ b/src/fluent-bit/plugins/in_docker/docker.h
@@ -0,0 +1,94 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_DOCKER_H
+#define FLB_IN_DOCKER_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+
+#define CURRENT_DIR "."
+#define PREV_DIR ".."
+#define OS_DIR_TYPE 4
+#define DOCKER_LONG_ID_LEN 64
+#define DOCKER_SHORT_ID_LEN 12
+#define DOCKER_CGROUP_V1_MEM_DIR "/sys/fs/cgroup/memory/docker"
+#define DOCKER_CGROUP_V1_CPU_DIR "/sys/fs/cgroup/cpu/docker"
+#define DOCKER_CGROUP_V1_MEM_LIMIT_FILE "memory.limit_in_bytes"
+#define DOCKER_CGROUP_V1_MEM_USAGE_FILE "memory.usage_in_bytes"
+#define DOCKER_CGROUP_V1_CPU_USAGE_FILE "cpuacct.usage"
+#define DOCKER_LIB_ROOT "/var/lib/docker/containers"
+#define DOCKER_CONFIG_JSON "config.v2.json"
+#define DOCKER_NAME_ARG "\"Name\""
+#define DEFAULT_INTERVAL_SEC "1"
+#define DEFAULT_INTERVAL_NSEC "0"
+
+typedef struct docker_info {
+ char *id;
+ struct mk_list _head;
+} docker_info;
+
+typedef struct cpu_snapshot {
+ unsigned long used;
+} cpu_snapshot;
+
+typedef struct mem_snapshot {
+ uint64_t limit;
+ uint64_t used;
+} mem_snapshot;
+
+typedef struct docker_snapshot {
+ char *id;
+ char *name;
+ mem_snapshot *mem;
+ cpu_snapshot *cpu;
+ struct mk_list _head;
+} docker_snapshot;
+
+struct flb_docker;
+
+struct cgroup_api {
+ int cgroup_version;
+ struct mk_list* (*get_active_docker_ids) ();
+ char* (*get_container_name) (struct flb_docker *, char *);
+ cpu_snapshot* (*get_cpu_snapshot) (struct flb_docker *, char *);
+ mem_snapshot* (*get_mem_snapshot) (struct flb_docker *, char *);
+};
+int in_docker_set_cgroup_api_v1(struct cgroup_api *api);
+
+/* Docker Input configuration & context */
+struct flb_docker {
+ int coll_fd; /* collector id/fd */
+ int interval_sec; /* interval collection time (Second) */
+ int interval_nsec; /* interval collection time (Nanosecond) */
+ struct mk_list *whitelist; /* dockers to monitor */
+ struct mk_list *blacklist; /* dockers to exclude */
+ struct cgroup_api cgroup_api;
+ struct flb_input_instance *ins;
+ struct flb_log_event_encoder log_encoder;
+};
+
+int in_docker_collect(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context);
+docker_info *in_docker_init_docker_info(char *id);
+#endif
diff --git a/src/fluent-bit/plugins/in_docker_events/CMakeLists.txt b/src/fluent-bit/plugins/in_docker_events/CMakeLists.txt
new file mode 100644
index 000000000..dee7c0f27
--- /dev/null
+++ b/src/fluent-bit/plugins/in_docker_events/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ docker_events.c
+ docker_events_config.c)
+
+FLB_PLUGIN(in_docker_events "${src}" "")
diff --git a/src/fluent-bit/plugins/in_docker_events/docker_events.c b/src/fluent-bit/plugins/in_docker_events/docker_events.c
new file mode 100644
index 000000000..7534eb1d6
--- /dev/null
+++ b/src/fluent-bit/plugins/in_docker_events/docker_events.c
@@ -0,0 +1,476 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_pack.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+#include "docker_events.h"
+#include "docker_events_config.h"
+
+
+/**
+ * Creates the connection to docker's unix socket and sends the
+ * HTTP GET /events
+ *
+ * @param ctx Pointer to flb_in_de_config
+ *
+ * @return int 0 on success, -1 on failure
+ */
+static int de_unix_create(struct flb_in_de_config *ctx)
+{
+ ssize_t bytes;
+ unsigned long len;
+ size_t address_length;
+ struct sockaddr_un address;
+ char request[512];
+
+ ctx->fd = flb_net_socket_create(AF_UNIX, FLB_FALSE);
+ if (ctx->fd == -1) {
+ return -1;
+ }
+
+ /* Prepare the unix socket path */
+ len = strlen(ctx->unix_path);
+ address.sun_family = AF_UNIX;
+    snprintf(address.sun_path, sizeof(address.sun_path), "%s", ctx->unix_path);
+ address_length = sizeof(address.sun_family) + len + 1;
+ if (connect(ctx->fd, (struct sockaddr *)&address, address_length) == -1) {
+ flb_errno();
+ close(ctx->fd);
+ return -1;
+ }
+
+ strcpy(request, "GET /events HTTP/1.0\r\n\r\n");
+ flb_plg_trace(ctx->ins, "writing to socket %s", request);
+ write(ctx->fd, request, strlen(request));
+
+ /* Read the initial http response */
+ bytes = read(ctx->fd, ctx->buf, ctx->buf_size - 1);
+ if (bytes == -1) {
+ flb_errno();
+ }
+    flb_plg_debug(ctx->ins, "read %zd bytes from socket", bytes);
+
+ return 0;
+}
+
+static int in_de_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context);
+
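+/*
+ * Drop the current socket collector (if any), close the old descriptor and
+ * re-create the unix socket connection, registering a new event collector
+ * on the fresh fd.
+ */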
+static int reconnect_docker_sock(struct flb_input_instance *ins,
+ struct flb_config *config,
+ struct flb_in_de_config *ctx)
+{
+ int ret;
+
+ /* remove old socket collector */
+ if (ctx->coll_id >= 0) {
+ ret = flb_input_collector_delete(ctx->coll_id, ins);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "failed to pause event");
+ return -1;
+ }
+ ctx->coll_id = -1;
+ }
+ if (ctx->fd > 0) {
+ flb_plg_debug(ctx->ins, "close socket fd=%d", ctx->fd);
+ close(ctx->fd);
+ ctx->fd = -1;
+ }
+
+ /* create socket again */
+ if (de_unix_create(ctx) < 0) {
+ flb_plg_error(ctx->ins, "failed to re-initialize socket");
+ if (ctx->fd > 0) {
+ flb_plg_debug(ctx->ins, "close socket fd=%d", ctx->fd);
+ close(ctx->fd);
+ ctx->fd = -1;
+ }
+ return -1;
+ }
+ /* set event */
+ ctx->coll_id = flb_input_set_collector_event(ins,
+ in_de_collect,
+ ctx->fd, config);
+ if (ctx->coll_id < 0) {
+ flb_plg_error(ctx->ins,
+ "could not set collector for IN_DOCKER_EVENTS plugin");
+ close(ctx->fd);
+ ctx->fd = -1;
+ return -1;
+ }
+ ret = flb_input_collector_start(ctx->coll_id, ins);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins,
+ "could not start collector for IN_DOCKER_EVENTS plugin");
+ flb_input_collector_delete(ctx->coll_id, ins);
+ close(ctx->fd);
+ ctx->coll_id = -1;
+ ctx->fd = -1;
+ return -1;
+ }
+
+ flb_plg_info(ctx->ins, "Reconnect successful");
+ return 0;
+}
+
+static int cb_reconnect(struct flb_input_instance *ins,
+ struct flb_config *config,
+ void *in_context)
+{
+ struct flb_in_de_config *ctx = in_context;
+ int ret;
+
+ flb_plg_info(ctx->ins, "Retry(%d/%d)",
+ ctx->current_retries, ctx->reconnect_retry_limits);
+ ret = reconnect_docker_sock(ins, config, ctx);
+ if (ret < 0) {
+ /* Failed to reconnect */
+ ctx->current_retries++;
+ if (ctx->current_retries > ctx->reconnect_retry_limits) {
+ /* give up */
+ flb_plg_error(ctx->ins, "Failed to retry. Giving up...");
+ goto cb_reconnect_end;
+ }
+        flb_plg_info(ctx->ins, "Failed. Waiting for the next retry...");
+ return 0;
+ }
+
+ cb_reconnect_end:
+ if(flb_input_collector_delete(ctx->retry_coll_id, ins) < 0) {
+ flb_plg_error(ctx->ins, "failed to delete timer event");
+ }
+ ctx->current_retries = 0;
+ ctx->retry_coll_id = -1;
+ return ret;
+}
+
+static int create_reconnect_event(struct flb_input_instance *ins,
+ struct flb_config *config,
+ struct flb_in_de_config *ctx)
+{
+ int ret;
+
+ if (ctx->retry_coll_id >= 0) {
+        flb_plg_debug(ctx->ins, "already retrying?");
+ return 0;
+ }
+
+ /* try before creating event to stop incoming event */
+ ret = reconnect_docker_sock(ins, config, ctx);
+ if (ret == 0) {
+ return 0;
+ }
+
+ ctx->current_retries = 1;
+ ctx->retry_coll_id = flb_input_set_collector_time(ins,
+ cb_reconnect,
+ ctx->reconnect_retry_interval,
+ 0,
+ config);
+ if (ctx->retry_coll_id < 0) {
+ flb_plg_error(ctx->ins, "failed to create timer event");
+ return -1;
+ }
+ ret = flb_input_collector_start(ctx->retry_coll_id, ins);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "failed to start timer event");
+ flb_input_collector_delete(ctx->retry_coll_id, ins);
+ ctx->retry_coll_id = -1;
+ return -1;
+ }
+ flb_plg_info(ctx->ins, "create reconnect event. interval=%d second",
+ ctx->reconnect_retry_interval);
+
+ return 0;
+}
+
+static int is_recoverable_error(int error)
+{
+ /* ENOTTY:
+ It reports on Docker in Docker mode.
+ https://github.com/fluent/fluent-bit/issues/3439#issuecomment-831424674
+ */
+ if (error == ENOTTY || error == EBADF) {
+ return FLB_TRUE;
+ }
+ return FLB_FALSE;
+}
+
+
+/**
+ * Callback function to process events received on the unix
+ * socket.
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ * @param in_context void Pointer used to cast to
+ * flb_in_de_config
+ *
+ * @return int Always returns success
+ */
+static int in_de_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret = 0;
+ int error;
+ size_t str_len = 0;
+ struct flb_in_de_config *ctx = in_context;
+
+ /* variables for parser */
+ int parser_ret = -1;
+ void *out_buf = NULL;
+ size_t out_size = 0;
+ struct flb_time out_time;
+
+ ret = read(ctx->fd, ctx->buf, ctx->buf_size - 1);
+
+ if (ret > 0) {
+ str_len = ret;
+ ctx->buf[str_len] = '\0';
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (!ctx->parser) {
+ /* Initialize local msgpack buffer */
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(
+ &ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(ctx->key),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->buf, str_len));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ }
+ else {
+ flb_plg_error(ctx->ins, "Error encoding record : %d", ret);
+ }
+ }
+ else {
+ flb_time_get(&out_time);
+
+ parser_ret = flb_parser_do(ctx->parser, ctx->buf, str_len - 1,
+ &out_buf, &out_size, &out_time);
+ if (parser_ret >= 0) {
+ if (flb_time_to_nanosec(&out_time) == 0L) {
+ flb_time_get(&out_time);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ &ctx->log_encoder,
+ &out_time);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_body_from_raw_msgpack(
+ &ctx->log_encoder,
+ out_buf,
+ out_size);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ }
+ else {
+ flb_plg_error(ctx->ins, "Error encoding record : %d", ret);
+ }
+
+
+ flb_free(out_buf);
+ }
+ else {
+ flb_plg_trace(ctx->ins, "tried to parse: %s", ctx->buf);
+ flb_plg_trace(ctx->ins, "buf_size %zu", ctx->buf_size);
+ flb_plg_error(ctx->ins, "parser returned an error: %d",
+ parser_ret);
+ }
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+ }
+ else if (ret == 0) {
+ /* EOF */
+
+ /* docker service may be restarted */
+ flb_plg_info(ctx->ins, "EOF detected. Re-initialize");
+ if (ctx->reconnect_retry_limits > 0) {
+ ret = create_reconnect_event(ins, config, ctx);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ }
+ else {
+ error = errno;
+ flb_plg_error(ctx->ins, "read returned error: %d, %s", error,
+ strerror(error));
+ if (is_recoverable_error(error)) {
+ if (ctx->reconnect_retry_limits > 0) {
+ ret = create_reconnect_event(ins, config, ctx);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
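+
+/*
+ * For reference, the two record shapes produced above (values are
+ * illustrative, not taken from a real Docker daemon):
+ *
+ *   without a parser: the raw event line is stored verbatim under the
+ *                     configured key, e.g.
+ *                     {"message": "{\"status\":\"start\",\"id\":\"...\"}"}
+ *   with a parser:    the configured parser (e.g. a JSON parser) decodes the
+ *                     raw event line into structured fields.
+ */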
+
+/**
+ * Callback function to initialize docker events plugin
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ * @param data Unused
+ *
+ * @return int 0 on success, -1 on failure
+ */
+static int in_de_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct flb_in_de_config *ctx = NULL;
+ (void) data;
+
+ /* Allocate space for the configuration */
+ ctx = de_config_init(ins, config);
+ if (!ctx) {
+ return -1;
+ }
+ ctx->ins = ins;
+ ctx->retry_coll_id = -1;
+ ctx->current_retries = 0;
+
+ /* Set the context */
+ flb_input_set_context(ins, ctx);
+
+ if (de_unix_create(ctx) != 0) {
+        flb_plg_error(ctx->ins, "could not connect to unix://%s",
+ ctx->unix_path);
+ de_config_destroy(ctx);
+ return -1;
+ }
+
+ ctx->coll_id = flb_input_set_collector_event(ins, in_de_collect,
+ ctx->fd, config);
+ if(ctx->coll_id < 0){
+ flb_plg_error(ctx->ins,
+ "could not set collector for IN_DOCKER_EVENTS plugin");
+ de_config_destroy(ctx);
+ return -1;
+ }
+
+ flb_plg_info(ctx->ins, "listening for events on %s", ctx->unix_path);
+ return 0;
+}
+
+/**
+ * Callback exit function to cleanup plugin
+ *
+ * @param data Pointer cast to flb_in_de_config
+ * @param config Unused
+ *
+ * @return int Always returns 0
+ */
+static int in_de_exit(void *data, struct flb_config *config)
+{
+ (void) config;
+ struct flb_in_de_config *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ de_config_destroy(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "unix_path", DEFAULT_UNIX_SOCKET_PATH,
+ 0, FLB_TRUE, offsetof(struct flb_in_de_config, unix_path),
+ "Define Docker unix socket path to read events"
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_size", "8k",
+ 0, FLB_TRUE, offsetof(struct flb_in_de_config, buf_size),
+ "Set buffer size to read events"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "parser", NULL,
+ 0, FLB_FALSE, 0,
+      "Optional parser for records; if not set, records are packaged under 'key'"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "key", DEFAULT_FIELD_NAME,
+ 0, FLB_TRUE, offsetof(struct flb_in_de_config, key),
+ "Set the key name to store unparsed Docker events"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "reconnect.retry_limits", "5",
+ 0, FLB_TRUE, offsetof(struct flb_in_de_config, reconnect_retry_limits),
+      "Maximum number of retries for connecting to the Docker socket"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "reconnect.retry_interval", "1",
+ 0, FLB_TRUE, offsetof(struct flb_in_de_config, reconnect_retry_interval),
+      "Retry interval in seconds for connecting to the Docker socket"
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_docker_events_plugin = {
+ .name = "docker_events",
+ .description = "Docker events",
+ .cb_init = in_de_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_de_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_de_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET
+};
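+
+/*
+ * Illustrative configuration (not part of the sources; property names come
+ * from the config_map above, values are examples only):
+ *
+ *   [INPUT]
+ *       Name                      docker_events
+ *       Unix_Path                 /var/run/docker.sock
+ *       Buffer_Size               8k
+ *       Reconnect.Retry_Limits    5
+ *       Reconnect.Retry_Interval  1
+ */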
diff --git a/src/fluent-bit/plugins/in_docker_events/docker_events.h b/src/fluent-bit/plugins/in_docker_events/docker_events.h
new file mode 100644
index 000000000..dc659d5ec
--- /dev/null
+++ b/src/fluent-bit/plugins/in_docker_events/docker_events.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_DE_H
+#define FLB_IN_DE_H
+
+#include <msgpack.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#define DEFAULT_BUF_SIZE 8192
+#define MIN_BUF_SIZE 2048
+#define DEFAULT_FIELD_NAME "message"
+#define DEFAULT_UNIX_SOCKET_PATH "/var/run/docker.sock"
+
+struct flb_in_de_config
+{
+ int fd; /* File descriptor */
+ int coll_id; /* collector id */
+ flb_sds_t unix_path; /* Unix path for socket */
+ char *buf;
+ size_t buf_size;
+ flb_sds_t key;
+
+ /* retries */
+ int reconnect_retry_limits;
+ int reconnect_retry_interval;
+
+ /* retries (internal) */
+ int current_retries;
+ int retry_coll_id;
+
+ struct flb_parser *parser;
+ struct flb_log_event_encoder log_encoder;
+    struct flb_input_instance *ins; /* Input plugin instance */
+
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_docker_events/docker_events_config.c b/src/fluent-bit/plugins/in_docker_events/docker_events_config.c
new file mode 100644
index 000000000..8290686c1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_docker_events/docker_events_config.c
@@ -0,0 +1,106 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+
+#include "docker_events.h"
+#include "docker_events_config.h"
+
+/**
+ * Function to initialize docker_events plugin.
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ *
+ * @return struct flb_in_de_config* Pointer to the plugin's
+ * structure on success, NULL on failure.
+ */
+struct flb_in_de_config *de_config_init(struct flb_input_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ const char *tmp;
+ struct flb_in_de_config *ctx;
+
+ ctx = flb_calloc(1, sizeof(struct flb_in_de_config));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Allocate buffer for events */
+ ctx->buf = flb_malloc(ctx->buf_size);
+ if (!ctx->buf) {
+ flb_errno();
+ flb_free(ctx);
+ return NULL;
+ }
+
+ tmp = flb_input_get_property("parser", ins);
+ if (tmp) {
+ ctx->parser = flb_parser_get(tmp, config);
+ if (ctx->parser == NULL) {
+ flb_plg_error(ctx->ins, "requested parser '%s' not found", tmp);
+ flb_free(ctx->buf);
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+
+ ret = flb_log_event_encoder_init(&ctx->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "error initializing event encoder : %d", ret);
+
+ de_config_destroy(ctx);
+
+ ctx = NULL;
+ }
+
+ return ctx;
+}
+
+/**
+ * Function to destroy docker_events plugin.
+ *
+ * @param ctx Pointer to flb_in_de_config
+ *
+ * @return int 0
+ */
+int de_config_destroy(struct flb_in_de_config *ctx)
+{
+ if (ctx->buf) {
+ flb_free(ctx->buf);
+ }
+
+ flb_log_event_encoder_destroy(&ctx->log_encoder);
+
+ flb_free(ctx);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_docker_events/docker_events_config.h b/src/fluent-bit/plugins/in_docker_events/docker_events_config.h
new file mode 100644
index 000000000..94a6d87db
--- /dev/null
+++ b/src/fluent-bit/plugins/in_docker_events/docker_events_config.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_DE_CONFIG_H
+#define FLB_IN_DE_CONFIG_H
+
+#include "docker_events.h"
+
+struct flb_in_de_config *de_config_init(struct flb_input_instance *ins,
+ struct flb_config *config);
+int de_config_destroy(struct flb_in_de_config *config);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_dummy/CMakeLists.txt b/src/fluent-bit/plugins/in_dummy/CMakeLists.txt
new file mode 100644
index 000000000..52b03a2c8
--- /dev/null
+++ b/src/fluent-bit/plugins/in_dummy/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ in_dummy.c)
+
+FLB_PLUGIN(in_dummy "${src}" "")
diff --git a/src/fluent-bit/plugins/in_dummy/in_dummy.c b/src/fluent-bit/plugins/in_dummy/in_dummy.c
new file mode 100644
index 000000000..75d1b7333
--- /dev/null
+++ b/src/fluent-bit/plugins/in_dummy/in_dummy.c
@@ -0,0 +1,438 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+
+#include <msgpack.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_log_event.h>
+
+#include "in_dummy.h"
+
+static void generate_timestamp(struct flb_dummy *ctx,
+ struct flb_time *result)
+{
+ struct flb_time current_timestamp;
+ struct flb_time delta;
+
+ if (ctx->fixed_timestamp) {
+ if (ctx->dummy_timestamp_set) {
+ flb_time_copy(result, &ctx->dummy_timestamp);
+ }
+ else {
+ flb_time_copy(result, &ctx->base_timestamp);
+ }
+ }
+ else {
+ if (ctx->dummy_timestamp_set) {
+ flb_time_zero(&delta);
+
+ flb_time_get(&current_timestamp);
+
+ flb_time_diff(&current_timestamp,
+ &ctx->base_timestamp,
+ &delta);
+
+ flb_time_add(&ctx->dummy_timestamp,
+ &delta,
+ result);
+ }
+ else {
+ flb_time_get(result);
+ }
+ }
+}
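+
+/*
+ * Worked example for the logic above (numbers are illustrative): with
+ * start_time_sec=10 and a base timestamp captured at wall-clock time T, an
+ * event generated at T+3s is stamped 13s when fixed_timestamp is off (dummy
+ * start plus the elapsed delta), and exactly 10s when fixed_timestamp is on.
+ */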
+
+static int generate_event(struct flb_dummy *ctx)
+{
+ size_t chunk_offset;
+ size_t body_length;
+ char *body_buffer;
+ size_t body_start;
+ struct flb_time timestamp;
+ msgpack_unpacked object;
+ int result;
+
+ result = FLB_EVENT_ENCODER_SUCCESS;
+ body_start = 0;
+ chunk_offset = 0;
+
+ generate_timestamp(ctx, &timestamp);
+
+ msgpack_unpacked_init(&object);
+
+ while (result == FLB_EVENT_ENCODER_SUCCESS &&
+ msgpack_unpack_next(&object,
+ ctx->ref_body_msgpack,
+ ctx->ref_body_msgpack_size,
+ &chunk_offset) == MSGPACK_UNPACK_SUCCESS) {
+ body_buffer = &ctx->ref_body_msgpack[body_start];
+ body_length = chunk_offset - body_start;
+
+ if (object.data.type == MSGPACK_OBJECT_MAP) {
+ flb_log_event_encoder_begin_record(ctx->encoder);
+
+ flb_log_event_encoder_set_timestamp(ctx->encoder, &timestamp);
+
+ result = flb_log_event_encoder_set_metadata_from_raw_msgpack(
+ ctx->encoder,
+ ctx->ref_metadata_msgpack,
+ ctx->ref_metadata_msgpack_size);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_body_from_raw_msgpack(
+ ctx->encoder,
+ body_buffer,
+ body_length);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_commit_record(ctx->encoder);
+ }
+ }
+
+ body_start = chunk_offset;
+ }
+
+ msgpack_unpacked_destroy(&object);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = 0;
+ }
+ else {
+ result = -1;
+ }
+
+ return result;
+}
+
+/* cb_collect callback */
+static int in_dummy_collect(struct flb_input_instance *ins,
+ struct flb_config *config,
+ void *in_context)
+{
+ int result;
+ int index;
+ struct flb_dummy *ctx;
+
+ ctx = (struct flb_dummy *) in_context;
+
+ if (ctx->samples > 0 && (ctx->samples_count >= ctx->samples)) {
+ return -1;
+ }
+
+ result = 0;
+
+ if (ctx->samples_count == 0 || !ctx->fixed_timestamp) {
+ flb_log_event_encoder_reset(ctx->encoder);
+
+ for (index = 0 ; index < ctx->copies && result == 0 ; index++) {
+ result = generate_event(ctx);
+ }
+ }
+
+ if (result == 0) {
+ if (ctx->encoder->output_length > 0) {
+ flb_input_log_append(ins, NULL, 0,
+ ctx->encoder->output_buffer,
+ ctx->encoder->output_length);
+ }
+ else {
+ flb_plg_error(ins, "log chunk size == 0");
+ }
+ }
+ else {
+        flb_plg_error(ins, "log chunk generation error (%d)", result);
+ }
+
+ if (ctx->samples > 0) {
+ ctx->samples_count++;
+ }
+
+ return 0;
+}
+
+static int config_destroy(struct flb_dummy *ctx)
+{
+ if (ctx->ref_body_msgpack != NULL) {
+ flb_free(ctx->ref_body_msgpack);
+ }
+
+ if (ctx->ref_metadata_msgpack != NULL) {
+ flb_free(ctx->ref_metadata_msgpack);
+ }
+
+ if (ctx->encoder != NULL) {
+ flb_log_event_encoder_destroy(ctx->encoder);
+ }
+
+ flb_free(ctx);
+
+ return 0;
+}
+
+/* Set plugin configuration */
+static int configure(struct flb_dummy *ctx,
+ struct flb_input_instance *in,
+ struct timespec *tm)
+{
+ const char *msg;
+ int root_type;
+ int ret = -1;
+
+ ctx->ref_metadata_msgpack = NULL;
+ ctx->ref_body_msgpack = NULL;
+ ctx->dummy_timestamp_set = FLB_FALSE;
+
+ ret = flb_input_config_map_set(in, (void *) ctx);
+ if (ret == -1) {
+ return -1;
+ }
+
+ /* interval settings */
+ tm->tv_sec = 1;
+ tm->tv_nsec = 0;
+
+ if (ctx->rate > 1) {
+ tm->tv_sec = 0;
+ tm->tv_nsec = 1000000000 / ctx->rate;
+ }
+
+ /* dummy timestamp */
+ flb_time_zero(&ctx->dummy_timestamp);
+
+ if (ctx->start_time_sec >= 0 || ctx->start_time_nsec >= 0) {
+ ctx->dummy_timestamp_set = FLB_TRUE;
+
+ if (ctx->start_time_sec >= 0) {
+ ctx->dummy_timestamp.tm.tv_sec = ctx->start_time_sec;
+ }
+ if (ctx->start_time_nsec >= 0) {
+ ctx->dummy_timestamp.tm.tv_nsec = ctx->start_time_nsec;
+ }
+ }
+
+ flb_time_get(&ctx->base_timestamp);
+
+ /* handle it explicitly since we need to validate it is valid JSON */
+ msg = flb_input_get_property("dummy", in);
+ if (msg == NULL) {
+ msg = DEFAULT_DUMMY_MESSAGE;
+ }
+
+ ret = flb_pack_json(msg,
+ strlen(msg),
+ &ctx->ref_body_msgpack,
+ &ctx->ref_body_msgpack_size,
+ &root_type,
+ NULL);
+
+ if (ret != 0) {
+        flb_plg_warn(ctx->ins, "data is incomplete. Using the default string.");
+
+ ret = flb_pack_json(DEFAULT_DUMMY_MESSAGE,
+ strlen(DEFAULT_DUMMY_MESSAGE),
+ &ctx->ref_body_msgpack,
+ &ctx->ref_body_msgpack_size,
+ &root_type,
+ NULL);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "unexpected error");
+ return -1;
+ }
+ }
+
+ /* handle it explicitly since we need to validate it is valid JSON */
+ msg = flb_input_get_property("metadata", in);
+
+ if (msg == NULL) {
+ msg = DEFAULT_DUMMY_METADATA;
+ }
+
+ ret = flb_pack_json(msg,
+ strlen(msg),
+ &ctx->ref_metadata_msgpack,
+ &ctx->ref_metadata_msgpack_size,
+ &root_type,
+ NULL);
+
+ if (ret != 0) {
+        flb_plg_warn(ctx->ins, "data is incomplete. Using the default string.");
+
+ ret = flb_pack_json(DEFAULT_DUMMY_METADATA,
+ strlen(DEFAULT_DUMMY_METADATA),
+ &ctx->ref_metadata_msgpack,
+ &ctx->ref_metadata_msgpack_size,
+ &root_type,
+ NULL);
+
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "unexpected error");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+
+
+
+/* Initialize plugin */
+static int in_dummy_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret = -1;
+ struct flb_dummy *ctx = NULL;
+ struct timespec tm;
+
+ /* Allocate space for the configuration */
+ ctx = flb_malloc(sizeof(struct flb_dummy));
+ if (ctx == NULL) {
+ return -1;
+ }
+ ctx->ins = in;
+ ctx->samples = 0;
+ ctx->samples_count = 0;
+
+ /* Initialize head config */
+ ret = configure(ctx, in, &tm);
+ if (ret < 0) {
+ config_destroy(ctx);
+ return -1;
+ }
+
+ ctx->encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->encoder == NULL) {
+ flb_plg_error(in, "could not initialize event encoder");
+ config_destroy(ctx);
+
+ return -1;
+ }
+
+ flb_input_set_context(in, ctx);
+
+ ret = flb_input_set_collector_time(in,
+ in_dummy_collect,
+ tm.tv_sec,
+ tm.tv_nsec, config);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "could not set collector for dummy input plugin");
+ config_destroy(ctx);
+ return -1;
+ }
+
+ ctx->coll_fd = ret;
+
+ flb_time_get(&ctx->base_timestamp);
+
+ return 0;
+}
+
+static void in_dummy_pause(void *data, struct flb_config *config)
+{
+ struct flb_dummy *ctx = data;
+
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+}
+
+static void in_dummy_resume(void *data, struct flb_config *config)
+{
+ struct flb_dummy *ctx = data;
+
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+}
+
+static int in_dummy_exit(void *data, struct flb_config *config)
+{
+    (void) config;
+ struct flb_dummy *ctx = data;
+
+ config_destroy(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_INT, "samples", "0",
+ 0, FLB_TRUE, offsetof(struct flb_dummy, samples),
+      "set the number of times to generate events."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "dummy", DEFAULT_DUMMY_MESSAGE,
+ 0, FLB_FALSE, 0,
+ "set the sample record to be generated. It should be a JSON object."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "metadata", DEFAULT_DUMMY_METADATA,
+ 0, FLB_FALSE, 0,
+ "set the sample metadata to be generated. It should be a JSON object."
+ },
+ {
+ FLB_CONFIG_MAP_INT, "rate", "1",
+ 0, FLB_TRUE, offsetof(struct flb_dummy, rate),
+      "set the number of events per second."
+ },
+ {
+ FLB_CONFIG_MAP_INT, "copies", "1",
+ 0, FLB_TRUE, offsetof(struct flb_dummy, copies),
+      "set the number of copies to generate per collection cycle."
+ },
+ {
+ FLB_CONFIG_MAP_INT, "start_time_sec", "-1",
+ 0, FLB_TRUE, offsetof(struct flb_dummy, start_time_sec),
+ "set a dummy base timestamp in seconds."
+ },
+ {
+ FLB_CONFIG_MAP_INT, "start_time_nsec", "-1",
+ 0, FLB_TRUE, offsetof(struct flb_dummy, start_time_nsec),
+ "set a dummy base timestamp in nanoseconds."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "fixed_timestamp", "off",
+ 0, FLB_TRUE, offsetof(struct flb_dummy, fixed_timestamp),
+      "use a fixed timestamp, allowing the message to be pre-generated once."
+ },
+ {0}
+};
+
+
+struct flb_input_plugin in_dummy_plugin = {
+ .name = "dummy",
+ .description = "Generate dummy data",
+ .cb_init = in_dummy_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_dummy_collect,
+ .cb_flush_buf = NULL,
+ .config_map = config_map,
+ .cb_pause = in_dummy_pause,
+ .cb_resume = in_dummy_resume,
+ .cb_exit = in_dummy_exit
+};
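+
+/*
+ * Illustrative configuration (not part of the sources; property names come
+ * from the config_map above, values are examples only):
+ *
+ *   [INPUT]
+ *       Name    dummy
+ *       Dummy   {"message":"custom dummy"}
+ *       Rate    2
+ *       Samples 10
+ *       Copies  1
+ */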
diff --git a/src/fluent-bit/plugins/in_dummy/in_dummy.h b/src/fluent-bit/plugins/in_dummy/in_dummy.h
new file mode 100644
index 000000000..d351420cb
--- /dev/null
+++ b/src/fluent-bit/plugins/in_dummy/in_dummy.h
@@ -0,0 +1,58 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_DUMMY_H
+#define FLB_IN_DUMMY_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#define DEFAULT_DUMMY_MESSAGE "{\"message\":\"dummy\"}"
+#define DEFAULT_DUMMY_METADATA "{}"
+
+struct flb_dummy {
+ int coll_fd;
+
+ int rate;
+ int copies;
+ int samples;
+ int samples_count;
+
+ int dummy_timestamp_set;
+ struct flb_time base_timestamp;
+ struct flb_time dummy_timestamp;
+
+ int start_time_sec;
+ int start_time_nsec;
+
+ bool fixed_timestamp;
+
+ char *ref_metadata_msgpack;
+ size_t ref_metadata_msgpack_size;
+
+ char *ref_body_msgpack;
+ size_t ref_body_msgpack_size;
+
+ struct flb_log_event_encoder *encoder;
+
+ struct flb_input_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_elasticsearch/CMakeLists.txt b/src/fluent-bit/plugins/in_elasticsearch/CMakeLists.txt
new file mode 100644
index 000000000..50a472f6a
--- /dev/null
+++ b/src/fluent-bit/plugins/in_elasticsearch/CMakeLists.txt
@@ -0,0 +1,12 @@
+if(NOT FLB_METRICS)
+  message(FATAL_ERROR "Elasticsearch input plugin requires FLB_METRICS=On.")
+endif()
+
+set(src
+ in_elasticsearch.c
+ in_elasticsearch_config.c
+ in_elasticsearch_bulk_conn.c
+ in_elasticsearch_bulk_prot.c
+ )
+
+FLB_PLUGIN(in_elasticsearch "${src}" "")
diff --git a/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch.c b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch.c
new file mode 100644
index 000000000..af1a594c6
--- /dev/null
+++ b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch.c
@@ -0,0 +1,245 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_random.h>
+
+#include "in_elasticsearch.h"
+#include "in_elasticsearch_config.h"
+#include "in_elasticsearch_bulk_conn.h"
+
+/*
+ * For a server event, the collection event means a new client has arrived; we
+ * accept the connection and create a new TCP instance which will wait for
+ * JSON map messages.
+ */
+static int in_elasticsearch_bulk_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_connection *connection;
+ struct in_elasticsearch_bulk_conn *conn;
+ struct flb_in_elasticsearch *ctx;
+
+ ctx = in_context;
+
+ connection = flb_downstream_conn_get(ctx->downstream);
+
+ if (connection == NULL) {
+ flb_plg_error(ctx->ins, "could not accept new connection");
+
+ return -1;
+ }
+
+ flb_plg_trace(ctx->ins, "new TCP connection arrived FD=%i",
+ connection->fd);
+
+ conn = in_elasticsearch_bulk_conn_add(connection, ctx);
+
+ if (conn == NULL) {
+ flb_downstream_conn_release(connection);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static void bytes_to_groupname(unsigned char *data, char *buf, size_t len) {
+ int index;
+ char charset[] = "0123456789"
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+
+ while (len-- > 0) {
+ index = (int) data[len];
+ index = index % (sizeof(charset) - 1);
+ buf[len] = charset[index];
+ }
+}
+
+static void bytes_to_nodename(unsigned char *data, char *buf, size_t len) {
+ int index;
+ char charset[] = "0123456789"
+ "abcdefghijklmnopqrstuvwxyz";
+
+ while (len-- > 0) {
+ index = (int) data[len];
+ index = index % (sizeof(charset) - 1);
+ buf[len] = charset[index];
+ }
+}
+
+static int in_elasticsearch_bulk_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ unsigned short int port;
+ int ret;
+ struct flb_in_elasticsearch *ctx;
+ unsigned char rand[16];
+
+ (void) data;
+
+ /* Create context and basic conf */
+ ctx = in_elasticsearch_config_create(ins);
+ if (!ctx) {
+ return -1;
+ }
+
+ ctx->collector_id = -1;
+
+ /* Populate context with config map defaults and incoming properties */
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "configuration error");
+ in_elasticsearch_config_destroy(ctx);
+ return -1;
+ }
+
+ /* Set the context */
+ flb_input_set_context(ins, ctx);
+
+ port = (unsigned short int) strtoul(ctx->tcp_port, NULL, 10);
+
+ if (flb_random_bytes(rand, 16)) {
+ flb_plg_error(ctx->ins, "cannot generate cluster name");
+ in_elasticsearch_config_destroy(ctx);
+ return -1;
+ }
+
+ bytes_to_groupname(rand, ctx->cluster_name, 16);
+
+ if (flb_random_bytes(rand, 12)) {
+ flb_plg_error(ctx->ins, "cannot generate node name");
+ in_elasticsearch_config_destroy(ctx);
+ return -1;
+ }
+
+ bytes_to_nodename(rand, ctx->node_name, 12);
+
+ ctx->downstream = flb_downstream_create(FLB_TRANSPORT_TCP,
+ ins->flags,
+ ctx->listen,
+ port,
+ ins->tls,
+ config,
+ &ins->net_setup);
+
+ if (ctx->downstream == NULL) {
+ flb_plg_error(ctx->ins,
+ "could not initialize downstream on %s:%s. Aborting",
+ ctx->listen, ctx->tcp_port);
+
+ in_elasticsearch_config_destroy(ctx);
+
+ return -1;
+ }
+
+ flb_input_downstream_set(ctx->downstream, ctx->ins);
+
+ /* Collect upon data available on the standard input */
+ ret = flb_input_set_collector_socket(ins,
+ in_elasticsearch_bulk_collect,
+ ctx->downstream->server_fd,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Could not set collector for IN_ELASTICSEARCH input plugin");
+ in_elasticsearch_config_destroy(ctx);
+
+ return -1;
+ }
+
+ ctx->collector_id = ret;
+
+ return 0;
+}
+
+static int in_elasticsearch_bulk_exit(void *data, struct flb_config *config)
+{
+ struct flb_in_elasticsearch *ctx;
+
+ (void) config;
+
+ ctx = data;
+
+ if (ctx != NULL) {
+ in_elasticsearch_config_destroy(ctx);
+ }
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_max_size", HTTP_BUFFER_MAX_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_in_elasticsearch, buffer_max_size),
+ "Set the maximum size of buffer"
+ },
+
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_chunk_size", HTTP_BUFFER_CHUNK_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_in_elasticsearch, buffer_chunk_size),
+ "Set the buffer chunk size"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "tag_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_elasticsearch, tag_key),
+     "Specify a key name whose value will be extracted and used as the record tag"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "meta_key", "@meta",
+ 0, FLB_TRUE, offsetof(struct flb_in_elasticsearch, meta_key),
+ "Specify a key name for meta information"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "hostname", "localhost",
+ 0, FLB_TRUE, offsetof(struct flb_in_elasticsearch, hostname),
+     "Specify hostname or FQDN. This parameter is effective for sniffing node information."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "version", "8.0.0",
+ 0, FLB_TRUE, offsetof(struct flb_in_elasticsearch, es_version),
+     "Specify the Elasticsearch server version to return."
+ },
+
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_elasticsearch_plugin = {
+ .name = "elasticsearch",
+ .description = "HTTP Endpoints for Elasticsearch (Bulk API)",
+ .cb_init = in_elasticsearch_bulk_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_elasticsearch_bulk_collect,
+ .cb_flush_buf = NULL,
+ .cb_pause = NULL,
+ .cb_resume = NULL,
+ .cb_exit = in_elasticsearch_bulk_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET_SERVER | FLB_IO_OPT_TLS
+};
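+
+/*
+ * Illustrative configuration (not part of the sources). The last three
+ * property names come from the config_map above; 'Listen' and 'Port' are the
+ * generic network properties handled outside this file (assumption), and all
+ * values are examples only:
+ *
+ *   [INPUT]
+ *       Name              elasticsearch
+ *       Listen            0.0.0.0
+ *       Port              9200
+ *       Buffer_Max_Size   4M
+ *       Buffer_Chunk_Size 512K
+ *       Tag_Key           my_tag_field
+ */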
diff --git a/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch.h b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch.h
new file mode 100644
index 000000000..159dff88c
--- /dev/null
+++ b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch.h
@@ -0,0 +1,59 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_ELASTICSEARCH_H
+#define FLB_IN_ELASTICSEARCH_H
+
+#include <fluent-bit/flb_downstream.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <monkey/monkey.h>
+
+#define HTTP_BUFFER_MAX_SIZE "4M"
+#define HTTP_BUFFER_CHUNK_SIZE "512K"
+
+struct flb_in_elasticsearch {
+ flb_sds_t listen;
+ flb_sds_t tcp_port;
+ const char *tag_key;
+ const char *meta_key;
+ flb_sds_t hostname;
+ flb_sds_t es_version;
+ char cluster_name[16];
+ char node_name[12];
+
+ int collector_id;
+
+ size_t buffer_max_size; /* Maximum buffer size */
+ size_t buffer_chunk_size; /* Chunk allocation size */
+
+ struct flb_downstream *downstream; /* Client manager */
+ struct mk_list connections; /* linked list of connections */
+
+ struct flb_log_event_encoder log_encoder;
+
+ struct mk_server *server;
+ struct flb_input_instance *ins;
+};
+
+
+#endif
diff --git a/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_conn.c b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_conn.c
new file mode 100644
index 000000000..f835af26a
--- /dev/null
+++ b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_conn.c
@@ -0,0 +1,307 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_engine.h>
+
+#include "in_elasticsearch.h"
+#include "in_elasticsearch_bulk_conn.h"
+#include "in_elasticsearch_bulk_prot.h"
+
+static void in_elasticsearch_bulk_conn_request_init(struct mk_http_session *session,
+ struct mk_http_request *request);
+
+static int in_elasticsearch_bulk_conn_event(void *data)
+{
+ int status;
+ size_t size;
+ ssize_t available;
+ ssize_t bytes;
+ char *tmp;
+ char *request_end;
+ size_t request_len;
+ struct flb_connection *connection;
+ struct in_elasticsearch_bulk_conn *conn;
+ struct mk_event *event;
+ struct flb_in_elasticsearch *ctx;
+
+ connection = (struct flb_connection *) data;
+
+ conn = connection->user_data;
+
+ ctx = conn->ctx;
+
+ event = &connection->event;
+
+ if (event->mask & MK_EVENT_READ) {
+ available = (conn->buf_size - conn->buf_len) - 1;
+ if (available < 1) {
+ if (conn->buf_size + ctx->buffer_chunk_size > ctx->buffer_max_size) {
+ flb_plg_trace(ctx->ins,
+ "fd=%i incoming data exceed limit (%zu KB)",
+ event->fd, (ctx->buffer_max_size / 1024));
+ in_elasticsearch_bulk_conn_del(conn);
+ return -1;
+ }
+
+ size = conn->buf_size + ctx->buffer_chunk_size;
+ tmp = flb_realloc(conn->buf_data, size);
+ if (!tmp) {
+ flb_errno();
+ in_elasticsearch_bulk_conn_del(conn);
+ return -1;
+ }
+ flb_plg_trace(ctx->ins, "fd=%i buffer realloc %i -> %zu",
+ event->fd, conn->buf_size, size);
+
+ conn->buf_data = tmp;
+ conn->buf_size = size;
+ available = (conn->buf_size - conn->buf_len) - 1;
+ }
+
+ /* Read data */
+ bytes = flb_io_net_read(connection,
+ (void *) &conn->buf_data[conn->buf_len],
+ available);
+
+ if (bytes <= 0) {
+ flb_plg_trace(ctx->ins, "fd=%i closed connection", event->fd);
+ in_elasticsearch_bulk_conn_del(conn);
+ return -1;
+ }
+
+ flb_plg_trace(ctx->ins, "read()=%zi pre_len=%i now_len=%zi",
+ bytes, conn->buf_len, conn->buf_len + bytes);
+ conn->buf_len += bytes;
+ conn->buf_data[conn->buf_len] = '\0';
+
+ status = mk_http_parser(&conn->request, &conn->session.parser,
+ conn->buf_data, conn->buf_len, conn->session.server);
+
+ if (status == MK_HTTP_PARSER_OK) {
+ /* Do more logic parsing and checks for this request */
+ in_elasticsearch_bulk_prot_handle(ctx, conn, &conn->session, &conn->request);
+
+ /* Evict the processed request from the connection buffer and reinitialize
+ * the HTTP parser.
+ */
+
+ request_end = NULL;
+
+ if (NULL != conn->request.data.data) {
+ request_end = &conn->request.data.data[conn->request.data.len];
+ }
+ else {
+ request_end = strstr(conn->buf_data, "\r\n\r\n");
+
+ if(NULL != request_end) {
+ request_end = &request_end[4];
+ }
+ }
+
+ if (NULL != request_end) {
+ request_len = (size_t)(request_end - conn->buf_data);
+
+ if (0 < (conn->buf_len - request_len)) {
+ memmove(conn->buf_data, &conn->buf_data[request_len],
+ conn->buf_len - request_len);
+
+ conn->buf_data[conn->buf_len - request_len] = '\0';
+ conn->buf_len -= request_len;
+ }
+ else {
+ memset(conn->buf_data, 0, request_len);
+
+ conn->buf_len = 0;
+ }
+
+ /* Reinitialize the parser so the next request is properly
+ * handled, the additional memset intends to wipe any left over data
+ * from the headers parsed in the previous request.
+ */
+ memset(&conn->session.parser, 0, sizeof(struct mk_http_parser));
+ mk_http_parser_init(&conn->session.parser);
+ in_elasticsearch_bulk_conn_request_init(&conn->session, &conn->request);
+ }
+ }
+ else if (status == MK_HTTP_PARSER_ERROR) {
+ in_elasticsearch_bulk_prot_handle_error(ctx, conn, &conn->session, &conn->request);
+
+ /* Reinitialize the parser so the next request is properly
+ * handled, the additional memset intends to wipe any left over data
+ * from the headers parsed in the previous request.
+ */
+ memset(&conn->session.parser, 0, sizeof(struct mk_http_parser));
+ mk_http_parser_init(&conn->session.parser);
+ in_elasticsearch_bulk_conn_request_init(&conn->session, &conn->request);
+ }
+
+ /* FIXME: add Protocol handler here */
+ return bytes;
+ }
+
+ if (event->mask & MK_EVENT_CLOSE) {
+ flb_plg_trace(ctx->ins, "fd=%i hangup", event->fd);
+ in_elasticsearch_bulk_conn_del(conn);
+ return -1;
+ }
+
+ return 0;
+
+}
+
+static void in_elasticsearch_bulk_conn_session_init(struct mk_http_session *session,
+ struct mk_server *server,
+ int client_fd)
+{
+ /* Alloc memory for node */
+ session->_sched_init = MK_TRUE;
+ session->pipelined = MK_FALSE;
+ session->counter_connections = 0;
+ session->close_now = MK_FALSE;
+ session->status = MK_REQUEST_STATUS_INCOMPLETE;
+ session->server = server;
+ session->socket = client_fd;
+
+ /* creation time in unix time */
+ session->init_time = time(NULL);
+
+ session->channel = mk_channel_new(MK_CHANNEL_SOCKET, session->socket);
+ session->channel->io = session->server->network;
+
+ /* Init session request list */
+ mk_list_init(&session->request_list);
+
+ /* Initialize the parser */
+ mk_http_parser_init(&session->parser);
+}
+
+static void in_elasticsearch_bulk_conn_request_init(struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ memset(request, 0, sizeof(struct mk_http_request));
+
+ mk_http_request_init(session, request, session->server);
+
+ request->in_headers.type = MK_STREAM_IOV;
+ request->in_headers.dynamic = MK_FALSE;
+ request->in_headers.cb_consumed = NULL;
+ request->in_headers.cb_finished = NULL;
+ request->in_headers.stream = &request->stream;
+
+ mk_list_add(&request->in_headers._head, &request->stream.inputs);
+
+ request->session = session;
+}
+
+struct in_elasticsearch_bulk_conn *in_elasticsearch_bulk_conn_add(struct flb_connection *connection,
+ struct flb_in_elasticsearch *ctx)
+{
+ struct in_elasticsearch_bulk_conn *conn;
+ int ret;
+
+ conn = flb_calloc(1, sizeof(struct in_elasticsearch_bulk_conn));
+ if (!conn) {
+ flb_errno();
+ return NULL;
+ }
+
+ conn->connection = connection;
+
+ /* Set data for the event-loop */
+ MK_EVENT_NEW(&connection->event);
+
+ connection->user_data = conn;
+ connection->event.type = FLB_ENGINE_EV_CUSTOM;
+ connection->event.handler = in_elasticsearch_bulk_conn_event;
+
+ /* Connection info */
+ conn->ctx = ctx;
+ conn->buf_len = 0;
+
+ conn->buf_data = flb_malloc(ctx->buffer_chunk_size);
+ if (!conn->buf_data) {
+ flb_errno();
+
+        flb_plg_error(ctx->ins, "could not allocate connection buffer");
+ flb_free(conn);
+
+ return NULL;
+ }
+ conn->buf_size = ctx->buffer_chunk_size;
+
+ /* Register instance into the event loop */
+ ret = mk_event_add(flb_engine_evl_get(),
+ connection->fd,
+ FLB_ENGINE_EV_CUSTOM,
+ MK_EVENT_READ,
+ &connection->event);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not register new connection");
+
+ flb_free(conn->buf_data);
+ flb_free(conn);
+
+ return NULL;
+ }
+
+ /* Initialize HTTP Session: this is a custom context for Monkey HTTP */
+ in_elasticsearch_bulk_conn_session_init(&conn->session, ctx->server, conn->connection->fd);
+
+ /* Initialize HTTP Request: this is the initial request and it will be reinitialized
+ * automatically after the request is handled so it can be used for the next one.
+ */
+ in_elasticsearch_bulk_conn_request_init(&conn->session, &conn->request);
+
+ /* Link connection node to parent context list */
+ mk_list_add(&conn->_head, &ctx->connections);
+
+ return conn;
+}
+
+int in_elasticsearch_bulk_conn_del(struct in_elasticsearch_bulk_conn *conn)
+{
+ if (conn->session.channel != NULL) {
+ mk_channel_release(conn->session.channel);
+ }
+
+ /* The downstream unregisters the file descriptor from the event-loop
+ * so there's nothing to be done by the plugin
+ */
+ flb_downstream_conn_release(conn->connection);
+
+ mk_list_del(&conn->_head);
+
+ flb_free(conn->buf_data);
+ flb_free(conn);
+
+ return 0;
+}
+
+void in_elasticsearch_bulk_conn_release_all(struct flb_in_elasticsearch *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct in_elasticsearch_bulk_conn *conn;
+
+ mk_list_foreach_safe(head, tmp, &ctx->connections) {
+ conn = mk_list_entry(head, struct in_elasticsearch_bulk_conn, _head);
+ in_elasticsearch_bulk_conn_del(conn);
+ }
+}
diff --git a/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_conn.h b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_conn.h
new file mode 100644
index 000000000..a5a7593ac
--- /dev/null
+++ b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_conn.h
@@ -0,0 +1,55 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_ELASTICSEARCH_BULK_CONN
+#define FLB_IN_ELASTICSEARCH_BULK_CONN
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_connection.h>
+
+#include <monkey/mk_http.h>
+#include <monkey/mk_http_parser.h>
+#include <monkey/mk_utils.h>
+
+struct in_elasticsearch_bulk_conn {
+ /* Buffer */
+ char *buf_data; /* Buffer data */
+ int buf_len; /* Data length */
+ int buf_size; /* Buffer size */
+
+ /*
+     * Parser context: we only hold one parser per connection,
+     * which is re-used every time we have a new request.
+ */
+ struct mk_http_parser parser;
+ struct mk_http_request request;
+ struct mk_http_session session;
+ struct flb_connection *connection;
+
+ void *ctx; /* Plugin parent context */
+ struct mk_list _head; /* link to flb_es_bulk->connections */
+};
+
+struct in_elasticsearch_bulk_conn *in_elasticsearch_bulk_conn_add(struct flb_connection *connection,
+ struct flb_in_elasticsearch *ctx);
+int in_elasticsearch_bulk_conn_del(struct in_elasticsearch_bulk_conn *conn);
+void in_elasticsearch_bulk_conn_release_all(struct flb_in_elasticsearch *ctx);
+
+
+#endif
diff --git a/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_prot.c b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_prot.c
new file mode 100644
index 000000000..c7acfd671
--- /dev/null
+++ b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_prot.c
@@ -0,0 +1,922 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_version.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_gzip.h>
+
+#include <monkey/monkey.h>
+#include <monkey/mk_core.h>
+
+#include "in_elasticsearch.h"
+#include "in_elasticsearch_bulk_conn.h"
+#include "in_elasticsearch_bulk_prot.h"
+
+#define HTTP_CONTENT_JSON 0
+#define HTTP_CONTENT_NDJSON 1
+
+static int send_empty_response(struct in_elasticsearch_bulk_conn *conn, int http_status)
+{
+ size_t sent;
+ flb_sds_t out;
+
+ out = flb_sds_create_size(256);
+ if (!out) {
+ return -1;
+ }
+
+ if (http_status == 200) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 200 OK\r\n"
+ "Content-Type: application/json\r\n\r\n");
+ }
+
+ /* We should check this operations result */
+ flb_io_net_write(conn->connection,
+ (void *) out,
+ flb_sds_len(out),
+ &sent);
+
+ flb_sds_destroy(out);
+
+ return 0;
+}
+
+static int send_json_message_response(struct in_elasticsearch_bulk_conn *conn, int http_status, char *message)
+{
+ size_t sent;
+ int len;
+ flb_sds_t out;
+
+ out = flb_sds_create_size(256);
+ if (!out) {
+ return -1;
+ }
+
+ if (message) {
+ len = strlen(message);
+ }
+ else {
+ len = 0;
+ }
+
+ if (http_status == 200) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 200 OK\r\n"
+ "Content-Type: application/json\r\n"
+ "Content-Length: %i\r\n\r\n%s",
+ len, message);
+ }
+
+ /* We should check this operations result */
+ flb_io_net_write(conn->connection,
+ (void *) out,
+ flb_sds_len(out),
+ &sent);
+
+ flb_sds_destroy(out);
+
+ return 0;
+}
+
+static int send_version_message_response(struct flb_in_elasticsearch *ctx,
+ struct in_elasticsearch_bulk_conn *conn, int http_status)
+{
+ size_t sent;
+ int len;
+ flb_sds_t out;
+ flb_sds_t resp;
+
+ out = flb_sds_create_size(256);
+ if (!out) {
+ return -1;
+ }
+ resp = flb_sds_create_size(384);
+ if (!resp) {
+ flb_sds_destroy(out);
+ return -1;
+ }
+
+ flb_sds_printf(&resp,
+ ES_VERSION_RESPONSE_TEMPLATE,
+ ctx->es_version);
+
+ len = flb_sds_len(resp);
+
+ if (http_status == 200) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 200 OK\r\n"
+ "Content-Type: application/json\r\n"
+ "Content-Length: %i\r\n\r\n%s",
+ len, resp);
+ }
+
+ /* We should check this operations result */
+ flb_io_net_write(conn->connection,
+ (void *) out,
+ flb_sds_len(out),
+ &sent);
+
+ flb_sds_destroy(resp);
+ flb_sds_destroy(out);
+
+ return 0;
+}
+
+static int send_dummy_sniffer_response(struct in_elasticsearch_bulk_conn *conn, int http_status,
+ struct flb_in_elasticsearch *ctx)
+{
+ size_t sent;
+ int len;
+ flb_sds_t out;
+ flb_sds_t resp;
+ flb_sds_t hostname;
+
+ if (ctx->hostname != NULL) {
+ hostname = ctx->hostname;
+ }
+ else {
+ hostname = "localhost";
+ }
+
+ out = flb_sds_create_size(384);
+ if (!out) {
+ return -1;
+ }
+
+ resp = flb_sds_create_size(384);
+ if (!resp) {
+ flb_sds_destroy(out);
+ return -1;
+ }
+
+ flb_sds_printf(&resp,
+ ES_NODES_TEMPLATE,
+ ctx->cluster_name, ctx->node_name,
+ hostname, ctx->tcp_port, ctx->buffer_max_size);
+
+ len = flb_sds_len(resp) ;
+
+ if (http_status == 200) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 200 OK\r\n"
+ "Content-Type: application/json\r\n"
+ "Content-Length: %i\r\n\r\n%s",
+ len, resp);
+ }
+
+ /* We should check this operations result */
+ flb_io_net_write(conn->connection,
+ (void *) out,
+ flb_sds_len(out),
+ &sent);
+
+ flb_sds_destroy(resp);
+ flb_sds_destroy(out);
+
+ return 0;
+}
+
+static int send_response(struct in_elasticsearch_bulk_conn *conn, int http_status, char *message)
+{
+ size_t sent;
+ int len;
+ flb_sds_t out;
+
+ out = flb_sds_create_size(256);
+ if (!out) {
+ return -1;
+ }
+
+ if (message) {
+ len = strlen(message);
+ }
+ else {
+ len = 0;
+ }
+
+ if (http_status == 200) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 200 OK\r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "Content-Type: application/json\r\n"
+ "Content-Length: %i\r\n\r\n%s",
+ FLB_VERSION_STR,
+ len, message);
+ }
+ else if (http_status == 400) {
+ flb_sds_printf(&out,
+                       "HTTP/1.1 400 Bad Request\r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "Content-Length: %i\r\n\r\n%s",
+ FLB_VERSION_STR,
+ len, message);
+ }
+
+ /* We should check this operations result */
+ flb_io_net_write(conn->connection,
+ (void *) out,
+ flb_sds_len(out),
+ &sent);
+
+ flb_sds_destroy(out);
+
+ return 0;
+}
+
+/* implements functionality to get tag from key in record */
+static flb_sds_t tag_key(struct flb_in_elasticsearch *ctx, msgpack_object *map)
+{
+ size_t map_size = map->via.map.size;
+ msgpack_object_kv *kv;
+ msgpack_object key;
+ msgpack_object val;
+ char *key_str = NULL;
+ char *val_str = NULL;
+ size_t key_str_size = 0;
+ size_t val_str_size = 0;
+ int j;
+ int check = FLB_FALSE;
+ int found = FLB_FALSE;
+ flb_sds_t tag;
+
+ kv = map->via.map.ptr;
+
+ for(j=0; j < map_size; j++) {
+ check = FLB_FALSE;
+ found = FLB_FALSE;
+ key = (kv+j)->key;
+ if (key.type == MSGPACK_OBJECT_BIN) {
+ key_str = (char *) key.via.bin.ptr;
+ key_str_size = key.via.bin.size;
+ check = FLB_TRUE;
+ }
+ if (key.type == MSGPACK_OBJECT_STR) {
+ key_str = (char *) key.via.str.ptr;
+ key_str_size = key.via.str.size;
+ check = FLB_TRUE;
+ }
+
+ if (check == FLB_TRUE) {
+ if (strncmp(ctx->tag_key, key_str, key_str_size) == 0) {
+ val = (kv+j)->val;
+ if (val.type == MSGPACK_OBJECT_BIN) {
+ val_str = (char *) val.via.bin.ptr;
+                    val_str_size = val.via.bin.size;
+ found = FLB_TRUE;
+ break;
+ }
+ if (val.type == MSGPACK_OBJECT_STR) {
+ val_str = (char *) val.via.str.ptr;
+ val_str_size = val.via.str.size;
+ found = FLB_TRUE;
+ break;
+ }
+ }
+ }
+ }
+
+ if (found == FLB_TRUE) {
+ tag = flb_sds_create_len(val_str, val_str_size);
+ if (!tag) {
+ flb_errno();
+ return NULL;
+ }
+ return tag;
+ }
+
+
+ flb_plg_error(ctx->ins, "Could not find tag_key %s in record", ctx->tag_key);
+ return NULL;
+}
+
+static int get_write_op(struct flb_in_elasticsearch *ctx, msgpack_object *map, flb_sds_t *out_write_op, size_t *out_key_size)
+{
+ char *op_str = NULL;
+ size_t op_str_size = 0;
+ msgpack_object_kv *kv;
+ msgpack_object key;
+ int check = FLB_FALSE;
+
+ kv = map->via.map.ptr;
+ key = kv[0].key;
+ if (key.type == MSGPACK_OBJECT_BIN) {
+ op_str = (char *) key.via.bin.ptr;
+ op_str_size = key.via.bin.size;
+ check = FLB_TRUE;
+ }
+ if (key.type == MSGPACK_OBJECT_STR) {
+ op_str = (char *) key.via.str.ptr;
+ op_str_size = key.via.str.size;
+ check = FLB_TRUE;
+ }
+
+ if (check == FLB_TRUE) {
+ *out_write_op = flb_sds_create_len(op_str, op_str_size);
+ *out_key_size = op_str_size;
+ }
+
+ return check;
+}
+
+static int status_buffer_avail(struct flb_in_elasticsearch *ctx, flb_sds_t bulk_statuses, size_t threshold)
+{
+ if (flb_sds_avail(bulk_statuses) < threshold) {
+        flb_plg_warn(ctx->ins, "remaining buffer for bulk status(es) is too small");
+
+ return FLB_FALSE;
+ }
+
+ return FLB_TRUE;
+}
+
+static int process_ndpack(struct flb_in_elasticsearch *ctx, flb_sds_t tag, char *buf, size_t size, flb_sds_t bulk_statuses)
+{
+ int ret;
+ size_t off = 0;
+ size_t map_copy_index;
+ msgpack_object_kv *map_copy_entry;
+ msgpack_unpacked result;
+ struct flb_time tm;
+ msgpack_object *obj;
+ flb_sds_t tag_from_record = NULL;
+ int idx = 0;
+ flb_sds_t write_op;
+ size_t op_str_size = 0;
+ int op_ret = FLB_FALSE;
+ int error_op = FLB_FALSE;
+
+ flb_time_get(&tm);
+
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, buf, size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ if (result.data.type == MSGPACK_OBJECT_MAP) {
+ if (idx > 0 && idx % 2 == 0) {
+ flb_sds_cat(bulk_statuses, ",", 1);
+ }
+ if (status_buffer_avail(ctx, bulk_statuses, 50) == FLB_FALSE) {
+ break;
+ }
+ if (idx % 2 == 0) {
+ op_ret = get_write_op(ctx, &result.data, &write_op, &op_str_size);
+
+ if (op_ret) {
+ if (flb_sds_cmp(write_op, "index", op_str_size) == 0) {
+ flb_sds_cat(bulk_statuses, "{\"index\":", 9);
+ error_op = FLB_FALSE;
+ }
+ else if (flb_sds_cmp(write_op, "create", op_str_size) == 0) {
+ flb_sds_cat(bulk_statuses, "{\"create\":", 10);
+ error_op = FLB_FALSE;
+ }
+ else if (flb_sds_cmp(write_op, "update", op_str_size) == 0) {
+ flb_sds_cat(bulk_statuses, "{\"update\":", 10);
+ error_op = FLB_TRUE;
+ }
+ else if (flb_sds_cmp(write_op, "delete", op_str_size) == 0) {
+ flb_sds_cat(bulk_statuses, "{\"delete\":{\"status\":404,\"result\":\"not_found\"}}", 46);
+ error_op = FLB_TRUE;
+ idx += 1; /* Adjust to a multiple of two at the end of
+ * the loop, since delete actions consist of
+ * only one line. */
+ flb_sds_destroy(write_op);
+
+ goto proceed;
+ }
+ else {
+ flb_sds_cat(bulk_statuses, "{\"unknown\":{\"status\":400,\"result\":\"bad_request\"}}", 49);
+ error_op = FLB_TRUE;
+
+ flb_sds_destroy(write_op);
+
+ break;
+ }
+ } else {
+ flb_sds_destroy(write_op);
+ flb_plg_error(ctx->ins, "meta information line is missing");
+ error_op = FLB_TRUE;
+
+ break;
+ }
+
+ if (error_op == FLB_FALSE) {
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_sds_destroy(write_op);
+ flb_plg_error(ctx->ins, "event encoder error : %d", ret);
+ error_op = FLB_TRUE;
+
+ break;
+ }
+
+ ret = flb_log_event_encoder_set_timestamp(
+ &ctx->log_encoder,
+ &tm);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_sds_destroy(write_op);
+ flb_plg_error(ctx->ins, "event encoder error : %d", ret);
+ error_op = FLB_TRUE;
+
+ break;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE((char *) ctx->meta_key),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&result.data));
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_sds_destroy(write_op);
+ flb_plg_error(ctx->ins, "event encoder error : %d", ret);
+ error_op = FLB_TRUE;
+
+ break;
+ }
+ }
+ }
+ else if (idx % 2 == 1) {
+ if (error_op == FLB_FALSE) {
+ /* Pack body */
+
+ for (map_copy_index = 0 ;
+ map_copy_index < result.data.via.map.size &&
+ ret == FLB_EVENT_ENCODER_SUCCESS ;
+ map_copy_index++) {
+ map_copy_entry = &result.data.via.map.ptr[map_copy_index];
+
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&map_copy_entry->key),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&map_copy_entry->val));
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "event encoder error : %d", ret);
+ error_op = FLB_TRUE;
+
+ break;
+ }
+
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "event encoder error : %d", ret);
+ error_op = FLB_TRUE;
+
+ break;
+ }
+
+ tag_from_record = NULL;
+
+ if (ctx->tag_key) {
+ obj = &result.data;
+ tag_from_record = tag_key(ctx, obj);
+ }
+
+ if (tag_from_record) {
+ flb_input_log_append(ctx->ins,
+ tag_from_record,
+ flb_sds_len(tag_from_record),
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ flb_sds_destroy(tag_from_record);
+ }
+ else if (tag) {
+ flb_input_log_append(ctx->ins,
+ tag,
+ flb_sds_len(tag),
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+ }
+ else {
+ /* use the default plugin Tag (its internal name, e.g: http.0) */
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+ }
+ if (op_ret) {
+ if (flb_sds_cmp(write_op, "index", op_str_size) == 0) {
+ flb_sds_cat(bulk_statuses, "{\"status\":201,\"result\":\"created\"}}", 34);
+ }
+ else if (flb_sds_cmp(write_op, "create", op_str_size) == 0) {
+ flb_sds_cat(bulk_statuses, "{\"status\":201,\"result\":\"created\"}}", 34);
+ }
+ else if (flb_sds_cmp(write_op, "update", op_str_size) == 0) {
+ flb_sds_cat(bulk_statuses, "{\"status\":403,\"result\":\"forbidden\"}}", 36);
+ }
+ if (status_buffer_avail(ctx, bulk_statuses, 50) == FLB_FALSE) {
+ flb_sds_destroy(write_op);
+
+ break;
+ }
+ }
+ flb_sds_destroy(write_op);
+ }
+
+ proceed:
+ idx++;
+ }
+ else {
+ flb_plg_error(ctx->ins, "skip record from invalid type: %i",
+ result.data.type);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+ }
+
+ if (idx % 2 != 0) {
+ flb_plg_warn(ctx->ins, "decode payload of Bulk API is failed");
+ msgpack_unpacked_destroy(&result);
+ if (error_op == FLB_FALSE) {
+ /* When the body line is missing in the non-error case,
+ * no other code path releases this memory, so do it
+ * here. */
+ flb_sds_destroy(write_op);
+ }
+
+ return -1;
+ }
+
+ msgpack_unpacked_destroy(&result);
+
+ return 0;
+}
+
+static ssize_t parse_payload_ndjson(struct flb_in_elasticsearch *ctx, flb_sds_t tag,
+ char *payload, size_t size, flb_sds_t bulk_statuses)
+{
+ int ret;
+ int out_size;
+ char *pack;
+ struct flb_pack_state pack_state;
+
+ /* Initialize packer */
+ flb_pack_state_init(&pack_state);
+
+ /* Pack JSON as msgpack */
+ ret = flb_pack_json_state(payload, size,
+ &pack, &out_size, &pack_state);
+ flb_pack_state_reset(&pack_state);
+
+ /* Handle exceptions */
+ if (ret == FLB_ERR_JSON_PART) {
+ flb_plg_warn(ctx->ins, "JSON data is incomplete, skipping");
+ return -1;
+ }
+ else if (ret == FLB_ERR_JSON_INVAL) {
+ flb_plg_warn(ctx->ins, "invalid JSON message, skipping");
+ return -1;
+ }
+ else if (ret == -1) {
+ return -1;
+ }
+
+ /* Process the packed JSON records */
+ process_ndpack(ctx, tag, pack, out_size, bulk_statuses);
+ flb_free(pack);
+
+ return 0;
+}
+
+static int process_payload(struct flb_in_elasticsearch *ctx, struct in_elasticsearch_bulk_conn *conn,
+ flb_sds_t tag,
+ struct mk_http_session *session,
+ struct mk_http_request *request,
+ flb_sds_t bulk_statuses)
+{
+ int type = -1;
+ int i = 0;
+ int ret = 0;
+ struct mk_http_header *header;
+ int extra_size = -1;
+ struct mk_http_header *headers_extra;
+ int gzip_compressed = FLB_FALSE;
+ void *gz_data = NULL;
+ size_t gz_size = -1;
+
+ header = &session->parser.headers[MK_HEADER_CONTENT_TYPE];
+ if (header->key.data == NULL) {
+ send_response(conn, 400, "error: header 'Content-Type' is not set\n");
+ return -1;
+ }
+
+ if (header->val.len >= 20 &&
+ strncasecmp(header->val.data, "application/x-ndjson", 20) == 0) {
+ type = HTTP_CONTENT_NDJSON;
+ }
+
+ if (header->val.len >= 16 &&
+ strncasecmp(header->val.data, "application/json", 16) == 0) {
+ type = HTTP_CONTENT_JSON;
+ }
+
+ if (type == -1) {
+ send_response(conn, 400, "error: invalid 'Content-Type'\n");
+ return -1;
+ }
+
+ if (request->data.len <= 0) {
+ send_response(conn, 400, "error: no payload found\n");
+ return -1;
+ }
+
+ extra_size = session->parser.headers_extra_count;
+ if (extra_size > 0) {
+ for (i = 0; i < extra_size; i++) {
+ headers_extra = &session->parser.headers_extra[i];
+ if (headers_extra->key.len == 16 &&
+ strncasecmp(headers_extra->key.data, "Content-Encoding", 16) == 0) {
+ if (headers_extra->val.len == 4 &&
+ strncasecmp(headers_extra->val.data, "gzip", 4) == 0) {
+ flb_debug("[elasticsearch_bulk_prot] body is gzipped");
+ gzip_compressed = FLB_TRUE;
+ }
+ }
+ }
+ }
+
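+ /* JSON and NDJSON payloads share the same parsing path; gzip-encoded
+ * bodies are decompressed first. */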
+ if (type == HTTP_CONTENT_NDJSON || type == HTTP_CONTENT_JSON) {
+ if (gzip_compressed == FLB_TRUE) {
+ ret = flb_gzip_uncompress((void *) request->data.data, request->data.len,
+ &gz_data, &gz_size);
+ if (ret == -1) {
+ flb_error("[elasticsearch_bulk_prot] gzip uncompress is failed");
+ return -1;
+ }
+ parse_payload_ndjson(ctx, tag, gz_data, gz_size, bulk_statuses);
+ flb_free(gz_data);
+ }
+ else {
+ parse_payload_ndjson(ctx, tag, request->data.data, request->data.len, bulk_statuses);
+ }
+ }
+
+ return 0;
+}
+
+static inline int mk_http_point_header(mk_ptr_t *h,
+ struct mk_http_parser *parser, int key)
+{
+ struct mk_http_header *header;
+
+ header = &parser->headers[key];
+ if (header->type == key) {
+ h->data = header->val.data;
+ h->len = header->val.len;
+ return 0;
+ }
+ else {
+ h->data = NULL;
+ h->len = -1;
+ }
+
+ return -1;
+}
+
+/*
+ * Handle an incoming request. It performs extra checks over the request;
+ * if everything is OK, it enqueues the incoming payload.
+ */
+int in_elasticsearch_bulk_prot_handle(struct flb_in_elasticsearch *ctx,
+ struct in_elasticsearch_bulk_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ int i;
+ int ret;
+ int len;
+ char *uri;
+ char *qs;
+ off_t diff;
+ flb_sds_t tag;
+ struct mk_http_header *header;
+ flb_sds_t bulk_statuses = NULL;
+ flb_sds_t bulk_response = NULL;
+ char *error_str = NULL;
+
+ if (request->uri.data[0] != '/') {
+ send_response(conn, 400, "error: invalid request\n");
+ return -1;
+ }
+
+ /* Decode URI */
+ uri = mk_utils_url_decode(request->uri);
+ if (!uri) {
+ uri = mk_mem_alloc_z(request->uri.len + 1);
+ if (!uri) {
+ return -1;
+ }
+ memcpy(uri, request->uri.data, request->uri.len);
+ uri[request->uri.len] = '\0';
+ }
+
+ /* Try to match a query string so we can remove it */
+ qs = strchr(uri, '?');
+ if (qs) {
+ /* remove the query string part */
+ diff = qs - uri;
+ uri[diff] = '\0';
+ }
+
+ /* Check the configured tag first */
+ if (ctx->ins->tag && !ctx->ins->tag_default) {
+ tag = flb_sds_create(ctx->ins->tag);
+ if (tag == NULL) {
+ return -1;
+ }
+ }
+ else {
+ /* Compose the query string using the URI */
+ len = strlen(uri);
+
+ if (len == 1) {
+ tag = NULL; /* use default tag */
+ }
+ else {
+ /* New tag skipping the URI '/' */
+ tag = flb_sds_create_len(&uri[1], len - 1);
+ if (!tag) {
+ mk_mem_free(uri);
+ return -1;
+ }
+
+ /* Sanitize, only allow alphanum chars */
+ for (i = 0; i < flb_sds_len(tag); i++) {
+ if (!isalnum(tag[i]) && tag[i] != '_' && tag[i] != '.') {
+ tag[i] = '_';
+ }
+ }
+ }
+ }
+
+ /* Check if we have a Host header: Hostname ; port */
+ mk_http_point_header(&request->host, &session->parser, MK_HEADER_HOST);
+
+ /* Header: Connection */
+ mk_http_point_header(&request->connection, &session->parser,
+ MK_HEADER_CONNECTION);
+
+ /* HTTP/1.1 needs Host header */
+ if (!request->host.data && request->protocol == MK_HTTP_PROTOCOL_11) {
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+ return -1;
+ }
+
+ /* Should we close the session after this request ? */
+ mk_http_keepalive_check(session, request, ctx->server);
+
+ /* Content Length */
+ header = &session->parser.headers[MK_HEADER_CONTENT_LENGTH];
+ if (header->type == MK_HEADER_CONTENT_LENGTH) {
+ request->_content_length.data = header->val.data;
+ request->_content_length.len = header->val.len;
+ }
+ else {
+ request->_content_length.data = NULL;
+ }
+
+ if (request->method == MK_METHOD_HEAD) {
+ send_empty_response(conn, 200);
+
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+
+ return 0;
+ }
+
+ if (request->method == MK_METHOD_PUT) {
+ send_json_message_response(conn, 200, "{}");
+
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+
+ return 0;
+ }
+
+ if (request->method == MK_METHOD_GET) {
+ if (strncmp(uri, "/_nodes/http", 12) == 0) {
+ send_dummy_sniffer_response(conn, 200, ctx);
+ }
+ else if (strlen(uri) == 1 && strncmp(uri, "/", 1) == 0) {
+ send_version_message_response(ctx, conn, 200);
+ }
+ else {
+ send_json_message_response(conn, 200, "{}");
+ }
+
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+
+ return 0;
+ }
+
+ if (request->method == MK_METHOD_POST) {
+ if (strncmp(uri, "/_bulk", 6) == 0) {
+ bulk_statuses = flb_sds_create_size(ctx->buffer_max_size);
+ if (!bulk_statuses) {
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+ return -1;
+ }
+
+ bulk_response = flb_sds_create_size(ctx->buffer_max_size);
+ if (!bulk_response) {
+ flb_sds_destroy(bulk_statuses);
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+ return -1;
+ }
+ } else {
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+
+ send_response(conn, 400, "error: invaild HTTP endpoint\n");
+
+ return -1;
+ }
+ }
+
+ if (request->method != MK_METHOD_POST &&
+ request->method != MK_METHOD_GET &&
+ request->method != MK_METHOD_HEAD &&
+ request->method != MK_METHOD_PUT) {
+
+ if (bulk_statuses) {
+ flb_sds_destroy(bulk_statuses);
+ }
+ if (bulk_response) {
+ flb_sds_destroy(bulk_response);
+ }
+
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+
+ send_response(conn, 400, "error: invalid HTTP method\n");
+ return -1;
+ }
+
+ ret = process_payload(ctx, conn, tag, session, request, bulk_statuses);
+ flb_sds_destroy(tag);
+
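+ /* Assemble an Elasticsearch-style bulk response: wrap the collected
+ * item statuses in {"errors":<bool>,"items":[...]}, reporting errors
+ * when any item carries a 40x status. */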
+ len = flb_sds_len(bulk_statuses);
+ if (flb_sds_alloc(bulk_response) < len + 27) {
+ bulk_response = flb_sds_increase(bulk_response, len + 27 - flb_sds_alloc(bulk_response));
+ }
+ error_str = strstr(bulk_statuses, "\"status\":40");
+ if (error_str){
+ flb_sds_cat(bulk_response, "{\"errors\":true,\"items\":[", 24);
+ }
+ else {
+ flb_sds_cat(bulk_response, "{\"errors\":false,\"items\":[", 25);
+ }
+ flb_sds_cat(bulk_response, bulk_statuses, flb_sds_len(bulk_statuses));
+ flb_sds_cat(bulk_response, "]}", 2);
+ send_response(conn, 200, bulk_response);
+
+ mk_mem_free(uri);
+ flb_sds_destroy(bulk_statuses);
+ flb_sds_destroy(bulk_response);
+
+ return ret;
+}
+
+/*
+ * Handle an incoming request which has resulted in an http parser error.
+ */
+int in_elasticsearch_bulk_prot_handle_error(struct flb_in_elasticsearch *ctx,
+ struct in_elasticsearch_bulk_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ send_response(conn, 400, "error: invalid request\n");
+ return -1;
+}
diff --git a/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_prot.h b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_prot.h
new file mode 100644
index 000000000..be1aeceea
--- /dev/null
+++ b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_bulk_prot.h
@@ -0,0 +1,40 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_ELASTICSEARCH_BULK_PROT
+#define FLB_IN_ELASTICSEARCH_BULK_PROT
+
+#define ES_VERSION_RESPONSE_TEMPLATE \
+ "{\"version\":{\"number\":\"%s\",\"build_flavor\":\"Fluent Bit OSS\"},\"tagline\":\"Fluent Bit's Bulk API compatible endpoint\"}"
+
+#define ES_NODES_TEMPLATE "{\"_nodes\":{\"total\":1,\"successful\":1,\"failed\":0}," \
+ "\"nodes\":{\"%s\":{\"name\":\"%s\",\"version\":\"8.0.0\"," \
+ "\"http\":{\"publish_address\":\"%s:%s\",\"max_content_length_in_bytes\":%ld}}}}"
+
+int in_elasticsearch_bulk_prot_handle(struct flb_in_elasticsearch *ctx,
+ struct in_elasticsearch_bulk_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request);
+
+int in_elasticsearch_bulk_prot_handle_error(struct flb_in_elasticsearch *ctx,
+ struct in_elasticsearch_bulk_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_config.c b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_config.c
new file mode 100644
index 000000000..4beb96320
--- /dev/null
+++ b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_config.c
@@ -0,0 +1,105 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "in_elasticsearch.h"
+#include "in_elasticsearch_config.h"
+#include "in_elasticsearch_bulk_conn.h"
+
+struct flb_in_elasticsearch *in_elasticsearch_config_create(struct flb_input_instance *ins)
+{
+ int ret;
+ char port[8];
+ struct flb_in_elasticsearch *ctx;
+
+ ctx = flb_calloc(1, sizeof(struct flb_in_elasticsearch));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ mk_list_init(&ctx->connections);
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Listen interface (if not set, defaults to 0.0.0.0:9200) */
+ flb_input_net_default_listener("0.0.0.0", 9200, ins);
+
+ ctx->listen = flb_sds_create(ins->host.listen);
+ snprintf(port, sizeof(port) - 1, "%d", ins->host.port);
+ ctx->tcp_port = flb_sds_create(port);
+
+ /* HTTP Server specifics */
+ ctx->server = flb_calloc(1, sizeof(struct mk_server));
+ ctx->server->keep_alive = MK_TRUE;
+
+ /* monkey treats server->workers == 0 as the server not being initialized,
+ * so we want to make sure that it stays that way!
+ */
+
+ ret = flb_log_event_encoder_init(&ctx->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "error initializing event encoder : %d", ret);
+
+ in_elasticsearch_config_destroy(ctx);
+
+ return NULL;
+ }
+
+
+ return ctx;
+}
+
+int in_elasticsearch_config_destroy(struct flb_in_elasticsearch *ctx)
+{
+ flb_log_event_encoder_destroy(&ctx->log_encoder);
+
+ /* release all connections */
+ in_elasticsearch_bulk_conn_release_all(ctx);
+
+
+ if (ctx->collector_id != -1) {
+ flb_input_collector_delete(ctx->collector_id, ctx->ins);
+
+ ctx->collector_id = -1;
+ }
+
+ if (ctx->downstream != NULL) {
+ flb_downstream_destroy(ctx->downstream);
+ }
+
+ if (ctx->server) {
+ flb_free(ctx->server);
+ }
+
+ flb_sds_destroy(ctx->listen);
+ flb_sds_destroy(ctx->tcp_port);
+
+ flb_free(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_config.h b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_config.h
new file mode 100644
index 000000000..28108723d
--- /dev/null
+++ b/src/fluent-bit/plugins/in_elasticsearch/in_elasticsearch_config.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_ELASTICSEARCH_CONFIG_H
+#define FLB_IN_ELASTICSEARCH_CONFIG_H
+
+#include <fluent-bit/flb_input_plugin.h>
+#include "in_elasticsearch.h"
+
+struct flb_in_elasticsearch *in_elasticsearch_config_create(struct flb_input_instance *ins);
+int in_elasticsearch_config_destroy(struct flb_in_elasticsearch *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_emitter/CMakeLists.txt b/src/fluent-bit/plugins/in_emitter/CMakeLists.txt
new file mode 100644
index 000000000..596b53d08
--- /dev/null
+++ b/src/fluent-bit/plugins/in_emitter/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ emitter.c
+ )
+
+FLB_PLUGIN(in_emitter "${src}" "")
diff --git a/src/fluent-bit/plugins/in_emitter/emitter.c b/src/fluent-bit/plugins/in_emitter/emitter.c
new file mode 100644
index 000000000..821df9539
--- /dev/null
+++ b/src/fluent-bit/plugins/in_emitter/emitter.c
@@ -0,0 +1,321 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_scheduler.h>
+#include <fluent-bit/flb_ring_buffer.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#define DEFAULT_EMITTER_RING_BUFFER_FLUSH_FREQUENCY 2000
+
+struct em_chunk {
+ flb_sds_t tag;
+ struct msgpack_sbuffer mp_sbuf; /* msgpack sbuffer */
+ struct msgpack_packer mp_pck; /* msgpack packer */
+ struct mk_list _head;
+};
+
+struct flb_emitter {
+ struct mk_list chunks; /* list of all pending chunks */
+ struct flb_input_instance *ins; /* input instance */
+ struct flb_ring_buffer *msgs; /* ring buffer for cross-thread messages */
+ int ring_buffer_size; /* size of the ring buffer */
+};
+
+struct em_chunk *em_chunk_create(const char *tag, int tag_len,
+ struct flb_emitter *ctx)
+{
+ struct em_chunk *ec;
+
+ ec = flb_calloc(1, sizeof(struct em_chunk));
+ if (!ec) {
+ flb_errno();
+ return NULL;
+ }
+
+ ec->tag = flb_sds_create_len(tag, tag_len);
+ if (!ec->tag) {
+ flb_errno();
+ flb_free(ec);
+ return NULL;
+ }
+
+ msgpack_sbuffer_init(&ec->mp_sbuf);
+ msgpack_packer_init(&ec->mp_pck, &ec->mp_sbuf, msgpack_sbuffer_write);
+
+ mk_list_add(&ec->_head, &ctx->chunks);
+
+ return ec;
+}
+
+static void em_chunk_destroy(struct em_chunk *ec)
+{
+ mk_list_del(&ec->_head);
+ flb_sds_destroy(ec->tag);
+ msgpack_sbuffer_destroy(&ec->mp_sbuf);
+ flb_free(ec);
+}
+
+static int do_in_emitter_add_record(struct em_chunk *ec,
+ struct flb_input_instance *in)
+{
+ struct flb_emitter *ctx = (struct flb_emitter *) in->context;
+ int ret;
+
+ /* Associate this backlog chunk to this instance into the engine */
+ ret = flb_input_log_append(in,
+ ec->tag, flb_sds_len(ec->tag),
+ ec->mp_sbuf.data,
+ ec->mp_sbuf.size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error registering chunk with tag: %s",
+ ec->tag);
+ /* Release the echunk */
+ em_chunk_destroy(ec);
+ return -1;
+ }
+ /* Release the echunk */
+ em_chunk_destroy(ec);
+ return 0;
+}
+
+/*
+ * Function used by filters to ingest custom records with custom tags; at the
+ * moment it's only used by the rewrite_tag filter.
+ */
+int in_emitter_add_record(const char *tag, int tag_len,
+ const char *buf_data, size_t buf_size,
+ struct flb_input_instance *in)
+{
+ struct em_chunk temporary_chunk;
+ struct mk_list *head;
+ struct em_chunk *ec;
+ struct flb_emitter *ctx;
+
+ ctx = (struct flb_emitter *) in->context;
+ ec = NULL;
+
+ /* Use the ring buffer first if it exists */
+ if (ctx->msgs) {
+ memset(&temporary_chunk, 0, sizeof(struct em_chunk));
+
+ temporary_chunk.tag = flb_sds_create_len(tag, tag_len);
+
+ if (temporary_chunk.tag == NULL) {
+ flb_plg_error(ctx->ins,
+ "cannot allocate memory for tag: %s",
+ tag);
+ return -1;
+ }
+
+ msgpack_sbuffer_init(&temporary_chunk.mp_sbuf);
+ msgpack_sbuffer_write(&temporary_chunk.mp_sbuf, buf_data, buf_size);
+
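+ /* The chunk is copied by value into the ring buffer; the collector
+ * in_emitter_ingest_ring_buffer drains it later from the engine
+ * thread. */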
+ return flb_ring_buffer_write(ctx->msgs,
+ (void *) &temporary_chunk,
+ sizeof(struct em_chunk));
+ }
+
+ /* Check if any target chunk already exists */
+ mk_list_foreach(head, &ctx->chunks) {
+ ec = mk_list_entry(head, struct em_chunk, _head);
+ if (flb_sds_cmp(ec->tag, tag, tag_len) != 0) {
+ ec = NULL;
+ continue;
+ }
+ break;
+ }
+
+ /* No candidate chunk found, so create a new one */
+ if (!ec) {
+ ec = em_chunk_create(tag, tag_len, ctx);
+ if (!ec) {
+ flb_plg_error(ctx->ins, "cannot create new chunk for tag: %s",
+ tag);
+ return -1;
+ }
+ }
+
+ /* Append raw msgpack data */
+ msgpack_sbuffer_write(&ec->mp_sbuf, buf_data, buf_size);
+
+ return do_in_emitter_add_record(ec, in);
+}
+
+/*
+ * Collector callback: drain pending chunks from the ring buffer and ingest
+ * them into the engine.
+ */
+static int in_emitter_ingest_ring_buffer(struct flb_input_instance *in,
+ struct flb_config *config, void *context)
+{
+ int ret;
+ struct flb_emitter *ctx = (struct flb_emitter *)context;
+ struct em_chunk ec;
+ (void) config;
+ (void) in;
+
+
+ while ((ret = flb_ring_buffer_read(ctx->msgs, (void *)&ec,
+ sizeof(struct em_chunk))) == 0) {
+ ret = flb_input_log_append(in,
+ ec.tag, flb_sds_len(ec.tag),
+ ec.mp_sbuf.data,
+ ec.mp_sbuf.size);
+ flb_sds_destroy(ec.tag);
+ msgpack_sbuffer_destroy(&ec.mp_sbuf);
+ }
+ return ret;
+}
+
+static int in_emitter_start_ring_buffer(struct flb_input_instance *in, struct flb_emitter *ctx)
+{
+ if (ctx->ring_buffer_size <= 0) {
+ return 0;
+ }
+
+ if (ctx->msgs != NULL) {
+ flb_warn("emitter %s already has a ring buffer",
+ flb_input_name(in));
+ return 0;
+ }
+
+ ctx->msgs = flb_ring_buffer_create(sizeof(void *) * ctx->ring_buffer_size);
+ if (!ctx->msgs) {
+ flb_error("emitter %s could not initialize ring buffer",
+ flb_input_name(in));
+ return -1;
+ }
+
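+ /* Drain the ring buffer from the engine thread once per second */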
+ return flb_input_set_collector_time(in, in_emitter_ingest_ring_buffer,
+ 1, 0, in->config);
+}
+
+/* Initialize plugin */
+static int cb_emitter_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ struct flb_sched *scheduler;
+ struct flb_emitter *ctx;
+ int ret;
+
+ scheduler = flb_sched_ctx_get();
+
+ ctx = flb_calloc(1, sizeof(struct flb_emitter));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = in;
+ mk_list_init(&ctx->chunks);
+
+
+ ret = flb_input_config_map_set(in, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ if (scheduler != config->sched &&
+ scheduler != NULL &&
+ ctx->ring_buffer_size == 0) {
+
+ ctx->ring_buffer_size = DEFAULT_EMITTER_RING_BUFFER_FLUSH_FREQUENCY;
+
+ flb_plg_debug(in,
+ "threaded emitter instances require ring_buffer_size"
+ " being set, using default value of %u",
+ ctx->ring_buffer_size);
+ }
+
+ if (ctx->ring_buffer_size > 0) {
+ ret = in_emitter_start_ring_buffer(in, ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+ }
+
+ /* export plugin context */
+ flb_input_set_context(in, ctx);
+
+ return 0;
+}
+
+static int cb_emitter_exit(void *data, struct flb_config *config)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_emitter *ctx = data;
+ struct em_chunk *echunk;
+ struct em_chunk ec;
+ int ret;
+
+
+ mk_list_foreach_safe(head, tmp, &ctx->chunks) {
+ echunk = mk_list_entry(head, struct em_chunk, _head);
+ em_chunk_destroy(echunk);
+ }
+
+ if (ctx->msgs) {
+ while ((ret = flb_ring_buffer_read(ctx->msgs, (void *)&ec,
+ sizeof(struct em_chunk))) == 0) {
+ flb_sds_destroy(ec.tag);
+ msgpack_sbuffer_destroy(&ec.mp_sbuf);
+ }
+ flb_ring_buffer_destroy(ctx->msgs);
+ }
+
+ flb_free(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_INT, "ring_buffer_size", "0",
+ 0, FLB_TRUE, offsetof(struct flb_emitter, ring_buffer_size),
+ "use a ring buffer to ingest messages for the emitter (required across threads)."
+ },
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_emitter_plugin = {
+ .name = "emitter",
+ .description = "Record Emitter",
+ .cb_init = cb_emitter_init,
+ .cb_pre_run = NULL,
+ .cb_collect = NULL,
+ .cb_ingest = NULL,
+ .cb_flush_buf = NULL,
+ .config_map = config_map,
+ .cb_pause = NULL,
+ .cb_resume = NULL,
+ .cb_exit = cb_emitter_exit,
+
+ /* This plugin can only be configured and invoked by the Engine */
+ .flags = FLB_INPUT_PRIVATE
+};
diff --git a/src/fluent-bit/plugins/in_event_test/CMakeLists.txt b/src/fluent-bit/plugins/in_event_test/CMakeLists.txt
new file mode 100644
index 000000000..9a9577a6c
--- /dev/null
+++ b/src/fluent-bit/plugins/in_event_test/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ event_test.c)
+
+FLB_PLUGIN(in_event_test "${src}" "")
diff --git a/src/fluent-bit/plugins/in_event_test/event_test.c b/src/fluent-bit/plugins/in_event_test/event_test.c
new file mode 100644
index 000000000..557017fe2
--- /dev/null
+++ b/src/fluent-bit/plugins/in_event_test/event_test.c
@@ -0,0 +1,407 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pipe.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_time_utils.h>
+
+#define STATUS_OK 1
+#define STATUS_ERROR 0
+#define STATUS_PENDING -1
+#define CALLBACK_TIME 2 /* 2 seconds */
+
+#define SERVER_PORT "9092"
+#define SERVER_IFACE "0.0.0.0"
+
+struct unit_test {
+ int id;
+ int coll_id;
+ int status;
+ char *desc;
+};
+
+struct unit_test tests[] = {
+ {0, 0, STATUS_PENDING, "collector time"},
+ {1, 0, STATUS_PENDING, "collector fd_event"},
+ {2, 0, STATUS_PENDING, "collector fd_server | socket"},
+ {3, 0, STATUS_PENDING, "plugin paused from engine"},
+ {4, 0, STATUS_PENDING, "plugin resumed from engine"},
+};
+
+#define UNIT_TESTS_SIZE (sizeof(tests) / sizeof(struct unit_test))
+
+struct event_test {
+ flb_pipefd_t pipe[2];
+ int server_fd;
+ int client_coll_id;
+ struct flb_upstream *upstream;
+ struct unit_test *tests;
+ struct flb_input_instance *ins;
+};
+
+static void set_unit_test_status(struct event_test *ctx, int id, int status)
+{
+ struct unit_test *ut;
+
+ ut = &ctx->tests[id];
+ ut->status = status;
+}
+
+static int config_destroy(struct event_test *ctx)
+{
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->tests) {
+ flb_free(ctx->tests);
+ }
+
+ if (ctx->pipe[0] > 0) {
+ flb_socket_close(ctx->pipe[0]);
+ }
+ if (ctx->pipe[1] > 0) {
+ flb_socket_close(ctx->pipe[1]);
+ }
+ if (ctx->server_fd > 0) {
+ flb_socket_close(ctx->server_fd);
+ }
+
+ if (ctx->upstream) {
+ flb_upstream_destroy(ctx->upstream);
+ }
+
+ flb_free(ctx);
+ return 0;
+}
+
+static int cb_collector_time(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int diff;
+ int ret;
+ uint64_t val;
+ time_t now;
+ struct unit_test *ut;
+ struct event_test *ctx = (struct event_test *) in_context;
+
+ now = time(NULL);
+ diff = now - config->init_time;
+ /* On macOS the elapsed time is sometimes one second longer, so we
+ * allow a +1 delta when checking the interval. */
+ if (diff > (CALLBACK_TIME + 1)) {
+ flb_plg_error(ins, "cb_collector_time difference failed: %i seconds", diff);
+ set_unit_test_status(ctx, 0, STATUS_ERROR);
+ flb_engine_exit(config);
+ }
+
+ /* disable the collector */
+ ut = &ctx->tests[0];
+ flb_input_collector_pause(ut->coll_id, ins);
+
+ /*
+ * before to return, trigger test 1 (collector_fd_event) by writing a byte
+ * to our local pipe.
+ */
+ val = 1;
+ ret = write(ctx->pipe[1], &val, sizeof(val));
+ if (ret == -1) {
+ flb_errno();
+ set_unit_test_status(ctx, 0, STATUS_ERROR);
+ flb_engine_exit(config);
+ }
+
+ set_unit_test_status(ctx, 0, STATUS_OK);
+ flb_plg_info(ins, "[OK] collector_time");
+ FLB_INPUT_RETURN(0);
+}
+
+static int cb_collector_fd(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ uint64_t val = 0;
+ size_t bytes;
+ struct unit_test *ut;
+ struct event_test *ctx = (struct event_test *) in_context;
+
+ bytes = read(ctx->pipe[0], &val, sizeof(val));
+ if (bytes <= 0) {
+ flb_errno();
+ set_unit_test_status(ctx, 1, STATUS_ERROR);
+ flb_engine_exit(config);
+ }
+ else {
+ flb_plg_info(ins, "[OK] collector_fd");
+ }
+
+ /* disable the collector */
+ ut = &ctx->tests[1];
+ flb_input_collector_pause(ut->coll_id, ins);
+ set_unit_test_status(ctx, 1, STATUS_OK);
+
+ FLB_INPUT_RETURN(0);
+}
+
+static int cb_collector_server_socket(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int fd;
+ struct unit_test *ut;
+ struct event_test *ctx = in_context;
+
+ /* Accept the new connection */
+ fd = flb_net_accept(ctx->server_fd);
+ if (fd == -1) {
+ flb_plg_error(ins, "could not accept new connection");
+ return -1;
+ }
+
+ /* sleep co-routine for 500ms */
+ flb_time_sleep(500);
+ flb_socket_close(fd);
+
+ ut = &ctx->tests[2];
+ flb_input_collector_pause(ut->coll_id, ins);
+ set_unit_test_status(ctx, 2, STATUS_OK);
+
+ flb_plg_info(ins, "[OK] collector_server_socket");
+
+ /* tell the engine to deliver a pause request */
+ flb_plg_info(ins, "test pause/resume in 5 seconds...");
+ flb_input_test_pause_resume(ins, 5);
+
+ /* return */
+ FLB_INPUT_RETURN(0);
+}
+
+static int cb_collector_server_client(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_connection *u_conn;
+ struct event_test *ctx = (struct event_test *) in_context;
+
+ /* get the upstream connection (localhost) */
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!u_conn) {
+ flb_plg_error(ins, "could not connect to socket server");
+ return -1;
+ }
+
+ flb_time_sleep(200);
+ flb_upstream_conn_release(u_conn);
+
+ /* disable this collector */
+ flb_input_collector_pause(ctx->client_coll_id, ins);
+ FLB_INPUT_RETURN(0);
+}
+
+static struct event_test *config_create(struct flb_input_instance *ins)
+{
+ size_t size;
+ struct event_test *ctx;
+
+ /* Allocate space for the configuration */
+ ctx = flb_calloc(1, sizeof(struct event_test));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ size = sizeof(struct unit_test) * UNIT_TESTS_SIZE;
+ ctx->tests = flb_malloc(size);
+ if (!ctx->tests) {
+ flb_errno();
+ flb_free(ctx);
+ return NULL;
+ }
+ memcpy(ctx->tests, &tests, size);
+ return ctx;
+}
+
+/* Initialize plugin */
+static int cb_event_test_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int fd;
+ int ret;
+ struct unit_test *ut;
+ struct event_test *ctx = NULL;
+ struct flb_upstream *upstream;
+
+ /* Allocate space for the configuration */
+ ctx = config_create(ins);
+ if (!ctx) {
+ return -1;
+ }
+ flb_input_set_context(ins, ctx);
+
+ /* unit test 0: collector_time */
+ ret = flb_input_set_collector_time(ins, cb_collector_time,
+ CALLBACK_TIME, 0, config);
+ if (ret < 0) {
+ config_destroy(ctx);
+ return -1;
+ }
+ ut = &ctx->tests[0];
+ ut->coll_id = ret;
+
+ /* unit test 1: collector_fd_event */
+ ret = flb_pipe_create(ctx->pipe);
+ if (ret == -1) {
+ flb_errno();
+ config_destroy(ctx);
+ return -1;
+ }
+ ret = flb_input_set_collector_event(ins,
+ cb_collector_fd,
+ ctx->pipe[0],
+ config);
+ if (ret < 0) {
+ config_destroy(ctx);
+ return -1;
+ }
+ ut = &ctx->tests[1];
+ ut->coll_id = ret;
+
+ /* unit test 2: collector_socket */
+ fd = flb_net_server(SERVER_PORT, SERVER_IFACE);
+ if (fd < 0) {
+ flb_errno();
+ config_destroy(ctx);
+ return -1;
+ }
+ flb_net_socket_nonblocking(fd);
+ ctx->server_fd = fd;
+
+ /* socket server */
+ ret = flb_input_set_collector_socket(ins,
+ cb_collector_server_socket,
+ ctx->server_fd,
+ config);
+ if (ret == -1) {
+ config_destroy(ctx);
+ return -1;
+ }
+ ut = &ctx->tests[2];
+ ut->coll_id = ret;
+
+ /* socket client: connect to socket server to trigger the event */
+ ret = flb_input_set_collector_time(ins, cb_collector_server_client,
+ CALLBACK_TIME * 2, 0, config);
+ if (ret < 0) {
+ config_destroy(ctx);
+ return -1;
+ }
+ ctx->client_coll_id = ret;
+
+ /* upstream context for socket client */
+ upstream = flb_upstream_create(config, "127.0.0.1", atoi(SERVER_PORT),
+ FLB_IO_TCP, NULL);
+ if (!upstream) {
+ config_destroy(ctx);
+ return -1;
+ }
+ ctx->upstream = upstream;
+ flb_input_upstream_set(ctx->upstream, ins);
+
+ return 0;
+}
+
+static int cb_event_test_pre_run(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ flb_plg_info(ins, "pre run OK");
+ return -1;
+}
+
+static void cb_event_test_pause(void *data, struct flb_config *config)
+{
+ struct event_test *ctx = data;
+
+ set_unit_test_status(ctx, 3, STATUS_OK);
+ flb_plg_info(ctx->ins, "[OK] engine has paused the plugin");
+}
+
+static void cb_event_test_resume(void *data, struct flb_config *config)
+{
+ struct event_test *ctx = data;
+
+ set_unit_test_status(ctx, 4, STATUS_OK);
+ flb_plg_info(ctx->ins, "[OK] engine has resumed the plugin");
+
+ flb_engine_exit(config);
+}
+
+static int in_event_test_exit(void *data, struct flb_config *config)
+{
+ int i;
+ int failed = FLB_FALSE;
+ struct event_test *ctx = data;
+ struct unit_test *ut;
+ (void) config;
+
+ /* check tests */
+ for (i = 0; i < UNIT_TESTS_SIZE; i++) {
+ ut = &ctx->tests[i];
+ if (ut->status != STATUS_OK) {
+ flb_plg_error(ctx->ins, "unit test #%i '%s' failed",
+ i, ut->desc);
+ failed = FLB_TRUE;
+ }
+ else {
+ flb_plg_info(ctx->ins, "unit test #%i '%s' succeeded",
+ i, ut->desc);
+ }
+ }
+
+ /* if one test failed, perform an abrupt exit with proper error */
+ if (failed) {
+ exit(EXIT_FAILURE);
+ }
+
+ config_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ /* EOF */
+ {0}
+};
+
+struct flb_input_plugin in_event_test_plugin = {
+ .name = "event_test",
+ .description = "Event tests for input plugins",
+ .cb_init = cb_event_test_init,
+ .cb_pre_run = cb_event_test_pre_run,
+ .cb_collect = NULL,
+ .cb_flush_buf = NULL,
+ .cb_pause = cb_event_test_pause,
+ .cb_resume = cb_event_test_resume,
+ .cb_exit = in_event_test_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_CORO | FLB_INPUT_THREADED
+};
diff --git a/src/fluent-bit/plugins/in_event_type/CMakeLists.txt b/src/fluent-bit/plugins/in_event_type/CMakeLists.txt
new file mode 100644
index 000000000..596f5f94e
--- /dev/null
+++ b/src/fluent-bit/plugins/in_event_type/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ event_type.c)
+
+FLB_PLUGIN(in_event_type "${src}" "")
diff --git a/src/fluent-bit/plugins/in_event_type/event_type.c b/src/fluent-bit/plugins/in_event_type/event_type.c
new file mode 100644
index 000000000..bfad258c2
--- /dev/null
+++ b/src/fluent-bit/plugins/in_event_type/event_type.c
@@ -0,0 +1,482 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_event.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <ctraces/ctraces.h>
+
+#include <cmetrics/cmetrics.h>
+#include <cmetrics/cmt_gauge.h>
+#include <cmetrics/cmt_counter.h>
+#include <cmetrics/cmt_summary.h>
+#include <cmetrics/cmt_histogram.h>
+
+#define DEFAULT_INTERVAL_SEC "2"
+#define DEFAULT_INTERVAL_NSEC "0"
+
+#define OTEL_SPAN_ID_LEN 8
+
+struct event_type {
+ int coll_fd;
+ int type;
+
+ int interval_sec;
+ int interval_nsec;
+};
+
+static struct ctrace_id *create_random_span_id()
+{
+ char *buf;
+ ssize_t ret;
+ struct ctrace_id *cid;
+
+ buf = flb_malloc(OTEL_SPAN_ID_LEN);
+ if (!buf) {
+ ctr_errno();
+ return NULL;
+ }
+
+ ret = ctr_random_get(buf, OTEL_SPAN_ID_LEN);
+ if (ret < 0) {
+ flb_free(buf);
+ return NULL;
+ }
+
+ cid = ctr_id_create(buf, OTEL_SPAN_ID_LEN);
+ flb_free(buf);
+
+ return cid;
+
+}
+
+static int send_logs(struct flb_input_instance *ins)
+{
+ struct flb_log_event_encoder log_encoder;
+ int ret;
+
+ ret = flb_log_event_encoder_init(&log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ins, "error initializing event encoder : %d", ret);
+
+ return -1;
+ }
+
+ ret = flb_log_event_encoder_begin_record(&log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(
+ &log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_cstring(
+ &log_encoder, "event_type");
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_cstring(
+ &log_encoder, "some logs");
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ins, NULL, 0,
+ log_encoder.output_buffer,
+ log_encoder.output_length);
+
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ins, "Error encoding record : %d", ret);
+
+ ret = -1;
+ }
+
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return 0;
+}
+
+static int send_metrics(struct flb_input_instance *ins)
+{
+ int ret;
+ double quantiles[5];
+ struct cmt_histogram_buckets *buckets;
+ double val;
+ struct cmt *cmt;
+ uint64_t ts;
+ struct cmt_gauge *g1;
+ struct cmt_counter *c1;
+ struct cmt_summary *s1;
+ struct cmt_histogram *h1;
+
+ ts = cfl_time_now();
+ cmt = cmt_create();
+
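+ /* Build one sample metric of each type (counter, gauge, histogram and
+ * summary) and append them as a single cmetrics context. */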
+ c1 = cmt_counter_create(cmt, "kubernetes", "network", "load_counter", "Network load counter",
+ 2, (char *[]) {"hostname", "app"});
+
+ cmt_counter_get_val(c1, 0, NULL, &val);
+ cmt_counter_inc(c1, ts, 0, NULL);
+ cmt_counter_add(c1, ts, 2, 0, NULL);
+ cmt_counter_get_val(c1, 0, NULL, &val);
+
+ cmt_counter_inc(c1, ts, 2, (char *[]) {"localhost", "cmetrics"});
+ cmt_counter_get_val(c1, 2, (char *[]) {"localhost", "cmetrics"}, &val);
+ cmt_counter_add(c1, ts, 10.55, 2, (char *[]) {"localhost", "test"});
+ cmt_counter_get_val(c1, 2, (char *[]) {"localhost", "test"}, &val);
+ cmt_counter_set(c1, ts, 12.15, 2, (char *[]) {"localhost", "test"});
+ cmt_counter_set(c1, ts, 1, 2, (char *[]) {"localhost", "test"});
+
+ g1 = cmt_gauge_create(cmt, "kubernetes", "network", "load_gauge", "Network load gauge", 0, NULL);
+
+ cmt_gauge_get_val(g1, 0, NULL, &val);
+ cmt_gauge_set(g1, ts, 2.0, 0, NULL);
+ cmt_gauge_get_val(g1, 0, NULL, &val);
+ cmt_gauge_inc(g1, ts, 0, NULL);
+ cmt_gauge_get_val(g1, 0, NULL, &val);
+ cmt_gauge_sub(g1, ts, 2, 0, NULL);
+ cmt_gauge_get_val(g1, 0, NULL, &val);
+ cmt_gauge_dec(g1, ts, 0, NULL);
+ cmt_gauge_get_val(g1, 0, NULL, &val);
+ cmt_gauge_inc(g1, ts, 0, NULL);
+
+ buckets = cmt_histogram_buckets_create(3, 0.05, 5.0, 10.0);
+
+ h1 = cmt_histogram_create(cmt,
+ "k8s", "network", "load_histogram", "Network load histogram",
+ buckets,
+ 1, (char *[]) {"my_label"});
+
+ cmt_histogram_observe(h1, ts, 0.001, 0, NULL);
+ cmt_histogram_observe(h1, ts, 0.020, 0, NULL);
+ cmt_histogram_observe(h1, ts, 5.0, 0, NULL);
+ cmt_histogram_observe(h1, ts, 8.0, 0, NULL);
+ cmt_histogram_observe(h1, ts, 1000, 0, NULL);
+
+ cmt_histogram_observe(h1, ts, 0.001, 1, (char *[]) {"my_val"});
+ cmt_histogram_observe(h1, ts, 0.020, 1, (char *[]) {"my_val"});
+ cmt_histogram_observe(h1, ts, 5.0, 1, (char *[]) {"my_val"});
+ cmt_histogram_observe(h1, ts, 8.0, 1, (char *[]) {"my_val"});
+ cmt_histogram_observe(h1, ts, 1000, 1, (char *[]) {"my_val"});;
+
+ quantiles[0] = 0.1;
+ quantiles[1] = 0.2;
+ quantiles[2] = 0.3;
+ quantiles[3] = 0.4;
+ quantiles[4] = 0.5;
+
+ s1 = cmt_summary_create(cmt,
+ "k8s", "disk", "load_summary", "Disk load summary",
+ 5, quantiles,
+ 1, (char *[]) {"my_label"});
+
+ quantiles[0] = 1.1;
+ quantiles[1] = 2.2;
+ quantiles[2] = 3.3;
+ quantiles[3] = 4.4;
+ quantiles[4] = 5.5;
+
+ cmt_summary_set_default(s1, ts, quantiles, 51.612894511314444, 10, 0, NULL);
+
+ quantiles[0] = 11.11;
+ quantiles[1] = 0;
+ quantiles[2] = 33.33;
+ quantiles[3] = 44.44;
+ quantiles[4] = 55.55;
+
+ cmt_summary_set_default(s1, ts, quantiles, 51.612894511314444, 10, 1, (char *[]) {"my_val"});
+
+ ret = flb_input_metrics_append(ins, NULL, 0, cmt);
+
+ cmt_destroy(cmt);
+ return ret;
+}
+
+static int send_traces(struct flb_input_instance *ins)
+{
+ int ret;
+ struct ctrace *ctx;
+ struct ctrace_opts opts;
+ struct ctrace_span *span_root;
+ struct ctrace_span *span_child;
+ struct ctrace_span_event *event;
+ struct ctrace_resource_span *resource_span;
+ struct ctrace_resource *resource;
+ struct ctrace_scope_span *scope_span;
+ struct ctrace_instrumentation_scope *instrumentation_scope;
+ struct ctrace_link *link;
+ struct ctrace_id *span_id;
+ struct ctrace_id *trace_id;
+ struct cfl_array *array;
+ struct cfl_array *sub_array;
+ struct cfl_kvlist *kv;
+
+ ctr_opts_init(&opts);
+
+ /* ctrace context */
+ ctx = ctr_create(&opts);
+ if (!ctx) {
+ return -1;
+ }
+
+ /* resource span */
+ resource_span = ctr_resource_span_create(ctx);
+ ctr_resource_span_set_schema_url(resource_span, "https://ctraces/resource_span_schema_url");
+
+ /* create a 'resource' for the 'resource span' in question */
+ resource = ctr_resource_span_get_resource(resource_span);
+ ctr_resource_set_dropped_attr_count(resource, 5);
+
+ ctr_attributes_set_string(resource->attr, "service.name", "Fluent Bit Test Service");
+
+ /* scope span */
+ scope_span = ctr_scope_span_create(resource_span);
+ ctr_scope_span_set_schema_url(scope_span, "https://ctraces/scope_span_schema_url");
+
+ /* create an optional instrumentation scope */
+ instrumentation_scope = ctr_instrumentation_scope_create("ctrace", "a.b.c", 3, NULL);
+ ctr_scope_span_set_instrumentation_scope(scope_span, instrumentation_scope);
+
+ /* generate a random trace_id */
+ trace_id = ctr_id_create_random(CTR_ID_OTEL_TRACE_SIZE);
+
+ /* generate a random ID for the new span */
+ span_id = ctr_id_create_random(CTR_ID_OTEL_SPAN_SIZE);
+
+ /* Create a root span */
+ span_root = ctr_span_create(ctx, scope_span, "main", NULL);
+ if (!span_root) {
+ ctr_destroy(ctx);
+ ctr_opts_exit(&opts);
+ return -1;
+ }
+
+ /* assign the random ID */
+ ctr_span_set_span_id_with_cid(span_root, span_id);
+
+ /* set random trace_id */
+ ctr_span_set_trace_id_with_cid(span_root, trace_id);
+
+ /* add some attributes to the span */
+ ctr_span_set_attribute_string(span_root, "agent", "Fluent Bit");
+ ctr_span_set_attribute_int64(span_root, "year", 2022);
+ ctr_span_set_attribute_bool(span_root, "open_source", CTR_TRUE);
+ ctr_span_set_attribute_double(span_root, "temperature", 25.5);
+
+ /* pack an array: create an array context by using the CFL api */
+ array = cfl_array_create(4);
+ cfl_array_append_string(array, "first");
+ cfl_array_append_double(array, 2.0);
+ cfl_array_append_bool(array, CFL_FALSE);
+
+ sub_array = cfl_array_create(3);
+ cfl_array_append_double(sub_array, 3.1);
+ cfl_array_append_double(sub_array, 5.2);
+ cfl_array_append_double(sub_array, 6.3);
+ cfl_array_append_array(array, sub_array);
+
+ /* add array to the attribute list */
+ ctr_span_set_attribute_array(span_root, "my_array", array);
+
+ /* event: add one event and set attributes to it */
+ event = ctr_span_event_add(span_root, "connect to remote server");
+
+ ctr_span_event_set_attribute_string(event, "syscall 1", "open()");
+ ctr_span_event_set_attribute_string(event, "syscall 2", "connect()");
+ ctr_span_event_set_attribute_string(event, "syscall 3", "write()");
+
+ /* add a key/value pair list */
+ kv = cfl_kvlist_create();
+ cfl_kvlist_insert_string(kv, "language", "c");
+
+ ctr_span_set_attribute_kvlist(span_root, "my-list", kv);
+
+ /* create a child span */
+ span_child = ctr_span_create(ctx, scope_span, "do-work", span_root);
+ if (!span_child) {
+ ctr_destroy(ctx);
+ ctr_opts_exit(&opts);
+ return -1;
+ }
+
+ /* set trace_id */
+ ctr_span_set_trace_id_with_cid(span_child, trace_id);
+
+ /* use span_root ID as parent_span_id */
+ ctr_span_set_parent_span_id_with_cid(span_child, span_id);
+
+ /* delete old span id and generate a new one */
+ ctr_id_destroy(span_id);
+ span_id = create_random_span_id();
+ ctr_span_set_span_id_with_cid(span_child, span_id);
+
+ /* destroy the IDs since they are no longer needed */
+ ctr_id_destroy(span_id);
+ ctr_id_destroy(trace_id);
+
+ /* change span kind to client */
+ ctr_span_kind_set(span_child, CTRACE_SPAN_CLIENT);
+
+ /* create a Link (no valid IDs of course) */
+ trace_id = ctr_id_create_random(CTR_ID_OTEL_TRACE_SIZE);
+ span_id = ctr_id_create_random(CTR_ID_OTEL_SPAN_SIZE);
+
+ link = ctr_link_create_with_cid(span_child, trace_id, span_id);
+ ctr_link_set_trace_state(link, "aaabbbccc");
+ ctr_link_set_dropped_attr_count(link, 2);
+
+ /* delete IDs */
+ ctr_id_destroy(span_id);
+ ctr_id_destroy(trace_id);
+
+ ret = flb_input_trace_append(ins, NULL, 0, ctx);
+
+ /* destroy the context */
+ ctr_destroy(ctx);
+
+ /* exit options (releases allocated resources) */
+ ctr_opts_exit(&opts);
+
+ return ret;
+}
+
+static int cb_collector_time(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ struct event_type *ctx = (struct event_type *) in_context;
+
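+ /* Emit a sample payload matching the configured event type */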
+ if (ctx->type == FLB_EVENT_TYPE_LOGS) {
+ ret = send_logs(ins);
+ flb_plg_debug(ins, "logs, ret=%i", ret);
+ }
+ else if (ctx->type == FLB_EVENT_TYPE_METRICS) {
+ ret = send_metrics(ins);
+ flb_plg_debug(ins, "metrics, ret=%i", ret);
+ }
+ else if (ctx->type == FLB_EVENT_TYPE_TRACES) {
+ ret = send_traces(ins);
+ flb_plg_debug(ins, "traces, ret=%i", ret);
+ }
+
+ flb_plg_info(ins, "[OK] collector_time");
+ FLB_INPUT_RETURN(0);
+}
+
+/* Initialize plugin */
+static int cb_event_type_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ char *tmp;
+ struct event_type *ctx = NULL;
+
+ ctx = flb_calloc(1, sizeof(struct event_type));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ flb_input_set_context(ins, ctx);
+
+
+ ctx->type = FLB_EVENT_TYPE_LOGS;
+ tmp = (char *) flb_input_get_property("type", ins);
+ if (tmp) {
+ if (strcasecmp(tmp, "logs") == 0) {
+ ctx->type = FLB_EVENT_TYPE_LOGS;
+ }
+ else if (strcasecmp(tmp, "metrics") == 0) {
+ ctx->type = FLB_EVENT_TYPE_METRICS;
+ }
+ else if (strcasecmp(tmp, "traces") == 0) {
+ ctx->type = FLB_EVENT_TYPE_TRACES;
+ }
+ }
+
+ /* set the time collector that generates the events */
+ ret = flb_input_set_collector_time(ins, cb_collector_time,
+ ctx->interval_sec, ctx->interval_nsec, config);
+ if (ret < 0) {
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->coll_fd = ret;
+
+ return 0;
+}
+
+static int cb_event_type_exit(void *data, struct flb_config *config)
+{
+ struct event_type *ctx = data;
+
+ flb_free(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "type", "logs",
+ 0, FLB_FALSE, 0,
+ "Set the type of event to deliver, optionsa are: logs, metrics or traces"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct event_type, interval_sec),
+ "Set the interval seconds between events generation"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct event_type, interval_nsec),
+ "Set the nanoseconds interval (sub seconds)"
+ },
+
+ /* EOF */
+ {0}
+};
+
+struct flb_input_plugin in_event_type_plugin = {
+ .name = "event_type",
+ .description = "Event tests for input plugins",
+ .cb_init = cb_event_type_init,
+ .cb_pre_run = NULL,
+ .cb_collect = NULL,
+ .cb_flush_buf = NULL,
+ .cb_pause = NULL,
+ .cb_resume = NULL,
+ .cb_exit = cb_event_type_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_CORO | FLB_INPUT_THREADED
+};
diff --git a/src/fluent-bit/plugins/in_exec/CMakeLists.txt b/src/fluent-bit/plugins/in_exec/CMakeLists.txt
new file mode 100644
index 000000000..73b601dcd
--- /dev/null
+++ b/src/fluent-bit/plugins/in_exec/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ in_exec.c)
+
+FLB_PLUGIN(in_exec "${src}" "")
diff --git a/src/fluent-bit/plugins/in_exec/in_exec.c b/src/fluent-bit/plugins/in_exec/in_exec.c
new file mode 100644
index 000000000..b5d66acdc
--- /dev/null
+++ b/src/fluent-bit/plugins/in_exec/in_exec.c
@@ -0,0 +1,491 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_parser.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "in_exec_win32_compat.h"
+
+#include "in_exec.h"
+
+/* cb_collect callback */
+static int in_exec_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret = -1;
+ int cmdret;
+ int flb_exit_code;
+ uint64_t val;
+ size_t str_len = 0;
+ FILE *cmdp = NULL;
+ struct flb_exec *ctx = in_context;
+
+ /* variables for parser */
+ int parser_ret = -1;
+ void *out_buf = NULL;
+ size_t out_size = 0;
+ struct flb_time out_time;
+
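+ /* In one-shot mode the collector is triggered through the channel
+ * pipe; consume the wake-up value before running the command. */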
+ if (ctx->oneshot == FLB_TRUE) {
+ ret = flb_pipe_r(ctx->ch_manager[0], &val, sizeof(val));
+ if (ret == -1) {
+ flb_errno();
+ return -1;
+ }
+ }
+
+ cmdp = flb_popen(ctx->cmd, "r");
+ if (cmdp == NULL) {
+ flb_plg_debug(ctx->ins, "command %s failed", ctx->cmd);
+ goto collect_end;
+ }
+
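+ /* With a parser configured, every line of command output is parsed
+ * into structured fields before being encoded. */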
+ if (ctx->parser) {
+ while (fgets(ctx->buf, ctx->buf_size, cmdp) != NULL) {
+ str_len = strnlen(ctx->buf, ctx->buf_size);
+ if (ctx->buf[str_len - 1] == '\n') {
+ ctx->buf[--str_len] = '\0'; /* chomp */
+ }
+
+ flb_time_get(&out_time);
+ parser_ret = flb_parser_do(ctx->parser, ctx->buf, str_len,
+ &out_buf, &out_size, &out_time);
+ if (parser_ret >= 0) {
+ if (flb_time_to_nanosec(&out_time) == 0L) {
+ flb_time_get(&out_time);
+ }
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ &ctx->log_encoder,
+ &out_time);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_body_from_raw_msgpack(
+ &ctx->log_encoder,
+ out_buf,
+ out_size);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ }
+ else {
+ flb_plg_error(ctx->ins, "Error encoding record : %d", ret);
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+
+ flb_free(out_buf);
+ }
+ else {
+ flb_plg_trace(ctx->ins, "tried to parse '%s'", ctx->buf);
+ flb_plg_trace(ctx->ins, "buf_size %zu", ctx->buf_size);
+ flb_plg_error(ctx->ins, "parser returned an error");
+ }
+ }
+ }
+ else {
+ while (fgets(ctx->buf, ctx->buf_size, cmdp) != NULL) {
+ str_len = strnlen(ctx->buf, ctx->buf_size);
+ if (ctx->buf[str_len - 1] == '\n') {
+ ctx->buf[--str_len] = '\0'; /* chomp */
+ }
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(
+ &ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_cstring(
+ &ctx->log_encoder, "exec");
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_string(
+ &ctx->log_encoder,
+ ctx->buf,
+ str_len);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ }
+ else {
+ flb_plg_error(ctx->ins, "Error encoding record : %d", ret);
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+ }
+ }
+
+ ret = 0; /* success */
+
+ collect_end:
+ if(cmdp != NULL){
+ /*
+ * If we're propagating the child exit code to the fluent-bit exit code
+ * in one-shot mode, popen() will have invoked our child command via
+ * its own shell, so unless the shell itself exited on a signal the
+ * translation is already done for us.
+ * For references on exit code handling in wrappers see
+ * https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html
+ * and
+ * https://skarnet.org/software/execline/exitcodes.html
+ */
+ cmdret = flb_pclose(cmdp);
+ if (cmdret == -1) {
+ flb_errno();
+ flb_plg_debug(ctx->ins,
+ "unexpected error while waiting for exit of command %s ",
+ ctx->cmd);
+ /*
+ * The exit code of the shell run by popen() could not be
+ * determined; exit with 128, which is not a code that could be
+ * returned through a shell by a real child command.
+ */
+ flb_exit_code = 128;
+ } else if (FLB_WIFEXITED(cmdret)) {
+ flb_plg_debug(ctx->ins, "command %s exited with code %d",
+ ctx->cmd, FLB_WEXITSTATUS(cmdret));
+ /*
+ * Propagate shell exit code, which may encode a normal or signal
+ * exit for the real child process, directly to the caller. This
+ * could be greater than 127 if the shell encoded a signal exit
+ * status from the child process into its own return code.
+ */
+ flb_exit_code = FLB_WEXITSTATUS(cmdret);
+ } else if (FLB_WIFSIGNALED(cmdret)) {
+ flb_plg_debug(ctx->ins, "command %s exited with signal %d",
+ ctx->cmd, FLB_WTERMSIG(cmdret));
+ /*
+ * Follow the shell convention of returning 128+signo for signal
+ * exits. The consumer of fluent-bit's exit code will be unable to
+ * differentiate between the shell exiting on a signal and the
+ * process called by the shell exiting on a signal.
+ */
+ flb_exit_code = 128 + FLB_WTERMSIG(cmdret);
+ } else {
+ flb_plg_debug(ctx->ins, "command %s exited with unknown status",
+ ctx->cmd);
+ flb_exit_code = 128;
+ }
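+
+        /*
+         * A few illustrative outcomes of the mapping above (derived from the
+         * branches, not additional behavior): a command exiting with status 3
+         * yields flb_exit_code = 3; a command killed by SIGTERM (signal 15)
+         * yields 128 + 15 = 143; a wait/popen failure yields the sentinel
+         * value 128.
+         */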
+
+ /*
+ * In one-shot mode, exit fluent-bit once the child process terminates.
+ */
+ if (ctx->exit_after_oneshot == FLB_TRUE) {
+ /*
+ * propagate the child process exit code as the fluent-bit exit
+ * code so fluent-bit with the exec plugin can be used as a
+ * command wrapper.
+ */
+ if (ctx->propagate_exit_code == FLB_TRUE) {
+ config->exit_status_code = flb_exit_code;
+ }
+ flb_plg_info(ctx->ins,
+ "one-shot command exited, terminating fluent-bit");
+ flb_engine_exit(config);
+ } else {
+ flb_plg_debug(ctx->ins,
+ "one-shot command exited but exit_after_oneshot not set");
+ }
+ }
+
+ return ret;
+}
+
+/* read the configuration map and initialize the plugin context */
+static int in_exec_config_read(struct flb_exec *ctx,
+ struct flb_input_instance *in,
+ struct flb_config *config
+)
+{
+ int ret;
+
+ ctx->ins = in;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(in, "unable to load configuration");
+ return -1;
+ }
+
+    /* command setting */
+ if (ctx->cmd == NULL) {
+ flb_plg_error(in, "no input 'command' was given");
+ return -1;
+ }
+
+ if (ctx->parser_name != NULL) {
+ ctx->parser = flb_parser_get(ctx->parser_name, config);
+ if (ctx->parser == NULL) {
+ flb_plg_error(in, "requested parser '%s' not found", ctx->parser_name);
+ }
+ }
+
+ if (ctx->buf_size == -1) {
+ flb_plg_error(in, "buffer size is invalid");
+ return -1;
+ }
+
+ if (ctx->interval_sec <= 0 && ctx->interval_nsec <= 0) {
+ /* Illegal settings. Override them. */
+ ctx->interval_sec = atoi(DEFAULT_INTERVAL_SEC);
+ ctx->interval_nsec = atoi(DEFAULT_INTERVAL_NSEC);
+ }
+
+ /*
+ * propagate_exit_code is not being forced to imply exit_after_oneshot in
+     * case somebody in the future wishes to make the exec plugin exit on nonzero
+ * exit codes for normal repeating commands.
+ */
+ if (ctx->propagate_exit_code && !ctx->exit_after_oneshot) {
+ flb_plg_error(in,
+ "propagate_exit_code=True option makes no sense without "
+ "exit_after_oneshot=True");
+ return -1;
+ }
+
+ if (ctx->exit_after_oneshot && !ctx->oneshot) {
+ flb_plg_debug(in, "exit_after_oneshot implies oneshot mode, enabling");
+ ctx->oneshot = FLB_TRUE;
+ }
+
+ if (ctx->oneshot) {
+ ctx->interval_sec = -1;
+ ctx->interval_nsec = -1;
+ }
+
+ ret = flb_log_event_encoder_init(&ctx->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(in, "error initializing event encoder : %d", ret);
+
+ return -1;
+ }
+
+ flb_plg_debug(in, "interval_sec=%d interval_nsec=%d oneshot=%i buf_size=%zu",
+ ctx->interval_sec, ctx->interval_nsec, ctx->oneshot, ctx->buf_size);
+
+ return 0;
+}
+
+static void delete_exec_config(struct flb_exec *ctx)
+{
+ if (!ctx) {
+ return;
+ }
+
+ flb_log_event_encoder_destroy(&ctx->log_encoder);
+
+ /* release buffer */
+ if (ctx->buf != NULL) {
+ flb_free(ctx->buf);
+ }
+
+ if (ctx->ch_manager[0] > -1) {
+ flb_pipe_close(ctx->ch_manager[0]);
+ }
+
+ if (ctx->ch_manager[1] > -1) {
+ flb_pipe_close(ctx->ch_manager[1]);
+ }
+
+ flb_free(ctx);
+}
+
+/* Initialize plugin */
+static int in_exec_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ struct flb_exec *ctx = NULL;
+ int ret = -1;
+
+ /* Allocate space for the configuration */
+ ctx = flb_calloc(1, sizeof(struct flb_exec));
+ if (!ctx) {
+ return -1;
+ }
+ ctx->parser = NULL;
+
+ /* Initialize exec config */
+ ret = in_exec_config_read(ctx, in, config);
+ if (ret < 0) {
+ goto init_error;
+ }
+
+ ctx->buf = flb_malloc(ctx->buf_size);
+ if (ctx->buf == NULL) {
+ flb_plg_error(in, "could not allocate exec buffer");
+ goto init_error;
+ }
+
+ flb_input_set_context(in, ctx);
+
+ ctx->ch_manager[0] = -1;
+ ctx->ch_manager[1] = -1;
+
+ if (ctx->oneshot == FLB_TRUE) {
+ if (flb_pipe_create(ctx->ch_manager)) {
+ flb_plg_error(in, "could not create pipe for oneshot command");
+ goto init_error;
+ }
+
+ ret = flb_input_set_collector_event(in,
+ in_exec_collect,
+ ctx->ch_manager[0], config);
+ }
+ else {
+ ret = flb_input_set_collector_time(in,
+ in_exec_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec, config);
+ }
+ if (ret < 0) {
+ flb_plg_error(in, "could not set collector for exec input plugin");
+ goto init_error;
+ }
+
+ return 0;
+
+ init_error:
+ delete_exec_config(ctx);
+
+ return -1;
+}
+
+static int in_exec_prerun(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ uint64_t val = 0xc003; /* dummy constant */
+ struct flb_exec *ctx = in_context;
+ (void) ins;
+ (void) config;
+
+ if (ctx->oneshot == FLB_FALSE) {
+ return 0;
+ }
+
+ /* Kick the oneshot execution */
+ ret = flb_pipe_w(ctx->ch_manager[1], &val, sizeof(val));
+ if (ret == -1) {
+ flb_errno();
+ return -1;
+ }
+ return 0;
+}
+
+static int in_exec_exit(void *data, struct flb_config *config)
+{
+    (void) config;
+ struct flb_exec *ctx = data;
+
+ delete_exec_config(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "command", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_exec, cmd),
+ "Set the command to execute"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "parser", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_exec, parser_name),
+ "Set a parser"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct flb_exec, interval_sec),
+ "Set the collector interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct flb_exec, interval_nsec),
+ "Set the collector interval (nanoseconds)"
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "buf_size", DEFAULT_BUF_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_exec, buf_size),
+ "Set the buffer size"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "oneshot", "false",
+ 0, FLB_TRUE, offsetof(struct flb_exec, oneshot),
+ "execute the command only once"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "exit_after_oneshot", "false",
+ 0, FLB_TRUE, offsetof(struct flb_exec, exit_after_oneshot),
+ "exit fluent-bit after the command terminates in one-shot mode"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "propagate_exit_code", "false",
+ 0, FLB_TRUE, offsetof(struct flb_exec, propagate_exit_code),
+ "propagate oneshot exit command fluent-bit exit code using "
+ "shell exit code translation conventions"
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_input_plugin in_exec_plugin = {
+ .name = "exec",
+ .description = "Exec Input",
+ .cb_init = in_exec_init,
+ .cb_pre_run = in_exec_prerun,
+ .cb_collect = in_exec_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_exec_exit,
+ .config_map = config_map
+};
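+
+/*
+ * Example usage (a minimal configuration sketch; property names follow the
+ * config map above, the commands shown are illustrative only):
+ *
+ *   [INPUT]
+ *       Name                 exec
+ *       Command              uptime
+ *       Interval_Sec         5
+ *
+ * One-shot wrapper mode, propagating the child's exit code to fluent-bit:
+ *
+ *   [INPUT]
+ *       Name                 exec
+ *       Command              /usr/local/bin/backup.sh
+ *       Oneshot              true
+ *       Exit_After_Oneshot   true
+ *       Propagate_Exit_Code  true
+ */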
diff --git a/src/fluent-bit/plugins/in_exec/in_exec.h b/src/fluent-bit/plugins/in_exec/in_exec.h
new file mode 100644
index 000000000..efde8c8d9
--- /dev/null
+++ b/src/fluent-bit/plugins/in_exec/in_exec.h
@@ -0,0 +1,52 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef FLB_IN_EXEC_H
+#define FLB_IN_EXEC_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <msgpack.h>
+
+#define DEFAULT_BUF_SIZE "4096"
+#define DEFAULT_INTERVAL_SEC "1"
+#define DEFAULT_INTERVAL_NSEC "0"
+
+struct flb_exec {
+ flb_sds_t cmd;
+ flb_sds_t parser_name;
+ struct flb_parser *parser;
+ char *buf;
+ size_t buf_size;
+ struct flb_input_instance *ins;
+ int oneshot;
+ flb_pipefd_t ch_manager[2];
+ int interval_sec;
+ int interval_nsec;
+ struct flb_log_event_encoder log_encoder;
+ int exit_after_oneshot;
+ int propagate_exit_code;
+};
+
+#endif /* FLB_IN_EXEC_H */
diff --git a/src/fluent-bit/plugins/in_exec/in_exec_win32_compat.h b/src/fluent-bit/plugins/in_exec/in_exec_win32_compat.h
new file mode 100644
index 000000000..9f0dfe695
--- /dev/null
+++ b/src/fluent-bit/plugins/in_exec/in_exec_win32_compat.h
@@ -0,0 +1,94 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef FLB_IN_EXEC_WIN32_COMPAT_H
+#define FLB_IN_EXEC_WIN32_COMPAT_H
+
+#include <stdio.h>
+#include <fluent-bit/flb_info.h>
+
+/*
+ * Work around lack of sys/wait.h and POSIX exit status macros from waitpid()
+ * in win32's _popen() and _pclose() implementation, since fluent-bit uses
+ * these in the in_exec plugin.
+ *
+ * On POSIX-like OSes these simply alias the standard macros.
+ *
+ * On Windows, where the concept of a signal exit does not exist, dummy
+ * macros are defined to indicate that the process exited normally and to
+ * extract the exit code.
+ *
+ * These macros are for use with flb_pclose() only. Do not use them with
+ * other APIs that may differ in return value semantics.
+ */
+#ifdef FLB_HAVE_SYS_WAIT_H
+#include <sys/wait.h>
+#define FLB_WIFEXITED(status) WIFEXITED((status))
+#define FLB_WEXITSTATUS(status) WEXITSTATUS((status))
+#define FLB_WIFSIGNALED(status) WIFSIGNALED((status))
+#define FLB_WTERMSIG(status) WTERMSIG((status))
+#else
+#define FLB_WIFEXITED(status) (1)
+#define FLB_WEXITSTATUS(status) ((status) & 0x00ff)
+#define FLB_WIFSIGNALED(status) (0)
+#define FLB_WTERMSIG(status) (-1)
+#endif
+
+/*
+ * Because Windows has to do everything differently, call _popen() and
+ * _pclose() instead of the POSIX popen() and pclose() functions.
+ *
+ * flb_pclose() has different return value semantics on Windows vs non-windows
+ * targets because it propagates the pclose() or _pclose() return value
+ * directly. You MUST use the FLB_WIFEXITED(), FLB_WEXITSTATUS(),
+ * FLB_WIFSIGNALED() and FLB_WTERMSIG() macros to consume the return value,
+ * rather than the underlying POSIX macros or manual bit-shifts.
+ */
+#if !defined(FLB_SYSTEM_WINDOWS)
+static inline FILE* flb_popen(const char *command, const char *type) {
+ return popen(command, type);
+}
+static inline int flb_pclose(FILE *stream) {
+ return pclose(stream);
+}
+#define FLB_PCLOSE pclose
+#else
+static inline FILE* flb_popen(const char *command, const char *type) {
+ return _popen(command, type);
+}
+/*
+ * flb_pclose() has the same return value on Windows as win32 _pclose(), rather
+ * than posix pclose(). The process exit code is not bit-shifted to the high
+ * byte.
+ *
+ * The MSVC docs for _pclose() at
+ * https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/pclose?view=msvc-170
+ * are misleading; they say that "The format of the return value is the same as
+ * for _cwait, except the low-order and high-order bytes are swapped." But
+ * _cwait isn't documented as having any meaningful return on success, the
+ * process exit code is meant to be in its "termstat" out parameter per
+ * https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/cwait?view=msvc-170
+ * The return code of _pclose() actually appears to be the process exit code
+ * without the bit-shift that waitpid() applies.
+ */
+static inline int flb_pclose(FILE *stream) {
+ return _pclose(stream);
+}
+#endif
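+
+/*
+ * Minimal usage sketch (assuming `cmd` holds the command line), showing how
+ * the status returned by flb_pclose() is intended to be decoded with the
+ * FLB_* macros above instead of the raw POSIX macros:
+ *
+ *   FILE *p = flb_popen(cmd, "r");
+ *   // ... read the command output ...
+ *   int status = flb_pclose(p);
+ *   if (FLB_WIFEXITED(status)) {
+ *       exit_code = FLB_WEXITSTATUS(status);
+ *   }
+ *   else if (FLB_WIFSIGNALED(status)) {
+ *       exit_code = 128 + FLB_WTERMSIG(status);
+ *   }
+ */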
+
+#endif /* FLB_IN_EXEC_WIN32_COMPAT_H */
diff --git a/src/fluent-bit/plugins/in_exec_wasi/CMakeLists.txt b/src/fluent-bit/plugins/in_exec_wasi/CMakeLists.txt
new file mode 100644
index 000000000..7dcb817a1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_exec_wasi/CMakeLists.txt
@@ -0,0 +1,11 @@
+set(WAMR_ROOT_DIR ../../${FLB_PATH_LIB_WASM_MICRO_RUNTIME})
+set(WASM_INCLUDE_DIRS
+ ${WAMR_ROOT_DIR}/core/iwasm/include
+ )
+
+set(src
+ in_exec_wasi.c)
+
+FLB_PLUGIN(in_exec_wasi "${src}" "")
+target_include_directories(flb-plugin-in_exec_wasi PRIVATE ${WASM_INCLUDE_DIRS})
+target_link_libraries(flb-plugin-in_exec_wasi flb-wasm-static vmlib-static)
diff --git a/src/fluent-bit/plugins/in_exec_wasi/in_exec_wasi.c b/src/fluent-bit/plugins/in_exec_wasi/in_exec_wasi.c
new file mode 100644
index 000000000..bf765430d
--- /dev/null
+++ b/src/fluent-bit/plugins/in_exec_wasi/in_exec_wasi.c
@@ -0,0 +1,451 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_kv.h>
+#include <msgpack.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef FLB_SYSTEM_WINDOWS
+#define STDIN_FILENO (_fileno( stdin ))
+#define STDOUT_FILENO (_fileno( stdout ))
+#define STDERR_FILENO (_fileno( stderr ))
+#else
+#include <unistd.h>
+#endif
+
+#include "in_exec_wasi.h"
+
+/* cb_collect callback */
+static int in_exec_wasi_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret = -1;
+ uint64_t val;
+ size_t str_len = 0;
+ struct flb_exec_wasi *ctx = in_context;
+ struct flb_wasm *wasm = NULL;
+ FILE *stdoutp = tmpfile();
+
+ /* variables for parser */
+ int parser_ret = -1;
+ void *out_buf = NULL;
+ size_t out_size = 0;
+ struct flb_time out_time;
+
+ /* Validate the temporary file was created */
+ if (stdoutp == NULL) {
+ flb_plg_error(ctx->ins, "failed to created temporary file");
+ return -1;
+ }
+
+ if (ctx->oneshot == FLB_TRUE) {
+ ret = flb_pipe_r(ctx->ch_manager[0], &val, sizeof(val));
+ if (ret == -1) {
+ fclose(stdoutp);
+ flb_errno();
+ return -1;
+ }
+ }
+
+ wasm = flb_wasm_instantiate(config, ctx->wasi_path, ctx->accessible_dir_list, -1, fileno(stdoutp), -1);
+ if (wasm == NULL) {
+ flb_plg_debug(ctx->ins, "instantiate wasm [%s] failed", ctx->wasi_path);
+ goto collect_end;
+ }
+ ctx->wasm = wasm;
+
+ ret = flb_wasm_call_wasi_main(ctx->wasm);
+
+ if (!ret) {
+ flb_plg_error(ctx->ins, "WASI main function is not found");
+ goto collect_end;
+ }
+
+ if (ctx->parser) {
+ rewind(stdoutp);
+
+ while (fgets(ctx->buf, ctx->buf_size, stdoutp) != NULL) {
+ str_len = strnlen(ctx->buf, ctx->buf_size);
+ if (ctx->buf[str_len - 1] == '\n') {
+ ctx->buf[--str_len] = '\0'; /* chomp */
+ }
+
+ flb_time_get(&out_time);
+ parser_ret = flb_parser_do(ctx->parser, ctx->buf, str_len,
+ &out_buf, &out_size, &out_time);
+ if (parser_ret >= 0) {
+ if (flb_time_to_nanosec(&out_time) == 0L) {
+ flb_time_get(&out_time);
+ }
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ &ctx->log_encoder,
+ &out_time);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_body_from_raw_msgpack(
+ &ctx->log_encoder,
+ out_buf,
+ out_size);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ }
+ else {
+ flb_plg_error(ctx->ins, "Error encoding record : %d", ret);
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+
+ flb_free(out_buf);
+ }
+ else {
+ flb_plg_trace(ctx->ins, "tried to parse '%s'", ctx->buf);
+ flb_plg_trace(ctx->ins, "buf_size %zu", ctx->buf_size);
+ flb_plg_error(ctx->ins, "parser returned an error");
+ }
+ }
+ }
+ else {
+ rewind(stdoutp);
+
+ while (fgets(ctx->buf, ctx->buf_size, stdoutp) != NULL) {
+ str_len = strnlen(ctx->buf, ctx->buf_size);
+ if (ctx->buf[str_len - 1] == '\n') {
+ ctx->buf[--str_len] = '\0'; /* chomp */
+ }
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(
+ &ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_cstring(
+ &ctx->log_encoder, "wasi_stdout");
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_string(
+ &ctx->log_encoder,
+ ctx->buf,
+ str_len);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ }
+ else {
+ flb_plg_error(ctx->ins, "Error encoding record : %d", ret);
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+ }
+ }
+
+ collect_end:
+ if (ctx->wasm != NULL) {
+ flb_wasm_destroy(ctx->wasm);
+ }
+ fclose(stdoutp);
+
+ return ret;
+}
+
+/* read the configuration map and initialize the plugin context */
+static int in_exec_wasi_config_read(struct flb_exec_wasi *ctx,
+ struct flb_input_instance *in,
+ struct flb_config *config)
+{
+ int ret;
+
+ ctx->ins = in;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(in, "unable to load configuration");
+ return -1;
+ }
+
+    /* wasi_path setting */
+    if (ctx->wasi_path == NULL) {
+        flb_plg_error(in, "no 'wasi_path' was given");
+ return -1;
+ }
+
+ if (ctx->parser_name != NULL) {
+ ctx->parser = flb_parser_get(ctx->parser_name, config);
+ if (ctx->parser == NULL) {
+ flb_plg_error(in, "requested parser '%s' not found", ctx->parser_name);
+ }
+ }
+
+ if (ctx->buf_size == -1) {
+ flb_plg_error(in, "buffer size is invalid");
+ return -1;
+ }
+
+ if (ctx->interval_sec <= 0 && ctx->interval_nsec <= 0) {
+ /* Illegal settings. Override them. */
+ ctx->interval_sec = atoi(DEFAULT_INTERVAL_SEC);
+ ctx->interval_nsec = atoi(DEFAULT_INTERVAL_NSEC);
+ }
+
+ if (ctx->oneshot) {
+ ctx->interval_sec = -1;
+ ctx->interval_nsec = -1;
+ }
+
+ ret = flb_log_event_encoder_init(&ctx->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "error initializing event encoder : %d", ret);
+
+ return -1;
+ }
+
+ flb_plg_debug(in, "interval_sec=%d interval_nsec=%d oneshot=%i buf_size=%zu",
+ ctx->interval_sec, ctx->interval_nsec, ctx->oneshot, ctx->buf_size);
+
+ return 0;
+}
+
+static void delete_exec_wasi_config(struct flb_exec_wasi *ctx)
+{
+ if (!ctx) {
+ return;
+ }
+
+ flb_log_event_encoder_destroy(&ctx->log_encoder);
+
+ /* release buffer */
+ if (ctx->buf != NULL) {
+ flb_free(ctx->buf);
+ }
+
+ if (ctx->ch_manager[0] > -1) {
+ flb_pipe_close(ctx->ch_manager[0]);
+ }
+
+ if (ctx->ch_manager[1] > -1) {
+ flb_pipe_close(ctx->ch_manager[1]);
+ }
+
+ flb_free(ctx);
+}
+
+/* Initialize plugin */
+static int in_exec_wasi_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ struct flb_exec_wasi *ctx = NULL;
+ int ret = -1;
+
+ /* Allocate space for the configuration */
+ ctx = flb_malloc(sizeof(struct flb_exec_wasi));
+ if (!ctx) {
+ return -1;
+ }
+ ctx->parser = NULL;
+ ctx->parser_name = NULL;
+ ctx->wasm = NULL;
+ ctx->wasi_path = NULL;
+ ctx->oneshot = FLB_FALSE;
+
+ /* Initialize exec config */
+ ret = in_exec_wasi_config_read(ctx, in, config);
+ if (ret < 0) {
+ goto init_error;
+ }
+
+ flb_wasm_init(config);
+
+ ctx->buf = flb_malloc(ctx->buf_size);
+ if (ctx->buf == NULL) {
+ flb_plg_error(in, "could not allocate exec buffer");
+ goto init_error;
+ }
+
+ flb_input_set_context(in, ctx);
+
+ ctx->ch_manager[0] = -1;
+ ctx->ch_manager[1] = -1;
+
+ if (ctx->oneshot == FLB_TRUE) {
+ if (flb_pipe_create(ctx->ch_manager)) {
+ flb_plg_error(in, "could not create pipe for oneshot command");
+ goto init_error;
+ }
+
+ ret = flb_input_set_collector_event(in,
+ in_exec_wasi_collect,
+ ctx->ch_manager[0], config);
+ }
+ else {
+ ret = flb_input_set_collector_time(in,
+ in_exec_wasi_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec, config);
+ }
+ if (ret < 0) {
+ flb_plg_error(in, "could not set collector for exec input plugin");
+ goto init_error;
+ }
+ ctx->coll_fd = ret;
+
+ return 0;
+
+ init_error:
+ delete_exec_wasi_config(ctx);
+
+ return -1;
+}
+
+static void in_exec_wasi_pause(void *data, struct flb_config *config)
+{
+ struct flb_exec_wasi *ctx = data;
+
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+}
+
+static void in_exec_wasi_resume(void *data, struct flb_config *config)
+{
+ struct flb_exec_wasi *ctx = data;
+
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+}
+
+static int in_exec_wasi_prerun(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ uint64_t val = 0xc003; /* dummy constant */
+ struct flb_exec_wasi *ctx = in_context;
+ (void) ins;
+ (void) config;
+
+ if (ctx->oneshot == FLB_FALSE) {
+ return 0;
+ }
+
+ /* Kick the oneshot execution */
+ ret = flb_pipe_w(ctx->ch_manager[1], &val, sizeof(val));
+ if (ret == -1) {
+ flb_errno();
+ return -1;
+ }
+ return 0;
+}
+
+static int in_exec_wasi_exit(void *data, struct flb_config *config)
+{
+ struct flb_exec_wasi *ctx = data;
+
+ flb_wasm_destroy_all(config);
+ delete_exec_wasi_config(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "wasi_path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_exec_wasi, wasi_path),
+ "Set the path of WASM program to execute"
+ },
+ {
+ FLB_CONFIG_MAP_CLIST, "accessible_paths", ".",
+ 0, FLB_TRUE, offsetof(struct flb_exec_wasi, accessible_dir_list),
+ "Specifying paths to be accessible from a WASM program."
+ "Default value is current working directory"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "parser", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_exec_wasi, parser_name),
+ "Set a parser"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct flb_exec_wasi, interval_sec),
+ "Set the collector interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct flb_exec_wasi, interval_nsec),
+ "Set the collector interval (nanoseconds)"
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "buf_size", DEFAULT_BUF_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_exec_wasi, buf_size),
+ "Set the buffer size"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "bool", "false",
+ 0, FLB_TRUE, offsetof(struct flb_exec_wasi, oneshot),
+ "execute the command only once"
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_input_plugin in_exec_wasi_plugin = {
+ .name = "exec_wasi",
+ .description = "Exec WASI Input",
+ .cb_init = in_exec_wasi_init,
+ .cb_pre_run = in_exec_wasi_prerun,
+ .cb_pause = in_exec_wasi_pause,
+ .cb_resume = in_exec_wasi_resume,
+ .cb_collect = in_exec_wasi_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_exec_wasi_exit,
+ .config_map = config_map
+};
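+
+/*
+ * Example usage (a minimal configuration sketch; property names follow the
+ * config map above, the .wasm path is illustrative only):
+ *
+ *   [INPUT]
+ *       Name              exec_wasi
+ *       WASI_Path         /path/to/program.wasm
+ *       Accessible_Paths  .,/tmp
+ *       Interval_Sec      5
+ */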
diff --git a/src/fluent-bit/plugins/in_exec_wasi/in_exec_wasi.h b/src/fluent-bit/plugins/in_exec_wasi/in_exec_wasi.h
new file mode 100644
index 000000000..132407bb9
--- /dev/null
+++ b/src/fluent-bit/plugins/in_exec_wasi/in_exec_wasi.h
@@ -0,0 +1,55 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef FLB_IN_EXEC_WASI_H
+#define FLB_IN_EXEC_WASI_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <fluent-bit/wasm/flb_wasm.h>
+
+#include <msgpack.h>
+
+#define DEFAULT_BUF_SIZE "4096"
+#define DEFAULT_INTERVAL_SEC "1"
+#define DEFAULT_INTERVAL_NSEC "0"
+
+struct flb_exec_wasi {
+ flb_sds_t wasi_path;
+ struct mk_list *accessible_dir_list; /* list of directories to be
+                                          * accessible from WASM */
+ flb_sds_t parser_name;
+ struct flb_parser *parser;
+ char *buf;
+ size_t buf_size;
+ struct flb_input_instance *ins;
+ struct flb_wasm *wasm;
+ int oneshot;
+ flb_pipefd_t ch_manager[2];
+ int interval_sec;
+ int interval_nsec;
+ struct flb_log_event_encoder log_encoder;
+ int coll_fd;
+};
+
+#endif /* FLB_IN_EXEC_WASI_H */
diff --git a/src/fluent-bit/plugins/in_fluentbit_metrics/CMakeLists.txt b/src/fluent-bit/plugins/in_fluentbit_metrics/CMakeLists.txt
new file mode 100644
index 000000000..299e05b58
--- /dev/null
+++ b/src/fluent-bit/plugins/in_fluentbit_metrics/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ metrics.c
+ )
+
+FLB_PLUGIN(in_fluentbit_metrics "${src}" "")
diff --git a/src/fluent-bit/plugins/in_fluentbit_metrics/metrics.c b/src/fluent-bit/plugins/in_fluentbit_metrics/metrics.c
new file mode 100644
index 000000000..779a03651
--- /dev/null
+++ b/src/fluent-bit/plugins/in_fluentbit_metrics/metrics.c
@@ -0,0 +1,201 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_metrics.h>
+#include <fluent-bit/flb_metrics_exporter.h>
+
+struct flb_in_metrics {
+ /* config map options */
+ int scrape_on_start;
+ int scrape_interval;
+
+ /* internal */
+ int coll_fd_start;
+ int coll_fd_runtime;
+ struct cmt_counter *c;
+ struct flb_input_instance *ins;
+};
+
+static int scrape_metrics(struct flb_config *config, struct flb_in_metrics *ctx)
+{
+ int ret;
+ size_t ts;
+ char *name;
+ struct cmt *cmt;
+
+ /* Update internal metric */
+ ts = cfl_time_now();
+ name = (char *) flb_input_name(ctx->ins);
+ cmt_counter_inc(ctx->c, ts, 1, (char *[]) {name});
+
+
+ cmt = flb_me_get_cmetrics(config);
+ if (!cmt) {
+ flb_plg_error(ctx->ins, "could not scrape metrics");
+ return 0;
+ }
+
+ /* Append the updated metrics */
+ ret = flb_input_metrics_append(ctx->ins, NULL, 0, cmt);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "could not append metrics");
+ }
+ cmt_destroy(cmt);
+
+ return 0;
+}
+
+/*
+ * Update the metrics, this function is invoked every time 'scrape_interval'
+ * expires.
+ */
+static int cb_metrics_collect_runtime(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ return scrape_metrics(config, in_context);
+}
+
+static int cb_metrics_collect_start(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_in_metrics *ctx = in_context;
+
+ /* pause collector */
+ flb_input_collector_pause(ctx->coll_fd_start, ctx->ins);
+
+ return scrape_metrics(config, ctx);
+}
+
+static int in_metrics_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct flb_in_metrics *ctx;
+
+ /* Create plugin context */
+ ctx = flb_calloc(1, sizeof(struct flb_in_metrics));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = in;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Associate context with the instance */
+ flb_input_set_context(in, ctx);
+
+ /* Scrape metrics on start / collector */
+ if (ctx->scrape_interval > 2 && ctx->scrape_on_start) {
+ ret = flb_input_set_collector_time(in,
+ cb_metrics_collect_start,
+ 5, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set collector on start for Fluent Bit "
+ "metrics plugin");
+ return -1;
+ }
+ ctx->coll_fd_start = ret;
+ }
+
+ /* Create the runtime collector */
+ ret = flb_input_set_collector_time(in,
+ cb_metrics_collect_runtime,
+ ctx->scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set collector for Fluent Bit metrics plugin");
+ return -1;
+ }
+ ctx->coll_fd_runtime = ret;
+
+ /* Internal metrics */
+ ctx->c = cmt_counter_create(ctx->ins->cmt,
+ "fluentbit", "input_metrics", "scrapes_total",
+ "Number of total metrics scrapes",
+ 1, (char *[]) {"name"});
+ return 0;
+}
+
+static int in_metrics_exit(void *data, struct flb_config *config)
+{
+ struct flb_in_metrics *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ flb_free(ctx);
+ return 0;
+}
+
+static void in_metrics_pause(void *data, struct flb_config *config)
+{
+ struct flb_in_metrics *ctx = data;
+
+ flb_input_collector_pause(ctx->coll_fd_runtime, ctx->ins);
+}
+
+static void in_metrics_resume(void *data, struct flb_config *config)
+{
+ struct flb_in_metrics *ctx = data;
+
+ flb_input_collector_resume(ctx->coll_fd_runtime, ctx->ins);
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_TIME, "scrape_interval", "2",
+ 0, FLB_TRUE, offsetof(struct flb_in_metrics, scrape_interval),
+ "scrape interval to collect the internal metrics of Fluent Bit."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "scrape_on_start", "false",
+ 0, FLB_TRUE, offsetof(struct flb_in_metrics, scrape_on_start),
+ "scrape metrics upon start, useful to avoid waiting for 'scrape_interval' "
+ "for the first round of metrics."
+ },
+
+ /* EOF */
+ {0}
+};
+
+struct flb_input_plugin in_fluentbit_metrics_plugin = {
+ .name = "fluentbit_metrics",
+ .description = "Fluent Bit internal metrics",
+ .cb_init = in_metrics_init,
+ .cb_pre_run = NULL,
+ .cb_flush_buf = NULL,
+ .config_map = config_map,
+ .cb_pause = in_metrics_pause,
+ .cb_resume = in_metrics_resume,
+ .cb_exit = in_metrics_exit,
+};
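+
+/*
+ * Example usage (a minimal configuration sketch; property names follow the
+ * config map above). It is normally paired with an output that accepts
+ * metrics, such as prometheus_exporter:
+ *
+ *   [INPUT]
+ *       Name              fluentbit_metrics
+ *       Scrape_Interval   2
+ *       Scrape_On_Start   true
+ */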
diff --git a/src/fluent-bit/plugins/in_forward/CMakeLists.txt b/src/fluent-bit/plugins/in_forward/CMakeLists.txt
new file mode 100644
index 000000000..ce4f62728
--- /dev/null
+++ b/src/fluent-bit/plugins/in_forward/CMakeLists.txt
@@ -0,0 +1,7 @@
+set(src
+ fw.c
+ fw_conn.c
+ fw_prot.c
+ fw_config.c)
+
+FLB_PLUGIN(in_forward "${src}" "")
diff --git a/src/fluent-bit/plugins/in_forward/fw.c b/src/fluent-bit/plugins/in_forward/fw.c
new file mode 100644
index 000000000..b85b198c6
--- /dev/null
+++ b/src/fluent-bit/plugins/in_forward/fw.c
@@ -0,0 +1,325 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_engine.h>
+#include <fluent-bit/flb_downstream.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_network.h>
+#include <msgpack.h>
+
+#ifdef FLB_HAVE_UNIX_SOCKET
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/stat.h>
+#endif
+
+#include "fw.h"
+#include "fw_conn.h"
+#include "fw_config.h"
+
+#ifdef FLB_HAVE_UNIX_SOCKET
+static int remove_existing_socket_file(char *socket_path)
+{
+ struct stat file_data;
+ int result;
+
+ result = stat(socket_path, &file_data);
+
+ if (result == -1) {
+ if (errno == ENOENT) {
+ return 0;
+ }
+
+ flb_errno();
+
+ return -1;
+ }
+
+ if (S_ISSOCK(file_data.st_mode) == 0) {
+ return -2;
+ }
+
+ result = unlink(socket_path);
+
+ if (result != 0) {
+ return -3;
+ }
+
+ return 0;
+}
+
+static int fw_unix_create(struct flb_in_fw_config *ctx)
+{
+ int ret;
+
+ ret = remove_existing_socket_file(ctx->unix_path);
+
+ if (ret != 0) {
+ if (ret == -2) {
+ flb_plg_error(ctx->ins,
+ "%s exists and it is not a unix socket. Aborting",
+ ctx->unix_path);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "could not remove existing unix socket %s. Aborting",
+ ctx->unix_path);
+ }
+
+ return -1;
+ }
+
+ ctx->downstream = flb_downstream_create(FLB_TRANSPORT_UNIX_STREAM,
+ ctx->ins->flags,
+ ctx->unix_path,
+ 0,
+ ctx->ins->tls,
+ ctx->ins->config,
+ &ctx->ins->net_setup);
+
+ if (ctx->downstream == NULL) {
+ return -1;
+ }
+
+ if (ctx->unix_perm_str) {
+ if (chmod(ctx->unix_path, ctx->unix_perm)) {
+ flb_errno();
+
+ flb_plg_error(ctx->ins, "cannot set permission on '%s' to %04o",
+ ctx->unix_path, ctx->unix_perm);
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+/*
+ * For a server event, the collection event means a new client has arrived;
+ * we accept the connection and create a new FW connection context that will
+ * wait for MessagePack records.
+ */
+static int in_fw_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_connection *connection;
+ struct fw_conn *conn;
+ struct flb_in_fw_config *ctx;
+
+ ctx = in_context;
+
+ connection = flb_downstream_conn_get(ctx->downstream);
+
+ if (connection == NULL) {
+ flb_plg_error(ctx->ins, "could not accept new connection");
+
+ return -1;
+ }
+
+ if (!config->is_ingestion_active) {
+ flb_downstream_conn_release(connection);
+
+ return -1;
+ }
+
+ flb_plg_trace(ins, "new TCP connection arrived FD=%i", connection->fd);
+
+ conn = fw_conn_add(connection, ctx);
+
+ if (!conn) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Initialize plugin */
+static int in_fw_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ unsigned short int port;
+ int ret;
+ struct flb_in_fw_config *ctx;
+
+ (void) data;
+
+ /* Allocate space for the configuration */
+ ctx = fw_config_init(ins);
+ if (!ctx) {
+ return -1;
+ }
+
+ ctx->coll_fd = -1;
+ ctx->ins = ins;
+ mk_list_init(&ctx->connections);
+
+ /* Set the context */
+ flb_input_set_context(ins, ctx);
+
+ /* Unix Socket mode */
+ if (ctx->unix_path) {
+#ifndef FLB_HAVE_UNIX_SOCKET
+ flb_plg_error(ctx->ins, "unix address is not supported %s:%s. Aborting",
+ ctx->listen, ctx->tcp_port);
+ fw_config_destroy(ctx);
+ return -1;
+#else
+ ret = fw_unix_create(ctx);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "could not listen on unix://%s",
+ ctx->unix_path);
+ fw_config_destroy(ctx);
+ return -1;
+ }
+ flb_plg_info(ctx->ins, "listening on unix://%s", ctx->unix_path);
+#endif
+ }
+ else {
+ port = (unsigned short int) strtoul(ctx->tcp_port, NULL, 10);
+
+ ctx->downstream = flb_downstream_create(FLB_TRANSPORT_TCP,
+ ctx->ins->flags,
+ ctx->listen,
+ port,
+ ctx->ins->tls,
+ config,
+ &ctx->ins->net_setup);
+
+ if (ctx->downstream == NULL) {
+ flb_plg_error(ctx->ins,
+ "could not initialize downstream on unix://%s. Aborting",
+ ctx->listen);
+
+ fw_config_destroy(ctx);
+
+ return -1;
+ }
+
+        flb_plg_info(ctx->ins, "listening on %s:%s",
+                     ctx->listen, ctx->tcp_port);
+ }
+
+ flb_input_downstream_set(ctx->downstream, ctx->ins);
+
+ flb_net_socket_nonblocking(ctx->downstream->server_fd);
+
+    /* Register a collector for incoming connections on the server socket */
+ ret = flb_input_set_collector_socket(ins,
+ in_fw_collect,
+ ctx->downstream->server_fd,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not set server socket collector");
+ fw_config_destroy(ctx);
+ return -1;
+ }
+
+ ctx->coll_fd = ret;
+
+ return 0;
+}
+
+static void in_fw_pause(void *data, struct flb_config *config)
+{
+ struct flb_in_fw_config *ctx = data;
+
+    /*
+     * If the plugin is paused AND ingestion is no longer active, we are in
+     * a shutdown phase, so this plugin can safely close the socket server
+     * collector.
+     *
+     * This socket stop is a workaround since the server API will be
+     * refactored shortly.
+     */
+ if (config->is_ingestion_active == FLB_FALSE) {
+ fw_conn_del_all(ctx);
+ }
+}
+
+static int in_fw_exit(void *data, struct flb_config *config)
+{
+    (void) config;
+ struct flb_in_fw_config *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ fw_conn_del_all(ctx);
+ fw_config_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "tag_prefix", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_fw_config, tag_prefix),
+ "Prefix incoming tag with the defined value."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "unix_path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_fw_config, unix_path),
+ "The path to unix socket to receive a Forward message."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "unix_perm", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_fw_config, unix_perm_str),
+ "Set the permissions for the UNIX socket"
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_chunk_size", FLB_IN_FW_CHUNK_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_in_fw_config, buffer_chunk_size),
+ "The buffer memory size used to receive a Forward message."
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_max_size", FLB_IN_FW_CHUNK_MAX_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_in_fw_config, buffer_max_size),
+ "The maximum buffer memory size used to receive a Forward message."
+ },
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_forward_plugin = {
+ .name = "forward",
+ .description = "Fluentd in-forward",
+ .cb_init = in_fw_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_fw_collect,
+ .cb_flush_buf = NULL,
+ .cb_pause = in_fw_pause,
+ .cb_exit = in_fw_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET_SERVER | FLB_IO_OPT_TLS
+};
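+
+/*
+ * Example usage (a minimal configuration sketch; property names follow the
+ * config map above plus the generic listener options):
+ *
+ *   [INPUT]
+ *       Name       forward
+ *       Listen     0.0.0.0
+ *       Port       24224
+ *
+ * Unix socket mode instead of TCP (the socket path is illustrative only):
+ *
+ *   [INPUT]
+ *       Name       forward
+ *       Unix_Path  /tmp/fluent.sock
+ *       Unix_Perm  0660
+ */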
diff --git a/src/fluent-bit/plugins/in_forward/fw.h b/src/fluent-bit/plugins/in_forward/fw.h
new file mode 100644
index 000000000..454f255b9
--- /dev/null
+++ b/src/fluent-bit/plugins/in_forward/fw.h
@@ -0,0 +1,52 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_FW_H
+#define FLB_IN_FW_H
+
+#include <msgpack.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+struct flb_in_fw_config {
+ size_t buffer_max_size; /* Max Buffer size */
+ size_t buffer_chunk_size; /* Chunk allocation size */
+
+ /* Network */
+ char *listen; /* Listen interface */
+ char *tcp_port; /* TCP Port */
+
+ flb_sds_t tag_prefix; /* tag prefix */
+
+ /* Unix Socket */
+ char *unix_path; /* Unix path for socket */
+ unsigned int unix_perm; /* Permission for socket */
+ flb_sds_t unix_perm_str; /* Permission (config map) */
+
+ int coll_fd;
+ struct flb_downstream *downstream; /* Client manager */
+ struct mk_list connections; /* List of active connections */
+    struct flb_input_instance *ins; /* Input plugin instance */
+
+ struct flb_log_event_decoder *log_decoder;
+ struct flb_log_event_encoder *log_encoder;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_forward/fw_config.c b/src/fluent-bit/plugins/in_forward/fw_config.c
new file mode 100644
index 000000000..7edbab0c7
--- /dev/null
+++ b/src/fluent-bit/plugins/in_forward/fw_config.c
@@ -0,0 +1,120 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_downstream.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "fw.h"
+#include "fw_conn.h"
+#include "fw_config.h"
+
+struct flb_in_fw_config *fw_config_init(struct flb_input_instance *i_ins)
+{
+ char tmp[16];
+ int ret = -1;
+ const char *p;
+ struct flb_in_fw_config *config;
+
+ config = flb_calloc(1, sizeof(struct flb_in_fw_config));
+ if (!config) {
+ flb_errno();
+ return NULL;
+ }
+ config->coll_fd = -1;
+
+ config->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (config->log_encoder == NULL) {
+ flb_plg_error(i_ins, "could not initialize event encoder");
+ fw_config_destroy(config);
+
+ return NULL;
+ }
+
+ config->log_decoder = flb_log_event_decoder_create(NULL, 0);
+
+ if (config->log_decoder == NULL) {
+ flb_plg_error(i_ins, "could not initialize event decoder");
+ fw_config_destroy(config);
+
+ return NULL;
+ }
+
+ ret = flb_input_config_map_set(i_ins, (void *)config);
+ if (ret == -1) {
+ flb_plg_error(i_ins, "config map set error");
+ flb_free(config);
+ return NULL;
+ }
+
+ p = flb_input_get_property("unix_path", i_ins);
+ if (p == NULL) {
+ /* Listen interface (if not set, defaults to 0.0.0.0:24224) */
+ flb_input_net_default_listener("0.0.0.0", 24224, i_ins);
+ config->listen = i_ins->host.listen;
+ snprintf(tmp, sizeof(tmp) - 1, "%d", i_ins->host.port);
+ config->tcp_port = flb_strdup(tmp);
+ }
+ else {
+ /* Unix socket mode */
+ if (config->unix_perm_str) {
+ config->unix_perm = strtol(config->unix_perm_str, NULL, 8) & 07777;
+ }
+ }
+
+ if (!config->unix_path) {
+ flb_debug("[in_fw] Listen='%s' TCP_Port=%s",
+ config->listen, config->tcp_port);
+ }
+ return config;
+}
+
+int fw_config_destroy(struct flb_in_fw_config *config)
+{
+ if (config->log_encoder != NULL) {
+ flb_log_event_encoder_destroy(config->log_encoder);
+ }
+
+ if (config->log_decoder != NULL) {
+ flb_log_event_decoder_destroy(config->log_decoder);
+ }
+
+ if (config->coll_fd != -1) {
+ flb_input_collector_delete(config->coll_fd, config->ins);
+
+ config->coll_fd = -1;
+ }
+
+ if (config->downstream != NULL) {
+ flb_downstream_destroy(config->downstream);
+ }
+
+ if (config->unix_path) {
+ unlink(config->unix_path);
+ }
+ else {
+ flb_free(config->tcp_port);
+ }
+
+ flb_free(config);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_forward/fw_config.h b/src/fluent-bit/plugins/in_forward/fw_config.h
new file mode 100644
index 000000000..bbad17610
--- /dev/null
+++ b/src/fluent-bit/plugins/in_forward/fw_config.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_FW_CONFIG_H
+#define FLB_IN_FW_CONFIG_H
+
+#include "fw.h"
+
+struct flb_in_fw_config *fw_config_init(struct flb_input_instance *i_ins);
+int fw_config_destroy(struct flb_in_fw_config *config);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_forward/fw_conn.c b/src/fluent-bit/plugins/in_forward/fw_conn.c
new file mode 100644
index 000000000..3ccd98c24
--- /dev/null
+++ b/src/fluent-bit/plugins/in_forward/fw_conn.c
@@ -0,0 +1,199 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_engine.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_downstream.h>
+
+#include "fw.h"
+#include "fw_prot.h"
+#include "fw_conn.h"
+
+/* Callback invoked every time an event is triggered for a connection */
+int fw_conn_event(void *data)
+{
+ int ret;
+ int bytes;
+ int available;
+ int size;
+ char *tmp;
+ struct fw_conn *conn;
+ struct mk_event *event;
+ struct flb_in_fw_config *ctx;
+ struct flb_connection *connection;
+
+ connection = (struct flb_connection *) data;
+
+ conn = connection->user_data;
+
+ ctx = conn->ctx;
+
+ event = &connection->event;
+
+
+ if (event->mask & MK_EVENT_READ) {
+ available = (conn->buf_size - conn->buf_len);
+ if (available < 1) {
+ if (conn->buf_size >= ctx->buffer_max_size) {
+ flb_plg_warn(ctx->ins, "fd=%i incoming data exceed limit (%lu bytes)",
+ event->fd, (ctx->buffer_max_size));
+ fw_conn_del(conn);
+ return -1;
+ }
+ else if (conn->buf_size + ctx->buffer_chunk_size > ctx->buffer_max_size) {
+ /* no space to add buffer_chunk_size */
+ /* set maximum size */
+ size = ctx->buffer_max_size;
+ }
+ else {
+ size = conn->buf_size + ctx->buffer_chunk_size;
+ }
+ tmp = flb_realloc(conn->buf, size);
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+ flb_plg_trace(ctx->ins, "fd=%i buffer realloc %i -> %i",
+ event->fd, conn->buf_size, size);
+
+ conn->buf = tmp;
+ conn->buf_size = size;
+ available = (conn->buf_size - conn->buf_len);
+ }
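+
+        /*
+         * Worked example of the growth policy above, using the default
+         * buffer_chunk_size (1,024,000 bytes) and buffer_max_size
+         * (6,144,000 bytes): the buffer grows 1,024,000 -> 2,048,000 ->
+         * ... up to 6,144,000; once full at the maximum size, the
+         * connection is dropped.
+         */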
+
+ bytes = flb_io_net_read(connection,
+ (void *) &conn->buf[conn->buf_len],
+ available);
+
+ if (bytes > 0) {
+ flb_plg_trace(ctx->ins, "read()=%i pre_len=%i now_len=%i",
+ bytes, conn->buf_len, conn->buf_len + bytes);
+ conn->buf_len += bytes;
+
+ ret = fw_prot_process(ctx->ins, conn);
+ if (ret == -1) {
+ fw_conn_del(conn);
+ return -1;
+ }
+ return bytes;
+ }
+ else {
+ flb_plg_trace(ctx->ins, "fd=%i closed connection", event->fd);
+ fw_conn_del(conn);
+ return -1;
+ }
+ }
+
+ if (event->mask & MK_EVENT_CLOSE) {
+ flb_plg_trace(ctx->ins, "fd=%i hangup", event->fd);
+ fw_conn_del(conn);
+ return -1;
+ }
+ return 0;
+}
+
+/* Create a new Forward request instance */
+struct fw_conn *fw_conn_add(struct flb_connection *connection, struct flb_in_fw_config *ctx)
+{
+ struct fw_conn *conn;
+ int ret;
+
+ conn = flb_malloc(sizeof(struct fw_conn));
+ if (!conn) {
+ flb_errno();
+
+ return NULL;
+ }
+
+ conn->connection = connection;
+
+ /* Set data for the event-loop */
+ connection->user_data = conn;
+ connection->event.type = FLB_ENGINE_EV_CUSTOM;
+ connection->event.handler = fw_conn_event;
+
+ /* Connection info */
+ conn->ctx = ctx;
+ conn->buf_len = 0;
+ conn->rest = 0;
+ conn->status = FW_NEW;
+
+ /* Allocate read buffer */
+ conn->buf = flb_malloc(ctx->buffer_chunk_size);
+ if (!conn->buf) {
+ flb_errno();
+ flb_free(conn);
+
+ return NULL;
+ }
+ conn->buf_size = ctx->buffer_chunk_size;
+ conn->in = ctx->ins;
+
+ /* Register instance into the event loop */
+ ret = mk_event_add(flb_engine_evl_get(),
+ connection->fd,
+ FLB_ENGINE_EV_CUSTOM,
+ MK_EVENT_READ,
+ &connection->event);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not register new connection");
+
+ flb_free(conn->buf);
+ flb_free(conn);
+
+ return NULL;
+ }
+
+ mk_list_add(&conn->_head, &ctx->connections);
+
+ return conn;
+}
+
+int fw_conn_del(struct fw_conn *conn)
+{
+ /* The downstream unregisters the file descriptor from the event-loop
+ * so there's nothing to be done by the plugin
+ */
+ flb_downstream_conn_release(conn->connection);
+
+ /* Release resources */
+ mk_list_del(&conn->_head);
+
+ flb_free(conn->buf);
+ flb_free(conn);
+
+ return 0;
+}
+
+int fw_conn_del_all(struct flb_in_fw_config *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct fw_conn *conn;
+
+ mk_list_foreach_safe(head, tmp, &ctx->connections) {
+ conn = mk_list_entry(head, struct fw_conn, _head);
+ fw_conn_del(conn);
+ }
+
+ return 0;
+} \ No newline at end of file
diff --git a/src/fluent-bit/plugins/in_forward/fw_conn.h b/src/fluent-bit/plugins/in_forward/fw_conn.h
new file mode 100644
index 000000000..4c04d9400
--- /dev/null
+++ b/src/fluent-bit/plugins/in_forward/fw_conn.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_FW_CONN_H
+#define FLB_IN_FW_CONN_H
+
+#define FLB_IN_FW_CHUNK_SIZE "1024000" /* 1MB */
+#define FLB_IN_FW_CHUNK_MAX_SIZE "6144000" /* 6 * FLB_IN_FW_CHUNK_SIZE (~6MB) */
+
+enum {
+ FW_NEW = 1, /* it's a new connection */
+    FW_CONNECTED = 2, /* connection established */
+};
+
+struct fw_conn_stream {
+ char *tag;
+ size_t tag_len;
+};
+
+/* Represents a connection */
+struct fw_conn {
+ int status; /* Connection status */
+
+ /* Buffer */
+ char *buf; /* Buffer data */
+ int buf_len; /* Data length */
+ int buf_size; /* Buffer size */
+ size_t rest; /* Unpacking offset */
+
+ struct flb_input_instance *in; /* Parent plugin instance */
+ struct flb_in_fw_config *ctx; /* Plugin configuration context */
+ struct flb_connection *connection;
+
+ struct mk_list _head;
+};
+
+struct fw_conn *fw_conn_add(struct flb_connection *connection, struct flb_in_fw_config *ctx);
+int fw_conn_del(struct fw_conn *conn);
+int fw_conn_del_all(struct flb_in_fw_config *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_forward/fw_prot.c b/src/fluent-bit/plugins/in_forward/fw_prot.c
new file mode 100644
index 000000000..2a23b6254
--- /dev/null
+++ b/src/fluent-bit/plugins/in_forward/fw_prot.c
@@ -0,0 +1,846 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_gzip.h>
+
+#include <fluent-bit/flb_input_metric.h>
+#include <fluent-bit/flb_input_trace.h>
+
+#include <cmetrics/cmetrics.h>
+#include <cmetrics/cmt_decode_msgpack.h>
+
+#include <ctraces/ctraces.h>
+#include <ctraces/ctr_decode_msgpack.h>
+
+#include <msgpack.h>
+
+#include "fw.h"
+#include "fw_prot.h"
+#include "fw_conn.h"
+
+/* Each receive/parse round handles up to 32 bytes */
+#define EACH_RECV_SIZE 32
+
+static int get_chunk_event_type(struct flb_input_instance *ins, msgpack_object options)
+{
+ int i;
+ int type = FLB_EVENT_TYPE_LOGS;
+ msgpack_object k;
+ msgpack_object v;
+
+ if (options.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ins, "invalid options field in record");
+ return -1;
+ }
+
+ for (i = 0; i < options.via.map.size; i++) {
+ k = options.via.map.ptr[i].key;
+ v = options.via.map.ptr[i].val;
+
+ if (k.type != MSGPACK_OBJECT_STR) {
+ return -1;
+ }
+
+ if (k.via.str.size != 13) {
+ continue;
+ }
+
+ if (strncmp(k.via.str.ptr, "fluent_signal", 13) == 0) {
+ if (v.type != MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ flb_plg_error(ins, "invalid value type in options fluent_signal");
+ return -1;
+ }
+
+ if (v.via.i64 != FLB_EVENT_TYPE_LOGS && v.via.i64 != FLB_EVENT_TYPE_METRICS && v.via.i64 != FLB_EVENT_TYPE_TRACES) {
+ flb_plg_error(ins, "invalid value in options fluent_signal");
+ return -1;
+ }
+
+ /* cast should be fine */
+ type = (int) v.via.i64;
+ break;
+ }
+ }
+
+ return type;
+}
+
+static int is_gzip_compressed(msgpack_object options)
+{
+ int i;
+ msgpack_object k;
+ msgpack_object v;
+
+ if (options.type != MSGPACK_OBJECT_MAP) {
+ return -1;
+ }
+
+
+ for (i = 0; i < options.via.map.size; i++) {
+ k = options.via.map.ptr[i].key;
+ v = options.via.map.ptr[i].val;
+
+ if (k.type != MSGPACK_OBJECT_STR) {
+ return -1;
+ }
+
+ if (k.via.str.size != 10) {
+ continue;
+ }
+
+ if (strncmp(k.via.str.ptr, "compressed", 10) == 0) {
+ if (v.type != MSGPACK_OBJECT_STR) {
+ return -1;
+ }
+
+ if (v.via.str.size != 4) {
+ return -1;
+ }
+
+ if (strncmp(v.via.str.ptr, "gzip", 4) == 0) {
+ return FLB_TRUE;
+ }
+ else if (strncmp(v.via.str.ptr, "text", 4) == 0) {
+ return FLB_FALSE;
+ }
+
+ return -1;
+ }
+ }
+
+ return FLB_FALSE;
+}
+
+static int send_ack(struct flb_input_instance *in, struct fw_conn *conn,
+ msgpack_object chunk)
+{
+ int result;
+ size_t sent;
+ ssize_t bytes;
+ msgpack_packer mp_pck;
+ msgpack_sbuffer mp_sbuf;
+
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ msgpack_pack_map(&mp_pck, 1);
+ msgpack_pack_str(&mp_pck, 3);
+ msgpack_pack_str_body(&mp_pck, "ack", 3);
+ msgpack_pack_object(&mp_pck, chunk);
+
+
+ bytes = flb_io_net_write(conn->connection,
+ (void *) mp_sbuf.data,
+ mp_sbuf.size,
+ &sent);
+
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ if (bytes == -1) {
+ flb_plg_error(in, "cannot send ACK response: %.*s",
+ chunk.via.str.size, chunk.via.str.ptr);
+
+ result = -1;
+ }
+ else {
+ result = 0;
+ }
+
+ return result;
+
+}
+
+static size_t get_options_metadata(msgpack_object *arr, int expected, size_t *idx)
+{
+ size_t i;
+ msgpack_object *options;
+ msgpack_object k;
+ msgpack_object v;
+
+ if (arr->type != MSGPACK_OBJECT_ARRAY) {
+ return -1;
+ }
+
+ /* Make sure the 'expected' entry position is valid for the array size */
+ if (expected >= arr->via.array.size) {
+ return 0;
+ }
+
+ options = &arr->via.array.ptr[expected];
+ if (options->type == MSGPACK_OBJECT_NIL) {
+ /*
+ * Old Docker 18.x sends a NULL options parameter, just be friendly and
+ * let it pass.
+ */
+ return 0;
+ }
+
+ if (options->type != MSGPACK_OBJECT_MAP) {
+ return -1;
+ }
+
+ if (options->via.map.size <= 0) {
+ return 0;
+ }
+
+ for (i = 0; i < options->via.map.size; i++) {
+ k = options->via.map.ptr[i].key;
+ v = options->via.map.ptr[i].val;
+
+ if (k.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ if (k.via.str.size != 8) {
+ continue;
+ }
+
+ if (strncmp(k.via.str.ptr, "metadata", 8) != 0) {
+ continue;
+ }
+
+ if (v.type != MSGPACK_OBJECT_MAP) {
+ return -1;
+ }
+
+ *idx = i;
+
+ return 0;
+ }
+
+ return 0;
+}
+
+static size_t get_options_chunk(msgpack_object *arr, int expected, size_t *idx)
+{
+ size_t i;
+ msgpack_object *options;
+ msgpack_object k;
+ msgpack_object v;
+
+ if (arr->type != MSGPACK_OBJECT_ARRAY) {
+ return -1;
+ }
+
+ /* Make sure the 'expected' entry position is valid for the array size */
+ if (expected >= arr->via.array.size) {
+ return 0;
+ }
+
+ options = &arr->via.array.ptr[expected];
+ if (options->type == MSGPACK_OBJECT_NIL) {
+ /*
+ * Old Docker 18.x sends a NULL options parameter, just be friendly and
+ * let it pass.
+ */
+ return 0;
+ }
+
+ if (options->type != MSGPACK_OBJECT_MAP) {
+ return -1;
+ }
+
+ if (options->via.map.size <= 0) {
+ return 0;
+ }
+
+ for (i = 0; i < options->via.map.size; i++) {
+ k = options->via.map.ptr[i].key;
+ v = options->via.map.ptr[i].val;
+
+ if (k.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ if (k.via.str.size != 5) {
+ continue;
+ }
+
+ if (strncmp(k.via.str.ptr, "chunk", 5) != 0) {
+ continue;
+ }
+
+ if (v.type != MSGPACK_OBJECT_STR) {
+ return -1;
+ }
+
+ *idx = i;
+ return 0;
+ }
+
+ return 0;
+}
+
+static int fw_process_forward_mode_entry(
+ struct fw_conn *conn,
+ const char *tag, int tag_len,
+ msgpack_object *entry,
+ int chunk_id)
+{
+ int result;
+ struct flb_log_event event;
+
+ result = flb_event_decoder_decode_object(conn->ctx->log_decoder,
+ &event, entry);
+
+ if (result == FLB_EVENT_DECODER_SUCCESS) {
+ result = flb_log_event_encoder_begin_record(conn->ctx->log_encoder);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_timestamp(conn->ctx->log_encoder,
+ &event.timestamp);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_metadata_from_msgpack_object(
+ conn->ctx->log_encoder,
+ event.metadata);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_body_from_msgpack_object(
+ conn->ctx->log_encoder,
+ event.body);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_commit_record(conn->ctx->log_encoder);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(conn->ctx->ins, tag, tag_len,
+ conn->ctx->log_encoder->output_buffer,
+ conn->ctx->log_encoder->output_length);
+ }
+
+ flb_log_event_encoder_reset(conn->ctx->log_encoder);
+
+ if (result != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_warn(conn->ctx->ins, "Event decoder failure : %d", result);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int fw_process_message_mode_entry(
+ struct flb_input_instance *in,
+ struct fw_conn *conn,
+ const char *tag, int tag_len,
+ msgpack_object *root,
+ msgpack_object *ts,
+ msgpack_object *body,
+ int chunk_id, int metadata_id)
+{
+ struct flb_time timestamp;
+ msgpack_object *metadata;
+ msgpack_object options;
+ int result;
+ msgpack_object chunk;
+
+ metadata = NULL;
+
+ if (chunk_id != -1 || metadata_id != -1) {
+ options = root->via.array.ptr[3];
+
+ if (metadata_id != -1) {
+ metadata = &options.via.map.ptr[metadata_id].val;
+ }
+ }
+
+ result = flb_log_event_decoder_decode_timestamp(ts, &timestamp);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_begin_record(conn->ctx->log_encoder);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_timestamp(conn->ctx->log_encoder,
+ &timestamp);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ if (metadata != NULL) {
+ result = flb_log_event_encoder_set_metadata_from_msgpack_object(
+ conn->ctx->log_encoder,
+ metadata);
+ }
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_body_from_msgpack_object(
+ conn->ctx->log_encoder,
+ body);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_commit_record(conn->ctx->log_encoder);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(in, tag, tag_len,
+ conn->ctx->log_encoder->output_buffer,
+ conn->ctx->log_encoder->output_length);
+ }
+
+ flb_log_event_encoder_reset(conn->ctx->log_encoder);
+
+ if (chunk_id != -1) {
+ chunk = options.via.map.ptr[chunk_id].val;
+ send_ack(in, conn, chunk);
+ }
+
+ return 0;
+}
+
+static size_t receiver_recv(struct fw_conn *conn, char *buf, size_t try_size) {
+ size_t off;
+ size_t actual_size;
+
+ off = conn->buf_len - conn->rest;
+ actual_size = try_size;
+
+ if (actual_size > conn->rest) {
+ actual_size = conn->rest;
+ }
+
+ memcpy(buf, conn->buf + off, actual_size);
+ conn->rest -= actual_size;
+
+ return actual_size;
+}
+
+static size_t receiver_to_unpacker(struct fw_conn *conn, size_t request_size,
+ msgpack_unpacker *unpacker)
+{
+ size_t recv_len;
+
+ /* make sure there's enough room, or expand the unpacker accordingly */
+ if (msgpack_unpacker_buffer_capacity(unpacker) < request_size) {
+ msgpack_unpacker_reserve_buffer(unpacker, request_size);
+ assert(msgpack_unpacker_buffer_capacity(unpacker) >= request_size);
+ }
+ recv_len = receiver_recv(conn, msgpack_unpacker_buffer(unpacker),
+ request_size);
+ msgpack_unpacker_buffer_consumed(unpacker, recv_len);
+
+ return recv_len;
+}
+
+int fw_prot_process(struct flb_input_instance *ins, struct fw_conn *conn)
+{
+ int ret;
+ int stag_len;
+ int event_type;
+ int contain_options = FLB_FALSE;
+ size_t index = 0;
+ size_t off = 0;
+ size_t chunk_id = -1;
+ size_t metadata_id = -1;
+ const char *stag;
+ flb_sds_t out_tag = NULL;
+ size_t bytes;
+ size_t recv_len;
+ size_t gz_size;
+ void *gz_data;
+ msgpack_object tag;
+ msgpack_object entry;
+ msgpack_object map;
+ msgpack_object root;
+ msgpack_object chunk;
+ msgpack_unpacked result;
+ msgpack_unpacker *unp;
+ size_t all_used = 0;
+ struct flb_in_fw_config *ctx = conn->ctx;
+ struct cmt *cmt;
+ struct ctrace *ctr;
+
+ /*
+ * [tag, time, record]
+ * [tag, [[time,record], [time,record], ...]]
+ */
+
+ out_tag = flb_sds_create_size(1024);
+ if (!out_tag) {
+ return -1;
+ }
+
+ unp = msgpack_unpacker_new(1024);
+ msgpack_unpacked_init(&result);
+ conn->rest = conn->buf_len;
+
+ while (1) {
+ recv_len = receiver_to_unpacker(conn, EACH_RECV_SIZE, unp);
+ if (recv_len == 0) {
+ /* No more data */
+ msgpack_unpacker_free(unp);
+ msgpack_unpacked_destroy(&result);
+
+ /* Adjust buffer data */
+ if (conn->buf_len >= all_used && all_used > 0) {
+ memmove(conn->buf, conn->buf + all_used,
+ conn->buf_len - all_used);
+ conn->buf_len -= all_used;
+ }
+ flb_sds_destroy(out_tag);
+
+ return 0;
+ }
+
+ /* Always summarize the total number of bytes requested to parse */
+ ret = msgpack_unpacker_next_with_size(unp, &result, &bytes);
+
+ /*
+ * Upon parsing or memory errors, break the loop, return the error
+ * and expect the connection to be closed.
+ */
+ if (ret == MSGPACK_UNPACK_PARSE_ERROR ||
+ ret == MSGPACK_UNPACK_NOMEM_ERROR) {
+            /* A bit redundant, but print out the real error */
+ if (ret == MSGPACK_UNPACK_PARSE_ERROR) {
+ flb_plg_debug(ctx->ins, "err=MSGPACK_UNPACK_PARSE_ERROR");
+ }
+ else {
+ flb_plg_error(ctx->ins, "err=MSGPACK_UNPACK_NOMEM_ERROR");
+ }
+
+ /* Cleanup buffers */
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+
+ return -1;
+ }
+
+ while (ret == MSGPACK_UNPACK_SUCCESS) {
+ /*
+             * For buffering optimization we always want to know the total
+             * number of bytes involved in the new object returned. Although
+             * buf_off always reports the given bytes, it's likely we used a
+             * bit less. The 'all_used' counter keeps a running total per
+             * object so that, when returning to the caller, we can adjust
+             * the source buffer and discard the consumed data.
+ *
+ * The 'last_parsed' field is Fluent Bit specific and is documented
+ * in:
+ *
+ * lib/msgpack-c/include/msgpack/unpack.h
+ *
+ * Other references:
+ *
+ * https://github.com/msgpack/msgpack-c/issues/514
+ */
+ all_used += bytes;
+
+
+ /* Map the array */
+ root = result.data;
+
+ if (root.type != MSGPACK_OBJECT_ARRAY) {
+ flb_plg_debug(ctx->ins,
+ "parser: expecting an array (type=%i), skip.",
+ root.type);
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+
+ return -1;
+ }
+
+ if (root.via.array.size < 2) {
+ flb_plg_debug(ctx->ins,
+ "parser: array of invalid size, skip.");
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+
+ return -1;
+ }
+
+ if (root.via.array.size == 3) {
+ contain_options = FLB_TRUE;
+ }
+
+ /* Get the tag */
+ tag = root.via.array.ptr[0];
+ if (tag.type != MSGPACK_OBJECT_STR) {
+ flb_plg_debug(ctx->ins,
+ "parser: invalid tag format, skip.");
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+ return -1;
+ }
+
+ /* reference the tag associated with the record */
+ stag = tag.via.str.ptr;
+ stag_len = tag.via.str.size;
+
+ /* clear out_tag before using */
+ flb_sds_len_set(out_tag, 0);
+
+ /* Prefix the incoming record tag with a custom prefix */
+ if (ctx->tag_prefix) {
+ /* prefix */
+ flb_sds_cat_safe(&out_tag,
+ ctx->tag_prefix, flb_sds_len(ctx->tag_prefix));
+ /* record tag */
+ flb_sds_cat_safe(&out_tag, stag, stag_len);
+ }
+ else if (ins->tag && !ins->tag_default) {
+ /* if the input plugin instance Tag has been manually set, use it */
+ flb_sds_cat_safe(&out_tag, ins->tag, flb_sds_len(ins->tag));
+ }
+ else {
+ /* use the tag from the record */
+ flb_sds_cat_safe(&out_tag, stag, stag_len);
+ }
+
+ entry = root.via.array.ptr[1];
+
+ if (entry.type == MSGPACK_OBJECT_ARRAY) {
+ /*
+ * Forward format 1 (forward mode: [tag, [[time, map], ...]]
+ */
+
+ /* Check for options */
+ chunk_id = -1;
+ ret = get_options_chunk(&root, 2, &chunk_id);
+ if (ret == -1) {
+ flb_plg_debug(ctx->ins, "invalid options field");
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+
+ return -1;
+ }
+
+ /* Process array */
+ ret = 0;
+
+ for(index = 0 ;
+ index < entry.via.array.size &&
+ ret == 0 ;
+ index++) {
+ ret = fw_process_forward_mode_entry(
+ conn,
+ out_tag, flb_sds_len(out_tag),
+ &entry.via.array.ptr[index],
+ chunk_id);
+ }
+
+ if (chunk_id != -1) {
+ msgpack_object options;
+ msgpack_object chunk;
+
+ options = root.via.array.ptr[2];
+ chunk = options.via.map.ptr[chunk_id].val;
+
+ send_ack(conn->in, conn, chunk);
+ }
+ }
+ else if (entry.type == MSGPACK_OBJECT_POSITIVE_INTEGER ||
+ entry.type == MSGPACK_OBJECT_EXT) {
+ /*
+ * Forward format 2 (message mode) : [tag, time, map, ...]
+ */
+ map = root.via.array.ptr[2];
+ if (map.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_warn(ctx->ins, "invalid data format, map expected");
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+ return -1;
+ }
+
+ /* Check for options */
+ chunk_id = -1;
+ ret = get_options_chunk(&root, 3, &chunk_id);
+ if (ret == -1) {
+ flb_plg_debug(ctx->ins, "invalid options field");
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+ return -1;
+ }
+
+ metadata_id = -1;
+ ret = get_options_metadata(&root, 3, &metadata_id);
+ if (ret == -1) {
+ flb_plg_debug(ctx->ins, "invalid options field");
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+ return -1;
+ }
+
+ /* Process map */
+ fw_process_message_mode_entry(
+ conn->in, conn,
+ out_tag, flb_sds_len(out_tag),
+ &root, &entry, &map, chunk_id,
+ metadata_id);
+ }
+ else if (entry.type == MSGPACK_OBJECT_STR ||
+ entry.type == MSGPACK_OBJECT_BIN) {
+ /* PackedForward Mode */
+ const char *data = NULL;
+ size_t len = 0;
+
+ /* Check for options */
+ chunk_id = -1;
+ ret = get_options_chunk(&root, 2, &chunk_id);
+ if (ret == -1) {
+ flb_plg_debug(ctx->ins, "invalid options field");
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+ return -1;
+ }
+
+ if (entry.type == MSGPACK_OBJECT_STR) {
+ data = entry.via.str.ptr;
+ len = entry.via.str.size;
+ }
+ else if (entry.type == MSGPACK_OBJECT_BIN) {
+ data = entry.via.bin.ptr;
+ len = entry.via.bin.size;
+ }
+
+ if (data) {
+ ret = is_gzip_compressed(root.via.array.ptr[2]);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "invalid 'compressed' option");
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+ return -1;
+ }
+
+ if (ret == FLB_TRUE) {
+ ret = flb_gzip_uncompress((void *) data, len,
+ &gz_data, &gz_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "gzip uncompress failure");
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+ return -1;
+ }
+
+ /* Append uncompressed data */
+ flb_input_log_append(conn->in,
+ out_tag, flb_sds_len(out_tag),
+ gz_data, gz_size);
+ flb_free(gz_data);
+ }
+ else {
+ event_type = FLB_EVENT_TYPE_LOGS;
+ if (contain_options) {
+ ret = get_chunk_event_type(ins, root.via.array.ptr[2]);
+ if (ret == -1) {
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+ return -1;
+ }
+ event_type = ret;
+ }
+
+ if (event_type == FLB_EVENT_TYPE_LOGS) {
+ flb_input_log_append(conn->in,
+ out_tag, flb_sds_len(out_tag),
+ data, len);
+ }
+ else if (event_type == FLB_EVENT_TYPE_METRICS) {
+ ret = cmt_decode_msgpack_create(&cmt, (char *) data, len, &off);
+ if (ret == -1) {
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+ return -1;
+ }
+ flb_input_metrics_append(conn->in,
+ out_tag, flb_sds_len(out_tag),
+ cmt);
+ }
+ else if (event_type == FLB_EVENT_TYPE_TRACES) {
+ off = 0;
+ ret = ctr_decode_msgpack_create(&ctr, (char *) data, len, &off);
+ if (ret == -1) {
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+ return -1;
+ }
+
+ flb_input_trace_append(ins,
+ out_tag, flb_sds_len(out_tag),
+ ctr);
+ }
+ }
+
+ /* Handle ACK response */
+ if (chunk_id != -1) {
+ chunk = root.via.array.ptr[2].via.map.ptr[chunk_id].val;
+ send_ack(ctx->ins, conn, chunk);
+ }
+ }
+ }
+ else {
+ flb_plg_warn(ctx->ins, "invalid data format, type=%i",
+ entry.type);
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ return -1;
+ }
+
+ ret = msgpack_unpacker_next(unp, &result);
+ }
+ }
+
+ msgpack_unpacked_destroy(&result);
+ msgpack_unpacker_free(unp);
+ flb_sds_destroy(out_tag);
+
+ switch (ret) {
+ case MSGPACK_UNPACK_EXTRA_BYTES:
+ flb_plg_error(ctx->ins, "MSGPACK_UNPACK_EXTRA_BYTES");
+ return -1;
+ case MSGPACK_UNPACK_CONTINUE:
+ flb_plg_trace(ctx->ins, "MSGPACK_UNPACK_CONTINUE");
+ return 1;
+ case MSGPACK_UNPACK_PARSE_ERROR:
+ flb_plg_debug(ctx->ins, "err=MSGPACK_UNPACK_PARSE_ERROR");
+ return -1;
+ case MSGPACK_UNPACK_NOMEM_ERROR:
+ flb_plg_error(ctx->ins, "err=MSGPACK_UNPACK_NOMEM_ERROR");
+ return -1;
+ };
+
+ return 0;
+}
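fw_prot_process() above accepts the three Fluent Forward shapes sketched in its opening comment: message mode ([tag, time, record, options?]), forward mode ([tag, [[time, record], ...], options?]) and packed/compressed forward. When the options map carries a "chunk" entry, the plugin answers with the {"ack": <chunk>} map built by send_ack(). The snippet below is a minimal, standalone sketch of how a client could serialize one message-mode event carrying a chunk id, using only msgpack-c calls already seen in this file; the tag, record and chunk values are made-up examples, not anything defined by Fluent Bit.

#include <stdint.h>
#include <time.h>
#include <msgpack.h>

/* Pack ["app.log", <now>, {"msg": "hello"}, {"chunk": "abc123"}].
 * All literal values are illustrative only. */
static void pack_message_mode_event(msgpack_sbuffer *sbuf)
{
    msgpack_packer pck;

    msgpack_packer_init(&pck, sbuf, msgpack_sbuffer_write);

    msgpack_pack_array(&pck, 4);                        /* [tag, time, record, options] */

    msgpack_pack_str(&pck, 7);                          /* tag */
    msgpack_pack_str_body(&pck, "app.log", 7);

    msgpack_pack_uint64(&pck, (uint64_t) time(NULL));   /* event time in seconds */

    msgpack_pack_map(&pck, 1);                          /* record: {"msg": "hello"} */
    msgpack_pack_str(&pck, 3);
    msgpack_pack_str_body(&pck, "msg", 3);
    msgpack_pack_str(&pck, 5);
    msgpack_pack_str_body(&pck, "hello", 5);

    msgpack_pack_map(&pck, 1);                          /* options: {"chunk": "abc123"} */
    msgpack_pack_str(&pck, 5);
    msgpack_pack_str_body(&pck, "chunk", 5);
    msgpack_pack_str(&pck, 6);
    msgpack_pack_str_body(&pck, "abc123", 6);
}

int main(void)
{
    msgpack_sbuffer sbuf;

    msgpack_sbuffer_init(&sbuf);
    pack_message_mode_event(&sbuf);

    /* sbuf.data / sbuf.size now hold the bytes a forward client would write
     * to the socket; the server replies with {"ack": "abc123"} */
    msgpack_sbuffer_destroy(&sbuf);

    return 0;
}

A client doing at-least-once delivery keeps the event buffered until it reads back a map whose "ack" value equals the chunk id it sent.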
diff --git a/src/fluent-bit/plugins/in_forward/fw_prot.h b/src/fluent-bit/plugins/in_forward/fw_prot.h
new file mode 100644
index 000000000..67eae5507
--- /dev/null
+++ b/src/fluent-bit/plugins/in_forward/fw_prot.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_FW_PROT_H
+#define FLB_IN_FW_PROT_H
+
+#include "fw_conn.h"
+
+int fw_prot_parser(struct fw_conn *conn);
+int fw_prot_process(struct flb_input_instance *ins, struct fw_conn *conn);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_head/CMakeLists.txt b/src/fluent-bit/plugins/in_head/CMakeLists.txt
new file mode 100644
index 000000000..2410c9ebe
--- /dev/null
+++ b/src/fluent-bit/plugins/in_head/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ in_head.c)
+
+FLB_PLUGIN(in_head "${src}" "")
diff --git a/src/fluent-bit/plugins/in_head/in_head.c b/src/fluent-bit/plugins/in_head/in_head.c
new file mode 100644
index 000000000..2619d1c18
--- /dev/null
+++ b/src/fluent-bit/plugins/in_head/in_head.c
@@ -0,0 +1,473 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <msgpack.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include "in_head.h"
+#define BUF_SIZE_MAX 512
+
+static int read_lines(struct flb_in_head_config *ctx)
+{
+ FILE *fp = NULL;
+ int i;
+ int index = 0;
+ int str_len;
+ char buf[BUF_SIZE_MAX] = {0};
+ int new_len = 0;
+ char *tmp;
+ char *ret_buf;
+
+ fp = fopen(ctx->filepath, "r");
+ if (fp == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ for (i = 0; i<ctx->lines; i++){
+ ret_buf = fgets(buf, BUF_SIZE_MAX-1, fp);
+ if (ret_buf == NULL) {
+ break;
+ }
+ str_len = strlen(buf);
+ if (ctx->buf_size < str_len + index + 1) {
+ /* buffer full. re-allocate new buffer */
+ new_len = ctx->buf_size + str_len + 1;
+ tmp = flb_malloc(new_len);
+ if (tmp == NULL) {
+ flb_plg_error(ctx->ins, "failed to allocate buffer");
+ /* try to output partial data */
+ break;
+ }
+ /* copy and release old buffer */
+ strcpy(tmp, ctx->buf);
+ flb_free(ctx->buf);
+
+ ctx->buf_size = new_len;
+ ctx->buf = tmp;
+ }
+ strncat(&ctx->buf[index], buf, str_len);
+ ctx->buf_len += str_len;
+ index += str_len;
+ }
+
+ fclose(fp);
+ return 0;
+}
+
+static int read_bytes(struct flb_in_head_config *ctx)
+{
+ int fd = -1;
+ /* open at every collect callback */
+ fd = open(ctx->filepath, O_RDONLY);
+ if (fd < 0) {
+ flb_errno();
+ return -1;
+ }
+ ctx->buf_len = read(fd, ctx->buf, ctx->buf_size);
+ close(fd);
+
+ if (ctx->buf_len < 0) {
+ flb_errno();
+ return -1;
+ }
+ else {
+ return 0;
+ }
+}
+
+static int single_value_per_record(struct flb_input_instance *i_ins,
+ struct flb_in_head_config *ctx)
+{
+ int ret = -1;
+
+ ctx->buf[0] = '\0'; /* clear buf */
+ ctx->buf_len = 0;
+
+ if (ctx->lines > 0) {
+ read_lines(ctx);
+ }
+ else {
+ read_bytes(ctx);
+ }
+
+ flb_plg_trace(ctx->ins, "%s read_len=%zd buf_size=%zu", __FUNCTION__,
+ ctx->buf_len, ctx->buf_size);
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(
+ &ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(ctx->key),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->buf, ctx->buf_len));
+
+ }
+
+ if (ctx->add_path) {
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("path"),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->filepath, ctx->path_len));
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(i_ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ ret = 0;
+ }
+ else {
+ flb_plg_error(i_ins, "Error encoding record : %d", ret);
+
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+
+ return ret;
+}
+
+#define KEY_LEN_MAX 32
+static int split_lines_per_record(struct flb_input_instance *i_ins,
+ struct flb_in_head_config *ctx)
+{
+ FILE *fp = NULL;
+ int i;
+ int ret;
+ size_t str_len;
+ size_t key_len;
+ char *ret_buf;
+ char key_str[KEY_LEN_MAX] = {0};
+
+ fp = fopen(ctx->filepath, "r");
+ if (fp == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(
+ &ctx->log_encoder);
+ }
+
+ if (ctx->add_path) {
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("path"),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->filepath, ctx->path_len));
+ }
+ }
+
+ for (i = 0; i < ctx->lines; i++) {
+ ret_buf = fgets(ctx->buf, ctx->buf_size, fp);
+ if (ret_buf == NULL) {
+ ctx->buf[0] = '\0';
+ str_len = 0;
+ }
+ else {
+ str_len = strnlen(ctx->buf, ctx->buf_size-1);
+ ctx->buf[str_len-1] = '\0';/* chomp str */
+ }
+
+ key_len = snprintf(key_str, KEY_LEN_MAX, "line%d", i);
+ if (key_len > KEY_LEN_MAX) {
+ key_len = KEY_LEN_MAX;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(key_str),
+ FLB_LOG_EVENT_STRING_VALUE(ctx->buf, ctx->buf_len));
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(i_ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ ret = 0;
+ }
+ else {
+ flb_plg_error(i_ins, "Error encoding record : %d", ret);
+
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+
+ fclose(fp);
+
+ return ret;
+}
+
+
+/* cb_collect callback */
+static int in_head_collect(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret = -1;
+ struct flb_in_head_config *ctx = in_context;
+
+ if (ctx->lines > 0 && ctx->split_line) {
+ ret = split_lines_per_record(i_ins, ctx);
+ }
+ else {
+ ret = single_value_per_record(i_ins, ctx);
+ }
+
+ return ret;
+}
+
+/* Read the plugin configuration and validate its settings */
+static int in_head_config_read(struct flb_in_head_config *ctx,
+ struct flb_input_instance *in)
+{
+ int ret;
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(in, "unable to load configuration");
+ return -1;
+ }
+
+
+ ctx->key_len = strlen(ctx->key);
+
+ /* only set lines if not explicitly set */
+ if (ctx->split_line && ctx->lines <= 0) {
+ ctx->lines = 10;
+ }
+
+ if (ctx->interval_sec <= 0 && ctx->interval_nsec <= 0) {
+ /* Illegal settings. Override them. */
+ ctx->interval_sec = atoi(DEFAULT_INTERVAL_SEC);
+ ctx->interval_nsec = atoi(DEFAULT_INTERVAL_NSEC);
+ }
+
+ if (ctx->add_path) {
+ ctx->path_len = strlen(ctx->filepath);
+ }
+
+ ret = flb_log_event_encoder_init(&ctx->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "error initializing event encoder : %d", ret);
+
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "buf_size=%zu path=%s",
+ ctx->buf_size, ctx->filepath);
+ flb_plg_debug(ctx->ins, "interval_sec=%d interval_nsec=%d",
+ ctx->interval_sec, ctx->interval_nsec);
+
+ return 0;
+}
+
+static void delete_head_config(struct flb_in_head_config *ctx)
+{
+ if (!ctx) {
+ return;
+ }
+
+ flb_log_event_encoder_destroy(&ctx->log_encoder);
+
+ /* release buffer */
+ if (ctx->buf) {
+ flb_free(ctx->buf);
+ }
+
+ flb_free(ctx);
+}
+
+/* Initialize plugin */
+static int in_head_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret = -1;
+ struct flb_in_head_config *ctx;
+
+ /* Allocate space for the configuration */
+ ctx = flb_calloc(1, sizeof(struct flb_in_head_config));
+ if (!ctx) {
+ return -1;
+ }
+
+ ctx->buf = NULL;
+ ctx->buf_len = 0;
+ ctx->add_path = FLB_FALSE;
+ ctx->lines = 0;
+ ctx->ins = in;
+
+ /* Initialize head config */
+ ret = in_head_config_read(ctx, in);
+ if (ret < 0) {
+ goto init_error;
+ }
+
+ ctx->buf = flb_malloc(ctx->buf_size);
+ if (!ctx->buf) {
+ flb_errno();
+ goto init_error;
+ }
+
+ flb_plg_trace(ctx->ins, "%s read_len=%zd buf_size=%zu", __FUNCTION__,
+ ctx->buf_len, ctx->buf_size);
+
+ flb_input_set_context(in, ctx);
+
+ ret = flb_input_set_collector_time(in,
+ in_head_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec, config);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "could not set collector for head input plugin");
+ goto init_error;
+ }
+
+ ctx->coll_fd = ret;
+ return 0;
+
+ init_error:
+ delete_head_config(ctx);
+
+ return -1;
+}
+
+static void in_head_pause(void *data, struct flb_config *config)
+{
+ struct flb_in_head_config *ctx = data;
+ (void) config;
+
+ /* Pause collector */
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+}
+
+static void in_head_resume(void *data, struct flb_config *config)
+{
+ struct flb_in_head_config *ctx = data;
+ (void) config;
+
+ /* Resume collector */
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+}
+
+static int in_head_exit(void *data, struct flb_config *config)
+{
+ (void) *config;
+ struct flb_in_head_config *head_config = data;
+
+ delete_head_config(head_config);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "file", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_head_config, filepath),
+ "Set the file"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "key", "head",
+ 0, FLB_TRUE, offsetof(struct flb_in_head_config, key),
+ "Set the record key"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "buf_size", DEFAULT_BUF_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_in_head_config, buf_size),
+ "Set the read buffer size"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "split_line", "false",
+ 0, FLB_TRUE, offsetof(struct flb_in_head_config, split_line),
+ "generate key/value pair per line"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "lines", "0",
+ 0, FLB_TRUE, offsetof(struct flb_in_head_config, lines),
+     "Number of lines to read"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "add_path", "false",
+ 0, FLB_TRUE, offsetof(struct flb_in_head_config, add_path),
+ "append filepath to records"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_head_config, interval_sec),
+ "Set the collector interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_head_config, interval_nsec),
+ "Set the collector interval (nanoseconds)"
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_input_plugin in_head_plugin = {
+ .name = "head",
+ .description = "Head Input",
+ .cb_init = in_head_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_head_collect,
+ .cb_flush_buf = NULL,
+ .cb_pause = in_head_pause,
+ .cb_resume = in_head_resume,
+ .config_map = config_map,
+ .cb_exit = in_head_exit
+};
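read_lines() above concatenates up to 'lines' lines from the target file into a single buffer, reallocating as the data grows. Below is a simplified, self-contained sketch of the same read-the-head-of-a-file pattern in plain libc; the helper name and the example path are hypothetical, and it omits the plugin's encoder and configuration plumbing.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: read up to max_lines lines of 'path' into a single
 * heap-allocated string, growing the buffer as needed; returns NULL on error. */
static char *read_head_lines(const char *path, int max_lines)
{
    FILE *fp;
    char line[512];
    char *out = NULL;
    char *tmp;
    size_t used = 0;
    size_t cap = 0;
    size_t len;
    int i;

    fp = fopen(path, "r");
    if (fp == NULL) {
        return NULL;
    }

    for (i = 0; i < max_lines && fgets(line, sizeof(line), fp) != NULL; i++) {
        len = strlen(line);

        if (used + len + 1 > cap) {
            cap = used + len + 1;
            tmp = realloc(out, cap);
            if (tmp == NULL) {
                free(out);
                fclose(fp);
                return NULL;
            }
            out = tmp;
        }

        memcpy(out + used, line, len + 1);   /* copy including the NUL */
        used += len;
    }

    fclose(fp);
    return out;
}

int main(void)
{
    char *head = read_head_lines("/etc/hostname", 1);   /* example path */

    if (head != NULL) {
        printf("%s", head);
        free(head);
    }

    return 0;
}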
diff --git a/src/fluent-bit/plugins/in_head/in_head.h b/src/fluent-bit/plugins/in_head/in_head.h
new file mode 100644
index 000000000..08376a942
--- /dev/null
+++ b/src/fluent-bit/plugins/in_head/in_head.h
@@ -0,0 +1,59 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef FLB_IN_HEAD_H
+#define FLB_IN_HEAD_H
+
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <msgpack.h>
+
+#define DEFAULT_BUF_SIZE "256"
+#define DEFAULT_INTERVAL_SEC "1"
+#define DEFAULT_INTERVAL_NSEC "0"
+
+struct flb_in_head_config {
+ int coll_fd;
+ size_t buf_size; /* size of buf */
+ ssize_t buf_len; /* read size */
+ char *buf; /* read buf */
+ flb_sds_t key;
+ int key_len;
+
+ flb_sds_t filepath; /* to read */
+
+ int add_path; /* add path mode */
+ size_t path_len;
+
+ int lines; /* line num to read */
+ int split_line;
+
+ int interval_sec;
+ int interval_nsec;
+
+ struct flb_log_event_encoder log_encoder;
+
+ struct flb_input_instance *ins;
+};
+
+extern struct flb_input_plugin in_head_plugin;
+
+#endif /* FLB_IN_HEAD_H */
diff --git a/src/fluent-bit/plugins/in_health/CMakeLists.txt b/src/fluent-bit/plugins/in_health/CMakeLists.txt
new file mode 100644
index 000000000..afb509ece
--- /dev/null
+++ b/src/fluent-bit/plugins/in_health/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ health.c)
+
+FLB_PLUGIN(in_health "${src}" "")
diff --git a/src/fluent-bit/plugins/in_health/health.c b/src/fluent-bit/plugins/in_health/health.c
new file mode 100644
index 000000000..72f59e51c
--- /dev/null
+++ b/src/fluent-bit/plugins/in_health/health.c
@@ -0,0 +1,293 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_io.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+
+#define DEFAULT_INTERVAL_SEC "1"
+#define DEFAULT_INTERVAL_NSEC "0"
+
+/* Input configuration & context */
+struct flb_in_health_config {
+ /* Alert mode */
+ int alert;
+
+ /* Append Hostname */
+ int add_host;
+ int len_host;
+ char* hostname;
+
+ /* Append Port Number */
+ int add_port;
+ int port;
+
+ /* Time interval check */
+ int interval_sec;
+ int interval_nsec;
+
+ /* Networking */
+ struct flb_upstream *u;
+
+ struct flb_log_event_encoder log_encoder;
+
+ /* Plugin instance */
+ struct flb_input_instance *ins;
+};
+
+/* The collection routine tries to connect to the specified TCP server */
+static int in_health_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ uint8_t alive;
+ struct flb_in_health_config *ctx = in_context;
+ struct flb_connection *u_conn;
+ int ret;
+
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ alive = FLB_FALSE;
+ }
+ else {
+ alive = FLB_TRUE;
+ flb_upstream_conn_release(u_conn);
+ }
+
+ if (alive == FLB_TRUE && ctx->alert == FLB_TRUE) {
+ FLB_INPUT_RETURN(0);
+ }
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(
+ &ctx->log_encoder);
+ }
+
+ /* Status */
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("alive"),
+ FLB_LOG_EVENT_BOOLEAN_VALUE(alive));
+ }
+
+ if (ctx->add_host) {
+ /* append hostname */
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("hostname"),
+ FLB_LOG_EVENT_CSTRING_VALUE(ctx->hostname));
+ }
+ }
+
+ if (ctx->add_port) {
+ /* append port number */
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("port"),
+ FLB_LOG_EVENT_INT32_VALUE(ctx->port));
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ins, "Error encoding record : %d", ret);
+
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+
+ FLB_INPUT_RETURN(ret);
+}
+
+static int in_health_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ int upstream_flags;
+ struct flb_in_health_config *ctx;
+ (void) data;
+
+ if (in->host.name == NULL) {
+ flb_plg_error(in, "no input 'Host' provided");
+ return -1;
+ }
+
+ /* Allocate space for the configuration */
+ ctx = flb_calloc(1, sizeof(struct flb_in_health_config));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->alert = FLB_FALSE;
+ ctx->add_host = FLB_FALSE;
+ ctx->len_host = 0;
+ ctx->hostname = NULL;
+ ctx->add_port = FLB_FALSE;
+ ctx->port = -1;
+ ctx->ins = in;
+
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *)ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ flb_plg_error(in, "unable to load configuration");
+ return -1;
+ }
+
+ upstream_flags = FLB_IO_TCP;
+
+ if (in->use_tls) {
+ upstream_flags |= FLB_IO_TLS;
+ }
+
+ ctx->u = flb_upstream_create(config, in->host.name, in->host.port,
+ upstream_flags, in->tls);
+
+ if (!ctx->u) {
+ flb_plg_error(ctx->ins, "could not initialize upstream");
+ flb_free(ctx);
+ return -1;
+ }
+
+ if (ctx->interval_sec <= 0 && ctx->interval_nsec <= 0) {
+ /* Illegal settings. Override them. */
+ ctx->interval_sec = atoi(DEFAULT_INTERVAL_SEC);
+ ctx->interval_nsec = atoi(DEFAULT_INTERVAL_NSEC);
+ }
+
+ if (ctx->add_host) {
+ ctx->len_host = strlen(in->host.name);
+ ctx->hostname = flb_strdup(in->host.name);
+ }
+
+ if (ctx->add_port) {
+ ctx->port = in->host.port;
+ }
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ /* Set our collector based on time */
+ ret = flb_input_set_collector_time(in,
+ in_health_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not set collector for Health input plugin");
+ flb_free(ctx);
+ return -1;
+ }
+
+ ret = flb_log_event_encoder_init(&ctx->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(in, "error initializing event encoder : %d", ret);
+
+ flb_free(ctx);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int in_health_exit(void *data, struct flb_config *config)
+{
+ (void) *config;
+ struct flb_in_health_config *ctx = data;
+
+ flb_log_event_encoder_destroy(&ctx->log_encoder);
+
+    /* Destroy the upstream context and release resources */
+ flb_upstream_destroy(ctx->u);
+ flb_free(ctx->hostname);
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_BOOL, "alert", "false",
+ 0, FLB_TRUE, offsetof(struct flb_in_health_config, alert),
+ "Only generate records when the port is down"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "add_host", "false",
+ 0, FLB_TRUE, offsetof(struct flb_in_health_config, add_host),
+ "Append hostname to each record"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "add_port", "false",
+ 0, FLB_TRUE, offsetof(struct flb_in_health_config, add_port),
+ "Append port to each record"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_health_config, interval_sec),
+ "Set the collector interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_health_config, interval_nsec),
+ "Set the collector interval (nanoseconds)"
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_health_plugin = {
+ .name = "health",
+ .description = "Check TCP server health",
+ .cb_init = in_health_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_health_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_health_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET|FLB_INPUT_CORO
+};
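The health collector above boils down to one question per interval: can a TCP connection to the configured Host/Port be established right now? flb_upstream_conn_get() performs the connect, and the emitted record only carries the resulting boolean (plus optional hostname/port fields). As a standalone, IPv4-only illustration of that probe with plain POSIX sockets (hypothetical helper name, example address):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

/* Return 1 if a TCP connection to ip:port succeeds, 0 otherwise. */
static int tcp_alive(const char *ip, int port)
{
    int fd;
    int ret;
    struct sockaddr_in addr;

    fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd < 0) {
        return 0;
    }

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(port);
    if (inet_pton(AF_INET, ip, &addr.sin_addr) != 1) {
        close(fd);
        return 0;
    }

    ret = connect(fd, (struct sockaddr *) &addr, sizeof(addr));
    close(fd);

    return ret == 0 ? 1 : 0;
}

int main(void)
{
    /* example target; the real plugin takes these from the Host/Port settings */
    printf("{\"alive\": %s}\n", tcp_alive("127.0.0.1", 80) ? "true" : "false");
    return 0;
}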
diff --git a/src/fluent-bit/plugins/in_http/CMakeLists.txt b/src/fluent-bit/plugins/in_http/CMakeLists.txt
new file mode 100644
index 000000000..69ebeab71
--- /dev/null
+++ b/src/fluent-bit/plugins/in_http/CMakeLists.txt
@@ -0,0 +1,12 @@
+if(NOT FLB_HTTP_SERVER)
+ message(FATAL_ERROR "HTTP input plugin requires FLB_HTTP_SERVER=On.")
+endif()
+
+set(src
+ http.c
+ http_conn.c
+ http_prot.c
+ http_config.c
+ )
+
+FLB_PLUGIN(in_http "${src}" "monkey-core-static")
diff --git a/src/fluent-bit/plugins/in_http/http.c b/src/fluent-bit/plugins/in_http/http.c
new file mode 100644
index 000000000..7bbe6e5f4
--- /dev/null
+++ b/src/fluent-bit/plugins/in_http/http.c
@@ -0,0 +1,204 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_config.h>
+
+#include "http.h"
+#include "http_conn.h"
+#include "http_config.h"
+
+/*
+ * For a server event, the collection event means a new client has arrived; we
+ * accept the connection and create a new TCP instance which will wait for
+ * JSON map messages.
+ */
+static int in_http_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_connection *connection;
+ struct http_conn *conn;
+ struct flb_http *ctx;
+
+ ctx = in_context;
+
+ connection = flb_downstream_conn_get(ctx->downstream);
+
+ if (connection == NULL) {
+ flb_plg_error(ctx->ins, "could not accept new connection");
+
+ return -1;
+ }
+
+ flb_plg_trace(ctx->ins, "new TCP connection arrived FD=%i",
+ connection->fd);
+
+ conn = http_conn_add(connection, ctx);
+
+ if (conn == NULL) {
+ flb_downstream_conn_release(connection);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int in_http_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ unsigned short int port;
+ int ret;
+ struct flb_http *ctx;
+
+ (void) data;
+
+ /* Create context and basic conf */
+ ctx = http_config_create(ins);
+ if (!ctx) {
+ return -1;
+ }
+
+ ctx->collector_id = -1;
+
+ /* Populate context with config map defaults and incoming properties */
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "configuration error");
+ http_config_destroy(ctx);
+ return -1;
+ }
+
+ /* Set the context */
+ flb_input_set_context(ins, ctx);
+
+ port = (unsigned short int) strtoul(ctx->tcp_port, NULL, 10);
+
+ ctx->downstream = flb_downstream_create(FLB_TRANSPORT_TCP,
+ ins->flags,
+ ctx->listen,
+ port,
+ ins->tls,
+ config,
+ &ins->net_setup);
+
+ if (ctx->downstream == NULL) {
+ flb_plg_error(ctx->ins,
+ "could not initialize downstream on %s:%s. Aborting",
+ ctx->listen, ctx->tcp_port);
+
+ http_config_destroy(ctx);
+
+ return -1;
+ }
+
+ flb_input_downstream_set(ctx->downstream, ctx->ins);
+
+ if (ctx->successful_response_code != 200 &&
+ ctx->successful_response_code != 201 &&
+ ctx->successful_response_code != 204) {
+        flb_plg_error(ctx->ins, "%d is not a supported response code, using default 201",
+ ctx->successful_response_code);
+ ctx->successful_response_code = 201;
+ }
+
+ /* Collect upon data available on the standard input */
+ ret = flb_input_set_collector_socket(ins,
+ in_http_collect,
+ ctx->downstream->server_fd,
+ config);
+ if (ret == -1) {
+        flb_plg_error(ctx->ins, "Could not set collector for HTTP input plugin");
+ http_config_destroy(ctx);
+
+ return -1;
+ }
+
+ ctx->collector_id = ret;
+
+ return 0;
+}
+
+static int in_http_exit(void *data, struct flb_config *config)
+{
+ struct flb_http *ctx;
+
+ (void) config;
+
+ ctx = data;
+
+ if (ctx != NULL) {
+ http_config_destroy(ctx);
+ }
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_max_size", HTTP_BUFFER_MAX_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_http, buffer_max_size),
+ ""
+ },
+
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_chunk_size", HTTP_BUFFER_CHUNK_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_http, buffer_chunk_size),
+ ""
+ },
+
+ {
+ FLB_CONFIG_MAP_SLIST_1, "success_header", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_http, success_headers),
+ "Add an HTTP header key/value pair on success. Multiple headers can be set"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "tag_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_http, tag_key),
+ ""
+ },
+ {
+ FLB_CONFIG_MAP_INT, "successful_response_code", "201",
+ 0, FLB_TRUE, offsetof(struct flb_http, successful_response_code),
+ "Set successful response code. 200, 201 and 204 are supported."
+ },
+
+
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_http_plugin = {
+ .name = "http",
+ .description = "HTTP",
+ .cb_init = in_http_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_http_collect,
+ .cb_flush_buf = NULL,
+ .cb_pause = NULL,
+ .cb_resume = NULL,
+ .cb_exit = in_http_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET_SERVER | FLB_IO_OPT_TLS
+};
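in_http_collect() above runs whenever the listening socket becomes readable and accepts exactly one pending client, handing the rest to the connection handler; the socket setup and TLS machinery are provided by flb_downstream. The plain-sockets sketch below shows the same accept-when-readable pattern in isolation (hypothetical, single-shot, no TLS, no event loop); the port merely mirrors the plugin's 9880 default.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <poll.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
    int srv;
    int client;
    struct sockaddr_in addr;
    struct pollfd pfd;

    srv = socket(AF_INET, SOCK_STREAM, 0);
    if (srv < 0) {
        return 1;
    }

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_ANY);
    addr.sin_port = htons(9880);             /* same default port as the plugin */

    if (bind(srv, (struct sockaddr *) &addr, sizeof(addr)) < 0 ||
        listen(srv, 16) < 0) {
        close(srv);
        return 1;
    }

    /* "collector socket" pattern: wait until the server fd is readable,
     * then accept one pending client, as in_http_collect() does per event */
    pfd.fd = srv;
    pfd.events = POLLIN;

    if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
        client = accept(srv, NULL, NULL);
        if (client >= 0) {
            printf("new TCP connection arrived FD=%d\n", client);
            close(client);
        }
    }

    close(srv);
    return 0;
}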
diff --git a/src/fluent-bit/plugins/in_http/http.h b/src/fluent-bit/plugins/in_http/http.h
new file mode 100644
index 000000000..f9832e9b2
--- /dev/null
+++ b/src/fluent-bit/plugins/in_http/http.h
@@ -0,0 +1,58 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_HTTP_H
+#define FLB_IN_HTTP_H
+
+#include <fluent-bit/flb_downstream.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <monkey/monkey.h>
+
+#define HTTP_BUFFER_MAX_SIZE "4M"
+#define HTTP_BUFFER_CHUNK_SIZE "512K"
+
+struct flb_http {
+ int successful_response_code;
+ flb_sds_t listen;
+ flb_sds_t tcp_port;
+ const char *tag_key;
+
+ int collector_id;
+
+ /* Success HTTP headers */
+ struct mk_list *success_headers;
+ flb_sds_t success_headers_str;
+
+ size_t buffer_max_size; /* Maximum buffer size */
+ size_t buffer_chunk_size; /* Chunk allocation size */
+
+ struct flb_log_event_encoder log_encoder;
+ struct flb_downstream *downstream; /* Client manager */
+ struct mk_list connections; /* linked list of connections */
+
+ struct mk_server *server;
+ struct flb_input_instance *ins;
+};
+
+
+#endif
diff --git a/src/fluent-bit/plugins/in_http/http_config.c b/src/fluent-bit/plugins/in_http/http_config.c
new file mode 100644
index 000000000..f23759a66
--- /dev/null
+++ b/src/fluent-bit/plugins/in_http/http_config.c
@@ -0,0 +1,157 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "http.h"
+#include "http_config.h"
+#include "http_conn.h"
+
+struct flb_http *http_config_create(struct flb_input_instance *ins)
+{
+ struct mk_list *header_iterator;
+ struct flb_slist_entry *header_value;
+ struct flb_slist_entry *header_name;
+ struct flb_config_map_val *header_pair;
+ char port[8];
+ int ret;
+ struct flb_http *ctx;
+
+ ctx = flb_calloc(1, sizeof(struct flb_http));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ mk_list_init(&ctx->connections);
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Listen interface (if not set, defaults to 0.0.0.0:9880) */
+ flb_input_net_default_listener("0.0.0.0", 9880, ins);
+
+ ctx->listen = flb_strdup(ins->host.listen);
+ snprintf(port, sizeof(port) - 1, "%d", ins->host.port);
+ ctx->tcp_port = flb_strdup(port);
+
+ /* HTTP Server specifics */
+ ctx->server = flb_calloc(1, sizeof(struct mk_server));
+ ctx->server->keep_alive = MK_TRUE;
+
+ /* monkey detects server->workers == 0 as the server not being initialized at the
+ * moment so we want to make sure that it stays that way!
+ */
+
+ ret = flb_log_event_encoder_init(&ctx->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "error initializing event encoder : %d", ret);
+
+ http_config_destroy(ctx);
+
+ return NULL;
+ }
+
+ ctx->success_headers_str = flb_sds_create_size(1);
+
+ if (ctx->success_headers_str == NULL) {
+ http_config_destroy(ctx);
+
+ return NULL;
+ }
+
+ flb_config_map_foreach(header_iterator, header_pair, ctx->success_headers) {
+ header_name = mk_list_entry_first(header_pair->val.list,
+ struct flb_slist_entry,
+ _head);
+
+ header_value = mk_list_entry_last(header_pair->val.list,
+ struct flb_slist_entry,
+ _head);
+
+ ret = flb_sds_cat_safe(&ctx->success_headers_str,
+ header_name->str,
+ flb_sds_len(header_name->str));
+
+ if (ret == 0) {
+ ret = flb_sds_cat_safe(&ctx->success_headers_str,
+ ": ",
+ 2);
+ }
+
+ if (ret == 0) {
+ ret = flb_sds_cat_safe(&ctx->success_headers_str,
+ header_value->str,
+ flb_sds_len(header_value->str));
+ }
+
+ if (ret == 0) {
+ ret = flb_sds_cat_safe(&ctx->success_headers_str,
+ "\r\n",
+ 2);
+ }
+
+ if (ret != 0) {
+ http_config_destroy(ctx);
+
+ return NULL;
+ }
+ }
+
+ return ctx;
+}
+
+int http_config_destroy(struct flb_http *ctx)
+{
+ /* release all connections */
+ http_conn_release_all(ctx);
+
+ flb_log_event_encoder_destroy(&ctx->log_encoder);
+
+ if (ctx->collector_id != -1) {
+ flb_input_collector_delete(ctx->collector_id, ctx->ins);
+
+ ctx->collector_id = -1;
+ }
+
+ if (ctx->downstream != NULL) {
+ flb_downstream_destroy(ctx->downstream);
+ }
+
+ if (ctx->server) {
+ flb_free(ctx->server);
+ }
+
+ if (ctx->success_headers_str != NULL) {
+ flb_sds_destroy(ctx->success_headers_str);
+ }
+
+
+ flb_free(ctx->listen);
+ flb_free(ctx->tcp_port);
+ flb_free(ctx);
+ return 0;
+}
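http_config_create() above renders every configured success_header pair into one pre-built string of raw "Name: Value\r\n" lines so responses can append them verbatim. A tiny standalone sketch of that rendering step with plain libc (the header names and values are invented example pairs):

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* example pairs as they would come from 'success_header' entries */
    const char *names[]  = { "X-Custom", "Access-Control-Allow-Origin" };
    const char *values[] = { "demo",     "*" };
    char out[256] = "";
    size_t i;

    for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
        strncat(out, names[i], sizeof(out) - strlen(out) - 1);
        strncat(out, ": ",     sizeof(out) - strlen(out) - 1);
        strncat(out, values[i], sizeof(out) - strlen(out) - 1);
        strncat(out, "\r\n",   sizeof(out) - strlen(out) - 1);
    }

    /* 'out' now holds the block appended after the response status line */
    fputs(out, stdout);
    return 0;
}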
diff --git a/src/fluent-bit/plugins/in_http/http_config.h b/src/fluent-bit/plugins/in_http/http_config.h
new file mode 100644
index 000000000..8a9611116
--- /dev/null
+++ b/src/fluent-bit/plugins/in_http/http_config.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_HTTP_CONFIG_H
+#define FLB_IN_HTTP_CONFIG_H
+
+#include <fluent-bit/flb_input_plugin.h>
+#include "http.h"
+
+struct flb_http *http_config_create(struct flb_input_instance *ins);
+int http_config_destroy(struct flb_http *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_http/http_conn.c b/src/fluent-bit/plugins/in_http/http_conn.c
new file mode 100644
index 000000000..a5d9efa98
--- /dev/null
+++ b/src/fluent-bit/plugins/in_http/http_conn.c
@@ -0,0 +1,306 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_engine.h>
+
+#include "http.h"
+#include "http_conn.h"
+#include "http_prot.h"
+
+static void http_conn_request_init(struct mk_http_session *session,
+ struct mk_http_request *request);
+
+static int http_conn_event(void *data)
+{
+ int status;
+ size_t size;
+ ssize_t available;
+ ssize_t bytes;
+ char *tmp;
+ char *request_end;
+ size_t request_len;
+ struct flb_connection *connection;
+ struct http_conn *conn;
+ struct mk_event *event;
+ struct flb_http *ctx;
+
+ connection = (struct flb_connection *) data;
+
+ conn = connection->user_data;
+
+ ctx = conn->ctx;
+
+ event = &connection->event;
+
+ if (event->mask & MK_EVENT_READ) {
+ available = (conn->buf_size - conn->buf_len) - 1;
+ if (available < 1) {
+ if (conn->buf_size + ctx->buffer_chunk_size > ctx->buffer_max_size) {
+ flb_plg_trace(ctx->ins,
+ "fd=%i incoming data exceed limit (%zu KB)",
+ event->fd, (ctx->buffer_max_size / 1024));
+ http_conn_del(conn);
+ return -1;
+ }
+
+ size = conn->buf_size + ctx->buffer_chunk_size;
+ tmp = flb_realloc(conn->buf_data, size);
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+ flb_plg_trace(ctx->ins, "fd=%i buffer realloc %i -> %zu",
+ event->fd, conn->buf_size, size);
+
+ conn->buf_data = tmp;
+ conn->buf_size = size;
+ available = (conn->buf_size - conn->buf_len) - 1;
+ }
+
+ /* Read data */
+ bytes = flb_io_net_read(connection,
+ (void *) &conn->buf_data[conn->buf_len],
+ available);
+
+ if (bytes <= 0) {
+ flb_plg_trace(ctx->ins, "fd=%i closed connection", event->fd);
+ http_conn_del(conn);
+ return -1;
+ }
+
+ flb_plg_trace(ctx->ins, "read()=%zi pre_len=%i now_len=%zi",
+ bytes, conn->buf_len, conn->buf_len + bytes);
+ conn->buf_len += bytes;
+ conn->buf_data[conn->buf_len] = '\0';
+
+ status = mk_http_parser(&conn->request, &conn->session.parser,
+ conn->buf_data, conn->buf_len, conn->session.server);
+
+ if (status == MK_HTTP_PARSER_OK) {
+ /* Do more logic parsing and checks for this request */
+ http_prot_handle(ctx, conn, &conn->session, &conn->request);
+
+ /* Evict the processed request from the connection buffer and reinitialize
+ * the HTTP parser.
+ */
+
+ request_end = NULL;
+
+ if (NULL != conn->request.data.data) {
+ request_end = &conn->request.data.data[conn->request.data.len];
+ }
+ else {
+ request_end = strstr(conn->buf_data, "\r\n\r\n");
+
+                if (NULL != request_end) {
+ request_end = &request_end[4];
+ }
+ }
+
+ if (NULL != request_end) {
+ request_len = (size_t)(request_end - conn->buf_data);
+
+ if (0 < (conn->buf_len - request_len)) {
+ memmove(conn->buf_data, &conn->buf_data[request_len],
+ conn->buf_len - request_len);
+
+ conn->buf_data[conn->buf_len - request_len] = '\0';
+ conn->buf_len -= request_len;
+ }
+ else {
+ memset(conn->buf_data, 0, request_len);
+
+ conn->buf_len = 0;
+ }
+
+ /* Reinitialize the parser so the next request is properly
+ * handled, the additional memset intends to wipe any left over data
+ * from the headers parsed in the previous request.
+ */
+ memset(&conn->session.parser, 0, sizeof(struct mk_http_parser));
+ mk_http_parser_init(&conn->session.parser);
+ http_conn_request_init(&conn->session, &conn->request);
+ }
+ }
+ else if (status == MK_HTTP_PARSER_ERROR) {
+ http_prot_handle_error(ctx, conn, &conn->session, &conn->request);
+
+ /* Reinitialize the parser so the next request is properly
+ * handled, the additional memset intends to wipe any left over data
+ * from the headers parsed in the previous request.
+ */
+ memset(&conn->session.parser, 0, sizeof(struct mk_http_parser));
+ mk_http_parser_init(&conn->session.parser);
+ http_conn_request_init(&conn->session, &conn->request);
+ }
+
+ /* FIXME: add Protocol handler here */
+ return bytes;
+ }
+
+ if (event->mask & MK_EVENT_CLOSE) {
+ flb_plg_trace(ctx->ins, "fd=%i hangup", event->fd);
+ http_conn_del(conn);
+ return -1;
+ }
+
+ return 0;
+
+}
+
+static void http_conn_session_init(struct mk_http_session *session,
+ struct mk_server *server,
+ int client_fd)
+{
+    /* Initialize the session fields */
+ session->_sched_init = MK_TRUE;
+ session->pipelined = MK_FALSE;
+ session->counter_connections = 0;
+ session->close_now = MK_FALSE;
+ session->status = MK_REQUEST_STATUS_INCOMPLETE;
+ session->server = server;
+ session->socket = client_fd;
+
+ /* creation time in unix time */
+ session->init_time = time(NULL);
+
+ session->channel = mk_channel_new(MK_CHANNEL_SOCKET, session->socket);
+ session->channel->io = session->server->network;
+
+ /* Init session request list */
+ mk_list_init(&session->request_list);
+
+ /* Initialize the parser */
+ mk_http_parser_init(&session->parser);
+}
+
+static void http_conn_request_init(struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ memset(request, 0, sizeof(struct mk_http_request));
+
+ mk_http_request_init(session, request, session->server);
+
+ request->in_headers.type = MK_STREAM_IOV;
+ request->in_headers.dynamic = MK_FALSE;
+ request->in_headers.cb_consumed = NULL;
+ request->in_headers.cb_finished = NULL;
+ request->in_headers.stream = &request->stream;
+
+ mk_list_add(&request->in_headers._head, &request->stream.inputs);
+
+ request->session = session;
+}
+
+struct http_conn *http_conn_add(struct flb_connection *connection,
+ struct flb_http *ctx)
+{
+ struct http_conn *conn;
+ int ret;
+
+ conn = flb_calloc(1, sizeof(struct http_conn));
+ if (!conn) {
+ flb_errno();
+ return NULL;
+ }
+
+ conn->connection = connection;
+
+ /* Set data for the event-loop */
+ MK_EVENT_NEW(&connection->event);
+
+ connection->user_data = conn;
+ connection->event.type = FLB_ENGINE_EV_CUSTOM;
+ connection->event.handler = http_conn_event;
+
+ /* Connection info */
+ conn->ctx = ctx;
+ conn->buf_len = 0;
+
+ conn->buf_data = flb_malloc(ctx->buffer_chunk_size);
+ if (!conn->buf_data) {
+ flb_errno();
+
+ flb_plg_error(ctx->ins, "could not allocate new connection");
+ flb_free(conn);
+
+ return NULL;
+ }
+ conn->buf_size = ctx->buffer_chunk_size;
+
+ /* Register instance into the event loop */
+ ret = mk_event_add(flb_engine_evl_get(),
+ connection->fd,
+ FLB_ENGINE_EV_CUSTOM,
+ MK_EVENT_READ,
+ &connection->event);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not register new connection");
+
+ flb_free(conn->buf_data);
+ flb_free(conn);
+
+ return NULL;
+ }
+
+ /* Initialize HTTP Session: this is a custom context for Monkey HTTP */
+ http_conn_session_init(&conn->session, ctx->server, conn->connection->fd);
+
+ /* Initialize HTTP Request: this is the initial request and it will be reinitialized
+ * automatically after the request is handled so it can be used for the next one.
+ */
+ http_conn_request_init(&conn->session, &conn->request);
+
+ /* Link connection node to parent context list */
+ mk_list_add(&conn->_head, &ctx->connections);
+
+ return conn;
+}
+
+int http_conn_del(struct http_conn *conn)
+{
+ if (conn->session.channel != NULL) {
+ mk_channel_release(conn->session.channel);
+ }
+
+ /* The downstream unregisters the file descriptor from the event-loop
+ * so there's nothing to be done by the plugin
+ */
+ flb_downstream_conn_release(conn->connection);
+
+ mk_list_del(&conn->_head);
+
+ flb_free(conn->buf_data);
+ flb_free(conn);
+
+ return 0;
+}
+
+void http_conn_release_all(struct flb_http *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct http_conn *conn;
+
+ mk_list_foreach_safe(head, tmp, &ctx->connections) {
+ conn = mk_list_entry(head, struct http_conn, _head);
+ http_conn_del(conn);
+ }
+}
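
The eviction step in http_conn_event() above is what makes pipelined requests work: once a request has been handled, its bytes are removed from the front of the connection buffer and any remaining bytes are shifted down with memmove. A minimal standalone sketch of that idea (the helper name is hypothetical and not part of this patch):

    #include <string.h>

    /* Drop the first request_len bytes (request_len <= *buf_len) and keep any
     * pipelined remainder at the front of the buffer, NUL-terminated. */
    static void evict_request(char *buf, size_t *buf_len, size_t request_len)
    {
        size_t remaining = *buf_len - request_len;

        if (remaining > 0) {
            memmove(buf, buf + request_len, remaining);
        }
        buf[remaining] = '\0';
        *buf_len = remaining;
    }
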
diff --git a/src/fluent-bit/plugins/in_http/http_conn.h b/src/fluent-bit/plugins/in_http/http_conn.h
new file mode 100644
index 000000000..8e1078982
--- /dev/null
+++ b/src/fluent-bit/plugins/in_http/http_conn.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_HTTP_CONN
+#define FLB_IN_HTTP_CONN
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_connection.h>
+
+#include <monkey/mk_http.h>
+#include <monkey/mk_http_parser.h>
+#include <monkey/mk_utils.h>
+
+struct http_conn {
+ /* Buffer */
+ char *buf_data; /* Buffer data */
+ int buf_len; /* Data length */
+ int buf_size; /* Buffer size */
+
+ /*
+     * Parser context: we only hold one parser per connection,
+     * which is re-used every time we have a new request.
+ */
+ struct mk_http_parser parser;
+ struct mk_http_request request;
+ struct mk_http_session session;
+ struct flb_connection *connection;
+
+ void *ctx; /* Plugin parent context */
+ struct mk_list _head; /* link to flb_http->connections */
+};
+
+struct http_conn *http_conn_add(struct flb_connection *connection, struct flb_http *ctx);
+int http_conn_del(struct http_conn *conn);
+void http_conn_release_all(struct flb_http *ctx);
+
+
+#endif
diff --git a/src/fluent-bit/plugins/in_http/http_prot.c b/src/fluent-bit/plugins/in_http/http_prot.c
new file mode 100644
index 000000000..ab16eb328
--- /dev/null
+++ b/src/fluent-bit/plugins/in_http/http_prot.c
@@ -0,0 +1,665 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_version.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include <monkey/monkey.h>
+#include <monkey/mk_core.h>
+
+#include "http.h"
+#include "http_conn.h"
+
+#define HTTP_CONTENT_JSON 0
+#define HTTP_CONTENT_URLENCODED 1
+
+static inline char hex2nibble(char c)
+{
+ if ((c >= 0x30) && (c <= '9')) {
+ return c - 0x30;
+ }
+ // 0x30-0x39 are digits, 0x41-0x46 A-F,
+ // so there is a gap at 0x40
+ if ((c >= 'A') && (c <= 'F')) {
+ return (c - 'A') + 10;
+ }
+ if ((c >= 'a') && (c <= 'f')) {
+ return (c - 'a') + 10;
+ }
+ return 0;
+}
+
+static int sds_uri_decode(flb_sds_t s)
+{
+ char buf[1024];
+ char *optr;
+ char *iptr;
+
+
+ for (optr = buf, iptr = s; iptr < s + flb_sds_len(s) && optr-buf < sizeof(buf); iptr++) {
+ if (*iptr == '%') {
+ if (iptr+2 > (s + flb_sds_len(s))) {
+ return -1;
+ }
+ *optr++ = hex2nibble(*(iptr+1)) << 4 | hex2nibble(*(iptr+2));
+ iptr+=2;
+ } else if (*iptr == '+') {
+ *optr++ = ' ';
+ } else {
+ *optr++ = *iptr;
+ }
+ }
+
+ memcpy(s, buf, optr-buf);
+ s[optr-buf] = '\0';
+ flb_sds_len_set(s, (optr-buf));
+
+ return 0;
+}
+
+static int send_response(struct http_conn *conn, int http_status, char *message)
+{
+ struct flb_http *context;
+ size_t sent;
+ int len;
+ flb_sds_t out;
+
+ context = (struct flb_http *) conn->ctx;
+
+ out = flb_sds_create_size(256);
+ if (!out) {
+ return -1;
+ }
+
+ if (message) {
+ len = strlen(message);
+ }
+ else {
+ len = 0;
+ }
+
+ if (http_status == 201) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 201 Created \r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "%s"
+ "Content-Length: 0\r\n\r\n",
+ FLB_VERSION_STR,
+ context->success_headers_str);
+ }
+ else if (http_status == 200) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 200 OK\r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "%s"
+ "Content-Length: 0\r\n\r\n",
+ FLB_VERSION_STR,
+ context->success_headers_str);
+ }
+ else if (http_status == 204) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 204 No Content\r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "%s"
+ "\r\n\r\n",
+ FLB_VERSION_STR,
+ context->success_headers_str);
+ }
+ else if (http_status == 400) {
+ flb_sds_printf(&out,
+                       "HTTP/1.1 400 Bad Request\r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "Content-Length: %i\r\n\r\n%s",
+ FLB_VERSION_STR,
+ len, message);
+ }
+
+    /* We should check this operation's result */
+ flb_io_net_write(conn->connection,
+ (void *) out,
+ flb_sds_len(out),
+ &sent);
+
+ flb_sds_destroy(out);
+
+ return 0;
+}
+
+/* Get the tag from the value of the configured tag_key in the record */
+static flb_sds_t tag_key(struct flb_http *ctx, msgpack_object *map)
+{
+ size_t map_size = map->via.map.size;
+ msgpack_object_kv *kv;
+ msgpack_object key;
+ msgpack_object val;
+ char *key_str = NULL;
+ char *val_str = NULL;
+ size_t key_str_size = 0;
+ size_t val_str_size = 0;
+ int j;
+ int check = FLB_FALSE;
+ int found = FLB_FALSE;
+ flb_sds_t tag;
+
+ kv = map->via.map.ptr;
+
+ for(j=0; j < map_size; j++) {
+ check = FLB_FALSE;
+ found = FLB_FALSE;
+ key = (kv+j)->key;
+ if (key.type == MSGPACK_OBJECT_BIN) {
+ key_str = (char *) key.via.bin.ptr;
+ key_str_size = key.via.bin.size;
+ check = FLB_TRUE;
+ }
+ if (key.type == MSGPACK_OBJECT_STR) {
+ key_str = (char *) key.via.str.ptr;
+ key_str_size = key.via.str.size;
+ check = FLB_TRUE;
+ }
+
+ if (check == FLB_TRUE) {
+ if (strncmp(ctx->tag_key, key_str, key_str_size) == 0) {
+ val = (kv+j)->val;
+ if (val.type == MSGPACK_OBJECT_BIN) {
+ val_str = (char *) val.via.bin.ptr;
+                    val_str_size = val.via.bin.size;
+ found = FLB_TRUE;
+ break;
+ }
+ if (val.type == MSGPACK_OBJECT_STR) {
+ val_str = (char *) val.via.str.ptr;
+ val_str_size = val.via.str.size;
+ found = FLB_TRUE;
+ break;
+ }
+ }
+ }
+ }
+
+ if (found == FLB_TRUE) {
+ tag = flb_sds_create_len(val_str, val_str_size);
+ if (!tag) {
+ flb_errno();
+ return NULL;
+ }
+ return tag;
+ }
+
+
+ flb_plg_error(ctx->ins, "Could not find tag_key %s in record", ctx->tag_key);
+ return NULL;
+}
+
+int process_pack(struct flb_http *ctx, flb_sds_t tag, char *buf, size_t size)
+{
+ int ret;
+ size_t off = 0;
+ msgpack_unpacked result;
+ struct flb_time tm;
+ int i = 0;
+ msgpack_object *obj;
+ msgpack_object record;
+ flb_sds_t tag_from_record = NULL;
+
+ flb_time_get(&tm);
+
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, buf, size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ if (result.data.type == MSGPACK_OBJECT_MAP) {
+ tag_from_record = NULL;
+ if (ctx->tag_key) {
+ obj = &result.data;
+ tag_from_record = tag_key(ctx, obj);
+ }
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ &ctx->log_encoder,
+ &tm);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_body_from_msgpack_object(
+ &ctx->log_encoder,
+ &result.data);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ if (tag_from_record) {
+ flb_input_log_append(ctx->ins,
+ tag_from_record,
+ flb_sds_len(tag_from_record),
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ flb_sds_destroy(tag_from_record);
+ }
+ else if (tag) {
+ flb_input_log_append(ctx->ins, tag, flb_sds_len(tag),
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+ }
+ else {
+                    /* use the default plugin Tag (its internal name, e.g: http.0) */
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "Error encoding record : %d", ret);
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+ }
+ else if (result.data.type == MSGPACK_OBJECT_ARRAY) {
+ obj = &result.data;
+ for (i = 0; i < obj->via.array.size; i++)
+ {
+ record = obj->via.array.ptr[i];
+
+ tag_from_record = NULL;
+ if (ctx->tag_key) {
+ tag_from_record = tag_key(ctx, &record);
+ }
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ &ctx->log_encoder,
+ &tm);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_body_from_msgpack_object(
+ &ctx->log_encoder,
+ &record);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ if (tag_from_record) {
+ flb_input_log_append(ctx->ins,
+ tag_from_record,
+ flb_sds_len(tag_from_record),
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ flb_sds_destroy(tag_from_record);
+ }
+ else if (tag) {
+ flb_input_log_append(ctx->ins, tag, flb_sds_len(tag),
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+ }
+ else {
+                        /* use the default plugin Tag (its internal name, e.g: http.0) */
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "Error encoding record : %d", ret);
+ }
+
+ /* TODO : Optimize this
+ *
+ * This is wasteful, considering that we are emitting a series
+ * of records we should start and commit each one and then
+ * emit them all at once after the loop.
+ */
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+ }
+
+ break;
+ }
+ else {
+ flb_plg_error(ctx->ins, "skip record from invalid type: %i",
+ result.data.type);
+
+ msgpack_unpacked_destroy(&result);
+
+ return -1;
+ }
+ }
+
+ msgpack_unpacked_destroy(&result);
+
+ return 0;
+}
+
+static ssize_t parse_payload_json(struct flb_http *ctx, flb_sds_t tag,
+ char *payload, size_t size)
+{
+ int ret;
+ int out_size;
+ char *pack;
+ struct flb_pack_state pack_state;
+
+ /* Initialize packer */
+ flb_pack_state_init(&pack_state);
+
+ /* Pack JSON as msgpack */
+ ret = flb_pack_json_state(payload, size,
+ &pack, &out_size, &pack_state);
+ flb_pack_state_reset(&pack_state);
+
+ /* Handle exceptions */
+ if (ret == FLB_ERR_JSON_PART) {
+ flb_plg_warn(ctx->ins, "JSON data is incomplete, skipping");
+ return -1;
+ }
+ else if (ret == FLB_ERR_JSON_INVAL) {
+ flb_plg_warn(ctx->ins, "invalid JSON message, skipping");
+ return -1;
+ }
+ else if (ret == -1) {
+ return -1;
+ }
+
+    /* Process the packed JSON records */
+ process_pack(ctx, tag, pack, out_size);
+ flb_free(pack);
+
+ return 0;
+}
+
+static ssize_t parse_payload_urlencoded(struct flb_http *ctx, flb_sds_t tag,
+ char *payload, size_t size)
+{
+ struct mk_list *kvs;
+ struct mk_list *head = NULL;
+ struct flb_split_entry *cur = NULL;
+ char **keys = NULL;
+ char **vals = NULL;
+ char *sep;
+ char *start;
+ int idx = 0;
+ int ret = -1;
+ msgpack_packer pck;
+ msgpack_sbuffer sbuf;
+
+
+ /* initialize buffers */
+ msgpack_sbuffer_init(&sbuf);
+ msgpack_packer_init(&pck, &sbuf, msgpack_sbuffer_write);
+
+ kvs = flb_utils_split(payload, '&', -1 );
+ if (kvs == NULL) {
+ goto split_error;
+ }
+
+ keys = flb_calloc(mk_list_size(kvs), sizeof(char *));
+ if (keys == NULL) {
+ goto keys_calloc_error;
+ }
+
+ vals = flb_calloc(mk_list_size(kvs), sizeof(char *));
+ if (vals == NULL) {
+ goto vals_calloc_error;
+ }
+
+ mk_list_foreach(head, kvs) {
+ cur = mk_list_entry(head, struct flb_split_entry, _head);
+ if (cur->value[0] == '\n') {
+ start = &cur->value[1];
+ } else {
+ start = cur->value;
+ }
+ sep = strchr(start, '=');
+ if (sep == NULL) {
+ vals[idx] = NULL;
+ continue;
+ }
+ *sep++ = '\0';
+
+ keys[idx] = flb_sds_create_len(start, strlen(start));
+ vals[idx] = flb_sds_create_len(sep, strlen(sep));
+
+ flb_sds_trim(keys[idx]);
+ flb_sds_trim(vals[idx]);
+ idx++;
+ }
+
+ msgpack_pack_map(&pck, mk_list_size(kvs));
+ for (idx = 0; idx < mk_list_size(kvs); idx++) {
+ msgpack_pack_str(&pck, flb_sds_len(keys[idx]));
+ msgpack_pack_str_body(&pck, keys[idx], flb_sds_len(keys[idx]));
+
+ if (sds_uri_decode(vals[idx]) != 0) {
+ goto decode_error;
+ } else {
+ msgpack_pack_str(&pck, flb_sds_len(vals[idx]));
+ msgpack_pack_str_body(&pck, vals[idx], strlen(vals[idx]));
+ }
+ }
+
+ ret = process_pack(ctx, tag, sbuf.data, sbuf.size);
+
+decode_error:
+ for (idx = 0; idx < mk_list_size(kvs); idx++) {
+ if (keys[idx]) {
+ flb_sds_destroy(keys[idx]);
+ }
+ if (vals[idx]) {
+ flb_sds_destroy(vals[idx]);
+ }
+ }
+ flb_free(vals);
+vals_calloc_error:
+ flb_free(keys);
+keys_calloc_error:
+ flb_utils_split_free(kvs);
+split_error:
+ msgpack_sbuffer_destroy(&sbuf);
+ return ret;
+}
+
+static int process_payload(struct flb_http *ctx, struct http_conn *conn,
+ flb_sds_t tag,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ int type = -1;
+ struct mk_http_header *header;
+
+ header = &session->parser.headers[MK_HEADER_CONTENT_TYPE];
+ if (header->key.data == NULL) {
+ send_response(conn, 400, "error: header 'Content-Type' is not set\n");
+ return -1;
+ }
+
+ if (header->val.len == 16 &&
+ strncasecmp(header->val.data, "application/json", 16) == 0) {
+ type = HTTP_CONTENT_JSON;
+ }
+
+ if (header->val.len == 33 &&
+ strncasecmp(header->val.data, "application/x-www-form-urlencoded", 33) == 0) {
+ type = HTTP_CONTENT_URLENCODED;
+ }
+
+ if (type == -1) {
+ send_response(conn, 400, "error: invalid 'Content-Type'\n");
+ return -1;
+ }
+
+ if (request->data.len <= 0) {
+ send_response(conn, 400, "error: no payload found\n");
+ return -1;
+ }
+
+ if (type == HTTP_CONTENT_JSON) {
+ parse_payload_json(ctx, tag, request->data.data, request->data.len);
+ } else if (type == HTTP_CONTENT_URLENCODED) {
+ parse_payload_urlencoded(ctx, tag, request->data.data, request->data.len);
+ }
+
+ return 0;
+}
+
+static inline int mk_http_point_header(mk_ptr_t *h,
+ struct mk_http_parser *parser, int key)
+{
+ struct mk_http_header *header;
+
+ header = &parser->headers[key];
+ if (header->type == key) {
+ h->data = header->val.data;
+ h->len = header->val.len;
+ return 0;
+ }
+ else {
+ h->data = NULL;
+ h->len = -1;
+ }
+
+ return -1;
+}
+
+/*
+ * Handle an incoming request. It performs extra checks over the request; if
+ * everything is OK, it enqueues the incoming payload.
+ */
+int http_prot_handle(struct flb_http *ctx, struct http_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ int i;
+ int ret;
+ int len;
+ char *uri;
+ char *qs;
+ off_t diff;
+ flb_sds_t tag;
+ struct mk_http_header *header;
+
+ if (request->uri.data[0] != '/') {
+ send_response(conn, 400, "error: invalid request\n");
+ return -1;
+ }
+
+ /* Decode URI */
+ uri = mk_utils_url_decode(request->uri);
+ if (!uri) {
+ uri = mk_mem_alloc_z(request->uri.len + 1);
+ if (!uri) {
+ return -1;
+ }
+ memcpy(uri, request->uri.data, request->uri.len);
+ uri[request->uri.len] = '\0';
+ }
+
+ /* Try to match a query string so we can remove it */
+ qs = strchr(uri, '?');
+ if (qs) {
+ /* remove the query string part */
+ diff = qs - uri;
+ uri[diff] = '\0';
+ }
+
+    /* Compose the tag using the URI */
+ len = strlen(uri);
+
+ if (len == 1) {
+ tag = NULL; /* use default tag */
+ }
+ else {
+ tag = flb_sds_create_size(len);
+ if (!tag) {
+ mk_mem_free(uri);
+ return -1;
+ }
+
+ /* New tag skipping the URI '/' */
+ flb_sds_cat(tag, uri + 1, len - 1);
+
+        /* Sanitize: only allow alphanumeric chars, '_' and '.' */
+ for (i = 0; i < flb_sds_len(tag); i++) {
+ if (!isalnum(tag[i]) && tag[i] != '_' && tag[i] != '.') {
+ tag[i] = '_';
+ }
+ }
+ }
+
+ mk_mem_free(uri);
+
+ /* Check if we have a Host header: Hostname ; port */
+ mk_http_point_header(&request->host, &session->parser, MK_HEADER_HOST);
+
+ /* Header: Connection */
+ mk_http_point_header(&request->connection, &session->parser,
+ MK_HEADER_CONNECTION);
+
+ /* HTTP/1.1 needs Host header */
+ if (!request->host.data && request->protocol == MK_HTTP_PROTOCOL_11) {
+ flb_sds_destroy(tag);
+ return -1;
+ }
+
+ /* Should we close the session after this request ? */
+ mk_http_keepalive_check(session, request, ctx->server);
+
+ /* Content Length */
+ header = &session->parser.headers[MK_HEADER_CONTENT_LENGTH];
+ if (header->type == MK_HEADER_CONTENT_LENGTH) {
+ request->_content_length.data = header->val.data;
+ request->_content_length.len = header->val.len;
+ }
+ else {
+ request->_content_length.data = NULL;
+ }
+
+ if (request->method != MK_METHOD_POST) {
+ flb_sds_destroy(tag);
+ send_response(conn, 400, "error: invalid HTTP method\n");
+ return -1;
+ }
+
+ ret = process_payload(ctx, conn, tag, session, request);
+ flb_sds_destroy(tag);
+ send_response(conn, ctx->successful_response_code, NULL);
+ return ret;
+}
+
+/*
+ * Handle an incoming request which has resulted in an http parser error.
+ */
+int http_prot_handle_error(struct flb_http *ctx, struct http_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ send_response(conn, 400, "error: invalid request\n");
+ return -1;
+}
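
For reference, the percent-decoding done by hex2nibble() and sds_uri_decode() above turns "%XX" into a single byte and '+' into a space. A self-contained sketch under the same assumptions (NUL-terminated input, output never longer than input); the function name is hypothetical:

    #include <ctype.h>
    #include <stddef.h>

    /* In-place URL decoding; returns the decoded length. */
    static size_t url_decode(char *s, size_t len)
    {
        size_t i;
        size_t o = 0;

        for (i = 0; i < len; i++) {
            if (s[i] == '%' && i + 2 < len &&
                isxdigit((unsigned char) s[i + 1]) &&
                isxdigit((unsigned char) s[i + 2])) {
                int hi = tolower((unsigned char) s[i + 1]);
                int lo = tolower((unsigned char) s[i + 2]);

                hi = isdigit(hi) ? hi - '0' : (hi - 'a') + 10;
                lo = isdigit(lo) ? lo - '0' : (lo - 'a') + 10;
                s[o++] = (char) ((hi << 4) | lo);
                i += 2;
            }
            else if (s[i] == '+') {
                s[o++] = ' ';
            }
            else {
                s[o++] = s[i];
            }
        }
        s[o] = '\0';

        return o;
    }
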
diff --git a/src/fluent-bit/plugins/in_http/http_prot.h b/src/fluent-bit/plugins/in_http/http_prot.h
new file mode 100644
index 000000000..1cb603089
--- /dev/null
+++ b/src/fluent-bit/plugins/in_http/http_prot.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_HTTP_PROT
+#define FLB_IN_HTTP_PROT
+
+int http_prot_handle(struct flb_http *ctx, struct http_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request);
+
+int http_prot_handle_error(struct flb_http *ctx, struct http_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_kafka/CMakeLists.txt b/src/fluent-bit/plugins/in_kafka/CMakeLists.txt
new file mode 100644
index 000000000..696e263fa
--- /dev/null
+++ b/src/fluent-bit/plugins/in_kafka/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ in_kafka.c
+ )
+
+FLB_PLUGIN(in_kafka "${src}" "rdkafka")
+target_link_libraries(flb-plugin-in_kafka -lpthread)
diff --git a/src/fluent-bit/plugins/in_kafka/in_kafka.c b/src/fluent-bit/plugins/in_kafka/in_kafka.c
new file mode 100644
index 000000000..972ae4170
--- /dev/null
+++ b/src/fluent-bit/plugins/in_kafka/in_kafka.c
@@ -0,0 +1,382 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_engine.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_utils.h>
+#include <mpack/mpack.h>
+#include <stddef.h>
+#include <stdio.h>
+
+#include "fluent-bit/flb_input.h"
+#include "fluent-bit/flb_kafka.h"
+#include "fluent-bit/flb_mem.h"
+#include "in_kafka.h"
+#include "rdkafka.h"
+
+static int try_json(struct flb_log_event_encoder *log_encoder,
+ rd_kafka_message_t *rkm)
+{
+ int root_type;
+ char *buf = NULL;
+ size_t bufsize;
+ int ret;
+
+ ret = flb_pack_json(rkm->payload, rkm->len, &buf, &bufsize, &root_type, NULL);
+ if (ret) {
+ if (buf) {
+ flb_free(buf);
+ }
+ return ret;
+ }
+ flb_log_event_encoder_append_body_raw_msgpack(log_encoder, buf, bufsize);
+ flb_free(buf);
+ return 0;
+}
+
+static int process_message(struct flb_in_kafka_config *ctx,
+ rd_kafka_message_t *rkm)
+{
+ struct flb_log_event_encoder *log_encoder = ctx->log_encoder;
+ int ret;
+
+ ret = flb_log_event_encoder_begin_record(log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_cstring(log_encoder, "topic");
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ if (rkm->rkt) {
+ ret = flb_log_event_encoder_append_body_cstring(log_encoder,
+ rd_kafka_topic_name(rkm->rkt));
+ }
+ else {
+ ret = flb_log_event_encoder_append_body_null(log_encoder);
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("partition"),
+ FLB_LOG_EVENT_INT32_VALUE(rkm->partition));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("offset"),
+ FLB_LOG_EVENT_INT64_VALUE(rkm->offset));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_cstring(log_encoder, "error");
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ if (rkm->err) {
+ ret = flb_log_event_encoder_append_body_cstring(log_encoder,
+ rd_kafka_message_errstr(rkm));
+ }
+ else {
+ ret = flb_log_event_encoder_append_body_null(log_encoder);
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_cstring(log_encoder, "key");
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ if (rkm->key) {
+ ret = flb_log_event_encoder_append_body_string(log_encoder,
+ rkm->key,
+ rkm->key_len);
+ }
+ else {
+ ret = flb_log_event_encoder_append_body_null(log_encoder);
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_cstring(log_encoder, "payload");
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ if (rkm->payload) {
+ if (ctx->format != FLB_IN_KAFKA_FORMAT_JSON ||
+ try_json(log_encoder, rkm)) {
+ ret = flb_log_event_encoder_append_body_string(log_encoder,
+ rkm->payload,
+ rkm->len);
+ }
+ }
+ else {
+ ret = flb_log_event_encoder_append_body_null(log_encoder);
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(log_encoder);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_log_event_encoder_rollback_record(log_encoder);
+ }
+
+ return ret;
+}
+
+static int in_kafka_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ struct flb_in_kafka_config *ctx = in_context;
+ rd_kafka_message_t *rkm;
+
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+
+ while (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ rkm = rd_kafka_consumer_poll(ctx->kafka.rk, 1);
+
+ if (!rkm) {
+ break;
+ }
+
+ flb_plg_debug(ins, "kafka message received");
+
+ ret = process_message(ctx, rkm);
+
+ rd_kafka_message_destroy(rkm);
+
+ /* TO-DO: commit the record based on `ret` */
+ rd_kafka_commit(ctx->kafka.rk, NULL, 0);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ins, "Error encoding record : %d", ret);
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ return ret;
+}
+
+/* Initialize plugin */
+static int in_kafka_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ const char *conf;
+ struct flb_in_kafka_config *ctx;
+ rd_kafka_conf_t *kafka_conf = NULL;
+ rd_kafka_topic_partition_list_t *kafka_topics = NULL;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ (void) data;
+
+ /* Allocate space for the configuration context */
+ ctx = flb_malloc(sizeof(struct flb_in_kafka_config));
+ if (!ctx) {
+ return -1;
+ }
+ ctx->ins = ins;
+
+ ret = flb_input_config_map_set(ins, (void*) ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to load configuration.");
+ flb_free(ctx);
+ return -1;
+ }
+
+ kafka_conf = flb_kafka_conf_create(&ctx->kafka, &ins->properties, 1);
+ if (!kafka_conf) {
+ flb_plg_error(ins, "Could not initialize kafka config object");
+ goto init_error;
+ }
+
+ ctx->kafka.rk = rd_kafka_new(RD_KAFKA_CONSUMER, kafka_conf, errstr,
+ sizeof(errstr));
+
+ /* Create Kafka consumer handle */
+ if (!ctx->kafka.rk) {
+ flb_plg_error(ins, "Failed to create new consumer: %s", errstr);
+ goto init_error;
+ }
+
+ conf = flb_input_get_property("topics", ins);
+ if (!conf) {
+ flb_plg_error(ins, "config: no topics specified");
+ goto init_error;
+ }
+
+ kafka_topics = flb_kafka_parse_topics(conf);
+ if (!kafka_topics) {
+ flb_plg_error(ins, "Failed to parse topic list");
+ goto init_error;
+ }
+
+ if (strcasecmp(ctx->format_str, "none") == 0) {
+ ctx->format = FLB_IN_KAFKA_FORMAT_NONE;
+ }
+ else if (strcasecmp(ctx->format_str, "json") == 0) {
+ ctx->format = FLB_IN_KAFKA_FORMAT_JSON;
+ }
+ else {
+ flb_plg_error(ins, "config: invalid format \"%s\"", ctx->format_str);
+ goto init_error;
+ }
+
+ if ((err = rd_kafka_subscribe(ctx->kafka.rk, kafka_topics))) {
+ flb_plg_error(ins, "Failed to start consuming topics: %s", rd_kafka_err2str(err));
+ goto init_error;
+ }
+ rd_kafka_topic_partition_list_destroy(kafka_topics);
+ kafka_topics = NULL;
+
+ /* Set the context */
+ flb_input_set_context(ins, ctx);
+    /* Set a timed collector to periodically poll the Kafka consumer */
+
+ int poll_seconds = ctx->poll_ms / 1000;
+ int poll_milliseconds = ctx->poll_ms % 1000;
+
+ ret = flb_input_set_collector_time(ins,
+ in_kafka_collect,
+ poll_seconds, poll_milliseconds * 1e6,
+ config);
+ if (ret) {
+ flb_plg_error(ctx->ins, "could not set collector for kafka input plugin");
+ goto init_error;
+ }
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(ins, "could not initialize log encoder");
+ goto init_error;
+ }
+
+ return 0;
+
+init_error:
+ if (kafka_topics) {
+ rd_kafka_topic_partition_list_destroy(kafka_topics);
+ }
+ if (ctx->kafka.rk) {
+ rd_kafka_destroy(ctx->kafka.rk);
+ }
+ else if (kafka_conf) {
+        /* the conf object is owned and freed by rd_kafka_new() on success */
+ rd_kafka_conf_destroy(kafka_conf);
+ }
+ flb_free(ctx);
+
+ return -1;
+}
+
+/* Cleanup kafka input */
+static int in_kafka_exit(void *in_context, struct flb_config *config)
+{
+ struct flb_in_kafka_config *ctx;
+
+ if (!in_context) {
+ return 0;
+ }
+
+ ctx = in_context;
+ rd_kafka_destroy(ctx->kafka.rk);
+ flb_free(ctx->kafka.brokers);
+
+ if (ctx->log_encoder){
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ }
+
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_INT, "poll_ms", FLB_IN_KAFKA_DEFAULT_POLL_MS,
+ 0, FLB_TRUE, offsetof(struct flb_in_kafka_config, poll_ms),
+ "Interval in milliseconds to check for new messages."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "topics", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the kafka topics, delimited by commas."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "format", FLB_IN_KAFKA_DEFAULT_FORMAT,
+ 0, FLB_TRUE, offsetof(struct flb_in_kafka_config, format_str),
+ "Set the data format which will be used for parsing records."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "brokers", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the kafka brokers, delimited by commas."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "client_id", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the kafka client_id."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "group_id", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the kafka group_id."
+ },
+ {
+ FLB_CONFIG_MAP_STR_PREFIX, "rdkafka.", NULL,
+ /* FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_in_kafka_config, rdkafka_opts), */
+ 0, FLB_FALSE, 0,
+ "Set the librdkafka options"
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_kafka_plugin = {
+ .name = "kafka",
+ .description = "Kafka consumer input plugin",
+ .cb_init = in_kafka_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_kafka_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_kafka_exit,
+ .config_map = config_map
+};
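
A small illustration of the collector setup in in_kafka_init() above: the poll_ms option is split into the whole-second and nanosecond pair handed to the timed collector. The values below are hypothetical:

    #include <stdio.h>

    int main(void)
    {
        int poll_ms = 1500;                       /* e.g. poll_ms set to 1500 */
        int seconds = poll_ms / 1000;             /* -> 1 second */
        long nanoseconds = (long) (poll_ms % 1000) * 1000000L; /* -> 500000000 ns */

        printf("collector interval: %d s + %ld ns\n", seconds, nanoseconds);
        return 0;
    }
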
diff --git a/src/fluent-bit/plugins/in_kafka/in_kafka.h b/src/fluent-bit/plugins/in_kafka/in_kafka.h
new file mode 100644
index 000000000..2992efff1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_kafka/in_kafka.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_KAFKA_H
+#define FLB_IN_KAFKA_H
+
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_thread.h>
+#include <fluent-bit/flb_kafka.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+
+#define FLB_IN_KAFKA_DEFAULT_POLL_MS "500"
+#define FLB_IN_KAFKA_DEFAULT_FORMAT "none"
+
+enum {
+ FLB_IN_KAFKA_FORMAT_NONE,
+ FLB_IN_KAFKA_FORMAT_JSON,
+};
+
+struct flb_in_kafka_config {
+ struct flb_kafka kafka;
+ struct flb_input_instance *ins;
+ struct flb_log_event_encoder *log_encoder;
+ int poll_ms;
+ int format;
+ char *format_str;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_kmsg/CMakeLists.txt b/src/fluent-bit/plugins/in_kmsg/CMakeLists.txt
new file mode 100644
index 000000000..a4272294d
--- /dev/null
+++ b/src/fluent-bit/plugins/in_kmsg/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ in_kmsg.c)
+
+FLB_PLUGIN(in_kmsg "${src}" "")
diff --git a/src/fluent-bit/plugins/in_kmsg/in_kmsg.c b/src/fluent-bit/plugins/in_kmsg/in_kmsg.c
new file mode 100644
index 000000000..0f27c67a1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_kmsg/in_kmsg.c
@@ -0,0 +1,390 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_engine.h>
+#include <fluent-bit/flb_time.h>
+
+#include <msgpack.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <unistd.h>
+#include <ctype.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <inttypes.h>
+
+#include "in_kmsg.h"
+
+/*
+ * Note: Functions timeval_diff() and in_kmsg_boot_time() are based
+ * on syslog-ng-3.5 source code.
+ */
+static inline uint64_t timeval_diff(struct timeval *t1, struct timeval *t2)
+{
+ return ((uint64_t) t1->tv_sec - (uint64_t) t2->tv_sec) * KMSG_USEC_PER_SEC +
+ ((uint64_t) t1->tv_usec - (uint64_t) t2->tv_usec);
+}
+
+static int boot_time(struct timeval *boot_time)
+{
+ int fd, pos = 0;
+ int bytes;
+ uint64_t tdiff;
+ char buf[256];
+ struct timeval curr_time;
+
+ fd = open("/proc/uptime", O_RDONLY);
+ if (fd == -1) {
+ return -1;
+ }
+
+ bytes = read(fd, buf, sizeof(buf));
+ if (bytes <= 0) {
+ close(fd);
+ return -1;
+ }
+
+ close(fd);
+ gettimeofday(&curr_time, NULL);
+
+ /* Read the seconds part */
+ while (pos < bytes && buf[pos] != '.') {
+ if (isdigit(buf[pos])) {
+ boot_time->tv_sec = boot_time->tv_sec * 10 + ((buf[pos]) - '0');
+ }
+ else {
+ boot_time->tv_sec = 0;
+ return 0;
+ }
+ pos++;
+ }
+ pos++;
+
+ /* Then the microsecond part */
+ while (pos < bytes && buf[pos] != ' ') {
+ if (isdigit(buf[pos])) {
+ boot_time->tv_usec = boot_time->tv_usec * 10 + ((buf[pos]) - '0');
+ }
+ else {
+ boot_time->tv_sec = 0;
+ boot_time->tv_usec = 0;
+ return 0;
+ }
+ pos++;
+ }
+
+ tdiff = timeval_diff(&curr_time, boot_time);
+ boot_time->tv_sec = tdiff / KMSG_USEC_PER_SEC;
+ boot_time->tv_usec = tdiff % KMSG_USEC_PER_SEC;
+
+ return 0;
+}
+
+static inline int process_line(const char *line,
+ struct flb_input_instance *i_ins,
+ struct flb_in_kmsg_config *ctx)
+{
+ char priority; /* log priority */
+ uint64_t sequence; /* sequence number */
+ struct timeval tv; /* time value */
+ int line_len;
+ uint64_t val;
+ const char *p = line;
+ char *end = NULL;
+ struct flb_time ts;
+ int ret;
+
+ /* Increase buffer position */
+ ctx->buffer_id++;
+
+ errno = 0;
+ val = strtol(p, &end, 10);
+ if ((errno == ERANGE && (val == INT_MAX || val == INT_MIN))
+ || (errno != 0 && val == 0)) {
+ goto fail;
+ }
+
+ /* Priority */
+ priority = FLB_KLOG_PRI(val);
+
+ if (priority > ctx->prio_level) {
+ /* Drop line */
+ return 0;
+ }
+
+ /* Sequence */
+ p = strchr(p, ',');
+ if (!p) {
+ goto fail;
+ }
+ p++;
+
+ val = strtol(p, &end, 10);
+ if ((errno == ERANGE && (val == INT_MAX || val == INT_MIN))
+ || (errno != 0 && val == 0)) {
+ goto fail;
+ }
+
+ sequence = val;
+ p = ++end;
+
+ /* Timestamp */
+ val = strtol(p, &end, 10);
+ if ((errno == ERANGE && (val == INT_MAX || val == INT_MIN))
+ || (errno != 0 && val == 0)) {
+ goto fail;
+ }
+
+ tv.tv_sec = val/1000000;
+ tv.tv_usec = val - (tv.tv_sec * 1000000);
+
+ flb_time_set(&ts, ctx->boot_time.tv_sec + tv.tv_sec, tv.tv_usec * 1000);
+
+ /* Now process the human readable message */
+ p = strchr(p, ';');
+ if (!p) {
+ goto fail;
+ }
+ p++;
+
+ line_len = strlen(p);
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ &ctx->log_encoder,
+ &ts);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("priority"),
+ FLB_LOG_EVENT_CHAR_VALUE(priority),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("sequence"),
+ FLB_LOG_EVENT_UINT64_VALUE(sequence),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("sec"),
+ FLB_LOG_EVENT_UINT64_VALUE(tv.tv_sec),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("usec"),
+ FLB_LOG_EVENT_UINT64_VALUE(tv.tv_usec),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("msg"),
+ FLB_LOG_EVENT_STRING_VALUE((char *) p, line_len - 1));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "Error encoding record : %d", ret);
+
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+
+ flb_plg_debug(ctx->ins, "pri=%i seq=%" PRIu64 " sec=%ld usec=%ld msg_length=%i",
+ priority,
+ sequence,
+ (long int) tv.tv_sec,
+ (long int) tv.tv_usec,
+ line_len - 1);
+ return ret;
+
+ fail:
+ ctx->buffer_id--;
+ return -1;
+}
+
+/* Callback triggered when some Kernel Log buffer msgs are available */
+static int in_kmsg_collect(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ int bytes;
+ struct flb_in_kmsg_config *ctx = in_context;
+
+ bytes = read(ctx->fd, ctx->buf_data, ctx->buf_size - 1);
+ if (bytes == -1) {
+        if (errno == EPIPE) {
+ return -1;
+ }
+ return 0;
+ }
+ else if (bytes == 0) {
+ flb_errno();
+ return 0;
+ }
+ ctx->buf_len += bytes;
+
+ /* Always set a delimiter to avoid buffer trash */
+ ctx->buf_data[ctx->buf_len] = '\0';
+
+ /* Check if our buffer is full */
+ if (ctx->buffer_id + 1 == KMSG_BUFFER_SIZE) {
+ ret = flb_engine_flush(config, &in_kmsg_plugin);
+ if (ret == -1) {
+ ctx->buffer_id = 0;
+ }
+ }
+
+ /* Process and enqueue the received line */
+ process_line(ctx->buf_data, i_ins, ctx);
+ ctx->buf_len = 0;
+
+ return 0;
+}
+
+/* Init kmsg input */
+static int in_kmsg_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int fd;
+ int ret;
+ struct flb_in_kmsg_config *ctx;
+ (void) data;
+
+ ctx = flb_calloc(1, sizeof(struct flb_in_kmsg_config));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+ ctx->buf_data = flb_malloc(FLB_KMSG_BUF_SIZE);
+ if (!ctx->buf_data) {
+ flb_errno();
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->buf_len = 0;
+ ctx->buf_size = FLB_KMSG_BUF_SIZE;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* set context */
+ flb_input_set_context(ins, ctx);
+
+ /* open device */
+ fd = open(FLB_KMSG_DEV, O_RDONLY);
+ if (fd == -1) {
+ flb_errno();
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->fd = fd;
+
+ /* get the system boot time */
+ ret = boot_time(&ctx->boot_time);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not get system boot time for kmsg input plugin");
+ flb_free(ctx);
+ return -1;
+ }
+ flb_plg_debug(ctx->ins, "prio_level is %d", ctx->prio_level);
+
+ /* Set our collector based on a file descriptor event */
+ ret = flb_input_set_collector_event(ins,
+ in_kmsg_collect,
+ ctx->fd,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set collector for kmsg input plugin");
+ flb_free(ctx);
+ return -1;
+ }
+
+ ret = flb_log_event_encoder_init(&ctx->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "error initializing event encoder : %d", ret);
+
+ flb_free(ctx);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int in_kmsg_exit(void *data, struct flb_config *config)
+{
+ (void)*config;
+ struct flb_in_kmsg_config *ctx = data;
+
+ flb_log_event_encoder_destroy(&ctx->log_encoder);
+
+ if (ctx->fd >= 0) {
+ close(ctx->fd);
+ }
+
+ flb_free(ctx->buf_data);
+ flb_free(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_INT, "prio_level", "8",
+ 0, FLB_TRUE, offsetof(struct flb_in_kmsg_config, prio_level),
+     "The log level to filter: kernel log lines with a priority greater than prio_level are dropped. "
+ "Allowed values are 0-8. Default is 8."
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_kmsg_plugin = {
+ .name = "kmsg",
+ .description = "Kernel Log Buffer",
+ .cb_init = in_kmsg_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_kmsg_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_kmsg_exit,
+ .config_map = config_map
+};
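
For context, each record read from /dev/kmsg starts with a comma-separated prefix (syslog priority/facility value, sequence number, microseconds since boot) followed by ';' and the message text, which is what process_line() above extracts. A rough standalone parse of one such record; the sample line is hypothetical:

    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *line = "6,2565,102258085,-;example0: port 1 entered blocking state";
        unsigned int prival;
        uint64_t seq;
        uint64_t usec;
        const char *msg;

        if (sscanf(line, "%u,%" SCNu64 ",%" SCNu64, &prival, &seq, &usec) != 3) {
            return 1;
        }

        msg = strchr(line, ';');
        printf("priority=%u seq=%" PRIu64 " sec=%" PRIu64 " msg=%s\n",
               prival & 0x07, seq, usec / 1000000, msg ? msg + 1 : "");

        return 0;
    }
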
diff --git a/src/fluent-bit/plugins/in_kmsg/in_kmsg.h b/src/fluent-bit/plugins/in_kmsg/in_kmsg.h
new file mode 100644
index 000000000..48a28f5ec
--- /dev/null
+++ b/src/fluent-bit/plugins/in_kmsg/in_kmsg.h
@@ -0,0 +1,68 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_KMSG
+#define FLB_IN_KMSG
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <stdint.h>
+
+#define FLB_KMSG_DEV "/dev/kmsg"
+#define FLB_KMSG_BUF_SIZE 4096
+
+/* Alert levels, taken from util-linux sources */
+#define FLB_KLOG_EMERG 0
+#define FLB_KLOG_ALERT 1
+#define FLB_KLOG_CRIT 2
+#define FLB_KLOG_ERR 3
+#define FLB_KLOG_WARNING 4
+#define FLB_KLOG_NOTICE 5
+#define FLB_KLOG_INFO 6
+#define FLB_KLOG_DEBUG 7
+
+#define FLB_KLOG_PRIMASK 0x07
+#define FLB_KLOG_PRI(p) ((p) & FLB_KLOG_PRIMASK)
+
+#define KMSG_BUFFER_SIZE 256
+#define KMSG_USEC_PER_SEC 1000000
+
+struct flb_in_kmsg_config {
+ int fd; /* descriptor -> FLB_KMSG_DEV */
+ struct timeval boot_time; /* System boot time */
+
+ int prio_level;
+
+ /* Line processing */
+ int buffer_id;
+
+ /* Buffer */
+ char *buf_data;
+ size_t buf_len;
+ size_t buf_size;
+ struct flb_log_event_encoder log_encoder;
+ struct flb_input_instance *ins;
+};
+
+
+extern struct flb_input_plugin in_kmsg_plugin;
+
+#endif
diff --git a/src/fluent-bit/plugins/in_kubernetes_events/CMakeLists.txt b/src/fluent-bit/plugins/in_kubernetes_events/CMakeLists.txt
new file mode 100644
index 000000000..b860a55e3
--- /dev/null
+++ b/src/fluent-bit/plugins/in_kubernetes_events/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ kubernetes_events_conf.c
+ kubernetes_events.c)
+
+FLB_PLUGIN(in_kubernetes_events "${src}" "")
diff --git a/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events.c b/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events.c
new file mode 100644
index 000000000..97719fba6
--- /dev/null
+++ b/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events.c
@@ -0,0 +1,921 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_ra_key.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_strptime.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <fluent-bit/flb_compat.h>
+
+#include "kubernetes_events.h"
+#include "kubernetes_events_conf.h"
+
+#ifdef FLB_HAVE_SQLDB
+#include "kubernetes_events_sql.h"
+static int k8s_events_sql_insert_event(struct k8s_events *ctx, msgpack_object *item);
+#endif
+
+static int file_to_buffer(const char *path,
+ char **out_buf, size_t *out_size)
+{
+ int ret;
+ int len;
+ char *buf;
+ ssize_t bytes;
+ FILE *fp;
+ struct stat st;
+
+ if (!(fp = fopen(path, "r"))) {
+ return -1;
+ }
+
+ ret = stat(path, &st);
+ if (ret == -1) {
+ flb_errno();
+ fclose(fp);
+ return -1;
+ }
+
+ buf = flb_calloc(1, (st.st_size + 1));
+ if (!buf) {
+ flb_errno();
+ fclose(fp);
+ return -1;
+ }
+
+ bytes = fread(buf, st.st_size, 1, fp);
+ if (bytes < 1) {
+ flb_free(buf);
+ fclose(fp);
+ return -1;
+ }
+
+ fclose(fp);
+
+    // trim trailing newlines
+ for (len = st.st_size; len > 0; len--) {
+ if (buf[len-1] != '\n' && buf[len-1] != '\r') {
+ break;
+ }
+ }
+ buf[len] = '\0';
+
+ *out_buf = buf;
+ *out_size = len;
+
+ return 0;
+}
+
+/* Set K8s Authorization Token and get HTTP Auth Header */
+static int get_http_auth_header(struct k8s_events *ctx)
+{
+ int ret;
+ char *temp;
+ char *tk = NULL;
+ size_t tk_size = 0;
+
+ if (!ctx->token_file || strlen(ctx->token_file) == 0) {
+ return 0;
+ }
+
+ ret = file_to_buffer(ctx->token_file, &tk, &tk_size);
+ if (ret == -1) {
+ flb_plg_warn(ctx->ins, "cannot open %s", ctx->token_file);
+ return -1;
+ }
+ ctx->token_created = time(NULL);
+
+ /* Token */
+ if (ctx->token != NULL) {
+ flb_free(ctx->token);
+ }
+ ctx->token = tk;
+ ctx->token_len = tk_size;
+
+ /* HTTP Auth Header */
+ if (ctx->auth == NULL) {
+ ctx->auth = flb_malloc(tk_size + 32);
+ }
+ else if (ctx->auth_len < tk_size + 32) {
+ temp = flb_realloc(ctx->auth, tk_size + 32);
+ if (temp == NULL) {
+ flb_errno();
+ flb_free(ctx->auth);
+ ctx->auth = NULL;
+ return -1;
+ }
+ ctx->auth = temp;
+ }
+
+ if (!ctx->auth) {
+ return -1;
+ }
+
+ ctx->auth_len = snprintf(ctx->auth, tk_size + 32, "Bearer %s", tk);
+ return 0;
+}
+
+/* Refresh HTTP Auth Header if K8s Authorization Token is expired */
+static int refresh_token_if_needed(struct k8s_events *ctx)
+{
+ int expired = FLB_FALSE;
+ int ret;
+
+ if (!ctx->token_file || strlen(ctx->token_file) == 0) {
+ return 0;
+ }
+
+ if (ctx->token_created > 0) {
+ if (time(NULL) > ctx->token_created + ctx->token_ttl) {
+ expired = FLB_TRUE;
+ }
+ }
+
+ if (expired || ctx->token_created == 0) {
+ ret = get_http_auth_header(ctx);
+ if (ret == -1) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+static int timestamp_lookup(struct k8s_events *ctx, char *ts, struct flb_time *time)
+{
+ struct flb_tm tm = { 0 };
+
+ if (flb_strptime(ts, "%Y-%m-%dT%H:%M:%SZ", &tm) == NULL) {
+ return -1;
+ }
+
+ time->tm.tv_sec = flb_parser_tm2time(&tm);
+ time->tm.tv_nsec = 0;
+
+ return 0;
+}
+
+static msgpack_object *record_get_field_ptr(msgpack_object *obj, const char *fieldname)
+{
+ int i;
+ msgpack_object *k;
+ msgpack_object *v;
+
+ if (obj->type != MSGPACK_OBJECT_MAP) {
+ return NULL;
+ }
+
+ for (i = 0; i < obj->via.map.size; i++) {
+ k = &obj->via.map.ptr[i].key;
+ if (k->type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ if (strncmp(k->via.str.ptr, fieldname, strlen(fieldname)) == 0) {
+ v = &obj->via.map.ptr[i].val;
+ return v;
+ }
+ }
+ return NULL;
+}
+
+static int record_get_field_sds(msgpack_object *obj, const char *fieldname, flb_sds_t *val)
+{
+ msgpack_object *v;
+
+ v = record_get_field_ptr(obj, fieldname);
+ if (v == NULL) {
+ return 0;
+ }
+ if (v->type != MSGPACK_OBJECT_STR) {
+ return -1;
+ }
+
+ *val = flb_sds_create_len(v->via.str.ptr, v->via.str.size);
+ return 0;
+}
+
+static int record_get_field_time(msgpack_object *obj, const char *fieldname, time_t *val)
+{
+ msgpack_object *v;
+ struct flb_tm tm = { 0 };
+
+ v = record_get_field_ptr(obj, fieldname);
+ if (v == NULL) {
+ return -1;
+ }
+ if (v->type != MSGPACK_OBJECT_STR) {
+ return -1;
+ }
+
+ if (flb_strptime(v->via.str.ptr, "%Y-%m-%dT%H:%M:%SZ", &tm) == NULL) {
+ return -2;
+ }
+
+ *val = mktime(&tm.tm);
+ return 0;
+}
+
+static int record_get_field_uint64(msgpack_object *obj, const char *fieldname, uint64_t *val)
+{
+ msgpack_object *v;
+ char *end;
+
+ v = record_get_field_ptr(obj, fieldname);
+ if (v == NULL) {
+ return -1;
+ }
+
+ // attempt to parse string as number...
+ if (v->type == MSGPACK_OBJECT_STR) {
+ *val = strtoul(v->via.str.ptr, &end, 10);
+ if (end == NULL || (end < v->via.str.ptr + v->via.str.size)) {
+ return -1;
+ }
+ return 0;
+ }
+ if (v->type == MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ *val = v->via.u64;
+ return 0;
+ }
+ if (v->type == MSGPACK_OBJECT_NEGATIVE_INTEGER) {
+ *val = (uint64_t)v->via.i64;
+ return 0;
+ }
+ return -1;
+}
+
+static int item_get_timestamp(msgpack_object *obj, time_t *event_time)
+{
+ int ret;
+ msgpack_object *metadata;
+
+ // some events can have lastTimestamp and firstTimestamp set to
+ // NULL while having metadata.creationTimestamp set.
+ ret = record_get_field_time(obj, "lastTimestamp", event_time);
+ if (ret != -1) {
+ return FLB_TRUE;
+ }
+
+ ret = record_get_field_time(obj, "firstTimestamp", event_time);
+ if (ret != -1) {
+ return FLB_TRUE;
+ }
+
+ metadata = record_get_field_ptr(obj, "metadata");
+ if (metadata == NULL) {
+ return FLB_FALSE;
+ }
+
+ ret = record_get_field_time(metadata, "creationTimestamp", event_time);
+ if (ret != -1) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+static bool check_event_is_filtered(struct k8s_events *ctx, msgpack_object *obj)
+{
+ int ret;
+ time_t event_time;
+ time_t now;
+ msgpack_object *metadata;
+ flb_sds_t uid;
+ uint64_t resource_version;
+
+ ret = item_get_timestamp(obj, &event_time);
+    if (ret == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Cannot get timestamp for item in response");
+ return FLB_FALSE;
+ }
+
+ now = (time_t)(cfl_time_now() / 1000000000);
+ if (event_time < (now - ctx->retention_time)) {
+ flb_plg_debug(ctx->ins, "Item is older than retention_time: %ld < %ld",
+ event_time, (now - ctx->retention_time));
+ return FLB_TRUE;
+ }
+
+ metadata = record_get_field_ptr(obj, "metadata");
+ if (metadata == NULL) {
+ flb_plg_error(ctx->ins, "Cannot unpack item metadata in response");
+ return FLB_FALSE;
+ }
+
+ ret = record_get_field_uint64(metadata, "resourceVersion", &resource_version);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Cannot get resourceVersion for item in response");
+ return FLB_FALSE;
+ }
+
+ ret = record_get_field_sds(metadata, "uid", &uid);
+ if (ret == -1) {
+        flb_plg_error(ctx->ins, "Cannot get uid for item in response");
+ return FLB_FALSE;
+ }
+
+
+#ifdef FLB_HAVE_SQLDB
+ bool exists;
+
+
+ if (ctx->db) {
+ sqlite3_bind_text(ctx->stmt_get_kubernetes_event_exists_by_uid,
+ 1, uid, -1, NULL);
+ ret = sqlite3_step(ctx->stmt_get_kubernetes_event_exists_by_uid);
+ if (ret != SQLITE_ROW) {
+ if (ret != SQLITE_DONE) {
+ flb_plg_error(ctx->ins, "cannot execute kubernetes event exists");
+ }
+ sqlite3_clear_bindings(ctx->stmt_get_kubernetes_event_exists_by_uid);
+ sqlite3_reset(ctx->stmt_get_kubernetes_event_exists_by_uid);
+ flb_sds_destroy(uid);
+ return FLB_FALSE;
+ }
+
+ exists = sqlite3_column_int64(ctx->stmt_get_kubernetes_event_exists_by_uid, 0);
+
+ flb_plg_debug(ctx->ins, "is_filtered: uid=%s exists=%d", uid, exists);
+ sqlite3_clear_bindings(ctx->stmt_get_kubernetes_event_exists_by_uid);
+ sqlite3_reset(ctx->stmt_get_kubernetes_event_exists_by_uid);
+ flb_sds_destroy(uid);
+
+ return exists > 0 ? FLB_TRUE : FLB_FALSE;
+ }
+#endif
+
+ // check if this is an old event.
+ if (ctx->last_resource_version && resource_version <= ctx->last_resource_version) {
+ flb_plg_debug(ctx->ins, "skipping old object: %lu (< %lu)", resource_version,
+ ctx->last_resource_version);
+ flb_sds_destroy(uid);
+ return FLB_TRUE;
+ }
+
+ flb_sds_destroy(uid);
+ return FLB_FALSE;
+}
+
+static int process_events(struct k8s_events *ctx, char *in_data, size_t in_size, uint64_t *max_resource_version, flb_sds_t *continue_token)
+{
+ int i;
+ int ret = -1;
+ int root_type;
+ size_t consumed = 0;
+ char *buf_data;
+ size_t buf_size;
+ size_t off = 0;
+ struct flb_time ts;
+ struct flb_ra_value *rval;
+ uint64_t resource_version;
+ msgpack_unpacked result;
+ msgpack_object root;
+ msgpack_object k;
+ msgpack_object *items = NULL;
+ msgpack_object *item = NULL;
+ msgpack_object *item_metadata = NULL;
+ msgpack_object *metadata = NULL;
+
+
+ ret = flb_pack_json(in_data, in_size, &buf_data, &buf_size, &root_type, &consumed);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not process payload, incomplete or bad formed JSON");
+ goto json_error;
+ }
+
+ /* unpack */
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, buf_data, buf_size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ flb_plg_error(ctx->ins, "Cannot unpack response");
+ goto unpack_error;
+ }
+
+ /* lookup the items array */
+ root = result.data;
+ if (root.type != MSGPACK_OBJECT_MAP) {
+ ret = -1;
+ goto msg_error;
+ }
+
+ // Traverse the EventList for the metadata (for the continue token) and the items.
+ // https://kubernetes.io/docs/reference/kubernetes-api/cluster-resources/event-v1/#EventList
+ for (i = 0; i < root.via.map.size; i++) {
+ k = root.via.map.ptr[i].key;
+ if (k.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ if (strncmp(k.via.str.ptr, "items", 5) == 0) {
+ items = &root.via.map.ptr[i].val;
+ if (items->type != MSGPACK_OBJECT_ARRAY) {
+ flb_plg_error(ctx->ins, "Cannot unpack items");
+ goto msg_error;
+ }
+ }
+
+ if (strncmp(k.via.str.ptr, "metadata", 8) == 0) {
+ metadata = &root.via.map.ptr[i].val;
+ if (metadata->type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "Cannot unpack metadata");
+ goto msg_error;
+ }
+ }
+ }
+
+ if (items == NULL) {
+ flb_plg_error(ctx->ins, "Cannot find items in response");
+ goto msg_error;
+ }
+
+ if (metadata == NULL) {
+ flb_plg_error(ctx->ins, "Cannot find metatada in response");
+ goto msg_error;
+ }
+
+ ret = record_get_field_sds(metadata, "continue", continue_token);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Cannot process continue token");
+ goto msg_error;
+ }
+
+ for (i = 0; i < items->via.array.size; i++) {
+ if (items->via.array.ptr[i].type != MSGPACK_OBJECT_MAP) {
+ flb_plg_warn(ctx->ins, "Event that is not a map");
+ continue;
+ }
+ item_metadata = record_get_field_ptr(&items->via.array.ptr[i], "metadata");
+ if (item_metadata == NULL) {
+ flb_plg_warn(ctx->ins, "Event without metadata");
+ continue;
+ }
+ ret = record_get_field_uint64(item_metadata,
+ "resourceVersion", &resource_version);
+ if (ret == -1) {
+ continue;
+ }
+ if (resource_version > *max_resource_version) {
+ *max_resource_version = resource_version;
+ }
+ }
+
+ /* reset the log encoder */
+ flb_log_event_encoder_reset(ctx->encoder);
+
+ /* print every item from the items array */
+ for (i = 0; i < items->via.array.size; i++) {
+ item = &items->via.array.ptr[i];
+ if (item->type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "Cannot unpack item in response");
+ goto msg_error;
+ }
+
+ if (check_event_is_filtered(ctx, item) == FLB_TRUE) {
+ continue;
+ }
+
+#ifdef FLB_HAVE_SQLDB
+ if (ctx->db) {
+ k8s_events_sql_insert_event(ctx, item);
+ }
+#endif
+
+ /* get event timestamp */
+ rval = flb_ra_get_value_object(ctx->ra_timestamp, *item);
+ if (!rval || rval->type != FLB_RA_STRING) {
+ flb_plg_error(ctx->ins, "cannot retrieve event timestamp");
+ goto msg_error;
+ }
+
+ /* convert timestamp */
+ ret = timestamp_lookup(ctx, rval->val.string, &ts);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "cannot lookup event timestamp");
+ flb_ra_key_value_destroy(rval);
+ goto msg_error;
+ }
+
+ /* encode content as a log event */
+ flb_log_event_encoder_begin_record(ctx->encoder);
+ flb_log_event_encoder_set_timestamp(ctx->encoder, &ts);
+
+ ret = flb_log_event_encoder_set_body_from_msgpack_object(ctx->encoder, item);
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->encoder);
+ } else {
+ flb_plg_warn(ctx->ins, "unable to encode: %lu", resource_version);
+ }
+ flb_ra_key_value_destroy(rval);
+ }
+
+ if (ctx->encoder->output_length > 0) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->encoder->output_buffer,
+ ctx->encoder->output_length);
+ }
+
+msg_error:
+ msgpack_unpacked_destroy(&result);
+unpack_error:
+ flb_free(buf_data);
+json_error:
+ return ret;
+}
+
+static struct flb_http_client *make_event_api_request(struct k8s_events *ctx,
+ struct flb_connection *u_conn,
+ flb_sds_t continue_token)
+{
+ flb_sds_t url;
+ struct flb_http_client *c;
+
+
+ if (continue_token == NULL && ctx->limit_request == 0 && ctx->namespace == NULL) {
+ return flb_http_client(u_conn, FLB_HTTP_GET, K8S_EVENTS_KUBE_API_URI,
+ NULL, 0, ctx->api_host, ctx->api_port, NULL, 0);
+ }
+
+ if (ctx->namespace == NULL) {
+ url = flb_sds_create(K8S_EVENTS_KUBE_API_URI);
+ } else {
+ url = flb_sds_create_size(strlen(K8S_EVENTS_KUBE_NAMESPACE_API_URI) +
+ strlen(ctx->namespace));
+ flb_sds_printf(&url, K8S_EVENTS_KUBE_NAMESPACE_API_URI, ctx->namespace);
+ }
+
+ flb_sds_cat_safe(&url, "?", 1);
+ if (ctx->limit_request) {
+ if (continue_token != NULL) {
+ flb_sds_printf(&url, "continue=%s&", continue_token);
+ }
+ flb_sds_printf(&url, "limit=%d", ctx->limit_request);
+ }
+ c = flb_http_client(u_conn, FLB_HTTP_GET, url,
+ NULL, 0, ctx->api_host, ctx->api_port, NULL, 0);
+ flb_sds_destroy(url);
+ return c;
+}
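As a worked illustration of the URL building above (the namespace, limit and token values here are hypothetical): with kube_namespace set to "staging", kube_request_limit set to 50 and a continue token returned by a previous page, the request path becomes

    /api/v1/namespaces/staging/events?continue=<token>&limit=50

whereas the very first call with no namespace, no limit and no continue token short-circuits to the plain /api/v1/events endpoint.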
+
+#ifdef FLB_HAVE_SQLDB
+
+static int k8s_events_cleanup_db(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ struct k8s_events *ctx = (struct k8s_events *)in_context;
+ time_t retention_time_ago;
+ time_t now = (cfl_time_now() / 1000000000);
+
+ if (ctx->db == NULL) {
+ FLB_INPUT_RETURN(0);
+ }
+
+ retention_time_ago = now - (ctx->retention_time);
+ sqlite3_bind_int64(ctx->stmt_delete_old_kubernetes_events,
+ 1, (int64_t)retention_time_ago);
+ ret = sqlite3_step(ctx->stmt_delete_old_kubernetes_events);
+ if (ret != SQLITE_ROW && ret != SQLITE_DONE) {
+ flb_plg_error(ctx->ins, "cannot execute delete old kubernetes events");
+ }
+
+ sqlite3_clear_bindings(ctx->stmt_delete_old_kubernetes_events);
+ sqlite3_reset(ctx->stmt_delete_old_kubernetes_events);
+
+ FLB_INPUT_RETURN(0);
+}
+
+static int k8s_events_sql_insert_event(struct k8s_events *ctx, msgpack_object *item)
+{
+ int ret;
+ uint64_t resource_version;
+ time_t last;
+ msgpack_object *meta;
+ flb_sds_t uid;
+
+
+ meta = record_get_field_ptr(item, "metadata");
+ if (meta == NULL) {
+ flb_plg_error(ctx->ins, "unable to find metadata to save event");
+ return -1;
+ }
+
+ ret = record_get_field_uint64(meta, "resourceVersion", &resource_version);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "unable to find resourceVersion in metadata to save event");
+ return -1;
+ }
+
+ ret = record_get_field_sds(meta, "uid", &uid);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "unable to find uid in metadata to save event");
+ return -1;
+ }
+
+ ret = item_get_timestamp(item, &last);
+ if (ret == -FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Cannot get timestamp for item to save it");
+ flb_sds_destroy(uid);
+ return -1;
+ }
+
+ if (ret == -2) {
+ flb_plg_error(ctx->ins, "unable to parse lastTimestamp in item to save event");
+ flb_sds_destroy(uid);
+ return -1;
+ }
+
+ /* Bind parameters */
+ sqlite3_bind_text(ctx->stmt_insert_kubernetes_event, 1, uid, -1, 0);
+ sqlite3_bind_int64(ctx->stmt_insert_kubernetes_event, 2, resource_version);
+ sqlite3_bind_int64(ctx->stmt_insert_kubernetes_event, 3, (int64_t)last);
+
+ /* Run the insert */
+ ret = sqlite3_step(ctx->stmt_insert_kubernetes_event);
+ if (ret != SQLITE_DONE) {
+ sqlite3_clear_bindings(ctx->stmt_insert_kubernetes_event);
+ sqlite3_reset(ctx->stmt_insert_kubernetes_event);
+ flb_plg_error(ctx->ins, "cannot execute insert kubernetes event %s inode=%lu",
+ uid, resource_version);
+ flb_sds_destroy(uid);
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins,
+ "inserted k8s event: uid=%s, resource_version=%lu, last=%ld",
+ uid, resource_version, last);
+ sqlite3_clear_bindings(ctx->stmt_insert_kubernetes_event);
+ sqlite3_reset(ctx->stmt_insert_kubernetes_event);
+
+ flb_sds_destroy(uid);
+ return flb_sqldb_last_id(ctx->db);
+}
+
+#endif
+
+static int k8s_events_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ size_t b_sent;
+ struct flb_connection *u_conn = NULL;
+ struct flb_http_client *c = NULL;
+ struct k8s_events *ctx = in_context;
+ flb_sds_t continue_token = NULL;
+ uint64_t max_resource_version = 0;
+
+ if (pthread_mutex_trylock(&ctx->lock) != 0) {
+ FLB_INPUT_RETURN(0);
+ }
+
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!u_conn) {
+ flb_plg_error(ins, "upstream connection initialization error");
+ goto exit;
+ }
+
+ ret = refresh_token_if_needed(ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "failed to refresh token");
+ goto exit;
+ }
+
+ do {
+ c = make_event_api_request(ctx, u_conn, continue_token);
+ if (continue_token != NULL) {
+ flb_sds_destroy(continue_token);
+ continue_token = NULL;
+ }
+ if (!c) {
+ flb_plg_error(ins, "unable to create http client");
+ goto exit;
+ }
+ flb_http_buffer_size(c, 0);
+
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ if (ctx->auth_len > 0) {
+ flb_http_add_header(c, "Authorization", 13, ctx->auth, ctx->auth_len);
+ }
+
+ ret = flb_http_do(c, &b_sent);
+ if (ret != 0) {
+ flb_plg_error(ins, "http do error");
+ goto exit;
+ }
+
+ if (c->resp.status == 200) {
+ ret = process_events(ctx, c->resp.payload, c->resp.payload_size, &max_resource_version, &continue_token);
+ }
+ else {
+ if (c->resp.payload_size > 0) {
+ flb_plg_error(ctx->ins, "http_status=%i:\n%s", c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "http_status=%i", c->resp.status);
+ }
+ }
+ flb_http_client_destroy(c);
+ c = NULL;
+ } while(continue_token != NULL);
+
+ if (max_resource_version > ctx->last_resource_version) {
+ flb_plg_debug(ctx->ins, "set last resourceVersion=%lu", max_resource_version);
+ ctx->last_resource_version = max_resource_version;
+ }
+
+exit:
+ pthread_mutex_unlock(&ctx->lock);
+ if (c) {
+ flb_http_client_destroy(c);
+ }
+ if (u_conn) {
+ flb_upstream_conn_release(u_conn);
+ }
+ FLB_INPUT_RETURN(0);
+}
+
+static int k8s_events_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct k8s_events *ctx = NULL;
+
+ ctx = k8s_events_conf_create(ins);
+ if (!ctx) {
+ return -1;
+ }
+
+ ctx->coll_id = flb_input_set_collector_time(ins,
+ k8s_events_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec,
+ config);
+
+#ifdef FLB_HAVE_SQLDB
+ if (ctx->db) {
+ ctx->coll_cleanup_id = flb_input_set_collector_time(ins,
+ k8s_events_cleanup_db,
+ ctx->interval_sec,
+ ctx->interval_nsec,
+ config);
+ }
+#endif
+
+ return 0;
+}
+
+static int k8s_events_exit(void *data, struct flb_config *config)
+{
+ struct k8s_events *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ k8s_events_conf_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ /* Full Kubernetes API server URL */
+ {
+ FLB_CONFIG_MAP_STR, "kube_url", "https://kubernetes.default.svc",
+ 0, FLB_FALSE, 0,
+ "Kubernetes API server URL"
+ },
+
+ /* Refresh interval */
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct k8s_events, interval_sec),
+ "Set the polling interval for each channel"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct k8s_events, interval_nsec),
+ "Set the polling interval for each channel (sub seconds)"
+ },
+
+ /* TLS: set debug 'level' */
+ {
+ FLB_CONFIG_MAP_INT, "tls.debug", "0",
+ 0, FLB_TRUE, offsetof(struct k8s_events, tls_debug),
+ "set TLS debug level: 0 (no debug), 1 (error), "
+ "2 (state change), 3 (info) and 4 (verbose)"
+ },
+
+ /* TLS: enable verification */
+ {
+ FLB_CONFIG_MAP_BOOL, "tls.verify", "true",
+ 0, FLB_TRUE, offsetof(struct k8s_events, tls_verify),
+ "enable or disable verification of TLS peer certificate"
+ },
+
+ /* TLS: set tls.vhost feature */
+ {
+ FLB_CONFIG_MAP_STR, "tls.vhost", NULL,
+ 0, FLB_TRUE, offsetof(struct k8s_events, tls_vhost),
+ "set optional TLS virtual host"
+ },
+
+ /* Kubernetes TLS: CA file */
+ {
+ FLB_CONFIG_MAP_STR, "kube_ca_file", K8S_EVENTS_KUBE_CA,
+ 0, FLB_TRUE, offsetof(struct k8s_events, tls_ca_file),
+ "Kubernetes TLS CA file"
+ },
+
+ /* Kubernetes TLS: CA certs path */
+ {
+ FLB_CONFIG_MAP_STR, "kube_ca_path", NULL,
+ 0, FLB_TRUE, offsetof(struct k8s_events, tls_ca_path),
+ "Kubernetes TLS ca path"
+ },
+
+ /* Kubernetes Token file */
+ {
+ FLB_CONFIG_MAP_STR, "kube_token_file", K8S_EVENTS_KUBE_TOKEN,
+ 0, FLB_TRUE, offsetof(struct k8s_events, token_file),
+ "Kubernetes authorization token file"
+ },
+
+ /* Kubernetes Token file TTL */
+ {
+ FLB_CONFIG_MAP_TIME, "kube_token_ttl", "10m",
+ 0, FLB_TRUE, offsetof(struct k8s_events, token_ttl),
+ "kubernetes token ttl, until it is reread from the token file. Default: 10m"
+ },
+
+ {
+ FLB_CONFIG_MAP_INT, "kube_request_limit", "0",
+ 0, FLB_TRUE, offsetof(struct k8s_events, limit_request),
+ "kubernetes limit parameter for events query, no limit applied when set to 0"
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "kube_retention_time", "1h",
+ 0, FLB_TRUE, offsetof(struct k8s_events, retention_time),
+ "kubernetes retention time for events. Default: 1h"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "kube_namespace", NULL,
+ 0, FLB_TRUE, offsetof(struct k8s_events, namespace),
+ "kubernetes namespace to get events from, gets event from all namespaces by default."
+ },
+
+#ifdef FLB_HAVE_SQLDB
+ {
+ FLB_CONFIG_MAP_STR, "db", NULL,
+ 0, FLB_FALSE, 0,
+ "set a database file to keep track of recorded kubernetes events."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "db.sync", "normal",
+ 0, FLB_FALSE, 0,
+ "set a database sync method. values: extra, full, normal and off."
+ },
+#endif
+
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_kubernetes_events_plugin = {
+ .name = "kubernetes_events",
+ .description = "Kubernetes Events",
+ .cb_init = k8s_events_init,
+ .cb_pre_run = NULL,
+ .cb_collect = k8s_events_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = k8s_events_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET | FLB_INPUT_CORO | FLB_INPUT_THREADED
+};
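A minimal usage sketch for this input in the classic configuration format (the interval, limit and database path below are example values; the property names come from the config map above):

    [INPUT]
        name                 kubernetes_events
        interval_sec         5
        kube_request_limit   100
        kube_retention_time  1h
        db                   /var/lib/fluent-bit/k8s_events.db

With db set, previously ingested event UIDs are looked up in SQLite; without it, deduplication relies only on the in-memory last_resource_version.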
diff --git a/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events.h b/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events.h
new file mode 100644
index 000000000..3afd48570
--- /dev/null
+++ b/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events.h
@@ -0,0 +1,106 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_KUBERNETES_EVENTS_H
+#define FLB_IN_KUBERNETES_EVENTS_H
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_sqldb.h>
+
+#define DEFAULT_INTERVAL_SEC "0"
+#define DEFAULT_INTERVAL_NSEC "500000000"
+
+/* Filter context */
+struct k8s_events {
+ int coll_id;
+ int coll_cleanup_id;
+ int interval_sec; /* interval collection time (Second) */
+ int interval_nsec; /* interval collection time (Nanosecond) */
+ int retention_time; /* retention time limit, default 1 hour */
+
+ /* Configuration parameters */
+ char *api_host;
+ int api_port;
+ int api_https;
+ int tls_debug;
+ int tls_verify;
+ int kube_token_ttl;
+ flb_sds_t namespace;
+
+ /* API Server end point */
+ char kube_url[1024];
+
+ /* TLS CA certificate file */
+ char *tls_ca_path;
+ char *tls_ca_file;
+
+ /* TLS virtual host (optional), set by configmap */
+ flb_sds_t tls_vhost;
+
+ /* Kubernetes Token from FLB_KUBE_TOKEN file */
+ char *token_file;
+ char *token;
+ int token_ttl;
+ size_t token_len;
+ int token_created;
+
+ /* Pre-formatted HTTP Authorization header value */
+ char *auth;
+ size_t auth_len;
+
+ int dns_retries;
+ int dns_wait_time;
+
+ struct flb_tls *tls;
+
+ struct flb_log_event_encoder *encoder;
+
+ /* record accessor */
+ struct flb_record_accessor *ra_timestamp;
+ struct flb_record_accessor *ra_resource_version;
+
+ /* others */
+ struct flb_config *config;
+ struct flb_upstream *upstream;
+ struct flb_input_instance *ins;
+
+ /* limit for event queries */
+ int limit_request;
+ /* last highest seen resource_version */
+ uint64_t last_resource_version;
+
+#ifdef FLB_HAVE_SQLDB
+ /* State database */
+ struct flb_sqldb *db;
+ int db_sync;
+ int db_locking;
+ flb_sds_t db_journal_mode;
+ sqlite3_stmt *stmt_get_kubernetes_event_exists_by_uid;
+ sqlite3_stmt *stmt_insert_kubernetes_event;
+ sqlite3_stmt *stmt_delete_old_kubernetes_events;
+#endif
+
+ /* concurrency lock */
+ pthread_mutex_t lock;
+};
+
+#endif
\ No newline at end of file
diff --git a/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events_conf.c b/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events_conf.c
new file mode 100644
index 000000000..4f67d8cfc
--- /dev/null
+++ b/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events_conf.c
@@ -0,0 +1,326 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "kubernetes_events_conf.h"
+
+#ifdef FLB_HAVE_SQLDB
+#include "kubernetes_events_sql.h"
+
+
+/* Open or create database required by tail plugin */
+static struct flb_sqldb *flb_kubernetes_event_db_open(const char *path,
+ struct flb_input_instance *in,
+ struct k8s_events *ctx,
+ struct flb_config *config)
+{
+ int ret;
+ char tmp[64];
+ struct flb_sqldb *db;
+
+ /* Open/create the database */
+ db = flb_sqldb_open(path, in->name, config);
+ if (!db) {
+ return NULL;
+ }
+
+ /* Create the table schema if it doesn't exist */
+ ret = flb_sqldb_query(db, SQL_CREATE_KUBERNETES_EVENTS, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "db: could not create 'in_kubernetes_events' table");
+ flb_sqldb_close(db);
+ return NULL;
+ }
+
+ if (ctx->db_sync >= 0) {
+ snprintf(tmp, sizeof(tmp) - 1, SQL_PRAGMA_SYNC,
+ ctx->db_sync);
+ ret = flb_sqldb_query(db, tmp, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "db could not set pragma 'sync'");
+ flb_sqldb_close(db);
+ return NULL;
+ }
+ }
+
+ if (ctx->db_locking == FLB_TRUE) {
+ ret = flb_sqldb_query(db, SQL_PRAGMA_LOCKING_MODE, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "db: could not set pragma 'locking_mode'");
+ flb_sqldb_close(db);
+ return NULL;
+ }
+ }
+
+ if (ctx->db_journal_mode) {
+ snprintf(tmp, sizeof(tmp) - 1, SQL_PRAGMA_JOURNAL_MODE,
+ ctx->db_journal_mode);
+ ret = flb_sqldb_query(db, tmp, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "db could not set pragma 'journal_mode'");
+ flb_sqldb_close(db);
+ return NULL;
+ }
+ }
+
+ return db;
+}
+
+static int flb_kubernetes_event_db_close(struct flb_sqldb *db)
+{
+ flb_sqldb_close(db);
+ return 0;
+}
+
+#endif
+
+static int network_init(struct k8s_events *ctx, struct flb_config *config)
+{
+ int io_type = FLB_IO_TCP;
+
+ ctx->upstream = NULL;
+
+ if (ctx->api_https == FLB_TRUE) {
+ if (!ctx->tls_ca_path && !ctx->tls_ca_file) {
+ ctx->tls_ca_file = flb_strdup(K8S_EVENTS_KUBE_CA);
+ }
+
+ /* create a custom TLS context since we use user-defined certs */
+ ctx->tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ ctx->tls_verify,
+ ctx->tls_debug,
+ ctx->tls_vhost,
+ ctx->tls_ca_path,
+ ctx->tls_ca_file,
+ NULL, NULL, NULL);
+ if (!ctx->tls) {
+ return -1;
+ }
+
+ io_type = FLB_IO_TLS;
+ }
+
+ /* Create an Upstream context */
+ ctx->upstream = flb_upstream_create(config,
+ ctx->api_host,
+ ctx->api_port,
+ io_type,
+ ctx->tls);
+ if (!ctx->upstream) {
+ flb_plg_error(ctx->ins, "network initialization failed");
+ return -1;
+ }
+
+ return 0;
+}
+
+struct k8s_events *k8s_events_conf_create(struct flb_input_instance *ins)
+{
+ int off;
+ int ret;
+ const char *p;
+ const char *url;
+ const char *tmp;
+ struct k8s_events *ctx = NULL;
+ pthread_mutexattr_t attr;
+
+ ctx = flb_calloc(1, sizeof(struct k8s_events));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ pthread_mutexattr_init(&attr);
+ pthread_mutex_init(&ctx->lock, &attr);
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+ flb_input_set_context(ins, ctx);
+
+ ctx->encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+ if (!ctx->encoder) {
+ flb_plg_error(ins, "could not initialize event encoder");
+ k8s_events_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* Record accessor pattern */
+ ctx->ra_timestamp = flb_ra_create(K8S_EVENTS_RA_TIMESTAMP, FLB_TRUE);
+ if (!ctx->ra_timestamp) {
+ flb_plg_error(ctx->ins,
+ "could not create record accessor for metadata items");
+ k8s_events_conf_destroy(ctx);
+ return NULL;
+ }
+
+ ctx->ra_resource_version = flb_ra_create(K8S_EVENTS_RA_RESOURCE_VERSION, FLB_TRUE);
+ if (!ctx->ra_resource_version) {
+ flb_plg_error(ctx->ins, "could not create record accessor for resource version");
+ k8s_events_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* Get Kubernetes API server */
+ url = flb_input_get_property("kube_url", ins);
+ if (!url) {
+ ctx->api_host = flb_strdup(K8S_EVENTS_KUBE_API_HOST);
+ ctx->api_port = K8S_EVENTS_KUBE_API_PORT;
+ ctx->api_https = FLB_TRUE;
+ }
+ else {
+ tmp = url;
+
+ /* Check the protocol */
+ if (strncmp(tmp, "http://", 7) == 0) {
+ off = 7;
+ ctx->api_https = FLB_FALSE;
+ }
+ else if (strncmp(tmp, "https://", 8) == 0) {
+ off = 8;
+ ctx->api_https = FLB_TRUE;
+ }
+ else {
+ k8s_events_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* Get hostname and TCP port */
+ p = url + off;
+ tmp = strchr(p, ':');
+ if (tmp) {
+ ctx->api_host = flb_strndup(p, tmp - p);
+ tmp++;
+ ctx->api_port = atoi(tmp);
+ }
+ else {
+ ctx->api_host = flb_strdup(p);
+ ctx->api_port = K8S_EVENTS_KUBE_API_PORT;
+ }
+ }
+ snprintf(ctx->kube_url, sizeof(ctx->kube_url) - 1,
+ "%s://%s:%i",
+ ctx->api_https ? "https" : "http",
+ ctx->api_host, ctx->api_port);
+
+ flb_plg_info(ctx->ins, "API server: %s", ctx->kube_url);
+
+ /* network setup */
+ ret = network_init(ctx, ins->config);
+ if (ret == -1) {
+ k8s_events_conf_destroy(ctx);
+ return NULL;
+ }
+
+#ifdef FLB_HAVE_SQLDB
+ /* Initialize database */
+ tmp = flb_input_get_property("db", ins);
+ if (tmp) {
+ ctx->db = flb_kubernetes_event_db_open(tmp, ins, ctx, ins->config);
+ if (!ctx->db) {
+ flb_plg_error(ctx->ins, "could not open/create database");
+ k8s_events_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ if (ctx->db) {
+ ret = sqlite3_prepare_v2(ctx->db->handler,
+ SQL_KUBERNETES_EVENT_EXISTS_BY_UID,
+ -1,
+ &ctx->stmt_get_kubernetes_event_exists_by_uid,
+ 0);
+ if (ret != SQLITE_OK) {
+ flb_plg_error(ctx->ins, "error preparing database SQL statement: stmt_get_kubernetes_event_exists_by_uid");
+ k8s_events_conf_destroy(ctx);
+ return NULL;
+ }
+
+ ret = sqlite3_prepare_v2(ctx->db->handler,
+ SQL_INSERT_KUBERNETES_EVENTS,
+ -1,
+ &ctx->stmt_insert_kubernetes_event,
+ 0);
+ if (ret != SQLITE_OK) {
+ flb_plg_error(ctx->ins, "error preparing database SQL statement: stmt_insert_kubernetes_event");
+ k8s_events_conf_destroy(ctx);
+ return NULL;
+ }
+
+ ret = sqlite3_prepare_v2(ctx->db->handler,
+ SQL_DELETE_OLD_KUBERNETES_EVENTS,
+ -1,
+ &ctx->stmt_delete_old_kubernetes_events,
+ 0);
+ if (ret != SQLITE_OK) {
+ flb_plg_error(ctx->ins, "error preparing database SQL statement: stmt_delete_old_kubernetes_events");
+ k8s_events_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+#endif
+
+ return ctx;
+}
+
+void k8s_events_conf_destroy(struct k8s_events *ctx)
+{
+ if (ctx->ra_timestamp) {
+ flb_ra_destroy(ctx->ra_timestamp);
+ }
+
+ if (ctx->ra_resource_version) {
+ flb_ra_destroy(ctx->ra_resource_version);
+ }
+
+ if (ctx->upstream) {
+ flb_upstream_destroy(ctx->upstream);
+ }
+
+ if (ctx->encoder) {
+ flb_log_event_encoder_destroy(ctx->encoder);
+ }
+
+ if (ctx->api_host) {
+ flb_free(ctx->api_host);
+ }
+ if (ctx->token) {
+ flb_free(ctx->token);
+ }
+ if (ctx->auth) {
+ flb_free(ctx->auth);
+ }
+
+#ifdef FLB_HAVE_TLS
+ if (ctx->tls) {
+ flb_tls_destroy(ctx->tls);
+ }
+#endif
+
+#ifdef FLB_HAVE_SQLDB
+ if (ctx->db) {
+ flb_kubernetes_event_db_close(ctx->db);
+ }
+#endif
+
+ flb_free(ctx);
+}
diff --git a/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events_conf.h b/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events_conf.h
new file mode 100644
index 000000000..9d6b54197
--- /dev/null
+++ b/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events_conf.h
@@ -0,0 +1,47 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_KUBERNETES_EVENTS_CONF_H
+#define FLB_IN_KUBERNETES_EVENTS_CONF_H
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include "kubernetes_events.h"
+
+/* Kubernetes API server info */
+#define K8S_EVENTS_KUBE_API_HOST "kubernetes.default.svc"
+#define K8S_EVENTS_KUBE_API_PORT 443
+// /apis/events.k8s.io/v1/events
+// /apis/events.k8s.io/v1/namespaces/{namespace}/events
+#define K8S_EVENTS_KUBE_API_URI "/api/v1/events"
+#define K8S_EVENTS_KUBE_NAMESPACE_API_URI "/api/v1/namespaces/%s/events"
+
+/* secrets */
+#define K8S_EVENTS_KUBE_TOKEN "/var/run/secrets/kubernetes.io/serviceaccount/token"
+#define K8S_EVENTS_KUBE_CA "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+
+#define K8S_EVENTS_RA_TIMESTAMP "$metadata['creationTimestamp']"
+#define K8S_EVENTS_RA_RESOURCE_VERSION "$metadata['resourceVersion']"
+
+struct k8s_events *k8s_events_conf_create(struct flb_input_instance *ins);
+void k8s_events_conf_destroy(struct k8s_events *ctx);
+
+#endif
\ No newline at end of file
diff --git a/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events_sql.h b/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events_sql.h
new file mode 100644
index 000000000..3076791cc
--- /dev/null
+++ b/src/fluent-bit/plugins/in_kubernetes_events/kubernetes_events_sql.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_KUBERNETES_EVENTS_SQL_H
+#define FLB_KUBERNETES_EVENTS_SQL_H
+
+/*
+ * In Fluent Bit we try to have a common convention for table names,
+ * if the table belongs to an input/output plugin, use the plugins name
+ * with the name of the object or type.
+ *
+ * in_kubernetes_events plugin table to track kubernetes events:
+ * in_kubernetes_events
+ */
+#define SQL_CREATE_KUBERNETES_EVENTS \
+ "CREATE TABLE IF NOT EXISTS in_kubernetes_events (" \
+ " id INTEGER PRIMARY KEY," \
+ " uid TEXT NOT NULL," \
+ " resourceVersion INTEGER NOT NULL," \
+ " created INTEGER NOT NULL" \
+ ");"
+
+#define SQL_KUBERNETES_EVENT_EXISTS_BY_UID \
+ "SELECT COUNT(id) " \
+ " FROM in_kubernetes_events " \
+ " WHERE uid=@uid;"
+
+#define SQL_INSERT_KUBERNETES_EVENTS \
+ "INSERT INTO in_kubernetes_events (uid, resourceVersion, created)" \
+ " VALUES (@uid, @resourceVersion, @created);"
+
+#define SQL_DELETE_OLD_KUBERNETES_EVENTS \
+ "DELETE FROM in_kubernetes_events WHERE created <= @createdBefore;"
+
+#define SQL_PRAGMA_SYNC \
+ "PRAGMA synchronous=%i;"
+
+#define SQL_PRAGMA_JOURNAL_MODE \
+ "PRAGMA journal_mode=%s;"
+
+#define SQL_PRAGMA_LOCKING_MODE \
+ "PRAGMA locking_mode=EXCLUSIVE;"
+
+#endif
diff --git a/src/fluent-bit/plugins/in_lib/CMakeLists.txt b/src/fluent-bit/plugins/in_lib/CMakeLists.txt
new file mode 100644
index 000000000..87a19c5b2
--- /dev/null
+++ b/src/fluent-bit/plugins/in_lib/CMakeLists.txt
@@ -0,0 +1,10 @@
+# FIXME: there is something wrong when linking objects and this
+# static plugin: it should not be necessary to link against a specific
+# symbol if the object was already linked from the fluent-bit core in
+# src/, and jsmn should not be required either.
+
+set(src
+ in_lib.c
+ ../../src/flb_pack.c)
+
+FLB_PLUGIN(in_lib "${src}" "jsmn")
diff --git a/src/fluent-bit/plugins/in_lib/in_lib.c b/src/fluent-bit/plugins/in_lib/in_lib.c
new file mode 100644
index 000000000..466f1afe8
--- /dev/null
+++ b/src/fluent-bit/plugins/in_lib/in_lib.c
@@ -0,0 +1,279 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include "in_lib.h"
+
+static int in_lib_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ int dec_ret;
+ int enc_ret;
+ int bytes;
+ int out_size;
+ int capacity;
+ int size;
+ char *ptr;
+ char *pack;
+ struct flb_log_event record;
+ struct flb_log_event_decoder decoder;
+ struct flb_in_lib_config *ctx = in_context;
+
+ capacity = (ctx->buf_size - ctx->buf_len);
+
+ /* Allocate memory as required (FIXME: this will be limited later) */
+ if (capacity == 0) {
+ size = ctx->buf_size + LIB_BUF_CHUNK;
+ ptr = flb_realloc(ctx->buf_data, size);
+ if (!ptr) {
+ flb_errno();
+ return -1;
+ }
+ ctx->buf_data = ptr;
+ ctx->buf_size = size;
+ capacity = LIB_BUF_CHUNK;
+ }
+
+ bytes = flb_pipe_r(ctx->fd,
+ ctx->buf_data + ctx->buf_len,
+ capacity);
+ flb_plg_trace(ctx->ins, "in_lib read() = %i", bytes);
+ if (bytes == -1) {
+ perror("read");
+ if (errno == EPIPE) {
+ return -1;
+ }
+ return 0;
+ }
+ ctx->buf_len += bytes;
+
+ /* initially we should support json input */
+ ret = flb_pack_json_state(ctx->buf_data, ctx->buf_len,
+ &pack, &out_size, &ctx->state);
+ if (ret == FLB_ERR_JSON_PART) {
+ flb_plg_warn(ctx->ins, "lib data incomplete, waiting for more data...");
+ return 0;
+ }
+ else if (ret == FLB_ERR_JSON_INVAL) {
+ flb_plg_warn(ctx->ins, "lib data invalid");
+ flb_pack_state_reset(&ctx->state);
+ flb_pack_state_init(&ctx->state);
+ return -1;
+ }
+ ctx->buf_len = 0;
+
+ dec_ret = flb_log_event_decoder_init(&decoder, pack, out_size);
+ if (dec_ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %s",
+ flb_log_event_decoder_get_error_description(dec_ret));
+ flb_free(pack);
+ flb_pack_state_reset(&ctx->state);
+ flb_pack_state_init(&ctx->state);
+ return -1;
+ }
+
+ while ((dec_ret = flb_log_event_decoder_next(
+ &decoder,
+ &record)) == FLB_EVENT_DECODER_SUCCESS) {
+ enc_ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+ if (enc_ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "flb_log_event_encoder_begin_record error : %s",
+ flb_log_event_encoder_get_error_description(enc_ret));
+ flb_log_event_encoder_rollback_record(&ctx->log_encoder);
+ continue;
+ }
+
+ enc_ret = flb_log_event_encoder_set_timestamp(
+ &ctx->log_encoder,
+ &record.timestamp);
+ if (enc_ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "flb_log_event_encoder_set_timestamp error : %s",
+ flb_log_event_encoder_get_error_description(enc_ret));
+ flb_log_event_encoder_rollback_record(&ctx->log_encoder);
+ continue;
+ }
+
+ enc_ret = flb_log_event_encoder_set_metadata_from_msgpack_object(
+ &ctx->log_encoder,
+ record.metadata);
+ if (enc_ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "flb_log_event_encoder_set_metadata_from_msgpack_object error : %s",
+ flb_log_event_encoder_get_error_description(enc_ret));
+ flb_log_event_encoder_rollback_record(&ctx->log_encoder);
+ continue;
+ }
+
+ enc_ret = flb_log_event_encoder_set_body_from_msgpack_object(
+ &ctx->log_encoder,
+ record.body);
+ if (enc_ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "flb_log_event_encoder_set_body_from_msgpack_object error : %s",
+ flb_log_event_encoder_get_error_description(enc_ret));
+ flb_log_event_encoder_rollback_record(&ctx->log_encoder);
+ continue;
+ }
+
+ enc_ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ if (enc_ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "flb_log_event_encoder_commit_record error : %s",
+ flb_log_event_encoder_get_error_description(enc_ret));
+ flb_log_event_encoder_rollback_record(&ctx->log_encoder);
+ continue;
+ }
+ }
+
+ dec_ret = flb_log_event_decoder_get_last_result(&decoder);
+ if (dec_ret == FLB_EVENT_DECODER_SUCCESS) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "flb_log_event_decoder_get_last_result error : %s",
+ flb_log_event_decoder_get_error_description(dec_ret));
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+ flb_log_event_decoder_destroy(&decoder);
+
+ /* Reset the state */
+ flb_free(pack);
+
+ flb_pack_state_reset(&ctx->state);
+ flb_pack_state_init(&ctx->state);
+
+ return ret;
+}
+
+/* Initialize plugin */
+static int in_lib_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct flb_in_lib_config *ctx;
+ (void) data;
+
+ /* Allocate space for the configuration */
+ ctx = flb_malloc(sizeof(struct flb_in_lib_config));
+ if (!ctx) {
+ return -1;
+ }
+ ctx->ins = in;
+
+ /* Buffer for incoming data */
+ ctx->buf_size = LIB_BUF_CHUNK;
+ ctx->buf_data = flb_calloc(1, LIB_BUF_CHUNK);
+ ctx->buf_len = 0;
+
+ if (!ctx->buf_data) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "Could not allocate initial buf memory buffer");
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Init communication channel */
+ flb_input_channel_init(in);
+ ctx->fd = in->channel[0];
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ /* Collect upon data available on the standard input */
+ ret = flb_input_set_collector_event(in,
+ in_lib_collect,
+ ctx->fd,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Could not set collector for LIB input plugin");
+ flb_free(ctx->buf_data);
+ flb_free(ctx);
+ return -1;
+ }
+
+ ret = flb_log_event_encoder_init(&ctx->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "error initializing event encoder : %d", ret);
+
+ flb_free(ctx->buf_data);
+ flb_free(ctx);
+
+ return -1;
+ }
+
+ flb_pack_state_init(&ctx->state);
+
+ return 0;
+}
+
+static int in_lib_exit(void *data, struct flb_config *config)
+{
+ struct flb_in_lib_config *ctx = data;
+ struct flb_pack_state *s;
+
+ (void) config;
+
+ flb_log_event_encoder_destroy(&ctx->log_encoder);
+
+ if (ctx->buf_data) {
+ flb_free(ctx->buf_data);
+ }
+
+ s = &ctx->state;
+ flb_pack_state_reset(s);
+ flb_free(ctx);
+ return 0;
+}
+
+/* Plugin reference */
+struct flb_input_plugin in_lib_plugin = {
+ .name = "lib",
+ .description = "Library mode Input",
+ .cb_init = in_lib_init,
+ .cb_pre_run = NULL,
+ .cb_collect = NULL,
+ .cb_ingest = NULL,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_lib_exit
+};
diff --git a/src/fluent-bit/plugins/in_lib/in_lib.h b/src/fluent-bit/plugins/in_lib/in_lib.h
new file mode 100644
index 000000000..a5fc8a9ea
--- /dev/null
+++ b/src/fluent-bit/plugins/in_lib/in_lib.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_LIB_H
+#define FLB_IN_LIB_H
+
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <fluent-bit/flb_pthread.h>
+
+#define LIB_BUF_CHUNK 65536
+
+pthread_key_t flb_active_lib_context;
+
+/* Library input configuration & context */
+struct flb_in_lib_config {
+ int fd; /* instance input channel */
+ int buf_size; /* buffer size / capacity */
+ int buf_len; /* read buffer length */
+ char *buf_data; /* the real buffer */
+
+ struct flb_log_event_encoder log_encoder;
+ struct flb_pack_state state;
+ struct flb_input_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_mem/CMakeLists.txt b/src/fluent-bit/plugins/in_mem/CMakeLists.txt
new file mode 100644
index 000000000..613abd69f
--- /dev/null
+++ b/src/fluent-bit/plugins/in_mem/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ mem.c
+ proc.c)
+
+FLB_PLUGIN(in_mem "${src}" "")
diff --git a/src/fluent-bit/plugins/in_mem/mem.c b/src/fluent-bit/plugins/in_mem/mem.c
new file mode 100644
index 000000000..391ba6144
--- /dev/null
+++ b/src/fluent-bit/plugins/in_mem/mem.c
@@ -0,0 +1,320 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_log_event.h>
+#include <fluent-bit/flb_kernel.h>
+#include <fluent-bit/flb_pack.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/sysinfo.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include "mem.h"
+#include "proc.h"
+
+struct flb_input_plugin in_mem_plugin;
+
+static int in_mem_collect(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context);
+#if 0
+/* Locate a specific key into the buffer */
+static char *field(char *data, char *field)
+{
+ char *p;
+ char *q;
+ char *sep;
+ char *value;
+ int len = strlen(field);
+
+ p = strstr(data, field);
+ if (!p) {
+ return NULL;
+ }
+
+ sep = strchr(p, ':');
+ p = ++sep;
+ p++;
+
+ while (*p == ' ') p++;
+
+ q = strchr(p, ' ');
+ len = q - p;
+ value = flb_malloc(len + 1);
+ strncpy(value, p, len);
+ value[len] = '\0';
+
+ return value;
+}
+#endif
+
+static uint64_t calc_kb(unsigned long amount, unsigned int unit)
+{
+ unsigned long long bytes = amount;
+
+ /*
+ * Recent Linux versions return memory/swap sizes as multiples
+ * of a certain size unit. See sysinfo(2) for details.
+ */
+ if (unit > 1) {
+ bytes = bytes * unit;
+ }
+
+ bytes = bytes / 1024;
+
+ return (uint64_t) bytes;
+}
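A quick worked example of calc_kb() (the numbers are illustrative): if sysinfo() reports totalram = 2097152 with mem_unit = 4096, the total is 2097152 * 4096 = 8589934592 bytes, which calc_kb() converts to 8589934592 / 1024 = 8388608 KB (8 GiB).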
+
+static int mem_calc(struct flb_in_mem_info *m_info)
+{
+ int ret;
+ struct sysinfo info;
+
+ ret = sysinfo(&info);
+ if (ret == -1) {
+ flb_errno();
+ return -1;
+ }
+
+ /* set values in KBs */
+ m_info->mem_total = calc_kb(info.totalram, info.mem_unit);
+
+ /*
+ * This value seems to be MemAvailable if it is supported
+ * or MemFree on legacy Linux.
+ */
+ m_info->mem_free = calc_kb(info.freeram, info.mem_unit);
+
+ m_info->mem_used = m_info->mem_total - m_info->mem_free;
+
+ m_info->swap_total = calc_kb(info.totalswap, info.mem_unit);
+ m_info->swap_free = calc_kb(info.freeswap, info.mem_unit);
+ m_info->swap_used = m_info->swap_total - m_info->swap_free;
+
+ return 0;
+}
+
+static int in_mem_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct flb_in_mem_config *ctx;
+ (void) data;
+
+ /* Initialize context */
+ ctx = flb_malloc(sizeof(struct flb_in_mem_config));
+ if (!ctx) {
+ return -1;
+ }
+ ctx->idx = 0;
+ ctx->pid = 0;
+ ctx->page_size = sysconf(_SC_PAGESIZE);
+ ctx->ins = in;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *)ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Collection time setting */
+ if (ctx->interval_sec <= 0) {
+ ctx->interval_sec = atoi(DEFAULT_INTERVAL_SEC);
+ }
+ if (ctx->interval_nsec <= 0) {
+ ctx->interval_nsec = atoi(DEFAULT_INTERVAL_NSEC);
+ }
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ /* Set the collector */
+ ret = flb_input_set_collector_time(in,
+ in_mem_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not set collector for memory input plugin");
+ return -1;
+ }
+
+ ret = flb_log_event_encoder_init(&ctx->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "error initializing event encoder : %d", ret);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int in_mem_collect(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ struct proc_task *task = NULL;
+ struct flb_in_mem_config *ctx = in_context;
+ struct flb_in_mem_info info;
+
+ if (ctx->pid) {
+ task = proc_stat(ctx->pid, ctx->page_size);
+ if (!task) {
+ flb_plg_warn(ctx->ins, "could not measure PID %i", ctx->pid);
+ ctx->pid = 0;
+ }
+ }
+
+ ret = mem_calc(&info);
+
+ if (ret == -1) {
+ if (task) {
+ proc_free(task);
+ }
+ return -1;
+ }
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(
+ &ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("Mem.total"),
+ FLB_LOG_EVENT_UINT64_VALUE(info.mem_total),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("Mem.used"),
+ FLB_LOG_EVENT_UINT64_VALUE(info.mem_used),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("Mem.free"),
+ FLB_LOG_EVENT_UINT64_VALUE(info.mem_free),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("Swap.total"),
+ FLB_LOG_EVENT_UINT64_VALUE(info.swap_total),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("Swap.used"),
+ FLB_LOG_EVENT_UINT64_VALUE(info.swap_used),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("Swap.free"),
+ FLB_LOG_EVENT_UINT64_VALUE(info.swap_free));
+ }
+
+ if (task != NULL &&
+ ret == FLB_EVENT_ENCODER_SUCCESS) {
+ /* RSS bytes */
+
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("proc_bytes"),
+ FLB_LOG_EVENT_UINT64_VALUE(task->proc_rss),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("proc_hr"),
+ FLB_LOG_EVENT_UINT64_VALUE(task->proc_rss_hr));
+
+ proc_free(task);
+ }
+
+ flb_plg_trace(ctx->ins, "memory total=%lu kb, used=%lu kb, free=%lu kb",
+ info.mem_total, info.mem_used, info.mem_free);
+ flb_plg_trace(ctx->ins, "swap total=%lu kb, used=%lu kb, free=%lu kb",
+ info.swap_total, info.swap_used, info.swap_free);
+ ++ctx->idx;
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(i_ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ ret = 0;
+ }
+ else {
+ flb_plg_error(i_ins, "Error encoding record : %d", ret);
+
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+
+ return 0;
+}
+
+static int in_mem_exit(void *data, struct flb_config *config)
+{
+ (void) *config;
+ struct flb_in_mem_config *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ flb_log_event_encoder_destroy(&ctx->log_encoder);
+
+ /* done */
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_mem_config, interval_sec),
+ "Set the collector interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_mem_config, interval_nsec),
+ "Set the collector interval (subseconds)"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "pid", "0",
+ 0, FLB_TRUE, offsetof(struct flb_in_mem_config, pid),
+ "Set the PID of the process to measure"
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_input_plugin in_mem_plugin = {
+ .name = "mem",
+ .description = "Memory Usage",
+ .cb_init = in_mem_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_mem_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_mem_exit,
+ .config_map = config_map
+};
diff --git a/src/fluent-bit/plugins/in_mem/mem.h b/src/fluent-bit/plugins/in_mem/mem.h
new file mode 100644
index 000000000..3c28ff907
--- /dev/null
+++ b/src/fluent-bit/plugins/in_mem/mem.h
@@ -0,0 +1,51 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_MEM_H
+#define FLB_IN_MEM_H
+
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+#define DEFAULT_INTERVAL_SEC "1"
+#define DEFAULT_INTERVAL_NSEC "0"
+
+struct flb_in_mem_info {
+ uint64_t mem_total;
+ uint64_t mem_used;
+ uint64_t mem_free;
+ uint64_t swap_total;
+ uint64_t swap_used;
+ uint64_t swap_free;
+};
+
+struct flb_in_mem_config {
+ int idx;
+ int page_size;
+ int interval_sec;
+ int interval_nsec;
+ pid_t pid;
+ struct flb_input_instance *ins;
+ struct flb_log_event_encoder log_encoder;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_mem/proc.c b/src/fluent-bit/plugins/in_mem/proc.c
new file mode 100644
index 000000000..0e70b9de1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_mem/proc.c
@@ -0,0 +1,185 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+
+#include <fluent-bit/flb_input.h>
+#include "proc.h"
+
+static char *human_readable_size(long size)
+{
+ long u = 1024, i, len = 128;
+ char *buf;
+ static const char *__units[] = { "b", "K", "M", "G",
+ "T", "P", "E", "Z", "Y", NULL
+ };
+
+ buf = flb_malloc(len);
+ if (!buf) {
+ flb_errno();
+ return NULL;
+ }
+
+ for (i = 0; __units[i] != NULL; i++) {
+ if ((size / u) == 0) {
+ break;
+ }
+ u *= 1024;
+ }
+ if (!i) {
+ snprintf(buf, len, "%ld %s", size, __units[0]);
+ }
+ else {
+ float fsize = (float) ((double) size / (u / 1024));
+ snprintf(buf, len, "%.2f%s", fsize, __units[i]);
+ }
+
+ return buf;
+}
+
+/* Read file content into a memory buffer */
+static char *file_to_buffer(const char *path)
+{
+ FILE *fp;
+ char *buffer;
+
+ if (!(fp = fopen(path, "r"))) {
+ flb_errno();
+ return NULL;
+ }
+
+ buffer = flb_calloc(1, PROC_STAT_BUF_SIZE);
+ if (!buffer) {
+ fclose(fp);
+ flb_errno();
+ return NULL;
+ }
+
+ fread(buffer, PROC_STAT_BUF_SIZE, 1, fp);
+ if (ferror(fp) || !feof(fp)) {
+ flb_free(buffer);
+ fclose(fp);
+ return NULL;
+ }
+
+ fclose(fp);
+ return buffer;
+}
+
+
+struct proc_task *proc_stat(pid_t pid, int page_size)
+{
+ int ret;
+ char *p, *q;
+ char *buf;
+ char pid_path[PROC_PID_SIZE];
+ struct proc_task *t;
+
+ t = flb_calloc(1, sizeof(struct proc_task));
+ if (!t) {
+ flb_errno();
+ return NULL;
+ }
+
+ /* Compose path for /proc/PID/stat */
+ ret = snprintf(pid_path, PROC_PID_SIZE, "/proc/%i/stat", pid);
+ if (ret < 0) {
+ flb_free(t);
+ flb_errno();
+ return NULL;
+ }
+
+ buf = file_to_buffer(pid_path);
+ if (!buf) {
+ flb_free(t);
+ return NULL;
+ }
+
+ sscanf(buf, "%d", &t->pid);
+
+ /*
+ * workaround for processes with spaces in the name, so we don't screw up
+ * sscanf(3).
+ */
+ p = buf;
+ while (*p != '(') {
+ p++;
+ }
+ p++;
+
+ /* seek backwards from the tail of the buffer. */
+ q = buf + (PROC_STAT_BUF_SIZE - 1);
+ while (*q != ')' && p < q) {
+ q--;
+ }
+ if (p >= q) {
+ flb_free(buf);
+ flb_free(t);
+ return NULL;
+ }
+
+ strncpy(t->comm, p, q - p);
+ q += 2;
+
+ /* Read pending values */
+ sscanf(q, PROC_STAT_FORMAT,
+ &t->state,
+ &t->ppid,
+ &t->pgrp,
+ &t->session,
+ &t->tty_nr,
+ &t->tpgid,
+ &t->flags,
+ &t->minflt,
+ &t->cminflt,
+ &t->majflt,
+ &t->cmajflt,
+ &t->utime,
+ &t->stime,
+ &t->cutime,
+ &t->cstime,
+ &t->priority,
+ &t->nice,
+ &t->num_threads,
+ &t->itrealvalue,
+ &t->starttime,
+ &t->vsize,
+ &t->rss);
+
+ /* Internal conversion */
+ t->proc_rss = (t->rss * page_size);
+ t->proc_rss_hr = human_readable_size(t->proc_rss);
+ if ( t->proc_rss_hr == NULL ) {
+ flb_free(buf);
+ flb_free(t);
+ return NULL;
+ }
+
+ flb_free(buf);
+ return t;
+}
+
+void proc_free(struct proc_task *t)
+{
+ flb_free(t->proc_rss_hr);
+ flb_free(t);
+}
diff --git a/src/fluent-bit/plugins/in_mem/proc.h b/src/fluent-bit/plugins/in_mem/proc.h
new file mode 100644
index 000000000..79b4c3b81
--- /dev/null
+++ b/src/fluent-bit/plugins/in_mem/proc.h
@@ -0,0 +1,68 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef IN_MEM_PROC_H
+#define IN_MEM_PROC_H
+
+#define PROC_PID_SIZE 1024
+#define PROC_STAT_BUF_SIZE 1024
+
+/*
+ * This 'stat' format omits the first two fields, due to the nature
+ * of sscanf(3) and whitespaces, programs with spaces in the name can
+ * screw up when scanning the information.
+ */
+#define PROC_STAT_FORMAT "%c %d %d %d %d %d %u %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %llu %lu %ld"
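To see why the first two fields are skipped, consider a hypothetical /proc/PID/stat line for a process whose comm contains a space (the values are made up and truncated):

    4242 (tmux: server) S 1 4242 4242 0 -1 4194560 1523 0 0 0 ...

proc_stat() in proc.c reads the pid, then scans forward past the closing ')' and applies PROC_STAT_FORMAT to the remainder starting at the state field, so the embedded space in "tmux: server" never reaches sscanf(3).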
+
+/* Our task struct to read the /proc/PID/stat values */
+struct proc_task {
+ int pid; /* %d */
+ char comm[256]; /* %s */
+ char state; /* %c */
+ int ppid; /* %d */
+ int pgrp; /* %d */
+ int session; /* %d */
+ int tty_nr; /* %d */
+ int tpgid; /* %d */
+ unsigned int flags; /* %u */
+ unsigned long minflt; /* %lu */
+ unsigned long cminflt; /* %lu */
+ unsigned long majflt; /* %lu */
+ unsigned long cmajflt; /* %lu */
+ unsigned long utime; /* %lu */
+ unsigned long stime; /* %lu */
+ long cutime; /* %ld */
+ long cstime; /* %ld */
+ long priority; /* %ld */
+ long nice; /* %ld */
+ long num_threads; /* %ld */
+ long itrealvalue; /* %ld */
+ unsigned long long starttime; /* %llu */
+ unsigned long vsize; /* %lu */
+ long rss; /* %ld */
+
+ /* Internal conversion */
+ long proc_rss; /* bytes = (rss * PAGESIZE) */
+ char *proc_rss_hr; /* RSS in human readable format */
+};
+
+struct proc_task *proc_stat(pid_t pid, int page_size);
+void proc_free(struct proc_task *t);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_mqtt/CMakeLists.txt b/src/fluent-bit/plugins/in_mqtt/CMakeLists.txt
new file mode 100644
index 000000000..53259d541
--- /dev/null
+++ b/src/fluent-bit/plugins/in_mqtt/CMakeLists.txt
@@ -0,0 +1,7 @@
+set(src
+ mqtt.c
+ mqtt_conn.c
+ mqtt_prot.c
+ mqtt_config.c)
+
+FLB_PLUGIN(in_mqtt "${src}" "")
diff --git a/src/fluent-bit/plugins/in_mqtt/mqtt.c b/src/fluent-bit/plugins/in_mqtt/mqtt.c
new file mode 100644
index 000000000..d1ae74f1e
--- /dev/null
+++ b/src/fluent-bit/plugins/in_mqtt/mqtt.c
@@ -0,0 +1,162 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_downstream.h>
+#include <fluent-bit/flb_config_map.h>
+
+#include "mqtt.h"
+#include "mqtt_conn.h"
+#include "mqtt_config.h"
+
+/* Initialize plugin */
+static int in_mqtt_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ unsigned short int port;
+ int ret;
+ struct flb_in_mqtt_config *ctx;
+
+ (void) data;
+
+ /* Allocate space for the configuration */
+ ctx = mqtt_config_init(in);
+ if (!ctx) {
+ return -1;
+ }
+ ctx->ins = in;
+ ctx->msgp_len = 0;
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ /* Create downstream */
+ port = (unsigned short int) strtoul(ctx->tcp_port, NULL, 10);
+
+ ctx->downstream = flb_downstream_create(FLB_TRANSPORT_TCP,
+ in->flags,
+ ctx->listen,
+ port,
+ in->tls,
+ config,
+ &in->net_setup);
+
+ if (ctx->downstream == NULL) {
+ flb_plg_error(ctx->ins,
+ "could not initialize downstream on %s:%s. Aborting",
+ ctx->listen, ctx->tcp_port);
+
+ mqtt_config_free(ctx);
+
+ return -1;
+ }
+
+ flb_input_downstream_set(ctx->downstream, ctx->ins);
+
+    /* Collect upon data available on the server socket */
+ ret = flb_input_set_collector_event(in,
+ in_mqtt_collect,
+ ctx->downstream->server_fd,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not set collector for MQTT input plugin");
+ mqtt_config_free(ctx);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * For a server event, the collection event means a new client has arrived: we
+ * accept the connection and create a new MQTT connection instance which will
+ * wait for events/data (MQTT control packets).
+ */
+int in_mqtt_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_connection *connection;
+ struct mqtt_conn *conn;
+ struct flb_in_mqtt_config *ctx;
+
+ ctx = in_context;
+
+ connection = flb_downstream_conn_get(ctx->downstream);
+
+ if (connection == NULL) {
+ flb_plg_error(ctx->ins, "could not accept new connection");
+
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "[fd=%i] new TCP connection", connection->fd);
+
+ conn = mqtt_conn_add(connection, ctx);
+
+ if (!conn) {
+ flb_downstream_conn_release(connection);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int in_mqtt_exit(void *data, struct flb_config *config)
+{
+ (void) *config;
+ struct flb_in_mqtt_config *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ mqtt_conn_destroy_all(ctx);
+
+ mqtt_config_free(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "payload_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_mqtt_config, payload_key),
+ "Key where the payload will be preserved"
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_mqtt_plugin = {
+ .name = "mqtt",
+ .description = "MQTT, listen for Publish messages",
+ .cb_init = in_mqtt_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_mqtt_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_mqtt_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET_SERVER | FLB_IO_OPT_TLS
+};
diff --git a/src/fluent-bit/plugins/in_mqtt/mqtt.h b/src/fluent-bit/plugins/in_mqtt/mqtt.h
new file mode 100644
index 000000000..01c3b7be9
--- /dev/null
+++ b/src/fluent-bit/plugins/in_mqtt/mqtt.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_MQTT_H
+#define FLB_IN_MQTT_H
+
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#define MQTT_MSGP_BUF_SIZE 8192
+
+struct flb_in_mqtt_config {
+ char *listen; /* Listen interface */
+ char *tcp_port; /* TCP Port */
+
+ flb_sds_t payload_key; /* payload key */
+
+ int msgp_len; /* msgpack data length */
+ char msgp[MQTT_MSGP_BUF_SIZE]; /* msgpack static buffer */
+ struct flb_input_instance *ins; /* plugin input instance */
+ struct flb_downstream *downstream; /* Client manager */
+ struct mk_list conns; /* Active connections */
+ struct flb_log_event_encoder *log_encoder;
+};
+
+int in_mqtt_collect(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_mqtt/mqtt_config.c b/src/fluent-bit/plugins/in_mqtt/mqtt_config.c
new file mode 100644
index 000000000..800834c05
--- /dev/null
+++ b/src/fluent-bit/plugins/in_mqtt/mqtt_config.c
@@ -0,0 +1,82 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_downstream.h>
+#include <fluent-bit/flb_utils.h>
+
+#include "mqtt.h"
+#include "mqtt_config.h"
+
+struct flb_in_mqtt_config *mqtt_config_init(struct flb_input_instance *ins)
+{
+ char tmp[16];
+ struct flb_in_mqtt_config *config;
+ int ret;
+
+ config = flb_calloc(1, sizeof(struct flb_in_mqtt_config));
+ if (!config) {
+ flb_errno();
+ return NULL;
+ }
+
+ ret = flb_input_config_map_set(ins, (void*) config);
+ if (ret == -1) {
+ flb_plg_error(ins, "could not initialize config map");
+ flb_free(config);
+ return NULL;
+ }
+
+ config->log_encoder = flb_log_event_encoder_create(
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (config->log_encoder == NULL) {
+ flb_plg_error(ins, "could not initialize event encoder");
+ mqtt_config_free(config);
+
+ return NULL;
+ }
+
+ /* Listen interface (if not set, defaults to 0.0.0.0) */
+ flb_input_net_default_listener("0.0.0.0", 1883, ins);
+
+ /* Map 'listen' and 'port' into the local context */
+ config->listen = ins->host.listen;
+ snprintf(tmp, sizeof(tmp) - 1, "%d", ins->host.port);
+ config->tcp_port = flb_strdup(tmp);
+
+ mk_list_init(&config->conns);
+ return config;
+}
+
+void mqtt_config_free(struct flb_in_mqtt_config *config)
+{
+ if (config->downstream != NULL) {
+ flb_downstream_destroy(config->downstream);
+ }
+
+ if (config->log_encoder != NULL) {
+ flb_log_event_encoder_destroy(config->log_encoder);
+ }
+
+ flb_free(config->tcp_port);
+ flb_free(config);
+}
diff --git a/src/fluent-bit/plugins/in_mqtt/mqtt_config.h b/src/fluent-bit/plugins/in_mqtt/mqtt_config.h
new file mode 100644
index 000000000..709c3dd95
--- /dev/null
+++ b/src/fluent-bit/plugins/in_mqtt/mqtt_config.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_MQTT_CONFIG_H
+#define FLB_MQTT_CONFIG_H
+
+#include "mqtt.h"
+#include <fluent-bit/flb_input.h>
+
+struct flb_in_mqtt_config *mqtt_config_init(struct flb_input_instance *in);
+void mqtt_config_free(struct flb_in_mqtt_config *config);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_mqtt/mqtt_conn.c b/src/fluent-bit/plugins/in_mqtt/mqtt_conn.c
new file mode 100644
index 000000000..32ade2f6e
--- /dev/null
+++ b/src/fluent-bit/plugins/in_mqtt/mqtt_conn.c
@@ -0,0 +1,157 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_engine.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_downstream.h>
+
+#include "mqtt.h"
+#include "mqtt_prot.h"
+#include "mqtt_conn.h"
+
+/* Callback invoked every time an event is triggered for a connection */
+int mqtt_conn_event(void *data)
+{
+ int ret;
+ int bytes;
+ int available;
+ struct mk_event *event;
+ struct mqtt_conn *conn;
+ struct flb_in_mqtt_config *ctx;
+ struct flb_connection *connection;
+
+ connection = (struct flb_connection *) data;
+
+ conn = connection->user_data;
+
+ ctx = conn->ctx;
+
+ event = &connection->event;
+
+ if (event->mask & MK_EVENT_READ) {
+ available = sizeof(conn->buf) - conn->buf_len;
+
+ bytes = flb_io_net_read(connection,
+ (void *) &conn->buf[conn->buf_len],
+ available);
+
+ if (bytes > 0) {
+ conn->buf_len += bytes;
+ flb_plg_trace(ctx->ins, "[fd=%i] read()=%i bytes",
+ connection->fd,
+ bytes);
+
+ ret = mqtt_prot_parser(conn);
+ if (ret < 0) {
+ mqtt_conn_del(conn);
+ return -1;
+ }
+ }
+ else {
+ flb_plg_debug(ctx->ins, "[fd=%i] connection closed",
+ connection->fd);
+
+ mqtt_conn_del(conn);
+ }
+ }
+ else if (event->mask & MK_EVENT_CLOSE) {
+ flb_plg_debug(ctx->ins, "[fd=%i] hangup", event->fd);
+ }
+
+ return 0;
+}
+
+/* Create a new mqtt request instance */
+struct mqtt_conn *mqtt_conn_add(struct flb_connection *connection,
+ struct flb_in_mqtt_config *ctx)
+{
+ struct mqtt_conn *conn;
+ int ret;
+
+ conn = flb_malloc(sizeof(struct mqtt_conn));
+ if (!conn) {
+ flb_errno();
+ return NULL;
+ }
+
+ conn->connection = connection;
+
+ /* Set data for the event-loop */
+ MK_EVENT_NEW(&connection->event);
+
+ connection->user_data = conn;
+ connection->event.type = FLB_ENGINE_EV_CUSTOM;
+ connection->event.handler = mqtt_conn_event;
+
+ /* Connection info */
+ conn->ctx = ctx;
+ conn->buf_pos = 0;
+ conn->buf_len = 0;
+ conn->buf_frame_end = 0;
+ conn->status = MQTT_NEW;
+
+ /* Register instance into the event loop */
+ ret = mk_event_add(flb_engine_evl_get(),
+ connection->fd,
+ FLB_ENGINE_EV_CUSTOM,
+ MK_EVENT_READ,
+ &connection->event);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not register new connection");
+ flb_free(conn);
+
+ return NULL;
+ }
+
+ mk_list_add(&conn->_head, &ctx->conns);
+
+ return conn;
+}
+
+int mqtt_conn_del(struct mqtt_conn *conn)
+{
+ /* The downstream unregisters the file descriptor from the event-loop
+ * so there's nothing to be done by the plugin
+ */
+ flb_downstream_conn_release(conn->connection);
+
+ /* Release resources */
+ mk_list_del(&conn->_head);
+
+ flb_free(conn);
+
+ return 0;
+}
+
+int mqtt_conn_destroy_all(struct flb_in_mqtt_config *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct mqtt_conn *conn;
+
+ mk_list_foreach_safe(head, tmp, &ctx->conns) {
+ conn = mk_list_entry(head, struct mqtt_conn, _head);
+ mqtt_conn_del(conn);
+ }
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_mqtt/mqtt_conn.h b/src/fluent-bit/plugins/in_mqtt/mqtt_conn.h
new file mode 100644
index 000000000..43f98f09e
--- /dev/null
+++ b/src/fluent-bit/plugins/in_mqtt/mqtt_conn.h
@@ -0,0 +1,49 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_MQTT_CONN_H
+#define FLB_MQTT_CONN_H
+
+#include <fluent-bit/flb_connection.h>
+
+enum {
+ MQTT_NEW = 1, /* it's a new connection */
+ MQTT_CONNECTED = 2, /* MQTT connection per protocol spec OK */
+ MQTT_NEXT = 4 /* Waiting for Control packets */
+};
+
+/* This structure represents an MQTT connection */
+struct mqtt_conn {
+ int status; /* Connection status */
+ int packet_type; /* MQTT packet type */
+ int packet_length;
+ int buf_frame_end; /* Frame end position */
+ int buf_pos; /* Index position */
+ int buf_len; /* Buffer content length */
+ unsigned char buf[1024]; /* Buffer data */
+ struct flb_in_mqtt_config *ctx; /* Plugin configuration context */
+ struct flb_connection *connection;
+ struct mk_list _head; /* Link to flb_in_mqtt_config->conns */
+};
+
+struct mqtt_conn *mqtt_conn_add(struct flb_connection *connection, struct flb_in_mqtt_config *ctx);
+int mqtt_conn_del(struct mqtt_conn *conn);
+int mqtt_conn_destroy_all(struct flb_in_mqtt_config *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_mqtt/mqtt_prot.c b/src/fluent-bit/plugins/in_mqtt/mqtt_prot.c
new file mode 100644
index 000000000..e0267daf2
--- /dev/null
+++ b/src/fluent-bit/plugins/in_mqtt/mqtt_prot.c
@@ -0,0 +1,465 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <msgpack.h>
+
+#include "mqtt.h"
+#include "mqtt_prot.h"
+
+#define BUFC() conn->buf[conn->buf_pos]
+#define BUF_AVAIL() conn->buf_len - conn->buf_pos
+#define BIT_SET(a, b) ((a) |= (1 << (b)))
+#define BIT_CHECK(a, b) ((a) & (1 << (b)))
+
+/*
+static inline void print_hex(struct mqtt_conn *conn)
+{
+ int x;
+
+ printf("\n--------HEX--------> ");
+ printf("buf_pos=%i buf_len=%i\n", conn->buf_pos, conn->buf_len);
+ for (x = conn->buf_pos; x < conn->buf_len; x++) {
+ printf("%x ", conn->buf[x]);
+ }
+ printf("\n--------------------\n\n");
+}
+
+static inline void print_str(struct mqtt_conn *conn)
+{
+ int x;
+
+ printf("\n--------HEX--------> ");
+ printf("buf_pos=%i buf_len=%i\n", conn->buf_pos, conn->buf_len);
+ for (x = conn->buf_pos; x < conn->buf_len; x++) {
+ printf("%c", conn->buf[x]);
+ }
+ printf("\n--------------------\n\n");
+}
+*/
+
+/*
+ * Drop the current packet from the buffer: move the remaining bytes
+ * to the front of the buffer and adjust the length.
+ */
+static inline int mqtt_packet_drop(struct mqtt_conn *conn)
+{
+ int move_bytes;
+
+ if (conn->buf_pos == conn->buf_len) {
+ conn->buf_frame_end = 0;
+ conn->buf_len = 0;
+ conn->buf_pos = 0;
+ return 0;
+ }
+
+ /* Check boundaries */
+ if (conn->buf_pos + 1 > conn->buf_len) {
+ conn->buf_frame_end = 0;
+ conn->buf_len = 0;
+ conn->buf_pos = 0;
+ return 0;
+ }
+
+ move_bytes = conn->buf_pos + 1;
+ memmove(conn->buf,
+ conn->buf + move_bytes,
+ conn->buf_len - move_bytes);
+
+ conn->buf_frame_end = 0;
+ conn->buf_len -= move_bytes;
+ conn->buf_pos = 0;
+
+ return 0;
+}
+
+/*
+ * It writes the packet control header which includes the packet type
+ * and the remaining length of the packet. The incoming buffer must have
+ * at least 6 bytes of space.
+ *
+ * The function returns the number of bytes used.
+ */
+static inline int mqtt_packet_header(int type, int length, char *buf)
+{
+ int i = 0;
+ uint8_t byte;
+
+ buf[i] = (type << 4) | 0;
+ i++;
+
+ do {
+ byte = length % 128;
+ length = (length / 128);
+ if (length > 0) {
+ byte = (byte | 128);
+ }
+ buf[i] = byte;
+ i++;
+ } while (length > 0);
+
+ return i;
+}
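A usage sketch of the encoder above, illustrating the MQTT variable-length "remaining length" field; the byte values are worked out from the loop rather than taken from the patch.

    char hdr[6];
    int used;

    /* CONNACK with a 2-byte variable header: writes 0x20 0x02, used == 2 */
    used = mqtt_packet_header(MQTT_CONNACK, 2, hdr);

    /* A remaining length of 321 needs two bytes: 321 % 128 = 65 with the
     * continuation bit set (0xC1), then 321 / 128 = 2 (0x02), so used == 3 */
    used = mqtt_packet_header(MQTT_PUBLISH, 321, hdr);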
+
+/* Collect a buffer of JSON data and convert it to Fluent Bit format */
+static int mqtt_data_append(char *topic, size_t topic_len,
+ char *msg, int msg_len,
+ void *in_context)
+{
+ int i;
+ int ret;
+ int root_type;
+ size_t out;
+ size_t off = 0;
+ char *pack;
+ msgpack_object root;
+ msgpack_unpacked result;
+ struct flb_in_mqtt_config *ctx = in_context;
+
+ /* Convert our incoming JSON to MsgPack */
+ ret = flb_pack_json(msg, msg_len, &pack, &out, &root_type, NULL);
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "MQTT Packet incomplete or is not JSON");
+ return -1;
+ }
+
+ off = 0;
+ msgpack_unpacked_init(&result);
+ if (msgpack_unpack_next(&result, pack, out, &off) != MSGPACK_UNPACK_SUCCESS) {
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ if (result.data.type != MSGPACK_OBJECT_MAP){
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+ root = result.data;
+
+
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("topic"),
+ FLB_LOG_EVENT_STRING_VALUE(topic, topic_len));
+ }
+
+ if (ctx->payload_key) {
+ flb_log_event_encoder_append_body_string_length(ctx->log_encoder, flb_sds_len(ctx->payload_key));
+ flb_log_event_encoder_append_body_string_body(ctx->log_encoder, ctx->payload_key,
+ flb_sds_len(ctx->payload_key));
+ flb_log_event_encoder_body_begin_map(ctx->log_encoder);
+ }
+
+ /* Re-pack original KVs */
+ for (i = 0;
+ i < root.via.map.size &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ i++) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&root.via.map.ptr[i].key),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&root.via.map.ptr[i].val));
+ }
+
+ if (ctx->payload_key) {
+ flb_log_event_encoder_body_commit_map(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "log event encoding error : %d", ret);
+
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ msgpack_unpacked_destroy(&result);
+ flb_free(pack);
+
+ return ret;
+}
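To illustrate the record layout produced above (an invented example, not output captured from the plugin): a PUBLISH to topic "sensors/room1" carrying the JSON payload {"temp": 21.5} yields:

    payload (JSON):              {"temp": 21.5}
    record without payload_key:  {"topic": "sensors/room1", "temp": 21.5}
    record with payload_key:     {"topic": "sensors/room1", "payload": {"temp": 21.5}}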
+
+
+/*
+ * Handle a CONNECT request control packet:
+ *
+ * basically we need to acknowledge the sender so it can start
+ * publishing messages to our service.
+ */
+static int mqtt_handle_connect(struct mqtt_conn *conn)
+{
+ int i;
+ int ret;
+ size_t sent;
+ char buf[4] = {0, 0, 0, 0};
+ struct flb_in_mqtt_config *ctx = conn->ctx;
+
+ i = mqtt_packet_header(MQTT_CONNACK, 2 , (char *) &buf);
+ BIT_SET(buf[i], 0);
+ i++;
+ buf[i] = MQTT_CONN_ACCEPTED;
+
+ /* write CONNACK message */
+ ret = flb_io_net_write(conn->connection,
+ (void *) buf,
+ 4,
+ &sent);
+
+ flb_plg_trace(ctx->ins, "[fd=%i] CMD CONNECT (connack=%i bytes)",
+ conn->connection->fd, ret);
+
+ return ret;
+}
+
+/*
+ * Handle a PUBLISH control packet
+ */
+static int mqtt_handle_publish(struct mqtt_conn *conn)
+{
+ int topic;
+ int topic_len;
+ uint8_t qos;
+ size_t sent;
+ uint16_t hlen;
+ uint16_t packet_id;
+ char buf[4];
+ struct flb_in_mqtt_config *ctx = conn->ctx;
+
+ /*
+ * DUP: we skip duplicated messages.
+ * QOS: We process this.
+ * Retain: skipped
+ */
+
+ qos = ((conn->buf[0] >> 1) & 0x03);
+ conn->buf_pos++;
+
+ /* Topic */
+ hlen = BUFC() << 8;
+ conn->buf_pos++;
+ hlen |= BUFC();
+
+ /* Validate topic length against current buffer capacity (overflow) */
+ if (hlen > (conn->buf_len - conn->buf_pos)) {
+ flb_plg_debug(ctx->ins, "invalid topic length");
+ return -1;
+ }
+
+ conn->buf_pos++;
+ topic = conn->buf_pos;
+ topic_len = hlen;
+ conn->buf_pos += hlen;
+
+ /* Check QOS flag and respond if required */
+ if (qos > MQTT_QOS_LEV0) {
+ /* Packet Identifier */
+ packet_id = BUFC() << 8;
+ conn->buf_pos++;
+ packet_id |= BUFC();
+ conn->buf_pos++;
+
+ if (qos == MQTT_QOS_LEV1) {
+ mqtt_packet_header(MQTT_PUBACK, 2 , (char *) &buf);
+ }
+ else if (qos == MQTT_QOS_LEV2) {
+ mqtt_packet_header(MQTT_PUBREC, 2 , (char *) &buf);
+ }
+ /* Set the identifier that we are replying to */
+ buf[2] = (packet_id >> 8) & 0xff;
+ buf[3] = (packet_id & 0xff);
+
+ /* This operation should be checked */
+ flb_io_net_write(conn->connection,
+ (void *) buf,
+ 4,
+ &sent);
+ }
+
+ /* Message */
+ mqtt_data_append((char *) (conn->buf + topic), topic_len,
+ (char *) (conn->buf + conn->buf_pos),
+ conn->buf_frame_end - conn->buf_pos + 1,
+ conn->ctx);
+
+ flb_plg_trace(ctx->ins, "[fd=%i] CMD PUBLISH",
+ conn->connection->fd);
+ return 0;
+}
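A worked byte layout for a minimal QoS 0 PUBLISH, matching the offsets consumed above (values computed for illustration, not captured traffic):

    0x30 0x0c                     fixed header: PUBLISH, QoS 0, remaining length 12
    0x00 0x03 'a' '/' 'b'         2-byte topic length (3), topic "a/b"
    '{' '"' 'x' '"' ':' '1' '}'   7-byte JSON payload passed to mqtt_data_append()

Because QoS is 0 there is no packet identifier between the topic and the payload, so no PUBACK/PUBREC is written back.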
+
+/* Handle a PINGREQ control packet */
+static int mqtt_handle_ping(struct mqtt_conn *conn)
+{
+ int ret;
+ size_t sent;
+ char buf[2] = {0, 0};
+ struct flb_in_mqtt_config *ctx = conn->ctx;
+
+ mqtt_packet_header(MQTT_PINGRESP, 0 , (char *) &buf);
+
+ /* write PINGRESP message */
+
+ ret = flb_io_net_write(conn->connection,
+ (void *) buf,
+ 2,
+ &sent);
+
+ flb_plg_trace(ctx->ins, "[fd=%i] CMD PING (pong=%i bytes)",
+ conn->connection->fd, ret);
+ return ret;
+}
+
+int mqtt_prot_parser(struct mqtt_conn *conn)
+{
+ int ret;
+ int length = 0;
+ int pos = conn->buf_pos;
+ int mult;
+ struct flb_in_mqtt_config *ctx = conn->ctx;
+
+ for (; conn->buf_pos < conn->buf_len; conn->buf_pos++) {
+ if (conn->status & (MQTT_NEW | MQTT_NEXT)) {
+ /*
+ * Do we have at least the Control Packet fixed header
+ * and the remaining length byte field ?
+ */
+ if (BUF_AVAIL() < 2) {
+ conn->buf_pos = pos;
+ flb_plg_trace(ctx->ins, "[fd=%i] Need more data",
+ conn->connection->fd);
+ return MQTT_MORE;
+ }
+
+            /* As the connection is new we expect an MQTT_CONNECT request */
+ conn->packet_type = BUFC() >> 4;
+ if (conn->status == MQTT_NEW && conn->packet_type != MQTT_CONNECT) {
+ flb_plg_trace(ctx->ins, "[fd=%i] error, expecting MQTT_CONNECT",
+ conn->connection->fd);
+ return MQTT_ERROR;
+ }
+ conn->packet_length = conn->buf_pos;
+ conn->buf_pos++;
+
+ /* Get the remaining length */
+ mult = 1;
+ length = 0;
+
+ do {
+ if (conn->buf_pos + 1 > conn->buf_len) {
+ conn->buf_pos = pos;
+ flb_plg_trace(ctx->ins, "[fd=%i] Need more data",
+ conn->connection->fd);
+ return MQTT_MORE;
+ }
+
+ length += (BUFC() & 127) * mult;
+ mult *= 128;
+ if (mult > 128*128*128) {
+ return MQTT_ERROR;
+ }
+
+ if (length + 2 > (conn->buf_len - pos)) {
+ conn->buf_pos = pos;
+ flb_plg_trace(ctx->ins, "[fd=%i] Need more data",
+ conn->connection->fd);
+ return MQTT_MORE;
+ }
+
+ if ((BUFC() & 128) == 0) {
+ if (conn->buf_len - 2 < length) {
+ conn->buf_pos = pos;
+ flb_plg_trace(ctx->ins, "[fd=%i] Need more data",
+ conn->connection->fd);
+ return MQTT_MORE;
+ }
+ else {
+ conn->buf_frame_end = conn->buf_pos + length;
+ break;
+ }
+ }
+
+ if (conn->buf_pos + 1 < conn->buf_len) {
+ conn->buf_pos++;
+ }
+ else {
+ conn->buf_pos = pos;
+ flb_plg_trace(ctx->ins, "[fd=%i] Need more data",
+ conn->connection->fd);
+ return MQTT_MORE;
+ }
+ } while (1);
+
+ conn->packet_length = length;
+
+ /* At this point we have a full control packet in place */
+ if (conn->packet_type == MQTT_CONNECT) {
+ mqtt_handle_connect(conn);
+ }
+ else if (conn->packet_type == MQTT_PUBLISH) {
+ ret = mqtt_handle_publish(conn);
+ if (ret == -1) {
+ return MQTT_ERROR;
+ }
+ }
+ else if (conn->packet_type == MQTT_PINGREQ) {
+ mqtt_handle_ping(conn);
+ }
+ else if (conn->packet_type == MQTT_DISCONNECT) {
+ flb_plg_trace(ctx->ins, "[fd=%i] CMD DISCONNECT",
+ conn->connection->fd);
+ return MQTT_HANGUP;
+ }
+ else {
+ }
+
+ /* Prepare for next round */
+ conn->status = MQTT_NEXT;
+ conn->buf_pos = conn->buf_frame_end;
+
+ mqtt_packet_drop(conn);
+
+ if (conn->buf_len > 0) {
+ conn->buf_pos = -1;
+ }
+ }
+ }
+ conn->buf_pos--;
+ return 0;
+}
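For readers following the remaining-length loop inside mqtt_prot_parser(), a hypothetical standalone decoder expressing the same algorithm without the parser's buffering concerns (the function name is invented for illustration):

    /* Decode an MQTT "remaining length" field; returns bytes consumed or -1 */
    static int decode_remaining_length(const unsigned char *buf, int len, int *out)
    {
        int mult = 1;
        int value = 0;
        int i = 0;

        do {
            if (i >= len || mult > 128 * 128 * 128) {
                return -1;                 /* truncated or malformed */
            }
            value += (buf[i] & 127) * mult;
            mult *= 128;
        } while ((buf[i++] & 128) != 0);

        *out = value;
        return i;                          /* 1 to 4 bytes consumed */
    }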
diff --git a/src/fluent-bit/plugins/in_mqtt/mqtt_prot.h b/src/fluent-bit/plugins/in_mqtt/mqtt_prot.h
new file mode 100644
index 000000000..74c4fe32e
--- /dev/null
+++ b/src/fluent-bit/plugins/in_mqtt/mqtt_prot.h
@@ -0,0 +1,62 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_MQTT_PROT_H
+#define FLB_MQTT_PROT_H
+
+#include "mqtt_conn.h"
+
+/*
+ * Specs definition from 2.2.1 MQTT Control Packet:
+ *
+ * http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718021
+ */
+#define MQTT_CONNECT 1
+#define MQTT_CONNACK 2
+#define MQTT_PUBLISH 3
+#define MQTT_PUBACK 4
+#define MQTT_PUBREC 5
+#define MQTT_PUBREL 6
+#define MQTT_PUBCOMP 7
+#define MQTT_PINGREQ 12
+#define MQTT_PINGRESP 13
+#define MQTT_DISCONNECT 14
+
+/* CONNACK status codes */
+#define MQTT_CONN_ACCEPTED 0
+#define MQTT_CONN_REFUSED_PROTOCOL 1
+#define MQTT_CONN_REFUSED_IDENTIF 2
+#define MQTT_CONN_REFUSED_SERVER 3
+#define MQTT_CONN_REFUSED_BADCRED 4
+#define MQTT_CONN_REFUSED_NOAUTH 5
+
+/* QOS Flag status */
+#define MQTT_QOS_LEV0 0 /* no reply */
+#define MQTT_QOS_LEV1 1 /* PUBACK packet */
+#define MQTT_QOS_LEV2 2 /* PUBREC packet */
+
+/* Specific macros for Fluent Bit handling, not related to MQTT spec */
+#define MQTT_HANGUP -2 /* MQTT client is closing */
+#define MQTT_ERROR -1 /* MQTT protocol error, hangup */
+#define MQTT_OK 0 /* Everything is OK */
+#define MQTT_MORE 1 /* need to read more data */
+
+int mqtt_prot_parser(struct mqtt_conn *conn);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_netif/CMakeLists.txt b/src/fluent-bit/plugins/in_netif/CMakeLists.txt
new file mode 100644
index 000000000..952e7b3c8
--- /dev/null
+++ b/src/fluent-bit/plugins/in_netif/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ in_netif.c)
+
+FLB_PLUGIN(in_netif "${src}" "")
diff --git a/src/fluent-bit/plugins/in_netif/in_netif.c b/src/fluent-bit/plugins/in_netif/in_netif.c
new file mode 100644
index 000000000..f82c685aa
--- /dev/null
+++ b/src/fluent-bit/plugins/in_netif/in_netif.c
@@ -0,0 +1,392 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+
+#include <stdio.h>
+#include "in_netif.h"
+
+struct entry_define entry_name_linux[] = {
+ {"rx.bytes", FLB_TRUE},
+ {"rx.packets", FLB_TRUE},
+ {"rx.errors", FLB_TRUE},
+ {"rx.drop", FLB_FALSE},
+ {"rx.fifo", FLB_FALSE},
+ {"rx.frame", FLB_FALSE},
+ {"rx.compressed", FLB_FALSE},
+ {"rx.multicast", FLB_FALSE},
+ {"tx.bytes", FLB_TRUE},
+ {"tx.packets", FLB_TRUE},
+ {"tx.errors", FLB_TRUE},
+ {"tx.drop", FLB_FALSE},
+ {"tx.fifo", FLB_FALSE},
+ {"tx.collisions", FLB_FALSE},
+ {"tx.carrier", FLB_FALSE},
+ {"tx.compressepd", FLB_FALSE}
+};
+
+static int config_destroy(struct flb_in_netif_config *ctx)
+{
+ if (ctx->log_encoder != NULL) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ }
+
+ flb_free(ctx->entry);
+ flb_free(ctx);
+ return 0;
+}
+
+
+static int in_netif_exit(void *data, struct flb_config *config)
+{
+ (void) *config;
+ struct flb_in_netif_config *ctx = data;
+
+ /* Destroy context */
+ config_destroy(ctx);
+
+ return 0;
+}
+
+static int init_entry_linux(struct flb_in_netif_config *ctx)
+{
+ int i;
+
+ ctx->entry_len = sizeof(entry_name_linux) / sizeof(struct entry_define);
+ ctx->entry = flb_malloc(sizeof(struct netif_entry) * ctx->entry_len);
+ if (!ctx->entry) {
+ flb_errno();
+ return -1;
+ }
+
+ for(i = 0; i < ctx->entry_len; i++) {
+ ctx->entry[i].name = entry_name_linux[i].name;
+ ctx->entry[i].name_len = strlen(entry_name_linux[i].name);
+ ctx->entry[i].prev = 0;
+ ctx->entry[i].now = 0;
+ if (ctx->verbose){
+ ctx->entry[i].checked = FLB_TRUE;
+ }
+ else {
+ ctx->entry[i].checked = entry_name_linux[i].checked;
+ }
+ if (ctx->entry[i].checked) {
+ ctx->map_num++;
+ }
+ }
+ return 0;
+}
+
+static int configure(struct flb_in_netif_config *ctx,
+ struct flb_input_instance *in)
+{
+ int ret;
+ ctx->map_num = 0;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(in, "unable to load configuration");
+ return -1;
+ }
+
+ if (ctx->interval_sec <= 0 && ctx->interval_nsec <= 0) {
+ /* Illegal settings. Override them. */
+ ctx->interval_sec = atoi(DEFAULT_INTERVAL_SEC);
+ ctx->interval_nsec = atoi(DEFAULT_INTERVAL_NSEC);
+ }
+
+ if (ctx->interface == NULL) {
+ flb_plg_error(ctx->ins, "'interface' is not set");
+ return -1;
+ }
+ ctx->interface_len = strlen(ctx->interface);
+
+    ctx->first_snapshot = FLB_TRUE; /* the next collect is the first snapshot */
+
+ return init_entry_linux(ctx);
+}
+
+static inline int is_specific_interface(struct flb_in_netif_config *ctx,
+ char* interface)
+{
+ if (ctx->interface != NULL &&
+ !strncmp(ctx->interface, interface, ctx->interface_len)) {
+ return FLB_TRUE;
+ }
+ return FLB_FALSE;
+}
+
+static int parse_proc_line(char *line,
+ struct flb_in_netif_config *ctx)
+{
+ struct mk_list *head = NULL;
+ struct mk_list *split = NULL;
+ struct flb_split_entry *sentry = NULL;
+
+ int i = 0;
+ int entry_num;
+
+ split = flb_utils_split(line, ' ', 256);
+ entry_num = mk_list_size(split);
+ if (entry_num != ctx->entry_len + 1) {
+ flb_utils_split_free(split);
+ return -1;
+ }
+
+ mk_list_foreach(head, split) {
+ sentry = mk_list_entry(head, struct flb_split_entry ,_head);
+ if (i==0) {
+ /* interface name */
+ if( is_specific_interface(ctx, sentry->value)){
+ i++;
+ continue;
+ }
+ else {
+ /* skip this line */
+ flb_utils_split_free(split);
+ return -1;
+ }
+ }
+ ctx->entry[i-1].now = strtoul(sentry->value ,NULL ,10);
+ i++;
+ }
+
+ flb_utils_split_free(split);
+
+ return 0;
+}
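For context, a representative /proc/net/dev data line as this parser sees it (an illustrative line, not taken from the patch): the first token is the interface name with a trailing colon, followed by the 16 counters that map, in order, to entry_name_linux[] above. The file's two header lines fail the token-count or interface check and are skipped.

    eth0: 1500243 9867 0 0 0 0 0 0 734501 8211 0 0 0 0 0 0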
+
+static inline uint64_t calc_diff(struct netif_entry *entry)
+{
+ if (entry->prev <= entry->now) {
+ return entry->now - entry->prev;
+ }
+ else {
+ return entry->now + (UINT64_MAX - entry->prev);
+ }
+}
+
+#define LINE_LEN 256
+static int read_proc_file_linux(struct flb_in_netif_config *ctx)
+{
+ FILE *fp = NULL;
+ char line[LINE_LEN] = {0};
+ int interface_found = FLB_FALSE;
+
+ fp = fopen("/proc/net/dev", "r");
+ if (fp == NULL) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "cannot open /proc/net/dev");
+ return -1;
+ }
+ while(fgets(line, LINE_LEN-1, fp) != NULL){
+ if(parse_proc_line(line, ctx) == 0) {
+ interface_found = FLB_TRUE;
+ }
+ }
+ fclose(fp);
+ if (interface_found != FLB_TRUE) {
+ return -1;
+ }
+ return 0;
+}
+
+static int in_netif_collect_linux(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_in_netif_config *ctx = in_context;
+ char key_name[LINE_LEN] = {0};
+ int key_len;
+ int i;
+ int entry_len = ctx->entry_len;
+ int ret;
+
+ ret = 0;
+
+ read_proc_file_linux(ctx);
+
+ if (ctx->first_snapshot == FLB_TRUE) {
+        /* if in_netif is called for the first time, copy 'now' into 'prev' */
+ for (i = 0; i < entry_len; i++) {
+ ctx->entry[i].prev = ctx->entry[i].now;
+ }
+
+ /* assign first_snapshot with FLB_FALSE */
+ ctx->first_snapshot = FLB_FALSE;
+ }
+ else {
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ for (i = 0 ;
+ i < entry_len &&
+ ret == FLB_EVENT_ENCODER_SUCCESS ;
+ i++) {
+ if (ctx->entry[i].checked) {
+ key_len = ctx->interface_len + ctx->entry[i].name_len + 1/* '.' */;
+
+ snprintf(key_name, key_len + 1 /* add null character */,
+ "%s.%s", ctx->interface, ctx->entry[i].name);
+
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(key_name),
+ FLB_LOG_EVENT_UINT64_VALUE(calc_diff(&ctx->entry[i])));
+
+ ctx->entry[i].prev = ctx->entry[i].now;
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(i_ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ ret = 0;
+ }
+ else {
+ flb_plg_error(i_ins, "log event encoding error : %d", ret);
+
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+ }
+
+ return ret;
+}
+
+static int in_netif_collect(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context)
+{
+ return in_netif_collect_linux(i_ins, config, in_context);
+}
+
+static int in_netif_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+
+ struct flb_in_netif_config *ctx = NULL;
+ (void) data;
+
+ /* Allocate space for the configuration */
+ ctx = flb_calloc(1, sizeof(struct flb_in_netif_config));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = in;
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(in, "could not initialize event encoder");
+ config_destroy(ctx);
+
+ return -1;
+ }
+
+ if (configure(ctx, in) < 0) {
+ config_destroy(ctx);
+ return -1;
+ }
+
+ /* Testing interface */
+ if (ctx->test_at_init == FLB_TRUE) {
+ /* Try to read procfs */
+ ret = read_proc_file_linux(ctx);
+ if (ret < 0) {
+ flb_plg_error(in, "%s: init test failed", ctx->interface);
+ config_destroy(ctx);
+ return -1;
+ }
+ flb_plg_info(in, "%s: init test passed", ctx->interface);
+ }
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ /* Set our collector based on time */
+ ret = flb_input_set_collector_time(in,
+ in_netif_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Could not set collector for Proc input plugin");
+ config_destroy(ctx);
+ return -1;
+ }
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "interface", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_netif_config, interface),
+ "Set the interface, eg: eth0 or enp1s0"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_netif_config, interval_sec),
+ "Set the collector interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_netif_config, interval_nsec),
+ "Set the collector interval (nanoseconds)"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "verbose", "false",
+ 0, FLB_TRUE, offsetof(struct flb_in_netif_config, verbose),
+ "Enable verbosity"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "test_at_init", "false",
+ 0, FLB_TRUE, offsetof(struct flb_in_netif_config, test_at_init),
+ "Testing interface at initialization"
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_netif_plugin = {
+ .name = "netif",
+ .description = "Network Interface Usage",
+ .cb_init = in_netif_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_netif_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_netif_exit,
+ .config_map = config_map,
+ .flags = 0,
+};
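A minimal classic-mode configuration sketch for this input, using the properties declared in config_map above (the interface name is an example):

    [INPUT]
        name           netif
        interface      eth0
        interval_sec   1
        interval_nsec  0

    [OUTPUT]
        name           stdout
        match          *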
diff --git a/src/fluent-bit/plugins/in_netif/in_netif.h b/src/fluent-bit/plugins/in_netif/in_netif.h
new file mode 100644
index 000000000..e571ec1fb
--- /dev/null
+++ b/src/fluent-bit/plugins/in_netif/in_netif.h
@@ -0,0 +1,70 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NETIF_H
+#define FLB_IN_NETIF_H
+
+#include <stdint.h>
+#include <unistd.h>
+
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+#define DEFAULT_INTERVAL_SEC "1"
+#define DEFAULT_INTERVAL_NSEC "0"
+
+#define FLB_IN_NETIF_NAME "in_netif"
+
+struct entry_define
+{
+ char *name;
+ int checked;
+};
+
+struct netif_entry {
+ int checked;
+
+ char *name;
+ int name_len;
+
+ uint64_t prev;
+ uint64_t now;
+};
+
+struct flb_in_netif_config {
+ int interval_sec;
+ int interval_nsec;
+
+ flb_sds_t interface;
+ int interface_len;
+ int test_at_init;
+
+ int verbose;
+    int first_snapshot; /* a field to indicate whether or not this is the first collect */
+
+ struct netif_entry *entry;
+ int entry_len;
+
+ int map_num;
+ struct flb_input_instance *ins;
+ struct flb_log_event_encoder *log_encoder;
+};
+
+#endif /*FLB_IN_NETIF_H*/
diff --git a/src/fluent-bit/plugins/in_nginx_exporter_metrics/CMakeLists.txt b/src/fluent-bit/plugins/in_nginx_exporter_metrics/CMakeLists.txt
new file mode 100644
index 000000000..a74b01b2f
--- /dev/null
+++ b/src/fluent-bit/plugins/in_nginx_exporter_metrics/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ nginx.c)
+
+FLB_PLUGIN(in_nginx_exporter_metrics "${src}" "")
diff --git a/src/fluent-bit/plugins/in_nginx_exporter_metrics/nginx.c b/src/fluent-bit/plugins/in_nginx_exporter_metrics/nginx.c
new file mode 100644
index 000000000..bee495e23
--- /dev/null
+++ b/src/fluent-bit/plugins/in_nginx_exporter_metrics/nginx.c
@@ -0,0 +1,2363 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_compat.h>
+#include <cmetrics/cmt_counter.h>
+#include <cmetrics/cmt_gauge.h>
+#include <msgpack.h>
+
+#include "nginx.h"
+
+/**
+ * parse the output of the nginx stub_status module.
+ *
+ * An example:
+ * Active connections: 1
+ * server accepts handled requests
+ * 10 10 10
+ * Reading: 0 Writing: 1 Waiting: 0
+ *
+ * Would result in:
+ * struct nginx_status = {
+ * active = 1,
+ * reading = 0,
+ * writing = 1,
+ * waiting = 0
+ * accepts = 10,
+ * handled = 10,
+ * requests = 10
+ *}
+ */
+static int nginx_parse_stub_status(flb_sds_t buf, struct nginx_status *status)
+{
+ struct mk_list *llines;
+ struct mk_list *head = NULL;
+ char *lines[4];
+ int line = 0;
+ int rc;
+ struct flb_split_entry *cur = NULL;
+
+
+ llines = flb_utils_split(buf, '\n', 4);
+ if (llines == NULL) {
+ return -1;
+ }
+
+ mk_list_foreach(head, llines) {
+ cur = mk_list_entry(head, struct flb_split_entry, _head);
+ lines[line] = cur->value;
+ line++;
+ }
+ if (line < 4) {
+ goto error;
+ }
+
+ rc = sscanf(lines[0], "Active connections: %" PRIu64 " \n", &status->active);
+ if (rc != 1) {
+ goto error;
+ }
+ rc = sscanf(lines[2], " %" PRIu64 " %" PRIu64 " %" PRIu64 " \n",
+ &status->accepts, &status->handled, &status->requests);
+ if (rc != 3) {
+ goto error;
+ }
+ rc = sscanf(lines[3], "Reading: %" PRIu64 " Writing: %" PRIu64 " Waiting: %" PRIu64 " \n",
+ &status->reading, &status->writing, &status->waiting);
+ if (rc != 3) {
+ goto error;
+ }
+
+ flb_utils_split_free(llines);
+ return 0;
+error:
+ flb_utils_split_free(llines);
+ return -1;
+}
+
+/**
+ * Callback function to gather statistics from the nginx
+ * status module.
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ * @param in_context void Pointer used to cast to nginx_ctx
+ *
+ * @return int Always returns success
+ */
+static int nginx_collect_stub_status(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct nginx_ctx *ctx = (struct nginx_ctx *)in_context;
+ struct flb_connection *u_conn;
+ struct flb_http_client *client;
+ struct nginx_status status;
+ flb_sds_t data;
+
+ size_t b_sent;
+ int ret = -1;
+ int rc = -1;
+ uint64_t ts = cfl_time_now();
+
+
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!u_conn) {
+ flb_plg_error(ins, "upstream connection initialization error");
+ goto conn_error;
+ }
+
+ client = flb_http_client(u_conn, FLB_HTTP_GET, ctx->status_url,
+ NULL, 0, ctx->ins->host.name, ctx->ins->host.port, NULL, 0);
+ if (!client) {
+ flb_plg_error(ins, "unable to create http client");
+ goto client_error;
+ }
+
+ ret = flb_http_do(client, &b_sent);
+ if (ret != 0) {
+ flb_plg_error(ins, "http do error");
+ goto http_error;
+ }
+
+ if (client->resp.status != 200) {
+ flb_plg_error(ins, "http status code error: %d", client->resp.status);
+ goto http_error;
+ }
+
+ if (client->resp.payload_size <= 0) {
+ flb_plg_error(ins, "empty response");
+ goto http_error;
+ }
+
+ /* copy and NULL terminate the payload */
+ data = flb_sds_create_size(client->resp.payload_size + 1);
+ if (!data) {
+ goto http_error;
+ }
+ memcpy(data, client->resp.payload, client->resp.payload_size);
+ data[client->resp.payload_size] = '\0';
+
+ /* work directly on the data here ... */
+ if (nginx_parse_stub_status(data, &status) == -1) {
+ flb_plg_error(ins, "unable to parse stub status response");
+ goto status_error;
+ }
+
+ rc = 0;
+
+ cmt_counter_set(ctx->connections_accepted, ts, (double)status.accepts, 0, NULL);
+ cmt_gauge_set(ctx->connections_active, ts, (double)status.active, 0, NULL);
+ cmt_counter_set(ctx->connections_handled, ts, (double)status.handled, 0, NULL);
+
+ cmt_gauge_set(ctx->connections_reading, ts, (double)status.reading, 0, NULL);
+ cmt_gauge_set(ctx->connections_writing, ts, (double)status.writing, 0, NULL);
+ cmt_gauge_set(ctx->connections_waiting, ts, (double)status.waiting, 0, NULL);
+
+ cmt_counter_set(ctx->connections_total, ts, (double)status.requests, 0, NULL);
+
+status_error:
+ flb_sds_destroy(data);
+http_error:
+ flb_http_client_destroy(client);
+client_error:
+ flb_upstream_conn_release(u_conn);
+conn_error:
+ if (rc == 0 && ctx->is_up == FLB_FALSE) {
+ cmt_gauge_set(ctx->connection_up, ts, 1.0, 0, NULL);
+ ctx->is_up = FLB_TRUE;
+ }
+ else if (rc != 0 && ctx->is_up == FLB_TRUE) {
+ cmt_gauge_set(ctx->connection_up, ts, 0.0, 0, NULL);
+ ctx->is_up = FLB_FALSE;
+ }
+ ret = flb_input_metrics_append(ins, NULL, 0, ctx->cmt);
+ if (ret != 0) {
+ flb_plg_error(ins, "could not append metrics");
+ }
+
+ return rc;
+}
+
+
+int process_connections(void *ctx, uint64_t ts, char *buf, size_t size)
+{
+ struct nginx_plus_connections *plus = (struct nginx_plus_connections *)ctx;
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object_kv *cur;
+ msgpack_object_str *key;
+ int i = 0;
+
+
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, buf, size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ if (result.data.type == MSGPACK_OBJECT_MAP) {
+ for (i = 0; i < result.data.via.map.size; i++) {
+
+ cur = &result.data.via.map.ptr[i];
+ key = &cur->key.via.str;
+
+ if (strncmp(key->ptr, "accepted", key->size) == 0) {
+ cmt_counter_set(plus->connections_accepted, ts,
+ (double)cur->val.via.i64, 0, NULL);
+ }
+ else if (strncmp(key->ptr, "dropped", key->size) == 0) {
+ cmt_counter_set(plus->connections_dropped, ts,
+ (double)cur->val.via.i64, 0, NULL);
+ }
+ else if (strncmp(key->ptr, "active", key->size) == 0) {
+ cmt_counter_set(plus->connections_active, ts,
+ (double)cur->val.via.i64, 0, NULL);
+ }
+ else if (strncmp(key->ptr, "idle", key->size) == 0) {
+ cmt_counter_set(plus->connections_idle, ts,
+ (double)cur->val.via.i64, 0, NULL);
+ }
+ }
+ break;
+ }
+ }
+ msgpack_unpacked_destroy(&result);
+ return 0;
+}
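An illustrative NGINX Plus /api/<version>/connections response that this handler expects; the field names are taken from the keys matched above and the numbers are invented:

    {"accepted": 1024, "dropped": 3, "active": 12, "idle": 4}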
+
+int process_ssl(void *ctx, uint64_t ts, char *buf, size_t size)
+{
+ struct nginx_plus_ssl *plus = (struct nginx_plus_ssl *)ctx;
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object_kv *cur;
+ msgpack_object_str *key;
+ int i = 0;
+
+
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, buf, size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ if (result.data.type == MSGPACK_OBJECT_MAP) {
+ for (i = 0; i < result.data.via.map.size; i++) {
+ cur = &result.data.via.map.ptr[i];
+ key = &cur->key.via.str;
+ if (strncmp(key->ptr, "handshakes", key->size) == 0) {
+ cmt_counter_set(plus->handshakes, ts,
+ (double)cur->val.via.i64, 0, NULL);
+ }
+ else if (strncmp(key->ptr, "handshakes_failed", key->size) == 0) {
+ cmt_counter_set(plus->handshakes_failed, ts,
+ (double)cur->val.via.i64, 0, NULL);
+ }
+ else if (strncmp(key->ptr, "session_reuses", key->size) == 0) {
+ cmt_counter_set(plus->session_reuses, ts,
+ (double)cur->val.via.i64, 0, NULL);
+ }
+ }
+ break;
+ }
+ }
+ msgpack_unpacked_destroy(&result);
+ return 0;
+}
+
+int process_http_requests(void *ctx, uint64_t ts, char *buf, size_t size)
+{
+ struct nginx_plus_http_requests *plus = (struct nginx_plus_http_requests *)ctx;
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object_kv *cur;
+ msgpack_object_str *key;
+ int i = 0;
+
+
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, buf, size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ if (result.data.type == MSGPACK_OBJECT_MAP) {
+ for (i = 0; i < result.data.via.map.size; i++) {
+ cur = &result.data.via.map.ptr[i];
+ key = &cur->key.via.str;
+ if (strncmp(key->ptr, "total", key->size) == 0) {
+ cmt_counter_set(plus->total, ts,
+ (double)cur->val.via.i64, 0, NULL);
+ }
+ else if (strncmp(key->ptr, "current", key->size) == 0) {
+ cmt_counter_set(plus->current, ts,
+ (double)cur->val.via.i64, 0, NULL);
+ }
+ }
+ break;
+ }
+ }
+ msgpack_unpacked_destroy(&result);
+ return 0;
+}
+
+static ssize_t parse_payload_json(struct nginx_ctx *nginx, void *ctx, uint64_t ts,
+ int (*process)(void *, uint64_t, char *, size_t),
+ char *payload, size_t size)
+{
+ int ret;
+ int out_size;
+ char *pack;
+ struct flb_pack_state pack_state;
+
+ /* Initialize packer */
+ flb_pack_state_init(&pack_state);
+
+ /* Pack JSON as msgpack */
+ ret = flb_pack_json_state(payload, size,
+ &pack, &out_size, &pack_state);
+ flb_pack_state_reset(&pack_state);
+
+ /* Handle exceptions */
+ if (ret == FLB_ERR_JSON_PART) {
+ flb_plg_warn(nginx->ins, "JSON data is incomplete, skipping");
+ return -1;
+ }
+ else if (ret == FLB_ERR_JSON_INVAL) {
+ flb_plg_warn(nginx->ins, "invalid JSON message, skipping");
+ return -1;
+ }
+ else if (ret == -1) {
+ return -1;
+ }
+
+ /* Process the packaged JSON and return the last byte used */
+ process(ctx, ts, pack, out_size);
+ flb_free(pack);
+
+ return 0;
+}
+
+/**
+ * Callback function to gather statistics from the nginx
+ * plus ngx_http module.
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ * @param in_context void Pointer used to cast to nginx_ctx
+ *
+ * @return int Always returns success
+ */
+static int nginx_collect_plus_connections(struct flb_input_instance *ins,
+ struct flb_config *config, struct nginx_ctx *ctx, uint64_t ts)
+{
+ struct flb_connection *u_conn;
+ struct flb_http_client *client;
+ char url[1024];
+ size_t b_sent;
+ int ret = -1;
+ int rc = -1;
+
+
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!u_conn) {
+ flb_plg_error(ins, "upstream connection initialization error");
+ goto conn_error;
+ }
+
+ snprintf(url, sizeof(url)-1, "%s/%d/connections", ctx->status_url,
+ ctx->nginx_plus_version);
+ client = flb_http_client(u_conn, FLB_HTTP_GET, url,
+ NULL, 0, ctx->ins->host.name, ctx->ins->host.port, NULL, 0);
+ if (!client) {
+ flb_plg_error(ins, "unable to create http client");
+ goto client_error;
+ }
+
+ ret = flb_http_do(client, &b_sent);
+ if (ret != 0) {
+ flb_plg_error(ins, "http do error");
+ goto http_error;
+ }
+
+ if (client->resp.status != 200) {
+ flb_plg_error(ins, "http status code error: %d", client->resp.status);
+ goto http_error;
+ }
+
+ if (client->resp.payload_size <= 0) {
+ flb_plg_error(ins, "empty response");
+ goto http_error;
+ }
+
+ parse_payload_json(ctx, ctx->plus_connections, ts, process_connections,
+ client->resp.payload, client->resp.payload_size);
+
+ rc = 0;
+http_error:
+ flb_http_client_destroy(client);
+client_error:
+ flb_upstream_conn_release(u_conn);
+conn_error:
+ return rc;
+}
+
+/**
+ * Callback function to gather statistics from the nginx
+ * plus ngx_http module.
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ * @param in_context void Pointer used to cast to nginx_ctx
+ *
+ * @return int Always returns success
+ */
+static int nginx_collect_plus_ssl(struct flb_input_instance *ins,
+ struct flb_config *config, struct nginx_ctx *ctx, uint64_t ts)
+{
+ struct flb_connection *u_conn;
+ struct flb_http_client *client;
+ char url[1024];
+ size_t b_sent;
+ int ret = -1;
+ int rc = -1;
+
+
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!u_conn) {
+ flb_plg_error(ins, "upstream connection initialization error");
+ goto conn_error;
+ }
+
+ snprintf(url, sizeof(url)-1, "%s/%d/ssl", ctx->status_url, ctx->nginx_plus_version);
+ client = flb_http_client(u_conn, FLB_HTTP_GET, url,
+ NULL, 0, ctx->ins->host.name, ctx->ins->host.port, NULL, 0);
+ if (!client) {
+ flb_plg_error(ins, "unable to create http client");
+ goto client_error;
+ }
+
+ ret = flb_http_do(client, &b_sent);
+ if (ret != 0) {
+ flb_plg_error(ins, "http do error");
+ goto http_error;
+ }
+
+ if (client->resp.status != 200) {
+ flb_plg_error(ins, "http status code error: %d", client->resp.status);
+ goto http_error;
+ }
+
+ if (client->resp.payload_size <= 0) {
+ flb_plg_error(ins, "empty response");
+ goto http_error;
+ }
+
+ parse_payload_json(ctx, ctx->plus_ssl, ts, process_ssl,
+ client->resp.payload, client->resp.payload_size);
+
+ rc = 0;
+http_error:
+ flb_http_client_destroy(client);
+client_error:
+ flb_upstream_conn_release(u_conn);
+conn_error:
+ return rc;
+}
+
+/**
+ * Callback function to gather statistics from the nginx
+ * plus ngx_http module.
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ * @param in_context void Pointer used to cast to nginx_ctx
+ *
+ * @return int Always returns success
+ */
+static int nginx_collect_plus_http_requests(struct flb_input_instance *ins,
+ struct flb_config *config, struct nginx_ctx *ctx, uint64_t ts)
+{
+ struct flb_connection *u_conn;
+ struct flb_http_client *client;
+ char url[1024];
+ size_t b_sent;
+ int ret = -1;
+ int rc = -1;
+
+
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!u_conn) {
+ flb_plg_error(ins, "upstream connection initialization error");
+ goto conn_error;
+ }
+
+ snprintf(url, sizeof(url)-1, "%s/%d/http/requests", ctx->status_url,
+ ctx->nginx_plus_version);
+ client = flb_http_client(u_conn, FLB_HTTP_GET, url,
+ NULL, 0, ctx->ins->host.name, ctx->ins->host.port, NULL, 0);
+ if (!client) {
+ flb_plg_error(ins, "unable to create http client");
+ goto client_error;
+ }
+
+ ret = flb_http_do(client, &b_sent);
+ if (ret != 0) {
+ flb_plg_error(ins, "http do error");
+ goto http_error;
+ }
+
+ if (client->resp.status != 200) {
+ flb_plg_error(ins, "http status code error: %d", client->resp.status);
+ goto http_error;
+ }
+
+ if (client->resp.payload_size <= 0) {
+ flb_plg_error(ins, "empty response");
+ goto http_error;
+ }
+
+ parse_payload_json(ctx, ctx->plus_http_requests, ts, process_http_requests,
+ client->resp.payload, client->resp.payload_size);
+
+ rc = 0;
+http_error:
+ flb_http_client_destroy(client);
+client_error:
+ flb_upstream_conn_release(u_conn);
+conn_error:
+ return rc;
+}
+
+void *process_server_zone(struct nginx_ctx *ctx, char *zone, uint64_t ts,
+ msgpack_object_map *map)
+{
+ msgpack_object_kv *responses;
+ msgpack_object_kv *cur;
+ msgpack_object_str *key;
+ int i = 0;
+ int x = 0;
+ char code[4] = { '0', 'x', 'x', 0};
+
+
+ for (i = 0; i < map->size; i++) {
+ cur = &map->ptr[i];
+ key = &cur->key.via.str;
+ if (strncmp(key->ptr, "processing", key->size) == 0) {
+ cmt_counter_set(ctx->server_zones->processing, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){zone});
+ }
+ else if (strncmp(key->ptr, "requests", key->size) == 0) {
+ cmt_counter_set(ctx->server_zones->requests, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){zone});
+ }
+ else if (strncmp(key->ptr, "discarded", key->size) == 0) {
+ cmt_counter_set(ctx->server_zones->discarded, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){zone});
+ }
+ else if (strncmp(key->ptr, "received", key->size) == 0) {
+ cmt_counter_set(ctx->server_zones->received, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){zone});
+ }
+ else if (strncmp(key->ptr, "sent", key->size) == 0) {
+ cmt_counter_set(ctx->server_zones->sent, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){zone});
+ }
+ else if (strncmp(key->ptr, "responses", key->size) == 0) {
+ for (x = 0; x < map->ptr[i].val.via.map.size; x++) {
+ responses = &map->ptr[i].val.via.map.ptr[x];
+ if (responses->key.via.str.size == 3 &&
+ responses->key.via.str.ptr[1] == 'x' &&
+ responses->key.via.str.ptr[2] == 'x') {
+ code[0] = responses->key.via.str.ptr[0];
+ cmt_counter_set(ctx->server_zones->responses, ts,
+ (double)responses->val.via.i64,
+ 2, (char *[]){zone, code});
+ }
+ }
+ }
+ }
+ return ctx;
+}
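An illustrative server-zone object showing how the nested "responses" map is folded into per-status-class labels above; the figures are invented:

    {"processing": 1, "requests": 320, "discarded": 0, "received": 40960, "sent": 81920,
     "responses": {"1xx": 0, "2xx": 310, "3xx": 4, "4xx": 5, "5xx": 1}}

For example, the "2xx" entry ends up as cmt_counter_set(ctx->server_zones->responses, ts, 310, 2, (char *[]){zone, "2xx"}).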
+
+void *process_location_zone(struct nginx_ctx *ctx, char *zone, uint64_t ts,
+ msgpack_object_map *map)
+{
+ msgpack_object_kv *responses;
+ msgpack_object_str *str;
+ int i = 0;
+ int x = 0;
+ char code[4] = { '0', 'x', 'x', 0};
+
+ for (i = 0; i < map->size; i++) {
+
+ str = &map->ptr[i].key.via.str;
+
+ if (strncmp(str->ptr, "requests", str->size) == 0) {
+ cmt_counter_set(ctx->location_zones->requests, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){zone});
+ }
+ else if (strncmp(str->ptr, "discarded", str->size) == 0) {
+ cmt_counter_set(ctx->location_zones->discarded, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){zone});
+ }
+ else if (strncmp(str->ptr, "received", str->size) == 0) {
+ cmt_counter_set(ctx->location_zones->received, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){zone});
+ }
+ else if (strncmp(str->ptr, "sent", str->size) == 0) {
+ cmt_counter_set(ctx->location_zones->sent, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){zone});
+ }
+ else if (strncmp(str->ptr, "responses", str->size) == 0) {
+ for (x = 0; x < map->ptr[i].val.via.map.size; x++) {
+ responses = &map->ptr[i].val.via.map.ptr[x];
+ if (responses->key.via.str.size == 3 &&
+ responses->key.via.str.ptr[1] == 'x' &&
+ responses->key.via.str.ptr[2] == 'x') {
+ code[0] = responses->key.via.str.ptr[0];
+ cmt_counter_set(ctx->location_zones->responses, ts,
+ (double)responses->val.via.i64,
+ 2, (char *[]){zone, code});
+ }
+ }
+ }
+ }
+ //msgpack_unpacked_destroy(&result);
+ return ctx;
+}
+
+void *process_stream_server_zone(struct nginx_ctx *ctx, char *zone, uint64_t ts,
+ msgpack_object_map *map)
+{
+ msgpack_object_kv *sessions;
+ msgpack_object_str *str;
+ int i = 0;
+ int x = 0;
+ char code[4] = { '0', 'x', 'x', 0};
+
+
+ for (i = 0; i < map->size; i++) {
+
+ str = &map->ptr[i].key.via.str;
+
+ if (strncmp(str->ptr, "connections", str->size) == 0) {
+ cmt_counter_set(ctx->streams->connections, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){zone});
+ }
+ if (strncmp(str->ptr, "processing", str->size) == 0) {
+ cmt_counter_set(ctx->streams->processing, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){zone});
+ }
+ else if (strncmp(str->ptr, "discarded", str->size) == 0) {
+ cmt_counter_set(ctx->streams->discarded, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){zone});
+ }
+ else if (strncmp(str->ptr, "received", str->size) == 0) {
+ cmt_counter_set(ctx->streams->received, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){zone});
+ }
+ else if (strncmp(str->ptr, "sent", str->size) == 0) {
+ cmt_counter_set(ctx->streams->sent, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){zone});
+ }
+ else if (strncmp(str->ptr, "sessions", str->size) == 0) {
+ for (x = 0; x < map->ptr[i].val.via.map.size; x++) {
+ sessions = &map->ptr[i].val.via.map.ptr[x];
+ if (sessions->key.via.str.size == 3 &&
+ sessions->key.via.str.ptr[1] == 'x' &&
+ sessions->key.via.str.ptr[2] == 'x') {
+ code[0] = sessions->key.via.str.ptr[0];
+ cmt_counter_set(ctx->streams->sessions, ts,
+ (double)sessions->val.via.i64,
+ 2, (char *[]){zone, code});
+ }
+ }
+ }
+ }
+ //msgpack_unpacked_destroy(&result);
+ return ctx;
+}
+
+static int process_upstream_peers(struct nginx_ctx *ctx, char *backend, uint64_t ts,
+ msgpack_object_array *peers)
+{
+ int i = 0;
+ int p = 0;
+ int x = 0;
+ msgpack_object_map *map;
+ msgpack_object_kv *responses;
+ msgpack_object_str *key;
+ msgpack_object *kv;
+ char *server;
+ char code[4] = {'0', 'x', 'x', 0};
+
+
+ for (i = 0; i < peers->size; i++) {
+ map = &peers->ptr[i].via.map;
+ for (p = 0, server = NULL; p < map->size; p++) {
+ key = &map->ptr[p].key.via.str;
+ kv = &map->ptr[p].val;
+ if (strncmp(key->ptr, "server", key->size) == 0) {
+ server = flb_calloc(1, kv->via.str.size+1);
+ memcpy(server, kv->via.str.ptr, kv->via.str.size);
+ break;
+ }
+ }
+ if (server == NULL) {
+ flb_plg_warn(ctx->ins, "no server for upstream");
+ continue;
+ }
+ for (p = 0; p < map->size; p++) {
+ key = &map->ptr[p].key.via.str;
+            // initialize to zero so the output matches
+            // what the official exporter reports...
+ cmt_gauge_set(ctx->upstreams->limit, ts, (double)0.0, 2,
+ (char *[]){backend, server});
+ cmt_gauge_set(ctx->upstreams->header_time, ts, (double)0.0, 2,
+ (char *[]){backend, server});
+ cmt_gauge_set(ctx->upstreams->response_time, ts, (double)0.0, 2,
+ (char *[]){backend, server});
+
+ if (strncmp(key->ptr, "active", key->size) == 0) {
+ cmt_gauge_set(ctx->upstreams->active, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "fails", key->size) == 0) {
+ cmt_counter_set(ctx->upstreams->fails, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "header_time", key->size) == 0) {
+ cmt_gauge_set(ctx->upstreams->header_time, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "limit", key->size) == 0) {
+ cmt_gauge_set(ctx->upstreams->limit, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "received", key->size) == 0) {
+ cmt_counter_set(ctx->upstreams->received, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "requests", key->size) == 0) {
+ cmt_counter_set(ctx->upstreams->requests, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "responses", key->size) == 0) {
+ for (x = 0; x < map->ptr[p].val.via.map.size; x++) {
+ responses = &map->ptr[p].val.via.map.ptr[x];
+ if (responses->key.via.str.size == 3 &&
+ responses->key.via.str.ptr[1] == 'x' &&
+ responses->key.via.str.ptr[2] == 'x') {
+ code[0] = responses->key.via.str.ptr[0];
+ cmt_counter_set(ctx->upstreams->responses, ts,
+ (double)responses->val.via.i64,
+ 3, (char *[]){backend, server, code});
+ }
+ }
+ }
+ else if (strncmp(key->ptr, "response_time", key->size) == 0) {
+ cmt_gauge_set(ctx->upstreams->response_time, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "sent", key->size) == 0) {
+ cmt_counter_set(ctx->upstreams->sent, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "state", key->size) == 0) {
+ cmt_gauge_set(ctx->upstreams->state, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "unavail", key->size) == 0) {
+ cmt_counter_set(ctx->upstreams->unavail, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ }
+ flb_free(server);
+ }
+ return 0;
+}
+
+void *process_upstreams(struct nginx_ctx *ctx, char *backend, uint64_t ts,
+ msgpack_object_map *map)
+{
+ int i = 0;
+ msgpack_object_str *key;
+
+ for (i = 0; i < map->size; i++) {
+ key = &map->ptr[i].key.via.str;
+ if (strncmp(key->ptr, "keepalives", key->size) == 0) {
+ cmt_gauge_set(ctx->upstreams->keepalives, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){backend});
+ }
+ else if (strncmp(key->ptr, "zombies", key->size) == 0) {
+ cmt_gauge_set(ctx->upstreams->zombies, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){backend});
+ }
+ // go into the peer...
+ else if (strncmp(key->ptr, "peers", key->size) == 0) {
+ process_upstream_peers(ctx, backend, ts, &map->ptr[i].val.via.array);
+ }
+ }
+ //msgpack_unpacked_destroy(&result);
+ return ctx;
+}
+
+static int process_stream_upstream_peers(struct nginx_ctx *ctx, char *backend,
+ uint64_t ts, msgpack_object_array *peers)
+{
+ int i = 0;
+ int p = 0;
+ msgpack_object_map *map;
+ msgpack_object_str *key;
+ char *server;
+
+
+ for (i = 0; i < peers->size; i++) {
+ map = &peers->ptr[i].via.map;
+ for (p = 0, server = NULL; p < map->size; p++) {
+ key = &map->ptr[p].key.via.str;
+ if (strncmp(key->ptr, "server", key->size) == 0) {
+ server = flb_calloc(1, map->ptr[p].val.via.str.size+1);
+ memcpy(server, map->ptr[p].val.via.str.ptr, map->ptr[p].val.via.str.size);
+ break;
+ }
+ }
+ if (server == NULL) {
+ flb_plg_warn(ctx->ins, "no server for stream upstream");
+ continue;
+ }
+ for (p = 0; p < map->size; p++) {
+ // initialize to zero for now to respond
+ // how the official exporter does...
+ cmt_gauge_set(ctx->stream_upstreams->limit, ts, (double)0.0, 2,
+ (char *[]){backend, server});
+ cmt_gauge_set(ctx->stream_upstreams->response_time, ts, (double)0.0, 2,
+ (char *[]){backend, server});
+ cmt_gauge_set(ctx->stream_upstreams->connect_time, ts, (double)0.0, 2,
+ (char *[]){backend, server});
+ cmt_gauge_set(ctx->stream_upstreams->first_byte_time, ts, (double)0.0, 2,
+ (char *[]){backend, server});
+
+ key = &map->ptr[p].key.via.str;
+ if (strncmp(key->ptr, "active", key->size) == 0) {
+ cmt_gauge_set(ctx->stream_upstreams->active, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "fails", key->size) == 0) {
+ cmt_counter_set(ctx->stream_upstreams->fails, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "limit", key->size) == 0) {
+ cmt_gauge_set(ctx->stream_upstreams->limit, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "received", key->size) == 0) {
+ cmt_counter_set(ctx->stream_upstreams->received, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "connect_time", key->size) == 0) {
+ cmt_gauge_set(ctx->stream_upstreams->connect_time, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "first_byte_time", key->size) == 0) {
+ cmt_gauge_set(ctx->stream_upstreams->first_byte_time, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "connections", key->size) == 0) {
+ cmt_counter_set(ctx->stream_upstreams->connections, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "response_time", key->size) == 0) {
+ cmt_gauge_set(ctx->stream_upstreams->response_time, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "sent", key->size) == 0) {
+ cmt_counter_set(ctx->stream_upstreams->sent, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "state", key->size) == 0) {
+ cmt_gauge_set(ctx->stream_upstreams->state, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ else if (strncmp(key->ptr, "unavail", key->size) == 0) {
+ cmt_counter_set(ctx->stream_upstreams->unavail, ts,
+ (double)map->ptr[p].val.via.i64, 2,
+ (char *[]){backend, server});
+ }
+ }
+ flb_free(server);
+ }
+ return 0;
+}
+
+void *process_stream_upstreams(struct nginx_ctx *ctx, char *backend, uint64_t ts,
+ msgpack_object_map *map)
+{
+ int i = 0;
+ msgpack_object_str *key;
+
+ for (i = 0; i < map->size; i++) {
+ key = &map->ptr[i].key.via.str;
+ if (strncmp(key->ptr, "zombies", key->size) == 0) {
+ cmt_gauge_set(ctx->stream_upstreams->zombies, ts,
+ (double)map->ptr[i].val.via.i64, 1, (char *[]){backend});
+ }
+ // go into the peer...
+ else if (strncmp(key->ptr, "peers", key->size) == 0) {
+ process_stream_upstream_peers(ctx, backend, ts, &map->ptr[i].val.via.array);
+ }
+ }
+ //msgpack_unpacked_destroy(&result);
+ return ctx;
+}
+
+static ssize_t parse_payload_json_table(struct nginx_ctx *ctx, int64_t ts,
+ void *(*process)(struct nginx_ctx *, char *,
+ uint64_t, msgpack_object_map *),
+ char *payload, size_t size)
+{
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object_str *name;
+ int i = 0;
+ int ret;
+ int out_size;
+ char *pack;
+ struct flb_pack_state pack_state;
+ char *zone;
+
+ /* Initialize packer */
+ flb_pack_state_init(&pack_state);
+
+ /* Pack JSON as msgpack */
+ ret = flb_pack_json_state(payload, size, &pack, &out_size, &pack_state);
+ flb_pack_state_reset(&pack_state);
+
+ /* Handle exceptions */
+ if (ret == FLB_ERR_JSON_PART) {
+ flb_plg_warn(ctx->ins, "JSON data is incomplete, skipping");
+ return -1;
+ }
+ else if (ret == FLB_ERR_JSON_INVAL) {
+ flb_plg_warn(ctx->ins, "invalid JSON message, skipping");
+ return -1;
+ }
+ else if (ret == -1) {
+ return -1;
+ }
+
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, pack, out_size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ if (result.data.type == MSGPACK_OBJECT_MAP) {
+ for (i = 0; i < result.data.via.map.size; i++) {
+ name = &result.data.via.map.ptr[i].key.via.str;
+ zone = flb_calloc(1, name->size+1);
+ memcpy(zone, name->ptr, name->size);
+ process(ctx, zone, ts, &result.data.via.map.ptr[i].val.via.map);
+ flb_free(zone);
+ }
+ } else {
+ msgpack_object_print(stdout, result.data);
+ }
+ }
+
+    msgpack_unpacked_destroy(&result);
+    flb_free(pack);
+ return 0;
+}
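+
+/*
+ * Illustrative note (not part of the original patch): parse_payload_json_table()
+ * expects the zone-keyed JSON shape returned by NGINX Plus API endpoints such
+ * as "<status_url>/<version>/http/server_zones", for example:
+ *
+ *   {
+ *     "example_zone": {
+ *       "processing": 1,
+ *       "requests": 706690,
+ *       "responses": { "1xx": 0, "2xx": 699482, "3xx": 4522, "4xx": 907, "5xx": 0 },
+ *       "discarded": 21,
+ *       "received": 172711587,
+ *       "sent": 4466210577
+ *     }
+ *   }
+ *
+ * Each top-level key is passed as the zone label and its value map is handed
+ * to the supplied process_* callback (zone name and counter values above are
+ * made-up sample data).
+ */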
+
+/**
+ * Collect server zone statistics from the NGINX Plus REST API
+ * (the /http/server_zones endpoint).
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ * @param ctx Pointer to nginx_ctx
+ * @param ts Timestamp to attach to the collected metrics
+ *
+ * @return int 0 on success, -1 on error
+ */
+static int nginx_collect_plus_server_zones(struct flb_input_instance *ins,
+ struct flb_config *config, struct nginx_ctx *ctx, uint64_t ts)
+{
+ struct flb_connection *u_conn;
+ struct flb_http_client *client;
+ char url[1024];
+ size_t b_sent;
+ int ret = -1;
+ int rc = -1;
+
+
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!u_conn) {
+ flb_plg_error(ins, "upstream connection initialization error");
+ goto conn_error;
+ }
+
+ snprintf(url, sizeof(url)-1, "%s/%d/http/server_zones", ctx->status_url,
+ ctx->nginx_plus_version);
+ client = flb_http_client(u_conn, FLB_HTTP_GET, url,
+ NULL, 0, ctx->ins->host.name, ctx->ins->host.port, NULL, 0);
+ if (!client) {
+ flb_plg_error(ins, "unable to create http client");
+ goto client_error;
+ }
+
+ ret = flb_http_do(client, &b_sent);
+ if (ret != 0) {
+ flb_plg_error(ins, "http do error");
+ goto http_error;
+ }
+
+ if (client->resp.status != 200) {
+ flb_plg_error(ins, "http status code error: %d", client->resp.status);
+ goto http_error;
+ }
+
+ if (client->resp.payload_size <= 0) {
+ flb_plg_error(ins, "empty response");
+ goto http_error;
+ }
+
+ parse_payload_json_table(ctx, ts, process_server_zone,
+ client->resp.payload, client->resp.payload_size);
+ rc = 0;
+http_error:
+ flb_http_client_destroy(client);
+client_error:
+ flb_upstream_conn_release(u_conn);
+conn_error:
+ return rc;
+}
+
+/**
+ * Collect location zone statistics from the NGINX Plus REST API
+ * (the /http/location_zones endpoint).
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ * @param ctx Pointer to nginx_ctx
+ * @param ts Timestamp to attach to the collected metrics
+ *
+ * @return int 0 on success, -1 on error
+ */
+static int nginx_collect_plus_location_zones(struct flb_input_instance *ins,
+ struct flb_config *config, struct nginx_ctx *ctx, uint64_t ts)
+{
+ struct flb_connection *u_conn;
+ struct flb_http_client *client;
+ char url[1024];
+ size_t b_sent;
+ int ret = -1;
+ int rc = -1;
+
+
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!u_conn) {
+ flb_plg_error(ins, "upstream connection initialization error");
+ goto conn_error;
+ }
+
+ snprintf(url, sizeof(url)-1, "%s/%d/http/location_zones", ctx->status_url,
+ ctx->nginx_plus_version);
+ client = flb_http_client(u_conn, FLB_HTTP_GET, url,
+ NULL, 0, ctx->ins->host.name, ctx->ins->host.port, NULL, 0);
+ if (!client) {
+ flb_plg_error(ins, "unable to create http client");
+ goto client_error;
+ }
+
+ ret = flb_http_do(client, &b_sent);
+ if (ret != 0) {
+ flb_plg_error(ins, "http do error");
+ goto http_error;
+ }
+
+ if (client->resp.status != 200) {
+ flb_plg_error(ins, "http status code error: [%s] %d", url, client->resp.status);
+ goto http_error;
+ }
+
+ if (client->resp.payload_size <= 0) {
+ flb_plg_error(ins, "empty response");
+ goto http_error;
+ }
+
+ parse_payload_json_table(ctx, ts, process_location_zone,
+ client->resp.payload, client->resp.payload_size);
+ rc = 0;
+http_error:
+ flb_http_client_destroy(client);
+client_error:
+ flb_upstream_conn_release(u_conn);
+conn_error:
+ return rc;
+}
+
+/**
+ * Collect upstream statistics from the NGINX Plus REST API
+ * (the /http/upstreams endpoint).
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ * @param ctx Pointer to nginx_ctx
+ * @param ts Timestamp to attach to the collected metrics
+ *
+ * @return int 0 on success, -1 on error
+ */
+static int nginx_collect_plus_upstreams(struct flb_input_instance *ins,
+ struct flb_config *config, struct nginx_ctx *ctx, uint64_t ts)
+{
+ struct flb_connection *u_conn;
+ struct flb_http_client *client;
+ char url[1024];
+ size_t b_sent;
+ int ret = -1;
+ int rc = -1;
+
+
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!u_conn) {
+ flb_plg_error(ins, "upstream connection initialization error");
+ goto conn_error;
+ }
+
+ snprintf(url, sizeof(url)-1, "%s/%d/http/upstreams", ctx->status_url,
+ ctx->nginx_plus_version);
+ client = flb_http_client(u_conn, FLB_HTTP_GET, url,
+ NULL, 0, ctx->ins->host.name, ctx->ins->host.port, NULL, 0);
+ if (!client) {
+ flb_plg_error(ins, "unable to create http client");
+ goto client_error;
+ }
+
+ ret = flb_http_do(client, &b_sent);
+ if (ret != 0) {
+ flb_plg_error(ins, "http do error");
+ goto http_error;
+ }
+
+ if (client->resp.status != 200) {
+ flb_plg_error(ins, "http status code error: [%s] %d", url, client->resp.status);
+ goto http_error;
+ }
+
+ if (client->resp.payload_size <= 0) {
+ flb_plg_error(ins, "empty response");
+ goto http_error;
+ }
+
+ parse_payload_json_table(ctx, ts, process_upstreams,
+ client->resp.payload, client->resp.payload_size);
+ rc = 0;
+http_error:
+ flb_http_client_destroy(client);
+client_error:
+ flb_upstream_conn_release(u_conn);
+conn_error:
+ return rc;
+}
+
+/**
+ * Collect stream server zone statistics from the NGINX Plus REST API
+ * (the /stream/server_zones endpoint).
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ * @param ctx Pointer to nginx_ctx
+ * @param ts Timestamp to attach to the collected metrics
+ *
+ * @return int 0 on success, -1 on error
+ */
+static int nginx_collect_plus_stream_server_zones(struct flb_input_instance *ins,
+ struct flb_config *config, struct nginx_ctx *ctx, uint64_t ts)
+{
+ struct flb_connection *u_conn;
+ struct flb_http_client *client;
+ char url[1024];
+ size_t b_sent;
+ int ret = -1;
+ int rc = -1;
+
+
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!u_conn) {
+ flb_plg_error(ins, "upstream connection initialization error");
+ goto conn_error;
+ }
+
+ snprintf(url, sizeof(url)-1, "%s/%d/stream/server_zones", ctx->status_url,
+ ctx->nginx_plus_version);
+ client = flb_http_client(u_conn, FLB_HTTP_GET, url,
+ NULL, 0, ctx->ins->host.name, ctx->ins->host.port, NULL, 0);
+ if (!client) {
+ flb_plg_error(ins, "unable to create http client");
+ goto client_error;
+ }
+
+ ret = flb_http_do(client, &b_sent);
+ if (ret != 0) {
+ flb_plg_error(ins, "http do error");
+ goto http_error;
+ }
+
+ if (client->resp.status != 200) {
+ flb_plg_error(ins, "http status code error: [%s] %d", url, client->resp.status);
+ goto http_error;
+ }
+
+ if (client->resp.payload_size <= 0) {
+ flb_plg_error(ins, "empty response");
+ goto http_error;
+ }
+
+ parse_payload_json_table(ctx, ts, process_stream_server_zone,
+ client->resp.payload, client->resp.payload_size);
+ rc = 0;
+http_error:
+ flb_http_client_destroy(client);
+client_error:
+ flb_upstream_conn_release(u_conn);
+conn_error:
+ return rc;
+}
+
+/**
+ * Collect stream upstream statistics from the NGINX Plus REST API
+ * (the /stream/upstreams endpoint).
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ * @param ctx Pointer to nginx_ctx
+ * @param ts Timestamp to attach to the collected metrics
+ *
+ * @return int 0 on success, -1 on error
+ */
+static int nginx_collect_plus_stream_upstreams(struct flb_input_instance *ins,
+ struct flb_config *config,
+ struct nginx_ctx *ctx, uint64_t ts)
+{
+ struct flb_connection *u_conn;
+ struct flb_http_client *client;
+ char url[1024];
+ size_t b_sent;
+ int ret = -1;
+ int rc = -1;
+
+
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!u_conn) {
+ flb_plg_error(ins, "upstream connection initialization error");
+ goto conn_error;
+ }
+
+ snprintf(url, sizeof(url)-1, "%s/%d/stream/upstreams", ctx->status_url,
+ ctx->nginx_plus_version);
+ client = flb_http_client(u_conn, FLB_HTTP_GET, url,
+ NULL, 0, ctx->ins->host.name, ctx->ins->host.port, NULL, 0);
+ if (!client) {
+ flb_plg_error(ins, "unable to create http client");
+ goto client_error;
+ }
+
+ ret = flb_http_do(client, &b_sent);
+ if (ret != 0) {
+ flb_plg_error(ins, "http do error");
+ goto http_error;
+ }
+
+ if (client->resp.status != 200) {
+ flb_plg_error(ins, "http status code error: [%s] %d", url, client->resp.status);
+ goto http_error;
+ }
+
+ if (client->resp.payload_size <= 0) {
+ flb_plg_error(ins, "empty response");
+ goto http_error;
+ }
+
+ parse_payload_json_table(ctx, ts, process_stream_upstreams,
+ client->resp.payload, client->resp.payload_size);
+ rc = 0;
+http_error:
+ flb_http_client_destroy(client);
+client_error:
+ flb_upstream_conn_release(u_conn);
+conn_error:
+ return rc;
+}
+
+/**
+ * Get the highest REST API version supported by the NGINX Plus endpoint.
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ * @param ctx Pointer to nginx_ctx
+ *
+ * @return int the highest supported API version; falls back to 1 when the
+ * version list cannot be retrieved or parsed.
+ */
+static int nginx_plus_get_version(struct flb_input_instance *ins,
+ struct flb_config *config,
+ struct nginx_ctx *ctx)
+{
+ struct flb_connection *u_conn;
+ struct flb_http_client *client;
+ char url[1024];
+ size_t b_sent;
+ int rc = -1;
+ int out_size;
+    char *pack = NULL;
+ struct flb_pack_state pack_state;
+ size_t off = 0;
+ msgpack_unpacked result;
+ int maxversion = 1;
+ int i = 0;
+
+
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!u_conn) {
+ flb_plg_error(ins, "upstream connection initialization error");
+ goto conn_error;
+ }
+
+ snprintf(url, sizeof(url)-1, "%s/", ctx->status_url);
+ client = flb_http_client(u_conn, FLB_HTTP_GET, url,
+ NULL, 0, ctx->ins->host.name, ctx->ins->host.port, NULL, 0);
+ if (!client) {
+ flb_plg_error(ins, "unable to create http client");
+ goto client_error;
+ }
+
+ rc = flb_http_do(client, &b_sent);
+ if (rc != 0) {
+ flb_plg_error(ins, "http do error");
+ goto http_error;
+ }
+
+ if (client->resp.status != 200) {
+ flb_plg_error(ins, "http status code error: [%s] %d", url, client->resp.status);
+ goto http_error;
+ }
+
+ if (client->resp.payload_size <= 0) {
+ flb_plg_error(ins, "empty response");
+ goto http_error;
+ }
+
+ /* Initialize packer */
+ flb_pack_state_init(&pack_state);
+
+ /* Pack JSON as msgpack */
+ rc = flb_pack_json_state(client->resp.payload, client->resp.payload_size,
+ &pack, &out_size, &pack_state);
+ flb_pack_state_reset(&pack_state);
+
+ /* Handle exceptions */
+ if (rc == FLB_ERR_JSON_PART) {
+ flb_plg_warn(ins, "JSON data is incomplete, skipping");
+ goto json_error;
+ }
+ else if (rc == FLB_ERR_JSON_INVAL) {
+ flb_plg_warn(ins, "invalid JSON message, skipping");
+ goto json_error;
+ }
+ else if (rc == -1) {
+ flb_plg_error(ins, "unable to parse JSON response");
+ goto json_error;
+ }
+
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, pack, out_size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ if (result.data.type == MSGPACK_OBJECT_ARRAY) {
+ for (i = 0; i < result.data.via.array.size; i++) {
+ if (result.data.via.array.ptr[i].via.i64 > maxversion) {
+ maxversion = result.data.via.array.ptr[i].via.i64;
+ }
+ }
+ } else {
+ flb_plg_error(ins, "NOT AN ARRAY");
+ goto rest_error;
+ }
+ }
+
+rest_error:
+ msgpack_unpacked_destroy(&result);
+json_error:
+ flb_free(pack);
+http_error:
+ flb_http_client_destroy(client);
+client_error:
+ flb_upstream_conn_release(u_conn);
+conn_error:
+ return maxversion;
+}
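+
+/*
+ * For reference (illustrative, not part of the original patch): the API root
+ * ("<status_url>/") replies with a JSON array of the supported API versions,
+ * e.g. [1,2,3,4,5,6,7,8]; the loop above keeps the highest value and the
+ * function falls back to version 1 whenever that list cannot be retrieved
+ * or parsed.
+ */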
+
+
+/**
+ * Callback function to gather all statistics exposed by the
+ * NGINX Plus REST API.
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ * @param in_context Void pointer cast to struct nginx_ctx
+ *
+ * @return int 0 on success, -1 on error
+ */
+static int nginx_collect_plus(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int version = -1;
+ struct nginx_ctx *ctx = (struct nginx_ctx *)in_context;
+ int rc = -1;
+ int ret = -1;
+ uint64_t ts = cfl_time_now();
+
+
+ version = nginx_plus_get_version(ins, config, in_context);
+ if (version <= 0) {
+ flb_plg_error(ins, "bad NGINX plus REST API version = %d", version);
+ goto error;
+ }
+ ctx->nginx_plus_version = version;
+
+ rc = nginx_collect_plus_connections(ins, config, ctx, ts);
+ if (rc != 0) {
+ goto error;
+ }
+ rc = nginx_collect_plus_ssl(ins, config, ctx, ts);
+ if (rc != 0) {
+ goto error;
+ }
+ rc = nginx_collect_plus_http_requests(ins, config, ctx, ts);
+ if (rc != 0) {
+ goto error;
+ }
+ rc = nginx_collect_plus_server_zones(ins, config, ctx, ts);
+ if (rc != 0) {
+ goto error;
+ }
+
+ if (ctx->nginx_plus_version >= 5) {
+ rc = nginx_collect_plus_location_zones(ins, config, ctx, ts);
+ if (rc != 0) {
+ goto error;
+ }
+ }
+
+ rc = nginx_collect_plus_upstreams(ins, config, ctx, ts);
+ if (rc != 0) {
+ goto error;
+ }
+ rc = nginx_collect_plus_stream_server_zones(ins, config, ctx, ts);
+ if (rc != 0) {
+ goto error;
+ }
+ rc = nginx_collect_plus_stream_upstreams(ins, config, ctx, ts);
+ if (rc != 0) {
+ goto error;
+ }
+error:
+ if (rc == 0) {
+ cmt_gauge_set(ctx->connection_up, ts, (double)1.0, 0, NULL);
+ } else {
+ cmt_gauge_set(ctx->connection_up, ts, (double)0.0, 0, NULL);
+ }
+ ret = flb_input_metrics_append(ins, NULL, 0, ctx->cmt);
+ if (ret != 0) {
+ flb_plg_error(ins, "could not append metrics");
+ }
+ return rc;
+}
+
+/**
+ * Function to initialize nginx metrics plugin.
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ *
+ * @return struct nginx_ctx* Pointer to the plugin's context
+ * structure on success, NULL on failure.
+ */
+struct nginx_ctx *nginx_ctx_init(struct flb_input_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ int upstream_flags;
+ struct nginx_ctx *ctx;
+ struct flb_upstream *upstream;
+
+ if (ins->host.name == NULL) {
+ ins->host.name = flb_sds_create("localhost");
+ }
+ if (ins->host.port == 0) {
+ ins->host.port = 80;
+ }
+
+ ctx = flb_calloc(1, sizeof(struct nginx_ctx));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->is_up = FLB_FALSE;
+
+ ctx->ins = ins;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ ctx->cmt = cmt_create();
+ if (!ctx->cmt) {
+ flb_plg_error(ins, "could not initialize CMetrics");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ upstream_flags = FLB_IO_TCP;
+
+ if (ins->use_tls) {
+ upstream_flags |= FLB_IO_TLS;
+ }
+
+ upstream = flb_upstream_create(config, ins->host.name, ins->host.port,
+ upstream_flags, ins->tls);
+
+ if (!upstream) {
+ flb_plg_error(ins, "upstream initialization error");
+ cmt_destroy(ctx->cmt);
+ flb_free(ctx);
+ return NULL;
+ }
+ ctx->upstream = upstream;
+
+ return ctx;
+}
+
+static int nginx_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int rc;
+ struct nginx_ctx *ctx = (struct nginx_ctx *)in_context;
+ if (ctx->is_nginx_plus == FLB_TRUE) {
+ rc = nginx_collect_plus(ins, config, in_context);
+ } else {
+ rc = nginx_collect_stub_status(ins, config, in_context);
+ }
+ FLB_INPUT_RETURN(rc);
+}
+
+static int nginx_ctx_destroy(struct nginx_ctx *ctx);
+/**
+ * Callback function to initialize nginx metrics plugin
+ *
+ * @param ins Pointer to flb_input_instance
+ * @param config Pointer to flb_config
+ * @param data Unused
+ *
+ * @return int 0 on success, -1 on failure
+ */
+static int nginx_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct nginx_ctx *ctx = NULL;
+ struct cmt_counter *c;
+ struct cmt_gauge *g;
+ int ret = -1;
+
+ /* Allocate space for the configuration */
+ ctx = nginx_ctx_init(ins, config);
+ if (!ctx) {
+ return -1;
+ }
+
+
+ flb_input_set_context(ins, ctx);
+
+ if (ctx->is_nginx_plus == FLB_FALSE) {
+        /* These metrics follow the same format as those defined here:
+ * https://github.com/nginxinc/nginx-prometheus-exporter#metrics-for-nginx-oss
+ */
+ ctx->connections_accepted = cmt_counter_create(ctx->cmt, "nginx", "connections",
+ "accepted",
+ "Accepted client connections", 0,
+ NULL);
+ if (ctx->connections_accepted == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(ctx->connections_accepted);
+
+ ctx->connections_active = cmt_gauge_create(ctx->cmt, "nginx", "connections",
+ "active", "active client connections",
+ 0, NULL);
+ if (ctx->connections_active == NULL) {
+ goto nginx_init_end;
+ }
+
+ ctx->connections_handled = cmt_counter_create(ctx->cmt, "nginx", "connections",
+ "handled",
+ "Handled client connections", 0,
+ NULL);
+ if (ctx->connections_handled == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(ctx->connections_handled);
+
+ ctx->connections_reading = cmt_gauge_create(ctx->cmt, "nginx", "connections",
+ "reading",
+ "reading client connections",
+ 0, NULL);
+ if (ctx->connections_reading == NULL) {
+ goto nginx_init_end;
+ }
+
+ ctx->connections_writing = cmt_gauge_create(ctx->cmt, "nginx", "connections",
+ "writing",
+ "writing client connections",
+ 0, NULL);
+ if (ctx->connections_writing == NULL) {
+ goto nginx_init_end;
+ }
+
+ ctx->connections_waiting = cmt_gauge_create(ctx->cmt, "nginx", "connections",
+ "waiting",
+ "waiting client connections",
+ 0, NULL);
+ if (ctx->connections_waiting == NULL) {
+ goto nginx_init_end;
+ }
+
+ ctx->connections_total = cmt_counter_create(ctx->cmt, "nginx", "http",
+ "requests_total",
+ "Total http requests", 0, NULL);
+ if (ctx->connections_total == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(ctx->connections_total);
+
+        ctx->connection_up = cmt_gauge_create(ctx->cmt, "nginx", "", "up",
+                                              "Shows the status of the last metric "
+                                              "scrape: 1 for a successful scrape and "
+                                              "0 for a failed one",
+                                              0, NULL);
+        if (ctx->connection_up == NULL) {
+            goto nginx_init_end;
+        }
+ } else {
+ flb_plg_info(ins, "nginx-plus mode on");
+
+ ctx->plus_connections = flb_calloc(1, sizeof(struct nginx_plus_connections));
+ ctx->plus_ssl = flb_calloc(1, sizeof(struct nginx_plus_ssl));
+ ctx->plus_http_requests = flb_calloc(1, sizeof(struct nginx_plus_http_requests));
+ ctx->server_zones = flb_calloc(1, sizeof(struct nginx_plus_server_zones));
+ ctx->location_zones = flb_calloc(1, sizeof(struct nginx_plus_location_zones));
+ ctx->upstreams = flb_calloc(1, sizeof(struct nginx_plus_upstreams));
+ ctx->streams = flb_calloc(1, sizeof(struct nginx_plus_streams));
+ ctx->stream_upstreams = flb_calloc(1, sizeof(struct nginx_plus_stream_upstreams));
+
+ g = cmt_gauge_create(ctx->cmt, "nginxplus", "", "up",
+ "Shows the status of the last metric scrape: "
+ "1 for a successful scrape and 0 for a failed "
+ "one", 0, NULL);
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->connection_up = g;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus", "connections", "accepted",
+                               "NGINX Plus Accepted Connections",
+ 0, NULL);
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->plus_connections->connections_accepted = c;
+
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus", "connections", "dropped",
+                               "NGINX Plus Dropped Connections",
+ 0, NULL);
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->plus_connections->connections_dropped = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus", "connections", "active",
+                               "NGINX Plus Active Connections",
+ 0, NULL);
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->plus_connections->connections_active = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus", "connections", "idle",
+                               "NGINX Plus Idle Connections",
+ 0, NULL);
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->plus_connections->connections_idle = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus", "ssl", "handshakes",
+                               "NGINX Plus SSL Handshakes",
+ 0, NULL);
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->plus_ssl->handshakes = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus", "ssl", "handshakes_failed",
+                               "NGINX Plus Failed SSL Handshakes",
+ 0, NULL);
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->plus_ssl->handshakes_failed = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus", "ssl", "session_reuses",
+                               "NGINX Plus SSL Session Reuses",
+ 0, NULL);
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->plus_ssl->session_reuses = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus", "http_requests", "total",
+                               "NGINX Plus Total HTTP Requests",
+ 0, NULL);
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->plus_http_requests->total = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus", "http_requests", "current",
+                               "NGINX Plus Current HTTP Requests",
+ 0, NULL);
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->plus_http_requests->current = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "server_zone",
+ "discarded",
+ "NGINX Server Zone discarded",
+ 1, (char *[]){"server_zone"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->server_zones->discarded = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "server_zone",
+ "processing",
+ "NGINX Server Zone processing",
+ 1, (char *[]){"server_zone"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->server_zones->processing = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "server_zone",
+ "received",
+ "NGINX Server Zone received",
+ 1, (char *[]){"server_zone"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->server_zones->received = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "server_zone",
+ "requests",
+ "NGINX Server Zone requests",
+ 1, (char *[]){"server_zone"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->server_zones->requests = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "server_zone",
+ "responses",
+ "NGINX Server Zone responses",
+ 2, (char *[]){"server_zone", "code"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->server_zones->responses = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "server_zone",
+ "sent",
+ "NGINX Server Zone sent",
+ 1, (char *[]){"server_zone"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+            // initialize to zero so the output matches
+            // what the official exporter reports...
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+                               "location_zone",
+                               "discarded",
+                               "NGINX Location Zone discarded",
+ 1, (char *[]){"location_zone"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->location_zones->discarded = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "location_zone",
+ "received",
+                               "NGINX Location Zone received",
+ 1, (char *[]){"location_zone"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->location_zones->received = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "location_zone",
+ "requests",
+                               "NGINX Location Zone requests",
+ 1, (char *[]){"location_zone"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->location_zones->requests = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "location_zone",
+ "responses",
+                               "NGINX Location Zone responses",
+ 2, (char *[]){"location_zone", "code"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->location_zones->responses = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "location_zone",
+ "sent",
+                               "NGINX Location Zone sent",
+ 1, (char *[]){"location_zone"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->location_zones->sent = c;
+
+ g = cmt_gauge_create(ctx->cmt,
+ "nginxplus",
+ "upstream",
+ "keepalives",
+ "NGINX Upstream Keepalives",
+ 1, (char *[]){"upstream"});
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->upstreams->keepalives = g;
+
+ g = cmt_gauge_create(ctx->cmt,
+ "nginxplus",
+ "upstream",
+ "zombies",
+ "NGINX Upstream Zombies",
+ 1, (char *[]){"upstream"});
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->upstreams->zombies = g;
+
+ g = cmt_gauge_create(ctx->cmt,
+ "nginxplus",
+ "upstream_server",
+ "active",
+ "NGINX Upstream Active",
+ 2, (char *[]){"upstream","server"});
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->upstreams->active = g;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "upstream_server",
+ "fails",
+ "NGINX Upstream Fails",
+ 2, (char *[]){"upstream","server"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->upstreams->fails = c;
+
+ g = cmt_gauge_create(ctx->cmt,
+ "nginxplus",
+ "upstream_server",
+ "header_time",
+ "NGINX Upstream Header Time",
+ 2, (char *[]){"upstream","server"});
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->upstreams->header_time = g;
+
+ g = cmt_gauge_create(ctx->cmt,
+ "nginxplus",
+ "upstream_server",
+ "limit",
+ "NGINX Upstream Limit",
+ 2, (char *[]){"upstream","server"});
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->upstreams->limit = g;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "upstream_server",
+ "received",
+ "NGINX Upstream Received",
+ 2, (char *[]){"upstream","server"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->upstreams->received = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "upstream_server",
+ "requests",
+ "NGINX Upstream Requests",
+ 2, (char *[]){"upstream","server"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->upstreams->requests = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "upstream_server",
+ "responses",
+ "NGINX Upstream Responses",
+                               3, (char *[]){"upstream", "server", "code"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->upstreams->responses = c;
+
+ g = cmt_gauge_create(ctx->cmt,
+ "nginxplus",
+ "upstream_server",
+ "response_time",
+ "NGINX Upstream Response Time",
+ 2, (char *[]){"upstream","server"});
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->upstreams->response_time = g;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "upstream_server",
+ "sent",
+ "NGINX Upstream Sent",
+ 2, (char *[]){"upstream","server"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->upstreams->sent = c;
+
+ g = cmt_gauge_create(ctx->cmt,
+ "nginxplus",
+ "upstream_server",
+ "state",
+ "NGINX Upstream State",
+ 2, (char *[]){"upstream","server"});
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->upstreams->state = g;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "upstream_server",
+ "unavail",
+ "NGINX Upstream Unavailable",
+ 2, (char *[]){"upstream","server"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->upstreams->unavail = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "stream_server_zone",
+ "connections",
+ "NGINX Stream Server Zone connections",
+ 1, (char *[]){"server_zone"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->streams->connections = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "stream_server_zone",
+ "discarded",
+ "NGINX Stream Server Zone discarded",
+ 1, (char *[]){"server_zone"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->streams->discarded = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "stream_server_zone",
+ "processing",
+ "NGINX Stream Server Zone "
+ "processing",
+ 1, (char *[]){"server_zone"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->streams->processing = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "stream_server_zone",
+ "received",
+ "NGINX Stream Server Zone received",
+ 1, (char *[]){"server_zone"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->streams->received = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+                               "stream_server_zone",
+ "sent",
+ "NGINX Stream Server Zone sent",
+ 1, (char *[]){"server_zone"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->streams->sent = c;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "stream_server_zone",
+ "sessions",
+ "NGINX Stream Server Zone Sessions",
+ 2, (char *[]){"server_zone", "code"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->streams->sessions = c;
+
+ g = cmt_gauge_create(ctx->cmt,
+ "nginxplus",
+ "stream_upstream",
+ "zombies",
+ "NGINX Upstream Zombies",
+ 1, (char *[]){"upstream"});
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->stream_upstreams->zombies = g;
+
+ g = cmt_gauge_create(ctx->cmt,
+ "nginxplus",
+ "stream_upstream_server",
+ "active",
+ "NGINX Upstream Active",
+ 2, (char *[]){"upstream","server"});
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->stream_upstreams->active = g;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "stream_upstream_server",
+ "fails",
+ "NGINX Upstream Fails",
+ 2, (char *[]){"upstream","server"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->stream_upstreams->fails = c;
+
+ g = cmt_gauge_create(ctx->cmt,
+ "nginxplus",
+ "stream_upstream_server",
+ "limit",
+ "NGINX Upstream Limit",
+ 2, (char *[]){"upstream","server"});
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->stream_upstreams->limit = g;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "stream_upstream_server",
+ "received",
+ "NGINX Upstream Received",
+ 2, (char *[]){"upstream","server"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->stream_upstreams->received = c;
+
+ g = cmt_gauge_create(ctx->cmt,
+ "nginxplus",
+ "stream_upstream_server",
+ "connect_time",
+                             "NGINX Upstream Connect Time",
+ 2, (char *[]){"upstream", "server"});
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->stream_upstreams->connect_time = g;
+
+ g = cmt_gauge_create(ctx->cmt,
+ "nginxplus",
+ "stream_upstream_server",
+ "first_byte_time",
+                             "NGINX Upstream First Byte Time",
+ 2, (char *[]){"upstream", "server"});
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->stream_upstreams->first_byte_time = g;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "stream_upstream_server",
+ "connections",
+                               "NGINX Upstream Connections",
+ 2, (char *[]){"upstream","server"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->stream_upstreams->connections = c;
+
+ g = cmt_gauge_create(ctx->cmt,
+ "nginxplus",
+ "stream_upstream_server",
+ "response_time",
+ "NGINX Upstream Response Time",
+ 2, (char *[]){"upstream","server"});
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->stream_upstreams->response_time = g;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "stream_upstream_server",
+ "sent",
+ "NGINX Upstream Sent",
+ 2, (char *[]){"upstream","server"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->stream_upstreams->sent = c;
+
+ g = cmt_gauge_create(ctx->cmt,
+ "nginxplus",
+ "stream_upstream_server",
+ "state",
+ "NGINX Upstream State",
+ 2, (char *[]){"upstream","server"});
+ if (g == NULL) {
+ goto nginx_init_end;
+ }
+ ctx->stream_upstreams->state = g;
+
+ c = cmt_counter_create(ctx->cmt,
+ "nginxplus",
+ "stream_upstream_server",
+ "unavail",
+ "NGINX Upstream Unavailable",
+ 2, (char *[]){"upstream","server"});
+ if (c == NULL) {
+ goto nginx_init_end;
+ }
+ cmt_counter_allow_reset(c);
+ ctx->stream_upstreams->unavail = c;
+
+ }
+ ctx->coll_id = flb_input_set_collector_time(ins,
+ nginx_collect,
+ 1,
+ 0, config);
+ ret = 0;
+ nginx_init_end:
+ if (ret < 0) {
+ nginx_ctx_destroy(ctx);
+ }
+
+ return ret;
+}
+
+
+/**
+ * Function to destroy nginx metrics plugin.
+ *
+ * @param ctx Pointer to nginx_ctx
+ *
+ * @return int 0
+ */
+static int nginx_ctx_destroy(struct nginx_ctx *ctx)
+{
+ if (ctx->upstream) {
+ flb_upstream_destroy(ctx->upstream);
+ }
+ if (ctx->cmt) {
+ cmt_destroy(ctx->cmt);
+ }
+ if (ctx->is_nginx_plus) {
+ if (ctx->plus_connections) flb_free(ctx->plus_connections);
+ if (ctx->plus_ssl) flb_free(ctx->plus_ssl);
+ if (ctx->plus_http_requests) flb_free(ctx->plus_http_requests);
+ if (ctx->server_zones) flb_free(ctx->server_zones);
+ if (ctx->location_zones) flb_free(ctx->location_zones);
+ if (ctx->upstreams) flb_free(ctx->upstreams);
+ if (ctx->streams) flb_free(ctx->streams);
+ if (ctx->stream_upstreams) flb_free(ctx->stream_upstreams);
+ }
+ flb_free(ctx);
+ return 0;
+}
+
+/**
+ * Callback exit function to cleanup plugin
+ *
+ * @param data Pointer cast to struct nginx_ctx
+ * @param config Unused
+ *
+ * @return int Always returns 0
+ */
+static int nginx_exit(void *data, struct flb_config *config)
+{
+ struct nginx_ctx *ctx = (struct nginx_ctx *)data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ nginx_ctx_destroy(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "status_url", DEFAULT_STATUS_URL,
+ 0, FLB_TRUE, offsetof(struct nginx_ctx, status_url),
+ "Define URL of stub status handler"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "nginx_plus", "true",
+ 0, FLB_TRUE, offsetof(struct nginx_ctx, is_nginx_plus),
+ "Turn on NGINX plus mode"
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_nginx_exporter_metrics_plugin = {
+ .name = "nginx_metrics",
+ .description = "Nginx status metrics",
+ .cb_init = nginx_init,
+ .cb_pre_run = NULL,
+ .cb_collect = nginx_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = nginx_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET|FLB_INPUT_CORO,
+};
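+
+/*
+ * Example usage (illustrative only, not part of the original patch), assuming
+ * the NGINX Plus API is reachable on 127.0.0.1:8080 under /api; host and port
+ * come from the generic network properties, status_url and nginx_plus from
+ * the configuration map above:
+ *
+ *   [INPUT]
+ *       name        nginx_metrics
+ *       host        127.0.0.1
+ *       port        8080
+ *       status_url  /api
+ *       nginx_plus  on
+ */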
diff --git a/src/fluent-bit/plugins/in_nginx_exporter_metrics/nginx.h b/src/fluent-bit/plugins/in_nginx_exporter_metrics/nginx.h
new file mode 100644
index 000000000..97538587b
--- /dev/null
+++ b/src/fluent-bit/plugins/in_nginx_exporter_metrics/nginx.h
@@ -0,0 +1,150 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NGINX_H
+#define FLB_IN_NGINX_H
+
+#include <msgpack.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_network.h>
+
+#define DEFAULT_STATUS_URL "/status"
+
+struct nginx_ctx
+{
+ int coll_id; /* collector id */
+ flb_sds_t status_url;
+ struct flb_parser *parser;
+    struct flb_input_instance *ins; /* Input plugin instance */
+ struct flb_upstream *upstream;
+ struct cmt *cmt;
+ struct cmt_counter *connections_accepted;
+ struct cmt_counter *connections_handled;
+ struct cmt_counter *connections_total;
+ struct cmt_gauge *connection_active;
+ struct cmt_gauge *connections_active;
+ struct cmt_gauge *connections_reading;
+ struct cmt_gauge *connections_writing;
+ struct cmt_gauge *connections_waiting;
+ struct cmt_gauge *connection_up;
+ bool is_up;
+ bool is_nginx_plus;
+ int nginx_plus_version;
+
+ struct nginx_plus_connections *plus_connections;
+ struct nginx_plus_http_requests *plus_http_requests;
+ struct nginx_plus_ssl *plus_ssl;
+ struct nginx_plus_server_zones *server_zones;
+ struct nginx_plus_location_zones *location_zones;
+ struct nginx_plus_upstreams *upstreams;
+ struct nginx_plus_streams *streams;
+ struct nginx_plus_stream_upstreams *stream_upstreams;
+};
+
+struct nginx_status
+{
+ uint64_t active;
+ uint64_t reading;
+ uint64_t writing;
+ uint64_t waiting;
+ uint64_t accepts;
+ uint64_t handled;
+ uint64_t requests;
+};
+
+struct nginx_plus_connections {
+ struct cmt_counter *connections_accepted;
+ struct cmt_counter *connections_dropped;
+ struct cmt_counter *connections_active;
+ struct cmt_counter *connections_idle;
+};
+
+struct nginx_plus_ssl {
+ struct cmt_counter *handshakes;
+ struct cmt_counter *handshakes_failed;
+ struct cmt_counter *session_reuses;
+};
+
+struct nginx_plus_http_requests {
+ struct cmt_counter *total;
+ struct cmt_counter *current;
+};
+
+struct nginx_plus_server_zones {
+ struct cmt_counter *discarded;
+ struct cmt_counter *processing;
+ struct cmt_counter *received;
+ struct cmt_counter *requests;
+ struct cmt_counter *responses;
+ struct cmt_counter *sent;
+};
+
+struct nginx_plus_upstreams {
+ //struct nginx_plux_upstream_peer **peers;
+ struct cmt_gauge *keepalives;
+ struct cmt_gauge *zombies;
+ // per peer
+ struct cmt_gauge *active;
+ struct cmt_counter *fails;
+ struct cmt_gauge *header_time;
+ struct cmt_gauge *limit;
+ struct cmt_counter *received;
+ struct cmt_counter *requests;
+ struct cmt_counter *responses;
+ struct cmt_gauge *response_time;
+ struct cmt_counter *sent;
+ struct cmt_gauge *state;
+ struct cmt_counter *unavail;
+};
+
+struct nginx_plus_location_zones {
+ struct cmt_counter *discarded;
+ struct cmt_counter *received;
+ struct cmt_counter *requests;
+ struct cmt_counter *responses;
+ struct cmt_counter *sent;
+};
+
+struct nginx_plus_streams {
+ struct cmt_counter *connections;
+ struct cmt_counter *discarded;
+ struct cmt_counter *processing;
+ struct cmt_counter *received;
+ struct cmt_counter *sent;
+ struct cmt_counter *sessions;
+};
+
+struct nginx_plus_stream_upstreams {
+ struct cmt_gauge *zombies;
+ // per peer
+ struct cmt_gauge *active;
+ struct cmt_counter *fails;
+ struct cmt_gauge *limit;
+ struct cmt_counter *received;
+ struct cmt_gauge *connect_time;
+ struct cmt_gauge *first_byte_time;
+ struct cmt_counter *connections;
+ struct cmt_gauge *response_time;
+ struct cmt_counter *sent;
+ struct cmt_gauge *state;
+ struct cmt_counter *unavail;
+};
+
+#endif
\ No newline at end of file
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/CMakeLists.txt b/src/fluent-bit/plugins/in_node_exporter_metrics/CMakeLists.txt
new file mode 100644
index 000000000..16584dc06
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/CMakeLists.txt
@@ -0,0 +1,26 @@
+set(src
+ ne_cpu.c
+ ne_meminfo.c
+ ne_diskstats.c
+ ne_filesystem.c
+ ne_uname.c
+ ne_stat_linux.c
+ ne_vmstat_linux.c
+ ne_netdev.c
+ ne_time.c
+ ne_loadavg.c
+ ne_filefd_linux.c
+ ne_textfile.c
+ ne_utils.c
+ ne_config.c
+ ne.c
+ )
+
+if(FLB_HAVE_SYSTEMD_SDBUS)
+set(src
+ ${src}
+ ne_systemd.c
+ )
+endif()
+
+FLB_PLUGIN(in_node_exporter_metrics "${src}" "")
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne.c
new file mode 100644
index 000000000..d77817957
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne.c
@@ -0,0 +1,1107 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "ne.h"
+#include "ne_config.h"
+#include "ne_filefd_linux.h"
+
+/* collectors */
+#include "ne_cpu.h"
+#include "ne_cpufreq.h"
+#include "ne_meminfo.h"
+#include "ne_diskstats.h"
+#include "ne_filesystem.h"
+#include "ne_uname.h"
+#include "ne_stat_linux.h"
+#include "ne_time.h"
+#include "ne_loadavg.h"
+#include "ne_vmstat_linux.h"
+#include "ne_netdev.h"
+#include "ne_textfile.h"
+#include "ne_systemd.h"
+
+static int ne_timer_cpu_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_ne *ctx = in_context;
+
+ ne_cpu_update(ctx);
+
+ return 0;
+}
+
+static int ne_timer_cpufreq_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_ne *ctx = in_context;
+
+ ne_cpufreq_update(ctx);
+
+ return 0;
+}
+
+static int ne_timer_meminfo_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_ne *ctx = in_context;
+
+ ne_meminfo_update(ctx);
+
+ return 0;
+}
+
+static int ne_timer_diskstats_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_ne *ctx = in_context;
+
+ ne_diskstats_update(ctx);
+
+ return 0;
+}
+
+static int ne_timer_filesystem_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_ne *ctx = in_context;
+
+ ne_filesystem_update(ctx);
+
+ return 0;
+}
+
+static int ne_timer_uname_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_ne *ctx = in_context;
+
+ ne_uname_update(ctx);
+
+ return 0;
+}
+
+static int ne_timer_stat_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_ne *ctx = in_context;
+
+ ne_stat_update(ctx);
+
+ return 0;
+}
+
+static int ne_timer_time_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_ne *ctx = in_context;
+
+ ne_time_update(ctx);
+
+ return 0;
+}
+
+static int ne_timer_loadavg_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_ne *ctx = in_context;
+
+ ne_loadavg_update(ctx);
+
+ return 0;
+}
+
+static int ne_timer_vmstat_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_ne *ctx = in_context;
+
+ ne_vmstat_update(ctx);
+
+ return 0;
+}
+
+static int ne_timer_netdev_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_ne *ctx = in_context;
+
+ ne_netdev_update(ctx);
+
+ return 0;
+}
+
+static int ne_timer_filefd_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_ne *ctx = in_context;
+
+ ne_filefd_update(ctx);
+
+ return 0;
+}
+
+static int ne_timer_textfile_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_ne *ctx = in_context;
+
+ ne_textfile_update(ctx);
+
+ return 0;
+}
+
+static int ne_timer_systemd_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_ne *ctx = in_context;
+
+ ne_systemd_update(ctx);
+
+ return 0;
+}
+
+struct flb_ne_callback {
+ char *name;
+ void (*func)(char *, void *, void *);
+};
+
+static int ne_update_cb(struct flb_ne *ctx, char *name);
+
+static void update_metrics(struct flb_input_instance *ins, struct flb_ne *ctx)
+{
+ int ret;
+ struct mk_list *head;
+ struct flb_slist_entry *entry;
+
+ /* Update our metrics */
+ if (ctx->metrics) {
+ mk_list_foreach(head, ctx->metrics) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+ ret = flb_callback_exists(ctx->callback, entry->str);
+ if (ret == FLB_TRUE) {
+ ne_update_cb(ctx, entry->str);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "Callback for metrics '%s' is not registered", entry->str);
+ }
+ }
+ }
+}
+
+/*
+ * Update the metrics, this function is invoked every time 'scrape_interval'
+ * expires.
+ */
+static int cb_ne_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ struct flb_ne *ctx = in_context;
+
+ update_metrics(ins, ctx);
+
+ /* Append the updated metrics */
+ ret = flb_input_metrics_append(ins, NULL, 0, ctx->cmt);
+ if (ret != 0) {
+ flb_plg_error(ins, "could not append metrics");
+ }
+
+ return 0;
+}
+
+static void ne_cpu_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_ne *ctx = p1;
+
+ ne_cpu_update(ctx);
+}
+
+static void ne_cpufreq_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_ne *ctx = p1;
+
+ ne_cpufreq_update(ctx);
+}
+
+static void ne_meminfo_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_ne *ctx = p1;
+
+ ne_meminfo_update(ctx);
+}
+
+static void ne_diskstats_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_ne *ctx = p1;
+
+ ne_diskstats_update(ctx);
+}
+
+static void ne_filesystem_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_ne *ctx = p1;
+
+ ne_filesystem_update(ctx);
+}
+
+static void ne_uname_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_ne *ctx = p1;
+
+ ne_uname_update(ctx);
+}
+
+static void ne_stat_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_ne *ctx = p1;
+
+ ne_stat_update(ctx);
+}
+
+static void ne_time_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_ne *ctx = p1;
+
+ ne_time_update(ctx);
+}
+
+static void ne_loadavg_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_ne *ctx = p1;
+
+ ne_loadavg_update(ctx);
+}
+
+static void ne_vmstat_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_ne *ctx = p1;
+
+ ne_vmstat_update(ctx);
+}
+
+static void ne_netdev_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_ne *ctx = p1;
+
+ ne_netdev_update(ctx);
+}
+
+static void ne_filefd_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_ne *ctx = p1;
+
+ ne_filefd_update(ctx);
+}
+
+static void ne_textfile_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_ne *ctx = p1;
+
+ ne_textfile_update(ctx);
+}
+
+static void ne_systemd_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_ne *ctx = p1;
+
+ ne_systemd_update(ctx);
+}
+
+static int ne_update_cb(struct flb_ne *ctx, char *name)
+{
+ int ret;
+
+ ret = flb_callback_do(ctx->callback, name, ctx, NULL);
+ return ret;
+}
+
+/*
+ * Callbacks Table
+ */
+struct flb_ne_callback ne_callbacks[] = {
+ /* metrics */
+ { "cpufreq", ne_cpufreq_update_cb },
+ { "cpu", ne_cpu_update_cb },
+ { "meminfo", ne_meminfo_update_cb },
+ { "diskstats", ne_diskstats_update_cb },
+ { "filesystem", ne_filesystem_update_cb },
+ { "uname", ne_uname_update_cb },
+ { "stat", ne_stat_update_cb },
+ { "time", ne_time_update_cb },
+ { "loadavg", ne_loadavg_update_cb },
+ { "vmstat", ne_vmstat_update_cb },
+ { "netdev", ne_netdev_update_cb },
+ { "filefd", ne_filefd_update_cb },
+ { "textfile", ne_textfile_update_cb },
+ { "systemd", ne_systemd_update_cb },
+ { 0 }
+};
+
+static int in_ne_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ int metric_idx = -1;
+ struct flb_ne *ctx;
+ struct mk_list *head;
+ struct flb_slist_entry *entry;
+ struct flb_ne_callback *cb;
+
+ /* Create plugin context */
+ ctx = flb_ne_config_create(in, config);
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ /* Initialize fds */
+ ctx->coll_fd = -1;
+ ctx->coll_cpu_fd = -1;
+ ctx->coll_cpufreq_fd = -1;
+ ctx->coll_meminfo_fd = -1;
+ ctx->coll_diskstats_fd = -1;
+ ctx->coll_filesystem_fd = -1;
+ ctx->coll_uname_fd = -1;
+ ctx->coll_stat_fd = -1;
+ ctx->coll_time_fd = -1;
+ ctx->coll_loadavg_fd = -1;
+ ctx->coll_vmstat_fd = -1;
+ ctx->coll_netdev_fd = -1;
+ ctx->coll_filefd_fd = -1;
+ ctx->coll_textfile_fd = -1;
+ ctx->coll_systemd_fd = -1;
+
+ ctx->callback = flb_callback_create(in->name);
+ if (!ctx->callback) {
+ flb_plg_error(ctx->ins, "Create callback failed");
+ return -1;
+ }
+
+ /* Associate context with the instance */
+ flb_input_set_context(in, ctx);
+
+ /* Create the collector */
+ ret = flb_input_set_collector_time(in,
+ cb_ne_collect,
+ ctx->scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_fd = ret;
+
+ /* Check and initialize enabled metrics */
+ if (ctx->metrics) {
+ mk_list_foreach(head, ctx->metrics) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+ ret = flb_callback_exists(ctx->callback, entry->str);
+
+ if (ret == FLB_FALSE) {
+ if (strncmp(entry->str, "cpufreq", 7) == 0) {
+ if (ctx->cpufreq_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 0;
+ }
+ else if (ctx->cpufreq_scrape_interval > 0) {
+ /* Create the cpufreq collector */
+ ret = flb_input_set_collector_time(in,
+ ne_timer_cpufreq_metrics_cb,
+ ctx->cpufreq_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set cpufreq collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_cpufreq_fd = ret;
+ }
+ ne_cpufreq_init(ctx);
+ }
+ else if (strncmp(entry->str, "cpu", 3) == 0) {
+ if (ctx->cpu_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 1;
+ }
+ else if (ctx->cpu_scrape_interval > 0) {
+ /* Create the cpu collector */
+ ret = flb_input_set_collector_time(in,
+ ne_timer_cpu_metrics_cb,
+ ctx->cpu_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set cpu collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_cpu_fd = ret;
+ }
+ ne_cpu_init(ctx);
+ }
+ else if (strncmp(entry->str, "meminfo", 7) == 0) {
+ if (ctx->meminfo_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 2;
+ }
+ else if (ctx->meminfo_scrape_interval > 0) {
+ /* Create the meminfo collector */
+ ret = flb_input_set_collector_time(in,
+ ne_timer_meminfo_metrics_cb,
+ ctx->meminfo_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set meminfo collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_meminfo_fd = ret;
+ }
+ ne_meminfo_init(ctx);
+ }
+ else if (strncmp(entry->str, "diskstats", 9) == 0) {
+ if (ctx->diskstats_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 3;
+ }
+ else if (ctx->diskstats_scrape_interval > 0) {
+ /* Create the diskstats collector */
+ ret = flb_input_set_collector_time(in,
+ ne_timer_diskstats_metrics_cb,
+ ctx->diskstats_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set diskstats collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_diskstats_fd = ret;
+ }
+ ne_diskstats_init(ctx);
+ }
+ else if (strncmp(entry->str, "filesystem", 10) == 0) {
+ if (ctx->filesystem_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 4;
+ }
+ else if (ctx->filesystem_scrape_interval > 0) {
+ /* Create the filesystem collector */
+ ret = flb_input_set_collector_time(in,
+ ne_timer_filesystem_metrics_cb,
+ ctx->filesystem_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set filesystem collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_filesystem_fd = ret;
+ }
+ ne_filesystem_init(ctx);
+ }
+ else if (strncmp(entry->str, "uname", 5) == 0) {
+ if (ctx->uname_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 5;
+ }
+ else if (ctx->uname_scrape_interval > 0) {
+ /* Create the uname collector */
+ ret = flb_input_set_collector_time(in,
+ ne_timer_uname_metrics_cb,
+ ctx->uname_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set uname collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_uname_fd = ret;
+ }
+ ne_uname_init(ctx);
+ }
+ else if (strncmp(entry->str, "stat", 4) == 0) {
+ if (ctx->stat_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 6;
+ }
+ else if (ctx->stat_scrape_interval > 0) {
+ /* Create the stat collector */
+ ret = flb_input_set_collector_time(in,
+ ne_timer_stat_metrics_cb,
+ ctx->stat_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set meminfo collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_stat_fd = ret;
+ }
+ ne_stat_init(ctx);
+ }
+ else if (strncmp(entry->str, "time", 4) == 0) {
+ if (ctx->time_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 7;
+ }
+ else if (ctx->time_scrape_interval > 0) {
+ /* Create the time collector */
+ ret = flb_input_set_collector_time(in,
+ ne_timer_time_metrics_cb,
+ ctx->time_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set time collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_time_fd = ret;
+ }
+ ne_time_init(ctx);
+ }
+ else if (strncmp(entry->str, "loadavg", 7) == 0) {
+ if (ctx->loadavg_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 8;
+ }
+ else if (ctx->loadavg_scrape_interval > 0) {
+ /* Create the loadavg collector */
+ ret = flb_input_set_collector_time(in,
+ ne_timer_loadavg_metrics_cb,
+ ctx->loadavg_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set loadavg collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_loadavg_fd = ret;
+ }
+ ne_loadavg_init(ctx);
+ }
+ else if (strncmp(entry->str, "vmstat", 6) == 0) {
+ if (ctx->vmstat_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 9;
+ }
+ else if (ctx->vmstat_scrape_interval > 0) {
+ /* Create the vmstat collector */
+ ret = flb_input_set_collector_time(in,
+ ne_timer_vmstat_metrics_cb,
+ ctx->vmstat_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set vmstat collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_vmstat_fd = ret;
+ }
+ ne_vmstat_init(ctx);
+ }
+ else if (strncmp(entry->str, "netdev", 6) == 0) {
+ if (ctx->netdev_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 10;
+ }
+ else if (ctx->netdev_scrape_interval > 0) {
+ /* Create the netdev collector */
+ ret = flb_input_set_collector_time(in,
+ ne_timer_netdev_metrics_cb,
+ ctx->netdev_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set netdev collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_netdev_fd = ret;
+ }
+ ne_netdev_init(ctx);
+ }
+ else if (strncmp(entry->str, "filefd", 6) == 0) {
+ if (ctx->filefd_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 11;
+ }
+ else if (ctx->filefd_scrape_interval > 0) {
+ /* Create the filefd collector */
+ ret = flb_input_set_collector_time(in,
+ ne_timer_filefd_metrics_cb,
+ ctx->filefd_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set filefd collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_filefd_fd = ret;
+ }
+ ne_filefd_init(ctx);
+ }
+ else if (strncmp(entry->str, "textfile", 8) == 0) {
+ if (ctx->textfile_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 12;
+ }
+ else if (ctx->textfile_scrape_interval > 0) {
+ /* Create the textfile collector */
+ ret = flb_input_set_collector_time(in,
+ ne_timer_textfile_metrics_cb,
+ ctx->textfile_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set textfile collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_textfile_fd = ret;
+ }
+ ne_textfile_init(ctx);
+ }
+ else if (strncmp(entry->str, "systemd", 7) == 0) {
+ if (ctx->systemd_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 13;
+ }
+ else if (ctx->systemd_scrape_interval > 0) {
+ /* Create the systemd collector */
+ ret = flb_input_set_collector_time(in,
+ ne_timer_systemd_metrics_cb,
+ ctx->systemd_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set systemd collector for Node Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_systemd_fd = ret;
+ }
+ ne_systemd_init(ctx);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "Unknown metrics: %s", entry->str);
+ metric_idx = -1;
+ }
+
+ if (metric_idx >= 0) {
+ cb = &ne_callbacks[metric_idx];
+ ret = flb_callback_set(ctx->callback, cb->name, cb->func);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error setting up default "
+ "callback '%s'", cb->name);
+ }
+ }
+ }
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "No metrics is specified");
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int in_ne_exit(void *data, struct flb_config *config)
+{
+ int ret;
+ struct flb_ne *ctx = data;
+ struct mk_list *head;
+ struct flb_slist_entry *entry;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ /* Teardown of resources tied to metric callbacks */
+ if (ctx->metrics) {
+ mk_list_foreach(head, ctx->metrics) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+ ret = flb_callback_exists(ctx->callback, entry->str);
+
+ if (ret == FLB_TRUE) {
+ if (strncmp(entry->str, "cpufreq", 7) == 0) {
+ /* nop */
+ }
+ else if (strncmp(entry->str, "cpu", 3) == 0) {
+ /* nop */
+ }
+ else if (strncmp(entry->str, "meminfo", 7) == 0) {
+ ne_meminfo_exit(ctx);
+ }
+ else if (strncmp(entry->str, "diskstats", 9) == 0) {
+ ne_diskstats_exit(ctx);
+ }
+ else if (strncmp(entry->str, "filesystem", 10) == 0) {
+ ne_filesystem_exit(ctx);
+ }
+ else if (strncmp(entry->str, "uname", 5) == 0) {
+ /* nop */
+ }
+ else if (strncmp(entry->str, "stat", 4) == 0) {
+ /* nop */
+ }
+ else if (strncmp(entry->str, "time", 4) == 0) {
+ /* nop */
+ }
+ else if (strncmp(entry->str, "loadavg", 7) == 0) {
+ /* nop */
+ }
+ else if (strncmp(entry->str, "vmstat", 6) == 0) {
+ ne_vmstat_exit(ctx);
+ }
+ else if (strncmp(entry->str, "netdev", 6) == 0) {
+ ne_netdev_exit(ctx);
+ }
+ else if (strncmp(entry->str, "filefd", 6) == 0) {
+ /* nop */
+ }
+ else if (strncmp(entry->str, "textfile", 8) == 0) {
+ /* nop */
+ }
+ else if (strncmp(entry->str, "systemd", 7) == 0) {
+ ne_systemd_exit(ctx);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "Unknown metrics: %s", entry->str);
+ }
+ }
+ }
+ }
+
+ /* destroy callback context */
+ if (ctx->callback) {
+ flb_callback_destroy(ctx->callback);
+ }
+
+ /* Teardown of resources tied to per-collector timers */
+ if (ctx->coll_meminfo_fd != -1) {
+ ne_meminfo_exit(ctx);
+ }
+ if (ctx->coll_diskstats_fd != -1) {
+ ne_diskstats_exit(ctx);
+ }
+ if (ctx->coll_filesystem_fd != -1) {
+ ne_filesystem_exit(ctx);
+ }
+ if (ctx->coll_vmstat_fd != -1) {
+ ne_vmstat_exit(ctx);
+ }
+ if (ctx->coll_netdev_fd != -1) {
+ ne_netdev_exit(ctx);
+ }
+ if (ctx->coll_systemd_fd != -1) {
+ ne_systemd_exit(ctx);
+ }
+
+ flb_ne_config_destroy(ctx);
+
+ return 0;
+}
+
+static void in_ne_pause(void *data, struct flb_config *config)
+{
+ struct flb_ne *ctx = data;
+
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+ if (ctx->coll_cpu_fd != -1) {
+ flb_input_collector_pause(ctx->coll_cpu_fd, ctx->ins);
+ }
+ if (ctx->coll_cpufreq_fd != -1) {
+ flb_input_collector_pause(ctx->coll_cpufreq_fd, ctx->ins);
+ }
+ if (ctx->coll_meminfo_fd != -1) {
+ flb_input_collector_pause(ctx->coll_meminfo_fd, ctx->ins);
+ }
+ if (ctx->coll_diskstats_fd != -1) {
+ flb_input_collector_pause(ctx->coll_diskstats_fd, ctx->ins);
+ }
+ if (ctx->coll_filesystem_fd != -1) {
+ flb_input_collector_pause(ctx->coll_filesystem_fd, ctx->ins);
+ }
+ if (ctx->coll_uname_fd != -1) {
+ flb_input_collector_pause(ctx->coll_uname_fd, ctx->ins);
+ }
+ if (ctx->coll_stat_fd != -1) {
+ flb_input_collector_pause(ctx->coll_stat_fd, ctx->ins);
+ }
+ if (ctx->coll_time_fd != -1) {
+ flb_input_collector_pause(ctx->coll_time_fd, ctx->ins);
+ }
+ if (ctx->coll_loadavg_fd != -1) {
+ flb_input_collector_pause(ctx->coll_loadavg_fd, ctx->ins);
+ }
+ if (ctx->coll_vmstat_fd != -1) {
+ flb_input_collector_pause(ctx->coll_vmstat_fd, ctx->ins);
+ }
+ if (ctx->coll_netdev_fd != -1) {
+ flb_input_collector_pause(ctx->coll_netdev_fd, ctx->ins);
+ }
+ if (ctx->coll_filefd_fd != -1) {
+ flb_input_collector_pause(ctx->coll_filefd_fd, ctx->ins);
+ }
+ if (ctx->coll_textfile_fd != -1) {
+ flb_input_collector_pause(ctx->coll_textfile_fd, ctx->ins);
+ }
+ if (ctx->coll_systemd_fd != -1) {
+ flb_input_collector_pause(ctx->coll_systemd_fd, ctx->ins);
+ }
+}
+
+static void in_ne_resume(void *data, struct flb_config *config)
+{
+ struct flb_ne *ctx = data;
+
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+ if (ctx->coll_cpu_fd != -1) {
+ flb_input_collector_resume(ctx->coll_cpu_fd, ctx->ins);
+ }
+ if (ctx->coll_cpufreq_fd != -1) {
+ flb_input_collector_resume(ctx->coll_cpufreq_fd, ctx->ins);
+ }
+ if (ctx->coll_meminfo_fd != -1) {
+ flb_input_collector_resume(ctx->coll_meminfo_fd, ctx->ins);
+ }
+ if (ctx->coll_diskstats_fd != -1) {
+ flb_input_collector_resume(ctx->coll_diskstats_fd, ctx->ins);
+ }
+ if (ctx->coll_filesystem_fd != -1) {
+ flb_input_collector_resume(ctx->coll_filesystem_fd, ctx->ins);
+ }
+ if (ctx->coll_uname_fd != -1) {
+ flb_input_collector_resume(ctx->coll_uname_fd, ctx->ins);
+ }
+ if (ctx->coll_stat_fd != -1) {
+ flb_input_collector_resume(ctx->coll_stat_fd, ctx->ins);
+ }
+ if (ctx->coll_time_fd != -1) {
+ flb_input_collector_resume(ctx->coll_time_fd, ctx->ins);
+ }
+ if (ctx->coll_loadavg_fd != -1) {
+ flb_input_collector_resume(ctx->coll_loadavg_fd, ctx->ins);
+ }
+ if (ctx->coll_vmstat_fd != -1) {
+ flb_input_collector_resume(ctx->coll_vmstat_fd, ctx->ins);
+ }
+ if (ctx->coll_netdev_fd != -1) {
+ flb_input_collector_resume(ctx->coll_netdev_fd, ctx->ins);
+ }
+ if (ctx->coll_filefd_fd != -1) {
+ flb_input_collector_resume(ctx->coll_filefd_fd, ctx->ins);
+ }
+ if (ctx->coll_textfile_fd != -1) {
+ flb_input_collector_resume(ctx->coll_textfile_fd, ctx->ins);
+ }
+ if (ctx->coll_systemd_fd != -1) {
+ flb_input_collector_resume(ctx->coll_systemd_fd, ctx->ins);
+ }
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_TIME, "scrape_interval", "5",
+ 0, FLB_TRUE, offsetof(struct flb_ne, scrape_interval),
+ "scrape interval to collect metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.cpu.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_ne, cpu_scrape_interval),
+ "scrape interval to collect cpu metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.cpufreq.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_ne, cpufreq_scrape_interval),
+ "scrape interval to collect cpufreq metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.meminfo.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_ne, meminfo_scrape_interval),
+ "scrape interval to collect meminfo metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.diskstats.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_ne, diskstats_scrape_interval),
+ "scrape interval to collect diskstats metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.filesystem.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_ne, filesystem_scrape_interval),
+ "scrape interval to collect filesystem metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.uname.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_ne, uname_scrape_interval),
+ "scrape interval to collect uname metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.stat.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_ne, stat_scrape_interval),
+ "scrape interval to collect stat metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.time.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_ne, time_scrape_interval),
+ "scrape interval to collect time metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.loadavg.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_ne, loadavg_scrape_interval),
+ "scrape interval to collect loadavg metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.vmstat.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_ne, vmstat_scrape_interval),
+ "scrape interval to collect vmstat metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.netdev.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_ne, netdev_scrape_interval),
+ "scrape interval to collect netdev metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.filefd.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_ne, filefd_scrape_interval),
+ "scrape interval to collect filefd metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.textfile.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_ne, textfile_scrape_interval),
+ "scrape interval to collect textfile metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.systemd.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_ne, systemd_scrape_interval),
+ "scrape interval to collect systemd metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_CLIST, "metrics",
+ "cpu,cpufreq,meminfo,diskstats,filesystem,uname,stat,time,loadavg,vmstat,netdev,filefd,systemd",
+ 0, FLB_TRUE, offsetof(struct flb_ne, metrics),
+ "Comma separated list of keys to enable metrics."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "collector.textfile.path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_ne, path_textfile),
+ "Specify file path or directory to collect textfile metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "path.procfs", "/proc",
+ 0, FLB_TRUE, offsetof(struct flb_ne, path_procfs),
+ "procfs mount point"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "path.sysfs", "/sys",
+ 0, FLB_TRUE, offsetof(struct flb_ne, path_sysfs),
+ "sysfs mount point"
+ },
+
+ /* Systemd specific settings */
+ {
+ FLB_CONFIG_MAP_BOOL, "systemd_service_restart_metrics", "false",
+ 0, FLB_TRUE, offsetof(struct flb_ne, systemd_include_service_restarts),
+ "include systemd service restart metrics"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "systemd_unit_start_time_metrics", "false",
+ 0, FLB_TRUE, offsetof(struct flb_ne, systemd_include_unit_start_times),
+ "include systemd unit start time metrics"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "systemd_include_service_task_metrics", "false",
+ 0, FLB_TRUE, offsetof(struct flb_ne, systemd_include_service_task_metrics),
+ "include systemd service task metrics"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "systemd_include_pattern", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_ne, systemd_regex_include_list_text),
+ "include list regular expression"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "systemd_exclude_pattern", ".+\\.(automount|device|mount|scope|slice)",
+ 0, FLB_TRUE, offsetof(struct flb_ne, systemd_regex_exclude_list_text),
+ "exclude list regular expression"
+ },
+
+ /* filesystem specific settings */
+ {
+ FLB_CONFIG_MAP_STR, "filesystem.ignore_mount_point_regex", IGNORED_MOUNT_POINTS,
+ 0, FLB_TRUE, offsetof(struct flb_ne, fs_regex_ingore_mount_point_text),
+ "ignore regular expression for mount points"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "filesystem.ignore_filesystem_type_regex", IGNORED_FS_TYPES,
+ 0, FLB_TRUE, offsetof(struct flb_ne, fs_regex_ingore_filesystem_type_text),
+ "ignore regular expression for filesystem types"
+ },
+
+ /* diskstats specific settings */
+ {
+ FLB_CONFIG_MAP_STR, "diskstats.ignore_device_regex", IGNORED_DEVICES,
+ 0, FLB_TRUE, offsetof(struct flb_ne, dt_regex_skip_devices_text),
+ "ignore regular expression for disk devices"
+ },
+ /* EOF */
+ {0}
+};
+
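+/*
+ * Illustrative usage (not part of this source): with the classic configuration
+ * format, the properties declared in config_map above map to entries such as:
+ *
+ *   [INPUT]
+ *       name                            node_exporter_metrics
+ *       scrape_interval                 5
+ *       collector.cpu.scrape_interval   2
+ *       metrics                         cpu,meminfo,loadavg
+ *       path.procfs                     /proc
+ *       path.sysfs                      /sys
+ */
+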
+struct flb_input_plugin in_node_exporter_metrics_plugin = {
+ .name = "node_exporter_metrics",
+ .description = "Node Exporter Metrics (Prometheus Compatible)",
+ .cb_init = in_ne_init,
+ .cb_pre_run = NULL,
+ .cb_collect = cb_ne_collect,
+ .cb_flush_buf = NULL,
+ .config_map = config_map,
+ .cb_pause = in_ne_pause,
+ .cb_resume = in_ne_resume,
+ .cb_exit = in_ne_exit,
+ .flags = FLB_INPUT_THREADED
+};
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne.h
new file mode 100644
index 000000000..ba6e89caa
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne.h
@@ -0,0 +1,191 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_NODE_EXPORTER_H
+#define FLB_NODE_EXPORTER_H
+
+/* utils: scan content type expected */
+#define NE_SCAN_FILE 1
+#define NE_SCAN_DIR 2
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_regex.h>
+#include <fluent-bit/flb_hash_table.h>
+#include <fluent-bit/flb_metrics.h>
+
+/* filesystem: regex for ignoring mount points and filesystem types */
+
+#define IGNORED_MOUNT_POINTS "^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/)"
+#define IGNORED_FS_TYPES "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
+
+/* diskstats: regex for ignoring devices */
+#define IGNORED_DEVICES "^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$"
+
+struct flb_ne {
+ /* configuration */
+ flb_sds_t path_procfs;
+ flb_sds_t path_sysfs;
+ flb_sds_t path_textfile;
+ int scrape_interval;
+
+ int coll_fd; /* collector fd */
+ struct cmt *cmt; /* cmetrics context */
+ struct flb_input_instance *ins; /* input instance */
+ struct flb_callback *callback; /* metric callback */
+ struct mk_list *metrics; /* enabled metrics */
+
+ /* Individual intervals for metrics */
+ int cpu_scrape_interval;
+ int cpufreq_scrape_interval;
+ int meminfo_scrape_interval;
+ int diskstats_scrape_interval;
+ int filesystem_scrape_interval;
+ int uname_scrape_interval;
+ int stat_scrape_interval;
+ int time_scrape_interval;
+ int loadavg_scrape_interval;
+ int vmstat_scrape_interval;
+ int netdev_scrape_interval;
+ int filefd_scrape_interval;
+ int textfile_scrape_interval;
+ int systemd_scrape_interval;
+
+ int coll_cpu_fd; /* collector fd (cpu) */
+ int coll_cpufreq_fd; /* collector fd (cpufreq) */
+ int coll_meminfo_fd; /* collector fd (meminfo) */
+ int coll_diskstats_fd; /* collector fd (diskstats) */
+ int coll_filesystem_fd; /* collector fd (filesystem) */
+ int coll_uname_fd; /* collector fd (uname) */
+ int coll_stat_fd; /* collector fd (stat) */
+ int coll_time_fd; /* collector fd (time) */
+ int coll_loadavg_fd; /* collector fd (loadavg) */
+ int coll_vmstat_fd; /* collector fd (vmstat) */
+ int coll_netdev_fd; /* collector fd (netdev) */
+ int coll_filefd_fd; /* collector fd (filefd) */
+ int coll_textfile_fd; /* collector fd (textfile) */
+ int coll_systemd_fd; /* collector fd (systemd) */
+
+ /*
+ * Metrics Contexts
+ * ----------------
+ */
+
+ /* cpu_linux */
+ struct cmt_counter *cpu_core_throttles;
+ struct cmt_counter *cpu_package_throttles;
+
+ /* cpufreq_linux */
+ struct cmt_gauge *cpu_freq_hertz;
+ struct cmt_gauge *cpu_freq_min_hertz;
+ struct cmt_gauge *cpu_freq_max_hertz;
+
+ /* cpufreq scaling linux */
+ struct cmt_gauge *cpu_scaling_freq_hertz;
+ struct cmt_gauge *cpu_scaling_freq_max_hertz;
+ struct cmt_gauge *cpu_scaling_freq_min_hertz;
+
+ /* cpu seconds & guest seconds */
+ struct cmt_counter *cpu_seconds;
+ struct cmt_counter *cpu_guest_seconds;
+
+ /* meminfo hash table */
+ struct flb_hash_table *meminfo_ht;
+
+ /* diskstats: abbreviation 'dt' */
+ void *dt_metrics;
+ struct flb_regex *dt_regex_skip_devices;
+ flb_sds_t dt_regex_skip_devices_text;
+
+ /* uname */
+ struct cmt_gauge *uname;
+
+ /* stat_linux */
+ struct cmt_counter *st_intr;
+ struct cmt_counter *st_context_switches;
+ struct cmt_gauge *st_boot_time;
+ struct cmt_counter *st_forks;
+ struct cmt_gauge *st_procs_running;
+ struct cmt_gauge *st_procs_blocked;
+
+ /* vmstat_linux */
+ struct flb_hash_table *vml_ht;
+ struct flb_regex *vml_regex_fields;
+
+ /* netdev */
+ struct flb_hash_table *netdev_ht;
+
+ /* time */
+ struct cmt_gauge *time;
+
+ /* loadavg */
+ struct cmt_gauge *lavg_1;
+ struct cmt_gauge *lavg_5;
+ struct cmt_gauge *lavg_15;
+
+ /* filefd_linux */
+ struct cmt_gauge *filefd_allocated;
+ struct cmt_gauge *filefd_maximum;
+
+ /* filesystem: abbreviation 'fs' */
+ struct cmt_gauge *fs_avail_bytes;
+ struct cmt_gauge *fs_device_error;
+ struct cmt_gauge *fs_files;
+ struct cmt_gauge *fs_files_free;
+ struct cmt_gauge *fs_free_bytes;
+ struct cmt_gauge *fs_readonly;
+ struct cmt_gauge *fs_size_bytes;
+ flb_sds_t fs_regex_ingore_mount_point_text;
+ flb_sds_t fs_regex_ingore_filesystem_type_text;
+
+ struct flb_regex *fs_regex_read_only;
+ struct flb_regex *fs_regex_skip_mount;
+ struct flb_regex *fs_regex_skip_fs_types;
+
+ /* textfile */
+ struct cmt_counter *load_errors;
+
+ /* systemd */
+
+ struct cmt_gauge *systemd_socket_accepted_connections;
+ struct cmt_gauge *systemd_socket_active_connections;
+ struct cmt_gauge *systemd_socket_refused_connections;
+ struct cmt_counter *systemd_service_restarts;
+ struct cmt_gauge *systemd_unit_start_times;
+ struct cmt_gauge *systemd_system_running;
+ struct cmt_gauge *systemd_timer_last_trigger_seconds;
+ struct cmt_gauge *systemd_unit_state;
+ struct cmt_gauge *systemd_unit_tasks;
+ struct cmt_gauge *systemd_unit_tasks_max;
+ struct cmt_gauge *systemd_units;
+ struct cmt_gauge *systemd_version;
+ void *systemd_dbus_handle;
+ int systemd_initialization_flag;
+ int systemd_include_unit_start_times;
+ int systemd_include_service_restarts;
+ int systemd_include_service_task_metrics;
+ flb_sds_t systemd_regex_include_list_text;
+ flb_sds_t systemd_regex_exclude_list_text;
+ struct flb_regex *systemd_regex_include_list;
+ struct flb_regex *systemd_regex_exclude_list;
+ double libsystemd_version;
+ char *libsystemd_version_text;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_config.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_config.c
new file mode 100644
index 000000000..ccf99d8ee
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_config.c
@@ -0,0 +1,69 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include "ne.h"
+
+struct flb_ne *flb_ne_config_create(struct flb_input_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ struct flb_ne *ctx;
+
+ ctx = flb_calloc(1, sizeof(struct flb_ne));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* mount points */
+ flb_plg_info(ins, "path.procfs = %s", ctx->path_procfs);
+ flb_plg_info(ins, "path.sysfs = %s", ctx->path_sysfs);
+
+ ctx->cmt = cmt_create();
+ if (!ctx->cmt) {
+ flb_plg_error(ins, "could not initialize CMetrics");
+ flb_free(ctx);
+ return NULL;
+ }
+
+
+ return ctx;
+}
+
+void flb_ne_config_destroy(struct flb_ne *ctx)
+{
+ if (!ctx) {
+ return;
+ }
+
+ if (ctx->cmt) {
+ cmt_destroy(ctx->cmt);
+ }
+
+ flb_free(ctx);
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_config.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_config.h
new file mode 100644
index 000000000..2bc1960e0
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_config.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_NE_CONFIG_H
+#define FLB_NE_CONFIG_H
+
+#include <fluent-bit/flb_input_plugin.h>
+#include "ne.h"
+
+struct flb_ne *flb_ne_config_create(struct flb_input_instance *ins,
+ struct flb_config *config);
+
+void flb_ne_config_destroy(struct flb_ne *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpu.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpu.c
new file mode 100644
index 000000000..70fd33ca1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpu.c
@@ -0,0 +1,23 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef __linux__
+#include "ne_cpu_linux.c"
+#include "ne_cpufreq_linux.c"
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpu.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpu.h
new file mode 100644
index 000000000..52c6b574f
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpu.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NE_CPU_H
+#define FLB_IN_NE_CPU_H
+
+#include "ne.h"
+
+int ne_cpu_init(struct flb_ne *ctx);
+int ne_cpu_update(struct flb_ne *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpu_linux.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpu_linux.c
new file mode 100644
index 000000000..8963f0c55
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpu_linux.c
@@ -0,0 +1,396 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "ne.h"
+#include "ne_utils.h"
+
+#include <unistd.h>
+
+/*
+ * See kernel documentation for a description:
+ * https://www.kernel.org/doc/html/latest/filesystems/proc.html
+ *
+ * user: normal processes executing in user mode
+ * nice: niced processes executing in user mode
+ * system: processes executing in kernel mode
+ * idle: twiddling thumbs
+ * iowait: waiting for I/O to complete (this value is not fully reliable; see the kernel docs for caveats)
+ * irq: servicing interrupts
+ * softirq: servicing softirqs
+ * steal: involuntary wait
+ * guest: running a normal guest
+ * guest_nice: running a niced guest
+ *
+ * Be sure to consult the documentation version that matches your kernel; other versions are available at:
+ * https://github.com/torvalds/linux/tree/master/Documentation
+ */
+struct cpu_stat_info {
+ double user;
+ double nice;
+ double system;
+ double idle;
+ double iowait;
+ double irq;
+ double softirq;
+ double steal;
+ double guest;
+ double guest_nice;
+};
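+
+/*
+ * For reference, a per-core /proc/stat line looks like this (illustrative
+ * values):
+ *
+ *   cpu0 4705 150 1120 1644705 180 0 52 0 0 0
+ *
+ * The fields are cumulative USER_HZ ticks in the order listed above; they are
+ * parsed by stat_line() and converted to seconds.
+ */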
+
+/*
+ * Thermal throttle stats, reads /sys/devices/system/cpu/cpu*
+ * ----------------------------------------------------------
+ */
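+
+/*
+ * For each matched CPU directory the following files are read (illustrative
+ * paths, relative to path.sysfs):
+ *
+ *   /devices/system/cpu/cpu0/topology/core_id
+ *   /devices/system/cpu/cpu0/topology/physical_package_id
+ *   /devices/system/cpu/cpu0/thermal_throttle/core_throttle_count
+ *   /devices/system/cpu/cpu0/thermal_throttle/package_throttle_count
+ */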
+static inline int cpu_thermal_init(struct flb_ne *ctx)
+{
+ struct cmt_counter *c;
+
+ c = cmt_counter_create(ctx->cmt, "node", "cpu", "core_throttles_total",
+ "Number of times this CPU core has been throttled.",
+ 2, (char *[]) {"core", "package"});
+ if (!c) {
+ return -1;
+ }
+ ctx->cpu_core_throttles = c;
+
+
+ c = cmt_counter_create(ctx->cmt, "node", "cpu", "package_throttles_total",
+ "Number of times this CPU package has been throttled.",
+ 1, (char *[]) {"package"});
+ if (!c) {
+ return -1;
+ }
+ ctx->cpu_package_throttles = c;
+
+ return 0;
+}
+
+static int cpu_thermal_update(struct flb_ne *ctx, uint64_t ts)
+{
+ int ret;
+ uint64_t core_id = 0;
+ uint64_t physical_package_id = 0;
+ uint64_t core_throttle_count;
+ uint64_t package_throttle_count;
+ char tmp1[32];
+ char tmp2[32];
+ struct mk_list *head;
+ struct mk_list list;
+ struct flb_slist_entry *entry;
+ const char *pattern = "/devices/system/cpu/cpu[0-9]*";
+ /* Status arrays */
+ uint64_t core_throttles_set[32][256];
+ uint64_t package_throttles_set[32];
+
+ ret = ne_utils_path_scan(ctx, ctx->path_sysfs, pattern, NE_SCAN_DIR, &list);
+ if (ret != 0) {
+ return -1;
+ }
+
+ if (mk_list_size(&list) == 0) {
+ return 0;
+ }
+
+ /* Reset arrays status */
+ memset(&core_throttles_set, 0, sizeof(core_throttles_set));
+ memset(&package_throttles_set, 0, sizeof(package_throttles_set));
+
+ /* Process entries */
+ mk_list_foreach(head, &list) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ /* Core ID */
+ ret = ne_utils_file_read_uint64(ctx->path_sysfs,
+ entry->str,
+ "topology", "core_id",
+ &core_id);
+ if (ret != 0) {
+ continue;
+ }
+
+ /* Physical ID */
+ ret = ne_utils_file_read_uint64(ctx->path_sysfs,
+ entry->str,
+ "topology", "physical_package_id",
+ &physical_package_id);
+ if (ret != 0) {
+ continue;
+ }
+
+ /* Only update this kv pair once */
+ if (core_throttles_set[physical_package_id][core_id] != 0) {
+ continue;
+ }
+ core_throttles_set[physical_package_id][core_id] = 1;
+
+ /* Package Metric: node_cpu_core_throttles_total */
+ ret = ne_utils_file_read_uint64(ctx->path_sysfs,
+ entry->str,
+ "thermal_throttle", "core_throttle_count",
+ &core_throttle_count);
+ if (ret != 0) {
+ flb_plg_debug(ctx->ins,
+ "CPU is missing core_throttle_count: %s",
+ entry->str);
+ }
+ else {
+ snprintf(tmp1, sizeof(tmp1) -1, "%" PRIu64, core_id);
+ snprintf(tmp2, sizeof(tmp2) -1, "%" PRIu64, physical_package_id);
+
+ /* Set new value */
+ cmt_counter_set(ctx->cpu_core_throttles, ts,
+ (double) core_throttle_count,
+ 2, (char *[]) {tmp1, tmp2});
+ }
+
+ /* Only update this entry once */
+ if (package_throttles_set[physical_package_id] != 0) {
+ continue;
+ }
+ package_throttles_set[physical_package_id] = 1;
+
+ /* Package Metric: node_cpu_package_throttles_total */
+ ret = ne_utils_file_read_uint64(ctx->path_sysfs,
+ entry->str,
+ "thermal_throttle", "package_throttle_count",
+ &package_throttle_count);
+ if (ret != 0) {
+ flb_plg_debug(ctx->ins,
+ "CPU is missing package_throttle_count: %s",
+ entry->str);
+ }
+ else {
+ /* Set new value */
+ cmt_counter_set(ctx->cpu_package_throttles, ts,
+ (double) package_throttle_count,
+ 1, (char *[]) {tmp2});
+ }
+ }
+ flb_slist_destroy(&list);
+
+ /*
+ * FIXME: continue fixing this:
+ *
+ * https://github.com/prometheus/node_exporter/blob/master/collector/cpu_linux.go#L194
+ */
+
+ return 0;
+}
+
+/*
+ * CPU stats, reads /proc/stat
+ * ---------------------------
+ */
+static inline int cpu_stat_init(struct flb_ne *ctx)
+{
+ struct cmt_counter *c;
+
+ c = cmt_counter_create(ctx->cmt, "node", "cpu", "seconds_total",
+ "Seconds the CPUs spent in each mode.",
+ 2, (char *[]) {"cpu", "mode"});
+ if (!c) {
+ return -1;
+ }
+ ctx->cpu_seconds = c;
+
+ c = cmt_counter_create(ctx->cmt, "node", "cpu", "guest_seconds_total",
+ "Seconds the CPUs spent in guests (VMs) for each mode.",
+ 2, (char *[]) {"cpu", "mode"});
+ if (!c) {
+ return -1;
+ }
+ ctx->cpu_guest_seconds = c;
+
+ return 0;
+}
+
+static int stat_line(char *line, struct cpu_stat_info *st)
+{
+ int ret;
+ double user_hz = sysconf(_SC_CLK_TCK);
+ const char *cpu_fmt = "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf";
+
+ ret = sscanf(line, cpu_fmt,
+ &st->user,
+ &st->nice,
+ &st->system,
+ &st->idle,
+ &st->iowait,
+ &st->irq,
+ &st->softirq,
+ &st->steal,
+ &st->guest,
+ &st->guest_nice);
+
+ /* On some older kernels the 'guest_nice' value may be missing */
+ if (ret < 9) {
+ return -1;
+ }
+ /* Zero-initialize the missing 'guest_nice' value */
+ if (ret == 9) {
+ st->guest_nice = 0;
+ }
+
+ /* Convert to seconds based on USER_HZ kernel param */
+ st->user /= user_hz;
+ st->nice /= user_hz;
+ st->system /= user_hz;
+ st->idle /= user_hz;
+ st->iowait /= user_hz;
+ st->irq /= user_hz;
+ st->softirq /= user_hz;
+ st->steal /= user_hz;
+ st->guest /= user_hz;
+ st->guest_nice /= user_hz;
+
+ return 0;
+}
+
+static int cpu_stat_set_metrics(struct flb_ne *ctx, char *cpu_id,
+ struct cpu_stat_info *st, uint64_t ts)
+{
+
+ /* CPU seconds */
+ cmt_counter_set(ctx->cpu_seconds, ts,
+ st->idle,
+ 2, (char *[]) {cpu_id, "idle"});
+
+ cmt_counter_set(ctx->cpu_seconds, ts,
+ st->iowait,
+ 2, (char *[]) {cpu_id, "iowait"});
+
+ cmt_counter_set(ctx->cpu_seconds, ts,
+ st->irq,
+ 2, (char *[]) {cpu_id, "irq"});
+
+ cmt_counter_set(ctx->cpu_seconds, ts,
+ st->nice,
+ 2, (char *[]) {cpu_id, "nice"});
+
+ cmt_counter_set(ctx->cpu_seconds, ts,
+ st->softirq,
+ 2, (char *[]) {cpu_id, "softirq"});
+
+
+ cmt_counter_set(ctx->cpu_seconds, ts,
+ st->steal,
+ 2, (char *[]) {cpu_id, "steal"});
+
+ cmt_counter_set(ctx->cpu_seconds, ts,
+ st->system,
+ 2, (char *[]) {cpu_id, "system"});
+
+ cmt_counter_set(ctx->cpu_seconds, ts,
+ st->user,
+ 2, (char *[]) {cpu_id, "user"});
+
+ /* CPU Guest Seconds */
+ cmt_counter_set(ctx->cpu_guest_seconds, ts,
+ st->guest,
+ 2, (char *[]) {cpu_id, "user"});
+
+ cmt_counter_set(ctx->cpu_guest_seconds, ts,
+ st->guest_nice,
+ 2, (char *[]) {cpu_id, "nice"});
+
+ return 0;
+}
+
+static int cpu_stat_update(struct flb_ne *ctx, uint64_t ts)
+{
+ int len;
+ int ret;
+ char *p;
+ char tmp[32];
+ struct mk_list list;
+ struct mk_list *head;
+ struct flb_slist_entry *line;
+ struct cpu_stat_info st = {0};
+
+ ret = ne_utils_file_read_lines(ctx->path_procfs, "/stat", &list);
+ if (ret == -1) {
+ return -1;
+ }
+
+ mk_list_foreach(head, &list) {
+ line = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ if (strncmp(line->str, "cpu ", 4) == 0) {
+ /* Aggregate 'cpu' line: skip it, we only report per-core stats */
+ continue;
+ }
+ else if (strncmp(line->str, "cpu", 3) == 0) {
+ /* CPU ID (per core) */
+ p = strchr(line->str + 3, ' ');
+ len = p - (line->str + 3);
+ memcpy(tmp, line->str + 3, len);
+ tmp[len] = '\0';
+
+ /* Capture metrics */
+ ret = stat_line(p, &st);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins,
+ "could not process line: %s", line->str);
+ continue;
+ }
+
+ /* Update our counters */
+ cpu_stat_set_metrics(ctx, tmp, &st, ts);
+ }
+ }
+
+ flb_slist_destroy(&list);
+ return 0;
+}
+
+int ne_cpu_init(struct flb_ne *ctx)
+{
+ int ret;
+
+ /* CPU Thermal */
+ ret = cpu_thermal_init(ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not initialize cpu_thermal metrics");
+ return -1;
+ }
+
+ /* CPU Stats */
+ ret = cpu_stat_init(ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not initialize cpu_stat metrics");
+ return -1;
+ }
+ return 0;
+}
+
+int ne_cpu_update(struct flb_ne *ctx)
+{
+ uint64_t ts;
+
+ ts = cfl_time_now();
+
+ cpu_thermal_update(ctx, ts);
+ cpu_stat_update(ctx, ts);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpufreq.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpufreq.h
new file mode 100644
index 000000000..b6712dd5a
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpufreq.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NE_CPUFREQ_H
+#define FLB_IN_NE_CPUFREQ_H
+
+#include "ne.h"
+
+int ne_cpufreq_init(struct flb_ne *ctx);
+int ne_cpufreq_update(struct flb_ne *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpufreq_linux.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpufreq_linux.c
new file mode 100644
index 000000000..e31b976d8
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_cpufreq_linux.c
@@ -0,0 +1,196 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "ne.h"
+#include "ne_utils.h"
+
+static int cpufreq_init(struct flb_ne *ctx)
+{
+ struct cmt_gauge *g;
+
+ /* node_cpu_frequency_hertz */
+ g = cmt_gauge_create(ctx->cmt, "node", "cpu", "frequency_hertz",
+ "Current cpu thread frequency in hertz.",
+ 1, (char *[]) {"cpu"});
+ if (!g) {
+ return -1;
+ }
+ ctx->cpu_freq_hertz = g;
+
+ /* node_cpu_frequency_max_hertz */
+ g = cmt_gauge_create(ctx->cmt, "node", "cpu", "frequency_max_hertz",
+ "Maximum cpu thread frequency in hertz.",
+ 1, (char *[]) {"cpu"});
+ if (!g) {
+ return -1;
+ }
+ ctx->cpu_freq_max_hertz = g;
+
+ /* node_cpu_frequency_min_hertz */
+ g = cmt_gauge_create(ctx->cmt, "node", "cpu", "frequency_min_hertz",
+ "Minimum cpu thread frequency in hertz.",
+ 1, (char *[]) {"cpu"});
+ if (!g) {
+ return -1;
+ }
+ ctx->cpu_freq_min_hertz = g;
+
+ /* node_cpu_scaling_frequency_hertz */
+ g = cmt_gauge_create(ctx->cmt, "node", "cpu", "scaling_frequency_hertz",
+ "Current scaled CPU thread frequency in hertz.",
+ 1, (char *[]) {"cpu"});
+ if (!g) {
+ return -1;
+ }
+ ctx->cpu_scaling_freq_hertz = g;
+
+ /* node_cpu_scaling_frequency_max_hertz */
+ g = cmt_gauge_create(ctx->cmt, "node", "cpu", "scaling_frequency_max_hertz",
+ "Maximum scaled CPU thread frequency in hertz.",
+ 1, (char *[]) {"cpu"});
+ if (!g) {
+ return -1;
+ }
+ ctx->cpu_scaling_freq_max_hertz = g;
+
+ /* node_cpu_scaling_frequency_min_hertz */
+ g = cmt_gauge_create(ctx->cmt, "node", "cpu", "scaling_frequency_min_hertz",
+ "Minimum scaled CPU thread frequency in hertz.",
+ 1, (char *[]) {"cpu"});
+ if (!g) {
+ return -1;
+ }
+ ctx->cpu_scaling_freq_min_hertz = g;
+
+ return 0;
+}
+
+static int cpufreq_update(struct flb_ne *ctx)
+{
+ int ret;
+ int len;
+ uint64_t ts;
+ uint64_t val;
+ char *cpu_id;
+ struct mk_list list;
+ struct mk_list *head;
+ struct flb_slist_entry *entry;
+ const char *pattern = "/devices/system/cpu/cpu[0-9]*";
+
+ ret = ne_utils_path_scan(ctx, ctx->path_sysfs, pattern, NE_SCAN_DIR, &list);
+ if (ret != 0) {
+ return -1;
+ }
+
+ if (mk_list_size(&list) == 0) {
+ return 0;
+ }
+
+ ts = cfl_time_now();
+
+ /* Process entries */
+ mk_list_foreach(head, &list) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ /* Locate the CPU ID: entries end in ".../cpuNN", scan back to the 'u' and step past it */
+ len = flb_sds_len(entry->str);
+ cpu_id = entry->str + len;
+ while (*cpu_id != 'u') cpu_id--;
+ cpu_id++;
+
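+ /* sysfs cpufreq files report frequencies in kHz; the gauges below convert to Hz via "* 1000.0" */
+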
+ /* node_cpu_frequency_hertz */
+ ret = ne_utils_file_read_uint64(ctx->path_sysfs,
+ entry->str, "cpufreq", "cpuinfo_cur_freq",
+ &val);
+ if (ret == 0) {
+ cmt_gauge_set(ctx->cpu_freq_hertz, ts,
+ (double) (val * 1000.0),
+ 1, (char *[]) {cpu_id});
+ }
+
+ /* node_cpu_frequency_max_hertz */
+ ret = ne_utils_file_read_uint64(ctx->path_sysfs,
+ entry->str, "cpufreq", "cpuinfo_max_freq",
+ &val);
+ if (ret == 0) {
+ cmt_gauge_set(ctx->cpu_freq_max_hertz, ts,
+ (double) (val * 1000.0),
+ 1, (char *[]) {cpu_id});
+ }
+
+ /* node_cpu_frequency_min_hertz */
+ ret = ne_utils_file_read_uint64(ctx->path_sysfs,
+ entry->str, "cpufreq", "cpuinfo_min_freq",
+ &val);
+ if (ret == 0) {
+ cmt_gauge_set(ctx->cpu_freq_min_hertz, ts,
+ (double) (val * 1000.0),
+ 1, (char *[]) {cpu_id});
+ }
+
+
+ /* node_cpu_scaling_frequency_hertz */
+ ret = ne_utils_file_read_uint64(ctx->path_sysfs,
+ entry->str, "cpufreq", "scaling_cur_freq",
+ &val);
+ if (ret == 0) {
+ cmt_gauge_set(ctx->cpu_scaling_freq_hertz, ts,
+ ((double) val) * 1000.0,
+ 1, (char *[]) {cpu_id});
+ }
+
+ /* node_cpu_scaling_frequency_max_hertz */
+ ret = ne_utils_file_read_uint64(ctx->path_sysfs,
+ entry->str, "cpufreq", "scaling_max_freq",
+ &val);
+ if (ret == 0) {
+ cmt_gauge_set(ctx->cpu_scaling_freq_max_hertz, ts,
+ (double) (val * 1000.0),
+ 1, (char *[]) {cpu_id});
+ }
+
+ /* node_cpu_frequency_min_hertz */
+ ret = ne_utils_file_read_uint64(ctx->path_sysfs,
+ entry->str, "cpufreq", "scaling_min_freq",
+ &val);
+ if (ret == 0) {
+ cmt_gauge_set(ctx->cpu_scaling_freq_min_hertz, ts,
+ (double) (val * 1000.0),
+ 1, (char *[]) {cpu_id});
+ }
+ }
+
+ flb_slist_destroy(&list);
+ return 0;
+}
+
+int ne_cpufreq_init(struct flb_ne *ctx)
+{
+ cpufreq_init(ctx);
+ return 0;
+}
+
+int ne_cpufreq_update(struct flb_ne *ctx)
+{
+ cpufreq_update(ctx);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_diskstats.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_diskstats.c
new file mode 100644
index 000000000..0b4ac57f1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_diskstats.c
@@ -0,0 +1,22 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef __linux__
+#include "ne_diskstats_linux.c"
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_diskstats.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_diskstats.h
new file mode 100644
index 000000000..4904e33ef
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_diskstats.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NE_DISKSTATS_H
+#define FLB_IN_NE_DISKSTATS_H
+
+#include "ne.h"
+
+int ne_diskstats_init(struct flb_ne *ctx);
+int ne_diskstats_update(struct flb_ne *ctx);
+int ne_diskstats_exit(struct flb_ne *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_diskstats_linux.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_diskstats_linux.c
new file mode 100644
index 000000000..26ba9cb93
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_diskstats_linux.c
@@ -0,0 +1,449 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "ne.h"
+#include "ne_utils.h"
+
+#include <unistd.h>
+#include <float.h>
+
+/*
+ * Diskstats interface references
+ * ------------------------------
+ * https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
+ * https://www.kernel.org/doc/Documentation/iostats.txt
+ *
+ * From the documentation, Kernel versions and expected fields:
+ *
+ * == ===================================
+ * 1 major number
+ * 2 minor mumber
+ * 3 device name
+ * 4 reads completed successfully
+ * 5 reads merged
+ * 6 sectors read
+ * 7 time spent reading (ms)
+ * 8 writes completed
+ * 9 writes merged
+ * 10 sectors written
+ * 11 time spent writing (ms)
+ * 12 I/Os currently in progress
+ * 13 time spent doing I/Os (ms)
+ * 14 weighted time spent doing I/Os (ms)
+ * == ===================================
+ *
+ * Kernel 4.18+ appends four more fields for discard
+ * tracking putting the total at 18:
+ *
+ * == ===================================
+ * 15 discards completed successfully
+ * 16 discards merged
+ * 17 sectors discarded
+ * 18 time spent discarding
+ * == ===================================
+ *
+ * Kernel 5.5+ appends two more fields for flush requests:
+ *
+ * == =====================================
+ * 19 flush requests completed successfully
+ * 20 time spent flushing
+ * == =====================================
+ */
+
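+/*
+ * For reference, a /proc/diskstats line looks like this (illustrative values,
+ * pre-4.18 kernel with 14 fields):
+ *
+ *   8  0 sda 61310 23189 5082382 23617 96577 92143 5376304 82418 0 58348 106036
+ *
+ * Field 3 is the device name matched against the ignore regex; the numeric
+ * fields that follow map to the table above.
+ */
+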
+#define KNOWN_FIELDS 17
+#define SECTOR_SIZE 512
+
+struct dt_metric {
+ void *metric;
+ double factor;
+};
+
+static void metric_cache_set(struct flb_ne *ctx, void *metric, double factor, int *offset)
+{
+ int id;
+ struct dt_metric *m;
+ struct dt_metric *cache;
+
+ id = *offset;
+
+ cache = (struct dt_metric *) ctx->dt_metrics;
+ m = &cache[id];
+ m->metric = metric;
+ m->factor = factor;
+ (*offset)++;
+}
+
+static void metric_cache_update(struct flb_ne *ctx, int id, flb_sds_t device,
+ flb_sds_t str_val)
+{
+ int ret = -1;
+ uint64_t ts;
+ double val;
+ struct dt_metric *m;
+    struct dt_metric *cache;
+    struct cmt_gauge *g;
+    struct cmt_counter *c;
+
+    cache = (struct dt_metric *) ctx->dt_metrics;
+    m = &cache[id];
+
+ ret = ne_utils_str_to_double(str_val, &val);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not represent string value '%s' for metric id '%i', "
+ "device '%s'",
+ str_val, id, device);
+ return;
+ }
+
+ ts = cfl_time_now();
+
+ if (m->factor > DBL_EPSILON) {
+ val *= m->factor;
+ }
+
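+    /*
+     * Metric id 8 corresponds to field 12, "I/Os currently in progress"
+     * (node_disk_io_now): it is the only gauge in the cache, every other
+     * entry is registered as a counter.
+     */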
+ if (id == 8) {
+ g = (struct cmt_gauge *) m->metric;
+ ret = cmt_gauge_set(g, ts, val, 1, (char *[]) {device});
+ }
+ else {
+ c = (struct cmt_counter *) m->metric;
+ ret = cmt_counter_set(c, ts, val, 1, (char *[]) {device});
+ }
+
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not update metric id '%i', device '%s'",
+ id, device);
+ }
+
+}
+
+/* Setup metrics contexts */
+static int ne_diskstats_configure(struct flb_ne *ctx)
+{
+ int offset = 0;
+ struct cmt_counter *c;
+ struct cmt_gauge *g;
+
+ /* Create cache for metrics */
+ ctx->dt_metrics = flb_calloc(1, sizeof(struct dt_metric) * KNOWN_FIELDS);
+ if (!ctx->dt_metrics) {
+ flb_errno();
+ return -1;
+ }
+
+ /* Initialize regex for skipped devices */
+ ctx->dt_regex_skip_devices = flb_regex_create(ctx->dt_regex_skip_devices_text);
+ if (!ctx->dt_regex_skip_devices) {
+ flb_plg_error(ctx->ins,
+ "could not initialize regex pattern for ignored "
+ "devices: '%s'",
+ IGNORED_DEVICES);
+ return -1;
+ }
+
+ /* node_disk_reads_completed_total */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "reads_completed_total",
+ "The total number of reads completed successfully.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, 0, &offset);
+
+ /* node_disk_reads_merged_total */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "reads_merged_total",
+ "The total number of reads merged.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, 0, &offset);
+
+ /* node_disk_read_bytes_total */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "read_bytes_total",
+ "The total number of bytes read successfully.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, SECTOR_SIZE, &offset);
+
+ /* node_disk_read_time_seconds_total */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "read_time_seconds_total",
+ "The total number of seconds spent by all reads.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, .001, &offset);
+
+ /* node_disk_writes_completed_total */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "writes_completed_total",
+ "The total number of writes completed successfully.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, 0, &offset);
+
+ /* node_disk_writes_merged_total */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "writes_merged_total",
+ "The number of writes merged.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, 0, &offset);
+
+ /* node_disk_written_bytes_total */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "written_bytes_total",
+ "The total number of bytes written successfully.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, SECTOR_SIZE, &offset);
+
+ /* node_disk_write_time_seconds_total */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "write_time_seconds_total",
+ "This is the total number of seconds spent by all writes.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, .001, &offset);
+
+ /* node_disk_io_now */
+ g = cmt_gauge_create(ctx->cmt, "node", "disk", "io_now",
+ "The number of I/Os currently in progress.",
+ 1, (char *[]) {"device"});
+ if (!g) {
+ return -1;
+ }
+ metric_cache_set(ctx, g, 0, &offset);
+
+ /* node_disk_io_time_seconds */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "io_time_seconds_total",
+ "Total seconds spent doing I/Os.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, .001, &offset);
+
+ /* node_disk_io_time_weighted_seconds */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "io_time_weighted_seconds_total",
+ "The weighted # of seconds spent doing I/Os.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, .001, &offset);
+
+ /*
+ * Linux Kernel >= 4.18
+ * ====================
+ */
+
+ /* node_disk_discards_completed */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "discards_completed_total",
+ "The total number of discards completed successfully.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, 0, &offset);
+
+ /* node_disk_discards_merged */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "discards_merged_total",
+ "The total number of discards merged.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, 0, &offset);
+
+ /* node_disk_discarded_sectors */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "discarded_sectors_total",
+ "The total number of sectors discarded successfully.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, 0, &offset);
+
+ /* node_disk_discard_time_seconds */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "discard_time_seconds_total",
+ "This is the total number of seconds spent by all discards.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, .001, &offset);
+
+ /*
+ * Linux Kernel >= 5.5
+ * ===================
+ */
+
+ /* node_disk_flush_requests */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "flush_requests_total",
+                           "The total number of flush requests completed successfully.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, 0, &offset);
+
+ /* node_disk_flush_requests_time_seconds */
+ c = cmt_counter_create(ctx->cmt, "node", "disk", "flush_requests_time_seconds_total",
+ "This is the total number of seconds spent by all flush "
+ "requests.",
+ 1, (char *[]) {"device"});
+ if (!c) {
+ return -1;
+ }
+ metric_cache_set(ctx, c, .001, &offset);
+
+ return 0;
+}
+
+static flb_sds_t get_part_id(struct mk_list *list, int id)
+{
+ int i = 0;
+ struct mk_list *head;
+ struct flb_slist_entry *entry;
+
+ mk_list_foreach(head, list) {
+ if (i == id) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+ return entry->str;
+ }
+ i++;
+ }
+ return NULL;
+}
+
+static int skip_device(struct flb_ne *ctx, flb_sds_t device)
+{
+ return flb_regex_match(ctx->dt_regex_skip_devices,
+ (unsigned char *) device, flb_sds_len(device));
+}
+
+static int update_stats(struct flb_ne *ctx, struct mk_list *list, int parts)
+{
+ int id = 0;
+ flb_sds_t device;
+ struct mk_list *head;
+ struct flb_slist_entry *entry;
+
+ /* Get device name: third entry */
+ device = get_part_id(list, 2);
+ if (!device) {
+ flb_plg_error(ctx->ins, "cannot retrieve device name");
+ return -1;
+ }
+
+ /* Check if we should process or skip this device */
+ if (skip_device(ctx, device)) {
+ flb_plg_debug(ctx->ins, "skip device: %s", device);
+ return 0;
+ }
+
+ mk_list_foreach(head, list) {
+ /* Skip: major number, minor number and device name */
+ if (id <= 2) {
+ id++;
+ continue;
+ }
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+
+        /*
+         * Update the metric: column 'id' maps to cache slot 'id - 3',
+         * since major number, minor number and device name are skipped.
+         */
+ metric_cache_update(ctx, id - 3, device, entry->str);
+ id++;
+
+ /* Do not process more than the known fields as of this version */
+ if (id - 3 == KNOWN_FIELDS) {
+ break;
+ }
+ }
+ return 0;
+}
+
+static int diskstats_update(struct flb_ne *ctx)
+{
+ int ret;
+ int parts;
+ struct mk_list *head;
+ struct mk_list list;
+ struct mk_list split_list;
+ struct flb_slist_entry *line;
+
+ mk_list_init(&list);
+ mk_list_init(&split_list);
+
+ ret = ne_utils_file_read_lines(ctx->path_procfs, "/diskstats", &list);
+ if (ret == -1) {
+ return -1;
+ }
+
+ mk_list_foreach(head, &list) {
+ line = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ mk_list_init(&split_list);
+ ret = flb_slist_split_string(&split_list, line->str, ' ', -1);
+ if (ret == -1) {
+ continue;
+ }
+ parts = ret;
+
+ update_stats(ctx, &split_list, parts);
+ flb_slist_destroy(&split_list);
+ }
+
+ flb_slist_destroy(&list);
+ return 0;
+}
+
+int ne_diskstats_init(struct flb_ne *ctx)
+{
+ ne_diskstats_configure(ctx);
+ return 0;
+}
+
+int ne_diskstats_update(struct flb_ne *ctx)
+{
+ diskstats_update(ctx);
+ return 0;
+}
+
+int ne_diskstats_exit(struct flb_ne *ctx)
+{
+ flb_free(ctx->dt_metrics);
+ if (ctx->dt_regex_skip_devices) {
+ flb_regex_destroy(ctx->dt_regex_skip_devices);
+ }
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filefd_linux.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filefd_linux.c
new file mode 100644
index 000000000..5c0c0166d
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filefd_linux.c
@@ -0,0 +1,115 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _GNU_SOURCE
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "ne.h"
+#include "ne_utils.h"
+
+static int filefd_configure(struct flb_ne *ctx)
+{
+ struct cmt_gauge *g;
+
+ /* node_filefd_allocated */
+ g = cmt_gauge_create(ctx->cmt, "node", "filefd", "allocated",
+ "File descriptor statistics: allocated.",
+ 0, NULL);
+ ctx->filefd_allocated = g;
+
+ /* node_filefd_maximum */
+ g = cmt_gauge_create(ctx->cmt, "node", "filefd", "maximum",
+ "File descriptor statistics: maximum.",
+ 0, NULL);
+ ctx->filefd_maximum = g;
+
+ return 0;
+}
+
+static int filefd_update(struct flb_ne *ctx)
+{
+ int ret;
+ int parts;
+ uint64_t ts;
+ double d_val;
+ struct mk_list *head;
+ struct mk_list list;
+ struct mk_list split_list;
+ struct flb_slist_entry *line;
+ struct flb_slist_entry *alloc;
+ struct flb_slist_entry *max;
+
+ mk_list_init(&list);
+ ret = ne_utils_file_read_lines(ctx->path_procfs, "/sys/fs/file-nr", &list);
+ if (ret == -1) {
+ return -1;
+ }
+
+ ts = cfl_time_now();
+
+ mk_list_foreach(head, &list) {
+ line = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ mk_list_init(&split_list);
+ ret = flb_slist_split_string(&split_list, line->str, '\t', -1);
+ if (ret == -1) {
+ continue;
+ }
+ parts = ret;
+ if (parts == 0) {
+ flb_slist_destroy(&split_list);
+ continue;
+ }
+ else if (parts != 3) {
+ flb_plg_warn(ctx->ins, "/sys/fs/file-nr: invalid number of fields");
+ flb_slist_destroy(&split_list);
+ break;
+ }
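+        /*
+         * /proc/sys/fs/file-nr holds three tab-separated fields:
+         * allocated handles, allocated-but-unused handles and the maximum.
+         * Only the first and third values are exported below.
+         */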
+
+ /* allocated (0) */
+ alloc = flb_slist_entry_get(&split_list, 0);
+ ne_utils_str_to_double(alloc->str, &d_val);
+ cmt_gauge_set(ctx->filefd_allocated, ts, d_val, 0, NULL);
+
+ /* maximum (2) */
+ max = flb_slist_entry_get(&split_list, 2);
+ ne_utils_str_to_double(max->str, &d_val);
+ cmt_gauge_set(ctx->filefd_maximum, ts, d_val, 0, NULL);
+
+ flb_slist_destroy(&split_list);
+ break;
+ }
+ flb_slist_destroy(&list);
+
+ return 0;
+}
+
+int ne_filefd_init(struct flb_ne *ctx)
+{
+ filefd_configure(ctx);
+ return 0;
+}
+
+int ne_filefd_update(struct flb_ne *ctx)
+{
+ filefd_update(ctx);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filefd_linux.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filefd_linux.h
new file mode 100644
index 000000000..ef9df5d1c
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filefd_linux.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NE_FILEFD_H
+#define FLB_IN_NE_FILEFD_H
+
+#include "ne.h"
+
+int ne_filefd_init(struct flb_ne *ctx);
+int ne_filefd_update(struct flb_ne *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filesystem.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filesystem.c
new file mode 100644
index 000000000..6cc2a1960
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filesystem.c
@@ -0,0 +1,39 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef __linux__
+#include "ne_filesystem_linux.c"
+#else
+
+int ne_filesystem_init(struct flb_ne *ctx)
+{
+ return 0;
+}
+
+int ne_filesystem_update(struct flb_ne *ctx)
+{
+ return 0;
+}
+
+int ne_filesystem_exit(struct flb_ne *ctx)
+{
+ return 0;
+}
+
+#endif \ No newline at end of file
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filesystem.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filesystem.h
new file mode 100644
index 000000000..1e87b2825
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filesystem.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NE_FILESYSTEM_H
+#define FLB_IN_NE_FILESYSTEM_H
+
+#include "ne.h"
+
+int ne_filesystem_init(struct flb_ne *ctx);
+int ne_filesystem_update(struct flb_ne *ctx);
+int ne_filesystem_exit(struct flb_ne *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filesystem_linux.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filesystem_linux.c
new file mode 100644
index 000000000..5f054aa78
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_filesystem_linux.c
@@ -0,0 +1,404 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <cfl/cfl_list.h>
+#include <linux/limits.h>
+#include <sys/statvfs.h>
+#include <sys/statfs.h>
+
+#include "ne.h"
+#include "ne_utils.h"
+
+#include <unistd.h>
+#include <float.h>
+
+#define NE_ERROR_MOUNT_POINT_LIST_FETCH_SUCCESS 0
+#define NE_ERROR_MOUNT_POINT_LIST_FETCH_GENERIC_ERROR -1
+#define NE_ERROR_MOUNT_POINT_LIST_FETCH_FILE_ACCESS_ERROR -2
+#define NE_ERROR_MOUNT_POINT_LIST_FETCH_CORRUPTED_DATA -3
+
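+/*
+ * Mount entries in the mounts file escape special characters such as the
+ * space and tab as backslash-octal sequences (e.g. "\040"); this helper
+ * reverses that escaping in place for a single character.
+ */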
+static void unescape_character(cfl_sds_t input_buffer, char character)
+{
+ size_t needle_length;
+ char needle[8];
+ char *haystack;
+ char *match;
+
+ needle_length = snprintf(needle, sizeof(needle), "\\0%02o", character);
+
+ haystack = (char *) input_buffer;
+
+ do {
+ match = strstr(haystack, needle);
+
+ if (match != NULL) {
+ match[0] = character;
+
+ memmove(&match[1],
+ &match[needle_length],
+ strlen(match) - needle_length + 1);
+ }
+
+ haystack = match;
+ }
+ while (match != NULL);
+}
+
+static cfl_sds_t greedy_read_file(char *path)
+{
+ char read_buffer[1024];
+ cfl_sds_t temporary_buffer;
+ FILE *file_handle;
+ size_t read_size;
+ cfl_sds_t contents;
+
+ file_handle = fopen(path, "rb");
+
+ if (file_handle == NULL) {
+ return NULL;
+ }
+
+ contents = cfl_sds_create_size(0);
+
+ if (contents == NULL) {
+ flb_errno();
+ fclose(file_handle);
+
+ return NULL;
+ }
+
+ do {
+ read_size = fread(read_buffer,
+ 1,
+ sizeof(read_buffer),
+ file_handle);
+
+ if (read_size > 0) {
+ temporary_buffer = cfl_sds_cat(contents, read_buffer, read_size);
+
+ if (temporary_buffer == NULL) {
+ cfl_sds_set_len(contents, 0);
+
+ read_size = 0;
+ }
+ else {
+ contents = temporary_buffer;
+ }
+ }
+ }
+ while (read_size > 0);
+
+ fclose(file_handle);
+
+ if (cfl_sds_len(contents) == 0) {
+ cfl_sds_destroy(contents);
+
+ contents = NULL;
+ }
+
+ return contents;
+}
+
+static int greedy_read_file_lines(char *path, struct mk_list *lines)
+{
+ cfl_sds_t contents;
+ int result;
+
+ contents = greedy_read_file(path);
+
+ if (contents == NULL) {
+ return NE_ERROR_MOUNT_POINT_LIST_FETCH_FILE_ACCESS_ERROR;
+ }
+
+ mk_list_init(lines);
+
+ result = flb_slist_split_string(lines, contents, '\n', -1);
+
+ cfl_sds_destroy(contents);
+
+ if (result == -1) {
+ return NE_ERROR_MOUNT_POINT_LIST_FETCH_CORRUPTED_DATA;
+ }
+
+ return NE_ERROR_MOUNT_POINT_LIST_FETCH_SUCCESS;
+}
+
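+/*
+ * Each mounts line is expected to look like (illustrative example):
+ *
+ *   /dev/sda1 /boot ext4 rw,relatime 0 0
+ *
+ * Only the first four space-separated fields (device, mount point,
+ * filesystem type and options) are inspected.
+ */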
+static int filesystem_update(struct flb_ne *ctx,
+ char *mounts_file_path)
+{
+ struct statfs mount_point_info;
+ char *field_values[4];
+ struct mk_list *field_iterator;
+ struct mk_list *line_iterator;
+ int readonly_flag;
+ int field_index;
+ int skip_flag;
+ uint64_t timestamp;
+ char *labels[3];
+ int result;
+ struct mk_list fields;
+ struct mk_list lines;
+ struct flb_slist_entry *field;
+ struct flb_slist_entry *line;
+
+ result = greedy_read_file_lines(mounts_file_path, &lines);
+
+ if (result != NE_ERROR_MOUNT_POINT_LIST_FETCH_SUCCESS) {
+ return result;
+ }
+
+ mk_list_foreach(line_iterator, &lines) {
+ line = mk_list_entry(line_iterator, struct flb_slist_entry, _head);
+
+ mk_list_init(&fields);
+
+ result = flb_slist_split_string(&fields, line->str, ' ', -1);
+ if (result == -1) {
+ continue;
+ }
+
+ field_index = 0;
+
+ memset(field_values, 0, sizeof(field_values));
+
+ mk_list_foreach(field_iterator, &fields) {
+ field = mk_list_entry(field_iterator,
+ struct flb_slist_entry,
+ _head);
+
+ if (field_index < 4) {
+ field_values[field_index] = field->str;
+ }
+ else {
+ break;
+ }
+
+ field_index++;
+ }
+
+ if (field_values[0] != NULL && /* device */
+ field_values[1] != NULL && /* path */
+ field_values[2] != NULL && /* fs type */
+ field_values[3] != NULL) { /* options */
+ skip_flag = flb_regex_match(ctx->fs_regex_skip_fs_types,
+ (unsigned char *) field_values[2],
+ strlen(field_values[2]));
+
+ if (!skip_flag) {
+ unescape_character(field_values[1], ' ');
+ unescape_character(field_values[1], '\t');
+
+ skip_flag = flb_regex_match(ctx->fs_regex_skip_mount,
+ (unsigned char *) field_values[1],
+ strlen(field_values[1]));
+
+ if (!skip_flag) {
+ timestamp = cfl_time_now();
+
+ result = statfs(field_values[1], &mount_point_info);
+
+ if (result == 0) {
+ labels[0] = field_values[0];
+ labels[1] = field_values[2];
+ labels[2] = field_values[1];
+
+ readonly_flag = mount_point_info.f_flags & ST_RDONLY;
+ readonly_flag = (readonly_flag != 0);
+
+ cmt_gauge_set(ctx->fs_avail_bytes,
+ timestamp,
+ mount_point_info.f_bsize *
+ mount_point_info.f_bavail,
+ 3, labels);
+
+                    /* We don't support device error counting yet */
+ cmt_gauge_set(ctx->fs_device_error,
+ timestamp,
+ 0,
+ 3, labels);
+
+ cmt_gauge_set(ctx->fs_files,
+ timestamp,
+ mount_point_info.f_files,
+ 3, labels);
+
+ cmt_gauge_set(ctx->fs_files_free,
+ timestamp,
+ mount_point_info.f_ffree,
+ 3, labels);
+
+ cmt_gauge_set(ctx->fs_free_bytes,
+ timestamp,
+ mount_point_info.f_bsize *
+ mount_point_info.f_bfree,
+ 3, labels);
+
+ cmt_gauge_set(ctx->fs_readonly,
+ timestamp,
+ readonly_flag,
+ 3, labels);
+
+ cmt_gauge_set(ctx->fs_size_bytes,
+ timestamp,
+ mount_point_info.f_bsize *
+ mount_point_info.f_blocks,
+ 3, labels);
+ }
+ }
+ }
+ }
+
+ flb_slist_destroy(&fields);
+ }
+
+ flb_slist_destroy(&lines);
+
+ return NE_ERROR_MOUNT_POINT_LIST_FETCH_SUCCESS;
+}
+
+int ne_filesystem_init(struct flb_ne *ctx)
+{
+ ctx->fs_regex_skip_mount = flb_regex_create(ctx->fs_regex_ingore_mount_point_text);
+ ctx->fs_regex_skip_fs_types = flb_regex_create(ctx->fs_regex_ingore_filesystem_type_text);
+
+ ctx->fs_avail_bytes = cmt_gauge_create(ctx->cmt,
+ "node",
+ "filesystem",
+ "avail_bytes",
+ "Filesystem space available to " \
+ "non-root users in bytes.",
+ 3, (char *[]) {"device",
+ "fstype",
+ "mountpoint"});
+
+ if (ctx->fs_avail_bytes == NULL) {
+ return -1;
+ }
+
+ ctx->fs_device_error = cmt_gauge_create(ctx->cmt,
+ "node",
+ "filesystem",
+ "device_error",
+ "Whether an error occurred while " \
+ "getting statistics for the given " \
+ "device.",
+ 3, (char *[]) {"device",
+ "fstype",
+ "mountpoint"});
+
+ if (ctx->fs_device_error == NULL) {
+ return -1;
+ }
+
+ ctx->fs_files = cmt_gauge_create(ctx->cmt,
+ "node",
+ "filesystem",
+ "files",
+ "Filesystem total file nodes.",
+ 3, (char *[]) {"device",
+ "fstype",
+ "mountpoint"});
+
+ if (ctx->fs_files == NULL) {
+ return -1;
+ }
+
+ ctx->fs_files_free = cmt_gauge_create(ctx->cmt,
+ "node",
+ "filesystem",
+ "files_free",
+ "Filesystem total free file nodes.",
+ 3, (char *[]) {"device",
+ "fstype",
+ "mountpoint"});
+
+ if (ctx->fs_files_free == NULL) {
+ return -1;
+ }
+
+ ctx->fs_free_bytes = cmt_gauge_create(ctx->cmt,
+ "node",
+ "filesystem",
+ "free_bytes",
+ "Filesystem free space in bytes.",
+ 3, (char *[]) {"device",
+ "fstype",
+ "mountpoint"});
+
+ if (ctx->fs_free_bytes == NULL) {
+ return -1;
+ }
+
+ ctx->fs_readonly = cmt_gauge_create(ctx->cmt,
+ "node",
+ "filesystem",
+ "readonly",
+ "Filesystem read-only status.",
+ 3, (char *[]) {"device",
+ "fstype",
+ "mountpoint"});
+
+ if (ctx->fs_readonly == NULL) {
+ return -1;
+ }
+
+ ctx->fs_size_bytes = cmt_gauge_create(ctx->cmt,
+ "node",
+ "filesystem",
+ "size_bytes",
+ "Filesystem size in bytes.",
+ 3, (char *[]) {"device",
+ "fstype",
+ "mountpoint"});
+
+ if (ctx->fs_size_bytes == NULL) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int ne_filesystem_update(struct flb_ne *ctx)
+{
+ int result;
+
+ result = filesystem_update(ctx, "/proc/1/mounts");
+
+ if (result != NE_ERROR_MOUNT_POINT_LIST_FETCH_SUCCESS) {
+ result = filesystem_update(ctx, "/proc/self/mounts");
+ }
+
+ return 0;
+}
+
+int ne_filesystem_exit(struct flb_ne *ctx)
+{
+ if (ctx->fs_regex_skip_mount != NULL) {
+ flb_regex_destroy(ctx->fs_regex_skip_mount);
+ }
+
+ if (ctx->fs_regex_skip_fs_types != NULL) {
+ flb_regex_destroy(ctx->fs_regex_skip_fs_types);
+ }
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_loadavg.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_loadavg.c
new file mode 100644
index 000000000..123ab1dc8
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_loadavg.c
@@ -0,0 +1,22 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef __linux__
+#include "ne_loadavg_linux.c"
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_loadavg.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_loadavg.h
new file mode 100644
index 000000000..95540ac37
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_loadavg.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NE_LOADAVG_H
+#define FLB_IN_NE_LOADAVG_H
+
+#include "ne.h"
+
+int ne_loadavg_init(struct flb_ne *ctx);
+int ne_loadavg_update(struct flb_ne *ctx);
+int ne_loadavg_exit(struct flb_ne *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_loadavg_linux.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_loadavg_linux.c
new file mode 100644
index 000000000..13d872593
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_loadavg_linux.c
@@ -0,0 +1,126 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "ne.h"
+#include "ne_utils.h"
+
+#include <unistd.h>
+
+/* Setup metrics contexts */
+static int ne_loadavg_configure(struct flb_ne *ctx)
+{
+ struct cmt_gauge *g;
+
+ /* loadavg 1m */
+ g = cmt_gauge_create(ctx->cmt, "node", "", "load1",
+ "1m load average.",
+ 0, NULL);
+ ctx->lavg_1 = g;
+
+ /* loadavg 5m */
+ g = cmt_gauge_create(ctx->cmt, "node", "", "load5",
+ "5m load average.",
+ 0, NULL);
+ ctx->lavg_5 = g;
+
+ /* loadavg 15m */
+ g = cmt_gauge_create(ctx->cmt, "node", "", "load15",
+ "15m load average.",
+ 0, NULL);
+ ctx->lavg_15 = g;
+
+ return 0;
+}
+
+static int loadavg_update(struct flb_ne *ctx)
+{
+ int ret;
+ int parts;
+ double val;
+ uint64_t ts;
+ struct mk_list *head;
+ struct mk_list list;
+ struct mk_list split_list;
+ struct flb_slist_entry *line;
+ struct flb_slist_entry *load;
+
+ mk_list_init(&list);
+ mk_list_init(&split_list);
+
+ ret = ne_utils_file_read_lines(ctx->path_procfs, "/loadavg", &list);
+ if (ret == -1) {
+ return -1;
+ }
+
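+    /*
+     * A /proc/loadavg line looks like (illustrative values):
+     *   "0.52 0.58 0.59 1/389 12345"
+     * Only the first three fields (1, 5 and 15 minute averages) are used.
+     */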
+ ts = cfl_time_now();
+ mk_list_foreach(head, &list) {
+ line = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ mk_list_init(&split_list);
+ ret = flb_slist_split_string(&split_list, line->str, ' ', -1);
+ if (ret == -1) {
+ continue;
+ }
+        parts = ret;
+ if (parts == 0) {
+ flb_slist_destroy(&split_list);
+ continue;
+ }
+
+ /* 1m */
+ load = flb_slist_entry_get(&split_list, 0);
+ ne_utils_str_to_double(load->str, &val);
+ cmt_gauge_set(ctx->lavg_1, ts, val, 0, NULL);
+
+ /* 5m */
+ load = flb_slist_entry_get(&split_list, 1);
+ ne_utils_str_to_double(load->str, &val);
+ cmt_gauge_set(ctx->lavg_5, ts, val, 0, NULL);
+
+ /* 15m */
+ load = flb_slist_entry_get(&split_list, 2);
+ ne_utils_str_to_double(load->str, &val);
+ cmt_gauge_set(ctx->lavg_15, ts, val, 0, NULL);
+
+ flb_slist_destroy(&split_list);
+
+ break;
+ }
+
+ flb_slist_destroy(&list);
+ return 0;
+}
+
+int ne_loadavg_init(struct flb_ne *ctx)
+{
+ ne_loadavg_configure(ctx);
+ return 0;
+}
+
+int ne_loadavg_update(struct flb_ne *ctx)
+{
+ loadavg_update(ctx);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_meminfo.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_meminfo.c
new file mode 100644
index 000000000..8dfeaa678
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_meminfo.c
@@ -0,0 +1,23 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifdef __linux__
+#include "ne_meminfo_linux.c"
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_meminfo.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_meminfo.h
new file mode 100644
index 000000000..bd3edc195
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_meminfo.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NE_MEMINFO_H
+#define FLB_IN_NE_MEMINFO_H
+
+#include "ne.h"
+
+int ne_meminfo_init(struct flb_ne *ctx);
+int ne_meminfo_update(struct flb_ne *ctx);
+int ne_meminfo_exit(struct flb_ne *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_meminfo_linux.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_meminfo_linux.c
new file mode 100644
index 000000000..3189d53c0
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_meminfo_linux.c
@@ -0,0 +1,283 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_hash_table.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "ne.h"
+#include "ne_utils.h"
+
+#include <unistd.h>
+
+static int meminfo_configure(struct flb_ne *ctx)
+{
+ int ret;
+ int parts;
+ int len;
+ char *p;
+ char desc[] = "Memory information field ";
+ struct cmt_gauge *g;
+ struct mk_list *head;
+ struct mk_list list;
+ struct mk_list split_list;
+ struct flb_slist_entry *entry;
+ struct flb_slist_entry *line;
+ flb_sds_t metric_name;
+ flb_sds_t metric_desc;
+
+ /* Initialize hash table */
+ ctx->meminfo_ht = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE, 16, 0);
+ if (!ctx->meminfo_ht) {
+ return -1;
+ }
+
+ mk_list_init(&list);
+ mk_list_init(&split_list);
+
+ ret = ne_utils_file_read_lines(ctx->path_procfs, "/meminfo", &list);
+ if (ret == -1) {
+ return -1;
+ }
+ metric_name = flb_sds_create_size(128);
+ if (!metric_name) {
+ flb_hash_table_destroy(ctx->meminfo_ht);
+ flb_slist_destroy(&list);
+ return -1;
+ }
+
+ metric_desc = flb_sds_create_size(256);
+ if (!metric_desc) {
+ flb_hash_table_destroy(ctx->meminfo_ht);
+ flb_slist_destroy(&list);
+ return -1;
+ }
+
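+    /*
+     * /proc/meminfo lines come in two shapes (illustrative values):
+     *   "MemTotal:       16344148 kB"  -> three parts, kB unit, exported as *_bytes
+     *   "HugePages_Total:       0"     -> two parts, no unit, exported as-is
+     */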
+ mk_list_foreach(head, &list) {
+ line = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ mk_list_init(&split_list);
+ ret = flb_slist_split_string(&split_list, line->str, ' ', -1);
+ if (ret == -1) {
+ continue;
+ }
+ parts = ret;
+
+ /* set metric name */
+ entry = mk_list_entry_first(&split_list, struct flb_slist_entry, _head);
+
+ if ((p = strstr(entry->str, "(anon)")) ||
+ (p = strstr(entry->str, "(file)"))) {
+ *p = '_';
+ len = flb_sds_len(entry->str) - 2;
+ flb_sds_len_set(entry->str, len);
+ }
+ else {
+ len = flb_sds_len(entry->str) - 1;
+ flb_sds_len_set(entry->str, len);
+ }
+ entry->str[len] = '\0';
+
+ flb_sds_len_set(metric_name, 0);
+ flb_sds_cat(metric_name, entry->str, flb_sds_len(entry->str));
+
+ /* Metric description */
+ flb_sds_len_set(metric_desc, 0);
+ flb_sds_cat(metric_desc, desc, sizeof(desc) - 1);
+
+ if (parts == 2) {
+ /* No unit */
+ flb_sds_cat(metric_desc, metric_name, flb_sds_len(metric_name));
+ flb_sds_cat(metric_desc, ".", 1);
+
+ g = cmt_gauge_create(ctx->cmt, "node", "memory", metric_name,
+ metric_desc,
+ 0, NULL);
+ if (!g) {
+ flb_slist_destroy(&split_list);
+ goto error;
+ }
+ }
+ else if (parts == 3) {
+ /* It has an extra 'kB' string in the line */
+ flb_sds_cat(metric_name, "_bytes", 6);
+ flb_sds_cat(metric_desc, metric_name, flb_sds_len(metric_name));
+ flb_sds_cat(metric_desc, ".", 1);
+ g = cmt_gauge_create(ctx->cmt, "node", "memory", metric_name,
+ metric_desc,
+ 0, NULL);
+ if (!g) {
+ flb_slist_destroy(&split_list);
+ goto error;
+ }
+ }
+ else {
+ flb_slist_destroy(&split_list);
+ continue;
+ }
+ flb_slist_destroy(&split_list);
+
+        /*
+         * Register the gauge context in the hash table. Note that, depending
+         * on the number of parts in the line, the metric name may already
+         * carry the '_bytes' suffix (lines that include the extra 'kB' unit).
+         */
+ ret = flb_hash_table_add(ctx->meminfo_ht,
+ metric_name, flb_sds_len(metric_name), g, 0);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not add hash for metric: %s", metric_name);
+ goto error;
+ }
+ }
+
+ flb_sds_destroy(metric_name);
+ flb_sds_destroy(metric_desc);
+ flb_slist_destroy(&list);
+ return 0;
+
+ error:
+ flb_sds_destroy(metric_name);
+ flb_sds_destroy(metric_desc);
+ flb_slist_destroy(&list);
+ return -1;
+}
+
+static int meminfo_update(struct flb_ne *ctx)
+{
+ int i = 0;
+ int ret;
+ int len;
+ int parts;
+ uint64_t ts;
+ double val;
+ size_t out_size;
+ char *p;
+ flb_sds_t tmp;
+ flb_sds_t metric_name = NULL;
+ struct cmt_gauge *g;
+ struct mk_list *head;
+ struct mk_list list;
+ struct mk_list split_list;
+ struct flb_slist_entry *line;
+ struct flb_slist_entry *entry;
+
+ mk_list_init(&list);
+ ret = ne_utils_file_read_lines(ctx->path_procfs, "/meminfo", &list);
+ if (ret == -1) {
+ return -1;
+ }
+
+ ts = cfl_time_now();
+
+ mk_list_foreach(head, &list) {
+ line = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ mk_list_init(&split_list);
+ ret = flb_slist_split_string(&split_list, line->str, ' ', -1);
+ if (ret == -1) {
+ continue;
+ }
+ parts = ret;
+ if (parts == 0) {
+ flb_slist_destroy(&split_list);
+ continue;
+ }
+
+ /* Metric name */
+ entry = mk_list_entry_first(&split_list, struct flb_slist_entry, _head);
+ metric_name = entry->str;
+
+ if ((p = strstr(entry->str, "(anon)")) ||
+ (p = strstr(entry->str, "(file)"))) {
+ *p = '_';
+ len = flb_sds_len(metric_name) - 1;
+ flb_sds_len_set(metric_name, len);
+ }
+
+ /* Metric value */
+ entry = mk_list_entry_next(&split_list, struct flb_slist_entry, _head,
+ &entry->_head);
+
+ ret = ne_utils_str_to_double(entry->str, &val);
+        if (ret == -1) {
+            i++;
+            flb_slist_destroy(&split_list);
+            continue;
+        }
+
+ g = NULL;
+ if (parts == 2) {
+ /* Metric name is the same, no extra bytes */
+ ret = flb_hash_table_get(ctx->meminfo_ht,
+ metric_name, flb_sds_len(metric_name) - 1,
+ (void *) &g, &out_size);
+ }
+ else if (parts == 3) {
+ /* Compose new metric name */
+ tmp = flb_sds_create_size(256);
+ flb_sds_cat_safe(&tmp, metric_name, flb_sds_len(metric_name) - 1);
+ flb_sds_cat_safe(&tmp, "_bytes", 6);
+
+ /* Get metric context */
+ ret = flb_hash_table_get(ctx->meminfo_ht,
+ tmp, flb_sds_len(tmp),
+ (void *) &g, &out_size);
+ flb_sds_destroy(tmp);
+
+ /* Value is in kB, convert to bytes */
+ val *= 1024;
+ }
+
+ if (!g) {
+ flb_plg_error(ctx->ins,
+ "gauge content for metric '%s' not found",
+ metric_name);
+ flb_slist_destroy(&split_list);
+ continue;
+ }
+
+ /* Update metric */
+ cmt_gauge_set(g, ts, val, 0, NULL);
+ flb_slist_destroy(&split_list);
+ }
+
+ flb_slist_destroy(&list);
+ return 0;
+}
+
+int ne_meminfo_init(struct flb_ne *ctx)
+{
+ meminfo_configure(ctx);
+ return 0;
+}
+
+int ne_meminfo_update(struct flb_ne *ctx)
+{
+ meminfo_update(ctx);
+ return 0;
+}
+
+int ne_meminfo_exit(struct flb_ne *ctx)
+{
+ if (ctx->meminfo_ht) {
+ flb_hash_table_destroy(ctx->meminfo_ht);
+ }
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_netdev.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_netdev.c
new file mode 100644
index 000000000..848ca0995
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_netdev.c
@@ -0,0 +1,22 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef __linux__
+#include "ne_netdev_linux.c"
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_netdev.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_netdev.h
new file mode 100644
index 000000000..8155bc3f5
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_netdev.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NE_NETDEV_H
+#define FLB_IN_NE_NETDEV_H
+
+#include "ne.h"
+
+int ne_netdev_init(struct flb_ne *ctx);
+int ne_netdev_update(struct flb_ne *ctx);
+int ne_netdev_exit(struct flb_ne *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_netdev_linux.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_netdev_linux.c
new file mode 100644
index 000000000..771a66189
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_netdev_linux.c
@@ -0,0 +1,363 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "ne.h"
+#include "ne_utils.h"
+
+#include <unistd.h>
+
+static int netdev_hash_set(struct flb_ne *ctx, struct cmt_counter *c,
+ char *metric_name)
+{
+ int ret;
+ int len;
+
+ len = strlen(metric_name);
+ ret = flb_hash_table_add(ctx->netdev_ht,
+ metric_name, len, c, 0);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not register hash entry");
+ return -1;
+ }
+
+ return 0;
+}
+
+static struct cmt_counter *netdev_hash_get(struct flb_ne *ctx,
+ char *device, char *metric_name)
+{
+ int ret;
+ int len;
+ size_t out_size;
+ struct cmt_counter *c;
+
+ len = strlen(metric_name);
+ ret = flb_hash_table_get(ctx->netdev_ht,
+ metric_name, len,
+ (void *) &c, &out_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "hash entry '%s' not found", metric_name);
+ return NULL;
+ }
+
+ return c;
+}
+
+static int netdev_configure(struct flb_ne *ctx)
+{
+ int ret;
+ int parts;
+ int n = 0;
+ int len;
+ char tmp[256];
+ char metric_name[256];
+ struct mk_list *head;
+ struct mk_list *prop_head;
+ struct mk_list list;
+ struct mk_list head_list;
+ struct mk_list split_list;
+ struct mk_list rx_list;
+ struct mk_list tx_list;
+ struct flb_slist_entry *line;
+ struct flb_slist_entry *dev;
+ struct flb_slist_entry *rx_header;
+ struct flb_slist_entry *tx_header;
+ struct flb_slist_entry *prop;
+
+ struct cmt_counter *c;
+
+ /* Initialize hash table */
+ ctx->netdev_ht = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE, 16, 0);
+ if (!ctx->netdev_ht) {
+ return -1;
+ }
+
+ mk_list_init(&list);
+ mk_list_init(&head_list);
+ mk_list_init(&split_list);
+ mk_list_init(&rx_list);
+ mk_list_init(&tx_list);
+
+ ret = ne_utils_file_read_lines(ctx->path_procfs, "/net/dev", &list);
+ if (ret == -1) {
+ return -1;
+ }
+
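+    /*
+     * The second /proc/net/dev header line is expected to split on '|'
+     * into three parts, e.g. (illustrative):
+     *
+     *   " face |bytes packets errs ... multicast|bytes packets errs ..."
+     */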
+ /* Validate file header (second header) */
+ line = flb_slist_entry_get(&list, 1);
+ ret = flb_slist_split_string(&head_list, line->str, '|', -1);
+ if (ret != 3) {
+ flb_plg_error(ctx->ins, "invalid header line in net/dev: %s",
+ line->str);
+ flb_slist_destroy(&list);
+ return -1;
+ }
+
+ /* column names */
+ rx_header = flb_slist_entry_get(&head_list, 1);
+ tx_header = flb_slist_entry_get(&head_list, 2);
+
+ flb_slist_split_string(&rx_list, rx_header->str, ' ', -1);
+ flb_slist_split_string(&tx_list, tx_header->str, ' ', -1);
+
+ n = 0;
+ mk_list_foreach(head, &list) {
+ line = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ if (n < 2) {
+ /* skip first two lines */
+ n++;
+ continue;
+ }
+
+ mk_list_init(&split_list);
+ ret = flb_slist_split_string(&split_list, line->str, ' ', 1);
+ if (ret == -1) {
+ continue;
+ }
+ parts = ret;
+
+ if (parts < 1) {
+ flb_slist_destroy(&split_list);
+ continue;
+ }
+
+ /* device */
+ dev = flb_slist_entry_get(&split_list, 0);
+
+ /* sanitize device name */
+ len = flb_sds_len(dev->str);
+ len--;
+ flb_sds_len_set(dev->str, len - 1);
+ dev->str[len] = '\0';
+
+ /* iterate all rx and tx fields to create a unique metric for each one */
+ mk_list_foreach(prop_head, &rx_list) {
+ prop = mk_list_entry(prop_head, struct flb_slist_entry, _head);
+
+ /* help string */
+ snprintf(tmp, sizeof(tmp) - 1,
+ "Network device statistic %s.",
+ prop->str);
+
+ /* metric name */
+ snprintf(metric_name, sizeof(metric_name) - 1,
+ "receive_%s_total", prop->str);
+
+ /* create the metric */
+ c = cmt_counter_create(ctx->cmt, "node", "network", metric_name,
+ tmp,
+ 1, (char *[]) {"device"});
+
+ netdev_hash_set(ctx, c, metric_name);
+ }
+
+ mk_list_foreach(prop_head, &tx_list) {
+ prop = mk_list_entry(prop_head, struct flb_slist_entry, _head);
+
+ /* help string */
+ snprintf(tmp, sizeof(tmp) - 1, "Network device statistic %s.",
+ prop->str);
+
+ /* metric name */
+ snprintf(metric_name, sizeof(metric_name) - 1,
+ "transmit_%s_total", prop->str);
+
+ /* create the metric */
+ c = cmt_counter_create(ctx->cmt, "node", "network", metric_name,
+ tmp,
+ 1, (char *[]) {"device"});
+
+ netdev_hash_set(ctx, c, metric_name);
+ }
+
+ flb_slist_destroy(&split_list);
+ }
+
+ flb_slist_destroy(&head_list);
+ flb_slist_destroy(&rx_list);
+ flb_slist_destroy(&tx_list);
+ flb_slist_destroy(&list);
+
+ return 0;
+}
+
+static int netdev_update(struct flb_ne *ctx)
+{
+ int ret;
+ int parts;
+ int n = 0;
+ int len;
+ int pos;
+ int rx_len;
+ uint64_t ts;
+ double val;
+ char metric_name[256];
+ char *type;
+ struct mk_list *head;
+ struct mk_list *prop_head;
+ struct mk_list list;
+ struct mk_list head_list;
+ struct mk_list split_list;
+ struct mk_list rx_list;
+ struct mk_list tx_list;
+ struct flb_slist_entry *line;
+ struct flb_slist_entry *dev;
+ struct flb_slist_entry *rx_header;
+ struct flb_slist_entry *tx_header;
+ struct flb_slist_entry *prop;
+ struct flb_slist_entry *prop_name;
+
+ struct cmt_counter *c;
+
+ mk_list_init(&list);
+ mk_list_init(&head_list);
+ mk_list_init(&split_list);
+ mk_list_init(&rx_list);
+ mk_list_init(&tx_list);
+
+ ret = ne_utils_file_read_lines(ctx->path_procfs, "/net/dev", &list);
+ if (ret == -1) {
+ return -1;
+ }
+
+ /* Validate file header (second header) */
+ line = flb_slist_entry_get(&list, 1);
+ ret = flb_slist_split_string(&head_list, line->str, '|', -1);
+ if (ret != 3) {
+ flb_plg_error(ctx->ins, "invalid header line in net/dev: %s",
+ line->str);
+ flb_slist_destroy(&list);
+ return -1;
+ }
+
+ /* column names */
+ rx_header = flb_slist_entry_get(&head_list, 1);
+ tx_header = flb_slist_entry_get(&head_list, 2);
+
+ /* split rx properties */
+ flb_slist_split_string(&rx_list, rx_header->str, ' ', -1);
+ rx_len = mk_list_size(&rx_list);
+
+ /* split tx properties */
+ flb_slist_split_string(&tx_list, tx_header->str, ' ', -1);
+
+ n = 0;
+ ts = cfl_time_now();
+ mk_list_foreach(head, &list) {
+ line = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ if (n < 2) {
+ /* skip first two lines */
+ n++;
+ continue;
+ }
+
+ mk_list_init(&split_list);
+ ret = flb_slist_split_string(&split_list, line->str, ' ', -1);
+ if (ret == -1) {
+ continue;
+ }
+ parts = ret;
+
+ if (parts < 1) {
+ flb_slist_destroy(&split_list);
+ continue;
+ }
+
+ /* device */
+ dev = flb_slist_entry_get(&split_list, 0);
+
+ /* sanitize device name */
+ len = flb_sds_len(dev->str);
+ len--;
+ flb_sds_len_set(dev->str, len - 1);
+ dev->str[len] = '\0';
+
+ /* iterate line fields */
+ n = 0;
+ mk_list_foreach(prop_head, &split_list) {
+ if (n == 0) {
+ /* skip device name */
+ n++;
+ continue;
+ }
+
+ prop = mk_list_entry(prop_head, struct flb_slist_entry, _head);
+ pos = n - 1;
+ if (pos < rx_len) {
+ prop_name = flb_slist_entry_get(&rx_list, pos);
+ type = "receive";
+ }
+ else {
+ pos = (n - 1) - rx_len;
+ prop_name = flb_slist_entry_get(&tx_list, pos);
+ type = "transmit";
+ }
+
+ /* metric name */
+ snprintf(metric_name, sizeof(metric_name) - 1,
+ "%s_%s_total", type, prop_name->str);
+
+ c = netdev_hash_get(ctx, dev->str, metric_name);
+ if (!c) {
+ flb_plg_error(ctx->ins, "no hash metric found for %s:%s",
+ dev->str, prop->str);
+ continue;
+ }
+
+ ne_utils_str_to_double(prop->str, &val);
+ ret = cmt_counter_set(c, ts, val, 1, (char *[]) {dev->str});
+ n++;
+ }
+ flb_slist_destroy(&split_list);
+ }
+
+ flb_slist_destroy(&head_list);
+ flb_slist_destroy(&rx_list);
+ flb_slist_destroy(&tx_list);
+ flb_slist_destroy(&list);
+
+ return 0;
+}
+
+
+int ne_netdev_init(struct flb_ne *ctx)
+{
+ netdev_configure(ctx);
+ return 0;
+}
+
+int ne_netdev_update(struct flb_ne *ctx)
+{
+ netdev_update(ctx);
+ return 0;
+}
+
+int ne_netdev_exit(struct flb_ne *ctx)
+{
+ if (ctx->netdev_ht) {
+ flb_hash_table_destroy(ctx->netdev_ht);
+ }
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_stat_linux.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_stat_linux.c
new file mode 100644
index 000000000..053fb2181
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_stat_linux.c
@@ -0,0 +1,152 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _GNU_SOURCE
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "ne.h"
+#include "ne_utils.h"
+
+static int stat_configure(struct flb_ne *ctx)
+{
+ struct cmt_counter *c;
+ struct cmt_gauge *g;
+
+ /* node_intr_total */
+ c = cmt_counter_create(ctx->cmt, "node", "", "intr_total",
+ "Total number of interrupts serviced.",
+ 0, NULL);
+ ctx->st_intr = c;
+
+ /* node_context_switches_total */
+ c = cmt_counter_create(ctx->cmt, "node", "", "context_switches_total",
+ "Total number of context switches.",
+ 0, NULL);
+ ctx->st_context_switches = c;
+
+ /* node_forks_total */
+ c = cmt_counter_create(ctx->cmt, "node", "", "forks_total",
+ "Total number of forks.",
+ 0, NULL);
+ ctx->st_forks = c;
+
+ /* node_boot_time_seconds */
+ g = cmt_gauge_create(ctx->cmt, "node", "", "boot_time_seconds",
+ "Node boot time, in unixtime.",
+ 0, NULL);
+ ctx->st_boot_time = g;
+
+ /* node_procs_running */
+ g = cmt_gauge_create(ctx->cmt, "node", "", "procs_running",
+ "Number of processes in runnable state.",
+ 0, NULL);
+ ctx->st_procs_running = g;
+
+ /* node_procs_blocked */
+ g = cmt_gauge_create(ctx->cmt, "node", "", "procs_blocked",
+ "Number of processes blocked waiting for I/O to complete.",
+ 0, NULL);
+ ctx->st_procs_blocked = g;
+
+ return 0;
+}
+
+static int stat_update(struct flb_ne *ctx)
+{
+ int ret;
+ int parts;
+ uint64_t ts;
+ double d_val;
+ struct mk_list *head;
+ struct mk_list list;
+ struct mk_list split_list;
+ struct flb_slist_entry *line;
+ struct flb_slist_entry *entry;
+ struct flb_slist_entry *s_val;
+
+ mk_list_init(&list);
+ ret = ne_utils_file_read_lines(ctx->path_procfs, "/stat", &list);
+ if (ret == -1) {
+ return -1;
+ }
+
+ ts = cfl_time_now();
+
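+    /*
+     * Relevant /proc/stat lines (illustrative values):
+     *   "intr 128927351 ...", "ctxt 232147841", "btime 1650000000",
+     *   "processes 41225", "procs_running 3", "procs_blocked 0"
+     */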
+ mk_list_foreach(head, &list) {
+ line = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ mk_list_init(&split_list);
+ ret = flb_slist_split_string(&split_list, line->str, ' ', -1);
+ if (ret == -1) {
+ continue;
+ }
+ parts = ret;
+ if (parts == 0) {
+ flb_slist_destroy(&split_list);
+ continue;
+ }
+
+ /* metric name and value */
+ entry = flb_slist_entry_get(&split_list, 0);
+ s_val = flb_slist_entry_get(&split_list, 1);
+
+ if (strcmp(entry->str, "intr") == 0) {
+ ne_utils_str_to_double(s_val->str, &d_val);
+ cmt_counter_set(ctx->st_intr, ts, d_val, 0, NULL);
+ }
+ else if (strcmp(entry->str, "ctxt") == 0) {
+ ne_utils_str_to_double(s_val->str, &d_val);
+ cmt_counter_set(ctx->st_context_switches, ts, d_val, 0, NULL);
+ }
+ else if (strcmp(entry->str, "btime") == 0) {
+ ne_utils_str_to_double(s_val->str, &d_val);
+ cmt_gauge_set(ctx->st_boot_time, ts, d_val, 0, NULL);
+ }
+ else if (strcmp(entry->str, "processes") == 0) {
+ ne_utils_str_to_double(s_val->str, &d_val);
+ cmt_counter_set(ctx->st_forks, ts, d_val, 0, NULL);
+ }
+ else if (strcmp(entry->str, "procs_running") == 0) {
+ ne_utils_str_to_double(s_val->str, &d_val);
+ cmt_gauge_set(ctx->st_procs_running, ts, d_val, 0, NULL);
+ }
+ else if (strcmp(entry->str, "procs_blocked") == 0) {
+ ne_utils_str_to_double(s_val->str, &d_val);
+ cmt_gauge_set(ctx->st_procs_blocked, ts, d_val, 0, NULL);
+ }
+ flb_slist_destroy(&split_list);
+ }
+ flb_slist_destroy(&list);
+
+ return 0;
+}
+
+int ne_stat_init(struct flb_ne *ctx)
+{
+ stat_configure(ctx);
+ return 0;
+}
+
+int ne_stat_update(struct flb_ne *ctx)
+{
+ stat_update(ctx);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_stat_linux.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_stat_linux.h
new file mode 100644
index 000000000..6d92a1492
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_stat_linux.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NE_STAT_H
+#define FLB_IN_NE_STAT_H
+
+#include "ne.h"
+
+int ne_stat_init(struct flb_ne *ctx);
+int ne_stat_update(struct flb_ne *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_systemd.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_systemd.c
new file mode 100644
index 000000000..ec4df2455
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_systemd.c
@@ -0,0 +1,807 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <cmetrics/cmt_math.h>
+#include <systemd/sd-bus.h>
+#include <stdarg.h>
+
+#include "ne.h"
+#include "ne_utils.h"
+#include "ne_systemd.h"
+
+#include <unistd.h>
+#include <float.h>
+
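+/* Return FLB_TRUE when haystack ends with needle, optionally case insensitive */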
+static int str_ends_with(char *haystack, char *needle, int caseless)
+{
+ size_t haystack_length;
+ size_t trailer_offset;
+ size_t needle_length;
+ int result;
+
+ haystack_length = strlen(haystack);
+ needle_length = strlen(needle);
+
+ if (needle_length > haystack_length) {
+ return FLB_FALSE;
+ }
+
+ trailer_offset = haystack_length - needle_length;
+
+ if (caseless) {
+ result = strcasecmp(&haystack[trailer_offset],
+ needle);
+ }
+ else {
+ result = strcmp(&haystack[trailer_offset],
+ needle);
+ }
+
+ if (result == 0) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
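+/* Reset the output variable according to its D-Bus type signature character */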
+static void clear_property_variable(char property_type, void *property_value)
+{
+ if (property_type == 'y') {
+ *((uint8_t *) property_value) = 0;
+ }
+ else if (property_type == 'b') {
+ *((int *) property_value) = 0;
+ }
+ else if (property_type == 'n') {
+ *((int16_t *) property_value) = 0;
+ }
+ else if (property_type == 'q') {
+ *((uint16_t *) property_value) = 0;
+ }
+ else if (property_type == 'i') {
+ *((int32_t *) property_value) = 0;
+ }
+ else if (property_type == 'u') {
+ *((uint32_t *) property_value) = 0;
+ }
+ else if (property_type == 'x') {
+ *((int64_t *) property_value) = 0;
+ }
+ else if (property_type == 't') {
+ *((uint64_t *) property_value) = 0;
+ }
+ else if (property_type == 'd') {
+ *((double *) property_value) = 0;
+ }
+ else if (property_type == 's') {
+ *((char **) property_value) = NULL;
+ }
+ else if (property_type == 'o') {
+ *((char **) property_value) = NULL;
+ }
+ else if (property_type == 'g') {
+ *((char **) property_value) = NULL;
+ }
+ else if (property_type == 'h') {
+ *((int32_t *) property_value) = -1;
+ }
+}
+
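+/*
+ * Read a property from the systemd Manager object on the system bus.
+ * String-like types ('s', 'o', 'g') use sd_bus_get_property_string(),
+ * any other type goes through sd_bus_get_property_trivial().
+ */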
+static int get_system_property(struct flb_ne *ctx,
+ char *interface,
+ char *property_name,
+ char property_type,
+ void *property_value)
+{
+ int result;
+
+ clear_property_variable(property_type, property_value);
+
+ if (interface == NULL) {
+ interface = "org.freedesktop.systemd1.Manager";
+ }
+
+ if (property_type == 's' ||
+ property_type == 'o' ||
+ property_type == 'g') {
+ result = sd_bus_get_property_string((sd_bus *) ctx->systemd_dbus_handle,
+ "org.freedesktop.systemd1",
+ "/org/freedesktop/systemd1",
+ interface,
+ property_name,
+ NULL,
+ property_value);
+ }
+ else {
+ result = sd_bus_get_property_trivial((sd_bus *) ctx->systemd_dbus_handle,
+ "org.freedesktop.systemd1",
+ "/org/freedesktop/systemd1",
+ interface,
+ property_name,
+ NULL,
+ property_type,
+ property_value);
+ }
+
+ if (result < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
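+/*
+ * Same as get_system_property() but addressed to a specific unit object,
+ * selecting the per-type interface (Service, Mount, Socket, Timer) when
+ * none is provided.
+ */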
+static int get_unit_property(struct flb_ne *ctx,
+ struct ne_systemd_unit *unit,
+ char *interface,
+ char *property_name,
+ char property_type,
+ void *property_value)
+{
+ int result;
+
+ clear_property_variable(property_type, property_value);
+
+ if (interface == NULL) {
+ if (unit->unit_type == SYSTEMD_UNIT_TYPE_SERVICE) {
+ interface = "org.freedesktop.systemd1.Service";
+ }
+ else if (unit->unit_type == SYSTEMD_UNIT_TYPE_MOUNT) {
+ interface = "org.freedesktop.systemd1.Mount";
+ }
+ else if (unit->unit_type == SYSTEMD_UNIT_TYPE_SOCKET) {
+ interface = "org.freedesktop.systemd1.Socket";
+ }
+ else if (unit->unit_type == SYSTEMD_UNIT_TYPE_TIMER) {
+ interface = "org.freedesktop.systemd1.Timer";
+ }
+ else {
+ interface = unit->name;
+ }
+ }
+
+ if (property_type == 's' ||
+ property_type == 'o' ||
+ property_type == 'g') {
+ result = sd_bus_get_property_string((sd_bus *) ctx->systemd_dbus_handle,
+ "org.freedesktop.systemd1",
+ unit->path,
+ interface,
+ property_name,
+ NULL,
+ property_value);
+ }
+ else {
+ result = sd_bus_get_property_trivial((sd_bus *) ctx->systemd_dbus_handle,
+ "org.freedesktop.systemd1",
+ unit->path,
+ interface,
+ property_name,
+ NULL,
+ property_type,
+ property_value);
+ }
+
+ if (result < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
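+/*
+ * Walk the result of the ListUnits D-Bus call: count units per active state
+ * and emit per-unit metrics for services, sockets, timers and start times.
+ */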
+static int ne_systemd_update_unit_state(struct flb_ne *ctx)
+{
+ char *unit_states[] = { "activating", "active",
+ "deactivating", "inactive",
+ "failed" };
+ double timer_trigger_timestamp;
+ uint64_t deactivating_units;
+ uint64_t activating_units;
+ double unit_start_time;
+ uint64_t inactive_units;
+ uint64_t active_units;
+ uint64_t failed_units;
+ int include_flag;
+ uint64_t timestamp;
+ int result;
+ size_t index;
+ sd_bus_message *reply;
+ struct ne_systemd_unit unit;
+ sd_bus *bus;
+
+ bus = (sd_bus *) ctx->systemd_dbus_handle;
+
+ result = sd_bus_call_method(bus,
+ "org.freedesktop.systemd1",
+ "/org/freedesktop/systemd1",
+ "org.freedesktop.systemd1.Manager",
+ "ListUnits",
+ NULL,
+ &reply,
+ "");
+
+ if (result < 0) {
+ return -1;
+ }
+
+ result = sd_bus_message_enter_container(reply, 'a', "(ssssssouso)");
+
+ if (result < 0) {
+ sd_bus_message_unref(reply);
+
+ return -2;
+ }
+
+ timestamp = cfl_time_now();
+
+ deactivating_units = 0;
+ activating_units = 0;
+ inactive_units = 0;
+ active_units = 0;
+ failed_units = 0;
+
+ do {
+ result = sd_bus_message_read(reply,
+ "(ssssssouso)",
+ &unit.name,
+ &unit.description,
+ &unit.load_state,
+ &unit.active_state,
+ &unit.sub_state,
+ &unit.followed,
+ &unit.path,
+ &unit.job_id,
+ &unit.job_type,
+ &unit.object_path);
+
+
+ if (result < 0) {
+ sd_bus_message_unref(reply);
+
+ return -3;
+ }
+ else if(result > 0) {
+ unit.type = NULL;
+
+ if (strcasecmp(unit.active_state, "activating") == 0) {
+ activating_units++;
+ }
+ else if (strcasecmp(unit.active_state, "deactivating") == 0) {
+ deactivating_units++;
+ }
+ else if (strcasecmp(unit.active_state, "inactive") == 0) {
+ inactive_units++;
+ }
+ else if (strcasecmp(unit.active_state, "active") == 0) {
+ active_units++;
+ }
+ else if (strcasecmp(unit.active_state, "failed") == 0) {
+ failed_units++;
+ }
+
+ if (ctx->systemd_regex_include_list != NULL) {
+ include_flag = flb_regex_match(ctx->systemd_regex_include_list,
+ (unsigned char *) unit.name,
+ strlen(unit.name));
+ }
+ else {
+ include_flag = FLB_TRUE;
+ }
+
+ if (!include_flag) {
+ continue;
+ }
+
+ if (ctx->systemd_regex_exclude_list != NULL) {
+ include_flag = !flb_regex_match(ctx->systemd_regex_exclude_list,
+ (unsigned char *) unit.name,
+ strlen(unit.name));
+ }
+ else {
+ include_flag = FLB_TRUE;
+ }
+
+ if (!include_flag) {
+ continue;
+ }
+
+ if (strcasecmp(unit.load_state, "loaded") != 0) {
+ continue;
+ }
+
+ if (str_ends_with(unit.name, ".service", FLB_TRUE)) {
+ unit.unit_type = SYSTEMD_UNIT_TYPE_SERVICE;
+
+ result = get_service_type(ctx,
+ &unit,
+ &unit.type);
+
+ if (ctx->systemd_include_service_restarts) {
+ result = get_service_restart_count(ctx,
+ &unit,
+ &unit.restart_count);
+
+ cmt_counter_set(ctx->systemd_service_restarts,
+ timestamp,
+ unit.restart_count,
+ 1,
+ (char *[]){ unit.name });
+
+ }
+
+ if (ctx->systemd_include_service_task_metrics) {
+ result = get_service_active_tasks(ctx,
+ &unit,
+ &unit.active_tasks);
+
+ if (unit.active_tasks != UINT64_MAX) {
+ cmt_gauge_set(ctx->systemd_unit_tasks,
+ timestamp,
+ unit.active_tasks,
+ 1,
+ (char *[]){ unit.name });
+ }
+
+ result = get_service_max_tasks(ctx,
+ &unit,
+ &unit.max_tasks);
+
+ if (unit.max_tasks != UINT64_MAX) {
+ cmt_gauge_set(ctx->systemd_unit_tasks_max,
+ timestamp,
+ unit.max_tasks,
+ 1,
+ (char *[]){ unit.name });
+ }
+ }
+
+ result = 1;
+ }
+ else if (str_ends_with(unit.name, ".mount", FLB_TRUE)) {
+ unit.unit_type = SYSTEMD_UNIT_TYPE_MOUNT;
+ }
+ else if (str_ends_with(unit.name, ".socket", FLB_TRUE)) {
+ unit.unit_type = SYSTEMD_UNIT_TYPE_SOCKET;
+
+ result = get_socket_accepted_connection_count(
+ ctx,
+ &unit,
+ &unit.accepted_connections);
+
+ result = get_socket_active_connection_count(
+ ctx,
+ &unit,
+ &unit.active_connections);
+
+ result = get_socket_refused_connection_count(
+ ctx,
+ &unit,
+ &unit.refused_connections);
+
+ cmt_gauge_set(ctx->systemd_socket_accepted_connections,
+ timestamp,
+ unit.accepted_connections,
+ 1,
+ (char *[]){ unit.name });
+
+ cmt_gauge_set(ctx->systemd_socket_active_connections,
+ timestamp,
+ unit.active_connections,
+ 1,
+ (char *[]){ unit.name });
+
+ cmt_gauge_set(ctx->systemd_socket_refused_connections,
+ timestamp,
+ unit.refused_connections,
+ 1,
+ (char *[]){ unit.name });
+
+ result = 1;
+ }
+ else if (str_ends_with(unit.name, ".timer", FLB_TRUE)) {
+ unit.unit_type = SYSTEMD_UNIT_TYPE_TIMER;
+
+ result = get_timer_last_trigger_timestamp(
+ ctx,
+ &unit,
+ &unit.last_trigger_timestamp);
+
+ timer_trigger_timestamp = (double) unit.last_trigger_timestamp;
+ timer_trigger_timestamp /= 1000000.0;
+
+ cmt_gauge_set(ctx->systemd_timer_last_trigger_seconds,
+ timestamp,
+ timer_trigger_timestamp,
+ 1,
+ (char *[]){ unit.name });
+
+ result = 1;
+ }
+ else {
+ unit.unit_type = SYSTEMD_UNIT_TYPE_UNDEFINED;
+ }
+
+ if (ctx->systemd_include_unit_start_times) {
+ if (strcasecmp(unit.active_state, "active") == 0) {
+ result = get_unit_start_time(ctx, &unit, &unit.start_time);
+
+ unit_start_time = (double) unit.start_time;
+ unit_start_time /= 1000000.0;
+ }
+ else {
+ unit_start_time = 0;
+ }
+
+ cmt_gauge_set(ctx->systemd_unit_start_times,
+ timestamp,
+ unit_start_time,
+ 1,
+ (char *[]){ unit.name });
+
+ result = 1;
+ }
+
+ for(index = 0 ; index < 5 ; index++) {
+ cmt_gauge_add(ctx->systemd_unit_state,
+ timestamp,
+ 0,
+ 3,
+ (char *[]){ unit.name,
+ unit_states[index],
+ unit.type
+ });
+ }
+
+ cmt_gauge_inc(ctx->systemd_unit_state,
+ timestamp,
+ 3,
+ (char *[]){ unit.name,
+ unit.active_state,
+ unit.type
+ });
+
+
+ if (unit.type != NULL) {
+ free(unit.type);
+ }
+ }
+ }
+ while (result > 0);
+
+ sd_bus_message_exit_container(reply);
+
+ sd_bus_message_unref(reply);
+
+ cmt_gauge_set(ctx->systemd_units,
+ timestamp,
+ activating_units,
+ 1,
+ (char *[]){ "activating" });
+
+ cmt_gauge_set(ctx->systemd_units,
+ timestamp,
+ deactivating_units,
+ 1,
+ (char *[]){ "deactivating" });
+
+ cmt_gauge_set(ctx->systemd_units,
+ timestamp,
+ inactive_units,
+ 1,
+ (char *[]){ "inactive" });
+
+ cmt_gauge_set(ctx->systemd_units,
+ timestamp,
+ active_units,
+ 1,
+ (char *[]){ "active" });
+
+ cmt_gauge_set(ctx->systemd_units,
+ timestamp,
+ failed_units,
+ 1,
+ (char *[]){ "failed" });
+
+ return 0;
+}
+
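+/*
+ * Emit the detected systemd version (resolved once on the first run) and a
+ * gauge reflecting whether the manager reports the "running" state.
+ */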
+static int ne_systemd_update_system_state(struct flb_ne *ctx)
+{
+ int system_running;
+ uint64_t timestamp;
+ char *version;
+ int result;
+ char *state;
+
+ timestamp = cfl_time_now();
+
+ if (!ctx->systemd_initialization_flag) {
+ result = get_system_version(ctx, &version);
+
+ if (result != 0) {
+ return -1;
+ }
+
+ ctx->libsystemd_version_text = version;
+ ctx->libsystemd_version = strtod(version, NULL);
+
+ cmt_gauge_set(ctx->systemd_version,
+ timestamp,
+ ctx->libsystemd_version,
+ 1,
+ (char *[]){ ctx->libsystemd_version_text });
+ }
+ else {
+ cmt_gauge_add(ctx->systemd_version,
+ timestamp,
+ 0,
+ 1,
+ (char *[]){ ctx->libsystemd_version_text });
+ }
+
+ result = get_system_state(ctx, &state);
+
+ if (result != 0) {
+ return -2;
+ }
+
+ system_running = 0;
+
+ if (strcasecmp(state, "running") == 0) {
+ system_running = 1;
+ }
+
+ cmt_gauge_set(ctx->systemd_system_running,
+ timestamp,
+ system_running,
+ 0,
+ NULL);
+ free(state);
+
+ return 0;
+}
+
+int ne_systemd_init(struct flb_ne *ctx)
+{
+ int result;
+
+ ctx->systemd_dbus_handle = NULL;
+
+ result = sd_bus_open_system((sd_bus **) &ctx->systemd_dbus_handle);
+
+ if (result < 0) {
+ return -1;
+ }
+
+ ctx->systemd_socket_accepted_connections = cmt_gauge_create(ctx->cmt,
+ "node",
+ "systemd",
+ "socket_accepted_connections_total",
+ "Total number of accepted " \
+ "socket connections.",
+ 1,
+ (char *[]) {"name"});
+
+ if (ctx->systemd_socket_accepted_connections == NULL) {
+ return -1;
+ }
+
+ ctx->systemd_socket_active_connections = cmt_gauge_create(ctx->cmt,
+ "node",
+ "systemd",
+ "socket_current_connections",
+ "Current number of socket " \
+ "connections.",
+ 1,
+ (char *[]) {"name"});
+
+ if (ctx->systemd_socket_active_connections == NULL) {
+ return -1;
+ }
+
+ ctx->systemd_socket_refused_connections = cmt_gauge_create(ctx->cmt,
+ "node",
+ "systemd",
+ "socket_refused_connections_total",
+ "Total number of refused " \
+ "socket connections.",
+ 1,
+ (char *[]) {"name"});
+
+ if (ctx->systemd_socket_refused_connections == NULL) {
+ return -1;
+ }
+
+ ctx->systemd_system_running = cmt_gauge_create(ctx->cmt,
+ "node",
+ "systemd",
+ "system_running",
+ "Whether the system is " \
+ "operational (see 'systemctl" \
+ " is-system-running')",
+ 0, NULL);
+
+ if (ctx->systemd_system_running == NULL) {
+ return -1;
+ }
+
+ ctx->systemd_timer_last_trigger_seconds = cmt_gauge_create(ctx->cmt,
+ "node",
+ "systemd",
+ "timer_last_trigger_seconds",
+ "Seconds since epoch of " \
+ "last trigger.",
+ 1,
+ (char *[]) {"name"});
+
+ if (ctx->systemd_timer_last_trigger_seconds == NULL) {
+ return -1;
+ }
+
+ ctx->systemd_service_restarts = cmt_counter_create(ctx->cmt,
+ "node",
+ "systemd",
+ "service_restart_total",
+ "Service unit count of " \
+ "Restart triggers",
+ 1, (char *[]) {"name"});
+
+ if (ctx->systemd_service_restarts == NULL) {
+ return -1;
+ }
+
+ cmt_counter_allow_reset(ctx->systemd_service_restarts);
+
+ ctx->systemd_unit_tasks = cmt_gauge_create(ctx->cmt,
+ "node",
+ "systemd",
+ "unit_tasks_current",
+ "Current number of tasks " \
+ "per Systemd unit.",
+ 1, (char *[]) {"name"});
+
+ if (ctx->systemd_unit_tasks == NULL) {
+ return -1;
+ }
+
+ ctx->systemd_unit_tasks_max = cmt_gauge_create(ctx->cmt,
+ "node",
+ "systemd",
+ "unit_tasks_max",
+ "Maximum number of tasks " \
+ "per Systemd unit.",
+ 1, (char *[]) {"name"});
+
+    if (ctx->systemd_unit_tasks_max == NULL) {
+ return -1;
+ }
+
+ ctx->systemd_unit_start_times = cmt_gauge_create(ctx->cmt,
+ "node",
+ "systemd",
+ "unit_start_time_seconds",
+ "Start time of the unit since " \
+ "unix epoch in seconds.",
+ 1, (char *[]) {"name"});
+
+ if (ctx->systemd_unit_start_times == NULL) {
+ return -1;
+ }
+
+ ctx->systemd_unit_state = cmt_gauge_create(ctx->cmt,
+ "node",
+ "systemd",
+ "unit_state",
+ "Systemd unit",
+ 3, (char *[]) {"name",
+ "state",
+ "type"});
+
+ if (ctx->systemd_unit_state == NULL) {
+ return -1;
+ }
+
+ ctx->systemd_units = cmt_gauge_create(ctx->cmt,
+ "node",
+ "systemd",
+ "units",
+ "Summary of systemd unit states",
+ 1, (char *[]) {"state"});
+
+ if (ctx->systemd_units == NULL) {
+ return -1;
+ }
+
+ ctx->systemd_version = cmt_gauge_create(ctx->cmt,
+ "node",
+ "systemd",
+ "version",
+ "Detected systemd version",
+ 1, (char *[]) {"version"});
+
+ if (ctx->systemd_version == NULL) {
+ return -1;
+ }
+
+ if (ctx->systemd_regex_include_list_text != NULL) {
+ ctx->systemd_regex_include_list = \
+ flb_regex_create(ctx->systemd_regex_include_list_text);
+
+ if (ctx->systemd_regex_include_list == NULL) {
+ return -1;
+ }
+ }
+
+ if (ctx->systemd_regex_exclude_list_text != NULL) {
+ ctx->systemd_regex_exclude_list = \
+ flb_regex_create(ctx->systemd_regex_exclude_list_text);
+
+ if (ctx->systemd_regex_exclude_list == NULL) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int ne_systemd_update(struct flb_ne *ctx)
+{
+ int result;
+
+ result = ne_systemd_update_system_state(ctx);
+
+ if (result != 0) {
+ return result;
+ }
+
+ result = ne_systemd_update_unit_state(ctx);
+
+ if (result != 0) {
+ return result;
+ }
+
+ if (!ctx->systemd_initialization_flag) {
+ ctx->systemd_initialization_flag = FLB_TRUE;
+ }
+
+ return 0;
+}
+
+int ne_systemd_exit(struct flb_ne *ctx)
+{
+ if (ctx->systemd_dbus_handle != NULL) {
+ sd_bus_unref((sd_bus *) ctx->systemd_dbus_handle);
+
+ ctx->systemd_dbus_handle = NULL;
+ }
+
+ if (ctx->systemd_regex_include_list != NULL) {
+ flb_regex_destroy(ctx->systemd_regex_include_list);
+ }
+
+ if (ctx->systemd_regex_exclude_list != NULL) {
+ flb_regex_destroy(ctx->systemd_regex_exclude_list);
+ }
+
+ if (ctx->libsystemd_version_text != NULL) {
+ flb_free(ctx->libsystemd_version_text);
+ }
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_systemd.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_systemd.h
new file mode 100644
index 000000000..8c6fb26c1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_systemd.h
@@ -0,0 +1,127 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NE_SYSTEMD_H
+#define FLB_IN_NE_SYSTEMD_H
+
+#include "ne.h"
+
+#define SYSTEMD_UNIT_TYPE_UNDEFINED 0
+#define SYSTEMD_UNIT_TYPE_SERVICE 1
+#define SYSTEMD_UNIT_TYPE_SOCKET 2
+#define SYSTEMD_UNIT_TYPE_MOUNT 3
+#define SYSTEMD_UNIT_TYPE_TIMER 4
+
+struct ne_systemd_unit {
+ char *name;
+ char *description;
+ char *load_state;
+ char *active_state;
+ char *sub_state;
+ char *followed;
+ char *path;
+ uint32_t job_id;
+ char *job_type;
+ char *object_path;
+
+ /* not part of the unit list result */
+ uint64_t start_time;
+ int unit_type;
+ char *type;
+
+ /* services */
+ uint32_t restart_count;
+ uint64_t active_tasks;
+ uint64_t max_tasks;
+
+ /* sockets */
+ uint32_t accepted_connections;
+ uint32_t active_connections;
+ uint32_t refused_connections;
+
+ /* timers */
+ uint64_t last_trigger_timestamp;
+};
+
+#ifdef FLB_HAVE_SYSTEMD_SDBUS
+int ne_systemd_init(struct flb_ne *ctx);
+int ne_systemd_update(struct flb_ne *ctx);
+int ne_systemd_exit(struct flb_ne *ctx);
+#else
+static int ne_systemd_init(struct flb_ne *ctx)
+{
+ return 0;
+}
+static int ne_systemd_update(struct flb_ne *ctx)
+{
+ return 0;
+}
+static int ne_systemd_exit(struct flb_ne *ctx)
+{
+ return 0;
+}
+#endif
+
+#define get_system_state(context, output_variable) \
+ get_system_property(context, NULL, "SystemState", \
+ 's', (void *) (output_variable))
+
+#define get_system_version(context, output_variable) \
+ get_system_property(context, NULL, "Version", \
+ 's', (void *) (output_variable))
+
+#define get_service_type(context, unit, output_variable) \
+ get_unit_property(context, unit, NULL, "Type", \
+ 's', (void *) (output_variable))
+
+#define get_service_active_tasks(context, unit, output_variable) \
+ get_unit_property(context, unit, NULL, "TasksCurrent", \
+ 't', (void *) (output_variable))
+
+#define get_service_max_tasks(context, unit, output_variable) \
+ get_unit_property(context, unit, NULL, "TasksMax", \
+ 't', (void *) (output_variable))
+
+#define get_service_restart_count(context, unit, output_variable) \
+ get_unit_property(context, unit, NULL, "NRestarts", \
+ 'u', (void *) (output_variable))
+
+#define get_socket_accepted_connection_count(context, unit, output_variable) \
+ get_unit_property(context, unit, NULL, "NAccepted", \
+ 'u', (void *) (output_variable))
+
+#define get_socket_active_connection_count(context, unit, output_variable) \
+ get_unit_property(context, unit, NULL, "NConnections", \
+ 'u', (void *) (output_variable))
+
+#define get_socket_refused_connection_count(context, unit, output_variable) \
+ get_unit_property(context, unit, NULL, "NRefused", \
+ 'u', (void *) (output_variable))
+
+#define get_timer_last_trigger_timestamp(context, unit, output_variable) \
+ get_unit_property(context, unit, NULL, "LastTriggerUSec", \
+ 't', (void *) (output_variable))
+
+#define get_unit_start_time(context, unit, output_variable) \
+ get_unit_property(context, \
+ unit, \
+ "org.freedesktop.systemd1.Unit", \
+ "ActiveEnterTimestamp", \
+ 't', (void *) (output_variable))
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_textfile.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_textfile.c
new file mode 100644
index 000000000..f9c584453
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_textfile.c
@@ -0,0 +1,22 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef __linux__
+#include "ne_textfile_linux.c"
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_textfile.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_textfile.h
new file mode 100644
index 000000000..7f67d817f
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_textfile.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NE_TEXTFILE_H
+#define FLB_IN_NE_TEXTFILE_H
+
+#include "ne.h"
+
+int ne_textfile_init(struct flb_ne *ctx);
+int ne_textfile_update(struct flb_ne *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_textfile_linux.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_textfile_linux.c
new file mode 100644
index 000000000..d65e01441
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_textfile_linux.c
@@ -0,0 +1,204 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_file.h>
+
+#include "ne.h"
+#include "ne_utils.h"
+
+#include <unistd.h>
+#include <float.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+/* Prometheus decoder */
+#include <cmetrics/cmt_decode_prometheus.h>
+#include "cmt_decode_prometheus_parser.h"
+
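+/* Map cmetrics Prometheus decoder error codes to a human readable reason */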
+static char *error_reason(int cmt_error)
+{
+ static char *reason = NULL;
+
+ switch(cmt_error) {
+ case CMT_DECODE_PROMETHEUS_SYNTAX_ERROR:
+ reason = "syntax error";
+ break;
+ case CMT_DECODE_PROMETHEUS_ALLOCATION_ERROR:
+ reason = "allocation error";
+ break;
+ case CMT_DECODE_PROMETHEUS_MAX_LABEL_COUNT_EXCEEDED:
+ reason = "max label count exceeded";
+ break;
+ case CMT_DECODE_PROMETHEUS_CMT_SET_ERROR:
+ reason = "cmt set error";
+ break;
+ case CMT_DECODE_PROMETHEUS_CMT_CREATE_ERROR:
+ reason = "cmt create error";
+ break;
+ case CMT_DECODE_PROMETHEUS_PARSE_VALUE_FAILED:
+ reason = "parse value failed";
+ break;
+ case CMT_DECODE_PROMETHEUS_PARSE_TIMESTAMP_FAILED:
+ reason = "parse timestamp failed";
+ break;
+ default:
+ reason = "unknown reason";
+ }
+
+ return reason;
+}
+
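+/*
+ * Resolve the configured path to either a single .prom file or a directory
+ * glob, decode every matching file with the cmetrics Prometheus decoder and
+ * merge the result into the plugin context.
+ */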
+static int textfile_update(struct flb_ne *ctx)
+{
+ int ret;
+ char errbuf[256];
+ flb_sds_t contents;
+ struct cmt_decode_prometheus_parse_opts opts;
+ uint64_t timestamp;
+ struct cmt *cmt;
+ struct mk_list *head;
+ struct mk_list list;
+ struct flb_slist_entry *entry;
+ const char *nop_pattern = "";
+ const char *dir_pattern = "/*.prom";
+ char *ext;
+    struct stat st = {0};
+ int use_directory_pattern = FLB_FALSE;
+
+ timestamp = cfl_time_now();
+
+ memset(&opts, 0, sizeof(opts));
+ opts.errbuf = errbuf;
+ opts.errbuf_size = sizeof(errbuf);
+ opts.default_timestamp = timestamp;
+
+ flb_plg_debug(ctx->ins, "scanning path %s", ctx->path_textfile);
+
+ if (ctx->path_textfile == NULL) {
+ flb_plg_warn(ctx->ins, "No valid path for textfile metric is registered");
+ return -1;
+ }
+
+ ext = strrchr(ctx->path_textfile, '.');
+ if (ext != NULL) {
+ if (strncmp(ext, ".prom", 5) == 0) {
+ flb_plg_debug(ctx->ins, "specified path %s has \".prom\" extension",
+ ctx->path_textfile);
+ use_directory_pattern = FLB_FALSE;
+ }
+ else {
+ ret = stat(ctx->path_textfile, &st);
+ if (ret != 0) {
+                flb_plg_warn(ctx->ins, "specified path %s is not accessible",
+ ctx->path_textfile);
+ }
+ if (S_ISREG(st.st_mode)) {
+ flb_plg_warn(ctx->ins, "specified path %s does not have \".prom\" extension. Assuming directory",
+ ctx->path_textfile);
+ use_directory_pattern = FLB_TRUE;
+ }
+ }
+ }
+ else {
+ flb_plg_debug(ctx->ins, "specified file path %s does not have extension part. Globbing directory with \"%s\" suffix",
+ ctx->path_textfile, dir_pattern);
+ use_directory_pattern = FLB_TRUE;
+ }
+
+ if (use_directory_pattern == FLB_TRUE) {
+ /* Scan the given directory path */
+ ret = ne_utils_path_scan(ctx, ctx->path_textfile, dir_pattern, NE_SCAN_FILE, &list);
+ if (ret != 0) {
+ return -1;
+ }
+ }
+ else {
+ /* Scan the given file path */
+ ret = ne_utils_path_scan(ctx, ctx->path_textfile, nop_pattern, NE_SCAN_FILE, &list);
+ if (ret != 0) {
+ return -1;
+ }
+ }
+
+ /* Process entries */
+ mk_list_foreach(head, &list) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+ /* Update metrics from text file */
+ contents = flb_file_read(entry->str);
+ if (contents == NULL) {
+ flb_plg_debug(ctx->ins, "skip invalid file of prometheus: %s",
+ entry->str);
+ continue;
+ }
+
+ if (flb_sds_len(contents) == 0) {
+ flb_plg_debug(ctx->ins, "skip empty payload of prometheus: %s",
+ entry->str);
+ continue;
+ }
+
+ ret = cmt_decode_prometheus_create(&cmt, contents, flb_sds_len(contents), &opts);
+ if (ret == 0) {
+ flb_plg_debug(ctx->ins, "parse a payload of prometheus: %s",
+ entry->str);
+ cmt_cat(ctx->cmt, cmt);
+ cmt_decode_prometheus_destroy(cmt);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "parse a payload of prometheus: dismissed: %s, error: %d",
+ entry->str, ret);
+ cmt_counter_set(ctx->load_errors, timestamp, 1.0, 1, (char*[]){error_reason(ret)});
+ }
+ flb_sds_destroy(contents);
+ }
+ flb_slist_destroy(&list);
+
+ return 0;
+}
+
+int ne_textfile_init(struct flb_ne *ctx)
+{
+ ctx->load_errors = cmt_counter_create(ctx->cmt,
+ "node",
+ "textfile",
+ "node_textfile_scrape_error",
+                                          "1 or greater if there was an error opening, reading, or parsing a file, 0 otherwise.",
+ 1, (char *[]) {"reason"});
+
+ if (ctx->load_errors == NULL) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int ne_textfile_update(struct flb_ne *ctx)
+{
+ textfile_update(ctx);
+
+ return 0;
+}
+
+int ne_textfile_exit(struct flb_ne *ctx)
+{
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_time.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_time.c
new file mode 100644
index 000000000..81bf57ed9
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_time.c
@@ -0,0 +1,59 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "ne.h"
+#include "ne_utils.h"
+
+static int time_configure(struct flb_ne *ctx)
+{
+ struct cmt_gauge *g;
+
+ g = cmt_gauge_create(ctx->cmt, "node", "", "time_seconds",
+ "System time in seconds since epoch (1970).",
+ 0, NULL);
+ ctx->time = g;
+ return 0;
+}
+
+static int time_update(struct flb_ne *ctx)
+{
+ double val;
+ uint64_t ts;
+
+ ts = cfl_time_now();
+ val = ((double) ts) / 1e9;
+ cmt_gauge_set(ctx->time, ts, val, 0, NULL);
+
+ return 0;
+}
+
+int ne_time_init(struct flb_ne *ctx)
+{
+ time_configure(ctx);
+ return 0;
+}
+
+int ne_time_update(struct flb_ne *ctx)
+{
+ time_update(ctx);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_time.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_time.h
new file mode 100644
index 000000000..e594c935c
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_time.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NE_TIME_H
+#define FLB_IN_NE_TIME_H
+
+#include "ne.h"
+
+int ne_time_init(struct flb_ne *ctx);
+int ne_time_update(struct flb_ne *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_uname.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_uname.c
new file mode 100644
index 000000000..7ab7b1613
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_uname.c
@@ -0,0 +1,22 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef __linux__
+#include "ne_uname_linux.c"
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_uname.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_uname.h
new file mode 100644
index 000000000..dc94bc42b
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_uname.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NE_UNAME_H
+#define FLB_IN_NE_UNAME_H
+
+#include "ne.h"
+
+int ne_uname_init(struct flb_ne *ctx);
+int ne_uname_update(struct flb_ne *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_uname_linux.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_uname_linux.c
new file mode 100644
index 000000000..e42cab6a3
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_uname_linux.c
@@ -0,0 +1,84 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _GNU_SOURCE
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "ne.h"
+#include "ne_utils.h"
+
+#include <unistd.h>
+#include <sys/utsname.h>
+
+static int uname_configure(struct flb_ne *ctx)
+{
+ struct cmt_gauge *g;
+
+ g = cmt_gauge_create(ctx->cmt, "node", "uname", "info",
+ "Labeled system information as provided by the uname system call.",
+ 6, (char *[])
+ {
+ "sysname",
+ "release",
+ "version",
+ "machine",
+ "nodename",
+ "domainname"
+ });
+ if (!g) {
+ return -1;
+ }
+ ctx->uname = g;
+ return 0;
+}
+
+static int uname_update(struct flb_ne *ctx)
+{
+ int ret;
+ uint64_t ts;
+ struct utsname u = {0};
+
+
+ uname(&u);
+
+ ts = cfl_time_now();
+ ret = cmt_gauge_set(ctx->uname, ts, 1, 6,
+ (char *[]) {
+ u.sysname,
+ u.release,
+ u.version,
+ u.machine,
+ u.nodename,
+ u.domainname});
+ return ret;
+}
+
+int ne_uname_init(struct flb_ne *ctx)
+{
+ uname_configure(ctx);
+ return 0;
+}
+
+int ne_uname_update(struct flb_ne *ctx)
+{
+ uname_update(ctx);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_utils.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_utils.c
new file mode 100644
index 000000000..54cb2e2da
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_utils.c
@@ -0,0 +1,256 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_sds.h>
+#include "ne.h"
+
+/* required by stat(2), open(2) */
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <glob.h>
+
+int ne_utils_str_to_double(char *str, double *out_val)
+{
+ double val;
+ char *end;
+
+ errno = 0;
+ val = strtod(str, &end);
+ if (errno != 0 || *end != '\0') {
+ return -1;
+ }
+ *out_val = val;
+ return 0;
+}
+
+int ne_utils_str_to_uint64(char *str, uint64_t *out_val)
+{
+ uint64_t val;
+ char *end;
+
+ errno = 0;
+ val = strtoll(str, &end, 10);
+ if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN))
+ || (errno != 0 && val == 0)) {
+ flb_errno();
+ return -1;
+ }
+
+ if (end == str) {
+ return -1;
+ }
+
+ *out_val = val;
+ return 0;
+}
+
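+/*
+ * Compose "<mount><path>[/<join_a>][/<join_b>]" and read its content as an
+ * unsigned 64 bit integer, the usual layout of single-value procfs and
+ * sysfs files.
+ */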
+int ne_utils_file_read_uint64(const char *mount,
+ const char *path,
+ const char *join_a, const char *join_b,
+ uint64_t *out_val)
+{
+ int fd;
+ int len;
+ int ret;
+ flb_sds_t p;
+ uint64_t val;
+ ssize_t bytes;
+ char tmp[32];
+
+ /* Check the path starts with the mount point to prevent duplication. */
+ if (strncasecmp(path, mount, strlen(mount)) == 0 &&
+ path[strlen(mount)] == '/') {
+ mount = "";
+ }
+
+ /* Compose the final path */
+ p = flb_sds_create(mount);
+ if (!p) {
+ return -1;
+ }
+
+ len = strlen(path);
+ flb_sds_cat_safe(&p, path, len);
+
+ if (join_a) {
+ flb_sds_cat_safe(&p, "/", 1);
+ len = strlen(join_a);
+ flb_sds_cat_safe(&p, join_a, len);
+ }
+
+ if (join_b) {
+ flb_sds_cat_safe(&p, "/", 1);
+ len = strlen(join_b);
+ flb_sds_cat_safe(&p, join_b, len);
+ }
+
+ fd = open(p, O_RDONLY);
+ if (fd == -1) {
+ flb_sds_destroy(p);
+ return -1;
+ }
+ flb_sds_destroy(p);
+
+    bytes = read(fd, tmp, sizeof(tmp) - 1);
+    if (bytes == -1) {
+        flb_errno();
+        close(fd);
+        return -1;
+    }
+    tmp[bytes] = '\0';
+    close(fd);
+
+ ret = ne_utils_str_to_uint64(tmp, &val);
+ if (ret == -1) {
+ return -1;
+ }
+
+ *out_val = val;
+ return 0;
+}
+
+/*
+ * Read a file and every non-empty line is stored as a flb_slist_entry in the
+ * given list.
+ */
+int ne_utils_file_read_lines(const char *mount, const char *path, struct mk_list *list)
+{
+ int len;
+ int ret;
+ FILE *f;
+ char line[512];
+ char real_path[2048];
+
+ mk_list_init(list);
+
+ /* Check the path starts with the mount point to prevent duplication. */
+ if (strncasecmp(path, mount, strlen(mount)) == 0 &&
+ path[strlen(mount)] == '/') {
+ mount = "";
+ }
+
+ snprintf(real_path, sizeof(real_path) - 1, "%s%s", mount, path);
+ f = fopen(real_path, "r");
+ if (f == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ /* Read the content */
+ while (fgets(line, sizeof(line) - 1, f)) {
+ len = strlen(line);
+        if (len > 0 && line[len - 1] == '\n') {
+ line[--len] = 0;
+ if (len && line[len - 1] == '\r') {
+ line[--len] = 0;
+ }
+ }
+
+ ret = flb_slist_add(list, line);
+ if (ret == -1) {
+ fclose(f);
+ flb_slist_destroy(list);
+ return -1;
+ }
+ }
+
+ fclose(f);
+ return 0;
+}
+
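+/*
+ * Expand the glob pattern "<mount><path>" and append every match of the
+ * expected type (regular file or directory) to the output slist.
+ */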
+int ne_utils_path_scan(struct flb_ne *ctx, const char *mount, const char *path,
+ int expected, struct mk_list *list)
+{
+ int i;
+ int ret;
+ glob_t globbuf;
+ struct stat st;
+ char real_path[2048];
+
+ if (!path) {
+ return -1;
+ }
+
+ /* Safe reset for globfree() */
+ globbuf.gl_pathv = NULL;
+
+ /* Scan the real path */
+ snprintf(real_path, sizeof(real_path) - 1, "%s%s", mount, path);
+ ret = glob(real_path, GLOB_TILDE | GLOB_ERR, NULL, &globbuf);
+ if (ret != 0) {
+ switch (ret) {
+ case GLOB_NOSPACE:
+ flb_plg_error(ctx->ins, "no memory space available");
+ return -1;
+ case GLOB_ABORTED:
+ flb_plg_error(ctx->ins, "read error, check permissions: %s", path);
+            return -1;
+ case GLOB_NOMATCH:
+ ret = stat(path, &st);
+ if (ret == -1) {
+ flb_plg_debug(ctx->ins, "cannot read info from: %s", path);
+ }
+ else {
+ ret = access(path, R_OK);
+ if (ret == -1 && errno == EACCES) {
+ flb_plg_error(ctx->ins, "NO read access for path: %s", path);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "NO matches for path: %s", path);
+ }
+ }
+ return -1;
+ }
+ }
+
+ if (globbuf.gl_pathc <= 0) {
+ globfree(&globbuf);
+ return -1;
+ }
+
+ /* Initialize list */
+ flb_slist_create(list);
+
+ /* For every entry found, generate an output list */
+ for (i = 0; i < globbuf.gl_pathc; i++) {
+ ret = stat(globbuf.gl_pathv[i], &st);
+ if (ret != 0) {
+ continue;
+ }
+
+ if ((expected == NE_SCAN_FILE && S_ISREG(st.st_mode)) ||
+ (expected == NE_SCAN_DIR && S_ISDIR(st.st_mode))) {
+
+ /* Compose the path */
+ ret = flb_slist_add(list, globbuf.gl_pathv[i]);
+ if (ret != 0) {
+ globfree(&globbuf);
+ flb_slist_destroy(list);
+ return -1;
+ }
+ }
+ }
+
+ globfree(&globbuf);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_utils.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_utils.h
new file mode 100644
index 000000000..448293a03
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_utils.h
@@ -0,0 +1,39 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_NODE_EXPORTER_UTILS_H
+#define FLB_NODE_EXPORTER_UTILS_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_sds.h>
+#include "ne.h"
+
+int ne_utils_str_to_double(char *str, double *out_val);
+int ne_utils_str_to_uint64(char *str, uint64_t *out_val);
+
+int ne_utils_file_read_uint64(const char *mount,
+ const char *path,
+ const char *join_a, const char *join_b,
+ uint64_t *out_val);
+
+int ne_utils_file_read_lines(const char *mount, const char *path, struct mk_list *list);
+int ne_utils_path_scan(struct flb_ne *ctx, const char *mount, const char *path,
+ int expected, struct mk_list *list);
+#endif
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_vmstat_linux.c b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_vmstat_linux.c
new file mode 100644
index 000000000..a0240d4ee
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_vmstat_linux.c
@@ -0,0 +1,216 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_hash_table.h>
+
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "ne.h"
+#include "ne_utils.h"
+
+#define VMSTAT_ENTRIES "^(oom_kill|pgpg|pswp|pg.*fault).*"
+
+static int keep_field(struct flb_ne *ctx, flb_sds_t field)
+{
+ return flb_regex_match(ctx->vml_regex_fields,
+ (unsigned char *) field, flb_sds_len(field));
+}
+
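+/*
+ * Scan /proc/vmstat once at startup and register a metric for every field
+ * matching VMSTAT_ENTRIES, indexed by field name in a hash table.
+ */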
+static int vmstat_configure(struct flb_ne *ctx)
+{
+ int ret;
+ int parts;
+ char tmp[256];
+ struct mk_list *head;
+ struct mk_list list;
+ struct mk_list split_list;
+ struct flb_slist_entry *line;
+ struct flb_slist_entry *key;
+ struct cmt_counter *c;
+
+ /* Initialize regex for skipped devices */
+ ctx->vml_regex_fields = flb_regex_create(VMSTAT_ENTRIES);
+ if (!ctx->vml_regex_fields) {
+ flb_plg_error(ctx->ins,
+ "could not initialize regex pattern for matching "
+ "fields: '%s'",
+ VMSTAT_ENTRIES);
+ return -1;
+ }
+
+ /* Initialize hash table */
+ ctx->vml_ht = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE, 16, 0);
+ if (!ctx->vml_ht) {
+ return -1;
+ }
+
+ mk_list_init(&list);
+ mk_list_init(&split_list);
+
+ ret = ne_utils_file_read_lines(ctx->path_procfs, "/vmstat", &list);
+ if (ret == -1) {
+ return -1;
+ }
+
+ mk_list_foreach(head, &list) {
+ line = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ mk_list_init(&split_list);
+ ret = flb_slist_split_string(&split_list, line->str, ' ', 2);
+ if (ret == -1) {
+ continue;
+ }
+        parts = ret;
+ if (parts < 2) {
+ flb_slist_destroy(&split_list);
+ continue;
+ }
+
+ /* retrieve key and value */
+ key = flb_slist_entry_get(&split_list, 0);
+
+ /* keep field ? */
+ if (!keep_field(ctx, key->str)) {
+ flb_slist_destroy(&split_list);
+ continue;
+ }
+
+ snprintf(tmp, sizeof(tmp) - 1,
+ "/proc/vmstat information field %s.", key->str);
+ c = cmt_counter_create(ctx->cmt, "node", "vmstat", key->str,
+ tmp, 0, NULL);
+ if (!c) {
+ flb_slist_destroy(&split_list);
+ flb_slist_destroy(&list);
+ return -1;
+ }
+
+ ret = flb_hash_table_add(ctx->vml_ht,
+ key->str, flb_sds_len(key->str), c, 0);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not add hash for vmstat metric: %s", key->str);
+ flb_slist_destroy(&split_list);
+ flb_slist_destroy(&list);
+ return -1;
+ }
+
+ flb_slist_destroy(&split_list);
+ }
+
+ flb_slist_destroy(&list);
+ return 0;
+}
+
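+/*
+ * Re-read /proc/vmstat and update the previously registered metric for each
+ * field that passes the keep_field() filter.
+ */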
+static int vmstat_update(struct flb_ne *ctx)
+{
+ int ret;
+ int parts;
+ double v;
+ uint64_t ts;
+ size_t out_size = 0;
+ struct mk_list *head;
+ struct mk_list list;
+ struct mk_list split_list;
+ struct flb_slist_entry *line;
+ struct flb_slist_entry *key;
+ struct flb_slist_entry *val;
+ struct cmt_untyped *u;
+
+ mk_list_init(&list);
+ mk_list_init(&split_list);
+
+ ret = ne_utils_file_read_lines(ctx->path_procfs, "/vmstat", &list);
+ if (ret == -1) {
+ return -1;
+ }
+
+ ts = cfl_time_now();
+ mk_list_foreach(head, &list) {
+ line = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ mk_list_init(&split_list);
+ ret = flb_slist_split_string(&split_list, line->str, ' ', 2);
+ if (ret == -1) {
+ continue;
+ }
+
+ parts = ret;
+ if (parts == 0) {
+ flb_slist_destroy(&split_list);
+ continue;
+ }
+
+ /* retrieve key and value */
+ key = flb_slist_entry_get(&split_list, 0);
+ val = flb_slist_entry_get(&split_list, 1);
+
+ /* keep field ? */
+ if (!keep_field(ctx, key->str)) {
+ flb_slist_destroy(&split_list);
+ continue;
+ }
+
+ ret = flb_hash_table_get(ctx->vml_ht,
+ key->str, flb_sds_len(key->str),
+ (void *) &u, &out_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not retrieve vmstat hash metric: '%s'", key->str);
+ flb_slist_destroy(&split_list);
+ continue;
+ }
+
+ /* set metric */
+ ne_utils_str_to_double(val->str, &v);
+ cmt_untyped_set(u, ts, v, 0, NULL);
+
+ flb_slist_destroy(&split_list);
+ }
+
+ flb_slist_destroy(&list);
+ return 0;
+}
+
+int ne_vmstat_init(struct flb_ne *ctx)
+{
+ vmstat_configure(ctx);
+ return 0;
+}
+
+int ne_vmstat_update(struct flb_ne *ctx)
+{
+ vmstat_update(ctx);
+ return 0;
+}
+
+int ne_vmstat_exit(struct flb_ne *ctx)
+{
+ if (ctx->vml_regex_fields) {
+ flb_regex_destroy(ctx->vml_regex_fields);
+ }
+
+ if (ctx->vml_ht) {
+ flb_hash_table_destroy(ctx->vml_ht);
+ }
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_node_exporter_metrics/ne_vmstat_linux.h b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_vmstat_linux.h
new file mode 100644
index 000000000..08b78f4f4
--- /dev/null
+++ b/src/fluent-bit/plugins/in_node_exporter_metrics/ne_vmstat_linux.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_NE_VMSTAT_LINUX_H
+#define FLB_IN_NE_VMSTAT_LINUX_H
+
+#include "ne.h"
+
+int ne_vmstat_init(struct flb_ne *ctx);
+int ne_vmstat_update(struct flb_ne *ctx);
+int ne_vmstat_exit(struct flb_ne *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_opentelemetry/CMakeLists.txt b/src/fluent-bit/plugins/in_opentelemetry/CMakeLists.txt
new file mode 100644
index 000000000..4c3d6db32
--- /dev/null
+++ b/src/fluent-bit/plugins/in_opentelemetry/CMakeLists.txt
@@ -0,0 +1,12 @@
+if(NOT FLB_METRICS)
+  message(FATAL_ERROR "OpenTelemetry input plugin requires FLB_METRICS=On.")
+endif()
+
+set(src
+ http_conn.c
+ opentelemetry.c
+ opentelemetry_prot.c
+ opentelemetry_config.c
+ )
+
+FLB_PLUGIN(in_opentelemetry "${src}" "monkey-core-static")
diff --git a/src/fluent-bit/plugins/in_opentelemetry/http_conn.c b/src/fluent-bit/plugins/in_opentelemetry/http_conn.c
new file mode 100644
index 000000000..a402295b1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_opentelemetry/http_conn.c
@@ -0,0 +1,301 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_engine.h>
+#include <fluent-bit/flb_downstream.h>
+
+#include "opentelemetry.h"
+#include "http_conn.h"
+#include "opentelemetry_prot.h"
+
+static void opentelemetry_conn_request_init(struct mk_http_session *session,
+ struct mk_http_request *request);
+
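+/*
+ * Event-loop callback for an OpenTelemetry HTTP connection: grow the buffer
+ * on demand, read the incoming payload, run the Monkey HTTP parser and
+ * dispatch complete requests to the protocol handler.
+ */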
+static int opentelemetry_conn_event(void *data)
+{
+ int status;
+ size_t size;
+ ssize_t available;
+ ssize_t bytes;
+ char *tmp;
+ char *request_end;
+ size_t request_len;
+ struct http_conn *conn;
+ struct mk_event *event;
+ struct flb_opentelemetry *ctx;
+ struct flb_connection *connection;
+
+ connection = (struct flb_connection *) data;
+
+ conn = connection->user_data;
+
+ ctx = conn->ctx;
+
+ event = &connection->event;
+
+ if (event->mask & MK_EVENT_READ) {
+ available = (conn->buf_size - conn->buf_len) - 1;
+ if (available < 1) {
+ if (conn->buf_size + ctx->buffer_chunk_size > ctx->buffer_max_size) {
+ flb_plg_trace(ctx->ins,
+ "fd=%i incoming data exceed limit (%zu KB)",
+ event->fd, (ctx->buffer_max_size / 1024));
+ opentelemetry_conn_del(conn);
+ return -1;
+ }
+
+ size = conn->buf_size + ctx->buffer_chunk_size;
+ tmp = flb_realloc(conn->buf_data, size);
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+ flb_plg_trace(ctx->ins, "fd=%i buffer realloc %i -> %zu",
+ event->fd, conn->buf_size, size);
+
+ conn->buf_data = tmp;
+ conn->buf_size = size;
+ available = (conn->buf_size - conn->buf_len) - 1;
+ }
+
+ /* Read data */
+ bytes = flb_io_net_read(connection,
+ (void *) &conn->buf_data[conn->buf_len],
+ available);
+
+ if (bytes <= 0) {
+ flb_plg_trace(ctx->ins, "fd=%i closed connection", event->fd);
+ opentelemetry_conn_del(conn);
+ return -1;
+ }
+
+ flb_plg_trace(ctx->ins, "read()=%zi pre_len=%i now_len=%zi",
+ bytes, conn->buf_len, conn->buf_len + bytes);
+ conn->buf_len += bytes;
+ conn->buf_data[conn->buf_len] = '\0';
+
+ status = mk_http_parser(&conn->request, &conn->session.parser,
+ conn->buf_data, conn->buf_len, conn->session.server);
+
+ if (status == MK_HTTP_PARSER_OK) {
+ /* Do more logic parsing and checks for this request */
+ opentelemetry_prot_handle(ctx, conn, &conn->session, &conn->request);
+
+ /* Evict the processed request from the connection buffer and reinitialize
+ * the HTTP parser.
+ */
+
+ request_end = NULL;
+
+ if (NULL != conn->request.data.data) {
+ request_end = &conn->request.data.data[conn->request.data.len];
+ }
+ else {
+ request_end = strstr(conn->buf_data, "\r\n\r\n");
+
+ if(NULL != request_end) {
+ request_end = &request_end[4];
+ }
+ }
+
+ if (NULL != request_end) {
+ request_len = (size_t)(request_end - conn->buf_data);
+
+ if (0 < (conn->buf_len - request_len)) {
+ memmove(conn->buf_data, &conn->buf_data[request_len],
+ conn->buf_len - request_len);
+
+ conn->buf_data[conn->buf_len - request_len] = '\0';
+ conn->buf_len -= request_len;
+ }
+ else {
+ memset(conn->buf_data, 0, request_len);
+
+ conn->buf_len = 0;
+ }
+
+ /* Reinitialize the parser so the next request is properly
+ * handled, the additional memset intends to wipe any left over data
+ * from the headers parsed in the previous request.
+ */
+ memset(&conn->session.parser, 0, sizeof(struct mk_http_parser));
+ mk_http_parser_init(&conn->session.parser);
+ opentelemetry_conn_request_init(&conn->session, &conn->request);
+ }
+ }
+ else if (status == MK_HTTP_PARSER_ERROR) {
+ opentelemetry_prot_handle_error(ctx, conn, &conn->session, &conn->request);
+
+ /* Reinitialize the parser so the next request is properly
+ * handled, the additional memset intends to wipe any left over data
+ * from the headers parsed in the previous request.
+ */
+ memset(&conn->session.parser, 0, sizeof(struct mk_http_parser));
+ mk_http_parser_init(&conn->session.parser);
+ opentelemetry_conn_request_init(&conn->session, &conn->request);
+ }
+
+ /* FIXME: add Protocol handler here */
+ return bytes;
+ }
+
+ if (event->mask & MK_EVENT_CLOSE) {
+ flb_plg_trace(ctx->ins, "fd=%i hangup", event->fd);
+ opentelemetry_conn_del(conn);
+ return -1;
+ }
+
+ return 0;
+
+}
+
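+/* Initialize a Monkey HTTP session bound to the client socket */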
+static void opentelemetry_conn_session_init(struct mk_http_session *session,
+ struct mk_server *server,
+ int client_fd)
+{
+ /* Alloc memory for node */
+    /* Initialize the session fields for this client connection */
+ session->pipelined = MK_FALSE;
+ session->counter_connections = 0;
+ session->close_now = MK_FALSE;
+ session->status = MK_REQUEST_STATUS_INCOMPLETE;
+ session->server = server;
+ session->socket = client_fd;
+
+ /* creation time in unix time */
+ session->init_time = time(NULL);
+
+ session->channel = mk_channel_new(MK_CHANNEL_SOCKET, session->socket);
+ session->channel->io = session->server->network;
+
+ /* Init session request list */
+ mk_list_init(&session->request_list);
+
+ /* Initialize the parser */
+ mk_http_parser_init(&session->parser);
+}
+
+static void opentelemetry_conn_request_init(struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ memset(request, 0, sizeof(struct mk_http_request));
+
+ mk_http_request_init(session, request, session->server);
+
+ request->in_headers.type = MK_STREAM_IOV;
+ request->in_headers.dynamic = MK_FALSE;
+ request->in_headers.cb_consumed = NULL;
+ request->in_headers.cb_finished = NULL;
+ request->in_headers.stream = &request->stream;
+
+ mk_list_add(&request->in_headers._head, &request->stream.inputs);
+
+ request->session = session;
+}
+
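+/*
+ * Allocate a connection context, register its file descriptor in the event
+ * loop and link it to the plugin connection list.
+ */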
+struct http_conn *opentelemetry_conn_add(struct flb_connection *connection,
+ struct flb_opentelemetry *ctx)
+{
+ struct http_conn *conn;
+ int ret;
+
+ conn = flb_calloc(1, sizeof(struct http_conn));
+ if (!conn) {
+ flb_errno();
+ return NULL;
+ }
+ conn->connection = connection;
+
+ /* Set data for the event-loop */
+ MK_EVENT_NEW(&connection->event);
+
+ connection->user_data = conn;
+ connection->event.type = FLB_ENGINE_EV_CUSTOM;
+ connection->event.handler = opentelemetry_conn_event;
+
+ /* Connection info */
+ conn->ctx = ctx;
+ conn->buf_len = 0;
+
+ conn->buf_data = flb_malloc(ctx->buffer_chunk_size);
+ if (!conn->buf_data) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "could not allocate new connection");
+ flb_free(conn);
+ return NULL;
+ }
+ conn->buf_size = ctx->buffer_chunk_size;
+
+ /* Register instance into the event loop */
+ ret = mk_event_add(flb_engine_evl_get(),
+ connection->fd,
+ FLB_ENGINE_EV_CUSTOM,
+ MK_EVENT_READ,
+ &connection->event);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not register new connection");
+ flb_free(conn->buf_data);
+ flb_free(conn);
+ return NULL;
+ }
+
+ /* Initialize HTTP Session: this is a custom context for Monkey HTTP */
+ opentelemetry_conn_session_init(&conn->session, ctx->server, connection->fd);
+
+ /* Initialize HTTP Request: this is the initial request and it will be reinitialized
+ * automatically after the request is handled so it can be used for the next one.
+ */
+ opentelemetry_conn_request_init(&conn->session, &conn->request);
+
+ /* Link connection node to parent context list */
+ mk_list_add(&conn->_head, &ctx->connections);
+ return conn;
+}
+
+int opentelemetry_conn_del(struct http_conn *conn)
+{
+ if (conn->session.channel != NULL) {
+ mk_channel_release(conn->session.channel);
+ }
+
+ /* The downstream unregisters the file descriptor from the event-loop
+ * so there's nothing to be done by the plugin
+ */
+ flb_downstream_conn_release(conn->connection);
+
+ mk_list_del(&conn->_head);
+
+ flb_free(conn->buf_data);
+ flb_free(conn);
+
+ return 0;
+}
+
+void opentelemetry_conn_release_all(struct flb_opentelemetry *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct http_conn *conn;
+
+ mk_list_foreach_safe(head, tmp, &ctx->connections) {
+ conn = mk_list_entry(head, struct http_conn, _head);
+ opentelemetry_conn_del(conn);
+ }
+}
diff --git a/src/fluent-bit/plugins/in_opentelemetry/http_conn.h b/src/fluent-bit/plugins/in_opentelemetry/http_conn.h
new file mode 100644
index 000000000..60627d860
--- /dev/null
+++ b/src/fluent-bit/plugins/in_opentelemetry/http_conn.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_HTTP_CONN
+#define FLB_IN_HTTP_CONN
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <monkey/mk_http.h>
+#include <monkey/mk_http_parser.h>
+#include <monkey/mk_utils.h>
+
+#include "opentelemetry.h"
+
+struct http_conn {
+ struct mk_event event; /* Built-in event data for mk_events */
+
+ /* Buffer */
+ char *buf_data; /* Buffer data */
+ int buf_len; /* Data length */
+ int buf_size; /* Buffer size */
+
+ /*
+ * Parser context: we only hold one parser per connection,
+ * which is reused every time a new request arrives.
+ */
+ struct mk_http_parser parser;
+ struct mk_http_request request;
+ struct mk_http_session session;
+ struct flb_connection *connection;
+
+ void *ctx; /* Plugin parent context */
+ struct mk_list _head; /* link to flb_opentelemetry->connections */
+};
+
+struct http_conn *opentelemetry_conn_add(struct flb_connection *connection,
+ struct flb_opentelemetry *ctx);
+int opentelemetry_conn_del(struct http_conn *conn);
+void opentelemetry_conn_release_all(struct flb_opentelemetry *ctx);
+
+
+#endif
diff --git a/src/fluent-bit/plugins/in_opentelemetry/opentelemetry.c b/src/fluent-bit/plugins/in_opentelemetry/opentelemetry.c
new file mode 100644
index 000000000..5cd26f8e6
--- /dev/null
+++ b/src/fluent-bit/plugins/in_opentelemetry/opentelemetry.c
@@ -0,0 +1,200 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_downstream.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_config.h>
+
+#include "http_conn.h"
+#include "opentelemetry.h"
+#include "opentelemetry_config.h"
+
+/*
+ * For a server event, the collect callback means a new client has arrived; we
+ * accept the connection and create a new connection instance which will wait
+ * for OTLP/HTTP requests.
+ */
+static int in_opentelemetry_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_connection *connection;
+ struct http_conn *conn;
+ struct flb_opentelemetry *ctx;
+
+ ctx = in_context;
+
+ connection = flb_downstream_conn_get(ctx->downstream);
+
+ if (connection == NULL) {
+ flb_plg_error(ctx->ins, "could not accept new connection");
+
+ return -1;
+ }
+
+ flb_plg_trace(ctx->ins, "new TCP connection arrived FD=%i", connection->fd);
+
+ conn = opentelemetry_conn_add(connection, ctx);
+
+ if (conn == NULL) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static int in_opentelemetry_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ unsigned short int port;
+ int ret;
+ struct flb_opentelemetry *ctx;
+
+ (void) data;
+
+ /* Create context and basic conf */
+ ctx = opentelemetry_config_create(ins);
+ if (!ctx) {
+ return -1;
+ }
+ ctx->collector_id = -1;
+
+ /* Populate context with config map defaults and incoming properties */
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "configuration error");
+ opentelemetry_config_destroy(ctx);
+ return -1;
+ }
+
+ /* Set the context */
+ flb_input_set_context(ins, ctx);
+
+ port = (unsigned short int) strtoul(ctx->tcp_port, NULL, 10);
+
+ ctx->downstream = flb_downstream_create(FLB_TRANSPORT_TCP,
+ ins->flags,
+ ctx->listen,
+ port,
+ ins->tls,
+ config,
+ &ins->net_setup);
+
+ if (ctx->downstream == NULL) {
+ flb_plg_error(ctx->ins,
+ "could not initialize downstream on %s:%s. Aborting",
+ ctx->listen, ctx->tcp_port);
+
+ opentelemetry_config_destroy(ctx);
+
+ return -1;
+ }
+
+ flb_input_downstream_set(ctx->downstream, ctx->ins);
+
+ flb_plg_info(ctx->ins, "listening on %s:%s", ctx->listen, ctx->tcp_port);
+
+ if (ctx->successful_response_code != 200 &&
+ ctx->successful_response_code != 201 &&
+ ctx->successful_response_code != 204) {
+ flb_plg_error(ctx->ins, "%d is not a supported response code, using default 201",
+ ctx->successful_response_code);
+ ctx->successful_response_code = 201;
+ }
+
+ /* Collect upon data available on the server socket */
+ ret = flb_input_set_collector_socket(ins,
+ in_opentelemetry_collect,
+ ctx->downstream->server_fd,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Could not set collector for opentelemetry input plugin");
+ opentelemetry_config_destroy(ctx);
+ return -1;
+ }
+
+ ctx->collector_id = ret;
+
+ return 0;
+}
+
+static int in_opentelemetry_exit(void *data, struct flb_config *config)
+{
+ struct flb_opentelemetry *ctx;
+
+ (void) config;
+
+ ctx = data;
+
+ if (ctx != NULL) {
+ opentelemetry_config_destroy(ctx);
+ }
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_max_size", HTTP_BUFFER_MAX_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_opentelemetry, buffer_max_size),
+ ""
+ },
+
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_chunk_size", HTTP_BUFFER_CHUNK_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_opentelemetry, buffer_chunk_size),
+ ""
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "tag_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_opentelemetry, tag_key),
+ ""
+ },
+ {
+ FLB_CONFIG_MAP_INT, "successful_response_code", "201",
+ 0, FLB_TRUE, offsetof(struct flb_opentelemetry, successful_response_code),
+ "Set successful response code. 200, 201 and 204 are supported."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "raw_traces", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opentelemetry, raw_traces),
+ "Forward traces without processing"
+ },
+
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_opentelemetry_plugin = {
+ .name = "opentelemetry",
+ .description = "OpenTelemetry",
+ .cb_init = in_opentelemetry_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_opentelemetry_collect,
+ .cb_flush_buf = NULL,
+ .cb_pause = NULL,
+ .cb_resume = NULL,
+ .cb_exit = in_opentelemetry_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET_SERVER | FLB_IO_OPT_TLS
+};
diff --git a/src/fluent-bit/plugins/in_opentelemetry/opentelemetry.h b/src/fluent-bit/plugins/in_opentelemetry/opentelemetry.h
new file mode 100644
index 000000000..512f2ab6f
--- /dev/null
+++ b/src/fluent-bit/plugins/in_opentelemetry/opentelemetry.h
@@ -0,0 +1,51 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_OPENTELEMETRY_H
+#define FLB_IN_OPENTELEMETRY_H
+
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_utils.h>
+
+#include <monkey/monkey.h>
+
+#define HTTP_BUFFER_MAX_SIZE "4M"
+#define HTTP_BUFFER_CHUNK_SIZE "512K"
+
+struct flb_opentelemetry {
+ int successful_response_code;
+ flb_sds_t listen;
+ flb_sds_t tcp_port;
+ const char *tag_key;
+ bool raw_traces;
+
+ size_t buffer_max_size; /* Maximum buffer size */
+ size_t buffer_chunk_size; /* Chunk allocation size */
+
+ int collector_id; /* Listener collector id */
+ struct flb_downstream *downstream; /* Client manager */
+ struct mk_list connections; /* linked list of connections */
+
+ struct mk_server *server;
+ struct flb_input_instance *ins;
+};
+
+
+#endif
diff --git a/src/fluent-bit/plugins/in_opentelemetry/opentelemetry_config.c b/src/fluent-bit/plugins/in_opentelemetry/opentelemetry_config.c
new file mode 100644
index 000000000..b57596f94
--- /dev/null
+++ b/src/fluent-bit/plugins/in_opentelemetry/opentelemetry_config.c
@@ -0,0 +1,92 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_downstream.h>
+
+#include "opentelemetry.h"
+#include "http_conn.h"
+
+/* default HTTP port for OTLP/HTTP is 4318 */
+#define OTLP_HTTP_PORT 4318
+
+struct flb_opentelemetry *opentelemetry_config_create(struct flb_input_instance *ins)
+{
+ int ret;
+ char port[8];
+ struct flb_opentelemetry *ctx;
+
+ ctx = flb_calloc(1, sizeof(struct flb_opentelemetry));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ mk_list_init(&ctx->connections);
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Listen interface (if not set, defaults to 0.0.0.0:4318) */
+ flb_input_net_default_listener("0.0.0.0", OTLP_HTTP_PORT, ins);
+
+ ctx->listen = flb_strdup(ins->host.listen);
+ snprintf(port, sizeof(port) - 1, "%d", ins->host.port);
+ ctx->tcp_port = flb_strdup(port);
+
+ /* HTTP Server specifics */
+ ctx->server = flb_calloc(1, sizeof(struct mk_server));
+ ctx->server->keep_alive = MK_TRUE;
+
+ /* Monkey treats server->workers == 0 as the server not being initialized,
+ * so we want to make sure it stays that way.
+ */
+
+ return ctx;
+}
+
+int opentelemetry_config_destroy(struct flb_opentelemetry *ctx)
+{
+ /* release all connections */
+ opentelemetry_conn_release_all(ctx);
+
+ if (ctx->collector_id != -1) {
+ flb_input_collector_delete(ctx->collector_id, ctx->ins);
+
+ ctx->collector_id = -1;
+ }
+
+ if (ctx->downstream != NULL) {
+ flb_downstream_destroy(ctx->downstream);
+ }
+
+ if (ctx->server) {
+ flb_free(ctx->server);
+ }
+
+ flb_free(ctx->listen);
+ flb_free(ctx->tcp_port);
+ flb_free(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_opentelemetry/opentelemetry_config.h b/src/fluent-bit/plugins/in_opentelemetry/opentelemetry_config.h
new file mode 100644
index 000000000..0d980c7aa
--- /dev/null
+++ b/src/fluent-bit/plugins/in_opentelemetry/opentelemetry_config.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_OPENTELEMETRY_CONFIG_H
+#define FLB_IN_OPENTELEMETRY_CONFIG_H
+
+#include <fluent-bit/flb_input_plugin.h>
+#include "opentelemetry.h"
+
+struct flb_opentelemetry *opentelemetry_config_create(struct flb_input_instance *ins);
+int opentelemetry_config_destroy(struct flb_opentelemetry *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_opentelemetry/opentelemetry_prot.c b/src/fluent-bit/plugins/in_opentelemetry/opentelemetry_prot.c
new file mode 100644
index 000000000..c9ccba7f9
--- /dev/null
+++ b/src/fluent-bit/plugins/in_opentelemetry/opentelemetry_prot.c
@@ -0,0 +1,1674 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_version.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_snappy.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <monkey/monkey.h>
+#include <monkey/mk_core.h>
+#include <cmetrics/cmt_decode_opentelemetry.h>
+
+#include <fluent-otel-proto/fluent-otel.h>
+#include "opentelemetry.h"
+#include "http_conn.h"
+
+#define HTTP_CONTENT_JSON 0
+
+static int json_payload_append_converted_value(
+ struct flb_log_event_encoder *encoder,
+ int target_field,
+ msgpack_object *object);
+
+static int json_payload_append_converted_array(
+ struct flb_log_event_encoder *encoder,
+ int target_field,
+ msgpack_object *object);
+
+static int json_payload_append_converted_kvlist(
+ struct flb_log_event_encoder *encoder,
+ int target_field,
+ msgpack_object *object);
+
+static int json_payload_to_msgpack(struct flb_opentelemetry *ctx,
+ struct flb_log_event_encoder *encoder,
+ const char *body,
+ size_t len);
+
+static int otlp_pack_any_value(msgpack_packer *mp_pck,
+ Opentelemetry__Proto__Common__V1__AnyValue *body);
+
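+/*
+ * Compose a minimal raw HTTP/1.1 response for the supported status codes
+ * (200, 201, 204 and 400) and write it synchronously to the client connection.
+ */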
+static int send_response(struct http_conn *conn, int http_status, char *message)
+{
+ int len;
+ flb_sds_t out;
+ size_t sent;
+
+ out = flb_sds_create_size(256);
+ if (!out) {
+ return -1;
+ }
+
+ if (message) {
+ len = strlen(message);
+ }
+ else {
+ len = 0;
+ }
+
+ if (http_status == 201) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 201 Created \r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "Content-Length: 0\r\n\r\n",
+ FLB_VERSION_STR);
+ }
+ else if (http_status == 200) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 200 OK\r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "Content-Length: 0\r\n\r\n",
+ FLB_VERSION_STR);
+ }
+ else if (http_status == 204) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 204 No Content\r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "\r\n",
+ FLB_VERSION_STR);
+ }
+ else if (http_status == 400) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 400 Bad Request\r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "Content-Length: %i\r\n\r\n%s",
+ FLB_VERSION_STR,
+ len, message);
+ }
+
+ /* We should check the outcome of this operation */
+ flb_io_net_write(conn->connection,
+ (void *) out,
+ flb_sds_len(out),
+ &sent);
+
+ flb_sds_destroy(out);
+
+ return 0;
+}
+
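+/*
+ * Decode an OTLP metrics payload with cmetrics and append every decoded
+ * context to the input instance.
+ */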
+static int process_payload_metrics(struct flb_opentelemetry *ctx, struct http_conn *conn,
+ flb_sds_t tag,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ struct cfl_list decoded_contexts;
+ struct cfl_list *iterator;
+ struct cmt *context;
+ size_t offset;
+ int result;
+
+ offset = 0;
+
+ result = cmt_decode_opentelemetry_create(&decoded_contexts,
+ request->data.data,
+ request->data.len,
+ &offset);
+
+ if (result == CMT_DECODE_OPENTELEMETRY_SUCCESS) {
+ cfl_list_foreach(iterator, &decoded_contexts) {
+ context = cfl_list_entry(iterator, struct cmt, _head);
+
+ result = flb_input_metrics_append(ctx->ins, NULL, 0, context);
+
+ if (result != 0) {
+ flb_plg_debug(ctx->ins, "could not ingest metrics context : %d", result);
+ }
+ }
+
+ cmt_decode_opentelemetry_destroy(&decoded_contexts);
+ }
+
+ return 0;
+}
+
+static int process_payload_traces_proto(struct flb_opentelemetry *ctx, struct http_conn *conn,
+ flb_sds_t tag,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ struct ctrace *decoded_context;
+ size_t offset;
+ int result;
+
+ offset = 0;
+ result = ctr_decode_opentelemetry_create(&decoded_context,
+ request->data.data,
+ request->data.len,
+ &offset);
+ if (result == 0) {
+ result = flb_input_trace_append(ctx->ins, NULL, 0, decoded_context);
+ ctr_decode_opentelemetry_destroy(decoded_context);
+ }
+
+ return result;
+}
+
+static int process_payload_raw_traces(struct flb_opentelemetry *ctx, struct http_conn *conn,
+ flb_sds_t tag,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ int ret;
+ int root_type;
+ char *out_buf = NULL;
+ size_t out_size;
+
+ msgpack_packer mp_pck;
+ msgpack_sbuffer mp_sbuf;
+
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ msgpack_pack_array(&mp_pck, 2);
+ flb_pack_time_now(&mp_pck);
+
+ /* Check if the incoming payload is a valid JSON message and convert it to msgpack */
+ ret = flb_pack_json(request->data.data, request->data.len,
+ &out_buf, &out_size, &root_type, NULL);
+
+ if (ret == 0 && root_type == JSMN_OBJECT) {
+ /* JSON found, pack its msgpack representation */
+ msgpack_sbuffer_write(&mp_sbuf, out_buf, out_size);
+ }
+ else {
+ /* the content might be a binary payload or invalid JSON */
+ msgpack_pack_map(&mp_pck, 1);
+ msgpack_pack_str_with_body(&mp_pck, "trace", 5);
+ msgpack_pack_str_with_body(&mp_pck, request->data.data, request->data.len);
+ }
+
+ /* release 'out_buf' if it was allocated */
+ if (out_buf) {
+ flb_free(out_buf);
+ }
+
+ flb_input_log_append(ctx->ins, tag, flb_sds_len(tag), mp_sbuf.data, mp_sbuf.size);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ return 0;
+}
+
+static int process_payload_traces(struct flb_opentelemetry *ctx, struct http_conn *conn,
+ flb_sds_t tag,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ int result;
+
+ if (ctx->raw_traces) {
+ result = process_payload_raw_traces(ctx, conn, tag, session, request);
+ }
+ else {
+ result = process_payload_traces_proto(ctx, conn, tag, session, request);
+ }
+
+ return result;
+}
+
+static int otel_pack_string(msgpack_packer *mp_pck, char *str)
+{
+ return msgpack_pack_str_with_body(mp_pck, str, strlen(str));
+}
+
+static int otel_pack_bool(msgpack_packer *mp_pck, bool val)
+{
+ if (val) {
+ return msgpack_pack_true(mp_pck);
+ }
+ else {
+ return msgpack_pack_false(mp_pck);
+ }
+}
+
+static int otel_pack_int(msgpack_packer *mp_pck, int val)
+{
+ return msgpack_pack_int64(mp_pck, val);
+}
+
+static int otel_pack_double(msgpack_packer *mp_pck, double val)
+{
+ return msgpack_pack_double(mp_pck, val);
+}
+
+static int otel_pack_kvarray(msgpack_packer *mp_pck,
+ Opentelemetry__Proto__Common__V1__KeyValue **kv_array,
+ size_t kv_count)
+{
+ int result;
+ int index;
+
+ result = msgpack_pack_map(mp_pck, kv_count);
+
+ if (result != 0) {
+ return result;
+ }
+
+ for (index = 0; index < kv_count && result == 0; index++) {
+ result = otel_pack_string(mp_pck, kv_array[index]->key);
+
+ if(result == 0) {
+ result = otlp_pack_any_value(mp_pck, kv_array[index]->value);
+ }
+ }
+
+ return result;
+}
+
+static int otel_pack_kvlist(msgpack_packer *mp_pck,
+ Opentelemetry__Proto__Common__V1__KeyValueList *kv_list)
+{
+ int kv_index;
+ int ret;
+ char *key;
+ Opentelemetry__Proto__Common__V1__AnyValue *value;
+
+ ret = msgpack_pack_map(mp_pck, kv_list->n_values);
+ if (ret != 0) {
+ return ret;
+ }
+
+ for (kv_index = 0; kv_index < kv_list->n_values && ret == 0; kv_index++) {
+ key = kv_list->values[kv_index]->key;
+ value = kv_list->values[kv_index]->value;
+
+ ret = otel_pack_string(mp_pck, key);
+
+ if(ret == 0) {
+ ret = otlp_pack_any_value(mp_pck, value);
+ }
+ }
+
+ return ret;
+}
+
+static int otel_pack_array(msgpack_packer *mp_pck,
+ Opentelemetry__Proto__Common__V1__ArrayValue *array)
+{
+ int ret;
+ int array_index;
+
+ ret = msgpack_pack_array(mp_pck, array->n_values);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ for (array_index = 0; array_index < array->n_values && ret == 0; array_index++) {
+ ret = otlp_pack_any_value(mp_pck, array->values[array_index]);
+ }
+
+ return ret;
+}
+
+static int otel_pack_bytes(msgpack_packer *mp_pck,
+ ProtobufCBinaryData bytes)
+{
+ return msgpack_pack_bin_with_body(mp_pck, bytes.data, bytes.len);
+}
+
+static int otlp_pack_any_value(msgpack_packer *mp_pck,
+ Opentelemetry__Proto__Common__V1__AnyValue *body)
+{
+ int result;
+
+ result = -2;
+
+ switch(body->value_case){
+ case OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_STRING_VALUE:
+ result = otel_pack_string(mp_pck, body->string_value);
+ break;
+
+ case OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_BOOL_VALUE:
+ result = otel_pack_bool(mp_pck, body->bool_value);
+ break;
+
+ case OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_INT_VALUE:
+ result = otel_pack_int(mp_pck, body->int_value);
+ break;
+
+ case OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_DOUBLE_VALUE:
+ result = otel_pack_double(mp_pck, body->double_value);
+ break;
+
+ case OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_ARRAY_VALUE:
+ result = otel_pack_array(mp_pck, body->array_value);
+ break;
+
+ case OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_KVLIST_VALUE:
+ result = otel_pack_kvlist(mp_pck, body->kvlist_value);
+ break;
+
+ case OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_BYTES_VALUE:
+ result = otel_pack_bytes(mp_pck, body->bytes_value);
+ break;
+
+ default:
+ break;
+ }
+
+ if (result == -2) {
+ flb_error("[otel]: invalid value type in pack_any_value");
+ result = -1;
+ }
+
+ return result;
+}
+
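+/*
+ * Unpack a protobuf ExportLogsServiceRequest and walk
+ * resourceLogs -> scopeLogs -> logRecords, encoding each record's attributes
+ * as event metadata and its body as the event body.
+ */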
+static int binary_payload_to_msgpack(struct flb_log_event_encoder *encoder,
+ uint8_t *in_buf,
+ size_t in_size)
+{
+ int ret;
+ msgpack_packer packer;
+ msgpack_sbuffer buffer;
+ int resource_logs_index;
+ int scope_log_index;
+ int log_record_index;
+
+ Opentelemetry__Proto__Collector__Logs__V1__ExportLogsServiceRequest *input_logs;
+ Opentelemetry__Proto__Logs__V1__ScopeLogs **scope_logs;
+ Opentelemetry__Proto__Logs__V1__ScopeLogs *scope_log;
+ Opentelemetry__Proto__Logs__V1__ResourceLogs **resource_logs;
+ Opentelemetry__Proto__Logs__V1__ResourceLogs *resource_log;
+ Opentelemetry__Proto__Logs__V1__LogRecord **log_records;
+
+ msgpack_sbuffer_init(&buffer);
+ msgpack_packer_init(&packer, &buffer, msgpack_sbuffer_write);
+
+ input_logs = opentelemetry__proto__collector__logs__v1__export_logs_service_request__unpack(NULL, in_size, in_buf);
+ if (input_logs == NULL) {
+ flb_error("[otel] Failed to unpack input logs");
+ return -1;
+ }
+
+ resource_logs = input_logs->resource_logs;
+ if (resource_logs == NULL) {
+ flb_error("[otel] No resource logs found");
+ return -1;
+ }
+
+ for (resource_logs_index = 0; resource_logs_index < input_logs->n_resource_logs; resource_logs_index++) {
+ resource_log = resource_logs[resource_logs_index];
+ scope_logs = resource_log->scope_logs;
+
+ if (resource_log->n_scope_logs > 0 && scope_logs == NULL) {
+ flb_error("[otel] No scope logs found");
+ return -1;
+ }
+
+ for (scope_log_index = 0; scope_log_index < resource_log->n_scope_logs; scope_log_index++) {
+ scope_log = scope_logs[scope_log_index];
+ log_records = scope_log->log_records;
+
+ if (log_records == NULL) {
+ flb_error("[otel] No log records found");
+ return -1;
+ }
+
+ for (log_record_index=0; log_record_index < scope_log->n_log_records; log_record_index++) {
+ ret = flb_log_event_encoder_begin_record(encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = otel_pack_kvarray(
+ &packer,
+ log_records[log_record_index]->attributes,
+ log_records[log_record_index]->n_attributes);
+
+ if (ret != 0) {
+ flb_error("[otel] Failed to convert log record attributes");
+
+ ret = FLB_EVENT_ENCODER_ERROR_SERIALIZATION_FAILURE;
+ }
+ else {
+ ret = flb_log_event_encoder_set_metadata_from_raw_msgpack(
+ encoder,
+ buffer.data,
+ buffer.size);
+ }
+
+ msgpack_sbuffer_clear(&buffer);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = otlp_pack_any_value(
+ &packer,
+ log_records[log_record_index]->body);
+
+ if (ret != 0) {
+ flb_error("[otel] Failed to convert log record body");
+
+ ret = FLB_EVENT_ENCODER_ERROR_SERIALIZATION_FAILURE;
+ }
+ else {
+ if (log_records[log_record_index]->body->value_case ==
+ OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_KVLIST_VALUE) {
+ ret = flb_log_event_encoder_set_body_from_raw_msgpack(
+ encoder,
+ buffer.data,
+ buffer.size);
+ }
+ else {
+ ret = flb_log_event_encoder_append_body_values(
+ encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("message"),
+ FLB_LOG_EVENT_MSGPACK_RAW_VALUE(buffer.data, buffer.size));
+ }
+ }
+
+ msgpack_sbuffer_clear(&buffer);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(encoder);
+ }
+ else {
+ flb_error("[otel] marshalling error");
+
+ msgpack_sbuffer_destroy(&buffer);
+
+ return -1;
+ }
+ }
+ }
+ }
+
+ msgpack_sbuffer_destroy(&buffer);
+
+ return 0;
+}
+
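+/*
+ * Return the index of the match_index-th map entry whose key matches 'key'
+ * (optionally case insensitive), or -1 when no such entry exists.
+ */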
+static int find_map_entry_by_key(msgpack_object_map *map,
+ char *key,
+ size_t match_index,
+ int case_insensitive)
+{
+ size_t match_count;
+ int result;
+ int index;
+
+ match_count = 0;
+
+ for (index = 0 ; index < (int) map->size ; index++) {
+ if (map->ptr[index].key.type == MSGPACK_OBJECT_STR) {
+ if (case_insensitive) {
+ result = strncasecmp(map->ptr[index].key.via.str.ptr,
+ key,
+ map->ptr[index].key.via.str.size);
+ }
+ else {
+ result = strncmp(map->ptr[index].key.via.str.ptr,
+ key,
+ map->ptr[index].key.via.str.size);
+ }
+
+ if (result == 0) {
+ if (match_count == match_index) {
+ return index;
+ }
+
+ match_count++;
+ }
+ }
+ }
+
+ return -1;
+}
+
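+/*
+ * Detect OTLP/JSON AnyValue wrappers such as {"stringValue": ...}: on success
+ * report the equivalent msgpack type and a pointer to the wrapped value,
+ * unwrapping the extra "values" level used by arrayValue and kvlistValue.
+ */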
+static int json_payload_get_wrapped_value(msgpack_object *wrapper,
+ msgpack_object **value,
+ int *type)
+{
+ int internal_type;
+ msgpack_object *kv_value;
+ msgpack_object_str *kv_key;
+ msgpack_object_map *map;
+
+ if (wrapper->type != MSGPACK_OBJECT_MAP) {
+ return -1;
+ }
+
+ map = &wrapper->via.map;
+ kv_value = NULL;
+ internal_type = -1;
+
+ if (map->size == 1) {
+ if (map->ptr[0].key.type == MSGPACK_OBJECT_STR) {
+ kv_value = &map->ptr[0].val;
+ kv_key = &map->ptr[0].key.via.str;
+
+ if (strncasecmp(kv_key->ptr, "stringValue", kv_key->size) == 0 ||
+ strncasecmp(kv_key->ptr, "string_value", kv_key->size) == 0) {
+ internal_type = MSGPACK_OBJECT_STR;
+ }
+ else if (strncasecmp(kv_key->ptr, "boolValue", kv_key->size) == 0 ||
+ strncasecmp(kv_key->ptr, "bool_value", kv_key->size) == 0) {
+ internal_type = MSGPACK_OBJECT_BOOLEAN;
+ }
+ else if (strncasecmp(kv_key->ptr, "intValue", kv_key->size) == 0 ||
+ strncasecmp(kv_key->ptr, "int_value", kv_key->size) == 0) {
+ internal_type = MSGPACK_OBJECT_POSITIVE_INTEGER;
+ }
+ else if (strncasecmp(kv_key->ptr, "doubleValue", kv_key->size) == 0 ||
+ strncasecmp(kv_key->ptr, "double_value", kv_key->size) == 0) {
+ internal_type = MSGPACK_OBJECT_FLOAT;
+ }
+ else if (strncasecmp(kv_key->ptr, "bytesValue", kv_key->size) == 0 ||
+ strncasecmp(kv_key->ptr, "bytes_value", kv_key->size) == 0) {
+ internal_type = MSGPACK_OBJECT_BIN;
+ }
+ else if (strncasecmp(kv_key->ptr, "arrayValue", kv_key->size) == 0 ||
+ strncasecmp(kv_key->ptr, "array_value", kv_key->size) == 0) {
+ internal_type = MSGPACK_OBJECT_ARRAY;
+ }
+ else if (strncasecmp(kv_key->ptr, "kvlistValue", kv_key->size) == 0 ||
+ strncasecmp(kv_key->ptr, "kvlist_value", kv_key->size) == 0) {
+ internal_type = MSGPACK_OBJECT_MAP;
+ }
+ }
+ }
+
+ if (internal_type != -1) {
+ if (type != NULL) {
+ *type = internal_type;
+ }
+
+ if (value != NULL) {
+ *value = kv_value;
+ }
+
+ if (kv_value->type == MSGPACK_OBJECT_MAP) {
+ map = &kv_value->via.map;
+
+ if (map->size == 1) {
+ kv_value = &map->ptr[0].val;
+ kv_key = &map->ptr[0].key.via.str;
+
+ if (strncasecmp(kv_key->ptr, "values", kv_key->size) == 0) {
+ if (value != NULL) {
+ *value = kv_value;
+ }
+ }
+ else {
+ return -3;
+ }
+ }
+ }
+ }
+ else {
+ return -2;
+ }
+
+ return 0;
+}
+
+static int json_payload_append_unwrapped_value(
+ struct flb_log_event_encoder *encoder,
+ int target_field,
+ msgpack_object *object,
+ int *encoder_result)
+{
+ char temporary_buffer[33];
+ int unwrap_value;
+ int result;
+ msgpack_object *value;
+ int type;
+
+ result = json_payload_get_wrapped_value(object,
+ &value,
+ &type);
+
+ if (result == 0) {
+ unwrap_value = FLB_FALSE;
+
+ if (type == MSGPACK_OBJECT_STR) {
+ unwrap_value = FLB_TRUE;
+ }
+ else if (type == MSGPACK_OBJECT_BOOLEAN) {
+ unwrap_value = FLB_TRUE;
+ }
+ else if (type == MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ if (value->type == MSGPACK_OBJECT_STR) {
+ memset(temporary_buffer, 0, sizeof(temporary_buffer));
+
+ if (value->via.str.size < sizeof(temporary_buffer)) {
+ strncpy(temporary_buffer,
+ value->via.str.ptr,
+ value->via.str.size);
+ }
+ else {
+ strncpy(temporary_buffer,
+ value->via.str.ptr,
+ sizeof(temporary_buffer) - 1);
+ }
+
+ result = flb_log_event_encoder_append_int64(
+ encoder,
+ target_field,
+ strtoll(temporary_buffer, NULL, 10));
+ }
+ else {
+ unwrap_value = FLB_TRUE;
+ }
+ }
+ else if (type == MSGPACK_OBJECT_FLOAT) {
+ unwrap_value = FLB_TRUE;
+ }
+ else if (type == MSGPACK_OBJECT_BIN) {
+ unwrap_value = FLB_TRUE;
+ }
+ else if (type == MSGPACK_OBJECT_ARRAY) {
+ result = json_payload_append_converted_array(encoder,
+ target_field,
+ value);
+ }
+ else if (type == MSGPACK_OBJECT_MAP) {
+ result = json_payload_append_converted_kvlist(encoder,
+ target_field,
+ value);
+ }
+ else {
+ return -2;
+ }
+
+ if (unwrap_value) {
+ result = json_payload_append_converted_value(encoder,
+ target_field,
+ value);
+ }
+
+ *encoder_result = result;
+
+ return 0;
+ }
+
+ return -1;
+}
+
+
+static int json_payload_append_converted_map(
+ struct flb_log_event_encoder *encoder,
+ int target_field,
+ msgpack_object *object)
+{
+ int encoder_result;
+ int result;
+ size_t index;
+ msgpack_object_map *map;
+
+ map = &object->via.map;
+
+ result = json_payload_append_unwrapped_value(
+ encoder,
+ target_field,
+ object,
+ &encoder_result);
+
+ if (result == 0 && encoder_result == FLB_EVENT_ENCODER_SUCCESS) {
+ return result;
+ }
+
+ result = flb_log_event_encoder_begin_map(encoder, target_field);
+
+ for (index = 0 ;
+ index < map->size &&
+ result == FLB_EVENT_ENCODER_SUCCESS;
+ index++) {
+ result = json_payload_append_converted_value(
+ encoder,
+ target_field,
+ &map->ptr[index].key);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = json_payload_append_converted_value(
+ encoder,
+ target_field,
+ &map->ptr[index].val);
+ }
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_commit_map(encoder, target_field);
+ }
+ else {
+ flb_log_event_encoder_rollback_map(encoder, target_field);
+ }
+
+ return result;
+}
+
+static int json_payload_append_converted_array(
+ struct flb_log_event_encoder *encoder,
+ int target_field,
+ msgpack_object *object)
+{
+ int result;
+ size_t index;
+ msgpack_object_array *array;
+
+ array = &object->via.array;
+
+ result = flb_log_event_encoder_begin_array(encoder, target_field);
+
+ for (index = 0 ;
+ index < array->size &&
+ result == FLB_EVENT_ENCODER_SUCCESS;
+ index++) {
+ result = json_payload_append_converted_value(
+ encoder,
+ target_field,
+ &array->ptr[index]);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_commit_array(encoder, target_field);
+ }
+ else {
+ flb_log_event_encoder_rollback_array(encoder, target_field);
+ }
+
+ return result;
+}
+
+static int json_payload_append_converted_kvlist(
+ struct flb_log_event_encoder *encoder,
+ int target_field,
+ msgpack_object *object)
+{
+ int value_index;
+ int key_index;
+ int result;
+ size_t index;
+ msgpack_object_array *array;
+ msgpack_object_map *entry;
+
+ array = &object->via.array;
+
+ result = flb_log_event_encoder_begin_map(encoder, target_field);
+
+ for (index = 0 ;
+ index < array->size &&
+ result == FLB_EVENT_ENCODER_SUCCESS;
+ index++) {
+
+ if (array->ptr[index].type != MSGPACK_OBJECT_MAP) {
+ result = FLB_EVENT_ENCODER_ERROR_INVALID_ARGUMENT;
+ }
+ else {
+ entry = &array->ptr[index].via.map;
+
+ key_index = find_map_entry_by_key(entry, "key", 0, FLB_TRUE);
+
+ if (key_index == -1) {
+ result = FLB_EVENT_ENCODER_ERROR_INVALID_ARGUMENT;
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ value_index = find_map_entry_by_key(entry, "value", 0, FLB_TRUE);
+
+ if (value_index == -1) {
+ result = FLB_EVENT_ENCODER_ERROR_INVALID_ARGUMENT;
+ }
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = json_payload_append_converted_value(
+ encoder,
+ target_field,
+ &entry->ptr[key_index].val);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = json_payload_append_converted_value(
+ encoder,
+ target_field,
+ &entry->ptr[value_index].val);
+ }
+ }
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_commit_map(encoder, target_field);
+ }
+ else {
+ flb_log_event_encoder_rollback_map(encoder, target_field);
+ }
+
+ return result;
+}
+
+static int json_payload_append_converted_value(
+ struct flb_log_event_encoder *encoder,
+ int target_field,
+ msgpack_object *object)
+{
+ int result;
+
+ result = FLB_EVENT_ENCODER_SUCCESS;
+
+ switch (object->type) {
+ case MSGPACK_OBJECT_BOOLEAN:
+ result = flb_log_event_encoder_append_boolean(
+ encoder,
+ target_field,
+ object->via.boolean);
+ break;
+
+ case MSGPACK_OBJECT_POSITIVE_INTEGER:
+ result = flb_log_event_encoder_append_uint64(
+ encoder,
+ target_field,
+ object->via.u64);
+ break;
+ case MSGPACK_OBJECT_NEGATIVE_INTEGER:
+ result = flb_log_event_encoder_append_int64(
+ encoder,
+ target_field,
+ object->via.i64);
+ break;
+
+ case MSGPACK_OBJECT_FLOAT32:
+ case MSGPACK_OBJECT_FLOAT64:
+ result = flb_log_event_encoder_append_double(
+ encoder,
+ target_field,
+ object->via.f64);
+ break;
+
+ case MSGPACK_OBJECT_STR:
+ result = flb_log_event_encoder_append_string(
+ encoder,
+ target_field,
+ (char *) object->via.str.ptr,
+ object->via.str.size);
+
+ break;
+
+ case MSGPACK_OBJECT_BIN:
+ result = flb_log_event_encoder_append_binary(
+ encoder,
+ target_field,
+ (char *) object->via.bin.ptr,
+ object->via.bin.size);
+ break;
+
+ case MSGPACK_OBJECT_ARRAY:
+ result = json_payload_append_converted_array(
+ encoder,
+ target_field,
+ object);
+ break;
+
+ case MSGPACK_OBJECT_MAP:
+ result = json_payload_append_converted_map(
+ encoder,
+ target_field,
+ object);
+
+ break;
+
+ default:
+ break;
+ }
+
+ return result;
+}
+
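+/*
+ * Convert a single logRecords entry: resolve the timestamp (timeUnixNano or
+ * observedTimeUnixNano, falling back to the current time), map 'attributes'
+ * to the record metadata and 'body' to the record body.
+ */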
+static int process_json_payload_log_records_entry(
+ struct flb_opentelemetry *ctx,
+ struct flb_log_event_encoder *encoder,
+ msgpack_object *log_records_object)
+{
+ msgpack_object_map *log_records_entry;
+ char timestamp_str[32];
+ msgpack_object *timestamp_object;
+ uint64_t timestamp_uint64;
+ msgpack_object *metadata_object;
+ msgpack_object *body_object;
+ int body_type;
+ struct flb_time timestamp;
+ int result;
+
+ if (log_records_object->type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected logRecords entry type");
+
+ return -4;
+ }
+
+ log_records_entry = &log_records_object->via.map;
+
+ result = find_map_entry_by_key(log_records_entry, "timeUnixNano", 0, FLB_TRUE);
+
+ if (result == -1) {
+ result = find_map_entry_by_key(log_records_entry, "time_unix_nano", 0, FLB_TRUE);
+ }
+
+ if (result == -1) {
+ result = find_map_entry_by_key(log_records_entry, "observedTimeUnixNano", 0, FLB_TRUE);
+ }
+
+ if (result == -1) {
+ result = find_map_entry_by_key(log_records_entry, "observed_time_unix_nano", 0, FLB_TRUE);
+ }
+
+ if (result == -1) {
+ flb_plg_info(ctx->ins, "neither timeUnixNano nor observedTimeUnixNano found");
+
+ flb_time_get(&timestamp);
+ }
+ else {
+ timestamp_object = &log_records_entry->ptr[result].val;
+
+ if (timestamp_object->type == MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ timestamp_uint64 = timestamp_object->via.u64;
+ }
+ else if (timestamp_object->type == MSGPACK_OBJECT_STR) {
+ memset(timestamp_str, 0, sizeof(timestamp_str));
+
+ if (timestamp_object->via.str.size < sizeof(timestamp_str)) {
+ strncpy(timestamp_str,
+ timestamp_object->via.str.ptr,
+ timestamp_object->via.str.size);
+ }
+ else {
+ strncpy(timestamp_str,
+ timestamp_object->via.str.ptr,
+ sizeof(timestamp_str) - 1);
+ }
+
+ timestamp_uint64 = strtoul(timestamp_str, NULL, 10);
+ }
+ else {
+ flb_plg_error(ctx->ins, "unexpected timeUnixNano type");
+
+ return -4;
+ }
+
+ flb_time_from_uint64(&timestamp, timestamp_uint64);
+ }
+
+
+ result = find_map_entry_by_key(log_records_entry, "attributes", 0, FLB_TRUE);
+
+ if (result == -1) {
+ flb_plg_debug(ctx->ins, "attributes missing");
+
+ metadata_object = NULL;
+ }
+ else {
+ if (log_records_entry->ptr[result].val.type != MSGPACK_OBJECT_ARRAY) {
+ flb_plg_error(ctx->ins, "unexpected attributes type");
+
+ return -4;
+ }
+
+ metadata_object = &log_records_entry->ptr[result].val;
+ }
+
+ result = find_map_entry_by_key(log_records_entry, "body", 0, FLB_TRUE);
+
+ if (result == -1) {
+ flb_plg_info(ctx->ins, "body missing");
+
+ body_object = NULL;
+ }
+ else {
+ if (log_records_entry->ptr[result].val.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected body type");
+
+ return -4;
+ }
+
+ body_object = &log_records_entry->ptr[result].val;
+ }
+
+ result = flb_log_event_encoder_begin_record(encoder);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_timestamp(encoder, &timestamp);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS &&
+ metadata_object != NULL) {
+ flb_log_event_encoder_dynamic_field_reset(&encoder->metadata);
+
+ result = json_payload_append_converted_kvlist(
+ encoder,
+ FLB_LOG_EVENT_METADATA,
+ metadata_object);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS &&
+ body_object != NULL) {
+ result = json_payload_get_wrapped_value(body_object, NULL, &body_type);
+
+ if (result != 0 || body_type == MSGPACK_OBJECT_MAP) {
+ flb_log_event_encoder_dynamic_field_reset(&encoder->body);
+ }
+ else {
+ flb_log_event_encoder_append_cstring(
+ encoder,
+ FLB_LOG_EVENT_BODY,
+ "log");
+ }
+
+ result = json_payload_append_converted_value(
+ encoder,
+ FLB_LOG_EVENT_BODY,
+ body_object);
+ }
+
+ result = flb_log_event_encoder_dynamic_field_flush(&encoder->body);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_commit_record(encoder);
+ }
+ else {
+ flb_plg_error(ctx->ins, "log event encoder failure : %d", result);
+
+ flb_log_event_encoder_rollback_record(encoder);
+
+ result = -4;
+ }
+
+ return result;
+}
+
+static int process_json_payload_scope_logs_entry(
+ struct flb_opentelemetry *ctx,
+ struct flb_log_event_encoder *encoder,
+ msgpack_object *scope_logs_object)
+{
+ msgpack_object_map *scope_logs_entry;
+ msgpack_object_array *log_records;
+ int result;
+ size_t index;
+
+ if (scope_logs_object->type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected scopeLogs entry type");
+
+ return -3;
+ }
+
+ scope_logs_entry = &scope_logs_object->via.map;
+
+ result = find_map_entry_by_key(scope_logs_entry, "logRecords", 0, FLB_TRUE);
+
+ if (result == -1) {
+ result = find_map_entry_by_key(scope_logs_entry, "log_records", 0, FLB_TRUE);
+
+ if (result == -1) {
+ flb_plg_error(ctx->ins, "logRecords missing");
+
+ return -3;
+ }
+ }
+
+ if (scope_logs_entry->ptr[result].val.type != MSGPACK_OBJECT_ARRAY) {
+ flb_plg_error(ctx->ins, "unexpected logRecords type");
+
+ return -3;
+ }
+
+ log_records = &scope_logs_entry->ptr[result].val.via.array;
+
+ result = 0;
+
+ for (index = 0 ; index < log_records->size ; index++) {
+ result = process_json_payload_log_records_entry(
+ ctx,
+ encoder,
+ &log_records->ptr[index]);
+ }
+
+ return result;
+}
+
+
+static int process_json_payload_resource_logs_entry(
+ struct flb_opentelemetry *ctx,
+ struct flb_log_event_encoder *encoder,
+ msgpack_object *resource_logs_object)
+{
+ msgpack_object_map *resource_logs_entry;
+ msgpack_object_array *scope_logs;
+ int result;
+ size_t index;
+
+
+ if (resource_logs_object->type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected resourceLogs entry type");
+
+ return -2;
+ }
+
+ resource_logs_entry = &resource_logs_object->via.map;
+
+ result = find_map_entry_by_key(resource_logs_entry, "scopeLogs", 0, FLB_TRUE);
+
+ if (result == -1) {
+ result = find_map_entry_by_key(resource_logs_entry, "scope_logs", 0, FLB_TRUE);
+
+ if (result == -1) {
+ flb_plg_error(ctx->ins, "scopeLogs missing");
+
+ return -2;
+ }
+ }
+
+ if (resource_logs_entry->ptr[result].val.type != MSGPACK_OBJECT_ARRAY) {
+ flb_plg_error(ctx->ins, "unexpected scopeLogs type");
+
+ return -2;
+ }
+
+ scope_logs = &resource_logs_entry->ptr[result].val.via.array;
+
+ result = 0;
+
+ for (index = 0 ; index < scope_logs->size ; index++) {
+ result = process_json_payload_scope_logs_entry(
+ ctx,
+ encoder,
+ &scope_logs->ptr[index]);
+ }
+
+ return result;
+}
+
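+/*
+ * Walk an OTLP/JSON logs document: resourceLogs -> scopeLogs -> logRecords,
+ * accepting both camelCase and snake_case key spellings.
+ */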
+static int process_json_payload_root(struct flb_opentelemetry *ctx,
+ struct flb_log_event_encoder *encoder,
+ msgpack_object *root_object)
+{
+ msgpack_object_array *resource_logs;
+ int result;
+ size_t index;
+ msgpack_object_map *root;
+
+ if (root_object->type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected root object type");
+
+ return -1;
+ }
+
+ root = &root_object->via.map;
+
+ result = find_map_entry_by_key(root, "resourceLogs", 0, FLB_TRUE);
+
+ if (result == -1) {
+ result = find_map_entry_by_key(root, "resource_logs", 0, FLB_TRUE);
+
+ if (result == -1) {
+ flb_plg_error(ctx->ins, "resourceLogs missing");
+
+ return -1;
+ }
+ }
+
+ if (root->ptr[result].val.type != MSGPACK_OBJECT_ARRAY) {
+ flb_plg_error(ctx->ins, "unexpected resourceLogs type");
+
+ return -1;
+ }
+
+ resource_logs = &root->ptr[result].val.via.array;
+
+ result = 0;
+
+ for (index = 0 ; index < resource_logs->size ; index++) {
+ result = process_json_payload_resource_logs_entry(
+ ctx,
+ encoder,
+ &resource_logs->ptr[index]);
+ }
+
+ return result;
+}
+
+/* This code is definitely not complete and beyond fishy; it needs to be
+ * refactored.
+ */
+static int json_payload_to_msgpack(struct flb_opentelemetry *ctx,
+ struct flb_log_event_encoder *encoder,
+ const char *body,
+ size_t len)
+{
+ size_t msgpack_body_length;
+ msgpack_unpacked unpacked_root;
+ char *msgpack_body;
+ int root_type;
+ size_t offset;
+ int result;
+
+ result = flb_pack_json(body, len, &msgpack_body, &msgpack_body_length,
+ &root_type, NULL);
+
+ if (result != 0) {
+ flb_plg_error(ctx->ins, "json to msgpack conversion error");
+ }
+ else {
+ msgpack_unpacked_init(&unpacked_root);
+
+ offset = 0;
+ result = msgpack_unpack_next(&unpacked_root,
+ msgpack_body,
+ msgpack_body_length,
+ &offset);
+
+ if (result == MSGPACK_UNPACK_SUCCESS) {
+ result = process_json_payload_root(ctx,
+ encoder,
+ &unpacked_root.data);
+ }
+ else {
+ result = -1;
+ }
+
+ msgpack_unpacked_destroy(&unpacked_root);
+
+ flb_free(msgpack_body);
+ }
+
+ return result;
+}
+
+static int process_payload_logs(struct flb_opentelemetry *ctx, struct http_conn *conn,
+ flb_sds_t tag,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ struct flb_log_event_encoder *encoder;
+ int ret;
+
+ encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_FLUENT_BIT_V2);
+
+ if (encoder == NULL) {
+ return -1;
+ }
+
+ /* Dispatch on the Content-Type header: convert JSON or protobuf payloads to msgpack */
+ if (strncasecmp(request->content_type.data,
+ "application/json",
+ request->content_type.len) == 0) {
+ ret = json_payload_to_msgpack(ctx,
+ encoder,
+ request->data.data,
+ request->data.len);
+ }
+ else if (strncasecmp(request->content_type.data,
+ "application/x-protobuf",
+ request->content_type.len) == 0) {
+ ret = binary_payload_to_msgpack(encoder, (uint8_t *) request->data.data, request->data.len);
+ }
+ else {
+ flb_error("[otel] Unsupported content type %.*s", (int)request->content_type.len, request->content_type.data);
+
+ ret = -1;
+ }
+
+ if (ret == 0) {
+ ret = flb_input_log_append(ctx->ins,
+ tag,
+ flb_sds_len(tag),
+ encoder->output_buffer,
+ encoder->output_length);
+ }
+
+ flb_log_event_encoder_destroy(encoder);
+
+ return ret;
+}
+
+static inline int mk_http_point_header(mk_ptr_t *h,
+ struct mk_http_parser *parser, int key)
+{
+ struct mk_http_header *header;
+
+ header = &parser->headers[key];
+ if (header->type == key) {
+ h->data = header->val.data;
+ h->len = header->val.len;
+ return 0;
+ }
+ else {
+ h->data = NULL;
+ h->len = -1;
+ }
+
+ return -1;
+}
+
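+/*
+ * The zlib, zstd and deflate codecs below are placeholders: they only log an
+ * error and fail, since those encodings are not supported yet.
+ */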
+static \
+int uncompress_zlib(char **output_buffer,
+ size_t *output_size,
+ char *input_buffer,
+ size_t input_size)
+{
+ flb_error("[opentelemetry] unsupported compression format");
+
+ return -1;
+}
+
+static \
+int uncompress_zstd(char **output_buffer,
+ size_t *output_size,
+ char *input_buffer,
+ size_t input_size)
+{
+ flb_error("[opentelemetry] unsupported compression format");
+
+ return -1;
+}
+
+static \
+int uncompress_deflate(char **output_buffer,
+ size_t *output_size,
+ char *input_buffer,
+ size_t input_size)
+{
+ flb_error("[opentelemetry] unsupported compression format");
+
+ return -1;
+}
+
+static \
+int uncompress_snappy(char **output_buffer,
+ size_t *output_size,
+ char *input_buffer,
+ size_t input_size)
+{
+ int ret;
+
+ ret = flb_snappy_uncompress_framed_data(input_buffer,
+ input_size,
+ output_buffer,
+ output_size);
+
+ if (ret != 0) {
+ flb_error("[opentelemetry] snappy decompression failed");
+
+ return -1;
+ }
+
+ return 1;
+}
+
+static \
+int uncompress_gzip(char **output_buffer,
+ size_t *output_size,
+ char *input_buffer,
+ size_t input_size)
+{
+ int ret;
+
+ ret = flb_gzip_uncompress(input_buffer,
+ input_size,
+ (void *) output_buffer,
+ output_size);
+
+ if (ret == -1) {
+ flb_error("[opentelemetry] gzip decompression failed");
+
+ return -1;
+ }
+
+ return 1;
+}
+
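+/*
+ * Inspect the Content-Encoding header and decompress the request body when
+ * needed: returns 1 when a newly allocated buffer is stored in output_buffer,
+ * 0 when the payload is not compressed, and a negative value on failure or
+ * unsupported encodings.
+ */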
+int opentelemetry_prot_uncompress(struct mk_http_session *session,
+ struct mk_http_request *request,
+ char **output_buffer,
+ size_t *output_size)
+{
+ struct mk_http_header *header;
+ size_t index;
+
+ *output_buffer = NULL;
+ *output_size = 0;
+
+ for (index = 0;
+ index < session->parser.headers_extra_count;
+ index++) {
+ header = &session->parser.headers_extra[index];
+
+ if (strncasecmp(header->key.data, "Content-Encoding", 16) == 0) {
+ if (strncasecmp(header->val.data, "gzip", 4) == 0) {
+ return uncompress_gzip(output_buffer,
+ output_size,
+ request->data.data,
+ request->data.len);
+ }
+ else if (strncasecmp(header->val.data, "zlib", 4) == 0) {
+ return uncompress_zlib(output_buffer,
+ output_size,
+ request->data.data,
+ request->data.len);
+ }
+ else if (strncasecmp(header->val.data, "zstd", 4) == 0) {
+ return uncompress_zstd(output_buffer,
+ output_size,
+ request->data.data,
+ request->data.len);
+ }
+ else if (strncasecmp(header->val.data, "snappy", 6) == 0) {
+ return uncompress_snappy(output_buffer,
+ output_size,
+ request->data.data,
+ request->data.len);
+ }
+ else if (strncasecmp(header->val.data, "deflate", 7) == 0) {
+ return uncompress_deflate(output_buffer,
+ output_size,
+ request->data.data,
+ request->data.len);
+ }
+ else {
+ return -2;
+ }
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * Handle an incoming request. It performs extra checks over the request and,
+ * if everything is OK, enqueues the incoming payload.
+ */
+int opentelemetry_prot_handle(struct flb_opentelemetry *ctx, struct http_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ int i;
+ int ret = -1;
+ int len;
+ char *uri;
+ char *qs;
+ off_t diff;
+ flb_sds_t tag;
+ struct mk_http_header *header;
+ char *original_data;
+ size_t original_data_size;
+ char *uncompressed_data;
+ size_t uncompressed_data_size;
+
+ if (request->uri.data[0] != '/') {
+ send_response(conn, 400, "error: invalid request\n");
+ return -1;
+ }
+
+ /* Decode URI */
+ uri = mk_utils_url_decode(request->uri);
+ if (!uri) {
+ uri = mk_mem_alloc_z(request->uri.len + 1);
+ if (!uri) {
+ return -1;
+ }
+ memcpy(uri, request->uri.data, request->uri.len);
+ uri[request->uri.len] = '\0';
+ }
+
+ if (strcmp(uri, "/v1/metrics") != 0 &&
+ strcmp(uri, "/v1/traces") != 0 &&
+ strcmp(uri, "/v1/logs") != 0) {
+
+ send_response(conn, 400, "error: invalid endpoint\n");
+ mk_mem_free(uri);
+
+ return -1;
+ }
+
+ /* Try to match a query string so we can remove it */
+ qs = strchr(uri, '?');
+ if (qs) {
+ /* remove the query string part */
+ diff = qs - uri;
+ uri[diff] = '\0';
+ }
+
+ /* Compose the query string using the URI */
+ len = strlen(uri);
+
+ if (len == 1) {
+ tag = NULL; /* use default tag */
+ }
+ else {
+ tag = flb_sds_create_size(len);
+ if (!tag) {
+ mk_mem_free(uri);
+ return -1;
+ }
+
+ /* New tag skipping the URI '/' */
+ flb_sds_cat(tag, uri + 1, len - 1);
+
+ /* Sanitize: only allow alphanumeric chars, '_' and '.' */
+ for (i = 0; i < flb_sds_len(tag); i++) {
+ if (!isalnum(tag[i]) && tag[i] != '_' && tag[i] != '.') {
+ tag[i] = '_';
+ }
+ }
+ }
+
+ /* Check if we have a Host header: Hostname ; port */
+ mk_http_point_header(&request->host, &session->parser, MK_HEADER_HOST);
+
+ /* Header: Connection */
+ mk_http_point_header(&request->connection, &session->parser,
+ MK_HEADER_CONNECTION);
+
+ /* HTTP/1.1 needs Host header */
+ if (!request->host.data && request->protocol == MK_HTTP_PROTOCOL_11) {
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+ return -1;
+ }
+
+ /* Should we close the session after this request ? */
+ mk_http_keepalive_check(session, request, ctx->server);
+
+ /* Content Length */
+ header = &session->parser.headers[MK_HEADER_CONTENT_LENGTH];
+ if (header->type == MK_HEADER_CONTENT_LENGTH) {
+ request->_content_length.data = header->val.data;
+ request->_content_length.len = header->val.len;
+ }
+ else {
+ request->_content_length.data = NULL;
+ }
+
+ mk_http_point_header(&request->content_type, &session->parser, MK_HEADER_CONTENT_TYPE);
+
+ if (request->method != MK_METHOD_POST) {
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+ send_response(conn, 400, "error: invalid HTTP method\n");
+ return -1;
+ }
+
+ original_data = request->data.data;
+ original_data_size = request->data.len;
+
+ ret = opentelemetry_prot_uncompress(session, request,
+ &uncompressed_data,
+ &uncompressed_data_size);
+
+ if (ret > 0) {
+ request->data.data = uncompressed_data;
+ request->data.len = uncompressed_data_size;
+ }
+
+ if (strcmp(uri, "/v1/metrics") == 0) {
+ ret = process_payload_metrics(ctx, conn, tag, session, request);
+ }
+ else if (strcmp(uri, "/v1/traces") == 0) {
+ ret = process_payload_traces(ctx, conn, tag, session, request);
+ }
+ else if (strcmp(uri, "/v1/logs") == 0) {
+ ret = process_payload_logs(ctx, conn, tag, session, request);
+ }
+
+ if (uncompressed_data != NULL) {
+ flb_free(uncompressed_data);
+ }
+
+ request->data.data = original_data;
+ request->data.len = original_data_size;
+
+ mk_mem_free(uri);
+ flb_sds_destroy(tag);
+
+ send_response(conn, ctx->successful_response_code, NULL);
+
+ return ret;
+}
+
+/*
+ * Handle an incoming request which has resulted in an HTTP parser error.
+ */
+int opentelemetry_prot_handle_error(struct flb_opentelemetry *ctx, struct http_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ send_response(conn, 400, "error: invalid request\n");
+ return -1;
+}
diff --git a/src/fluent-bit/plugins/in_opentelemetry/opentelemetry_prot.h b/src/fluent-bit/plugins/in_opentelemetry/opentelemetry_prot.h
new file mode 100644
index 000000000..bbfd8332f
--- /dev/null
+++ b/src/fluent-bit/plugins/in_opentelemetry/opentelemetry_prot.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_OPENTELEMETRY_PROT
+#define FLB_IN_OPENTELEMETRY_PROT
+
+int opentelemetry_prot_handle(struct flb_opentelemetry *ctx, struct http_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request);
+
+int opentelemetry_prot_handle_error(struct flb_opentelemetry *ctx, struct http_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_podman_metrics/CMakeLists.txt b/src/fluent-bit/plugins/in_podman_metrics/CMakeLists.txt
new file mode 100644
index 000000000..9de0e5331
--- /dev/null
+++ b/src/fluent-bit/plugins/in_podman_metrics/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ podman_metrics.c
+ podman_metrics_data.c
+ )
+
+FLB_PLUGIN(in_podman_metrics "${src}" "")
diff --git a/src/fluent-bit/plugins/in_podman_metrics/podman_metrics.c b/src/fluent-bit/plugins/in_podman_metrics/podman_metrics.c
new file mode 100644
index 000000000..df64452ff
--- /dev/null
+++ b/src/fluent-bit/plugins/in_podman_metrics/podman_metrics.c
@@ -0,0 +1,515 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_metrics.h>
+#include <fluent-bit/flb_metrics_exporter.h>
+#include <fluent-bit/flb_jsmn.h>
+
+#include <monkey/mk_core/mk_list.h>
+
+#include "podman_metrics.h"
+#include "podman_metrics_config.h"
+#include "podman_metrics_data.h"
+
+/*
+ * Collect information about podman containers (ID and name) from the podman configuration
+ * file (the default is /var/lib/containers/storage/overlay-containers/containers.json).
+ * Since the flb_jsmn library exposes the JSON as a tree, search for objects with parent 0
+ * (objects that are children of the root array) and, within them, search for the ID and
+ * the name (which is itself an array).
+ */
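+/*
+ * For illustration only (the exact layout can differ between podman versions), an entry
+ * that this parser can handle would look roughly like:
+ *
+ *   [
+ *     {
+ *       "id": "2fe5b2...",
+ *       "names": ["my-container"],
+ *       "metadata": "{\"image-name\":\"docker.io/library/nginx:latest\", ...}"
+ *     }
+ *   ]
+ */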
+static int collect_container_data(struct flb_in_metrics *ctx)
+{
+ /* Buffers for reading data from JSON */
+ char *buffer;
+ char name[CONTAINER_NAME_SIZE];
+ char id[CONTAINER_ID_SIZE];
+ char image_name[IMAGE_NAME_SIZE];
+ char metadata[CONTAINER_METADATA_SIZE];
+ char *metadata_token_start;
+ char *metadata_token_stop;
+ int metadata_token_size;
+
+ int array_id;
+ int r, i, j;
+ size_t read_bytes = 0;
+ int collected_containers = 0;
+ int token_len;
+
+ jsmn_parser p;
+ jsmntok_t t[JSON_TOKENS];
+
+ flb_utils_read_file(ctx->config, &buffer, &read_bytes);
+ if (!read_bytes) {
+ flb_plg_warn(ctx->ins, "Failed to open %s", ctx->config);
+ return -1;
+ }
+ buffer[read_bytes] = 0;
+ flb_plg_debug(ctx->ins, "Read %zu bytes", read_bytes);
+
+ jsmn_init(&p);
+ r = jsmn_parse(&p, buffer, strlen(buffer), t, sizeof(t) / sizeof(t[0]));
+ if (r < 0) {
+ flb_plg_warn(ctx->ins, "Failed to parse JSON %d: %s", r, buffer);
+ free(buffer);
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "Got %d nested tokens", t[0].size);
+
+ if (r < 1 || t[0].type != JSMN_ARRAY) {
+ flb_plg_warn(ctx->ins, "Expected array at the json root");
+ free(buffer);
+ return -1;
+ }
+
+ for (i=0; i<r; i++) {
+ if (t[i].type == JSMN_STRING) {
+ if (sizeof(JSON_FIELD_ID)-1 == t[i].end - t[i].start &&
+ strncmp(buffer + t[i].start, JSON_FIELD_ID, t[i].end - t[i].start) == 0) {
+ token_len = t[i + 1].end - t[i + 1].start;
+ strncpy(id, buffer + t[i+1].start, t[i + 1].end - t[i + 1].start);
+ id[token_len] = '\0';
+ flb_plg_trace(ctx->ins, "Found id %s", id);
+ }
+ else if (sizeof(JSON_FIELD_NAMES)-1 == t[i].end - t[i].start &&
+ strncmp(buffer + t[i].start, JSON_FIELD_NAMES, t[i].end - t[i].start) == 0) {
+ array_id = i + 1;
+ if (t[array_id].type == JSMN_ARRAY) {
+ j = array_id + 1;
+ while (t[j].parent == array_id)
+ {
+ strncpy(name, buffer + t[j].start, t[j].end - t[j].start);
+ name[t[j].end - t[j].start] = '\0';
+ flb_plg_trace(ctx->ins, "Found name %s", name);
+ j++;
+ }
+ }
+ }
+ else if (sizeof(JSON_FIELD_METADATA)-1 == t[i].end - t[i].start &&
+ strncmp(buffer + t[i].start, JSON_FIELD_METADATA, t[i].end - t[i].start) == 0) {
+ token_len = t[i + 1].end - t[i + 1].start;
+ strncpy(metadata, buffer + t[i+1].start, t[i + 1].end - t[i + 1].start);
+ metadata[token_len] = '\0';
+
+ metadata_token_start = strstr(metadata, JSON_SUBFIELD_IMAGE_NAME);
+ if (metadata_token_start) {
+ metadata_token_stop = strstr(metadata_token_start + JSON_SUBFIELD_SIZE_IMAGE_NAME+1, "\\\"");
+ metadata_token_size = metadata_token_stop - metadata_token_start - JSON_SUBFIELD_SIZE_IMAGE_NAME;
+
+ strncpy(image_name, metadata_token_start+JSON_SUBFIELD_SIZE_IMAGE_NAME, metadata_token_size);
+ image_name[metadata_token_size] = '\0';
+
+ flb_plg_trace(ctx->ins, "Found image name %s", image_name);
+ add_container_to_list(ctx, id, name, image_name);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "Image name was not found for %s", id);
+ add_container_to_list(ctx, id, name, "unknown");
+ }
+ collected_containers++;
+ }
+ }
+ }
+
+ flb_plg_debug(ctx->ins, "Collected %d containers from podman config file", collected_containers);
+ free(buffer);
+ return collected_containers;
+}
+
+/*
+ * Create a container structure based on the previously found id, name and image name. Set all of
+ * its metric values (like memory or cpu) to UINT64_MAX, in case they are not found later. This
+ * function also adds the structure to the internal list, so it can be found by iteration later on.
+ */
+static int add_container_to_list(struct flb_in_metrics *ctx, flb_sds_t id, flb_sds_t name, flb_sds_t image_name)
+{
+ struct container *cnt;
+ cnt = flb_malloc(sizeof(struct container));
+ if (!cnt) {
+ flb_errno();
+ return -1;
+ }
+ cnt->id = flb_sds_create(id);
+ cnt->name = flb_sds_create(name);
+ cnt->image_name = flb_sds_create(image_name);
+
+ cnt->memory_usage = UINT64_MAX;
+ cnt->memory_max_usage = UINT64_MAX;
+ cnt->memory_limit = UINT64_MAX;
+ cnt->rss = UINT64_MAX;
+ cnt->cpu_user = UINT64_MAX;
+ cnt->cpu = UINT64_MAX;
+
+ mk_list_init(&cnt->net_data);
+
+ mk_list_add(&cnt->_head, &ctx->items);
+ return 0;
+}
+
+/*
+ * Iterate over container list and remove collected data
+ */
+static int destroy_container_list(struct flb_in_metrics *ctx)
+{
+ struct container *cnt;
+ struct net_iface *iface;
+ struct sysfs_path *pth;
+ struct mk_list *head;
+ struct mk_list *tmp;
+ struct mk_list *inner_head;
+ struct mk_list *inner_tmp;
+
+ mk_list_foreach_safe(head, tmp, &ctx->items) {
+ cnt = mk_list_entry(head, struct container, _head);
+        flb_plg_debug(ctx->ins, "Destroying container data (id: %s, name: %s)", cnt->id, cnt->name);
+
+ flb_sds_destroy(cnt->id);
+ flb_sds_destroy(cnt->name);
+ flb_sds_destroy(cnt->image_name);
+ mk_list_foreach_safe(inner_head, inner_tmp, &cnt->net_data) {
+ iface = mk_list_entry(inner_head, struct net_iface, _head);
+ flb_sds_destroy(iface->name);
+ mk_list_del(&iface->_head);
+ flb_free(iface);
+ }
+ mk_list_del(&cnt->_head);
+ flb_free(cnt);
+ }
+
+ mk_list_foreach_safe(head, tmp, &ctx->sysfs_items) {
+ pth = mk_list_entry(head, struct sysfs_path, _head);
+        flb_plg_trace(ctx->ins, "Destroying sysfs data (name: %s)", pth->path);
+ flb_sds_destroy(pth->path);
+ mk_list_del(&pth->_head);
+ flb_free(pth);
+ }
+ return 0;
+}
+
+
+/*
+ * Create a counter for the given metric name, using the container id, name and image name as
+ * labels. Counters are created per counter name, so they are "shared" between multiple
+ * containers - the counter name remains the same, only labels like the ID change.
+ * This function creates the counter only once per counter name - every subsequent call only
+ * sets the counter value for the specific labels.
+ */
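+/*
+ * As an illustrative example, the memory usage counter below ends up named
+ * container_memory_usage_bytes and carries labels such as
+ * {id="<container id>", name="<container name>", image="<image name>"}.
+ */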
+static int create_counter(struct flb_in_metrics *ctx, struct cmt_counter **counter, flb_sds_t id, flb_sds_t name, flb_sds_t image_name, flb_sds_t metric_prefix,
+ flb_sds_t *fields, flb_sds_t metric_name, flb_sds_t description, flb_sds_t interface, uint64_t value)
+{
+ flb_sds_t *labels;
+ uint64_t fvalue = value;
+
+ int label_count;
+ if (value == UINT64_MAX) {
+ flb_plg_debug(ctx->ins, "Ignoring invalid counter for %s, %s_%s_%s", name, COUNTER_PREFIX, metric_prefix, metric_name);
+ return -1;
+ }
+
+ if (strcmp(metric_name, COUNTER_CPU) == 0 || strcmp(metric_name, COUNTER_CPU_USER) == 0) {
+ fvalue = fvalue / 1000000000;
+ flb_plg_trace(ctx->ins, "Converting %s from nanoseconds to seconds (%lu -> %lu)", metric_name, value, fvalue);
+
+ }
+
+ if (interface == NULL) {
+ labels = (char *[]){id, name, image_name};
+ label_count = 3;
+ }
+ else {
+ labels = (char *[]){id, name, image_name, interface};
+ label_count = 4;
+ }
+
+ /* if counter was not yet created, it means that this function is called for the first time per counter type */
+ if (*counter == NULL) {
+ flb_plg_debug(ctx->ins, "Creating counter for %s, %s_%s_%s", name, COUNTER_PREFIX, metric_prefix, metric_name);
+ *counter = cmt_counter_create(ctx->ins->cmt, COUNTER_PREFIX, metric_prefix, metric_name, description, label_count, fields);
+ }
+
+    /* Allow setting a value that is not greater than the current one (if, for example, memory usage stays exactly the same) */
+ cmt_counter_allow_reset(*counter);
+ flb_plg_debug(ctx->ins, "Set counter for %s, %s_%s_%s: %lu", name, COUNTER_PREFIX, metric_prefix, metric_name, fvalue);
+ if (cmt_counter_set(*counter, cfl_time_now(), fvalue, label_count, labels) == -1) {
+ flb_plg_warn(ctx->ins, "Failed to set counter for %s, %s_%s_%s", name, COUNTER_PREFIX, metric_prefix, metric_name);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Create a gauge for the given metric name, using the container id, name and image name as
+ * labels. Gauges are created per metric name, so they are "shared" between multiple
+ * containers - the gauge name remains the same, only labels like the ID change.
+ * This function creates the gauge only once per metric name - every subsequent call only
+ * sets the gauge value for the specific labels.
+ */
+static int create_gauge(struct flb_in_metrics *ctx, struct cmt_gauge **gauge, flb_sds_t id, flb_sds_t name, flb_sds_t image_name, flb_sds_t metric_prefix,
+ flb_sds_t *fields, flb_sds_t metric_name, flb_sds_t description, flb_sds_t interface, uint64_t value)
+{
+ flb_sds_t *labels;
+ int label_count;
+ if (value == UINT64_MAX) {
+ flb_plg_debug(ctx->ins, "Ignoring invalid gauge for %s, %s_%s_%s", name, COUNTER_PREFIX, metric_prefix, metric_name);
+ return -1;
+ }
+
+ labels = (char *[]){id, name, image_name};
+ label_count = 3;
+
+ /* if gauge was not yet created, it means that this function is called for the first time per counter type */
+ if (*gauge == NULL) {
+ flb_plg_debug(ctx->ins, "Creating gauge for %s, %s_%s_%s", name, COUNTER_PREFIX, metric_prefix, metric_name);
+ *gauge = cmt_gauge_create(ctx->ins->cmt, COUNTER_PREFIX, metric_prefix, metric_name, description, label_count, fields);
+ }
+
+ flb_plg_debug(ctx->ins, "Set gauge for %s, %s_%s_%s: %lu", name, COUNTER_PREFIX, metric_prefix, metric_name, value);
+ if (cmt_gauge_set(*gauge, cfl_time_now(), value, label_count, labels) == -1) {
+ flb_plg_warn(ctx->ins, "Failed to set gauge for %s, %s_%s_%s", name, COUNTER_PREFIX, metric_prefix, metric_name);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Call create_counter for every counter type defined in this plugin.
+ *
+ * Currently supported counters are:
+ * - container_memory_usage_bytes
+ * - container_memory_max_usage_bytes
+ * - container_memory_rss
+ * - container_spec_memory_limit_bytes
+ * - container_cpu_user_seconds_total
+ * - container_cpu_usage_seconds_total
+ * - container_network_receive_bytes_total
+ * - container_network_receive_errors_total
+ * - container_network_transmit_bytes_total
+ * - container_network_transmit_errors_total
+ */
+static int create_counters(struct flb_in_metrics *ctx)
+{
+ struct container *cnt;
+ struct net_iface *iface;
+ struct mk_list *head;
+ struct mk_list *tmp;
+ struct mk_list *inner_head;
+ struct mk_list *inner_tmp;
+
+ mk_list_foreach_safe(head, tmp, &ctx->items)
+ {
+ cnt = mk_list_entry(head, struct container, _head);
+ create_counter(ctx, &ctx->c_memory_usage, cnt->id, cnt->name, cnt->image_name, COUNTER_MEMORY_PREFIX, FIELDS_METRIC, COUNTER_MEMORY_USAGE,
+ DESCRIPTION_MEMORY_USAGE, NULL, cnt->memory_usage);
+ create_counter(ctx, &ctx->c_memory_max_usage, cnt->id, cnt->name, cnt->image_name, COUNTER_MEMORY_PREFIX, FIELDS_METRIC, COUNTER_MEMORY_MAX_USAGE,
+ DESCRIPTION_MEMORY_MAX_USAGE, NULL, cnt->memory_max_usage);
+ create_counter(ctx, &ctx->c_memory_limit, cnt->id, cnt->name, cnt->image_name, COUNTER_SPEC_MEMORY_PREFIX, FIELDS_METRIC, COUNTER_MEMORY_LIMIT,
+ DESCRIPTION_MEMORY_LIMIT, NULL, cnt->memory_limit);
+ create_gauge(ctx, &ctx->g_rss, cnt->id, cnt->name, cnt->image_name, COUNTER_MEMORY_PREFIX, FIELDS_METRIC, GAUGE_MEMORY_RSS,
+ DESCRIPTION_MEMORY_RSS, NULL, cnt->rss);
+ create_counter(ctx, &ctx->c_cpu_user, cnt->id, cnt->name, cnt->image_name, COUNTER_CPU_PREFIX, FIELDS_METRIC, COUNTER_CPU_USER,
+ DESCRIPTION_CPU_USER, NULL, cnt->cpu_user);
+ create_counter(ctx, &ctx->c_cpu, cnt->id, cnt->name, cnt->image_name, COUNTER_CPU_PREFIX, FIELDS_METRIC, COUNTER_CPU,
+ DESCRIPTION_CPU, NULL, cnt->cpu);
+ mk_list_foreach_safe(inner_head, inner_tmp, &cnt->net_data)
+ {
+ iface = mk_list_entry(inner_head, struct net_iface, _head);
+ create_counter(ctx, &ctx->rx_bytes, cnt->id, cnt->name, cnt->image_name, COUNTER_NETWORK_PREFIX, FIELDS_METRIC_WITH_IFACE, COUNTER_RX_BYTES,
+ DESCRIPTION_RX_BYTES, iface->name, iface->rx_bytes);
+ create_counter(ctx, &ctx->rx_errors, cnt->id, cnt->name, cnt->image_name, COUNTER_NETWORK_PREFIX, FIELDS_METRIC_WITH_IFACE, COUNTER_RX_ERRORS,
+ DESCRIPTION_RX_ERRORS, iface->name, iface->rx_errors);
+ create_counter(ctx, &ctx->tx_bytes, cnt->id, cnt->name, cnt->image_name, COUNTER_NETWORK_PREFIX, FIELDS_METRIC_WITH_IFACE, COUNTER_TX_BYTES,
+ DESCRIPTION_TX_BYTES, iface->name, iface->tx_bytes);
+ create_counter(ctx, &ctx->tx_errors, cnt->id, cnt->name, cnt->image_name, COUNTER_NETWORK_PREFIX, FIELDS_METRIC_WITH_IFACE, COUNTER_TX_ERRORS,
+ DESCRIPTION_TX_ERRORS, iface->name, iface->tx_errors);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Main collection function: destroy previously collected data, gather fresh container data
+ * and create the counters.
+ */
+static int scrape_metrics(struct flb_config *config, struct flb_in_metrics *ctx)
+{
+ uint64_t start_ts = cfl_time_now();
+ flb_plg_debug(ctx->ins, "Starting to scrape podman metrics");
+ if (destroy_container_list(ctx) == -1) {
+ flb_plg_error(ctx->ins, "Could not destroy previous container data");
+ return -1;
+ }
+
+ if (collect_container_data(ctx) == -1) {
+ flb_plg_error(ctx->ins, "Could not collect container ids");
+ return -1;
+ }
+
+ if (collect_sysfs_directories(ctx, ctx->sysfs_path) == -1)
+ {
+ flb_plg_error(ctx->ins, "Could not collect sysfs data");
+ return -1;
+ }
+
+ if (ctx->cgroup_version == CGROUP_V1) {
+ if (fill_counters_with_sysfs_data_v1(ctx) == -1) {
+ flb_plg_error(ctx->ins, "Could not collect V1 sysfs data");
+ return -1;
+ }
+ }
+ else if (ctx->cgroup_version == CGROUP_V2) {
+ if (fill_counters_with_sysfs_data_v2(ctx) == -1) {
+ flb_plg_error(ctx->ins, "Could not collect V2 sysfs data");
+ return -1;
+ }
+ }
+
+ if (create_counters(ctx) == -1) {
+ flb_plg_error(ctx->ins, "Could not create container counters");
+ return -1;
+ }
+
+ if (flb_input_metrics_append(ctx->ins, NULL, 0, ctx->ins->cmt) == -1) {
+ flb_plg_error(ctx->ins, "Could not append metrics");
+ return -1;
+ }
+
+ flb_plg_info(ctx->ins, "Scraping metrics took %luns", cfl_time_now() - start_ts);
+ return 0;
+}
+
+/*
+ * Call the scrape_metrics function every `scrape_interval` seconds.
+ */
+static int cb_metrics_collect_runtime(struct flb_input_instance *ins, struct flb_config *config, void *in_context)
+{
+ return scrape_metrics(config, in_context);
+}
+
+/*
+ * Initialize the plugin, set up the config file path and (optionally) scrape container
+ * data at startup (if `scrape_on_start` is set).
+ */
+static int in_metrics_init(struct flb_input_instance *in, struct flb_config *config, void *data)
+{
+ struct flb_in_metrics *ctx;
+ int coll_fd_runtime;
+
+ ctx = flb_calloc(1, sizeof(struct flb_in_metrics));
+ if (!ctx) {
+ return -1;
+ }
+ ctx->ins = in;
+
+ ctx->c_memory_usage = NULL;
+ ctx->c_memory_max_usage = NULL;
+ ctx->g_rss = NULL;
+ ctx->c_memory_limit = NULL;
+ ctx->c_cpu_user = NULL;
+ ctx->c_cpu = NULL;
+ ctx->rx_bytes = NULL;
+ ctx->rx_errors = NULL;
+ ctx->tx_bytes = NULL;
+ ctx->tx_errors = NULL;
+
+ if (flb_input_config_map_set(in, (void *) ctx) == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ flb_input_set_context(in, ctx);
+ coll_fd_runtime = flb_input_set_collector_time(in, cb_metrics_collect_runtime, ctx->scrape_interval, 0, config);
+ if (coll_fd_runtime == -1) {
+ flb_plg_error(ctx->ins, "Could not set collector for podman metrics plugin");
+ return -1;
+ }
+ ctx->coll_fd_runtime = coll_fd_runtime;
+
+ if (ctx->podman_config_path) {
+ flb_plg_info(ctx->ins, "Using config file %s", ctx->podman_config_path);
+ ctx->config = flb_sds_create(ctx->podman_config_path);
+ }
+ else {
+ flb_plg_info(ctx->ins, "Using default config file %s", PODMAN_CONFIG_DEFAULT_PATH);
+ ctx->config = flb_sds_create(PODMAN_CONFIG_DEFAULT_PATH);
+ }
+
+ if (get_cgroup_version(ctx) == CGROUP_V2) {
+ flb_plg_info(ctx->ins, "Detected cgroups v2");
+ ctx->cgroup_version = CGROUP_V2;
+ }
+ else {
+ flb_plg_info(ctx->ins, "Detected cgroups v1");
+ ctx->cgroup_version = CGROUP_V1;
+ }
+
+ mk_list_init(&ctx->items);
+ mk_list_init(&ctx->sysfs_items);
+
+ if (ctx->scrape_interval >= 2 && ctx->scrape_on_start) {
+ flb_plg_info(ctx->ins, "Generating podman metrics (initial scrape)");
+ if (scrape_metrics(config, ctx) == -1) {
+ flb_plg_error(ctx->ins, "Could not start collector for podman metrics plugin");
+ flb_sds_destroy(ctx->config);
+ destroy_container_list(ctx);
+ flb_free(ctx);
+ return -1;
+ }
+ }
+
+ flb_plg_info(ctx->ins, "Generating podman metrics");
+
+ return 0;
+}
+
+/*
+ * Function called at plugin exit - destroy collected container data list.
+ */
+static int in_metrics_exit(void *data, struct flb_config *config)
+{
+ struct flb_in_metrics *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ flb_sds_destroy(ctx->config);
+ destroy_container_list(ctx);
+ flb_free(ctx);
+ return 0;
+}
+
+/*
+ * Function called at plugin pause.
+ */
+static void in_metrics_pause(void *data, struct flb_config *config)
+{
+ struct flb_in_metrics *ctx = data;
+ flb_input_collector_pause(ctx->coll_fd_runtime, ctx->ins);
+}
+
+/*
+ * Function called at plugin resume.
+ */
+static void in_metrics_resume(void *data, struct flb_config *config)
+{
+ struct flb_in_metrics *ctx = data;
+ flb_input_collector_resume(ctx->coll_fd_runtime, ctx->ins);
+}
diff --git a/src/fluent-bit/plugins/in_podman_metrics/podman_metrics.h b/src/fluent-bit/plugins/in_podman_metrics/podman_metrics.h
new file mode 100644
index 000000000..3b02d24ed
--- /dev/null
+++ b/src/fluent-bit/plugins/in_podman_metrics/podman_metrics.h
@@ -0,0 +1,98 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_PODMAN_METRICS_H
+#define FLB_IN_PODMAN_METRICS_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_jsmn.h>
+
+#include <monkey/mk_core/mk_list.h>
+
+#include "podman_metrics_config.h"
+
+static int collect_container_data(struct flb_in_metrics *ctx);
+static int add_container_to_list(struct flb_in_metrics *ctx, flb_sds_t id, flb_sds_t name, flb_sds_t image_name);
+static int destroy_container_list(struct flb_in_metrics *ctx);
+
+static int create_counter(struct flb_in_metrics *ctx, struct cmt_counter **counter, flb_sds_t id, flb_sds_t name, flb_sds_t image_name, flb_sds_t metric_prefix,
+                          flb_sds_t *fields, flb_sds_t metric_name, flb_sds_t description, flb_sds_t interface, uint64_t value);
+static int create_gauge(struct flb_in_metrics *ctx, struct cmt_gauge **gauge, flb_sds_t id, flb_sds_t name, flb_sds_t image_name, flb_sds_t metric_prefix,
+ flb_sds_t *fields, flb_sds_t metric_name, flb_sds_t description, flb_sds_t interface, uint64_t value);
+static int create_counters(struct flb_in_metrics *ctx);
+
+static int scrape_metrics(struct flb_config *config, struct flb_in_metrics *ctx);
+
+static int cb_metrics_collect_runtime(struct flb_input_instance *ins, struct flb_config *config, void *in_context);
+static int in_metrics_init(struct flb_input_instance *in, struct flb_config *config, void *data);
+static int in_metrics_exit(void *data, struct flb_config *config);
+static void in_metrics_pause(void *data, struct flb_config *config);
+static void in_metrics_resume(void *data, struct flb_config *config);
+
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_TIME, "scrape_interval", "30",
+ 0, FLB_TRUE, offsetof(struct flb_in_metrics, scrape_interval),
+      "Scrape interval to collect the metrics of podman containers "
+      "(defaults to 30s)"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "scrape_on_start", "false",
+ 0, FLB_TRUE, offsetof(struct flb_in_metrics, scrape_on_start),
+ "Scrape metrics upon start, useful to avoid waiting for 'scrape_interval' "
+ "for the first round of metrics."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "path.config", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_metrics, podman_config_path),
+ "Path to podman config file"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "path.sysfs", SYSFS_PATH,
+ 0, FLB_TRUE, offsetof(struct flb_in_metrics, sysfs_path),
+ "Path to sysfs subsystem directory"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "path.procfs", PROCFS_PATH,
+ 0, FLB_TRUE, offsetof(struct flb_in_metrics, procfs_path),
+ "Path to proc subsystem directory"
+ },
+
+ /* EOF */
+ {0}
+};
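+
+/*
+ * A minimal, illustrative configuration snippet for this input (values are examples,
+ * not requirements):
+ *
+ *   [INPUT]
+ *       name             podman_metrics
+ *       scrape_interval  30
+ *       scrape_on_start  true
+ *       path.config      /var/lib/containers/storage/overlay-containers/containers.json
+ */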
+
+struct flb_input_plugin in_podman_metrics_plugin = {
+ .name = "podman_metrics",
+ .description = "Podman metrics",
+ .cb_init = in_metrics_init,
+ .cb_pre_run = NULL,
+ .cb_flush_buf = NULL,
+ .config_map = config_map,
+ .cb_pause = in_metrics_pause,
+ .cb_resume = in_metrics_resume,
+ .cb_exit = in_metrics_exit
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_podman_metrics/podman_metrics_config.h b/src/fluent-bit/plugins/in_podman_metrics/podman_metrics_config.h
new file mode 100644
index 000000000..fabdc0a8d
--- /dev/null
+++ b/src/fluent-bit/plugins/in_podman_metrics/podman_metrics_config.h
@@ -0,0 +1,211 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_PODMAN_METRICS_CONFIG_H
+#define FLB_IN_PODMAN_METRICS_CONFIG_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_jsmn.h>
+
+#include <monkey/mk_core/mk_list.h>
+
+/* Buffers and sizes */
+#define JSON_TOKENS 2048
+#define CONTAINER_NAME_SIZE 50
+#define CONTAINER_ID_SIZE 80
+#define CONTAINER_METADATA_SIZE 512
+#define IMAGE_NAME_SIZE 512
+#define PID_BUFFER_SIZE 21
+#define SYSFS_FILE_PATH_SIZE 512
+#define PROCFS_FILE_PATH_SIZE 512
+#define CGROUP_PATH_SIZE 25
+
+/* Special paths for sysfs traversal */
+#define CURRENT_DIR "."
+#define PREV_DIR ".."
+
+/* Ignored network interfaces */
+#define VETH_INTERFACE "veth"
+
+#define JSON_FIELD_NAMES "names"
+#define JSON_FIELD_ID "id"
+#define JSON_FIELD_METADATA "metadata"
+
+#define JSON_SUBFIELD_IMAGE_NAME "image-name\\\":\\\""
+#define JSON_SUBFIELD_SIZE_IMAGE_NAME 15
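+
+/*
+ * The metadata field stores JSON encoded as an escaped string, so the marker above matches
+ * the literal character sequence  image-name\":\"  which is 15 characters long, the value
+ * of JSON_SUBFIELD_SIZE_IMAGE_NAME.
+ */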
+
+#define CGROUP_V2_PATH "cgroup.controllers"
+#define CGROUP_V1 1
+#define CGROUP_V2 2
+
+/* Paths in /proc subsystem */
+#define PROCFS_PATH "/proc"
+#define PROC_NET_SUFFIX "net/dev"
+
+/* Paths in /sys subsystem */
+#define SYSFS_PATH "/sys/fs/cgroup"
+#define V1_SYSFS_MEMORY "memory"
+#define V1_SYSFS_CPU "cpuacct"
+#define V1_SYSFS_SYSTEMD "systemd"
+#define SYSFS_CONTAINER_PREFIX "libpod"
+#define SYSFS_LIBPOD_PARENT "libpod_parent"
+#define SYSFS_CONMON "conmon"
+
+/* Default podman config file path, used when one is not provided */
+#define PODMAN_CONFIG_DEFAULT_PATH "/var/lib/containers/storage/overlay-containers/containers.json"
+
+/* Markers of network values in /proc/<pid>/net/dev */
+#define DEV_NET_IGNORE_LINES 2
+#define DEV_NET_NAME 0
+#define DEV_NET_RX_BYTES 1
+#define DEV_NET_RX_ERRORS 3
+#define DEV_NET_TX_BYTES 9
+#define DEV_NET_TX_ERRORS 11
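+
+/*
+ * The field indexes above refer to the whitespace-separated columns of a typical
+ * /proc/<pid>/net/dev row, for example (illustrative values):
+ *
+ *   eth0: 123456  789  0  0  0  0  0  0  654321  432  0  0  0  0  0  0
+ *
+ * i.e. field 0 is the interface name, fields 1/3 the receive bytes/errors and
+ * fields 9/11 the transmit bytes/errors.
+ */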
+
+/* Key names in .stat files */
+#define STAT_KEY_RSS "rss"
+#define STAT_KEY_CPU "usage_usec"
+#define STAT_KEY_CPU_USER "user_usec"
+
+/* Static lists of fields in counters or gauges */
+#define FIELDS_METRIC (char*[3]){"id", "name", "image" }
+#define FIELDS_METRIC_WITH_IFACE (char*[4]){"id", "name", "image", "interface" }
+
+/* Files from sysfs containing required data (cgroups v1) */
+#define V1_SYSFS_FILE_MEMORY "memory.usage_in_bytes"
+#define V1_SYSFS_FILE_MAX_MEMORY "memory.max_usage_in_bytes"
+#define V1_SYSFS_FILE_MEMORY_STAT "memory.stat"
+#define V1_SYSFS_FILE_MEMORY_LIMIT "memory.limit_in_bytes"
+#define V1_SYSFS_FILE_CPU_USER "cpuacct.usage_user"
+#define V1_SYSFS_FILE_CPU "cpuacct.usage"
+#define V1_SYSFS_FILE_PIDS "cgroup.procs"
+
+/* Files from sysfs containing required data (cgroups v2) */
+#define V2_SYSFS_FILE_MEMORY "memory.current"
+#define V2_SYSFS_FILE_MAX_MEMORY "memory.peak"
+#define V2_SYSFS_FILE_MEMORY_STAT "memory.stat"
+#define V2_SYSFS_FILE_MEMORY_LIMIT "memory.max"
+#define V2_SYSFS_FILE_CPU_STAT "cpu.stat"
+#define V2_SYSFS_FILE_PIDS "cgroup.procs"
+#define V2_SYSFS_FILE_PIDS_ALT "containers/cgroup.procs"
+
+/* Values used to construct counters/gauges names and descriptions */
+#define COUNTER_PREFIX "container"
+
+#define COUNTER_MEMORY_PREFIX "memory"
+#define COUNTER_SPEC_MEMORY_PREFIX "spec_memory"
+#define COUNTER_MEMORY_USAGE "usage_bytes"
+#define DESCRIPTION_MEMORY_USAGE "Container memory usage in bytes"
+#define COUNTER_MEMORY_MAX_USAGE "max_usage_bytes"
+#define DESCRIPTION_MEMORY_MAX_USAGE "Container max memory usage in bytes"
+#define COUNTER_MEMORY_LIMIT "limit_bytes"
+#define DESCRIPTION_MEMORY_LIMIT "Container memory limit in bytes"
+#define GAUGE_MEMORY_RSS "rss"
+#define DESCRIPTION_MEMORY_RSS "Container RSS in bytes"
+
+#define COUNTER_CPU_PREFIX "cpu"
+#define COUNTER_CPU_USER "user_seconds_total"
+#define DESCRIPTION_CPU_USER "Container cpu usage in seconds in user mode"
+#define COUNTER_CPU "usage_seconds_total"
+#define DESCRIPTION_CPU "Container cpu usage in seconds"
+
+#define COUNTER_NETWORK_PREFIX "network"
+#define COUNTER_RX_BYTES "receive_bytes_total"
+#define DESCRIPTION_RX_BYTES "Network received bytes"
+#define COUNTER_RX_ERRORS "receive_errors_total"
+#define DESCRIPTION_RX_ERRORS "Network received errors"
+#define COUNTER_TX_BYTES "transmit_bytes_total"
+#define DESCRIPTION_TX_BYTES             "Network transmitted bytes"
+#define COUNTER_TX_ERRORS "transmit_errors_total"
+#define DESCRIPTION_TX_ERRORS            "Network transmitted errors"
+
+
+struct net_iface {
+ flb_sds_t name;
+ uint64_t rx_bytes;
+ uint64_t rx_errors;
+ uint64_t tx_bytes;
+ uint64_t tx_errors;
+ struct mk_list _head;
+};
+
+struct container {
+ flb_sds_t name;
+ flb_sds_t id;
+ flb_sds_t image_name;
+ struct mk_list _head;
+
+ uint64_t memory_usage;
+ uint64_t memory_max_usage;
+ uint64_t memory_limit;
+ uint64_t cpu;
+ uint64_t cpu_user;
+ uint64_t rss;
+
+ struct mk_list net_data;
+};
+
+struct sysfs_path {
+ flb_sds_t path;
+ struct mk_list _head;
+};
+
+struct flb_in_metrics {
+ /* config map options */
+ int scrape_on_start;
+ int scrape_interval;
+ flb_sds_t podman_config_path;
+
+ /* container list */
+ struct mk_list items;
+
+ /* sysfs path list */
+ struct mk_list sysfs_items;
+
+ /* counters */
+ struct cmt_counter *c_memory_usage;
+ struct cmt_counter *c_memory_max_usage;
+ struct cmt_counter *c_memory_limit;
+ struct cmt_gauge *g_rss;
+ struct cmt_counter *c_cpu_user;
+ struct cmt_counter *c_cpu;
+ struct cmt_counter *rx_bytes;
+ struct cmt_counter *rx_errors;
+ struct cmt_counter *tx_bytes;
+ struct cmt_counter *tx_errors;
+
+ /* cgroup version used by host */
+ int cgroup_version;
+
+ /* podman config file path */
+ flb_sds_t config;
+
+ /* proc and sys paths, overwriting mostly for testing */
+ flb_sds_t sysfs_path;
+ flb_sds_t procfs_path;
+
+ /* internal */
+ int coll_fd_runtime;
+ struct flb_input_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_podman_metrics/podman_metrics_data.c b/src/fluent-bit/plugins/in_podman_metrics/podman_metrics_data.c
new file mode 100644
index 000000000..e747fe4b8
--- /dev/null
+++ b/src/fluent-bit/plugins/in_podman_metrics/podman_metrics_data.c
@@ -0,0 +1,407 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_metrics.h>
+#include <fluent-bit/flb_metrics_exporter.h>
+#include <fluent-bit/flb_jsmn.h>
+
+#include <monkey/mk_core/mk_list.h>
+
+#include "podman_metrics_data.h"
+#include "podman_metrics_config.h"
+
+/*
+ * Read a uint64_t value from the given path. If this function fails, it
+ * returns UINT64_MAX, which is later interpreted as an invalid counter value
+ * (it cannot return 0, because 0 is a valid counter value).
+ */
+uint64_t read_from_file(struct flb_in_metrics *ctx, flb_sds_t path)
+{
+ int c;
+ uint64_t value = UINT64_MAX;
+ FILE *fp;
+
+ fp = fopen(path, "r");
+ if (!fp) {
+ flb_plg_warn(ctx->ins, "Failed to read %s", path);
+ return value;
+ }
+
+ c = fscanf(fp, "%lu", &value);
+ fclose(fp);
+ if (c != 1) {
+ flb_plg_warn(ctx->ins, "Failed to read a number from %s", path);
+ return value;
+ }
+ return value;
+}
+
+/*
+ * Read a uint64_t value from the given path. Look for a "key <VALUE>" (or "key: <VALUE>")
+ * line and return the value. If this function fails, it returns UINT64_MAX, which is later
+ * interpreted as an invalid counter value (it cannot return 0, because 0 is a valid counter
+ * value).
+ */
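+/*
+ * Example: for a cgroups v1 memory.stat file containing a line such as "rss 4096",
+ * calling this function with the key "rss" would return 4096 (illustrative value).
+ */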
+uint64_t read_key_value_from_file(struct flb_in_metrics *ctx, flb_sds_t path, flb_sds_t key)
+{
+ uint64_t value = UINT64_MAX;
+ FILE *fp;
+ flb_sds_t line = NULL;
+ flb_sds_t field = NULL;
+ flb_sds_t line2 = NULL;
+ size_t len = 0;
+ ssize_t read = 0;
+ int key_found = 0;
+
+ fp = fopen(path, "r");
+ if (!fp) {
+ flb_plg_warn(ctx->ins, "Failed to read %s", path);
+ return value;
+ }
+
+ while ((read = getline(&line, &len, fp)) != -1) {
+ line2 = line;
+
+ while( (field = strsep(&line2, " :")) != NULL ) {
+ if( *field == '\0' ) {
+ continue;
+ }
+ if (strcmp(field, key) == 0) {
+ key_found = 1;
+ continue;
+ }
+ if (key_found) {
+ value = strtoull(field, NULL, 10);
+ flb_plg_trace(ctx->ins, "Found key %s: %lu", key, value);
+ fclose(fp);
+                /* line2 points into the getline() buffer, so only the buffer itself is freed */
+                flb_free(line);
+ return value;
+ }
+
+ }
+ flb_free(line2);
+ }
+ flb_free(line);
+ flb_plg_warn(ctx->ins, "%s not found in %s", key, path);
+ fclose(fp);
+ return value;
+}
+
+/*
+ * Read a uint64_t value from a path previously picked from the sysfs directory list.
+ * If key is not NULL, it is used to search the file for a "key value" pair instead of
+ * reading a single value.
+ */
+uint64_t get_data_from_sysfs(struct flb_in_metrics *ctx, flb_sds_t dir, flb_sds_t name, flb_sds_t key)
+{
+ char path[SYSFS_FILE_PATH_SIZE];
+ uint64_t data = UINT64_MAX;
+ path[0]=0;
+
+ if (dir == NULL) {
+ return data;
+ }
+
+ snprintf(path, sizeof(path), "%s/%s", dir, name);
+
+ if (key == NULL) {
+ data = read_from_file(ctx, path);
+ }
+ else {
+ data = read_key_value_from_file(ctx, path, key);
+ }
+ flb_plg_debug(ctx->ins, "%s: %lu", path, data);
+ return data;
+}
+
+/*
+ * Check if container sysfs data is present in the previously generated list of sysfs directories.
+ * For cgroups v1, use the subsystem (a directory such as memory) to search for the full path.
+ */
+int get_container_sysfs_subdirectory(struct flb_in_metrics *ctx, flb_sds_t id, flb_sds_t subsystem, flb_sds_t *path)
+{
+ struct sysfs_path *pth;
+ struct mk_list *head;
+ struct mk_list *tmp;
+
+ mk_list_foreach_safe(head, tmp, &ctx->sysfs_items) {
+ pth = mk_list_entry(head, struct sysfs_path, _head);
+ if (strstr(pth->path, id) != 0) {
+ if (subsystem != NULL && strstr(pth->path, subsystem) == 0) {
+ continue;
+ }
+ *path = pth->path;
+ flb_plg_trace(ctx->ins, "Found path for %s: %s", id, pth->path);
+ return 0;
+ }
+ }
+ *path = NULL;
+ return -1;
+}
+
+/*
+ * Read data from the /proc/ subsystem containing all network usage data for the given pid (so,
+ * in this case, for the container). The fields are in fixed positions, so only specific fields
+ * are checked in each row.
+ */
+int get_net_data_from_proc(struct flb_in_metrics *ctx, struct container *cnt, uint64_t pid) {
+ char path[PROCFS_FILE_PATH_SIZE];
+ char pid_buff[PID_BUFFER_SIZE];
+
+ FILE * fp;
+ flb_sds_t line = NULL;
+ flb_sds_t field = NULL;
+ flb_sds_t line2 = NULL;
+
+ size_t len = 0;
+ ssize_t read = 0;
+ int curr_line = 0;
+ int curr_field = 0;
+
+ struct net_iface *iface;
+
+ path[0]=0;
+ sprintf(pid_buff, "%" PRIu64, pid);
+ snprintf(path, sizeof(path), "%s/%s/%s", ctx->procfs_path, pid_buff, PROC_NET_SUFFIX);
+
+ fp = fopen(path, "r");
+ if (fp == NULL) {
+ flb_plg_warn(ctx->ins, "Failed to open %s", path);
+ return -1;
+ }
+
+ while ((read = getline(&line, &len, fp)) != -1) {
+ line2 = line;
+ if (curr_line++ <= DEV_NET_IGNORE_LINES) {
+ flb_plg_trace(ctx->ins, "Ignoring line %d in %s", curr_line, path);
+ continue;
+ }
+
+ iface = flb_malloc(sizeof(struct net_iface));
+ if (!iface) {
+ flb_errno();
+ return -1;
+ }
+ iface->name = NULL;
+ iface->rx_bytes = UINT64_MAX;
+ iface->rx_errors = UINT64_MAX;
+ iface->tx_bytes = UINT64_MAX;
+ iface->tx_errors = UINT64_MAX;
+
+
+ while( (field = strsep(&line2, " ")) != NULL ) {
+ if( *field == '\0' ) {
+ continue;
+ }
+ switch (curr_field++)
+ {
+ case DEV_NET_NAME:
+ /* Remove ':' from the end of name */
+ iface->name = flb_sds_create_len(field, strlen(field)-1);
+ flb_plg_trace(ctx->ins, "Reading name from %s: %s", path, iface->name);
+ break;
+
+ case DEV_NET_RX_BYTES:
+ iface->rx_bytes = strtoull(field, NULL, 10);
+ flb_plg_trace(ctx->ins, "Reading rx_bytes from %s: %lu", path, iface->rx_bytes);
+ break;
+
+ case DEV_NET_RX_ERRORS:
+ iface->rx_errors = strtoull(field, NULL, 10);
+ flb_plg_trace(ctx->ins, "Reading rx_errors from %s: %lu", path, iface->rx_errors);
+ break;
+
+ case DEV_NET_TX_BYTES:
+ iface->tx_bytes = strtoull(field, NULL, 10);
+ flb_plg_trace(ctx->ins, "Reading tx_bytes from %s: %lu", path, iface->tx_bytes);
+ break;
+
+ case DEV_NET_TX_ERRORS:
+ iface->tx_errors = strtoull(field, NULL, 10);
+ flb_plg_trace(ctx->ins, "Reading tx_errors from %s: %lu", path, iface->tx_errors);
+ break;
+ }
+ }
+ flb_free(line2);
+ curr_field = 0;
+
+ /* Ignore virtual interfaces connected to podman containers */
+ if (name_starts_with(iface->name, VETH_INTERFACE) == 0) {
+ flb_plg_trace(ctx->ins, "Ignoring virtual interface %s", iface->name);
+ flb_sds_destroy(iface->name);
+ flb_free(iface);
+ continue;
+ }
+ mk_list_add(&iface->_head, &cnt->net_data);
+ }
+
+ flb_free(line);
+ fclose(fp);
+ return 0;
+}
+
+/*
+ * Recursively iterate over directories in the sysfs tree and collect all libpod-* directories.
+ */
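+/*
+ * On a typical cgroups v2 host the collected entries look roughly like the following
+ * (the exact layout may vary by distribution and podman version):
+ *
+ *   /sys/fs/cgroup/machine.slice/libpod-<container id>.scope
+ */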
+int collect_sysfs_directories(struct flb_in_metrics *ctx, flb_sds_t name)
+{
+ char path[SYSFS_FILE_PATH_SIZE];
+ path[0] = 0;
+ DIR *dir;
+ struct dirent *entry;
+ struct sysfs_path *pth;
+
+ if (!(dir = opendir(name))) {
+ flb_plg_warn(ctx->ins, "Failed to open %s", name);
+ return -1;
+ }
+
+ while ((entry = readdir(dir)) != NULL) {
+ if (entry->d_type == DT_DIR) {
+ if (strcmp(entry->d_name, CURRENT_DIR) == 0 || strcmp(entry->d_name, PREV_DIR) == 0) {
+ continue;
+ }
+ snprintf(path, sizeof(path), "%s/%s", name, entry->d_name);
+
+ if (name_starts_with(entry->d_name, SYSFS_CONTAINER_PREFIX) == 0 &&
+ strcmp(entry->d_name, SYSFS_LIBPOD_PARENT) != 0 &&
+ strstr(entry->d_name, SYSFS_CONMON) == 0) {
+ pth = flb_malloc(sizeof(struct sysfs_path));
+ if (!pth) {
+ flb_errno();
+ return -1;
+ }
+ pth->path = flb_sds_create(path);
+ flb_plg_debug(ctx->ins, "Collected sysfs directory: %s", pth->path);
+ mk_list_add(&pth->_head, &ctx->sysfs_items);
+ }
+
+ collect_sysfs_directories(ctx, path);
+ }
+ }
+ closedir(dir);
+ return 0;
+}
+
+/*
+ * Iterate over the previously created container list. For each entry, generate its
+ * paths in the sysfs directory tree. From these paths, grab data about container metrics
+ * and put it in this entry.
+ * This function is used for cgroups v1 - meaning the files live in different per-subsystem directories.
+ */
+int fill_counters_with_sysfs_data_v1(struct flb_in_metrics *ctx)
+{
+ uint64_t pid;
+ flb_sds_t mem_path;
+ flb_sds_t cpu_path;
+ flb_sds_t systemd_path;
+ struct container *cnt;
+ struct mk_list *head;
+ struct mk_list *tmp;
+
+ mk_list_foreach_safe(head, tmp, &ctx->items) {
+ cnt = mk_list_entry(head, struct container, _head);
+
+ get_container_sysfs_subdirectory(ctx, cnt->id, V1_SYSFS_MEMORY, &mem_path);
+ get_container_sysfs_subdirectory(ctx, cnt->id, V1_SYSFS_CPU, &cpu_path);
+ get_container_sysfs_subdirectory(ctx, cnt->id, V1_SYSFS_SYSTEMD, &systemd_path);
+
+ cnt->memory_usage = get_data_from_sysfs(ctx, mem_path, V1_SYSFS_FILE_MEMORY, NULL);
+ cnt->memory_max_usage = get_data_from_sysfs(ctx, mem_path, V1_SYSFS_FILE_MAX_MEMORY, NULL);
+ cnt->rss = get_data_from_sysfs(ctx, mem_path, V1_SYSFS_FILE_MEMORY_STAT, STAT_KEY_RSS);
+ cnt->memory_limit = get_data_from_sysfs(ctx, mem_path, V1_SYSFS_FILE_MEMORY_LIMIT, NULL);
+ cnt->cpu_user = get_data_from_sysfs(ctx, cpu_path, V1_SYSFS_FILE_CPU_USER, NULL);
+ cnt->cpu = get_data_from_sysfs(ctx, cpu_path, V1_SYSFS_FILE_CPU, NULL);
+ pid = get_data_from_sysfs(ctx, systemd_path, V1_SYSFS_FILE_PIDS, NULL);
+ if (pid && pid != UINT64_MAX) {
+ get_net_data_from_proc(ctx, cnt, pid);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "Failed to collect PID for %s", cnt->name);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Iterate over the previously created container list. For each entry, generate its
+ * path in the sysfs directory tree. From this path, grab data about container metrics
+ * and put it in this entry.
+ * This function is used for cgroups v2 - meaning all files live in the same directory.
+ */
+int fill_counters_with_sysfs_data_v2(struct flb_in_metrics *ctx)
+{
+ uint64_t pid;
+ flb_sds_t path;
+ struct container *cnt;
+ struct mk_list *head;
+ struct mk_list *tmp;
+
+ mk_list_foreach_safe(head, tmp, &ctx->items) {
+ cnt = mk_list_entry(head, struct container, _head);
+
+ get_container_sysfs_subdirectory(ctx, cnt->id, NULL, &path);
+
+ cnt->memory_usage = get_data_from_sysfs(ctx, path, V2_SYSFS_FILE_MEMORY, NULL);
+ cnt->memory_max_usage = get_data_from_sysfs(ctx, path, V2_SYSFS_FILE_MAX_MEMORY, NULL);
+ cnt->rss = get_data_from_sysfs(ctx, path, V2_SYSFS_FILE_MEMORY_STAT, STAT_KEY_RSS);
+ cnt->memory_limit = get_data_from_sysfs(ctx, path, V2_SYSFS_FILE_MEMORY_LIMIT, NULL);
+ cnt->cpu_user = get_data_from_sysfs(ctx, path, V2_SYSFS_FILE_CPU_STAT, STAT_KEY_CPU_USER);
+ cnt->cpu = get_data_from_sysfs(ctx, path, V2_SYSFS_FILE_CPU_STAT, STAT_KEY_CPU);
+ pid = get_data_from_sysfs(ctx, path, V2_SYSFS_FILE_PIDS, NULL);
+ if (!pid || pid == UINT64_MAX) {
+ pid = get_data_from_sysfs(ctx, path, V2_SYSFS_FILE_PIDS_ALT, NULL);
+ }
+ if (pid && pid != UINT64_MAX) {
+ get_net_data_from_proc(ctx, cnt, pid);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "Failed to collect PID for %s", cnt->name);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Check if flb_sds_t starts with given string
+ */
+int name_starts_with(flb_sds_t s, const char *str)
+{
+    size_t len = strlen(str);
+
+    /* check s for NULL before reading its sds length */
+    if (s == NULL || len > flb_sds_len(s)) {
+        return -1;
+    }
+
+ return strncmp(s, str, len);
+}
+
+/*
+ * Determine which cgroup version is used on the host by checking for the existence of
+ * the cgroup.controllers file (if it exists, it is v2).
+ */
+int get_cgroup_version(struct flb_in_metrics *ctx)
+{
+ char path[SYSFS_FILE_PATH_SIZE];
+ snprintf(path, sizeof(path), "%s/%s", ctx->sysfs_path, CGROUP_V2_PATH);
+ return (access(path, F_OK) == 0) ? CGROUP_V2 : CGROUP_V1;
+}
diff --git a/src/fluent-bit/plugins/in_podman_metrics/podman_metrics_data.h b/src/fluent-bit/plugins/in_podman_metrics/podman_metrics_data.h
new file mode 100644
index 000000000..93fa6de00
--- /dev/null
+++ b/src/fluent-bit/plugins/in_podman_metrics/podman_metrics_data.h
@@ -0,0 +1,51 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_PODMAN_METRICS_DATA_H
+#define FLB_IN_PODMAN_METRICS_DATA_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_jsmn.h>
+
+#include <dirent.h>
+#include <monkey/mk_core/mk_list.h>
+
+#include "podman_metrics_config.h"
+
+int destroy_counter(struct flb_in_metrics *ctx, struct cmt_counter **c);
+int destroy_gauge(struct flb_in_metrics *ctx, struct cmt_gauge **g);
+
+uint64_t read_from_file(struct flb_in_metrics *ctx, flb_sds_t path);
+uint64_t read_key_value_from_file(struct flb_in_metrics *ctx, flb_sds_t path, flb_sds_t key);
+uint64_t get_data_from_sysfs(struct flb_in_metrics *ctx, flb_sds_t dir, flb_sds_t name, flb_sds_t key);
+
+int get_container_sysfs_subdirectory(struct flb_in_metrics *ctx, flb_sds_t id, flb_sds_t subsystem, flb_sds_t *path);
+int get_net_data_from_proc(struct flb_in_metrics *ctx, struct container *cnt, uint64_t pid);
+
+int collect_sysfs_directories(struct flb_in_metrics *ctx, flb_sds_t name);
+int fill_counters_with_sysfs_data_v1(struct flb_in_metrics *ctx);
+int fill_counters_with_sysfs_data_v2(struct flb_in_metrics *ctx);
+
+int name_starts_with(flb_sds_t s, const char *str);
+int get_cgroup_version(struct flb_in_metrics *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_proc/CMakeLists.txt b/src/fluent-bit/plugins/in_proc/CMakeLists.txt
new file mode 100644
index 000000000..92f1071fc
--- /dev/null
+++ b/src/fluent-bit/plugins/in_proc/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ in_proc.c)
+
+FLB_PLUGIN(in_proc "${src}" "")
diff --git a/src/fluent-bit/plugins/in_proc/in_proc.c b/src/fluent-bit/plugins/in_proc/in_proc.c
new file mode 100644
index 000000000..ff1df585c
--- /dev/null
+++ b/src/fluent-bit/plugins/in_proc/in_proc.c
@@ -0,0 +1,534 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_pack.h>
+#include <msgpack.h>
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <errno.h>
+#include <glob.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <dirent.h>
+
+#include "in_proc.h"
+
+struct flb_in_proc_mem_offset mem_linux[] = {
+ {
+ "Peak",
+ "mem.VmPeak",
+ offsetof(struct flb_in_proc_mem_linux, vmpeak)
+ },
+ {
+ "Size",
+ "mem.VmSize",
+ offsetof(struct flb_in_proc_mem_linux, vmsize)
+ },
+ {
+ "Lck",
+ "mem.VmLck",
+ offsetof(struct flb_in_proc_mem_linux, vmlck)
+ },
+ {
+ "HWM",
+ "mem.VmHWM",
+ offsetof(struct flb_in_proc_mem_linux, vmhwm)
+ },
+ {
+ "RSS",
+ "mem.VmRSS",
+ offsetof(struct flb_in_proc_mem_linux, vmrss)
+ },
+ {
+ "Data",
+ "mem.VmData",
+ offsetof(struct flb_in_proc_mem_linux, vmdata)
+ },
+ {
+ "Stk",
+ "mem.VmStk",
+ offsetof(struct flb_in_proc_mem_linux, vmstk)
+ },
+ {
+ "Exe",
+ "mem.VmExe",
+ offsetof(struct flb_in_proc_mem_linux, vmexe)
+ },
+ {
+ "Lib",
+ "mem.VmLib",
+ offsetof(struct flb_in_proc_mem_linux, vmlib)
+ },
+ {
+ "PTE",
+ "mem.VmPTE",
+ offsetof(struct flb_in_proc_mem_linux, vmpte)
+ },
+ {
+ "Swap",
+ "mem.VmSwap",
+ offsetof(struct flb_in_proc_mem_linux, vmswap)
+ },
+ {NULL, NULL, 0}/* end of array */
+};
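+
+/*
+ * Each entry above maps a Vm* line from /proc/<pid>/status (for example
+ * "VmRSS:      1234 kB") to the key used when the value is appended to the record.
+ */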
+
+
+
+static pid_t get_pid_from_procname_linux(struct flb_in_proc_config *ctx,
+ const char* proc)
+{
+ pid_t ret = -1;
+ glob_t glb;
+ int i;
+ int fd = -1;
+ long ret_scan = -1;
+ int ret_glb = -1;
+ ssize_t count;
+
+ char cmdname[FLB_CMD_LEN];
+ char* bname = NULL;
+
+ ret_glb = glob("/proc/*/cmdline", 0 ,NULL, &glb);
+ if (ret_glb != 0) {
+ switch(ret_glb){
+ case GLOB_NOSPACE:
+ flb_plg_warn(ctx->ins, "glob: no space");
+ break;
+ case GLOB_NOMATCH:
+ flb_plg_warn(ctx->ins, "glob: no match");
+ break;
+ case GLOB_ABORTED:
+ flb_plg_warn(ctx->ins, "glob: aborted");
+ break;
+ default:
+ flb_plg_warn(ctx->ins, "glob: other error");
+ }
+ return ret;
+ }
+
+ for (i = 0; i < glb.gl_pathc; i++) {
+ fd = open(glb.gl_pathv[i], O_RDONLY);
+ if (fd < 0) {
+ continue;
+ }
+ count = read(fd, &cmdname, FLB_CMD_LEN);
+ if (count <= 0){
+ close(fd);
+ continue;
+ }
+ cmdname[FLB_CMD_LEN-1] = '\0';
+ bname = basename(cmdname);
+
+ if (strncmp(proc, bname, FLB_CMD_LEN) == 0) {
+ sscanf(glb.gl_pathv[i],"/proc/%ld/cmdline",&ret_scan);
+ ret = (pid_t)ret_scan;
+ close(fd);
+ break;
+ }
+ close(fd);
+ }
+ globfree(&glb);
+ return ret;
+}
+
+static int configure(struct flb_in_proc_config *ctx,
+ struct flb_input_instance *in)
+{
+ int ret;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(in, "unable to load configuration");
+ return -1;
+ }
+
+ if (ctx->interval_sec <= 0 && ctx->interval_nsec <= 0) {
+ /* Illegal settings. Override them. */
+ ctx->interval_sec = atoi(DEFAULT_INTERVAL_SEC);
+ ctx->interval_nsec = atoi(DEFAULT_INTERVAL_NSEC);
+ }
+
+ if (ctx->proc_name != NULL && strcmp(ctx->proc_name, "") != 0) {
+ ctx->len_proc_name = strlen(ctx->proc_name);
+ }
+
+ return 0;
+}
+
+static int get_pid_status(pid_t pid)
+{
+    /* kill() returns 0 on success or -1 with errno set; EPERM still means the process exists */
+    int ret = kill(pid, 0);
+    return (ret == 0 || errno == EPERM);
+}
+
+static int generate_record_linux(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context,
+ struct flb_in_proc_mem_linux *mem_stat,
+ uint64_t fds)
+{
+ int i;
+ int ret;
+ struct flb_in_proc_config *ctx = in_context;
+
+ if (ctx->alive == FLB_TRUE && ctx->alert == FLB_TRUE) {
+ return 0;
+ }
+
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("alive"),
+ FLB_LOG_EVENT_BOOLEAN_VALUE(ctx->alive),
+ /* proc name */
+ FLB_LOG_EVENT_CSTRING_VALUE("proc_name"),
+ FLB_LOG_EVENT_CSTRING_VALUE(ctx->proc_name),
+ /* pid */
+ FLB_LOG_EVENT_CSTRING_VALUE("pid"),
+ FLB_LOG_EVENT_INT64_VALUE(ctx->pid));
+ }
+
+ /* memory */
+ if (ctx->mem == FLB_TRUE) {
+ char *str = NULL;
+ uint64_t *val = NULL;
+ for (i = 0;
+ mem_linux[i].key != NULL &&
+ ret == FLB_EVENT_ENCODER_SUCCESS;
+ i++) {
+ str = mem_linux[i].msgpack_key;
+ val = (uint64_t*)((char*)mem_stat + mem_linux[i].offset);
+
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(str),
+ FLB_LOG_EVENT_UINT64_VALUE(*val));
+ }
+ }
+
+ /* file descriptor */
+ if (ctx->fds) {
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("fd"),
+ FLB_LOG_EVENT_UINT64_VALUE(fds));
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(i_ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ ret = 0;
+ }
+ else {
+ flb_plg_error(i_ins, "log event encoding error : %d", ret);
+
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ return ret;
+}
+
+static void update_alive(struct flb_in_proc_config *ctx)
+{
+ if (ctx->pid >= 0 && get_pid_status(ctx->pid)) {
+ ctx->alive = FLB_TRUE;
+ }
+ else {
+ ctx->alive = FLB_FALSE;
+ }
+}
+
+static void mem_linux_clear(struct flb_in_proc_mem_linux *mem_stat)
+{
+ int i;
+ uint64_t *temp = NULL;
+ for (i=0;mem_linux[i].key != NULL;i++) {
+ temp = (uint64_t*)((char*)mem_stat + mem_linux[i].offset);
+ *temp = 0;
+ }
+}
+
+static int update_mem_linux(struct flb_in_proc_config *ctx,
+ struct flb_in_proc_mem_linux *mem_stat)
+{
+ int ret = -1;
+ int i;
+ char path[PATH_MAX] = {0};
+ char str_name[32] = {0};
+ char *line = NULL;
+ char *fmt = NULL;
+ char *buf = NULL;
+ ssize_t count;
+ size_t len = 256;
+ uint64_t mem_size;
+ uint64_t *temp = NULL;
+ FILE *fp = NULL;
+
+ snprintf(path, sizeof(path), "/proc/%d/status",ctx->pid);
+ fp = fopen(path, "r");
+
+ if (fp == NULL) {
+ flb_plg_error(ctx->ins, "open error: %s", path);
+ mem_linux_clear(mem_stat);
+ return -1;
+ }
+
+ line = (char*)flb_malloc(len);
+ while(1) {
+ count = getline(&line, &len, fp);
+ if (count < 0) {
+ break;
+ }
+
+ /* VmPeak: 14860 kB */
+ fmt = "Vm%s"; /* e.g. "Peak:" */
+ memset(str_name, '\0', sizeof(str_name));
+ ret = sscanf(line, fmt, str_name);
+ if (ret < 1) {
+ continue;
+ }
+ /* replace : -> NULL char*/
+ if ((buf = strchr(str_name, ':')) != NULL) {
+ *buf = '\0';
+ }
+
+        /* calculate size */
+ mem_size = 0;
+ for (i=0;line[i] != '\0';i++) {
+ if (line[i] >= 0x30 && line[i] <= 0x39 /* is number*/) {
+ mem_size *= 10;
+ mem_size += line[i] - 0x30;
+ }
+ }
+
+ for (i=0;mem_linux[i].key != NULL;i++) {
+ if (!strcmp(str_name, mem_linux[i].key)) {
+ temp = (uint64_t*)((char*)mem_stat + mem_linux[i].offset);
+ *temp = mem_size * 1000; /* kB size */
+ break;
+ }
+ }
+ }
+ flb_free(line);
+ fclose(fp);
+ return ret;
+}
+
+static int update_fds_linux(struct flb_in_proc_config *ctx,
+ uint64_t *fds)
+{
+ DIR *dirp = NULL;
+ struct dirent *entry = NULL;
+ char path[PATH_MAX] = {0};
+
+ *fds = 0;
+
+ snprintf(path, sizeof(path), "/proc/%d/fd", ctx->pid);
+ dirp = opendir(path);
+ if (dirp == NULL) {
+ perror("opendir");
+ flb_plg_error(ctx->ins, "opendir error %s", path);
+ return -1;
+ }
+
+ entry = readdir(dirp);
+ while (entry != NULL) {
+ *fds += 1;/* should we check entry->d_name ? */
+ entry = readdir(dirp);
+ }
+ *fds -= 2; /* '.' and '..' */
+ closedir(dirp);
+
+ return 0;
+}
+
+static int in_proc_collect_linux(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context)
+{
+ uint64_t fds = 0;
+ struct flb_in_proc_config *ctx = in_context;
+ struct flb_in_proc_mem_linux mem;
+
+ if (ctx->proc_name != NULL){
+ ctx->pid = get_pid_from_procname_linux(ctx, ctx->proc_name);
+ update_alive(ctx);
+
+ if (ctx->mem == FLB_TRUE && ctx->alive == FLB_TRUE) {
+ mem_linux_clear(&mem);
+ update_mem_linux(ctx, &mem);
+ }
+ if (ctx->fds == FLB_TRUE && ctx->alive == FLB_TRUE) {
+ update_fds_linux(ctx, &fds);
+ }
+ generate_record_linux(i_ins, config, in_context, &mem, fds);
+ }
+
+ return 0;
+}
+
+static int in_proc_collect(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context)
+{
+ return in_proc_collect_linux(i_ins, config, in_context);
+}
+
+static int in_proc_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct flb_in_proc_config *ctx = NULL;
+ (void) data;
+
+ /* Allocate space for the configuration */
+ ctx = flb_calloc(1, sizeof(struct flb_in_proc_config));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->alert = FLB_FALSE;
+ ctx->mem = FLB_TRUE;
+ ctx->fds = FLB_TRUE;
+ ctx->proc_name = NULL;
+ ctx->pid = -1;
+ ctx->ins = in;
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(in, "event encoder initialization error");
+ flb_free(ctx);
+
+ return -1;
+ }
+
+ configure(ctx, in);
+
+    if (ctx->proc_name == NULL) {
+        flb_plg_error(ctx->ins, "'proc_name' is not set");
+        flb_log_event_encoder_destroy(ctx->log_encoder);
+        flb_free(ctx);
+        return -1;
+    }
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ /* Set our collector based on time */
+ ret = flb_input_set_collector_time(in,
+ in_proc_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not set collector for Proc input plugin");
+ flb_free(ctx);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int in_proc_exit(void *data, struct flb_config *config)
+{
+    (void) config;
+ struct flb_in_proc_config *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->log_encoder != NULL) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ }
+
+ /* Destroy context */
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_proc_config, interval_sec),
+ "Set the collector interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_proc_config, interval_nsec),
+ "Set the collector interval (nanoseconds)"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "alert", "false",
+ 0, FLB_TRUE, offsetof(struct flb_in_proc_config, alert),
+ "Only generate alerts if process is down"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "mem", "true",
+ 0, FLB_TRUE, offsetof(struct flb_in_proc_config, mem),
+ "Append memory usage to record"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "fd", "true",
+ 0, FLB_TRUE, offsetof(struct flb_in_proc_config, fds),
+ "Append fd count to record"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "proc_name", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_proc_config, proc_name),
+ "Define process name to health check"
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_proc_plugin = {
+ .name = "proc",
+ .description = "Check Process health",
+ .cb_init = in_proc_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_proc_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_proc_exit,
+ .config_map = config_map,
+ .flags = 0,
+};
diff --git a/src/fluent-bit/plugins/in_proc/in_proc.h b/src/fluent-bit/plugins/in_proc/in_proc.h
new file mode 100644
index 000000000..8aa155660
--- /dev/null
+++ b/src/fluent-bit/plugins/in_proc/in_proc.h
@@ -0,0 +1,78 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_PROC_H
+#define FLB_IN_PROC_H
+
+#include <stdint.h>
+#include <unistd.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+#define DEFAULT_INTERVAL_SEC "1"
+#define DEFAULT_INTERVAL_NSEC "0"
+
+#define FLB_CMD_LEN 256
+#define FLB_IN_PROC_NAME "in_proc"
+
+struct flb_in_proc_mem_linux {
+ uint64_t vmpeak;
+ uint64_t vmsize;
+ uint64_t vmlck;
+ uint64_t vmhwm;
+ uint64_t vmrss;
+ uint64_t vmdata;
+ uint64_t vmstk;
+ uint64_t vmexe;
+ uint64_t vmlib;
+ uint64_t vmpte;
+ uint64_t vmswap;
+};
+
+struct flb_in_proc_mem_offset {
+ char *key;
+ char *msgpack_key;
+ size_t offset;
+};
+
+struct flb_in_proc_config {
+ uint8_t alert;
+ uint8_t alive;
+
+ /* Checking process */
+ flb_sds_t proc_name;
+ pid_t pid;
+ size_t len_proc_name;
+
+ /* Time interval check */
+ int interval_sec;
+ int interval_nsec;
+
+ /* Memory */
+ uint8_t mem;
+
+ /* File descriptor */
+ uint8_t fds;
+
+ struct flb_input_instance *ins;
+ struct flb_log_event_encoder *log_encoder;
+};
+
+#endif /*FLB_IN_PROC_H*/
diff --git a/src/fluent-bit/plugins/in_prometheus_scrape/CMakeLists.txt b/src/fluent-bit/plugins/in_prometheus_scrape/CMakeLists.txt
new file mode 100644
index 000000000..73ae0fbd4
--- /dev/null
+++ b/src/fluent-bit/plugins/in_prometheus_scrape/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ prom_scrape.c)
+
+FLB_PLUGIN(in_prometheus_scrape "${src}" "")
diff --git a/src/fluent-bit/plugins/in_prometheus_scrape/prom_scrape.c b/src/fluent-bit/plugins/in_prometheus_scrape/prom_scrape.c
new file mode 100644
index 000000000..68d4540bc
--- /dev/null
+++ b/src/fluent-bit/plugins/in_prometheus_scrape/prom_scrape.c
@@ -0,0 +1,261 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_upstream.h>
+
+#include <cmetrics/cmt_decode_prometheus.h>
+
+#include "prom_scrape.h"
+
+static struct prom_scrape *prom_scrape_create(struct flb_input_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ int upstream_flags;
+ struct prom_scrape *ctx;
+ struct flb_upstream *upstream;
+
+ if (ins->host.name == NULL) {
+ ins->host.name = flb_sds_create("localhost");
+ }
+ if (ins->host.port == 0) {
+ ins->host.port = 9100;
+ }
+
+ ctx = flb_calloc(1, sizeof(struct prom_scrape));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ upstream_flags = FLB_IO_TCP;
+
+ if (ins->use_tls) {
+ upstream_flags |= FLB_IO_TLS;
+ }
+
+ upstream = flb_upstream_create(config, ins->host.name, ins->host.port,
+ upstream_flags, ins->tls);
+
+    if (!upstream) {
+        flb_plg_error(ins, "upstream initialization error");
+        flb_free(ctx);
+        return NULL;
+    }
+ ctx->upstream = upstream;
+
+ return ctx;
+}
+
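+/*
+ * Perform one scrape: issue an HTTP GET against metrics_path, decode the
+ * Prometheus text payload into a CMetrics context and append it to the
+ * pipeline.
+ */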
+static int collect_metrics(struct prom_scrape *ctx)
+{
+ int ret = -1;
+ char errbuf[1024];
+ size_t b_sent;
+ struct flb_http_client *c;
+ struct flb_connection *u_conn;
+ struct cmt *cmt = NULL;
+ struct cmt_decode_prometheus_parse_opts opts = {0};
+
+ /* get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "could not get an upstream connection to %s:%u",
+ ctx->ins->host.name, ctx->ins->host.port);
+ return -1;
+ }
+
+ c = flb_http_client(u_conn, FLB_HTTP_GET, ctx->metrics_path,
+ NULL, 0,
+ ctx->ins->host.name, ctx->ins->host.port, NULL, 0);
+ if (!c) {
+ flb_plg_error(ctx->ins, "unable to create http client");
+ goto client_error;
+ }
+
+ flb_http_buffer_size(c, ctx->buffer_max_size);
+
+ /* Auth headers */
+ if (ctx->http_user && ctx->http_passwd) { /* Basic */
+ flb_http_basic_auth(c, ctx->http_user, ctx->http_passwd);
+ } else if (ctx->bearer_token) { /* Bearer token */
+ flb_http_bearer_auth(c, ctx->bearer_token);
+ }
+
+ ret = flb_http_do(c, &b_sent);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "http do error");
+ goto http_error;
+ }
+
+ if (c->resp.status != 200) {
+ flb_plg_error(ctx->ins, "http status code error: [%s] %d",
+ ctx->metrics_path, c->resp.status);
+ goto http_error;
+ }
+
+ if (c->resp.payload_size <= 0) {
+ flb_plg_error(ctx->ins, "empty response");
+ goto http_error;
+ }
+
+ /* configure prometheus decoder options */
+ opts.default_timestamp = cfl_time_now();
+ opts.errbuf = errbuf;
+ opts.errbuf_size = sizeof(errbuf);
+
+ /* convert Prometheus Text to CMetrics */
+ ret = cmt_decode_prometheus_create(&cmt,
+ c->resp.payload,
+ c->resp.payload_size,
+ &opts);
+ if (ret == 0) {
+ /* Append the updated metrics */
+ ret = flb_input_metrics_append(ctx->ins, NULL, 0, cmt);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "could not append metrics");
+ }
+ cmt_destroy(cmt);
+ }
+ else {
+ flb_plg_error(ctx->ins, "error decoding Prometheus Text format");
+ }
+
+http_error:
+ flb_http_client_destroy(c);
+client_error:
+ flb_upstream_conn_release(u_conn);
+
+ return ret;
+}
+
+static int cb_prom_scrape_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int rc;
+ struct prom_scrape *ctx = (struct prom_scrape *) in_context;
+
+ rc = collect_metrics(ctx);
+ FLB_INPUT_RETURN(rc);
+}
+
+static int cb_prom_scrape_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct prom_scrape *ctx;
+
+ /* Allocate space for the configuration */
+ ctx = prom_scrape_create(ins, config);
+ if (!ctx) {
+ return -1;
+ }
+
+ flb_input_set_context(ins, ctx);
+ ctx->coll_id = flb_input_set_collector_time(ins,
+ cb_prom_scrape_collect,
+ ctx->scrape_interval,
+ 0, config);
+ return 0;
+}
+
+static int prom_scrape_destroy(struct prom_scrape *ctx)
+{
+ if (ctx->upstream) {
+ flb_upstream_destroy(ctx->upstream);
+ }
+ flb_free(ctx);
+
+ return 0;
+}
+
+static int cb_prom_scrape_exit(void *data, struct flb_config *config)
+{
+ struct prom_scrape *ctx = (struct prom_scrape *) data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ prom_scrape_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_TIME, "scrape_interval", "10s",
+ 0, FLB_TRUE, offsetof(struct prom_scrape, scrape_interval),
+ "Scraping interval."
+ },
+
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_max_size", HTTP_BUFFER_MAX_SIZE,
+ 0, FLB_TRUE, offsetof(struct prom_scrape, buffer_max_size),
+ ""
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "metrics_path", DEFAULT_URI,
+ 0, FLB_TRUE, offsetof(struct prom_scrape, metrics_path),
+ "Set the metrics URI endpoint, it must start with a forward slash."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "http_user", NULL,
+ 0, FLB_TRUE, offsetof(struct prom_scrape, http_user),
+ "Set HTTP auth user"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "http_passwd", "",
+ 0, FLB_TRUE, offsetof(struct prom_scrape, http_passwd),
+ "Set HTTP auth password"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "bearer_token", NULL,
+ 0, FLB_TRUE, offsetof(struct prom_scrape, bearer_token),
+ "Set bearer token auth"
+ },
+
+ /* EOF */
+ {0}
+};
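+
+/*
+ * Illustrative configuration sketch (assumption: classic-mode .conf syntax;
+ * host/port are the generic instance network properties referenced via
+ * ins->host in prom_scrape_create(), the other names come from the config
+ * map above):
+ *
+ *   [INPUT]
+ *       name             prometheus_scrape
+ *       host             localhost
+ *       port             9100
+ *       metrics_path     /metrics
+ *       scrape_interval  10s
+ */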
+
+/* Plugin reference */
+struct flb_input_plugin in_prometheus_scrape_plugin = {
+ .name = "prometheus_scrape",
+ .description = "Scrape metrics from Prometheus Endpoint",
+ .cb_init = cb_prom_scrape_init,
+ .cb_pre_run = NULL,
+ .cb_collect = cb_prom_scrape_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = cb_prom_scrape_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET | FLB_INPUT_CORO,
+};
diff --git a/src/fluent-bit/plugins/in_prometheus_scrape/prom_scrape.h b/src/fluent-bit/plugins/in_prometheus_scrape/prom_scrape.h
new file mode 100644
index 000000000..9510abfef
--- /dev/null
+++ b/src/fluent-bit/plugins/in_prometheus_scrape/prom_scrape.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_PROMETHEUS_SCRAPE_H
+#define FLB_IN_PROMETHEUS_SCRAPE_H
+
+#include <fluent-bit/flb_input_plugin.h>
+
+#define DEFAULT_URI "/metrics"
+#define HTTP_BUFFER_MAX_SIZE "10M"
+
+struct prom_scrape
+{
+ int coll_id; /* collector id */
+ uint64_t scrape_interval;
+ flb_sds_t metrics_path;
+ struct flb_upstream *upstream;
+ struct flb_input_instance *ins; /* input plugin instance */
+ size_t buffer_max_size; /* Maximum buffer size */
+
+ /* HTTP Auth */
+ flb_sds_t http_user;
+ flb_sds_t http_passwd;
+
+ /* Bearer Token Auth */
+ flb_sds_t bearer_token;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_random/CMakeLists.txt b/src/fluent-bit/plugins/in_random/CMakeLists.txt
new file mode 100644
index 000000000..2cb59e83d
--- /dev/null
+++ b/src/fluent-bit/plugins/in_random/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ random.c)
+
+FLB_PLUGIN(in_random "${src}" "")
diff --git a/src/fluent-bit/plugins/in_random/random.c b/src/fluent-bit/plugins/in_random/random.c
new file mode 100644
index 000000000..ab6e59ffd
--- /dev/null
+++ b/src/fluent-bit/plugins/in_random/random.c
@@ -0,0 +1,245 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_random.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#define DEFAULT_INTERVAL_SEC "1"
+#define DEFAULT_INTERVAL_NSEC "0"
+
+struct flb_in_random_config {
+ /* Config properties */
+ int interval_sec;
+ int interval_nsec;
+ int samples;
+
+ /* Internal */
+ int samples_count;
+ int coll_fd;
+
+ struct flb_input_instance *ins;
+ struct flb_log_event_encoder *log_encoder;
+};
+
+/* cb_collect callback: append one record with a random uint64 under the "rand_value" key */
+static int in_random_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ uint64_t val;
+ struct flb_in_random_config *ctx = in_context;
+
+ if (ctx->samples == 0) {
+ return -1;
+ }
+
+ if (ctx->samples > 0 && (ctx->samples_count >= ctx->samples)) {
+ return -1;
+ }
+
+ if (flb_random_bytes((unsigned char *) &val, sizeof(uint64_t))) {
+ val = time(NULL);
+ }
+
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("rand_value"),
+ FLB_LOG_EVENT_UINT64_VALUE(val));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "log event encoding error : %d", ret);
+
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ ctx->samples_count++;
+
+ return 0;
+}
+
+/* Set plugin configuration */
+static int in_random_config_read(struct flb_in_random_config *ctx,
+ struct flb_input_instance *in)
+{
+ int ret;
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *)ctx);
+ if (ret == -1) {
+ return -1;
+ }
+
+ /* interval settings */
+ if (ctx->interval_sec <= 0 && ctx->interval_nsec <= 0) {
+ /* Illegal settings. Override them. */
+ ctx->interval_sec = atoi(DEFAULT_INTERVAL_SEC);
+ ctx->interval_nsec = atoi(DEFAULT_INTERVAL_NSEC);
+ }
+
+
+ flb_plg_debug(ctx->ins, "interval_sec=%d interval_nsec=%d",
+ ctx->interval_sec, ctx->interval_nsec);
+
+ return 0;
+}
+
+/* Initialize plugin */
+static int in_random_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret = -1;
+ struct flb_in_random_config *ctx = NULL;
+
+ /* Allocate space for the configuration */
+ ctx = flb_calloc(1, sizeof(struct flb_in_random_config));
+ if (!ctx) {
+ return -1;
+ }
+ ctx->samples_count = 0;
+ ctx->ins = in;
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(in, "could not initialize event encoder");
+ flb_free(ctx);
+
+ return -1;
+ }
+
+ /* Initialize head config */
+ ret = in_random_config_read(ctx, in);
+ if (ret < 0) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ flb_input_set_context(in, ctx);
+ ret = flb_input_set_collector_time(in,
+ in_random_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec, config);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "could not set collector for head input plugin");
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->coll_fd = ret;
+ return 0;
+}
+
+static void in_random_pause(void *data, struct flb_config *config)
+{
+ struct flb_in_random_config *ctx = data;
+
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+
+}
+
+static void in_random_resume(void *data, struct flb_config *config)
+{
+ struct flb_in_random_config *ctx = data;
+
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+}
+
+static int in_random_exit(void *data, struct flb_config *config)
+{
+ struct flb_in_random_config *ctx = data;
+    (void) config;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->log_encoder != NULL) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ }
+
+ flb_free(ctx);
+ return 0;
+}
+
+
+static struct flb_config_map config_map[] = {
+ // samples
+ // interval_sec
+ // interval_nsec
+ {
+ FLB_CONFIG_MAP_INT, "samples", "-1",
+ 0, FLB_TRUE, offsetof(struct flb_in_random_config, samples),
+ "Number of samples to send, -1 for infinite"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_random_config, interval_sec),
+ "Set the collector interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_random_config, interval_nsec),
+ "Set the collector interval (sub seconds)"
+ },
+ /* EOF */
+ {0}
+
+};
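+
+/*
+ * Illustrative configuration sketch (assumption: classic-mode .conf syntax;
+ * property names are taken from the config map above):
+ *
+ *   [INPUT]
+ *       name           random
+ *       samples        10
+ *       interval_sec   1
+ *       interval_nsec  0
+ */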
+
+struct flb_input_plugin in_random_plugin = {
+ .name = "random",
+ .description = "Random",
+ .cb_init = in_random_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_random_collect,
+ .cb_flush_buf = NULL,
+ .cb_pause = in_random_pause,
+ .cb_resume = in_random_resume,
+ .cb_exit = in_random_exit,
+ .config_map = config_map
+};
diff --git a/src/fluent-bit/plugins/in_serial/CMakeLists.txt b/src/fluent-bit/plugins/in_serial/CMakeLists.txt
new file mode 100644
index 000000000..cf15742c2
--- /dev/null
+++ b/src/fluent-bit/plugins/in_serial/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ in_serial.c in_serial_config.c)
+
+FLB_PLUGIN(in_serial "${src}" "")
diff --git a/src/fluent-bit/plugins/in_serial/in_serial.c b/src/fluent-bit/plugins/in_serial/in_serial.c
new file mode 100644
index 000000000..3675f1e92
--- /dev/null
+++ b/src/fluent-bit/plugins/in_serial/in_serial.c
@@ -0,0 +1,443 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Serial input plugin for Fluent Bit
+ * ==================================
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ * Copyright (C) 2015-2016 Takeshi HASEGAWA
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_engine.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_error.h>
+#include <msgpack.h>
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <ctype.h>
+#include <sys/stat.h>
+#include <inttypes.h>
+#include <termios.h>
+
+#include "in_serial.h"
+#include "in_serial_config.h"
+
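+/* Encode a single raw line as a log event under the "msg" key */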
+static inline int process_line(const char *line, int len,
+ struct flb_in_serial_config *ctx)
+{
+ int ret;
+
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("msg"),
+ FLB_LOG_EVENT_STRING_VALUE(line, len));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ flb_debug("[in_serial] message '%s'", line);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = 0;
+ }
+ else {
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static inline int process_pack(struct flb_in_serial_config *ctx,
+ char *pack, size_t size)
+{
+ int ret;
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object entry;
+
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+
+ /* First pack the results, iterate concatenated messages */
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, pack, size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ entry = result.data;
+
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("msg"),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&entry));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+ }
+
+ msgpack_unpacked_destroy(&result);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = 0;
+ }
+ else {
+ ret = -1;
+ }
+
+ return ret;
+}
+
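+/* Shift the buffer content left by 'bytes', discarding the consumed prefix */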
+static inline void consume_bytes(char *buf, int bytes, int length)
+{
+ memmove(buf, buf + bytes, length - bytes);
+}
+
+/* Callback triggered when some serial msgs are available */
+static int cb_serial_collect(struct flb_input_instance *in,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ int bytes = 0;
+ int available;
+ int len;
+ int hits;
+ char *sep;
+ char *buf;
+ struct flb_in_serial_config *ctx = in_context;
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ ret = 0;
+
+ while (1) {
+ available = (sizeof(ctx->buf_data) -1) - ctx->buf_len;
+ if (available > 1) {
+ bytes = read(ctx->fd, ctx->buf_data + ctx->buf_len, available);
+
+ if (bytes == -1) {
+ if (errno == EPIPE || errno == EINTR) {
+ ret = -1;
+ }
+ else {
+ ret = 0;
+ }
+
+ break;
+ }
+ else if (bytes == 0) {
+ ret = 0;
+
+ break;
+ }
+ }
+ ctx->buf_len += bytes;
+
+ /* Always set a delimiter to avoid buffer trash */
+ ctx->buf_data[ctx->buf_len] = '\0';
+
+ /* Check if our buffer is full */
+ if (ctx->buffer_id + 1 == SERIAL_BUFFER_SIZE) {
+ ret = flb_engine_flush(config, &in_serial_plugin);
+ if (ret == -1) {
+ ctx->buffer_id = 0;
+ }
+ }
+
+ sep = NULL;
+ buf = ctx->buf_data;
+ len = ctx->buf_len;
+ hits = 0;
+
+ /* Handle FTDI handshake */
+ if (ctx->buf_data[0] == '\0') {
+ consume_bytes(ctx->buf_data, 1, ctx->buf_len);
+ ctx->buf_len--;
+ }
+
+ /* Strip CR or LF if found at first byte */
+ if (ctx->buf_data[0] == '\r' || ctx->buf_data[0] == '\n') {
+ /* Skip message with one byte with CR or LF */
+ flb_trace("[in_serial] skip one byte message with ASCII code=%i",
+ ctx->buf_data[0]);
+ consume_bytes(ctx->buf_data, 1, ctx->buf_len);
+ ctx->buf_len--;
+ }
+
+ /* Handle the case when a Separator is set */
+ if (ctx->separator) {
+ while ((sep = strstr(ctx->buf_data, ctx->separator))) {
+ len = (sep - ctx->buf_data);
+ if (len > 0) {
+ /* process the line based in the separator position */
+ process_line(buf, len, ctx);
+ consume_bytes(ctx->buf_data, len + ctx->sep_len, ctx->buf_len);
+ ctx->buf_len -= (len + ctx->sep_len);
+ hits++;
+ }
+ else {
+ consume_bytes(ctx->buf_data, ctx->sep_len, ctx->buf_len);
+ ctx->buf_len -= ctx->sep_len;
+ }
+ ctx->buf_data[ctx->buf_len] = '\0';
+ }
+
+ if (hits == 0 && available <= 1) {
+ flb_debug("[in_serial] no separator found, no more space");
+ ctx->buf_len = 0;
+ ret = 0;
+
+ break;
+ }
+ }
+ else if (ctx->format == FLB_SERIAL_FORMAT_JSON) {
+ /* JSON Format handler */
+ char *pack;
+ int out_size;
+
+ ret = flb_pack_json_state(ctx->buf_data, ctx->buf_len,
+ &pack, &out_size, &ctx->pack_state);
+ if (ret == FLB_ERR_JSON_PART) {
+ flb_debug("[in_serial] JSON incomplete, waiting for more data...");
+
+ ret = 0;
+
+ break;
+ }
+ else if (ret == FLB_ERR_JSON_INVAL) {
+ flb_debug("[in_serial] invalid JSON message, skipping");
+ flb_pack_state_reset(&ctx->pack_state);
+ flb_pack_state_init(&ctx->pack_state);
+ ctx->pack_state.multiple = FLB_TRUE;
+
+ ret = -1;
+
+ break;
+ }
+
+ /*
+ * Given the Tokens used for the packaged message, append
+ * the records and then adjust buffer.
+ */
+ process_pack(ctx, pack, out_size);
+ flb_free(pack);
+
+ consume_bytes(ctx->buf_data, ctx->pack_state.last_byte, ctx->buf_len);
+ ctx->buf_len -= ctx->pack_state.last_byte;
+ ctx->buf_data[ctx->buf_len] = '\0';
+
+ flb_pack_state_reset(&ctx->pack_state);
+ flb_pack_state_init(&ctx->pack_state);
+ ctx->pack_state.multiple = FLB_TRUE;
+ }
+ else {
+ /* Process and enqueue the received line */
+ process_line(ctx->buf_data, ctx->buf_len, ctx);
+ ctx->buf_len = 0;
+ }
+ }
+
+ if (ctx->log_encoder->output_length > 0) {
+ flb_input_log_append(in, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ }
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ return ret;
+}
+
+/* Cleanup serial input */
+static int cb_serial_exit(void *in_context, struct flb_config *config)
+{
+ struct flb_in_serial_config *ctx = in_context;
+
+ flb_trace("[in_serial] Restoring original termios...");
+ tcsetattr(ctx->fd, TCSANOW, &ctx->tio_orig);
+
+ if (ctx->log_encoder != NULL) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ }
+
+ flb_pack_state_reset(&ctx->pack_state);
+ flb_free(ctx);
+
+ return 0;
+}
+
+/* Init serial input */
+static int cb_serial_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int fd;
+ int ret;
+ int br;
+ struct flb_in_serial_config *ctx;
+ (void) data;
+
+ ctx = flb_calloc(1, sizeof(struct flb_in_serial_config));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->format = FLB_SERIAL_FORMAT_NONE;
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(in, "could not initialize event encoder");
+ flb_free(ctx);
+
+ return -1;
+ }
+
+ if (!serial_config_read(ctx, in)) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Initialize JSON pack state */
+ if (ctx->format == FLB_SERIAL_FORMAT_JSON) {
+ flb_pack_state_init(&ctx->pack_state);
+ ctx->pack_state.multiple = FLB_TRUE;
+ }
+
+ /* Input instance */
+ ctx->i_ins = in;
+
+ /* set context */
+ flb_input_set_context(in, ctx);
+
+ /* open device */
+ fd = open(ctx->file, O_RDWR | O_NOCTTY | O_NONBLOCK);
+ if (fd == -1) {
+ perror("open");
+ flb_error("[in_serial] Could not open serial port device");
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->fd = fd;
+
+ /* Store original settings */
+ tcgetattr(fd, &ctx->tio_orig);
+
+ /* Reset for new... */
+ memset(&ctx->tio, 0, sizeof(ctx->tio));
+ tcgetattr(fd, &ctx->tio);
+
+ br = atoi(ctx->bitrate);
+ cfsetospeed(&ctx->tio, (speed_t) flb_serial_speed(br));
+ cfsetispeed(&ctx->tio, (speed_t) flb_serial_speed(br));
+
+ /* Settings */
+ ctx->tio.c_cflag &= ~PARENB; /* 8N1 */
+ ctx->tio.c_cflag &= ~CSTOPB;
+ ctx->tio.c_cflag &= ~CSIZE;
+ ctx->tio.c_cflag |= CS8;
+ ctx->tio.c_cflag &= ~CRTSCTS; /* No flow control */
+ ctx->tio.c_cc[VMIN] = ctx->min_bytes; /* Min number of bytes to read */
+ ctx->tio.c_cflag |= CREAD | CLOCAL; /* Enable READ & ign ctrl lines */
+
+ tcflush(fd, TCIFLUSH);
+ tcsetattr(fd, TCSANOW, &ctx->tio);
+
+#if __linux__
+ /* Set our collector based on a file descriptor event */
+ ret = flb_input_set_collector_event(in,
+ cb_serial_collect,
+ ctx->fd,
+ config);
+#else
+ /* Set our collector based on a timer event */
+ ret = flb_input_set_collector_time(in,
+ cb_serial_collect,
+ IN_SERIAL_COLLECT_SEC,
+ IN_SERIAL_COLLECT_NSEC,
+ config);
+#endif
+
+ if (ret == -1) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ return -1;
+ }
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "file", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_serial_config, file),
+ "Set the serial character device file name"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "bitrate", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_serial_config, bitrate),
+ "Set the serial bitrate (baudrate)"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "separator", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_serial_config, separator),
+ "Set the record separator"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "format", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_serial_config, format_str),
+ "Set the serial format: json or none"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "min_bytes", "0",
+ 0, FLB_TRUE, offsetof(struct flb_in_serial_config, min_bytes),
+ "Set the serial minimum bytes"
+ },
+ /* EOF */
+ {0}
+};
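+
+/*
+ * Illustrative configuration sketch (assumption: classic-mode .conf syntax;
+ * the device path is an example only, property names come from the config
+ * map above; 'format' and 'separator' are mutually exclusive):
+ *
+ *   [INPUT]
+ *       name      serial
+ *       file      /dev/ttyUSB0
+ *       bitrate   9600
+ *       min_bytes 1
+ *       format    json
+ */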
+
+/* Plugin reference */
+struct flb_input_plugin in_serial_plugin = {
+ .name = "serial",
+ .description = "Serial input",
+ .cb_init = cb_serial_init,
+ .cb_pre_run = NULL,
+ .cb_collect = cb_serial_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = cb_serial_exit,
+ .config_map = config_map,
+};
diff --git a/src/fluent-bit/plugins/in_serial/in_serial.h b/src/fluent-bit/plugins/in_serial/in_serial.h
new file mode 100644
index 000000000..2bed91c00
--- /dev/null
+++ b/src/fluent-bit/plugins/in_serial/in_serial.h
@@ -0,0 +1,63 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Serial input plugin for Fluent Bit
+ * ==================================
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ * Copyright (C) 2015-2016 Takeshi HASEGAWA
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_SERIAL
+#define FLB_IN_SERIAL
+
+#include <stdint.h>
+
+#define SERIAL_BUFFER_SIZE 256
+#define IN_SERIAL_COLLECT_SEC 1
+#define IN_SERIAL_COLLECT_NSEC 0
+
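+/* Map an integer bitrate to the matching termios speed constant (B9600 if unknown) */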
+static inline speed_t flb_serial_speed(int br)
+{
+ switch (br) {
+ case 0: return B0;
+ case 50: return B50;
+ case 75: return B75;
+ case 110: return B110;
+ case 134: return B134;
+ case 150: return B150;
+ case 200: return B200;
+ case 300: return B300;
+ case 600: return B600;
+ case 1200: return B1200;
+ case 1800: return B1800;
+ case 2400: return B2400;
+ case 4800: return B4800;
+ case 9600: return B9600;
+ case 19200: return B19200;
+ case 38400: return B38400;
+ case 57600: return B57600;
+ case 115200: return B115200;
+ case 230400: return B230400;
+ default: return B9600;
+ };
+
+ return 0;
+}
+
+int in_serial_start();
+
+
+extern struct flb_input_plugin in_serial_plugin;
+
+#endif
diff --git a/src/fluent-bit/plugins/in_serial/in_serial_config.c b/src/fluent-bit/plugins/in_serial/in_serial_config.c
new file mode 100644
index 000000000..152caee3d
--- /dev/null
+++ b/src/fluent-bit/plugins/in_serial/in_serial_config.c
@@ -0,0 +1,82 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Serial input plugin for Fluent Bit
+ * ==================================
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ * Copyright (C) 2015-2016 Takeshi HASEGAWA
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_error.h>
+
+#include "in_serial_config.h"
+
+struct flb_in_serial_config *serial_config_read(struct flb_in_serial_config *config,
+ struct flb_input_instance *i_ins)
+{
+ int ret;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(i_ins, (void *)config);
+ if (ret == -1) {
+ flb_plg_error(i_ins, "unable to load configuration");
+ return NULL;
+ }
+
+ if (!config->file) {
+ flb_error("[serial] error reading filename from "
+ "configuration");
+ return NULL;
+ }
+
+ if (!config->bitrate) {
+ flb_error("[serial] error reading bitrate from "
+ "configuration");
+ return NULL;
+ }
+
+ if (config->min_bytes <= 0) {
+ config->min_bytes = 1;
+ }
+
+ config->fd = -1;
+ config->buf_len = 0;
+
+ if (config->format_str && config->separator) {
+ flb_error("[in_serial] specify 'format' or 'separator', not both");
+ return NULL;
+ }
+
+ if (config->separator) {
+ config->sep_len = strlen(config->separator);
+ }
+ else {
+ config->sep_len = 0;
+ }
+
+ if (config->format_str) {
+ if (strcasecmp(config->format_str, "json") == 0) {
+ config->format = FLB_SERIAL_FORMAT_JSON;
+ }
+ }
+
+ flb_debug("[in_serial] file='%s' bitrate='%s' min_bytes=%i format=%i",
+ config->file, config->bitrate, config->min_bytes, config->format);
+
+ return config;
+}
diff --git a/src/fluent-bit/plugins/in_serial/in_serial_config.h b/src/fluent-bit/plugins/in_serial/in_serial_config.h
new file mode 100644
index 000000000..e625635e7
--- /dev/null
+++ b/src/fluent-bit/plugins/in_serial/in_serial_config.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Serial input plugin for Fluent Bit
+ * ==================================
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ * Copyright (C) 2015-2016 Takeshi HASEGAWA
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_SERIAL_CONFIG_H
+#define FLB_IN_SERIAL_CONFIG_H
+
+#define FLB_SERIAL_FORMAT_NONE 0
+#define FLB_SERIAL_FORMAT_JSON 1
+
+#include <termios.h>
+#include <msgpack.h>
+
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+struct flb_in_serial_config {
+ int fd; /* Socket to destination/backend */
+
+ /* Buffer */
+ int buf_len;
+ char buf_data[8192];
+
+ /* config */
+ int min_bytes;
+ flb_sds_t file;
+ flb_sds_t bitrate;
+
+ /* separator */
+ int sep_len;
+ flb_sds_t separator;
+
+ /* Incoming format: JSON only for now */
+ int format;
+ flb_sds_t format_str;
+
+ struct termios tio;
+ struct termios tio_orig;
+
+ /* Tag: used to extend original tag */
+ int tag_len; /* The real string length */
+ char tag[32]; /* Custom Tag for this input */
+
+ /* Line processing */
+ int buffer_id;
+
+ /* Input instance reference */
+ struct flb_input_instance *i_ins;
+ struct flb_log_event_encoder *log_encoder;
+
+ /*
+ * If (format == FLB_SERIAL_FORMAT_JSON), we use this pack_state
+     * to perform validation of the incoming JSON message.
+ */
+ struct flb_pack_state pack_state;
+};
+
+struct flb_in_serial_config *serial_config_read(struct flb_in_serial_config *config,
+ struct flb_input_instance *i_ins);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_splunk/CMakeLists.txt b/src/fluent-bit/plugins/in_splunk/CMakeLists.txt
new file mode 100644
index 000000000..42ecf2e31
--- /dev/null
+++ b/src/fluent-bit/plugins/in_splunk/CMakeLists.txt
@@ -0,0 +1,12 @@
+if(NOT FLB_METRICS)
+  message(FATAL_ERROR "Splunk input plugin requires FLB_METRICS=On.")
+endif()
+
+set(src
+ splunk.c
+ splunk_conn.c
+ splunk_prot.c
+ splunk_config.c
+ )
+
+FLB_PLUGIN(in_splunk "${src}" "monkey-core-static")
diff --git a/src/fluent-bit/plugins/in_splunk/splunk.c b/src/fluent-bit/plugins/in_splunk/splunk.c
new file mode 100644
index 000000000..78589037c
--- /dev/null
+++ b/src/fluent-bit/plugins/in_splunk/splunk.c
@@ -0,0 +1,213 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_config.h>
+
+#include "splunk.h"
+#include "splunk_conn.h"
+#include "splunk_config.h"
+
+/*
+ * For a server event, the collect callback means a new client has arrived; we
+ * accept the connection and create a new TCP instance which will wait for
+ * JSON map messages.
+ */
+static int in_splunk_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_connection *connection;
+ struct splunk_conn *conn;
+ struct flb_splunk *ctx;
+
+ ctx = in_context;
+
+ connection = flb_downstream_conn_get(ctx->downstream);
+
+ if (connection == NULL) {
+ flb_plg_error(ctx->ins, "could not accept new connection");
+
+ return -1;
+ }
+
+ flb_plg_trace(ctx->ins, "new TCP connection arrived FD=%i",
+ connection->fd);
+
+ conn = splunk_conn_add(connection, ctx);
+
+ if (conn == NULL) {
+ flb_downstream_conn_release(connection);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int in_splunk_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ unsigned short int port;
+ int ret;
+ struct flb_splunk *ctx;
+
+ (void) data;
+
+ /* Create context and basic conf */
+ ctx = splunk_config_create(ins);
+ if (!ctx) {
+ return -1;
+ }
+
+ ctx->collector_id = -1;
+
+ /* Populate context with config map defaults and incoming properties */
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "configuration error");
+ splunk_config_destroy(ctx);
+ return -1;
+ }
+
+ /* Set the context */
+ flb_input_set_context(ins, ctx);
+
+ port = (unsigned short int) strtoul(ctx->tcp_port, NULL, 10);
+
+ ctx->downstream = flb_downstream_create(FLB_TRANSPORT_TCP,
+ ins->flags,
+ ctx->listen,
+ port,
+ ins->tls,
+ config,
+ &ins->net_setup);
+
+ if (ctx->downstream == NULL) {
+ flb_plg_error(ctx->ins,
+ "could not initialize downstream on %s:%s. Aborting",
+ ctx->listen, ctx->tcp_port);
+
+ splunk_config_destroy(ctx);
+
+ return -1;
+ }
+
+ flb_input_downstream_set(ctx->downstream, ctx->ins);
+
+ /* Collect upon data available on the standard input */
+ ret = flb_input_set_collector_socket(ins,
+ in_splunk_collect,
+ ctx->downstream->server_fd,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Could not set collector for IN_TCP input plugin");
+ splunk_config_destroy(ctx);
+
+ return -1;
+ }
+
+ ctx->collector_id = ret;
+
+ return 0;
+}
+
+static int in_splunk_exit(void *data, struct flb_config *config)
+{
+ struct flb_splunk *ctx;
+
+ (void) config;
+
+ ctx = data;
+
+ if (ctx != NULL) {
+ splunk_config_destroy(ctx);
+ }
+
+ return 0;
+}
+
+
+static void in_splunk_pause(void *data, struct flb_config *config)
+{
+ struct flb_splunk *ctx = data;
+
+ flb_input_collector_pause(ctx->collector_id, ctx->ins);
+
+}
+
+static void in_splunk_resume(void *data, struct flb_config *config)
+{
+ struct flb_splunk *ctx = data;
+
+ flb_input_collector_resume(ctx->collector_id, ctx->ins);
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_max_size", HTTP_BUFFER_MAX_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_splunk, buffer_max_size),
+ ""
+ },
+
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_chunk_size", HTTP_BUFFER_CHUNK_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_splunk, buffer_chunk_size),
+ ""
+ },
+
+ {
+ FLB_CONFIG_MAP_SLIST_1, "success_header", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_splunk, success_headers),
+ "Add an HTTP header key/value pair on success. Multiple headers can be set"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "splunk_token", NULL,
+ 0, FLB_FALSE, 0,
+ "Set valid Splunk HEC tokens for the requests"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "tag_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_splunk, tag_key),
+ ""
+ },
+
+
+ /* EOF */
+ {0}
+};
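+
+/*
+ * Illustrative configuration sketch (assumption: classic-mode .conf syntax;
+ * listen/port are the generic instance network properties, defaulting to
+ * 0.0.0.0:8088 as set in splunk_config_create(); the token is a placeholder):
+ *
+ *   [INPUT]
+ *       name          splunk
+ *       listen        0.0.0.0
+ *       port          8088
+ *       splunk_token  <your-hec-token>
+ */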
+
+/* Plugin reference */
+struct flb_input_plugin in_splunk_plugin = {
+ .name = "splunk",
+ .description = "Input plugin for Splunk HEC payloads",
+ .cb_init = in_splunk_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_splunk_collect,
+ .cb_flush_buf = NULL,
+ .cb_pause = in_splunk_pause,
+ .cb_resume = in_splunk_resume,
+ .cb_exit = in_splunk_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET_SERVER | FLB_IO_OPT_TLS
+};
diff --git a/src/fluent-bit/plugins/in_splunk/splunk.h b/src/fluent-bit/plugins/in_splunk/splunk.h
new file mode 100644
index 000000000..bf935ea22
--- /dev/null
+++ b/src/fluent-bit/plugins/in_splunk/splunk.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_SPLUNK_H
+#define FLB_IN_SPLUNK_H
+
+#include <fluent-bit/flb_downstream.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <monkey/monkey.h>
+
+#define HTTP_BUFFER_MAX_SIZE "4M"
+#define HTTP_BUFFER_CHUNK_SIZE "512K"
+
+struct flb_splunk {
+ flb_sds_t listen;
+ flb_sds_t tcp_port;
+ const char *tag_key;
+
+ int collector_id;
+
+ /* Success HTTP headers */
+ struct mk_list *success_headers;
+ flb_sds_t success_headers_str;
+
+ size_t buffer_max_size; /* Maximum buffer size */
+ size_t buffer_chunk_size; /* Chunk allocation size */
+
+ /* Token Auth */
+ flb_sds_t auth_header;
+
+ struct flb_log_event_encoder log_encoder;
+ struct flb_downstream *downstream; /* Client manager */
+ struct mk_list connections; /* linked list of connections */
+
+ struct mk_server *server;
+ struct flb_input_instance *ins;
+};
+
+
+#endif
diff --git a/src/fluent-bit/plugins/in_splunk/splunk_config.c b/src/fluent-bit/plugins/in_splunk/splunk_config.c
new file mode 100644
index 000000000..f61070153
--- /dev/null
+++ b/src/fluent-bit/plugins/in_splunk/splunk_config.c
@@ -0,0 +1,184 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+
+#include "splunk.h"
+#include "splunk_config.h"
+#include "splunk_conn.h"
+#include "splunk_config.h"
+
+struct flb_splunk *splunk_config_create(struct flb_input_instance *ins)
+{
+ struct mk_list *header_iterator;
+ struct flb_slist_entry *header_value;
+ struct flb_slist_entry *header_name;
+ struct flb_config_map_val *header_pair;
+ char port[8];
+ int ret;
+ struct flb_splunk *ctx;
+ const char *tmp;
+
+ ctx = flb_calloc(1, sizeof(struct flb_splunk));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ mk_list_init(&ctx->connections);
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ ctx->auth_header = NULL;
+ tmp = flb_input_get_property("splunk_token", ins);
+ if (tmp) {
+ ctx->auth_header = flb_sds_create("Splunk ");
+ if (ctx->auth_header == NULL) {
+ flb_plg_error(ctx->ins, "error on prefix of auth_header generation");
+ splunk_config_destroy(ctx);
+ return NULL;
+ }
+ ret = flb_sds_cat_safe(&ctx->auth_header, tmp, strlen(tmp));
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "error on token generation");
+ splunk_config_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* Listen interface (if not set, defaults to 0.0.0.0:8088) */
+ flb_input_net_default_listener("0.0.0.0", 8088, ins);
+
+ ctx->listen = flb_strdup(ins->host.listen);
+ snprintf(port, sizeof(port) - 1, "%d", ins->host.port);
+ ctx->tcp_port = flb_strdup(port);
+
+ /* HTTP Server specifics */
+ ctx->server = flb_calloc(1, sizeof(struct mk_server));
+ if (ctx->server == NULL) {
+ flb_plg_error(ctx->ins, "error on mk_server allocation");
+ splunk_config_destroy(ctx);
+ return NULL;
+ }
+ ctx->server->keep_alive = MK_TRUE;
+
+    /* Monkey treats server->workers == 0 as a server that has not been initialized
+     * yet, so we intentionally leave it at zero.
+ */
+
+ ret = flb_log_event_encoder_init(&ctx->log_encoder,
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "error initializing event encoder : %d", ret);
+
+ splunk_config_destroy(ctx);
+
+ return NULL;
+ }
+
+ ctx->success_headers_str = flb_sds_create_size(1);
+
+ if (ctx->success_headers_str == NULL) {
+ splunk_config_destroy(ctx);
+
+ return NULL;
+ }
+
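+    /* Render each success_header entry as a "name: value\r\n" pair appended
+     * to success_headers_str, later used when composing HTTP responses.
+     */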
+ flb_config_map_foreach(header_iterator, header_pair, ctx->success_headers) {
+ header_name = mk_list_entry_first(header_pair->val.list,
+ struct flb_slist_entry,
+ _head);
+
+ header_value = mk_list_entry_last(header_pair->val.list,
+ struct flb_slist_entry,
+ _head);
+
+ ret = flb_sds_cat_safe(&ctx->success_headers_str,
+ header_name->str,
+ flb_sds_len(header_name->str));
+
+ if (ret == 0) {
+ ret = flb_sds_cat_safe(&ctx->success_headers_str,
+ ": ",
+ 2);
+ }
+
+ if (ret == 0) {
+ ret = flb_sds_cat_safe(&ctx->success_headers_str,
+ header_value->str,
+ flb_sds_len(header_value->str));
+ }
+
+ if (ret == 0) {
+ ret = flb_sds_cat_safe(&ctx->success_headers_str,
+ "\r\n",
+ 2);
+ }
+
+ if (ret != 0) {
+ splunk_config_destroy(ctx);
+
+ return NULL;
+ }
+ }
+
+ return ctx;
+}
+
+int splunk_config_destroy(struct flb_splunk *ctx)
+{
+ /* release all connections */
+ splunk_conn_release_all(ctx);
+
+ flb_log_event_encoder_destroy(&ctx->log_encoder);
+
+ if (ctx->collector_id != -1) {
+ flb_input_collector_delete(ctx->collector_id, ctx->ins);
+
+ ctx->collector_id = -1;
+ }
+
+ if (ctx->auth_header != NULL) {
+ flb_sds_destroy(ctx->auth_header);
+ }
+
+ if (ctx->downstream != NULL) {
+ flb_downstream_destroy(ctx->downstream);
+ }
+
+ if (ctx->server) {
+ flb_free(ctx->server);
+ }
+
+ if (ctx->success_headers_str != NULL) {
+ flb_sds_destroy(ctx->success_headers_str);
+ }
+
+
+ flb_free(ctx->listen);
+ flb_free(ctx->tcp_port);
+ flb_free(ctx);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_splunk/splunk_config.h b/src/fluent-bit/plugins/in_splunk/splunk_config.h
new file mode 100644
index 000000000..24d2008f2
--- /dev/null
+++ b/src/fluent-bit/plugins/in_splunk/splunk_config.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_SPLUNK_CONFIG_H
+#define FLB_IN_SPLUNK_CONFIG_H
+
+#include <fluent-bit/flb_input_plugin.h>
+#include "splunk.h"
+
+struct flb_splunk *splunk_config_create(struct flb_input_instance *ins);
+int splunk_config_destroy(struct flb_splunk *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_splunk/splunk_conn.c b/src/fluent-bit/plugins/in_splunk/splunk_conn.c
new file mode 100644
index 000000000..f605a16c7
--- /dev/null
+++ b/src/fluent-bit/plugins/in_splunk/splunk_conn.c
@@ -0,0 +1,306 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_engine.h>
+
+#include "splunk.h"
+#include "splunk_conn.h"
+#include "splunk_prot.h"
+
+static void splunk_conn_request_init(struct mk_http_session *session,
+ struct mk_http_request *request);
+
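+/*
+ * Event-loop callback for an established client connection: read pending
+ * data, run the HTTP parser and dispatch complete requests to the Splunk
+ * protocol handler.
+ */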
+static int splunk_conn_event(void *data)
+{
+ int status;
+ size_t size;
+ ssize_t available;
+ ssize_t bytes;
+ char *tmp;
+ char *request_end;
+ size_t request_len;
+ struct flb_connection *connection;
+ struct splunk_conn *conn;
+ struct mk_event *event;
+ struct flb_splunk *ctx;
+
+ connection = (struct flb_connection *) data;
+
+ conn = connection->user_data;
+
+ ctx = conn->ctx;
+
+ event = &connection->event;
+
+ if (event->mask & MK_EVENT_READ) {
+ available = (conn->buf_size - conn->buf_len) - 1;
+ if (available < 1) {
+ if (conn->buf_size + ctx->buffer_chunk_size > ctx->buffer_max_size) {
+ flb_plg_trace(ctx->ins,
+ "fd=%i incoming data exceed limit (%zu KB)",
+ event->fd, (ctx->buffer_max_size / 1024));
+ splunk_conn_del(conn);
+ return -1;
+ }
+
+ size = conn->buf_size + ctx->buffer_chunk_size;
+ tmp = flb_realloc(conn->buf_data, size);
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+ flb_plg_trace(ctx->ins, "fd=%i buffer realloc %i -> %zu",
+ event->fd, conn->buf_size, size);
+
+ conn->buf_data = tmp;
+ conn->buf_size = size;
+ available = (conn->buf_size - conn->buf_len) - 1;
+ }
+
+ /* Read data */
+ bytes = flb_io_net_read(connection,
+ (void *) &conn->buf_data[conn->buf_len],
+ available);
+
+ if (bytes <= 0) {
+ flb_plg_trace(ctx->ins, "fd=%i closed connection", event->fd);
+ splunk_conn_del(conn);
+ return -1;
+ }
+
+ flb_plg_trace(ctx->ins, "read()=%zi pre_len=%i now_len=%zi",
+ bytes, conn->buf_len, conn->buf_len + bytes);
+ conn->buf_len += bytes;
+ conn->buf_data[conn->buf_len] = '\0';
+
+ status = mk_http_parser(&conn->request, &conn->session.parser,
+ conn->buf_data, conn->buf_len, conn->session.server);
+
+ if (status == MK_HTTP_PARSER_OK) {
+ /* Do more logic parsing and checks for this request */
+ splunk_prot_handle(ctx, conn, &conn->session, &conn->request);
+
+ /* Evict the processed request from the connection buffer and reinitialize
+ * the HTTP parser.
+ */
+
+ request_end = NULL;
+
+ if (NULL != conn->request.data.data) {
+ request_end = &conn->request.data.data[conn->request.data.len];
+ }
+ else {
+ request_end = strstr(conn->buf_data, "\r\n\r\n");
+
+ if(NULL != request_end) {
+ request_end = &request_end[4];
+ }
+ }
+
+ if (NULL != request_end) {
+ request_len = (size_t)(request_end - conn->buf_data);
+
+ if (0 < (conn->buf_len - request_len)) {
+ memmove(conn->buf_data, &conn->buf_data[request_len],
+ conn->buf_len - request_len);
+
+ conn->buf_data[conn->buf_len - request_len] = '\0';
+ conn->buf_len -= request_len;
+ }
+ else {
+ memset(conn->buf_data, 0, request_len);
+
+ conn->buf_len = 0;
+ }
+
+ /* Reinitialize the parser so the next request is properly
+ * handled, the additional memset intends to wipe any left over data
+ * from the headers parsed in the previous request.
+ */
+ memset(&conn->session.parser, 0, sizeof(struct mk_http_parser));
+ mk_http_parser_init(&conn->session.parser);
+ splunk_conn_request_init(&conn->session, &conn->request);
+ }
+ }
+ else if (status == MK_HTTP_PARSER_ERROR) {
+ splunk_prot_handle_error(ctx, conn, &conn->session, &conn->request);
+
+ /* Reinitialize the parser so the next request is properly
+ * handled, the additional memset intends to wipe any left over data
+ * from the headers parsed in the previous request.
+ */
+ memset(&conn->session.parser, 0, sizeof(struct mk_http_parser));
+ mk_http_parser_init(&conn->session.parser);
+ splunk_conn_request_init(&conn->session, &conn->request);
+ }
+
+ /* FIXME: add Protocol handler here */
+ return bytes;
+ }
+
+ if (event->mask & MK_EVENT_CLOSE) {
+ flb_plg_trace(ctx->ins, "fd=%i hangup", event->fd);
+ splunk_conn_del(conn);
+ return -1;
+ }
+
+ return 0;
+
+}
+
+static void splunk_conn_session_init(struct mk_http_session *session,
+ struct mk_server *server,
+ int client_fd)
+{
+ /* Alloc memory for node */
+ session->_sched_init = MK_TRUE;
+ session->pipelined = MK_FALSE;
+ session->counter_connections = 0;
+ session->close_now = MK_FALSE;
+ session->status = MK_REQUEST_STATUS_INCOMPLETE;
+ session->server = server;
+ session->socket = client_fd;
+
+ /* creation time in unix time */
+ session->init_time = time(NULL);
+
+ session->channel = mk_channel_new(MK_CHANNEL_SOCKET, session->socket);
+ session->channel->io = session->server->network;
+
+ /* Init session request list */
+ mk_list_init(&session->request_list);
+
+ /* Initialize the parser */
+ mk_http_parser_init(&session->parser);
+}
+
+static void splunk_conn_request_init(struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ memset(request, 0, sizeof(struct mk_http_request));
+
+ mk_http_request_init(session, request, session->server);
+
+ request->in_headers.type = MK_STREAM_IOV;
+ request->in_headers.dynamic = MK_FALSE;
+ request->in_headers.cb_consumed = NULL;
+ request->in_headers.cb_finished = NULL;
+ request->in_headers.stream = &request->stream;
+
+ mk_list_add(&request->in_headers._head, &request->stream.inputs);
+
+ request->session = session;
+}
+
+struct splunk_conn *splunk_conn_add(struct flb_connection *connection,
+ struct flb_splunk *ctx)
+{
+ struct splunk_conn *conn;
+ int ret;
+
+ conn = flb_calloc(1, sizeof(struct splunk_conn));
+ if (!conn) {
+ flb_errno();
+ return NULL;
+ }
+
+ conn->connection = connection;
+
+ /* Set data for the event-loop */
+ MK_EVENT_NEW(&connection->event);
+
+ connection->user_data = conn;
+ connection->event.type = FLB_ENGINE_EV_CUSTOM;
+ connection->event.handler = splunk_conn_event;
+
+ /* Connection info */
+ conn->ctx = ctx;
+ conn->buf_len = 0;
+
+ conn->buf_data = flb_malloc(ctx->buffer_chunk_size);
+ if (!conn->buf_data) {
+ flb_errno();
+
+ flb_plg_error(ctx->ins, "could not allocate new connection");
+ flb_free(conn);
+
+ return NULL;
+ }
+ conn->buf_size = ctx->buffer_chunk_size;
+
+ /* Register instance into the event loop */
+ ret = mk_event_add(flb_engine_evl_get(),
+ connection->fd,
+ FLB_ENGINE_EV_CUSTOM,
+ MK_EVENT_READ,
+ &connection->event);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not register new connection");
+
+ flb_free(conn->buf_data);
+ flb_free(conn);
+
+ return NULL;
+ }
+
+ /* Initialize HTTP Session: this is a custom context for Monkey HTTP */
+ splunk_conn_session_init(&conn->session, ctx->server, conn->connection->fd);
+
+ /* Initialize HTTP Request: this is the initial request and it will be reinitialized
+ * automatically after the request is handled so it can be used for the next one.
+ */
+ splunk_conn_request_init(&conn->session, &conn->request);
+
+ /* Link connection node to parent context list */
+ mk_list_add(&conn->_head, &ctx->connections);
+
+ return conn;
+}
+
+int splunk_conn_del(struct splunk_conn *conn)
+{
+ if (conn->session.channel != NULL) {
+ mk_channel_release(conn->session.channel);
+ }
+
+ /* The downstream unregisters the file descriptor from the event-loop
+ * so there's nothing to be done by the plugin
+ */
+ flb_downstream_conn_release(conn->connection);
+
+ mk_list_del(&conn->_head);
+
+ flb_free(conn->buf_data);
+ flb_free(conn);
+
+ return 0;
+}
+
+void splunk_conn_release_all(struct flb_splunk *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct splunk_conn *conn;
+
+ mk_list_foreach_safe(head, tmp, &ctx->connections) {
+ conn = mk_list_entry(head, struct splunk_conn, _head);
+ splunk_conn_del(conn);
+ }
+}
diff --git a/src/fluent-bit/plugins/in_splunk/splunk_conn.h b/src/fluent-bit/plugins/in_splunk/splunk_conn.h
new file mode 100644
index 000000000..f4c955fc0
--- /dev/null
+++ b/src/fluent-bit/plugins/in_splunk/splunk_conn.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_SPLUNK_CONN
+#define FLB_IN_SPLUNK_CONN
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_connection.h>
+
+#include <monkey/mk_http.h>
+#include <monkey/mk_http_parser.h>
+#include <monkey/mk_utils.h>
+
+struct splunk_conn {
+ /* Buffer */
+ char *buf_data; /* Buffer data */
+ int buf_len; /* Data length */
+ int buf_size; /* Buffer size */
+
+ /*
+     * Parser context: we only hold one parser per connection,
+     * which is reused every time we have a new request.
+ */
+ struct mk_http_parser parser;
+ struct mk_http_request request;
+ struct mk_http_session session;
+ struct flb_connection *connection;
+
+ void *ctx; /* Plugin parent context */
+ struct mk_list _head; /* link to flb_http->connections */
+};
+
+struct splunk_conn *splunk_conn_add(struct flb_connection *connection, struct flb_splunk *ctx);
+int splunk_conn_del(struct splunk_conn *conn);
+void splunk_conn_release_all(struct flb_splunk *ctx);
+
+
+#endif
diff --git a/src/fluent-bit/plugins/in_splunk/splunk_prot.c b/src/fluent-bit/plugins/in_splunk/splunk_prot.c
new file mode 100644
index 000000000..5b0606083
--- /dev/null
+++ b/src/fluent-bit/plugins/in_splunk/splunk_prot.c
@@ -0,0 +1,779 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_version.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_gzip.h>
+
+#include <monkey/monkey.h>
+#include <monkey/mk_core.h>
+
+#include "splunk.h"
+#include "splunk_conn.h"
+#include "splunk_prot.h"
+
+#define HTTP_CONTENT_JSON 0
+#define HTTP_CONTENT_TEXT 1
+#define HTTP_CONTENT_UNKNOWN 2
+
+static int send_response(struct splunk_conn *conn, int http_status, char *message)
+{
+ struct flb_splunk *context;
+ size_t sent;
+ int len;
+ flb_sds_t out;
+
+ context = (struct flb_splunk *) conn->ctx;
+
+ out = flb_sds_create_size(256);
+ if (!out) {
+ return -1;
+ }
+
+ if (message) {
+ len = strlen(message);
+ }
+ else {
+ len = 0;
+ }
+
+ if (http_status == 201) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 201 Created \r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "%s"
+ "Content-Length: 0\r\n\r\n",
+ FLB_VERSION_STR,
+ context->success_headers_str);
+ }
+ else if (http_status == 200) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 200 OK\r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "%s"
+ "Content-Length: 0\r\n\r\n",
+ FLB_VERSION_STR,
+ context->success_headers_str);
+ }
+ else if (http_status == 204) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 204 No Content\r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "%s"
+ "\r\n\r\n",
+ FLB_VERSION_STR,
+ context->success_headers_str);
+ }
+ else if (http_status == 400) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 400 Forbidden\r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "Content-Length: %i\r\n\r\n%s",
+ FLB_VERSION_STR,
+ len, message);
+ }
+ else if (http_status == 401) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 401 Unauthorized\r\n"
+ "Server: Fluent Bit v%s\r\n"
+ "Content-Length: %i\r\n\r\n%s",
+ FLB_VERSION_STR,
+ len, message);
+ }
+    /* We should check this operation's result */
+ flb_io_net_write(conn->connection,
+ (void *) out,
+ flb_sds_len(out),
+ &sent);
+
+ flb_sds_destroy(out);
+
+ return 0;
+}
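+
+/*
+ * Illustrative sketch only: for http_status 401 with message
+ * "error: unauthorized\n", the code above emits approximately:
+ *
+ *   HTTP/1.1 401 Unauthorized
+ *   Server: Fluent Bit v<FLB_VERSION_STR>
+ *   Content-Length: 20
+ *
+ *   error: unauthorized
+ */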
+
+static int send_json_message_response(struct splunk_conn *conn, int http_status, char *message)
+{
+ size_t sent;
+ int len;
+ flb_sds_t out;
+
+ out = flb_sds_create_size(256);
+ if (!out) {
+ return -1;
+ }
+
+ if (message) {
+ len = strlen(message);
+ }
+ else {
+ len = 0;
+ }
+
+ if (http_status == 200) {
+ flb_sds_printf(&out,
+ "HTTP/1.1 200 OK\r\n"
+ "Content-Type: application/json\r\n"
+ "Content-Length: %i\r\n\r\n%s",
+ len, message);
+ }
+
+    /* We should check this operation's result */
+ flb_io_net_write(conn->connection,
+ (void *) out,
+ flb_sds_len(out),
+ &sent);
+
+ flb_sds_destroy(out);
+
+ return 0;
+}
+
+/* Get the tag value from the configured tag_key in a record */
+static flb_sds_t tag_key(struct flb_splunk *ctx, msgpack_object *map)
+{
+ size_t map_size = map->via.map.size;
+ msgpack_object_kv *kv;
+ msgpack_object key;
+ msgpack_object val;
+ char *key_str = NULL;
+ char *val_str = NULL;
+ size_t key_str_size = 0;
+ size_t val_str_size = 0;
+ int j;
+ int check = FLB_FALSE;
+ int found = FLB_FALSE;
+ flb_sds_t tag;
+
+ kv = map->via.map.ptr;
+
+ for(j=0; j < map_size; j++) {
+ check = FLB_FALSE;
+ found = FLB_FALSE;
+ key = (kv+j)->key;
+ if (key.type == MSGPACK_OBJECT_BIN) {
+ key_str = (char *) key.via.bin.ptr;
+ key_str_size = key.via.bin.size;
+ check = FLB_TRUE;
+ }
+ if (key.type == MSGPACK_OBJECT_STR) {
+ key_str = (char *) key.via.str.ptr;
+ key_str_size = key.via.str.size;
+ check = FLB_TRUE;
+ }
+
+ if (check == FLB_TRUE) {
+ if (strncmp(ctx->tag_key, key_str, key_str_size) == 0) {
+ val = (kv+j)->val;
+ if (val.type == MSGPACK_OBJECT_BIN) {
+ val_str = (char *) val.via.bin.ptr;
+                    val_str_size = val.via.bin.size;
+ found = FLB_TRUE;
+ break;
+ }
+ if (val.type == MSGPACK_OBJECT_STR) {
+ val_str = (char *) val.via.str.ptr;
+ val_str_size = val.via.str.size;
+ found = FLB_TRUE;
+ break;
+ }
+ }
+ }
+ }
+
+ if (found == FLB_TRUE) {
+ tag = flb_sds_create_len(val_str, val_str_size);
+ if (!tag) {
+ flb_errno();
+ return NULL;
+ }
+ return tag;
+ }
+
+
+ flb_plg_error(ctx->ins, "Could not find tag_key %s in record", ctx->tag_key);
+ return NULL;
+}
+
+/*
+ * Process a raw text payload for a Splunk HEC request: the payload is packed
+ * as a single record under the 'log' key. Returns 0 on success or -1 on
+ * encoding failure.
+ */
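+/*
+ * For example (illustration only): a raw body of "Hello HEC" becomes a single
+ * record equivalent to [<current timestamp>, {"log" => "Hello HEC"}].
+ */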
+static int process_raw_payload_pack(struct flb_splunk *ctx, flb_sds_t tag, char *buf, size_t size)
+{
+ int ret = FLB_EVENT_ENCODER_SUCCESS;
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ &ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("log"),
+ FLB_LOG_EVENT_STRING_VALUE(buf, size));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_log_event_encoder_rollback_record(&ctx->log_encoder);
+ return -1;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ if (tag) {
+ flb_input_log_append(ctx->ins, tag, flb_sds_len(tag),
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+ }
+ else {
+        /* use the default plugin Tag (its internal name, e.g: http.0) */
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "log event encoding error : %d", ret);
+ }
+
+ return 0;
+}
+
+static void process_flb_log_append(struct flb_splunk *ctx, msgpack_object *record,
+ flb_sds_t tag, flb_sds_t tag_from_record,
+ struct flb_time tm) {
+ int ret;
+
+ ret = flb_log_event_encoder_begin_record(&ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(
+ &ctx->log_encoder,
+ &tm);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_body_from_msgpack_object(
+ &ctx->log_encoder,
+ record);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ if (tag_from_record) {
+ flb_input_log_append(ctx->ins,
+ tag_from_record,
+ flb_sds_len(tag_from_record),
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+
+ flb_sds_destroy(tag_from_record);
+ }
+ else if (tag) {
+ flb_input_log_append(ctx->ins, tag, flb_sds_len(tag),
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+ }
+ else {
+            /* use the default plugin Tag (its internal name, e.g: http.0) */
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder.output_buffer,
+ ctx->log_encoder.output_length);
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "Error encoding record : %d", ret);
+ }
+}
+
+static int process_json_payload_pack(struct flb_splunk *ctx, flb_sds_t tag, char *buf, size_t size)
+{
+ size_t off = 0;
+ msgpack_unpacked result;
+ struct flb_time tm;
+ int i = 0;
+ msgpack_object *obj;
+ msgpack_object record;
+ flb_sds_t tag_from_record = NULL;
+
+ flb_time_get(&tm);
+
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, buf, size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ if (result.data.type == MSGPACK_OBJECT_MAP) {
+ tag_from_record = NULL;
+ if (ctx->tag_key) {
+ tag_from_record = tag_key(ctx, &result.data);
+ }
+
+ process_flb_log_append(ctx, &result.data, tag, tag_from_record, tm);
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+ }
+ else if (result.data.type == MSGPACK_OBJECT_ARRAY) {
+ obj = &result.data;
+ for (i = 0; i < obj->via.array.size; i++)
+ {
+ record = obj->via.array.ptr[i];
+
+ tag_from_record = NULL;
+ if (ctx->tag_key) {
+ tag_from_record = tag_key(ctx, &record);
+ }
+
+ process_flb_log_append(ctx, &record, tag, tag_from_record, tm);
+
+ /* TODO : Optimize this
+ *
+ * This is wasteful, considering that we are emitting a series
+ * of records we should start and commit each one and then
+ * emit them all at once after the loop.
+ */
+
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+ }
+
+ break;
+ }
+ else {
+ flb_plg_error(ctx->ins, "skip record from invalid type: %i",
+ result.data.type);
+
+ msgpack_unpacked_destroy(&result);
+
+ return -1;
+ }
+ }
+
+ msgpack_unpacked_destroy(&result);
+
+ return 0;
+}
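+
+/*
+ * Accepted JSON shapes for the function above (illustrative): a single map
+ * such as {"event": "hello", "mytag": "app.logs"} or an array of maps. When
+ * tag_key is configured (e.g. "mytag"), its value overrides the tag used for
+ * that record.
+ */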
+
+static ssize_t parse_hec_payload_json(struct flb_splunk *ctx, flb_sds_t tag,
+ char *payload, size_t size)
+{
+ int ret;
+ int out_size;
+ char *pack;
+ struct flb_pack_state pack_state;
+
+ /* Initialize packer */
+ flb_pack_state_init(&pack_state);
+
+ /* Pack JSON as msgpack */
+ ret = flb_pack_json_state(payload, size,
+ &pack, &out_size, &pack_state);
+ flb_pack_state_reset(&pack_state);
+
+ /* Handle exceptions */
+ if (ret == FLB_ERR_JSON_PART) {
+ flb_plg_warn(ctx->ins, "JSON data is incomplete, skipping");
+ return -1;
+ }
+ else if (ret == FLB_ERR_JSON_INVAL) {
+ flb_plg_warn(ctx->ins, "invalid JSON message, skipping");
+ return -1;
+ }
+ else if (ret == -1) {
+ return -1;
+ }
+
+    /* Process the packed JSON records */
+ process_json_payload_pack(ctx, tag, pack, out_size);
+ flb_free(pack);
+
+ return 0;
+}
+
+static int validate_auth_header(struct flb_splunk *ctx, struct mk_http_request *request)
+{
+ struct mk_http_header *auth_header = NULL;
+
+ if (ctx->auth_header == NULL) {
+ return SPLUNK_AUTH_UNAUTH;
+ }
+
+ auth_header = mk_http_header_get(MK_HEADER_AUTHORIZATION, request, NULL, 0);
+
+ if (auth_header == NULL) {
+ return SPLUNK_AUTH_MISSING_CRED;
+ }
+
+ if (auth_header != NULL && auth_header->val.len > 0) {
+ if (strncmp(ctx->auth_header,
+ auth_header->val.data,
+ strlen(ctx->auth_header)) == 0) {
+ return SPLUNK_AUTH_SUCCESS;
+ }
+ else {
+ return SPLUNK_AUTH_UNAUTHORIZED;
+ }
+ }
+ else {
+ return SPLUNK_AUTH_MISSING_CRED;
+ }
+
+ return SPLUNK_AUTH_SUCCESS;
+}
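+
+/*
+ * Usage note (assumption: ctx->auth_header is built elsewhere in this plugin
+ * from the configured HEC token, typically as "Splunk <token>"): a matching
+ * client request would then carry a header such as
+ *
+ *   Authorization: Splunk 11111111-2222-3333-4444-555555555555
+ *
+ * Only the first strlen(ctx->auth_header) bytes of the received value are
+ * compared.
+ */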
+
+static int handle_hec_payload(struct flb_splunk *ctx, int content_type,
+ flb_sds_t tag, char *buf, size_t size)
+{
+ int ret = -1;
+
+ if (content_type == HTTP_CONTENT_JSON) {
+ ret = parse_hec_payload_json(ctx, tag, buf, size);
+ }
+ else if (content_type == HTTP_CONTENT_TEXT) {
+ ret = process_raw_payload_pack(ctx, tag, buf, size);
+ }
+ else if (content_type == HTTP_CONTENT_UNKNOWN) {
+ if (buf[0] == '{') {
+ ret = parse_hec_payload_json(ctx, tag, buf, size);
+ }
+ else {
+ ret = process_raw_payload_pack(ctx, tag, buf, size);
+ }
+ }
+
+ return ret;
+}
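+
+/*
+ * Dispatch summary for the helper above: "application/json" bodies go through
+ * the JSON parser, "text/plain" bodies are packed as raw records, and for any
+ * other or missing Content-Type the first byte is sniffed, treating a leading
+ * '{' as JSON and anything else as raw text.
+ */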
+
+static int process_hec_payload(struct flb_splunk *ctx, struct splunk_conn *conn,
+ flb_sds_t tag,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ int i = 0;
+ int ret = 0;
+ int type = -1;
+ struct mk_http_header *header;
+ int extra_size = -1;
+ struct mk_http_header *headers_extra;
+ int gzip_compressed = FLB_FALSE;
+ void *gz_data = NULL;
+    size_t gz_size = 0;
+
+ header = &session->parser.headers[MK_HEADER_CONTENT_TYPE];
+ if (header->key.data == NULL) {
+ send_response(conn, 400, "error: header 'Content-Type' is not set\n");
+ return -1;
+ }
+
+ if (header->val.len == 16 &&
+ strncasecmp(header->val.data, "application/json", 16) == 0) {
+ type = HTTP_CONTENT_JSON;
+ }
+ else if (header->val.len == 10 &&
+ strncasecmp(header->val.data, "text/plain", 10) == 0) {
+ type = HTTP_CONTENT_TEXT;
+ }
+ else {
+        /* Not necessary to specify the content-type for Splunk HEC. */
+ flb_plg_debug(ctx->ins, "Mark as unknown type for ingested payloads");
+ type = HTTP_CONTENT_UNKNOWN;
+ }
+
+ if (request->data.len <= 0) {
+ send_response(conn, 400, "error: no payload found\n");
+ return -1;
+ }
+
+ extra_size = session->parser.headers_extra_count;
+ if (extra_size > 0) {
+ for (i = 0; i < extra_size; i++) {
+ headers_extra = &session->parser.headers_extra[i];
+ if (headers_extra->key.len == 16 &&
+ strncasecmp(headers_extra->key.data, "Content-Encoding", 16) == 0) {
+ if (headers_extra->val.len == 4 &&
+ strncasecmp(headers_extra->val.data, "gzip", 4) == 0) {
+ flb_plg_debug(ctx->ins, "body is gzipped");
+ gzip_compressed = FLB_TRUE;
+ }
+ }
+ }
+ }
+
+ if (gzip_compressed == FLB_TRUE) {
+ ret = flb_gzip_uncompress((void *) request->data.data, request->data.len,
+ &gz_data, &gz_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "gzip uncompress is failed");
+ return -1;
+ }
+
+ ret = handle_hec_payload(ctx, type, tag, gz_data, gz_size);
+ flb_free(gz_data);
+ }
+ else {
+ ret = handle_hec_payload(ctx, type, tag, request->data.data, request->data.len);
+ }
+
+    return ret;
+}
+
+static int process_hec_raw_payload(struct flb_splunk *ctx, struct splunk_conn *conn,
+ flb_sds_t tag,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ int ret = -1;
+ struct mk_http_header *header;
+
+ header = &session->parser.headers[MK_HEADER_CONTENT_TYPE];
+ if (header->key.data == NULL) {
+ send_response(conn, 400, "error: header 'Content-Type' is not set\n");
+ return -1;
+ }
+ else if (header->val.len != 10 ||
+ strncasecmp(header->val.data, "text/plain", 10) != 0) {
+        /* Not necessary to specify the content-type for Splunk HEC. */
+ flb_plg_debug(ctx->ins, "Mark as unknown type for ingested payloads");
+ }
+
+ if (request->data.len <= 0) {
+ send_response(conn, 400, "error: no payload found\n");
+ return -1;
+ }
+
+ /* Always handle as raw type of payloads here */
+ ret = process_raw_payload_pack(ctx, tag, request->data.data, request->data.len);
+
+ return ret;
+}
+
+static inline int mk_http_point_header(mk_ptr_t *h,
+ struct mk_http_parser *parser, int key)
+{
+ struct mk_http_header *header;
+
+ header = &parser->headers[key];
+ if (header->type == key) {
+ h->data = header->val.data;
+ h->len = header->val.len;
+ return 0;
+ }
+ else {
+ h->data = NULL;
+ h->len = -1;
+ }
+
+ return -1;
+}
+
+/*
+ * Handle an incoming request. It performs extra checks over the request and,
+ * if everything is OK, it enqueues the incoming payload.
+ */
+int splunk_prot_handle(struct flb_splunk *ctx, struct splunk_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ int i;
+ int ret;
+ int len;
+ char *uri;
+ char *qs;
+ off_t diff;
+ flb_sds_t tag;
+ struct mk_http_header *header;
+
+ if (request->uri.data[0] != '/') {
+ send_response(conn, 400, "error: invalid request\n");
+ return -1;
+ }
+
+ /* Decode URI */
+ uri = mk_utils_url_decode(request->uri);
+ if (!uri) {
+ uri = mk_mem_alloc_z(request->uri.len + 1);
+ if (!uri) {
+ return -1;
+ }
+ memcpy(uri, request->uri.data, request->uri.len);
+ uri[request->uri.len] = '\0';
+ }
+
+ /* Try to match a query string so we can remove it */
+ qs = strchr(uri, '?');
+ if (qs) {
+ /* remove the query string part */
+ diff = qs - uri;
+ uri[diff] = '\0';
+ }
+
+    /* Resolve the tag first */
+ if (ctx->ins->tag && !ctx->ins->tag_default) {
+ tag = flb_sds_create(ctx->ins->tag);
+ if (tag == NULL) {
+ return -1;
+ }
+ }
+ else {
+        /* Compose the tag using the URI */
+ len = strlen(uri);
+
+ if (len == 1) {
+ tag = NULL; /* use default tag */
+ }
+ else {
+ /* New tag skipping the URI '/' */
+ tag = flb_sds_create_len(&uri[1], len - 1);
+ if (!tag) {
+ mk_mem_free(uri);
+ return -1;
+ }
+
+            /* Sanitize: only allow alphanumeric chars plus '_' and '.' */
+ for (i = 0; i < flb_sds_len(tag); i++) {
+ if (!isalnum(tag[i]) && tag[i] != '_' && tag[i] != '.') {
+ tag[i] = '_';
+ }
+ }
+ }
+ }
+
+ /* Check if we have a Host header: Hostname ; port */
+ mk_http_point_header(&request->host, &session->parser, MK_HEADER_HOST);
+
+ /* Header: Connection */
+ mk_http_point_header(&request->connection, &session->parser,
+ MK_HEADER_CONNECTION);
+
+ /* HTTP/1.1 needs Host header */
+ if (request->host.data == NULL && request->protocol == MK_HTTP_PROTOCOL_11) {
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+
+ return -1;
+ }
+
+ /* Should we close the session after this request ? */
+ mk_http_keepalive_check(session, request, ctx->server);
+
+ /* Content Length */
+ header = &session->parser.headers[MK_HEADER_CONTENT_LENGTH];
+ if (header->type == MK_HEADER_CONTENT_LENGTH) {
+ request->_content_length.data = header->val.data;
+ request->_content_length.len = header->val.len;
+ }
+ else {
+ request->_content_length.data = NULL;
+ }
+
+ if (request->method == MK_METHOD_GET) {
+        /* Handle health monitoring of the Splunk HEC endpoint for load balancers */
+ if (strcasecmp(uri, "/services/collector/health") == 0) {
+ send_json_message_response(conn, 200, "{\"text\":\"Success\",\"code\":200}");
+ }
+ else {
+ send_response(conn, 400, "error: invalid HTTP endpoint\n");
+ }
+
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+
+ return 0;
+ }
+
+    /* The services/collector endpoints require authentication
+     * when a splunk_token is provided */
+ ret = validate_auth_header(ctx, request);
+ if (ret < 0){
+ send_response(conn, 401, "error: unauthroized\n");
+ if (ret == SPLUNK_AUTH_MISSING_CRED) {
+ flb_plg_warn(ctx->ins, "missing credentials in request headers");
+ }
+ else if (ret == SPLUNK_AUTH_UNAUTHORIZED) {
+ flb_plg_warn(ctx->ins, "wrong credentials in request headers");
+ }
+
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+
+ return -1;
+ }
+
+ /* Handle every ingested payload cleanly */
+ flb_log_event_encoder_reset(&ctx->log_encoder);
+
+ if (request->method == MK_METHOD_POST) {
+ if (strcasecmp(uri, "/services/collector/raw") == 0) {
+ ret = process_hec_raw_payload(ctx, conn, tag, session, request);
+
+            if (ret != 0) {
+                send_json_message_response(conn, 400, "{\"text\":\"Invalid data format\",\"code\":6}");
+            }
+            else {
+                send_json_message_response(conn, 200, "{\"text\":\"Success\",\"code\":0}");
+            }
+ }
+ else if (strcasecmp(uri, "/services/collector/event") == 0 ||
+ strcasecmp(uri, "/services/collector") == 0) {
+ ret = process_hec_payload(ctx, conn, tag, session, request);
+
+            if (ret != 0) {
+                send_json_message_response(conn, 400, "{\"text\":\"Invalid data format\",\"code\":6}");
+            }
+            else {
+                send_json_message_response(conn, 200, "{\"text\":\"Success\",\"code\":0}");
+            }
+ }
+ else {
+ send_response(conn, 400, "error: invalid HTTP endpoint\n");
+
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+
+ return -1;
+ }
+ }
+ else {
+        /* HEAD, PUT, PATCH and DELETE methods are not allowed. */
+
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+
+ send_response(conn, 400, "error: invalid HTTP method\n");
+ return -1;
+ }
+
+ flb_sds_destroy(tag);
+ mk_mem_free(uri);
+
+ return ret;
+}
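+
+/*
+ * Example request (illustration only; host, port and token are placeholders):
+ *
+ *   curl -s -X POST http://127.0.0.1:8088/services/collector/event \
+ *        -H "Authorization: Splunk <hec-token>" \
+ *        -H "Content-Type: application/json" \
+ *        -d '{"event": "hello world"}'
+ *
+ * A GET on /services/collector/health answers {"text":"Success","code":200}
+ * so load balancers can probe the listener.
+ */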
+
+/*
+ * Handle an incoming request which has resulted in an http parser error.
+ */
+int splunk_prot_handle_error(struct flb_splunk *ctx, struct splunk_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request)
+{
+ send_response(conn, 400, "error: invalid request\n");
+ return -1;
+}
diff --git a/src/fluent-bit/plugins/in_splunk/splunk_prot.h b/src/fluent-bit/plugins/in_splunk/splunk_prot.h
new file mode 100644
index 000000000..100f12d2e
--- /dev/null
+++ b/src/fluent-bit/plugins/in_splunk/splunk_prot.h
@@ -0,0 +1,36 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_SPLUNK_PROT
+#define FLB_IN_SPLUNK_PROT
+
+#define SPLUNK_AUTH_UNAUTH 1
+#define SPLUNK_AUTH_SUCCESS 0
+#define SPLUNK_AUTH_MISSING_CRED -1
+#define SPLUNK_AUTH_UNAUTHORIZED -2
+
+int splunk_prot_handle(struct flb_splunk *ctx, struct splunk_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request);
+
+int splunk_prot_handle_error(struct flb_splunk *ctx, struct splunk_conn *conn,
+ struct mk_http_session *session,
+ struct mk_http_request *request);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_statsd/CMakeLists.txt b/src/fluent-bit/plugins/in_statsd/CMakeLists.txt
new file mode 100644
index 000000000..5b9dde230
--- /dev/null
+++ b/src/fluent-bit/plugins/in_statsd/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ statsd.c)
+
+FLB_PLUGIN(in_statsd "${src}" "")
diff --git a/src/fluent-bit/plugins/in_statsd/statsd.c b/src/fluent-bit/plugins/in_statsd/statsd.c
new file mode 100644
index 000000000..0cccb679a
--- /dev/null
+++ b/src/fluent-bit/plugins/in_statsd/statsd.c
@@ -0,0 +1,386 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_socket.h>
+#include <fluent-bit/flb_pack.h>
+
+#define MAX_PACKET_SIZE 65536
+#define DEFAULT_LISTEN "0.0.0.0"
+#define DEFAULT_PORT 8125
+
+#define STATSD_TYPE_COUNTER 1
+#define STATSD_TYPE_GAUGE 2
+#define STATSD_TYPE_TIMER 3
+#define STATSD_TYPE_SET 4
+
+struct flb_statsd {
+ char *buf; /* buffer */
+ char listen[256]; /* listening address (RFC-2181) */
+ char port[6]; /* listening port (RFC-793) */
+ flb_sockfd_t server_fd; /* server socket */
+ flb_pipefd_t coll_fd; /* server handler */
+ struct flb_input_instance *ins; /* input instance */
+ struct flb_log_event_encoder *log_encoder;
+};
+
+/*
+ * The "statsd_message" represents a single line in UDP packet.
+ * It's just a bunch of pointers to ephemeral buffer.
+ */
+struct statsd_message {
+ char *bucket;
+ int bucket_len;
+ char *value;
+ int value_len;
+ int type;
+ double sample_rate;
+};
+
+static int get_statsd_type(char *str)
+{
+ switch (*str) {
+ case 'g':
+ return STATSD_TYPE_GAUGE;
+ case 's':
+ return STATSD_TYPE_SET;
+ case 'c':
+ return STATSD_TYPE_COUNTER;
+ case 'm':
+ if (*(str + 1) == 's') {
+ return STATSD_TYPE_TIMER;
+ }
+ }
+ return STATSD_TYPE_COUNTER;
+}
+
+static int is_incremental(char *str)
+{
+ return (*str == '+' || *str == '-');
+}
+
+static int statsd_process_message(struct flb_statsd *ctx,
+ struct statsd_message *m)
+{
+ int ret;
+
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ switch (m->type) {
+ case STATSD_TYPE_COUNTER:
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+
+ FLB_LOG_EVENT_CSTRING_VALUE("type"),
+ FLB_LOG_EVENT_CSTRING_VALUE("counter"),
+ FLB_LOG_EVENT_CSTRING_VALUE("bucket"),
+ FLB_LOG_EVENT_STRING_VALUE(m->bucket, m->bucket_len),
+ FLB_LOG_EVENT_CSTRING_VALUE("value"),
+ FLB_LOG_EVENT_DOUBLE_VALUE(strtod(m->value, NULL)),
+ FLB_LOG_EVENT_CSTRING_VALUE("sample_rate"),
+ FLB_LOG_EVENT_DOUBLE_VALUE(m->sample_rate));
+
+ break;
+ case STATSD_TYPE_GAUGE:
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+
+ FLB_LOG_EVENT_CSTRING_VALUE("type"),
+ FLB_LOG_EVENT_CSTRING_VALUE("gauge"),
+ FLB_LOG_EVENT_CSTRING_VALUE("bucket"),
+ FLB_LOG_EVENT_STRING_VALUE(m->bucket, m->bucket_len),
+ FLB_LOG_EVENT_CSTRING_VALUE("value"),
+ FLB_LOG_EVENT_DOUBLE_VALUE(strtod(m->value, NULL)),
+ FLB_LOG_EVENT_CSTRING_VALUE("incremental"),
+ FLB_LOG_EVENT_INT64_VALUE(is_incremental(m->value)));
+ break;
+ case STATSD_TYPE_TIMER:
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+
+ FLB_LOG_EVENT_CSTRING_VALUE("type"),
+ FLB_LOG_EVENT_CSTRING_VALUE("timer"),
+ FLB_LOG_EVENT_CSTRING_VALUE("bucket"),
+ FLB_LOG_EVENT_STRING_VALUE(m->bucket, m->bucket_len),
+ FLB_LOG_EVENT_CSTRING_VALUE("value"),
+ FLB_LOG_EVENT_DOUBLE_VALUE(strtod(m->value, NULL)),
+ FLB_LOG_EVENT_CSTRING_VALUE("sample_rate"),
+                FLB_LOG_EVENT_DOUBLE_VALUE(m->sample_rate));
+            break;
+
+ case STATSD_TYPE_SET:
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+
+ FLB_LOG_EVENT_CSTRING_VALUE("type"),
+ FLB_LOG_EVENT_CSTRING_VALUE("set"),
+ FLB_LOG_EVENT_CSTRING_VALUE("bucket"),
+ FLB_LOG_EVENT_STRING_VALUE(m->bucket, m->bucket_len),
+ FLB_LOG_EVENT_CSTRING_VALUE("value"),
+ FLB_LOG_EVENT_STRING_VALUE(m->value, m->value_len));
+ break;
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ return ret;
+}
+
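+/*
+ * Example (illustration only): the datagram line
+ *
+ *   page.views:1|c|@0.5
+ *
+ * parses into bucket = "page.views", value = "1", type = counter and
+ * sample_rate = 0.5; when the "|@rate" suffix is absent or zero, the sample
+ * rate defaults to 1.0.
+ */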
+static int statsd_process_line(struct flb_statsd *ctx, char *line)
+{
+ char *colon, *bar, *atmark;
+ struct statsd_message m;
+
+ /*
+ * bucket:value|type|@sample_rate
+ * ------
+ */
+ colon = strchr(line, ':');
+ if (colon == NULL) {
+ flb_plg_error(ctx->ins, "no bucket name found");
+ return -1;
+ }
+ m.bucket = line;
+ m.bucket_len = (colon - line);
+
+ /*
+ * bucket:value|type|@sample_rate
+ * ----
+ */
+ bar = strchr(colon + 1, '|');
+ if (bar == NULL) {
+ flb_plg_error(ctx->ins, "no metric type found");
+ return -1;
+ }
+ m.type = get_statsd_type(bar + 1);
+
+ /*
+ * bucket:value|type|@sample_rate
+ * -----
+ */
+ m.value = colon + 1;
+ m.value_len = (bar - colon - 1);
+
+ /*
+ * bucket:value|type|@sample_rate
+ * ------------
+ */
+ atmark = strstr(bar + 1, "|@");
+ if (atmark == NULL || atof(atmark + 2) == 0) {
+ m.sample_rate = 1.0;
+ }
+ else {
+ m.sample_rate = atof(atmark + 2);
+ }
+
+ return statsd_process_message(ctx, &m);
+}
+
+
+static int cb_statsd_receive(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ char *line;
+ int len;
+ struct flb_statsd *ctx = data;
+
+ /* Receive a UDP datagram */
+ len = recv(ctx->server_fd, ctx->buf, MAX_PACKET_SIZE - 1, 0);
+ if (len < 0) {
+ flb_errno();
+ return -1;
+ }
+ ctx->buf[len] = '\0';
+
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+ /* Process all messages in buffer */
+ line = strtok(ctx->buf, "\n");
+ while (line != NULL) {
+ flb_plg_trace(ctx->ins, "received a line: '%s'", line);
+
+ ret = statsd_process_line(ctx, line);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "failed to process line: '%s'", line);
+
+ break;
+ }
+
+ line = strtok(NULL, "\n");
+ }
+
+ if (ctx->log_encoder->output_length > 0) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ }
+ else {
+ flb_plg_error(ctx->ins, "log event encoding error : %d", ret);
+ }
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ return 0;
+}
+
+static int cb_statsd_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct flb_statsd *ctx;
+ char *listen;
+ int port;
+ int ret;
+
+ ctx = flb_calloc(1, sizeof(struct flb_statsd));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(ins, "could not initialize event encoder");
+ flb_free(ctx);
+
+ return -1;
+ }
+
+ ctx->buf = flb_malloc(MAX_PACKET_SIZE);
+ if (!ctx->buf) {
+ flb_errno();
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to load configuration");
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Listening address */
+ if (ins->host.listen) {
+ listen = ins->host.listen;
+ }
+ else {
+ listen = DEFAULT_LISTEN;
+ }
+ strncpy(ctx->listen, listen, sizeof(ctx->listen) - 1);
+
+ /* Listening port */
+ if (ins->host.port) {
+ port = ins->host.port;
+ }
+ else {
+ port = DEFAULT_PORT;
+ }
+ snprintf(ctx->port, sizeof(ctx->port), "%hu", (unsigned short) port);
+
+ /* Export plugin context */
+ flb_input_set_context(ins, ctx);
+
+ /* Accepts metrics from UDP connections. */
+ ctx->server_fd = flb_net_server_udp(ctx->port, ctx->listen);
+ if (ctx->server_fd == -1) {
+ flb_plg_error(ctx->ins, "can't bind to %s:%s", ctx->listen, ctx->port);
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ flb_free(ctx->buf);
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Set up the UDP connection callback */
+ ctx->coll_fd = flb_input_set_collector_socket(ins, cb_statsd_receive,
+ ctx->server_fd, config);
+ if (ctx->coll_fd == -1) {
+ flb_plg_error(ctx->ins, "cannot set up connection callback ");
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ flb_socket_close(ctx->server_fd);
+ flb_free(ctx->buf);
+ flb_free(ctx);
+ return -1;
+ }
+
+ flb_plg_info(ctx->ins, "start UDP server on %s:%s", ctx->listen, ctx->port);
+ return 0;
+}
+
+static void cb_statsd_pause(void *data, struct flb_config *config)
+{
+ struct flb_statsd *ctx = data;
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+}
+
+static void cb_statsd_resume(void *data, struct flb_config *config)
+{
+ struct flb_statsd *ctx = data;
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+}
+
+static int cb_statsd_exit(void *data, struct flb_config *config)
+{
+ struct flb_statsd *ctx = data;
+
+ if (ctx->log_encoder != NULL) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ }
+
+ flb_socket_close(ctx->server_fd);
+ flb_free(ctx->buf);
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_statsd_plugin = {
+ .name = "statsd",
+ .description = "StatsD input plugin",
+ .cb_init = cb_statsd_init,
+ .cb_pre_run = NULL,
+ .cb_collect = NULL,
+ .cb_ingest = NULL,
+ .cb_flush_buf = NULL,
+ .cb_pause = cb_statsd_pause,
+ .cb_resume = cb_statsd_resume,
+ .cb_exit = cb_statsd_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET_SERVER,
+};
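+
+/*
+ * Quick manual test (illustration only; assumes the default 0.0.0.0:8125
+ * listener and a netcat build with UDP support):
+ *
+ *   echo "page.views:1|c" | nc -u -w1 127.0.0.1 8125
+ */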
diff --git a/src/fluent-bit/plugins/in_stdin/CMakeLists.txt b/src/fluent-bit/plugins/in_stdin/CMakeLists.txt
new file mode 100644
index 000000000..3c2e2bdfe
--- /dev/null
+++ b/src/fluent-bit/plugins/in_stdin/CMakeLists.txt
@@ -0,0 +1,10 @@
+# FIXME: there is something wrong when linking objects and this
+# static plugin, I should not require to link to a specific symbol
+# if the object was already linked from fluent-bit core on src/, also
+# jsmn should not be required.
+
+set(src
+ in_stdin.c
+ )
+
+FLB_PLUGIN(in_stdin "${src}" "")
diff --git a/src/fluent-bit/plugins/in_stdin/in_stdin.c b/src/fluent-bit/plugins/in_stdin/in_stdin.c
new file mode 100644
index 000000000..ff3114067
--- /dev/null
+++ b/src/fluent-bit/plugins/in_stdin/in_stdin.c
@@ -0,0 +1,472 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_engine.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_utils.h>
+
+#include <msgpack.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include "in_stdin.h"
+
+static inline void consume_bytes(char *buf, int bytes, int length)
+{
+ memmove(buf, buf + bytes, length - bytes);
+}
+
+static inline int process_pack(struct flb_in_stdin_config *ctx,
+ char *data, size_t data_size)
+{
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ msgpack_unpacked result;
+ msgpack_object entry;
+ int ret;
+ size_t off;
+
+ ret = flb_log_event_decoder_init(&log_decoder, NULL, 0);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ return -1;
+ }
+
+ /* Queue the data with time field */
+ msgpack_unpacked_init(&result);
+
+ off = 0;
+ while (msgpack_unpack_next(&result, data, data_size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ entry = result.data;
+
+ if (entry.type == MSGPACK_OBJECT_MAP) {
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_body_from_msgpack_object(
+ ctx->log_encoder, &entry);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = 0;
+ }
+ else {
+ ret = -1;
+
+ break;
+ }
+ }
+ else if (entry.type == MSGPACK_OBJECT_ARRAY) {
+ ret = flb_event_decoder_decode_object(&log_decoder,
+ &log_event,
+ &entry);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ ret = -1;
+
+ break;
+ }
+
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(ctx->log_encoder,
+ &log_event.timestamp);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_metadata_from_msgpack_object(
+ ctx->log_encoder, log_event.metadata);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_body_from_msgpack_object(
+ ctx->log_encoder, log_event.body);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = 0;
+ }
+ else {
+ ret = -1;
+
+ break;
+ }
+ }
+ else {
+ /*
+             * Upon exception, notify the user of the problem but continue
+             * working; do not discard valid JSON entries.
+ */
+ flb_plg_error(ctx->ins, "invalid record found, "
+ "it's not a JSON map or array");
+ ret = -1;
+ break;
+ }
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ msgpack_unpacked_destroy(&result);
+
+ return ret;
+}
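+
+/*
+ * process_pack() accepts two record shapes per unpacked msgpack entry
+ * (illustrative JSON equivalents):
+ *
+ *   {"key": "value"}                  -> packed with the current timestamp
+ *   [1665000000, {"key": "value"}]    -> timestamp taken from the log event
+ *
+ * Anything else is reported as an invalid record.
+ */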
+
+static inline int pack_regex(struct flb_in_stdin_config *ctx,
+ struct flb_time *t, char *data, size_t data_size)
+{
+ int ret;
+
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(ctx->log_encoder, t);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_body_from_raw_msgpack(
+ ctx->log_encoder, data, data_size);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = 0;
+ }
+ else {
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int in_stdin_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int bytes = 0;
+ int pack_size;
+ int ret;
+ char *pack;
+ void *out_buf;
+ size_t out_size;
+ struct flb_time out_time;
+ struct flb_in_stdin_config *ctx = in_context;
+
+ bytes = read(ctx->fd,
+ ctx->buf + ctx->buf_len,
+ ctx->buf_size - ctx->buf_len - 1);
+ flb_plg_trace(ctx->ins, "stdin read() = %i", bytes);
+
+ if (bytes == 0) {
+ flb_plg_warn(ctx->ins, "end of file (stdin closed by remote end)");
+ }
+
+ if (bytes <= 0) {
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+ flb_engine_exit(config);
+ return -1;
+ }
+ ctx->buf_len += bytes;
+ ctx->buf[ctx->buf_len] = '\0';
+
+ while (ctx->buf_len > 0) {
+ /* Try built-in JSON parser */
+ if (!ctx->parser) {
+ ret = flb_pack_json_state(ctx->buf, ctx->buf_len,
+ &pack, &pack_size, &ctx->pack_state);
+ if (ret == FLB_ERR_JSON_PART) {
+ flb_plg_debug(ctx->ins, "data incomplete, waiting for more...");
+ return 0;
+ }
+ else if (ret == FLB_ERR_JSON_INVAL) {
+ flb_plg_debug(ctx->ins, "invalid JSON message, skipping");
+ flb_pack_state_reset(&ctx->pack_state);
+ flb_pack_state_init(&ctx->pack_state);
+ ctx->pack_state.multiple = FLB_TRUE;
+ ctx->buf_len = 0;
+ return -1;
+ }
+
+ /* Process valid packaged records */
+ process_pack(ctx, pack, pack_size);
+
+ /* Move out processed bytes */
+ consume_bytes(ctx->buf, ctx->pack_state.last_byte, ctx->buf_len);
+ ctx->buf_len -= ctx->pack_state.last_byte;
+ ctx->buf[ctx->buf_len] = '\0';
+
+ flb_pack_state_reset(&ctx->pack_state);
+ flb_pack_state_init(&ctx->pack_state);
+ ctx->pack_state.multiple = FLB_TRUE;
+
+ flb_free(pack);
+
+ if (ctx->log_encoder->output_length > 0) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ }
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ return 0;
+ }
+ else {
+ /* Reset time for each line */
+ flb_time_zero(&out_time);
+
+ /* Use the defined parser */
+ ret = flb_parser_do(ctx->parser, ctx->buf, ctx->buf_len,
+ &out_buf, &out_size, &out_time);
+
+ if (ret >= 0) {
+ if (flb_time_to_nanosec(&out_time) == 0L) {
+ flb_time_get(&out_time);
+ }
+ pack_regex(ctx, &out_time, out_buf, out_size);
+ flb_free(out_buf);
+
+ if (ctx->log_encoder->output_length > 0) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ }
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+ }
+ else {
+ /* we need more data ? */
+ flb_plg_trace(ctx->ins, "data mismatch or incomplete : %d", ret);
+ return 0;
+ }
+ }
+
+ if (ret == ctx->buf_len) {
+ ctx->buf_len = 0;
+ break;
+ }
+ else if (ret >= 0) {
+ /*
+             * 'ret' is the last byte consumed by the regex engine; we need
+             * to advance its position.
+ */
+ ret++;
+ consume_bytes(ctx->buf, ret, ctx->buf_len);
+ ctx->buf_len -= ret;
+ ctx->buf[ctx->buf_len] = '\0';
+ }
+ }
+
+ return 0;
+}
+
+/* Read stdin config */
+static int in_stdin_config_init(struct flb_in_stdin_config *ctx,
+ struct flb_input_instance *in,
+ struct flb_config *config)
+{
+ int ret;
+
+ ctx->buf_size = DEFAULT_BUF_SIZE;
+ ctx->buf = NULL;
+ ctx->buf_len = 0;
+ ctx->ins = in;
+
+ ret = flb_input_config_map_set(in, (void *)ctx);
+ if (ret == -1) {
+ return -1;
+ }
+
+ /* parser settings */
+ if (ctx->parser_name) {
+ ctx->parser = flb_parser_get(ctx->parser_name, config);
+ if (!ctx->parser) {
+ flb_plg_error(ctx->ins, "requested parser '%s' not found", ctx->parser_name);
+ return -1;
+ }
+ }
+
+ /* buffer size setting */
+ if (ctx->buf_size == -1) {
+ flb_plg_error(ctx->ins, "buffer_size is invalid");
+ return -1;
+ }
+ else if (ctx->buf_size < DEFAULT_BUF_SIZE) {
+ flb_plg_error(ctx->ins, "buffer_size '%zu' must be at least %i bytes",
+ ctx->buf_size, DEFAULT_BUF_SIZE);
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "buf_size=%zu", ctx->buf_size);
+ return 0;
+}
+
+static void in_stdin_config_destroy(struct flb_in_stdin_config *ctx)
+{
+ if (!ctx) {
+ return;
+ }
+
+ if (ctx->log_encoder != NULL) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ }
+
+ /* release buffer */
+ if (ctx->buf) {
+ flb_free(ctx->buf);
+ }
+ flb_free(ctx);
+}
+
+/* Initialize plugin */
+static int in_stdin_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int fd;
+ int ret;
+ struct flb_in_stdin_config *ctx;
+
+ /* Allocate space for the configuration context */
+ ctx = flb_calloc(1, sizeof(struct flb_in_stdin_config));
+ if (!ctx) {
+ return -1;
+ }
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(in, "could not initialize event encoder");
+
+ goto init_error;
+ }
+
+ /* Initialize stdin config */
+ ret = in_stdin_config_init(ctx, in, config);
+ if (ret < 0) {
+ goto init_error;
+ }
+
+ ctx->buf = flb_malloc(ctx->buf_size);
+ if (!ctx->buf) {
+ flb_errno();
+ goto init_error;
+ }
+
+ /* Clone the standard input file descriptor */
+ fd = dup(STDIN_FILENO);
+ if (fd == -1) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "Could not open standard input!");
+ goto init_error;
+ }
+ ctx->fd = fd;
+
+ /* Always initialize built-in JSON pack state */
+ flb_pack_state_init(&ctx->pack_state);
+ ctx->pack_state.multiple = FLB_TRUE;
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ /* Collect upon data available on the standard input */
+ ret = flb_input_set_collector_event(in,
+ in_stdin_collect,
+ ctx->fd,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Could not set collector for STDIN input plugin");
+ goto init_error;
+ }
+ ctx->coll_fd = ret;
+
+ return 0;
+
+init_error:
+ in_stdin_config_destroy(ctx);
+
+ return -1;
+}
+
+/* Cleanup stdin input */
+static int in_stdin_exit(void *in_context, struct flb_config *config)
+{
+ struct flb_in_stdin_config *ctx = in_context;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->fd >= 0) {
+ close(ctx->fd);
+ }
+ flb_pack_state_reset(&ctx->pack_state);
+ in_stdin_config_destroy(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "parser", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_stdin_config, parser_name),
+ "Set and use a fluent-bit parser"
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_size", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_stdin_config, buf_size),
+ "Set the read buffer size"
+ },
+ /* EOF */
+ {0}
+};
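+
+/*
+ * Example configuration (classic mode, illustration only; key names follow
+ * the config map above):
+ *
+ *   [INPUT]
+ *       name        stdin
+ *       parser      json
+ *       buffer_size 32k
+ *
+ * The parser entry is optional; without it the plugin falls back to the
+ * built-in JSON pack state.
+ */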
+
+/* Plugin reference */
+struct flb_input_plugin in_stdin_plugin = {
+ .name = "stdin",
+ .description = "Standard Input",
+ .cb_init = in_stdin_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_stdin_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_stdin_exit,
+ .config_map = config_map
+};
diff --git a/src/fluent-bit/plugins/in_stdin/in_stdin.h b/src/fluent-bit/plugins/in_stdin/in_stdin.h
new file mode 100644
index 000000000..0c165809b
--- /dev/null
+++ b/src/fluent-bit/plugins/in_stdin/in_stdin.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_STDIN_H
+#define FLB_IN_STDIN_H
+
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#define DEFAULT_BUF_SIZE 16000
+
+/* STDIN Input configuration & context */
+struct flb_in_stdin_config {
+ int fd; /* stdin file descriptor */
+ int coll_fd; /* collector fd */
+ size_t buf_size; /* size of a buffer */
+ int buf_len; /* read buffer length */
+ char *buf; /* read buffer */
+ flb_sds_t parser_name; /* name of the parser */
+
+ /* Parser / Format */
+ struct flb_parser *parser;
+ struct flb_pack_state pack_state;
+ struct flb_input_instance *ins;
+ struct flb_log_event_encoder *log_encoder;
+};
+
+extern struct flb_input_plugin in_stdin_plugin;
+
+#endif
diff --git a/src/fluent-bit/plugins/in_storage_backlog/CMakeLists.txt b/src/fluent-bit/plugins/in_storage_backlog/CMakeLists.txt
new file mode 100644
index 000000000..ae2da6341
--- /dev/null
+++ b/src/fluent-bit/plugins/in_storage_backlog/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ sb.c
+ )
+
+FLB_PLUGIN(in_storage_backlog "${src}" "chunkio-static")
diff --git a/src/fluent-bit/plugins/in_storage_backlog/sb.c b/src/fluent-bit/plugins/in_storage_backlog/sb.c
new file mode 100644
index 000000000..1380caf8a
--- /dev/null
+++ b/src/fluent-bit/plugins/in_storage_backlog/sb.c
@@ -0,0 +1,713 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_input_chunk.h>
+#include <fluent-bit/flb_storage.h>
+#include <fluent-bit/flb_utils.h>
+#include <chunkio/chunkio.h>
+#include <chunkio/cio_error.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#ifndef FLB_SYSTEM_WINDOWS
+#include <unistd.h>
+#endif
+
+struct sb_out_chunk {
+ struct cio_chunk *chunk;
+ struct cio_stream *stream;
+ size_t size;
+ struct mk_list _head;
+};
+
+struct sb_out_queue {
+ struct flb_output_instance *ins;
+ struct mk_list chunks; /* head for every sb_out_chunk */
+ struct mk_list _head;
+};
+
+struct flb_sb {
+ int coll_fd; /* collector id */
+ size_t mem_limit; /* memory limit */
+ struct flb_input_instance *ins; /* input instance */
+ struct cio_ctx *cio; /* chunk i/o instance */
+ struct mk_list backlogs; /* list of all pending chunks segregated by output plugin */
+};
+
+
+static inline struct flb_sb *sb_get_context(struct flb_config *config);
+
+static struct sb_out_chunk *sb_allocate_chunk(struct cio_chunk *chunk,
+ struct cio_stream *stream,
+ size_t size);
+
+static void sb_destroy_chunk(struct sb_out_chunk *chunk);
+
+static void sb_destroy_backlog(struct sb_out_queue *backlog, struct flb_sb *context);
+
+static int sb_allocate_backlogs(struct flb_sb *ctx);
+
+static void sb_destroy_backlogs(struct flb_sb *ctx);
+
+static struct sb_out_queue *sb_find_segregated_backlog_by_output_plugin_instance(
+ struct flb_output_instance *output_plugin,
+ struct flb_sb *context);
+
+static void sb_remove_chunk_from_segregated_backlog(struct cio_chunk *target_chunk,
+ struct sb_out_queue *backlog,
+ int destroy);
+
+static void sb_remove_chunk_from_segregated_backlogs(struct cio_chunk *chunk,
+ struct flb_sb *context);
+
+static int sb_append_chunk_to_segregated_backlog(struct cio_chunk *target_chunk,
+ struct cio_stream *stream,
+ size_t target_chunk_size,
+ struct sb_out_queue *backlog);
+
+static int sb_append_chunk_to_segregated_backlogs(struct cio_chunk *target_chunk,
+ struct cio_stream *stream,
+ struct flb_sb *context);
+
+int sb_segregate_chunks(struct flb_config *config);
+
+int sb_release_output_queue_space(struct flb_output_instance *output_plugin,
+ ssize_t *required_space);
+
+ssize_t sb_get_releasable_output_queue_space(struct flb_output_instance *output_plugin,
+ size_t required_space);
+
+
+static inline struct flb_sb *sb_get_context(struct flb_config *config)
+{
+ if (config == NULL) {
+ return NULL;
+ }
+
+ if (config->storage_input_plugin == NULL) {
+ return NULL;
+ }
+
+ return ((struct flb_input_instance *) config->storage_input_plugin)->context;
+}
+
+static struct sb_out_chunk *sb_allocate_chunk(struct cio_chunk *chunk,
+ struct cio_stream *stream,
+ size_t size)
+{
+ struct sb_out_chunk *result;
+
+ result = (struct sb_out_chunk *) flb_calloc(1, sizeof(struct sb_out_chunk));
+
+ if (result != NULL) {
+ result->chunk = chunk;
+ result->stream = stream;
+ result->size = size;
+ }
+
+ return result;
+}
+
+static void sb_destroy_chunk(struct sb_out_chunk *chunk)
+{
+ flb_free(chunk);
+}
+
+static void sb_destroy_backlog(struct sb_out_queue *backlog, struct flb_sb *context)
+{
+ struct mk_list *chunk_iterator_tmp;
+ struct mk_list *chunk_iterator;
+ struct sb_out_chunk *chunk;
+
+ mk_list_foreach_safe(chunk_iterator, chunk_iterator_tmp, &backlog->chunks) {
+ chunk = mk_list_entry(chunk_iterator, struct sb_out_chunk, _head);
+
+ sb_remove_chunk_from_segregated_backlogs(chunk->chunk, context);
+ }
+}
+
+static int sb_allocate_backlogs(struct flb_sb *context)
+{
+ struct mk_list *output_plugin_iterator;
+ struct flb_output_instance *output_plugin;
+ struct sb_out_queue *backlog;
+
+ mk_list_foreach(output_plugin_iterator, &context->ins->config->outputs) {
+ output_plugin = mk_list_entry(output_plugin_iterator,
+ struct flb_output_instance,
+ _head);
+
+ backlog = (struct sb_out_queue *) \
+ flb_calloc(1, sizeof(struct sb_out_queue));
+
+ if (backlog == NULL) {
+ sb_destroy_backlogs(context);
+
+ return -1;
+ }
+
+ backlog->ins = output_plugin;
+
+ mk_list_init(&backlog->chunks);
+
+ mk_list_add(&backlog->_head, &context->backlogs);
+ }
+
+ return 0;
+}
+
+static void sb_destroy_backlogs(struct flb_sb *context)
+{
+ struct mk_list *backlog_iterator_tmp;
+ struct mk_list *backlog_iterator;
+ struct sb_out_queue *backlog;
+
+ mk_list_foreach_safe(backlog_iterator, backlog_iterator_tmp, &context->backlogs) {
+ backlog = mk_list_entry(backlog_iterator, struct sb_out_queue, _head);
+
+ mk_list_del(&backlog->_head);
+
+ sb_destroy_backlog(backlog, context);
+
+ flb_free(backlog);
+ }
+}
+
+static struct sb_out_queue *sb_find_segregated_backlog_by_output_plugin_instance(
+ struct flb_output_instance *output_plugin,
+ struct flb_sb *context)
+{
+ struct mk_list *backlog_iterator;
+ struct sb_out_queue *backlog;
+
+ mk_list_foreach(backlog_iterator, &context->backlogs) {
+ backlog = mk_list_entry(backlog_iterator, struct sb_out_queue, _head);
+
+ if (output_plugin == backlog->ins) {
+ return backlog;
+ }
+ }
+
+ return NULL;
+}
+
+static void sb_remove_chunk_from_segregated_backlog(struct cio_chunk *target_chunk,
+ struct sb_out_queue *backlog,
+ int destroy)
+{
+ struct mk_list *chunk_iterator_tmp;
+ struct mk_list *chunk_iterator;
+ struct sb_out_chunk *chunk;
+
+ mk_list_foreach_safe(chunk_iterator, chunk_iterator_tmp, &backlog->chunks) {
+ chunk = mk_list_entry(chunk_iterator, struct sb_out_chunk, _head);
+
+ if (chunk->chunk == target_chunk) {
+ mk_list_del(&chunk->_head);
+
+ backlog->ins->fs_backlog_chunks_size -= cio_chunk_get_real_size(target_chunk);
+
+ if (destroy) {
+ sb_destroy_chunk(chunk);
+ }
+
+ break;
+ }
+ }
+}
+
+static void sb_remove_chunk_from_segregated_backlogs(struct cio_chunk *target_chunk,
+ struct flb_sb *context)
+{
+ struct mk_list *backlog_iterator;
+ struct sb_out_queue *backlog;
+
+ mk_list_foreach(backlog_iterator, &context->backlogs) {
+ backlog = mk_list_entry(backlog_iterator, struct sb_out_queue, _head);
+
+ sb_remove_chunk_from_segregated_backlog(target_chunk, backlog, FLB_TRUE);
+ }
+}
+
+static int sb_append_chunk_to_segregated_backlog(struct cio_chunk *target_chunk,
+ struct cio_stream *stream,
+ size_t target_chunk_size,
+ struct sb_out_queue *backlog)
+{
+ struct sb_out_chunk *chunk;
+
+ chunk = sb_allocate_chunk(target_chunk, stream, target_chunk_size);
+
+ if (chunk == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ mk_list_add(&chunk->_head, &backlog->chunks);
+
+ backlog->ins->fs_backlog_chunks_size += target_chunk_size;
+
+ return 0;
+}
+
+static int sb_append_chunk_to_segregated_backlogs(struct cio_chunk *target_chunk,
+ struct cio_stream *stream,
+ struct flb_sb *context)
+{
+ struct flb_input_chunk dummy_input_chunk;
+ struct mk_list *tmp;
+ struct mk_list *head;
+    ssize_t chunk_size;
+ struct sb_out_queue *backlog;
+ int tag_len;
+ const char * tag_buf;
+ int result;
+
+ memset(&dummy_input_chunk, 0, sizeof(struct flb_input_chunk));
+
+ dummy_input_chunk.in = context->ins;
+ dummy_input_chunk.chunk = target_chunk;
+
+ chunk_size = cio_chunk_get_real_size(target_chunk);
+
+ if (chunk_size < 0) {
+ flb_warn("[storage backlog] could not get real size of chunk %s/%s",
+ stream->name, target_chunk->name);
+ return -1;
+ }
+
+ result = flb_input_chunk_get_tag(&dummy_input_chunk, &tag_buf, &tag_len);
+ if (result == -1) {
+ flb_error("[storage backlog] could not retrieve chunk tag from %s/%s, "
+ "removing it from the queue",
+ stream->name, target_chunk->name);
+ return -2;
+ }
+
+ flb_routes_mask_set_by_tag(dummy_input_chunk.routes_mask, tag_buf, tag_len,
+ context->ins);
+
+ mk_list_foreach_safe(head, tmp, &context->backlogs) {
+ backlog = mk_list_entry(head, struct sb_out_queue, _head);
+ if (flb_routes_mask_get_bit(dummy_input_chunk.routes_mask,
+ backlog->ins->id)) {
+ result = sb_append_chunk_to_segregated_backlog(target_chunk, stream,
+ chunk_size, backlog);
+ if (result) {
+ return -3;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int sb_segregate_chunks(struct flb_config *config)
+{
+ int ret;
+    ssize_t size;
+ struct mk_list *tmp;
+ struct mk_list *stream_iterator;
+ struct mk_list *chunk_iterator;
+ int chunk_error;
+ struct flb_sb *context;
+ struct cio_stream *stream;
+ struct cio_chunk *chunk;
+
+ context = sb_get_context(config);
+
+ if (context == NULL) {
+ return 0;
+ }
+
+ ret = sb_allocate_backlogs(context);
+ if (ret) {
+ return -2;
+ }
+
+ mk_list_foreach(stream_iterator, &context->cio->streams) {
+ stream = mk_list_entry(stream_iterator, struct cio_stream, _head);
+
+ mk_list_foreach_safe(chunk_iterator, tmp, &stream->chunks) {
+ chunk = mk_list_entry(chunk_iterator, struct cio_chunk, _head);
+
+ if (!cio_chunk_is_up(chunk)) {
+ ret = cio_chunk_up_force(chunk);
+ if (ret == CIO_CORRUPTED) {
+ if (config->storage_del_bad_chunks) {
+ chunk_error = cio_error_get(chunk);
+
+ if (chunk_error == CIO_ERR_BAD_FILE_SIZE ||
+ chunk_error == CIO_ERR_BAD_LAYOUT)
+ {
+ flb_plg_error(context->ins, "discarding irrecoverable chunk %s/%s", stream->name, chunk->name);
+
+ cio_chunk_close(chunk, CIO_TRUE);
+ }
+ }
+
+ continue;
+ }
+ }
+
+ if (!cio_chunk_is_up(chunk)) {
+ return -3;
+ }
+
+ /* try to segregate a chunk */
+ ret = sb_append_chunk_to_segregated_backlogs(chunk, stream, context);
+ if (ret) {
+ /*
+ * if the chunk could not be segregated, just remove it from the
+ * queue and continue.
+ *
+ * if content size is zero, it's safe to 'delete it'.
+ */
+ size = cio_chunk_get_content_size(chunk);
+ if (size <= 0) {
+ cio_chunk_close(chunk, CIO_TRUE);
+ }
+ else {
+ cio_chunk_close(chunk, CIO_FALSE);
+ }
+ continue;
+ }
+
+ /* lock the chunk */
+ flb_plg_info(context->ins, "register %s/%s", stream->name, chunk->name);
+
+ cio_chunk_lock(chunk);
+ cio_chunk_down(chunk);
+ }
+ }
+
+ return 0;
+}
+
+ssize_t sb_get_releasable_output_queue_space(struct flb_output_instance *output_plugin,
+ size_t required_space)
+{
+ ssize_t releasable_space;
+ struct mk_list *chunk_iterator;
+ struct flb_sb *context;
+ struct sb_out_queue *backlog;
+ struct sb_out_chunk *chunk;
+
+ context = sb_get_context(output_plugin->config);
+
+ if (context == NULL) {
+ return 0;
+ }
+
+ backlog = sb_find_segregated_backlog_by_output_plugin_instance(
+ output_plugin, context);
+
+ if (backlog == NULL) {
+ return 0;
+ }
+
+ releasable_space = 0;
+
+ mk_list_foreach(chunk_iterator, &backlog->chunks) {
+ chunk = mk_list_entry(chunk_iterator, struct sb_out_chunk, _head);
+
+ releasable_space += chunk->size;
+
+ if (releasable_space >= required_space) {
+ break;
+ }
+ }
+
+ return releasable_space;
+}
+
+int sb_release_output_queue_space(struct flb_output_instance *output_plugin,
+ ssize_t *required_space)
+{
+ struct mk_list *chunk_iterator_tmp;
+ struct cio_chunk *underlying_chunk;
+ struct mk_list *chunk_iterator;
+ size_t released_space;
+ struct flb_sb *context;
+ struct sb_out_queue *backlog;
+ struct sb_out_chunk *chunk;
+
+ context = sb_get_context(output_plugin->config);
+
+ if (context == NULL) {
+ return -1;
+ }
+
+ backlog = sb_find_segregated_backlog_by_output_plugin_instance(
+ output_plugin, context);
+
+ if (backlog == NULL) {
+ return -2;
+ }
+
+ released_space = 0;
+
+ mk_list_foreach_safe(chunk_iterator, chunk_iterator_tmp, &backlog->chunks) {
+ chunk = mk_list_entry(chunk_iterator, struct sb_out_chunk, _head);
+
+ released_space += chunk->size;
+ underlying_chunk = chunk->chunk;
+
+ sb_remove_chunk_from_segregated_backlogs(underlying_chunk, context);
+ cio_chunk_close(underlying_chunk, FLB_TRUE);
+
+ if (released_space >= *required_space) {
+ break;
+ }
+ }
+
+ *required_space -= released_space;
+
+ return 0;
+}
+
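+/*
+ * Illustrative caller-side sketch (hypothetical, not part of this file):
+ * an output plugin that needs buffer space could first ask how much the
+ * backlog can give back and then request the release. The names match the
+ * two functions above; 'out_ins' and the byte count are assumptions.
+ *
+ *   ssize_t needed = 1048576;
+ *   if (sb_get_releasable_output_queue_space(out_ins, needed) >= needed) {
+ *       sb_release_output_queue_space(out_ins, &needed);
+ *   }
+ */
+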
+/* Collection callback */
+static int cb_queue_chunks(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ size_t empty_output_queue_count;
+ struct mk_list *output_queue_iterator;
+ struct sb_out_queue *output_queue_instance;
+ struct sb_out_chunk *chunk_instance;
+ struct flb_sb *ctx;
+ struct flb_input_chunk *ic;
+ struct flb_input_chunk tmp_ic;
+ void *ch;
+ size_t total = 0;
+ ssize_t size;
+ int ret;
+ int event_type;
+
+ /* Get context */
+ ctx = (struct flb_sb *) data;
+
+ /* Get the total number of bytes already enqueued */
+ total = flb_input_chunk_total_size(in);
+
+    /* If we already hit our limit, just wait and re-check later */
+ if (total >= ctx->mem_limit) {
+ return 0;
+ }
+
+ empty_output_queue_count = 0;
+
+ while (total < ctx->mem_limit &&
+ empty_output_queue_count < mk_list_size(&ctx->backlogs)) {
+ empty_output_queue_count = 0;
+
+ mk_list_foreach(output_queue_iterator, &ctx->backlogs) {
+ output_queue_instance = mk_list_entry(output_queue_iterator,
+ struct sb_out_queue,
+ _head);
+
+ if (mk_list_is_empty(&output_queue_instance->chunks) != 0) {
+ chunk_instance = mk_list_entry_first(&output_queue_instance->chunks,
+ struct sb_out_chunk,
+ _head);
+
+ /* Try to enqueue one chunk */
+ /*
+ * All chunks on this backlog are 'file' based, always try to set
+ * them up. We validate the status.
+ */
+ ret = cio_chunk_is_up(chunk_instance->chunk);
+
+ if (ret == CIO_FALSE) {
+ ret = cio_chunk_up_force(chunk_instance->chunk);
+
+ if (ret == CIO_CORRUPTED) {
+ flb_plg_error(ctx->ins, "removing corrupted chunk from the "
+ "queue %s:%s",
+ chunk_instance->stream->name, chunk_instance->chunk->name);
+ cio_chunk_close(chunk_instance->chunk, FLB_FALSE);
+ sb_remove_chunk_from_segregated_backlogs(chunk_instance->chunk, ctx);
+                        /* This function will indirectly release chunk_instance so it has to be
+ * called last.
+ */
+ continue;
+ }
+ else if (ret == CIO_ERROR || ret == CIO_RETRY) {
+ continue;
+ }
+ }
+
+ /*
+ * Map the chunk file context into a temporary buffer since the
+                 * flb_input_chunk_get_event_type() interface needs a
+                 * struct flb_input_chunk argument.
+ */
+ tmp_ic.chunk = chunk_instance->chunk;
+
+                /* Retrieve the event type: FLB_INPUT_LOGS, FLB_INPUT_METRICS or FLB_INPUT_TRACES */
+ ret = flb_input_chunk_get_event_type(&tmp_ic);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "removing chunk with wrong metadata "
+ "from the queue %s:%s",
+ chunk_instance->stream->name,
+ chunk_instance->chunk->name);
+ cio_chunk_close(chunk_instance->chunk, FLB_TRUE);
+ sb_remove_chunk_from_segregated_backlogs(chunk_instance->chunk,
+ ctx);
+ continue;
+ }
+ event_type = ret;
+
+ /* get the number of bytes being used by the chunk */
+ size = cio_chunk_get_content_size(chunk_instance->chunk);
+ if (size <= 0) {
+ flb_plg_error(ctx->ins, "removing empty chunk from the "
+ "queue %s:%s",
+ chunk_instance->stream->name, chunk_instance->chunk->name);
+ cio_chunk_close(chunk_instance->chunk, FLB_TRUE);
+ sb_remove_chunk_from_segregated_backlogs(chunk_instance->chunk, ctx);
+                    /* This function will indirectly release chunk_instance so it has to be
+ * called last.
+ */
+ continue;
+ }
+
+ ch = chunk_instance->chunk;
+
+ /* Associate this backlog chunk to this instance into the engine */
+ ic = flb_input_chunk_map(in, event_type, ch);
+ if (!ic) {
+ flb_plg_error(ctx->ins, "removing chunk %s:%s from the queue",
+ chunk_instance->stream->name, chunk_instance->chunk->name);
+ cio_chunk_down(chunk_instance->chunk);
+
+ /*
+ * If the file cannot be mapped, just drop it. Failures are all
+ * associated with data corruption.
+ */
+ cio_chunk_close(chunk_instance->chunk, FLB_TRUE);
+ sb_remove_chunk_from_segregated_backlogs(chunk_instance->chunk, ctx);
+                    /* This function will indirectly release chunk_instance so it has to be
+ * called last.
+ */
+ continue;
+ }
+
+ flb_plg_info(ctx->ins, "queueing %s:%s",
+ chunk_instance->stream->name, chunk_instance->chunk->name);
+
+ /* We are removing this chunk reference from this specific backlog
+                 * queue but we need to leave it in the remaining queues.
+ */
+ sb_remove_chunk_from_segregated_backlogs(chunk_instance->chunk, ctx);
+ cio_chunk_down(ch);
+
+ /* check our limits */
+ total += size;
+ }
+ else {
+ empty_output_queue_count++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Initialize plugin */
+static int cb_sb_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ char mem[32];
+ struct flb_sb *ctx;
+
+ ctx = flb_malloc(sizeof(struct flb_sb));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->cio = data;
+ ctx->ins = in;
+ ctx->mem_limit = flb_utils_size_to_bytes(config->storage_bl_mem_limit);
+
+ mk_list_init(&ctx->backlogs);
+
+ flb_utils_bytes_to_human_readable_size(ctx->mem_limit, mem, sizeof(mem) - 1);
+ flb_plg_info(ctx->ins, "queue memory limit: %s", mem);
+
+ /* export plugin context */
+ flb_input_set_context(in, ctx);
+
+ /* Set a collector to trigger the callback to queue data every second */
+ ret = flb_input_set_collector_time(in, cb_queue_chunks, 1, 0, config);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "could not create collector");
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->coll_fd = ret;
+
+ return 0;
+}
+
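+/*
+ * The memory limit used above normally comes from the service-level
+ * setting storage.backlog.mem_limit. A minimal illustrative configuration
+ * (values are examples, not defaults enforced here):
+ *
+ *   [SERVICE]
+ *       storage.path              /var/log/flb-storage/
+ *       storage.backlog.mem_limit 5M
+ */
+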
+static void cb_sb_pause(void *data, struct flb_config *config)
+{
+ struct flb_sb *ctx = data;
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+}
+
+static void cb_sb_resume(void *data, struct flb_config *config)
+{
+ struct flb_sb *ctx = data;
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+}
+
+static int cb_sb_exit(void *data, struct flb_config *config)
+{
+ struct flb_sb *ctx = data;
+
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+
+ sb_destroy_backlogs(ctx);
+
+ flb_free(ctx);
+
+ return 0;
+}
+
+/* Plugin reference */
+struct flb_input_plugin in_storage_backlog_plugin = {
+ .name = "storage_backlog",
+ .description = "Storage Backlog",
+ .cb_init = cb_sb_init,
+ .cb_pre_run = NULL,
+ .cb_collect = NULL,
+ .cb_ingest = NULL,
+ .cb_flush_buf = NULL,
+ .cb_pause = cb_sb_pause,
+ .cb_resume = cb_sb_resume,
+ .cb_exit = cb_sb_exit,
+
+ /* This plugin can only be configured and invoked by the Engine */
+ .flags = FLB_INPUT_PRIVATE
+};
diff --git a/src/fluent-bit/plugins/in_stream_processor/CMakeLists.txt b/src/fluent-bit/plugins/in_stream_processor/CMakeLists.txt
new file mode 100644
index 000000000..fa6a4b408
--- /dev/null
+++ b/src/fluent-bit/plugins/in_stream_processor/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ sp.c
+ )
+
+FLB_PLUGIN(in_stream_processor "${src}" "")
diff --git a/src/fluent-bit/plugins/in_stream_processor/sp.c b/src/fluent-bit/plugins/in_stream_processor/sp.c
new file mode 100644
index 000000000..d4056a94c
--- /dev/null
+++ b/src/fluent-bit/plugins/in_stream_processor/sp.c
@@ -0,0 +1,173 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_sds.h>
+
+struct sp_chunk {
+ char *buf_data;
+ size_t buf_size;
+ struct mk_list _head;
+};
+
+struct sp_ctx {
+ int coll_fd; /* collector file descriptor to flush queue */
+ flb_sds_t tag; /* outgoing Tag name */
+ struct mk_list chunks; /* linked list with data chunks to ingest */
+ struct flb_input_instance *ins;
+};
+
+/*
+ * This 'special' function is used by the Stream Processor engine to register
+ * data results of a query that needs to be ingested into the main pipeline.
+ *
+ * We usually don't do this in a plugin but for simplicity and to avoid
+ * extra memory-copies we just expose this function for direct use.
+ */
+int in_stream_processor_add_chunk(char *buf_data, size_t buf_size,
+ struct flb_input_instance *ins)
+{
+ struct sp_chunk *chunk;
+ struct sp_ctx *ctx = (struct sp_ctx *) ins->context;
+
+ chunk = flb_malloc(sizeof(struct sp_chunk));
+ if (!chunk) {
+ flb_errno();
+ return -1;
+ }
+
+ chunk->buf_data = buf_data;
+ chunk->buf_size = buf_size;
+ mk_list_add(&chunk->_head, &ctx->chunks);
+
+ return 0;
+}
+
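+/*
+ * Illustrative usage from the Stream Processor side (hypothetical caller):
+ * the engine hands over an already encoded msgpack buffer and, on success,
+ * this plugin takes ownership and frees it after ingestion in
+ * cb_chunks_append(). 'mp_buf', 'mp_size' and 'sp_ins' are assumptions.
+ *
+ *   if (in_stream_processor_add_chunk(mp_buf, mp_size, sp_ins) == -1) {
+ *       flb_free(mp_buf);   /* caller keeps ownership on failure */
+ *   }
+ */
+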
+/* Callback used to queue pending data chunks */
+static int cb_chunks_append(struct flb_input_instance *in,
+ struct flb_config *config, void *in_context)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct sp_chunk *chunk;
+ struct sp_ctx *ctx = in_context;
+ (void) config;
+
+ mk_list_foreach_safe(head, tmp, &ctx->chunks) {
+ chunk = mk_list_entry(head, struct sp_chunk, _head);
+ flb_input_log_append(in,
+ ctx->tag, flb_sds_len(ctx->tag),
+ chunk->buf_data, chunk->buf_size);
+ flb_free(chunk->buf_data);
+ mk_list_del(&chunk->_head);
+ flb_free(chunk);
+ }
+ return 0;
+}
+
+/* Initialize plugin */
+static int cb_sp_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct sp_ctx *ctx;
+
+ /* Create plugin instance context */
+ ctx = flb_malloc(sizeof(struct sp_ctx));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = in;
+ mk_list_init(&ctx->chunks);
+
+ /* Register context */
+ flb_input_set_context(in, ctx);
+
+    /*
+     * Configure the outgoing tag: when registering records into the Engine
+     * we need to specify a Tag. If we got the default name
+     * stream_processor.N, override it with the Alias set by the Stream
+     * Processor interface; otherwise keep the Tag that was configured.
+     */
+ if (strncmp(in->tag, "stream_processor.", 17) == 0) {
+ ctx->tag = flb_sds_create(in->alias);
+ }
+ else {
+ ctx->tag = flb_sds_create(in->tag);
+ }
+
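+    /*
+     * Illustrative example (hypothetical values): a query stream that kept
+     * the default tag "stream_processor.0" but has the Alias "sp-results"
+     * will emit its records tagged "sp-results"; a stream configured with
+     * an explicit Tag keeps that Tag.
+     */
+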
+ /* Set our collector based on time, queue chunks every 0.5 sec */
+ ret = flb_input_set_collector_time(in,
+ cb_chunks_append,
+ 0,
+ 500000000,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Could not set collector");
+ return -1;
+ }
+ ctx->coll_fd = ret;
+
+ return 0;
+}
+
+static void cb_sp_pause(void *data, struct flb_config *config)
+{
+ struct sp_ctx *ctx = data;
+
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+}
+
+static void cb_sp_resume(void *data, struct flb_config *config)
+{
+ struct sp_ctx *ctx = data;
+
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+}
+
+static int cb_sp_exit(void *data, struct flb_config *config)
+{
+ struct sp_ctx *ctx = data;
+
+ /* Upon exit, put in the queue all pending chunks */
+ cb_chunks_append(ctx->ins, config, ctx);
+ flb_sds_destroy(ctx->tag);
+ flb_free(ctx);
+
+ return 0;
+}
+
+/* Plugin reference */
+struct flb_input_plugin in_stream_processor_plugin = {
+ .name = "stream_processor",
+ .description = "Stream Processor",
+ .cb_init = cb_sp_init,
+ .cb_pre_run = NULL,
+ .cb_collect = NULL,
+ .cb_ingest = NULL,
+ .cb_flush_buf = NULL,
+ .cb_pause = cb_sp_pause,
+ .cb_resume = cb_sp_resume,
+ .cb_exit = cb_sp_exit,
+
+ /* This plugin can only be configured and invoked by the Stream Processor */
+ .flags = FLB_INPUT_PRIVATE | FLB_INPUT_NOTAG
+};
diff --git a/src/fluent-bit/plugins/in_syslog/CMakeLists.txt b/src/fluent-bit/plugins/in_syslog/CMakeLists.txt
new file mode 100644
index 000000000..88f698b12
--- /dev/null
+++ b/src/fluent-bit/plugins/in_syslog/CMakeLists.txt
@@ -0,0 +1,8 @@
+set(src
+ syslog_conf.c
+ syslog_server.c
+ syslog_conn.c
+ syslog_prot.c
+ syslog.c)
+
+FLB_PLUGIN(in_syslog "${src}" "")
diff --git a/src/fluent-bit/plugins/in_syslog/syslog.c b/src/fluent-bit/plugins/in_syslog/syslog.c
new file mode 100644
index 000000000..d478dfc3f
--- /dev/null
+++ b/src/fluent-bit/plugins/in_syslog/syslog.c
@@ -0,0 +1,263 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include <msgpack.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_downstream.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_utils.h>
+
+#include "syslog.h"
+#include "syslog_conf.h"
+#include "syslog_server.h"
+#include "syslog_conn.h"
+#include "syslog_prot.h"
+
+/* cb_collect callback */
+static int in_syslog_collect_tcp(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_connection *connection;
+ struct syslog_conn *conn;
+ struct flb_syslog *ctx;
+
+ (void) i_ins;
+
+ ctx = in_context;
+
+ connection = flb_downstream_conn_get(ctx->downstream);
+
+ if (connection == NULL) {
+ flb_plg_error(ctx->ins, "could not accept new connection");
+
+ return -1;
+ }
+
+ if (ctx->dgram_mode_flag) {
+ return syslog_dgram_conn_event(connection);
+ }
+ else {
+ flb_plg_trace(ctx->ins, "new Unix connection arrived FD=%i", connection->fd);
+
+ conn = syslog_conn_add(connection, ctx);
+
+ if (conn == NULL) {
+ flb_plg_error(ctx->ins, "could not accept new connection");
+
+ flb_downstream_conn_release(connection);
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Collect a datagram; per the Syslog specification a datagram contains only
+ * one syslog message and should not exceed 1KB.
+ */
+static int in_syslog_collect_udp(struct flb_input_instance *i_ins,
+ struct flb_config *config,
+ void *in_context)
+{
+ struct flb_syslog *ctx;
+
+ (void) i_ins;
+
+ ctx = in_context;
+
+ return syslog_dgram_conn_event(ctx->dummy_conn->connection);
+}
+
+/* Initialize plugin */
+static int in_syslog_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct flb_syslog *ctx;
+ struct flb_connection *connection;
+
+ /* Allocate space for the configuration */
+ ctx = syslog_conf_create(in, config);
+ if (!ctx) {
+ flb_plg_error(in, "could not initialize plugin");
+ return -1;
+ }
+ ctx->collector_id = -1;
+
+ if ((ctx->mode == FLB_SYSLOG_UNIX_TCP || ctx->mode == FLB_SYSLOG_UNIX_UDP)
+ && !ctx->unix_path) {
+ flb_plg_error(ctx->ins, "Unix path not defined");
+ syslog_conf_destroy(ctx);
+ return -1;
+ }
+
+ /* Create Unix Socket */
+ ret = syslog_server_create(ctx);
+ if (ret == -1) {
+ syslog_conf_destroy(ctx);
+ return -1;
+ }
+
+ flb_input_downstream_set(ctx->downstream, ctx->ins);
+
+ if (ctx->dgram_mode_flag) {
+ connection = flb_downstream_conn_get(ctx->downstream);
+
+ if (connection == NULL) {
+ flb_plg_error(ctx->ins, "could not get DGRAM server dummy "
+ "connection");
+
+ syslog_conf_destroy(ctx);
+
+ return -1;
+ }
+
+ ctx->dummy_conn = syslog_conn_add(connection, ctx);
+
+ if (ctx->dummy_conn == NULL) {
+ flb_plg_error(ctx->ins, "could not track DGRAM server dummy "
+ "connection");
+
+ syslog_conf_destroy(ctx);
+
+ return -1;
+ }
+ }
+
+ /* Set context */
+ flb_input_set_context(in, ctx);
+
+ /* Collect events for every opened connection to our socket */
+ if (ctx->mode == FLB_SYSLOG_UNIX_TCP ||
+ ctx->mode == FLB_SYSLOG_TCP) {
+ ret = flb_input_set_collector_socket(in,
+ in_syslog_collect_tcp,
+ ctx->downstream->server_fd,
+ config);
+ }
+ else {
+ ret = flb_input_set_collector_socket(in,
+ in_syslog_collect_udp,
+ ctx->downstream->server_fd,
+ config);
+ }
+
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Could not set collector");
+ syslog_conf_destroy(ctx);
+
+ return -1;
+ }
+
+ ctx->collector_id = ret;
+ ctx->collector_event = flb_input_collector_get_event(ret, in);
+
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Could not get collector event");
+ syslog_conf_destroy(ctx);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int in_syslog_exit(void *data, struct flb_config *config)
+{
+ struct flb_syslog *ctx = data;
+ (void) config;
+
+ syslog_conn_exit(ctx);
+ syslog_conf_destroy(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "mode", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, mode_str),
+ "Set the socket mode: unix_tcp, unix_udp, tcp or udp"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "path", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, unix_path),
+ "Set the path for the UNIX socket"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "unix_perm", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, unix_perm_str),
+ "Set the permissions for the UNIX socket"
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_chunk_size", FLB_SYSLOG_CHUNK,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, buffer_chunk_size),
+ "Set the buffer chunk size"
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_max_size", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, buffer_max_size),
+      "Set the buffer maximum size"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "parser", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, parser_name),
+ "Set the parser"
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "receive_buffer_size", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, receive_buffer_size),
+ "Set the socket receiving buffer size"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "raw_message_key", (char *) NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, raw_message_key),
+ "Key where the raw message will be preserved"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "source_address_key", (char *) NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, source_address_key),
+ "Key where the source address will be injected"
+ },
+
+
+ /* EOF */
+ {0}
+};
+
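+/*
+ * Minimal illustrative input configuration for this plugin (values are
+ * examples only, not defaults enforced by this file):
+ *
+ *   [INPUT]
+ *       Name   syslog
+ *       Mode   udp
+ *       Listen 0.0.0.0
+ *       Port   5140
+ *       Parser syslog-rfc5424
+ */
+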
+struct flb_input_plugin in_syslog_plugin = {
+ .name = "syslog",
+ .description = "Syslog",
+ .cb_init = in_syslog_init,
+ .cb_pre_run = NULL,
+ .cb_collect = NULL,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_syslog_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET_SERVER | FLB_IO_OPT_TLS
+};
diff --git a/src/fluent-bit/plugins/in_syslog/syslog.h b/src/fluent-bit/plugins/in_syslog/syslog.h
new file mode 100644
index 000000000..6da2fbd83
--- /dev/null
+++ b/src/fluent-bit/plugins/in_syslog/syslog.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_SYSLOG_H
+#define FLB_IN_SYSLOG_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+/* Syslog modes */
+#define FLB_SYSLOG_UNIX_TCP 1
+#define FLB_SYSLOG_UNIX_UDP 2
+#define FLB_SYSLOG_TCP 3
+#define FLB_SYSLOG_UDP 4
+
+/* 32KB chunk size */
+#define FLB_SYSLOG_CHUNK "32768"
+
+struct syslog_conn;
+
+/* Context / Config */
+struct flb_syslog {
+    /* Listening mode: unix_udp, unix_tcp, tcp or udp */
+ flb_sds_t mode_str;
+ int mode;
+
+ /* Network mode */
+ char *listen;
+ char *port;
+
+    /* Unix socket (UDP/TCP) */
+ int server_fd;
+ flb_sds_t unix_path;
+ flb_sds_t unix_perm_str;
+ unsigned int unix_perm;
+ size_t receive_buffer_size;
+
+ /* UDP buffer, data length and buffer size */
+ // char *buffer_data;
+ // size_t buffer_len;
+ // size_t buffer_size;
+
+ /* Buffers setup */
+ size_t buffer_max_size;
+ size_t buffer_chunk_size;
+
+ /* Configuration */
+ flb_sds_t parser_name;
+ struct flb_parser *parser;
+ flb_sds_t raw_message_key;
+ flb_sds_t source_address_key;
+
+ int dgram_mode_flag;
+ int collector_id;
+ struct mk_event *collector_event;
+ struct flb_downstream *downstream;
+ struct syslog_conn *dummy_conn;
+
+ /* List for connections and event loop */
+ struct mk_list connections;
+ struct flb_input_instance *ins;
+ struct flb_log_event_encoder *log_encoder;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_syslog/syslog_conf.c b/src/fluent-bit/plugins/in_syslog/syslog_conf.c
new file mode 100644
index 000000000..4db3f1626
--- /dev/null
+++ b/src/fluent-bit/plugins/in_syslog/syslog_conf.c
@@ -0,0 +1,193 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_utils.h>
+
+#include "syslog.h"
+#include "syslog_server.h"
+#include "syslog_conf.h"
+
+struct flb_syslog *syslog_conf_create(struct flb_input_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ char port[16];
+ struct flb_syslog *ctx;
+
+ ctx = flb_calloc(1, sizeof(struct flb_syslog));
+
+ if (ctx == NULL) {
+ flb_errno();
+
+ return NULL;
+ }
+
+ ctx->ins = ins;
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(ins, "could not initialize event encoder");
+ syslog_conf_destroy(ctx);
+
+ return NULL;
+ }
+
+ mk_list_init(&ctx->connections);
+
+ ret = flb_input_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+
+ flb_plg_error(ins, "unable to load configuration");
+ flb_free(ctx);
+
+ return NULL;
+ }
+
+ /* Syslog mode: unix_udp, unix_tcp, tcp or udp */
+ if (ctx->mode_str) {
+#ifdef FLB_SYSTEM_WINDOWS
+ if (strcasestr(ctx->mode_str, "unix") != NULL) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+
+        flb_plg_error(ins, "unix sockets are not available on Windows");
+ flb_free(ctx);
+
+ return NULL;
+ }
+
+#undef FLB_SYSLOG_UNIX_UDP
+#define FLB_SYSLOG_UNIX_UDP FLB_SYSLOG_UDP
+#endif
+ if (strcasecmp(ctx->mode_str, "unix_tcp") == 0) {
+ ctx->mode = FLB_SYSLOG_UNIX_TCP;
+ }
+ else if (strcasecmp(ctx->mode_str, "unix_udp") == 0) {
+ ctx->mode = FLB_SYSLOG_UNIX_UDP;
+ }
+ else if (strcasecmp(ctx->mode_str, "tcp") == 0) {
+ ctx->mode = FLB_SYSLOG_TCP;
+ }
+ else if (strcasecmp(ctx->mode_str, "udp") == 0) {
+ ctx->mode = FLB_SYSLOG_UDP;
+ }
+ else {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+
+ flb_error("[in_syslog] Unknown syslog mode %s", ctx->mode_str);
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+ else {
+ ctx->mode = FLB_SYSLOG_UNIX_UDP;
+ }
+
+ /* Check if TCP mode was requested */
+ if (ctx->mode == FLB_SYSLOG_TCP || ctx->mode == FLB_SYSLOG_UDP) {
+ /* Listen interface (if not set, defaults to 0.0.0.0:5140) */
+ flb_input_net_default_listener("0.0.0.0", 5140, ins);
+ ctx->listen = ins->host.listen;
+ snprintf(port, sizeof(port) - 1, "%d", ins->host.port);
+ ctx->port = flb_strdup(port);
+ }
+
+ /* Unix socket path and permission */
+ if (ctx->mode == FLB_SYSLOG_UNIX_UDP || ctx->mode == FLB_SYSLOG_UNIX_TCP) {
+ if (ctx->unix_perm_str) {
+ ctx->unix_perm = strtol(ctx->unix_perm_str, NULL, 8) & 07777;
+ } else {
+ ctx->unix_perm = 0644;
+ }
+ }
+
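+    /*
+     * Example (illustrative): unix_perm "0660" is parsed as octal above,
+     * so the socket file ends up with mode 0660; when the option is not
+     * set, the 0644 default applies.
+     */
+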
+ /* Buffer Chunk Size */
+ if (ctx->buffer_chunk_size == -1) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+
+ flb_plg_error(ins, "invalid buffer_chunk_size");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Buffer Max Size */
+ if (ctx->buffer_max_size == -1) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+
+ flb_plg_error(ins, "invalid buffer_max_size");
+ flb_free(ctx);
+ return NULL;
+ }
+ else if (ctx->buffer_max_size == 0) {
+ ctx->buffer_max_size = ctx->buffer_chunk_size;
+ }
+
+ /* Socket rcv buffer size */
+ if (ctx->receive_buffer_size == -1 || ctx->receive_buffer_size>INT_MAX) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+
+ flb_plg_error(ins, "invalid receive_buffer_size");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Parser */
+ if (ctx->parser_name) {
+ ctx->parser = flb_parser_get(ctx->parser_name, config);
+ }
+ else {
+ if (ctx->mode == FLB_SYSLOG_TCP || ctx->mode == FLB_SYSLOG_UDP) {
+ ctx->parser = flb_parser_get("syslog-rfc5424", config);
+ }
+ else {
+ ctx->parser = flb_parser_get("syslog-rfc3164-local", config);
+ }
+ }
+
+ if (!ctx->parser) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+
+ flb_error("[in_syslog] parser not set");
+ syslog_conf_destroy(ctx);
+ return NULL;
+ }
+
+ return ctx;
+}
+
+int syslog_conf_destroy(struct flb_syslog *ctx)
+{
+ if (ctx->log_encoder != NULL) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ }
+
+ syslog_server_destroy(ctx);
+
+ flb_free(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_syslog/syslog_conf.h b/src/fluent-bit/plugins/in_syslog/syslog_conf.h
new file mode 100644
index 000000000..ac2304031
--- /dev/null
+++ b/src/fluent-bit/plugins/in_syslog/syslog_conf.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_SYSLOG_CONF_H
+#define FLB_IN_SYSLOG_CONF_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+
+#include "syslog.h"
+
+struct flb_syslog *syslog_conf_create(struct flb_input_instance *i_ins,
+ struct flb_config *config);
+int syslog_conf_destroy(struct flb_syslog *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_syslog/syslog_conn.c b/src/fluent-bit/plugins/in_syslog/syslog_conn.c
new file mode 100644
index 000000000..8785c1e86
--- /dev/null
+++ b/src/fluent-bit/plugins/in_syslog/syslog_conn.c
@@ -0,0 +1,247 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_engine.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_downstream.h>
+
+#include "syslog.h"
+#include "syslog_conf.h"
+#include "syslog_conn.h"
+#include "syslog_prot.h"
+
+/* Callback invoked every time an event is triggered for a connection */
+int syslog_conn_event(void *data)
+{
+ struct flb_connection *connection;
+ struct syslog_conn *conn;
+ struct flb_syslog *ctx;
+
+ connection = (struct flb_connection *) data;
+
+ conn = connection->user_data;
+
+ ctx = conn->ctx;
+
+ if (ctx->dgram_mode_flag) {
+ return syslog_dgram_conn_event(data);
+ }
+
+ return syslog_stream_conn_event(data);
+}
+
+int syslog_stream_conn_event(void *data)
+{
+ int ret;
+ int bytes;
+ int available;
+ size_t size;
+ char *tmp;
+ struct mk_event *event;
+ struct syslog_conn *conn;
+ struct flb_syslog *ctx;
+ struct flb_connection *connection;
+
+ connection = (struct flb_connection *) data;
+
+ conn = connection->user_data;
+
+ ctx = conn->ctx;
+
+ event = &connection->event;
+
+ if (event->mask & MK_EVENT_READ) {
+ available = (conn->buf_size - conn->buf_len) - 1;
+ if (available < 1) {
+ if (conn->buf_size + ctx->buffer_chunk_size > ctx->buffer_max_size) {
+ flb_plg_debug(ctx->ins,
+ "fd=%i incoming data exceed limit (%zd bytes)",
+ event->fd, (ctx->buffer_max_size));
+ syslog_conn_del(conn);
+ return -1;
+ }
+
+ size = conn->buf_size + ctx->buffer_chunk_size;
+ tmp = flb_realloc(conn->buf_data, size);
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+ flb_plg_trace(ctx->ins, "fd=%i buffer realloc %zd -> %zd",
+ event->fd, conn->buf_size, size);
+
+ conn->buf_data = tmp;
+ conn->buf_size = size;
+ available = (conn->buf_size - conn->buf_len) - 1;
+ }
+
+ bytes = flb_io_net_read(connection,
+ (void *) &conn->buf_data[conn->buf_len],
+ available);
+
+ if (bytes > 0) {
+ flb_plg_trace(ctx->ins, "read()=%i pre_len=%zu now_len=%zu",
+ bytes, conn->buf_len, conn->buf_len + bytes);
+ conn->buf_len += bytes;
+ conn->buf_data[conn->buf_len] = '\0';
+ ret = syslog_prot_process(conn);
+ if (ret == -1) {
+ return -1;
+ }
+ return bytes;
+ }
+ else {
+ flb_plg_trace(ctx->ins, "fd=%i closed connection", event->fd);
+ syslog_conn_del(conn);
+ return -1;
+ }
+ }
+
+ if (event->mask & MK_EVENT_CLOSE) {
+ flb_plg_trace(ctx->ins, "fd=%i hangup", event->fd);
+ syslog_conn_del(conn);
+ return -1;
+ }
+ return 0;
+}
+
+int syslog_dgram_conn_event(void *data)
+{
+ struct flb_connection *connection;
+ int bytes;
+ struct syslog_conn *conn;
+
+ connection = (struct flb_connection *) data;
+
+ conn = connection->user_data;
+
+ bytes = flb_io_net_read(connection,
+ (void *) &conn->buf_data[conn->buf_len],
+ conn->buf_size - 1);
+
+ if (bytes > 0) {
+ conn->buf_data[bytes] = '\0';
+ conn->buf_len = bytes;
+
+ syslog_prot_process_udp(conn);
+ }
+ else {
+ flb_errno();
+ }
+
+ conn->buf_len = 0;
+
+ return 0;
+}
+
+/* Create a new syslog connection instance */
+struct syslog_conn *syslog_conn_add(struct flb_connection *connection,
+ struct flb_syslog *ctx)
+{
+ int ret;
+ struct syslog_conn *conn;
+
+ conn = flb_malloc(sizeof(struct syslog_conn));
+ if (!conn) {
+ return NULL;
+ }
+
+ conn->connection = connection;
+
+ /* Set data for the event-loop */
+ MK_EVENT_NEW(&connection->event);
+
+ connection->user_data = conn;
+ connection->event.type = FLB_ENGINE_EV_CUSTOM;
+ connection->event.handler = syslog_conn_event;
+
+ /* Connection info */
+ conn->ctx = ctx;
+ conn->ins = ctx->ins;
+ conn->buf_len = 0;
+ conn->buf_parsed = 0;
+
+ /* Allocate read buffer */
+ conn->buf_data = flb_malloc(ctx->buffer_chunk_size);
+ if (!conn->buf_data) {
+ flb_errno();
+
+ flb_free(conn);
+
+ return NULL;
+ }
+ conn->buf_size = ctx->buffer_chunk_size;
+
+ /* Register instance into the event loop if we're in
+ * stream mode (UDP events are received through the collector)
+ */
+ if (!ctx->dgram_mode_flag) {
+ ret = mk_event_add(flb_engine_evl_get(),
+ connection->fd,
+ FLB_ENGINE_EV_CUSTOM,
+ MK_EVENT_READ,
+ &connection->event);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not register new connection");
+
+ flb_free(conn->buf_data);
+ flb_free(conn);
+
+ return NULL;
+ }
+ }
+
+ mk_list_add(&conn->_head, &ctx->connections);
+
+ return conn;
+}
+
+int syslog_conn_del(struct syslog_conn *conn)
+{
+ /* The downstream unregisters the file descriptor from the event-loop
+ * so there's nothing to be done by the plugin
+ */
+ if (!conn->ctx->dgram_mode_flag) {
+ flb_downstream_conn_release(conn->connection);
+ }
+
+ /* Release resources */
+ mk_list_del(&conn->_head);
+
+ flb_free(conn->buf_data);
+ flb_free(conn);
+
+ return 0;
+}
+
+int syslog_conn_exit(struct flb_syslog *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct syslog_conn *conn;
+
+ mk_list_foreach_safe(head, tmp, &ctx->connections) {
+ conn = mk_list_entry(head, struct syslog_conn, _head);
+ syslog_conn_del(conn);
+ }
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_syslog/syslog_conn.h b/src/fluent-bit/plugins/in_syslog/syslog_conn.h
new file mode 100644
index 000000000..684d3f9a4
--- /dev/null
+++ b/src/fluent-bit/plugins/in_syslog/syslog_conn.h
@@ -0,0 +1,53 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_SYSLOG_CONN_H
+#define FLB_IN_SYSLOG_CONN_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_connection.h>
+
+#include "syslog.h"
+
+/* Represents a connection */
+struct syslog_conn {
+ int status; /* Connection status */
+
+ /* Buffer */
+ char *buf_data; /* Buffer data */
+ size_t buf_size; /* Buffer size */
+ size_t buf_len; /* Buffer length */
+ size_t buf_parsed; /* Parsed buffer (offset) */
+ struct flb_input_instance *ins; /* Parent plugin instance */
+ struct flb_syslog *ctx; /* Plugin configuration context */
+ struct flb_connection *connection;
+
+ struct mk_list _head;
+};
+
+int syslog_conn_event(void *data);
+int syslog_stream_conn_event(void *data);
+int syslog_dgram_conn_event(void *data);
+struct syslog_conn *syslog_conn_add(struct flb_connection *connection,
+ struct flb_syslog *ctx);
+int syslog_conn_del(struct syslog_conn *conn);
+int syslog_conn_exit(struct flb_syslog *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_syslog/syslog_prot.c b/src/fluent-bit/plugins/in_syslog/syslog_prot.c
new file mode 100644
index 000000000..1ec2c97cd
--- /dev/null
+++ b/src/fluent-bit/plugins/in_syslog/syslog_prot.c
@@ -0,0 +1,324 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "syslog.h"
+#include "syslog_conn.h"
+#include "syslog_prot.h"
+
+#include <string.h>
+
+static inline void consume_bytes(char *buf, int bytes, int length)
+{
+ memmove(buf, buf + bytes, length - bytes);
+}
+
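+/*
+ * Illustrative example (hypothetical values): consume_bytes() slides the
+ * unprocessed tail of the buffer to the front. With buf = "msg1\nmsg2" and
+ * 5 parsed bytes, consume_bytes(buf, 5, 9) leaves "msg2" at the start of
+ * the buffer; the caller then adjusts buf_len and re-terminates the string.
+ */
+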
+static int append_message_to_record_data(char **result_buffer,
+ size_t *result_size,
+ flb_sds_t message_key_name,
+ char *base_object_buffer,
+ size_t base_object_size,
+ char *message_buffer,
+ size_t message_size,
+ int message_type)
+{
+ int result = FLB_MAP_NOT_MODIFIED;
+ char *modified_data_buffer;
+ int modified_data_size;
+ msgpack_object_kv *new_map_entries[1];
+ msgpack_object_kv message_entry;
+ *result_buffer = NULL;
+ *result_size = 0;
+ modified_data_buffer = NULL;
+
+ if (message_key_name != NULL) {
+ new_map_entries[0] = &message_entry;
+
+ message_entry.key.type = MSGPACK_OBJECT_STR;
+ message_entry.key.via.str.size = flb_sds_len(message_key_name);
+ message_entry.key.via.str.ptr = message_key_name;
+
+ if (message_type == MSGPACK_OBJECT_BIN) {
+ message_entry.val.type = MSGPACK_OBJECT_BIN;
+ message_entry.val.via.bin.size = message_size;
+ message_entry.val.via.bin.ptr = message_buffer;
+ }
+ else if (message_type == MSGPACK_OBJECT_STR) {
+ message_entry.val.type = MSGPACK_OBJECT_STR;
+ message_entry.val.via.str.size = message_size;
+ message_entry.val.via.str.ptr = message_buffer;
+ }
+ else {
+ result = FLB_MAP_EXPANSION_INVALID_VALUE_TYPE;
+ }
+
+ if (result == FLB_MAP_NOT_MODIFIED) {
+ result = flb_msgpack_expand_map(base_object_buffer,
+ base_object_size,
+ new_map_entries, 1,
+ &modified_data_buffer,
+ &modified_data_size);
+ if (result == 0) {
+ result = FLB_MAP_EXPAND_SUCCESS;
+ }
+ else {
+ result = FLB_MAP_EXPANSION_ERROR;
+ }
+ }
+ }
+
+ if (result == FLB_MAP_EXPAND_SUCCESS) {
+ *result_buffer = modified_data_buffer;
+ *result_size = modified_data_size;
+ }
+
+ return result;
+}
+
+static inline int pack_line(struct flb_syslog *ctx,
+ struct flb_time *time,
+ struct flb_connection *connection,
+ char *data, size_t data_size,
+ char *raw_data, size_t raw_data_size)
+{
+ char *modified_data_buffer;
+ size_t modified_data_size;
+ char *appended_address_buffer;
+ size_t appended_address_size;
+ int result;
+ char *source_address;
+
+ source_address = NULL;
+ modified_data_buffer = NULL;
+ appended_address_buffer = NULL;
+
+ if (ctx->raw_message_key != NULL) {
+ result = append_message_to_record_data(&modified_data_buffer,
+ &modified_data_size,
+ ctx->raw_message_key,
+ data,
+ data_size,
+ raw_data,
+ raw_data_size,
+ MSGPACK_OBJECT_BIN);
+
+ if (result == FLB_MAP_EXPANSION_ERROR) {
+ flb_plg_debug(ctx->ins, "error expanding raw message : %d", result);
+ }
+ }
+
+ if (ctx->source_address_key != NULL) {
+ source_address = flb_connection_get_remote_address(connection);
+ if (source_address != NULL) {
+ if (modified_data_buffer != NULL) {
+ result = append_message_to_record_data(&appended_address_buffer,
+ &appended_address_size,
+ ctx->source_address_key,
+ modified_data_buffer,
+ modified_data_size,
+ source_address,
+ strlen(source_address),
+ MSGPACK_OBJECT_STR);
+ }
+ else {
+ result = append_message_to_record_data(&appended_address_buffer,
+ &appended_address_size,
+ ctx->source_address_key,
+ data,
+ data_size,
+ source_address,
+ strlen(source_address),
+ MSGPACK_OBJECT_STR);
+ }
+
+ if (result == FLB_MAP_EXPANSION_ERROR) {
+ flb_plg_debug(ctx->ins, "error expanding source_address : %d", result);
+ }
+ }
+ }
+
+ result = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_timestamp(ctx->log_encoder, time);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ if (appended_address_buffer != NULL) {
+ result = flb_log_event_encoder_set_body_from_raw_msgpack(
+ ctx->log_encoder, appended_address_buffer, appended_address_size);
+ }
+ else if (modified_data_buffer != NULL) {
+ result = flb_log_event_encoder_set_body_from_raw_msgpack(
+ ctx->log_encoder, modified_data_buffer, modified_data_size);
+ }
+ else {
+ result = flb_log_event_encoder_set_body_from_raw_msgpack(
+ ctx->log_encoder, data, data_size);
+ }
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ result = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "log event encoding error : %d", result);
+
+ result = -1;
+ }
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ if (modified_data_buffer != NULL) {
+ flb_free(modified_data_buffer);
+ }
+ if (appended_address_buffer != NULL) {
+ flb_free(appended_address_buffer);
+ }
+
+ return result;
+}
+
+int syslog_prot_process(struct syslog_conn *conn)
+{
+ int len;
+ int ret;
+ char *p;
+ char *eof;
+ char *end;
+ void *out_buf;
+ size_t out_size;
+ struct flb_time out_time;
+ struct flb_syslog *ctx = conn->ctx;
+
+ eof = conn->buf_data;
+ end = conn->buf_data + conn->buf_len;
+
+    /* Always parse while some remaining bytes exist */
+ while (eof < end) {
+        /* Look up the ending byte */
+ eof = p = conn->buf_data + conn->buf_parsed;
+ while (*eof != '\n' && *eof != '\0' && eof < end) {
+ eof++;
+ }
+
+ /* Incomplete message */
+ if (eof == end || (*eof != '\n' && *eof != '\0')) {
+ break;
+ }
+
+ /* No data ? */
+ len = (eof - p);
+ if (len == 0) {
+ consume_bytes(conn->buf_data, 1, conn->buf_len);
+ conn->buf_len--;
+ conn->buf_parsed = 0;
+ conn->buf_data[conn->buf_len] = '\0';
+ end = conn->buf_data + conn->buf_len;
+
+ if (conn->buf_len == 0) {
+ break;
+ }
+
+ continue;
+ }
+
+ /* Process the string */
+ ret = flb_parser_do(ctx->parser, p, len,
+ &out_buf, &out_size, &out_time);
+ if (ret >= 0) {
+ if (flb_time_to_nanosec(&out_time) == 0L) {
+ flb_time_get(&out_time);
+ }
+ pack_line(ctx, &out_time,
+ conn->connection,
+ out_buf, out_size,
+ p, len);
+ flb_free(out_buf);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "error parsing log message with parser '%s'",
+ ctx->parser->name);
+ flb_plg_debug(ctx->ins, "unparsed log message: %.*s", len, p);
+ }
+
+ conn->buf_parsed += len + 1;
+ end = conn->buf_data + conn->buf_len;
+ eof = conn->buf_data + conn->buf_parsed;
+ }
+
+ if (conn->buf_parsed > 0) {
+ consume_bytes(conn->buf_data, conn->buf_parsed, conn->buf_len);
+ conn->buf_len -= conn->buf_parsed;
+ conn->buf_parsed = 0;
+ conn->buf_data[conn->buf_len] = '\0';
+ }
+
+ return 0;
+}
+
+int syslog_prot_process_udp(struct syslog_conn *conn)
+{
+ int ret;
+ void *out_buf;
+ size_t out_size;
+ struct flb_time out_time = {0};
+ char *buf;
+ size_t size;
+ struct flb_syslog *ctx;
+ struct flb_connection *connection;
+
+ buf = conn->buf_data;
+ size = conn->buf_len;
+ ctx = conn->ctx;
+ connection = conn->connection;
+
+ ret = flb_parser_do(ctx->parser, buf, size,
+ &out_buf, &out_size, &out_time);
+ if (ret >= 0) {
+ if (flb_time_to_double(&out_time) == 0) {
+ flb_time_get(&out_time);
+ }
+ pack_line(ctx, &out_time,
+ connection,
+ out_buf, out_size,
+ buf, size);
+ flb_free(out_buf);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "error parsing log message with parser '%s'",
+ ctx->parser->name);
+ flb_plg_debug(ctx->ins, "unparsed log message: %.*s",
+ (int) size, buf);
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_syslog/syslog_prot.h b/src/fluent-bit/plugins/in_syslog/syslog_prot.h
new file mode 100644
index 000000000..cb5976b7b
--- /dev/null
+++ b/src/fluent-bit/plugins/in_syslog/syslog_prot.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_SYSLOG_PROT_H
+#define FLB_IN_SYSLOG_PROT_H
+
+#include <fluent-bit/flb_info.h>
+
+#include "syslog.h"
+
+#define FLB_MAP_EXPAND_SUCCESS 0
+#define FLB_MAP_NOT_MODIFIED -1
+#define FLB_MAP_EXPANSION_ERROR -2
+#define FLB_MAP_EXPANSION_INVALID_VALUE_TYPE -3
+
+int syslog_prot_process(struct syslog_conn *conn);
+int syslog_prot_process_udp(struct syslog_conn *conn);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_syslog/syslog_server.c b/src/fluent-bit/plugins/in_syslog/syslog_server.c
new file mode 100644
index 000000000..5317851db
--- /dev/null
+++ b/src/fluent-bit/plugins/in_syslog/syslog_server.c
@@ -0,0 +1,235 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_macros.h>
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_socket.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/tls/flb_tls.h>
+#include <fluent-bit/flb_downstream.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#if !defined(FLB_SYSTEM_WINDOWS)
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#endif
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "syslog.h"
+
+static int remove_existing_socket_file(char *socket_path)
+{
+ struct stat file_data;
+ int result;
+
+ result = stat(socket_path, &file_data);
+
+ if (result == -1) {
+ if (errno == ENOENT) {
+ return 0;
+ }
+
+ flb_errno();
+
+ return -1;
+ }
+
+ if (S_ISSOCK(file_data.st_mode) == 0) {
+ return -2;
+ }
+
+ result = unlink(socket_path);
+
+ if (result != 0) {
+ return -3;
+ }
+
+ return 0;
+}
+
+#if !defined(FLB_SYSTEM_WINDOWS)
+static int syslog_server_unix_create(struct flb_syslog *ctx)
+{
+ int result;
+ int mode;
+ struct flb_tls *tls;
+
+ if (ctx->mode == FLB_SYSLOG_UNIX_TCP) {
+ mode = FLB_TRANSPORT_UNIX_STREAM;
+ tls = ctx->ins->tls;
+ }
+ else if (ctx->mode == FLB_SYSLOG_UNIX_UDP) {
+ ctx->dgram_mode_flag = FLB_TRUE;
+
+ mode = FLB_TRANSPORT_UNIX_DGRAM;
+ tls = NULL;
+ }
+ else {
+ return -1;
+ }
+
+ result = remove_existing_socket_file(ctx->unix_path);
+
+ if (result != 0) {
+ if (result == -2) {
+ flb_plg_error(ctx->ins,
+ "%s exists and it is not a unix socket. Aborting",
+ ctx->unix_path);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "could not remove existing unix socket %s. Aborting",
+ ctx->unix_path);
+ }
+
+ return -1;
+ }
+
+ ctx->downstream = flb_downstream_create(mode,
+ ctx->ins->flags,
+ ctx->unix_path,
+ 0,
+ tls,
+ ctx->ins->config,
+ &ctx->ins->net_setup);
+
+ if (ctx->downstream == NULL) {
+ return -1;
+ }
+
+ if (chmod(ctx->unix_path, ctx->unix_perm)) {
+ flb_errno();
+ flb_error("[in_syslog] cannot set permission on '%s' to %04o",
+ ctx->unix_path, ctx->unix_perm);
+
+ return -1;
+ }
+
+ return 0;
+}
+#else
+static int syslog_server_unix_create(struct flb_syslog *ctx)
+{
+ return -1;
+}
+#endif
+
+static int syslog_server_net_create(struct flb_syslog *ctx)
+{
+ unsigned short int port;
+ int mode;
+ struct flb_tls *tls;
+
+ port = (unsigned short int) strtoul(ctx->port, NULL, 10);
+
+ if (ctx->mode == FLB_SYSLOG_TCP) {
+ mode = FLB_TRANSPORT_TCP;
+ tls = ctx->ins->tls;
+ }
+ else if (ctx->mode == FLB_SYSLOG_UDP) {
+ ctx->dgram_mode_flag = FLB_TRUE;
+
+ mode = FLB_TRANSPORT_UDP;
+ tls = NULL;
+ }
+ else {
+ return -1;
+ }
+
+ ctx->downstream = flb_downstream_create(mode,
+ ctx->ins->flags,
+ ctx->listen,
+ port,
+ tls,
+ ctx->ins->config,
+ &ctx->ins->net_setup);
+
+ if (ctx->downstream != NULL) {
+ flb_info("[in_syslog] %s server binding %s:%s",
+ ((ctx->mode == FLB_SYSLOG_TCP) ? "TCP" : "UDP"),
+ ctx->listen, ctx->port);
+ }
+ else {
+ flb_error("[in_syslog] could not bind address %s:%s. Aborting",
+ ctx->listen, ctx->port);
+
+ return -1;
+ }
+
+ if (ctx->receive_buffer_size) {
+ if (flb_net_socket_rcv_buffer(ctx->downstream->server_fd,
+ ctx->receive_buffer_size)) {
+ flb_error("[in_syslog] could not set rcv buffer to %ld. Aborting",
+ ctx->receive_buffer_size);
+ return -1;
+ }
+ }
+
+ flb_net_socket_nonblocking(ctx->downstream->server_fd);
+
+ return 0;
+}
+
+int syslog_server_create(struct flb_syslog *ctx)
+{
+ int ret;
+
+ if (ctx->mode == FLB_SYSLOG_TCP || ctx->mode == FLB_SYSLOG_UDP) {
+ ret = syslog_server_net_create(ctx);
+ }
+ else {
+ /* Create unix socket end-point */
+ ret = syslog_server_unix_create(ctx);
+ }
+
+ if (ret != 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int syslog_server_destroy(struct flb_syslog *ctx)
+{
+ if (ctx->collector_id != -1) {
+ flb_input_collector_delete(ctx->collector_id, ctx->ins);
+
+ ctx->collector_id = -1;
+ }
+
+ if (ctx->downstream != NULL) {
+ flb_downstream_destroy(ctx->downstream);
+
+ ctx->downstream = NULL;
+ }
+
+ if (ctx->mode == FLB_SYSLOG_UNIX_TCP || ctx->mode == FLB_SYSLOG_UNIX_UDP) {
+ if (ctx->unix_path) {
+ unlink(ctx->unix_path);
+ }
+ }
+ else {
+ flb_free(ctx->port);
+ }
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_syslog/syslog_server.h b/src/fluent-bit/plugins/in_syslog/syslog_server.h
new file mode 100644
index 000000000..d14feba7c
--- /dev/null
+++ b/src/fluent-bit/plugins/in_syslog/syslog_server.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_SYSLOG_SERVER_H
+#define FLB_IN_SYSLOG_SERVER_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_config.h>
+
+#include "syslog.h"
+
+int syslog_server_create(struct flb_syslog *ctx);
+int syslog_server_destroy(struct flb_syslog *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_systemd/CMakeLists.txt b/src/fluent-bit/plugins/in_systemd/CMakeLists.txt
new file mode 100644
index 000000000..3e52d52b7
--- /dev/null
+++ b/src/fluent-bit/plugins/in_systemd/CMakeLists.txt
@@ -0,0 +1,11 @@
+set(src
+ systemd_config.c
+ systemd.c)
+
+if(FLB_SQLDB)
+set(src
+ ${src}
+ systemd_db.c)
+endif()
+
+FLB_PLUGIN(in_systemd "${src}" ${JOURNALD_LIBRARIES})
diff --git a/src/fluent-bit/plugins/in_systemd/systemd.c b/src/fluent-bit/plugins/in_systemd/systemd.c
new file mode 100644
index 000000000..02f81144b
--- /dev/null
+++ b/src/fluent-bit/plugins/in_systemd/systemd.c
@@ -0,0 +1,555 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_time.h>
+
+#include "systemd_config.h"
+#include "systemd_db.h"
+
+#include <ctype.h>
+
+/* msgpack helpers to pack unsigned ints (it takes care of endianness) */
+#define pack_uint16(buf, d) _msgpack_store16(buf, (uint16_t) d)
+#define pack_uint32(buf, d) _msgpack_store32(buf, (uint32_t) d)
+
+/* tag composer */
+static int tag_compose(const char *tag, const char *unit_name,
+ int unit_size, char **out_buf, size_t *out_size)
+{
+ int len;
+ const char *p;
+ char *buf = *out_buf;
+ size_t buf_s = 0;
+
+ p = strchr(tag, '*');
+ if (!p) {
+ return -1;
+ }
+
+ /* Copy tag prefix if any */
+ len = (p - tag);
+ if (len > 0) {
+ memcpy(buf, tag, len);
+ buf_s += len;
+ }
+
+ /* Append file name */
+ memcpy(buf + buf_s, unit_name, unit_size);
+ buf_s += unit_size;
+
+ /* Tag suffix (if any) */
+ p++;
+ if (*p) {
+ len = strlen(tag);
+ memcpy(buf + buf_s, p, (len - (p - tag)));
+ buf_s += (len - (p - tag));
+ }
+
+ buf[buf_s] = '\0';
+ *out_size = buf_s;
+
+ return 0;
+}
+
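+/*
+ * Illustrative example (hypothetical values): with the input tag set to
+ * "systemd.*" and the unit "cron.service", tag_compose() produces
+ * "systemd.cron.service"; any suffix placed after the '*' in the tag is
+ * appended as well.
+ */
+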
+static int in_systemd_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ int ret_j;
+ int i;
+ int len;
+ int entries = 0;
+ int skip_entries = 0;
+ int rows = 0;
+ time_t sec;
+ long nsec;
+ uint64_t usec;
+ size_t length;
+ size_t threshold;
+ const char *sep;
+ const char *key;
+ const char *val;
+ char *buf = NULL;
+#ifdef FLB_HAVE_SQLDB
+ char *cursor = NULL;
+#endif
+ char *tag = NULL;
+ char new_tag[PATH_MAX];
+ char last_tag[PATH_MAX] = {0};
+ size_t tag_len;
+ size_t last_tag_len = 0;
+ const void *data;
+ struct flb_systemd_config *ctx = in_context;
+ struct flb_time tm;
+
+ /* Restricted by mem_buf_limit */
+ if (flb_input_buf_paused(ins) == FLB_TRUE) {
+ return FLB_SYSTEMD_BUSY;
+ }
+
+ /*
+     * If there are no pending records from a previous round, check whether
+     * the journal actually changed; otherwise go ahead and continue reading
+     * the journal.
+ */
+ if (ctx->pending_records == FLB_FALSE) {
+ ret = sd_journal_process(ctx->j);
+ if (ret == SD_JOURNAL_INVALIDATE) {
+ flb_plg_debug(ctx->ins,
+ "received event on added or removed journal file");
+ }
+ if (ret != SD_JOURNAL_APPEND && ret != SD_JOURNAL_NOP) {
+ return FLB_SYSTEMD_NONE;
+ }
+ }
+
+ if (ctx->lowercase == FLB_TRUE) {
+ ret = sd_journal_get_data_threshold(ctx->j, &threshold);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins,
+ "error setting up systemd data. "
+ "sd_journal_get_data_threshold() return value '%i'",
+ ret);
+ return FLB_SYSTEMD_ERROR;
+ }
+ }
+
+ while ((ret_j = sd_journal_next(ctx->j)) > 0) {
+ /* If the tag is composed dynamically, gather the Systemd Unit name */
+ if (ctx->dynamic_tag) {
+ ret = sd_journal_get_data(ctx->j, "_SYSTEMD_UNIT", &data, &length);
+ if (ret == 0) {
+ tag = new_tag;
+ tag_compose(ctx->ins->tag, (const char *) data + 14, length - 14,
+ &tag, &tag_len);
+ }
+ else {
+ tag = new_tag;
+ tag_compose(ctx->ins->tag,
+ FLB_SYSTEMD_UNKNOWN, sizeof(FLB_SYSTEMD_UNKNOWN) - 1,
+ &tag, &tag_len);
+ }
+ }
+ else {
+ tag = ctx->ins->tag;
+ tag_len = ctx->ins->tag_len;
+ }
+
+ if (last_tag_len == 0) {
+ strncpy(last_tag, tag, tag_len);
+ last_tag_len = tag_len;
+ }
+
+ /* Set time */
+ ret = sd_journal_get_realtime_usec(ctx->j, &usec);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins,
+ "error reading from systemd journal. "
+ "sd_journal_get_realtime_usec() return value '%i'",
+ ret);
+ /* It seems the journal file was deleted (rotated). */
+ ret_j = -1;
+ break;
+ }
+ sec = usec / 1000000;
+ nsec = (usec % 1000000) * 1000;
+ flb_time_set(&tm, sec, nsec);
+
+ /*
+ * The new incoming record can have a different tag than previous one,
+ * so a new msgpack buffer is required. We ingest the data and prepare
+ * a new buffer.
+ */
+ if (ctx->log_encoder->output_length > 0 &&
+ ((last_tag_len != tag_len) ||
+ (strncmp(last_tag, tag, tag_len) != 0))) {
+ flb_input_log_append(ctx->ins,
+ last_tag, last_tag_len,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ strncpy(last_tag, tag, tag_len);
+ last_tag_len = tag_len;
+ }
+
+
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(ctx->log_encoder, &tm);
+ }
+
+ /* Pack every field in the entry */
+ entries = 0;
+ skip_entries = 0;
+ while (sd_journal_enumerate_data(ctx->j, &data, &length) > 0 &&
+ entries < ctx->max_fields) {
+ key = (const char *) data;
+ if (ctx->strip_underscores == FLB_TRUE && key[0] == '_') {
+ key++;
+ length--;
+ }
+
+ sep = strchr(key, '=');
+ if (sep == NULL) {
+ skip_entries++;
+ continue;
+ }
+
+ len = (sep - key);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_string_length(
+ ctx->log_encoder, len);
+ }
+
+ if (ctx->lowercase == FLB_TRUE) {
+                /*
+                 * Ensure buf has enough space for the key, because libsystemd
+                 * might return larger data than the threshold.
+                 */
+ if (buf == NULL) {
+ buf = flb_sds_create_len(NULL, threshold);
+ }
+ if (flb_sds_alloc(buf) < len) {
+ buf = flb_sds_increase(buf, len - flb_sds_alloc(buf));
+ }
+ for (i = 0; i < len; i++) {
+ buf[i] = tolower(key[i]);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_string_body(
+ ctx->log_encoder, buf, len);
+ }
+ }
+ else {
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_string_body(
+ ctx->log_encoder, (char *) key, len);
+ }
+ }
+
+ val = sep + 1;
+ len = length - (sep - key) - 1;
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_string(
+ ctx->log_encoder, (char *) val, len);
+ }
+
+ entries++;
+ }
+ rows++;
+
+ if (skip_entries > 0) {
+ flb_plg_error(ctx->ins, "Skip %d broken entries", skip_entries);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ /*
+ * Some journals can have too much data, pause if we have processed
+ * more than 1MB. Journal will resume later.
+ */
+ if (ctx->log_encoder->output_length > 1024000) {
+ flb_input_log_append(ctx->ins,
+ tag, tag_len,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ strncpy(last_tag, tag, tag_len);
+ last_tag_len = tag_len;
+
+ break;
+ }
+
+ if (rows >= ctx->max_entries) {
+ break;
+ }
+ }
+
+ flb_sds_destroy(buf);
+
+#ifdef FLB_HAVE_SQLDB
+ /* Save cursor */
+ if (ctx->db) {
+ sd_journal_get_cursor(ctx->j, &cursor);
+ if (cursor) {
+ flb_systemd_db_set_cursor(ctx, cursor);
+ flb_free(cursor);
+ }
+ }
+#endif
+
+ /* Write any pending data into the buffer */
+ if (ctx->log_encoder->output_length > 0) {
+ flb_input_log_append(ctx->ins,
+ tag, tag_len,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+ }
+
+ /* the journal is empty, no more records */
+ if (ret_j == 0) {
+ ctx->pending_records = FLB_FALSE;
+ return FLB_SYSTEMD_OK;
+ }
+ else if (ret_j > 0) {
+        /*
+         * ret_j == 1, but the loop was broken due to some special condition,
+         * like the buffer size limit, or it reached the maximum number of rows
+         * it is supposed to process on this call. Assume there are pending
+         * records.
+         */
+ ctx->pending_records = FLB_TRUE;
+ return FLB_SYSTEMD_MORE;
+ }
+ else {
+        /* The current cursor most likely points to a deleted file.
+         * Re-seek to the first journal entry.
+         * Other failures, such as a disk read error, would still lead to an
+         * infinite loop here, but at least the FLB log will be full of errors. */
+ ret = sd_journal_seek_head(ctx->j);
+ flb_plg_error(ctx->ins,
+ "sd_journal_next() returned error %i; "
+ "journal is re-opened, unread logs are lost; "
+ "sd_journal_seek_head() returned %i", ret_j, ret);
+ ctx->pending_records = FLB_TRUE;
+ return FLB_SYSTEMD_ERROR;
+ }
+}
+
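+/*
+ * Archive collector: consumes the dummy event written to the channel manager
+ * at pre-run time, performs a first collection pass over the journal and, once
+ * it is fully caught up (FLB_SYSTEMD_OK), registers the event and timer
+ * collectors used during regular operation; otherwise it re-arms itself.
+ */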
+static int in_systemd_collect_archive(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ uint64_t val;
+ ssize_t bytes;
+ struct flb_systemd_config *ctx = in_context;
+
+ bytes = read(ctx->ch_manager[0], &val, sizeof(uint64_t));
+ if (bytes == -1) {
+ flb_errno();
+ return -1;
+ }
+
+ ret = in_systemd_collect(ins, config, in_context);
+ if (ret == FLB_SYSTEMD_OK) {
+ /* Events collector: journald events */
+ ret = flb_input_set_collector_event(ins,
+ in_systemd_collect,
+ ctx->fd,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error setting up collector events");
+ flb_systemd_config_destroy(ctx);
+ return -1;
+ }
+ ctx->coll_fd_journal = ret;
+ flb_input_collector_start(ctx->coll_fd_journal, ins);
+
+ /* Timer to collect pending events */
+ ret = flb_input_set_collector_time(ins,
+ in_systemd_collect,
+ 1, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "error setting up collector for pending events");
+ flb_systemd_config_destroy(ctx);
+ return -1;
+ }
+ ctx->coll_fd_pending = ret;
+ flb_input_collector_start(ctx->coll_fd_pending, ins);
+
+ return 0;
+ }
+
+ /* If FLB_SYSTEMD_NONE or FLB_SYSTEMD_MORE, keep trying */
+ write(ctx->ch_manager[1], &val, sizeof(uint64_t));
+
+ return 0;
+}
+
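+/* Initialize the plugin: create the context and register the archive events collector */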
+static int in_systemd_init(struct flb_input_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct flb_systemd_config *ctx;
+
+ ctx = flb_systemd_config_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "cannot initialize");
+ return -1;
+ }
+
+ /* Set the context */
+ flb_input_set_context(ins, ctx);
+
+ /* Events collector: archive */
+ ret = flb_input_set_collector_event(ins, in_systemd_collect_archive,
+ ctx->ch_manager[0], config);
+ if (ret == -1) {
+ flb_systemd_config_destroy(ctx);
+ return -1;
+ }
+ ctx->coll_fd_archive = ret;
+
+ return 0;
+}
+
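+/*
+ * Pre-run callback: write a dummy event into the channel manager so the
+ * archive collector fires once the engine starts.
+ */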
+static int in_systemd_pre_run(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int n;
+ uint64_t val = 0xc002;
+ struct flb_systemd_config *ctx = in_context;
+ (void) ins;
+ (void) config;
+
+ /* Insert a dummy event into the channel manager */
+ n = write(ctx->ch_manager[1], &val, sizeof(val));
+ if (n == -1) {
+ flb_errno();
+ return -1;
+ }
+
+ return n;
+}
+
+static void in_systemd_pause(void *data, struct flb_config *config)
+{
+ int ret;
+ struct flb_systemd_config *ctx = data;
+
+ flb_input_collector_pause(ctx->coll_fd_archive, ctx->ins);
+
+ /* pause only if it's running */
+ ret = flb_input_collector_running(ctx->coll_fd_journal, ctx->ins);
+ if (ret == FLB_TRUE) {
+ flb_input_collector_pause(ctx->coll_fd_journal, ctx->ins);
+ flb_input_collector_pause(ctx->coll_fd_pending, ctx->ins);
+ }
+}
+
+static void in_systemd_resume(void *data, struct flb_config *config)
+{
+ int ret;
+ struct flb_systemd_config *ctx = data;
+
+ flb_input_collector_resume(ctx->coll_fd_archive, ctx->ins);
+
+    /* resume only if it is not running */
+ ret = flb_input_collector_running(ctx->coll_fd_journal, ctx->ins);
+ if (ret == FLB_FALSE) {
+ flb_input_collector_resume(ctx->coll_fd_journal, ctx->ins);
+ flb_input_collector_resume(ctx->coll_fd_pending, ctx->ins);
+ }
+}
+
+static int in_systemd_exit(void *data, struct flb_config *config)
+{
+ (void) *config;
+ struct flb_systemd_config *ctx = data;
+
+ flb_systemd_config_destroy(ctx);
+ return 0;
+}
+
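+/*
+ * Configuration properties map. As an illustrative (hypothetical) example,
+ * a classic-mode configuration using some of these options could look like:
+ *
+ *   [INPUT]
+ *       Name              systemd
+ *       Tag               host.*
+ *       Systemd_Filter    _SYSTEMD_UNIT=docker.service
+ *       Read_From_Tail    true
+ *       Strip_Underscores false
+ */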
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "path", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_systemd_config, path),
+ "Set the systemd journal path"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "max_fields", FLB_SYSTEMD_MAX_FIELDS,
+ 0, FLB_TRUE, offsetof(struct flb_systemd_config, max_fields),
+ "Set the maximum fields per notification"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "max_entries", FLB_SYSTEMD_MAX_ENTRIES,
+ 0, FLB_TRUE, offsetof(struct flb_systemd_config, max_entries),
+ "Set the maximum entries per notification"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "systemd_filter_type", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_systemd_config, filter_type),
+ "Set the systemd filter type to either 'and' or 'or'"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "systemd_filter", (char *)NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_systemd_config, systemd_filters),
+ "Add a systemd filter, can be set multiple times"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "read_from_tail", "false",
+ 0, FLB_TRUE, offsetof(struct flb_systemd_config, read_from_tail),
+ "Read the journal from the end (tail)"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "lowercase", "false",
+ 0, FLB_TRUE, offsetof(struct flb_systemd_config, lowercase),
+ "Lowercase the fields"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "strip_underscores", "false",
+ 0, FLB_TRUE, offsetof(struct flb_systemd_config, strip_underscores),
+ "Strip undersecores from fields"
+ },
+#ifdef FLB_HAVE_SQLDB
+ {
+ FLB_CONFIG_MAP_STR, "db.sync", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_systemd_config, db_sync_mode),
+ "Set the database sync mode: extra, full, normal or off"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "db", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_systemd_config, db_path),
+ "Set the database path"
+ },
+#endif /* FLB_HAVE_SQLDB */
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_systemd_plugin = {
+ .name = "systemd",
+ .description = "Systemd (Journal) reader",
+ .cb_init = in_systemd_init,
+ .cb_pre_run = in_systemd_pre_run,
+ .cb_flush_buf = NULL,
+ .cb_pause = in_systemd_pause,
+ .cb_resume = in_systemd_resume,
+ .cb_exit = in_systemd_exit,
+ .config_map = config_map,
+ .flags = 0
+};
diff --git a/src/fluent-bit/plugins/in_systemd/systemd_config.c b/src/fluent-bit/plugins/in_systemd/systemd_config.c
new file mode 100644
index 000000000..57c13a859
--- /dev/null
+++ b/src/fluent-bit/plugins/in_systemd/systemd_config.c
@@ -0,0 +1,314 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_kv.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#ifdef FLB_HAVE_SQLDB
+#include "systemd_db.h"
+#endif
+
+#include "systemd_config.h"
+
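+/*
+ * Build the plugin context: load the config map, create the channel manager
+ * pipe, open the journal (optionally from a custom path), apply the systemd
+ * filters and seek to the requested starting position (tail, head or a
+ * previously stored cursor).
+ */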
+struct flb_systemd_config *flb_systemd_config_create(struct flb_input_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ const char *tmp;
+ char *cursor = NULL;
+ struct stat st;
+ struct mk_list *head;
+ struct flb_systemd_config *ctx;
+ int journal_filter_is_and;
+ size_t size;
+ struct flb_config_map_val *mv;
+
+
+ /* Allocate space for the configuration */
+ ctx = flb_calloc(1, sizeof(struct flb_systemd_config));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+#ifdef FLB_HAVE_SQLDB
+ ctx->db_sync = -1;
+#endif
+
+ /* Load the config_map */
+ ret = flb_input_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to load configuration");
+        flb_free(ctx);
+ return NULL;
+ }
+
+ /* Create the channel manager */
+ ret = pipe(ctx->ch_manager);
+ if (ret == -1) {
+ flb_errno();
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Config: path */
+ if (ctx->path) {
+ ret = stat(ctx->path, &st);
+ if (ret == -1) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "given path %s is invalid", ctx->path);
+ flb_free(ctx);
+ return NULL;
+ }
+
+ if (!S_ISDIR(st.st_mode)) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "given path is not a directory: %s", ctx->path);
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+ else {
+ ctx->path = NULL;
+ }
+
+ /* Open the Journal */
+ if (ctx->path) {
+ ret = sd_journal_open_directory(&ctx->j, ctx->path, 0);
+ }
+ else {
+ ret = sd_journal_open(&ctx->j, SD_JOURNAL_LOCAL_ONLY);
+ }
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "could not open the Journal");
+ flb_free(ctx);
+ return NULL;
+ }
+ ctx->fd = sd_journal_get_fd(ctx->j);
+
+ /* Tag settings */
+ tmp = strchr(ins->tag, '*');
+ if (tmp) {
+ ctx->dynamic_tag = FLB_TRUE;
+ }
+ else {
+ ctx->dynamic_tag = FLB_FALSE;
+ }
+
+#ifdef FLB_HAVE_SQLDB
+ /* Database options (needs to be set before the context) */
+ if (ctx->db_sync_mode) {
+ if (strcasecmp(ctx->db_sync_mode, "extra") == 0) {
+ ctx->db_sync = 3;
+ }
+ else if (strcasecmp(ctx->db_sync_mode, "full") == 0) {
+ ctx->db_sync = 2;
+ }
+ else if (strcasecmp(ctx->db_sync_mode, "normal") == 0) {
+ ctx->db_sync = 1;
+ }
+ else if (strcasecmp(ctx->db_sync_mode, "off") == 0) {
+ ctx->db_sync = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "invalid database 'db.sync' value: %s", ctx->db_sync_mode);
+ }
+ }
+
+ /* Database file */
+ if (ctx->db_path) {
+ ctx->db = flb_systemd_db_open(ctx->db_path, ins, ctx, config);
+ if (!ctx->db) {
+ flb_plg_error(ctx->ins, "could not open/create database '%s'", ctx->db_path);
+ }
+ }
+
+#endif
+
+ if (ctx->filter_type) {
+ if (strcasecmp(ctx->filter_type, "and") == 0) {
+ journal_filter_is_and = FLB_TRUE;
+ }
+ else if (strcasecmp(ctx->filter_type, "or") == 0) {
+ journal_filter_is_and = FLB_FALSE;
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "systemd_filter_type must be 'and' or 'or'. Got %s",
+ ctx->filter_type);
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+ else {
+ journal_filter_is_and = FLB_FALSE;
+ }
+
+ /* Load Systemd filters */
+ if (ctx->systemd_filters) {
+ flb_config_map_foreach(head, mv, ctx->systemd_filters) {
+ flb_plg_debug(ctx->ins, "add filter: %s (%s)", mv->val.str,
+ journal_filter_is_and ? "and" : "or");
+ ret = sd_journal_add_match(ctx->j, mv->val.str, 0);
+ if (ret < 0) {
+ if (ret == -EINVAL) {
+ flb_plg_error(ctx->ins,
+ "systemd_filter error: invalid input '%s'",
+ mv->val.str);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "systemd_filter error: status=%d input '%s'",
+ ret, mv->val.str);
+ }
+ flb_systemd_config_destroy(ctx);
+ return NULL;
+ }
+ if (journal_filter_is_and) {
+ ret = sd_journal_add_conjunction(ctx->j);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins,
+ "sd_journal_add_conjunction failed. ret=%d",
+ ret);
+ flb_systemd_config_destroy(ctx);
+ return NULL;
+ }
+ }
+ else {
+ ret = sd_journal_add_disjunction(ctx->j);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins,
+ "sd_journal_add_disjunction failed. ret=%d",
+ ret);
+ flb_systemd_config_destroy(ctx);
+ return NULL;
+ }
+ }
+ }
+ }
+
+ if (ctx->read_from_tail == FLB_TRUE) {
+ sd_journal_seek_tail(ctx->j);
+ /*
+ * Skip up to 350 records until the end of journal is found.
+ * Workaround for bug https://github.com/systemd/systemd/issues/9934
+ * Due to the bug, sd_journal_next() returns 2 last records of each journal file.
+ * 4 GB is the default journal limit, so with 25 MB/file we may get
+ * up to 4096/25*2 ~= 350 old log messages. See also fluent-bit PR #1565.
+ */
+ ret = sd_journal_next_skip(ctx->j, 350);
+ flb_plg_debug(ctx->ins,
+ "jump to the end of journal and skip %d last entries", ret);
+ }
+ else {
+ ret = sd_journal_seek_head(ctx->j);
+ }
+
+#ifdef FLB_HAVE_SQLDB
+ /* Check if we have a cursor in our database */
+ if (ctx->db) {
+ /* Initialize prepared statement */
+ ret = sqlite3_prepare_v2(ctx->db->handler,
+ SQL_UPDATE_CURSOR,
+ -1,
+ &ctx->stmt_cursor,
+ 0);
+ if (ret != SQLITE_OK) {
+ flb_plg_error(ctx->ins, "error preparing database SQL statement");
+ flb_systemd_config_destroy(ctx);
+ return NULL;
+ }
+
+ /* Get current cursor */
+ cursor = flb_systemd_db_get_cursor(ctx);
+ if (cursor) {
+ ret = sd_journal_seek_cursor(ctx->j, cursor);
+ if (ret == 0) {
+ flb_plg_info(ctx->ins, "seek_cursor=%.40s... OK", cursor);
+
+ /* Skip the first entry, already processed */
+ sd_journal_next_skip(ctx->j, 1);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "seek_cursor failed");
+ }
+ flb_free(cursor);
+ }
+ else {
+ /* Insert the first row */
+ cursor = NULL;
+ flb_systemd_db_init_cursor(ctx, cursor);
+ if (cursor) {
+ flb_free(cursor);
+ }
+ }
+ }
+#endif
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(ctx->ins, "could not initialize event encoder");
+ flb_systemd_config_destroy(ctx);
+
+ return NULL;
+ }
+
+
+ sd_journal_get_data_threshold(ctx->j, &size);
+ flb_plg_debug(ctx->ins,
+ "sd_journal library may truncate values "
+ "to sd_journal_get_data_threshold() bytes: %zu", size);
+
+ return ctx;
+}
+
+int flb_systemd_config_destroy(struct flb_systemd_config *ctx)
+{
+ if (ctx->log_encoder != NULL) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+
+ ctx->log_encoder = NULL;
+ }
+
+ /* Close context */
+ if (ctx->j) {
+ sd_journal_close(ctx->j);
+ }
+
+#ifdef FLB_HAVE_SQLDB
+ if (ctx->db) {
+ sqlite3_finalize(ctx->stmt_cursor);
+ flb_systemd_db_close(ctx->db);
+ }
+#endif
+
+ close(ctx->ch_manager[0]);
+ close(ctx->ch_manager[1]);
+
+ flb_free(ctx);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_systemd/systemd_config.h b/src/fluent-bit/plugins/in_systemd/systemd_config.h
new file mode 100644
index 000000000..db5c4cf53
--- /dev/null
+++ b/src/fluent-bit/plugins/in_systemd/systemd_config.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_SYSTEMD_CONFIG_H
+#define FLB_SYSTEMD_CONFIG_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_sqldb.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <systemd/sd-journal.h>
+
+/* return values */
+#define FLB_SYSTEMD_ERROR -1 /* Systemd journal file read error. */
+#define FLB_SYSTEMD_NONE 0
+#define FLB_SYSTEMD_OK 1
+#define FLB_SYSTEMD_MORE 2
+#define FLB_SYSTEMD_BUSY 3
+
+/* constants */
+#define FLB_SYSTEMD_UNIT "_SYSTEMD_UNIT"
+#define FLB_SYSTEMD_UNKNOWN "unknown"
+#define FLB_SYSTEMD_MAX_FIELDS "8000"
+#define FLB_SYSTEMD_MAX_ENTRIES "5000"
+
+/* Input configuration & context */
+struct flb_systemd_config {
+ /* Journal */
+ int fd; /* Journal file descriptor */
+ sd_journal *j; /* Journal context */
+ char *cursor;
+ flb_sds_t path;
+    flb_sds_t filter_type;       /* systemd filter type: and|or */
+ struct mk_list *systemd_filters;
+ int pending_records;
+ int read_from_tail; /* read_from_tail option */
+ int lowercase;
+ int strip_underscores;
+
+ /* Internal */
+ int ch_manager[2]; /* pipe: channel manager */
+ int coll_fd_archive; /* archive collector */
+ int coll_fd_journal; /* journal, events mode */
+ int coll_fd_pending; /* pending records */
+ int dynamic_tag;
+ int max_fields; /* max number of fields per record */
+ int max_entries; /* max number of records per iteration */
+
+#ifdef FLB_HAVE_SQLDB
+ flb_sds_t db_path;
+ struct flb_sqldb *db;
+ flb_sds_t db_sync_mode;
+ int db_sync;
+ sqlite3_stmt *stmt_cursor;
+#endif
+ struct flb_input_instance *ins;
+ struct flb_log_event_encoder *log_encoder;
+};
+
+struct flb_systemd_config *flb_systemd_config_create(struct flb_input_instance *i_ins,
+ struct flb_config *config);
+
+int flb_systemd_config_destroy(struct flb_systemd_config *ctx);
+#endif
diff --git a/src/fluent-bit/plugins/in_systemd/systemd_db.c b/src/fluent-bit/plugins/in_systemd/systemd_db.c
new file mode 100644
index 000000000..0abc4d999
--- /dev/null
+++ b/src/fluent-bit/plugins/in_systemd/systemd_db.c
@@ -0,0 +1,197 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_sqldb.h>
+
+#include "systemd_config.h"
+#include "systemd_db.h"
+
+struct query_status {
+ int rows;
+ char *cursor;
+ time_t updated;
+};
+
+static int cb_cursor_check(void *data, int argc, char **argv, char **cols)
+{
+ struct query_status *qs = data;
+
+ qs->cursor = flb_strdup(argv[0]); /* cursor string */
+ qs->updated = atoll(argv[1]); /* timestamp */
+ qs->rows++;
+
+ return 0;
+}
+
+static int cb_count_check(void *data, int argc, char **argv, char **cols)
+{
+ struct query_status *qs = data;
+
+ qs->rows = atoll(argv[0]);
+ return 0;
+}
+
+/* sanitize database table if required */
+static void flb_systemd_db_sanitize(struct flb_sqldb *db,
+ struct flb_input_instance *ins)
+{
+ int ret;
+ struct query_status qs = {0};
+
+ memset(&qs, '\0', sizeof(qs));
+ ret = flb_sqldb_query(db,
+ SQL_COUNT_CURSOR, cb_count_check, &qs);
+ if (ret != FLB_OK) {
+ flb_plg_error(ins, "db: failed counting number of rows");
+ return;
+ }
+
+ if (qs.rows > 1) {
+ flb_plg_warn(ins,
+ "db: table in_systemd_cursor looks corrupted, it has "
+ "more than one entry (rows=%i), the table content will be "
+ "fixed", qs.rows);
+
+        /* Delete duplicates; we only preserve the last record based on its ROWID */
+ ret = flb_sqldb_query(db, SQL_DELETE_DUPS, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ins, "could not delete in_systemd_cursor duplicates");
+ return;
+ }
+ flb_plg_info(ins, "table in_systemd_cursor has been fixed");
+ }
+
+}
+
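+/*
+ * Open (or create) the SQLite database used to persist the journal cursor,
+ * make sure the cursor table exists, optionally apply the 'synchronous'
+ * pragma and remove any duplicated cursor rows.
+ */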
+struct flb_sqldb *flb_systemd_db_open(const char *path,
+ struct flb_input_instance *ins,
+ struct flb_systemd_config *ctx,
+ struct flb_config *config)
+{
+ int ret;
+ char tmp[64];
+ struct flb_sqldb *db;
+
+ /* Open/create the database */
+ db = flb_sqldb_open(path, ins->name, config);
+ if (!db) {
+ return NULL;
+ }
+
+    /* Create the table schema if it doesn't exist */
+ ret = flb_sqldb_query(db, SQL_CREATE_CURSOR, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ins, "db: could not create 'cursor' table");
+ flb_sqldb_close(db);
+ return NULL;
+ }
+
+ if (ctx->db_sync >= 0) {
+ snprintf(tmp, sizeof(tmp) - 1, SQL_PRAGMA_SYNC,
+ ctx->db_sync);
+ ret = flb_sqldb_query(db, tmp, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "db could not set pragma 'sync'");
+ flb_sqldb_close(db);
+ return NULL;
+ }
+ }
+
+ flb_systemd_db_sanitize(db, ins);
+
+ return db;
+}
+
+int flb_systemd_db_close(struct flb_sqldb *db)
+{
+ flb_sqldb_close(db);
+ return 0;
+}
+
+int flb_systemd_db_init_cursor(struct flb_systemd_config *ctx, const char *cursor)
+{
+ int ret;
+ char query[PATH_MAX];
+ struct query_status qs = {0};
+
+    /* Check if a cursor entry already exists */
+ memset(&qs, '\0', sizeof(qs));
+ ret = flb_sqldb_query(ctx->db,
+ SQL_GET_CURSOR, cb_cursor_check, &qs);
+
+ if (ret != FLB_OK) {
+ return -1;
+ }
+
+ if (qs.rows == 0) {
+ /* Register the cursor */
+ snprintf(query, sizeof(query) - 1,
+ SQL_INSERT_CURSOR,
+ cursor, time(NULL));
+ ret = flb_sqldb_query(ctx->db,
+ query, NULL, NULL);
+ if (ret == FLB_ERROR) {
+ return -1;
+ }
+ return 0;
+ }
+
+ return -1;
+}
+
+int flb_systemd_db_set_cursor(struct flb_systemd_config *ctx, const char *cursor)
+{
+ int ret;
+
+ /* Bind parameters */
+ sqlite3_bind_text(ctx->stmt_cursor, 1, (char *) cursor, -1, 0);
+ sqlite3_bind_int64(ctx->stmt_cursor, 2, time(NULL));
+
+ ret = sqlite3_step(ctx->stmt_cursor);
+
+ sqlite3_clear_bindings(ctx->stmt_cursor);
+ sqlite3_reset(ctx->stmt_cursor);
+
+ if (ret != SQLITE_DONE) {
+ return -1;
+ }
+ return 0;
+}
+
+char *flb_systemd_db_get_cursor(struct flb_systemd_config *ctx)
+{
+ int ret;
+ struct query_status qs = {0};
+
+ memset(&qs, '\0', sizeof(qs));
+ ret = flb_sqldb_query(ctx->db,
+ SQL_GET_CURSOR, cb_cursor_check, &qs);
+ if (ret != FLB_OK) {
+ return NULL;
+ }
+
+ if (qs.rows > 0) {
+ /* cursor must be freed by the caller */
+ return qs.cursor;
+ }
+
+ return NULL;
+}
diff --git a/src/fluent-bit/plugins/in_systemd/systemd_db.h b/src/fluent-bit/plugins/in_systemd/systemd_db.h
new file mode 100644
index 000000000..da8d6e195
--- /dev/null
+++ b/src/fluent-bit/plugins/in_systemd/systemd_db.h
@@ -0,0 +1,64 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_SYSTEMD_DB_H
+#define FLB_SYSTEMD_DB_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_config.h>
+
+#include "systemd_config.h"
+
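+/*
+ * SQL statements for the cursor tracking table. The table is expected to hold
+ * a single row (cursor, updated); SQL_DELETE_DUPS removes any extra rows and
+ * keeps only the one with the highest ROWID.
+ */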
+#define SQL_CREATE_CURSOR \
+ "CREATE TABLE IF NOT EXISTS in_systemd_cursor (" \
+ " cursor TEXT NOT NULL," \
+ " updated INTEGER" \
+ ");"
+
+#define SQL_GET_CURSOR \
+ "SELECT * FROM in_systemd_cursor LIMIT 1;"
+
+#define SQL_INSERT_CURSOR \
+ "INSERT INTO in_systemd_cursor (cursor, updated)" \
+ " VALUES ('%s', %lu);"
+
+#define SQL_COUNT_CURSOR \
+ "SELECT COUNT(*) FROM in_systemd_cursor;"
+
+#define SQL_UPDATE_CURSOR \
+ "UPDATE in_systemd_cursor SET cursor=@cursor, updated=@updated;"
+
+#define SQL_DELETE_DUPS \
+ "DELETE FROM in_systemd_cursor WHERE ROWID < " \
+ "(SELECT MAX(ROWID) FROM in_systemd_cursor);"
+
+#define SQL_PRAGMA_SYNC \
+ "PRAGMA synchronous=%i;"
+
+struct flb_sqldb *flb_systemd_db_open(const char *path,
+ struct flb_input_instance *ins,
+ struct flb_systemd_config *ctx,
+ struct flb_config *config);
+int flb_systemd_db_close(struct flb_sqldb *db);
+int flb_systemd_db_init_cursor(struct flb_systemd_config *ctx, const char *cursor);
+int flb_systemd_db_set_cursor(struct flb_systemd_config *ctx, const char *cursor);
+char *flb_systemd_db_get_cursor(struct flb_systemd_config *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/CMakeLists.txt b/src/fluent-bit/plugins/in_tail/CMakeLists.txt
new file mode 100644
index 000000000..31d865218
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/CMakeLists.txt
@@ -0,0 +1,37 @@
+set(src
+ tail_file.c
+ tail_dockermode.c
+ tail_scan.c
+ tail_config.c
+ tail_fs_stat.c
+ tail.c)
+
+if(FLB_HAVE_INOTIFY)
+set(src
+ ${src}
+ tail_fs_inotify.c)
+endif()
+
+if(FLB_SQLDB)
+set(src
+ ${src}
+ tail_db.c)
+endif()
+
+if(FLB_PARSER)
+ set(src
+ ${src}
+ tail_multiline.c
+ )
+endif()
+
+if(MSVC)
+ set(src
+ ${src}
+ win32/stat.c
+ win32/io.c
+ )
+ FLB_PLUGIN(in_tail "${src}" "Shlwapi")
+else()
+ FLB_PLUGIN(in_tail "${src}" "")
+endif()
diff --git a/src/fluent-bit/plugins/in_tail/tail.c b/src/fluent-bit/plugins/in_tail/tail.c
new file mode 100644
index 000000000..34a0fec3d
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail.c
@@ -0,0 +1,783 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_utils.h>
+
+#include "tail.h"
+#include "tail_fs.h"
+#include "tail_db.h"
+#include "tail_file.h"
+#include "tail_scan.h"
+#include "tail_signal.h"
+#include "tail_config.h"
+#include "tail_dockermode.h"
+#include "tail_multiline.h"
+
+static inline int consume_byte(flb_pipefd_t fd)
+{
+ int ret;
+ uint64_t val;
+
+ /* We need to consume the byte */
+ ret = flb_pipe_r(fd, (char *) &val, sizeof(val));
+ if (ret <= 0) {
+ flb_errno();
+ return -1;
+ }
+
+ return 0;
+}
+
+/* cb_collect callback: process pending bytes of files promoted to event mode */
+static int in_tail_collect_pending(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ int active = 0;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_tail_config *ctx = in_context;
+ struct flb_tail_file *file;
+ struct stat st;
+ uint64_t pre;
+ uint64_t total_processed = 0;
+
+ /* Iterate promoted event files with pending bytes */
+ mk_list_foreach_safe(head, tmp, &ctx->files_event) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+
+ if (file->watch_fd == -1 ||
+ (file->offset >= file->size)) {
+ ret = fstat(file->fd, &st);
+
+ if (ret == -1) {
+ flb_errno();
+ flb_tail_file_remove(file);
+ continue;
+ }
+
+ file->size = st.st_size;
+ file->pending_bytes = (file->size - file->offset);
+ }
+ else {
+ memset(&st, 0, sizeof(struct stat));
+ }
+
+ if (file->pending_bytes <= 0) {
+ file->pending_bytes = (file->size - file->offset);
+ }
+
+ if (file->pending_bytes <= 0) {
+ continue;
+ }
+
+ if (ctx->event_batch_size > 0 &&
+ total_processed >= ctx->event_batch_size) {
+ break;
+ }
+
+ /* get initial offset to calculate the number of processed bytes later */
+ pre = file->offset;
+
+ ret = flb_tail_file_chunk(file);
+
+ /* Update the total number of bytes processed */
+ if (file->offset > pre) {
+ total_processed += (file->offset - pre);
+ }
+
+ switch (ret) {
+ case FLB_TAIL_ERROR:
+            /* Could no longer read the file */
+ flb_tail_file_remove(file);
+ break;
+ case FLB_TAIL_OK:
+ case FLB_TAIL_BUSY:
+ /*
+ * Adjust counter to verify if we need a further read(2) later.
+ * For more details refer to tail_fs_inotify.c:96.
+ */
+ if (file->offset < file->size) {
+ file->pending_bytes = (file->size - file->offset);
+ active++;
+ }
+ else {
+ file->pending_bytes = 0;
+ }
+ break;
+ }
+ }
+
+ /* If no more active files, consume pending signal so we don't get called again. */
+ if (active == 0) {
+ tail_consume_pending(ctx);
+ }
+
+ return 0;
+}
+
+/* cb_collect callback: process 'static' files discovered at startup */
+static int in_tail_collect_static(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ int active = 0;
+ int pre_size;
+ int pos_size;
+ int alter_size = 0;
+ int completed = FLB_FALSE;
+ char s_size[32];
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_tail_config *ctx = in_context;
+ struct flb_tail_file *file;
+ uint64_t pre;
+ uint64_t total_processed = 0;
+
+ /* Do a data chunk collection for each file */
+ mk_list_foreach_safe(head, tmp, &ctx->files_static) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+
+        /*
+         * The list 'files_static' represents all the files that were discovered
+         * on startup and that already contain data: these are called 'static
+         * files'.
+         *
+         * When processing static files, we don't know what kind of content they
+         * have or how much latency processing all of them in a row might add.
+         * Although we always try to do a full round and process a fraction of
+         * them on every invocation of this function, with a huge number of
+         * files we would face latency and degrade the performance of the main
+         * pipeline.
+         *
+         * In order to avoid this situation, we added a new option to the plugin
+         * called 'static_batch_size' which defines how many bytes can be
+         * processed on every invocation when handling the static files.
+         *
+         * When the limit is reached, we just break the loop and, as a side
+         * effect, we allow other events to keep being processed.
+         */
+ if (ctx->static_batch_size > 0 &&
+ total_processed >= ctx->static_batch_size) {
+ break;
+ }
+
+ /* get initial offset to calculate the number of processed bytes later */
+ pre = file->offset;
+
+ /* Process the file */
+ ret = flb_tail_file_chunk(file);
+
+ /* Update the total number of bytes processed */
+ if (file->offset > pre) {
+ total_processed += (file->offset - pre);
+ }
+
+ switch (ret) {
+ case FLB_TAIL_ERROR:
+            /* Could no longer read the file */
+ flb_plg_debug(ctx->ins, "inode=%"PRIu64" collect static ERROR",
+ file->inode);
+ flb_tail_file_remove(file);
+ break;
+ case FLB_TAIL_OK:
+ case FLB_TAIL_BUSY:
+ active++;
+ break;
+ case FLB_TAIL_WAIT:
+ if (file->config->exit_on_eof) {
+ flb_plg_info(ctx->ins, "inode=%"PRIu64" file=%s ended, stop",
+ file->inode, file->name);
+ if (ctx->files_static_count == 1) {
+ flb_engine_exit(config);
+ }
+ }
+ /* Promote file to 'events' type handler */
+ flb_plg_debug(ctx->ins, "inode=%"PRIu64" file=%s promote to TAIL_EVENT",
+ file->inode, file->name);
+
+            /*
+             * When promoting a file from 'static' to 'event' mode, the promoter
+             * will check if the file has been rotated while it was being
+             * processed in this function; if so, it will try to check for the
+             * following condition:
+             *
+             * "discover a new possible file created due to rotation"
+             *
+             * If the condition above is met, a new file entry will be added to
+             * the list that we are processing and a 'new signal' will be sent
+             * to the signal manager. But the signal manager will trigger the
+             * message only if no pending messages exist (to avoid queue size
+             * exhaustion).
+             *
+             * All good, but there is a corner case: if no 'active' files
+             * exist, the signal will be read and this function will not be
+             * called again, and since the signal did not trigger the
+             * message, the 'new file' enqueued by the nested function
+             * might stay stale (note that altering the length of this
+             * list will not be reflected yet).
+             *
+             * To fix this corner case, we use a variable called 'alter_size'
+             * that determines whether the size of the list stays the same
+             * after a rotation, which means a new file was added.
+             *
+             * We use 'alter_size' as a helper in the conditional below to know
+             * when to stop processing the static list.
+             */
+ if (alter_size == 0) {
+ pre_size = ctx->files_static_count;
+ }
+ ret = flb_tail_file_to_event(file);
+ if (ret == -1) {
+ flb_plg_debug(ctx->ins, "file=%s cannot promote, unregistering",
+ file->name);
+ flb_tail_file_remove(file);
+ }
+
+ if (alter_size == 0) {
+ pos_size = ctx->files_static_count;
+ if (pre_size == pos_size) {
+ alter_size++;
+ }
+ }
+ break;
+ }
+ }
+
+ /*
+ * If there are no more active static handlers, we consume the 'byte' that
+     * triggered this event so this is no longer called again.
+ */
+ if (active == 0 && alter_size == 0) {
+ consume_byte(ctx->ch_manager[0]);
+ ctx->ch_reads++;
+ completed = FLB_TRUE;
+ }
+
+ /* Debugging number of processed bytes */
+ if (flb_log_check_level(ctx->ins->log_level, FLB_LOG_DEBUG)) {
+ flb_utils_bytes_to_human_readable_size(total_processed,
+ s_size, sizeof(s_size));
+ if (completed) {
+ flb_plg_debug(ctx->ins, "[static files] processed %s, done", s_size);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "[static files] processed %s", s_size);
+ }
+ }
+
+ return 0;
+}
+
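+/*
+ * Timer callback (watcher_interval): detect rotation of files monitored
+ * through a symbolic link and handle the rotation when it happens.
+ */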
+static int in_tail_watcher_callback(struct flb_input_instance *ins,
+ struct flb_config *config, void *context)
+{
+ int ret = 0;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_tail_config *ctx = context;
+ struct flb_tail_file *file;
+ (void) config;
+
+ mk_list_foreach_safe(head, tmp, &ctx->files_event) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+ if (file->is_link == FLB_TRUE) {
+ ret = flb_tail_file_is_rotated(ctx, file);
+ if (ret == FLB_FALSE) {
+ continue;
+ }
+
+ /* The symbolic link name has been rotated */
+ flb_tail_file_rotated(file);
+ }
+ }
+ return ret;
+}
+
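+/*
+ * Event-mode collector: invoked when the file system watcher reports activity
+ * on a promoted file; reads the next data chunk from it.
+ */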
+int in_tail_collect_event(void *file, struct flb_config *config)
+{
+ int ret;
+ struct stat st;
+ struct flb_tail_file *f = file;
+
+ ret = fstat(f->fd, &st);
+ if (ret == -1) {
+ flb_tail_file_remove(f);
+ return 0;
+ }
+
+ ret = flb_tail_file_chunk(f);
+ switch (ret) {
+ case FLB_TAIL_ERROR:
+        /* Could no longer read the file */
+ flb_tail_file_remove(f);
+ break;
+ case FLB_TAIL_OK:
+ case FLB_TAIL_WAIT:
+ break;
+ }
+
+ return 0;
+}
+
+/* Initialize plugin */
+static int in_tail_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret = -1;
+ struct flb_tail_config *ctx = NULL;
+
+ /* Allocate space for the configuration */
+ ctx = flb_tail_config_create(in, config);
+ if (!ctx) {
+ return -1;
+ }
+ ctx->ins = in;
+
+ /* Initialize file-system watcher */
+ ret = flb_tail_fs_init(in, ctx, config);
+ if (ret == -1) {
+ flb_tail_config_destroy(ctx);
+ return -1;
+ }
+
+ /* Scan path */
+ flb_tail_scan(ctx->path_list, ctx);
+
+ /*
+     * After the first scan (at start time), all newly discovered files need to
+     * be read from the head, so we switch the 'read_from_head' flag to true so
+     * any other file discovered after a scan or a rotation is read from the
+     * beginning.
+ */
+ ctx->read_from_head = FLB_TRUE;
+
+ /* Set plugin context */
+ flb_input_set_context(in, ctx);
+
+ /* Register an event collector */
+ ret = flb_input_set_collector_event(in, in_tail_collect_static,
+ ctx->ch_manager[0], config);
+ if (ret == -1) {
+ flb_tail_config_destroy(ctx);
+ return -1;
+ }
+ ctx->coll_fd_static = ret;
+
+ /* Register re-scan: time managed by 'refresh_interval' property */
+ ret = flb_input_set_collector_time(in, flb_tail_scan_callback,
+ ctx->refresh_interval_sec,
+ ctx->refresh_interval_nsec,
+ config);
+ if (ret == -1) {
+ flb_tail_config_destroy(ctx);
+ return -1;
+ }
+ ctx->coll_fd_scan = ret;
+
+ /* Register watcher, interval managed by 'watcher_interval' property */
+ ret = flb_input_set_collector_time(in, in_tail_watcher_callback,
+ ctx->watcher_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_tail_config_destroy(ctx);
+ return -1;
+ }
+ ctx->coll_fd_watcher = ret;
+
+ /* Register callback to purge rotated files */
+ ret = flb_input_set_collector_time(in, flb_tail_file_purge,
+ ctx->rotate_wait, 0,
+ config);
+ if (ret == -1) {
+ flb_tail_config_destroy(ctx);
+ return -1;
+ }
+ ctx->coll_fd_rotated = ret;
+
+ /* Register callback to process pending bytes in promoted files */
+ ret = flb_input_set_collector_event(in, in_tail_collect_pending,
+ ctx->ch_pending[0], config);//1, 0, config);
+ if (ret == -1) {
+ flb_tail_config_destroy(ctx);
+ return -1;
+ }
+ ctx->coll_fd_pending = ret;
+
+
+ if (ctx->multiline == FLB_TRUE && ctx->parser) {
+ ctx->parser = NULL;
+ flb_plg_warn(in, "on multiline mode 'Parser' is not allowed "
+ "(parser disabled)");
+ }
+
+ /* Register callback to process docker mode queued buffer */
+ if (ctx->docker_mode == FLB_TRUE) {
+ ret = flb_input_set_collector_time(in, flb_tail_dmode_pending_flush,
+ ctx->docker_mode_flush, 0,
+ config);
+ if (ret == -1) {
+ ctx->docker_mode = FLB_FALSE;
+ flb_tail_config_destroy(ctx);
+ return -1;
+ }
+ ctx->coll_fd_dmode_flush = ret;
+ }
+
+#ifdef FLB_HAVE_PARSER
+ /* Register callback to process multiline queued buffer */
+ if (ctx->multiline == FLB_TRUE) {
+ ret = flb_input_set_collector_time(in, flb_tail_mult_pending_flush,
+ ctx->multiline_flush, 0,
+ config);
+ if (ret == -1) {
+ ctx->multiline = FLB_FALSE;
+ flb_tail_config_destroy(ctx);
+ return -1;
+ }
+ ctx->coll_fd_mult_flush = ret;
+ }
+#endif
+
+ return 0;
+}
+
+/* Pre-run callback / before the event loop */
+static int in_tail_pre_run(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_tail_config *ctx = in_context;
+ (void) ins;
+
+ return tail_signal_manager(ctx);
+}
+
+static int in_tail_exit(void *data, struct flb_config *config)
+{
+ (void) *config;
+ struct flb_tail_config *ctx = data;
+
+ flb_tail_file_remove_all(ctx);
+ flb_tail_fs_exit(ctx);
+ flb_tail_config_destroy(ctx);
+
+ return 0;
+}
+
+static void in_tail_pause(void *data, struct flb_config *config)
+{
+ struct flb_tail_config *ctx = data;
+
+ /*
+ * Pause general collectors:
+ *
+ * - static : static files lookup before promotion
+ */
+ flb_input_collector_pause(ctx->coll_fd_static, ctx->ins);
+ flb_input_collector_pause(ctx->coll_fd_pending, ctx->ins);
+
+ if (ctx->docker_mode == FLB_TRUE) {
+ flb_input_collector_pause(ctx->coll_fd_dmode_flush, ctx->ins);
+ if (config->is_ingestion_active == FLB_FALSE) {
+ flb_plg_info(ctx->ins, "flushing pending docker mode data...");
+ flb_tail_dmode_pending_flush_all(ctx);
+ }
+ }
+
+ if (ctx->multiline == FLB_TRUE) {
+ flb_input_collector_pause(ctx->coll_fd_mult_flush, ctx->ins);
+ if (config->is_ingestion_active == FLB_FALSE) {
+ flb_plg_info(ctx->ins, "flushing pending multiline data...");
+ flb_tail_mult_pending_flush_all(ctx);
+ }
+ }
+
+ /* Pause file system backend handlers */
+ flb_tail_fs_pause(ctx);
+}
+
+static void in_tail_resume(void *data, struct flb_config *config)
+{
+ struct flb_tail_config *ctx = data;
+
+ flb_input_collector_resume(ctx->coll_fd_static, ctx->ins);
+ flb_input_collector_resume(ctx->coll_fd_pending, ctx->ins);
+
+ if (ctx->docker_mode == FLB_TRUE) {
+ flb_input_collector_resume(ctx->coll_fd_dmode_flush, ctx->ins);
+ }
+
+ if (ctx->multiline == FLB_TRUE) {
+ flb_input_collector_resume(ctx->coll_fd_mult_flush, ctx->ins);
+ }
+
+ /* Pause file system backend handlers */
+ flb_tail_fs_resume(ctx);
+}
+
+/* Configuration properties map */
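+/*
+ * As an illustrative (hypothetical) example, a classic-mode configuration
+ * using some of the options below could look like:
+ *
+ *   [INPUT]
+ *       Name              tail
+ *       Path              /var/log/syslog
+ *       Read_From_Head    false
+ *       Refresh_Interval  60
+ *       Skip_Long_Lines   on
+ */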
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_CLIST, "path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, path_list),
+ "pattern specifying log files or multiple ones through "
+ "the use of common wildcards."
+ },
+ {
+ FLB_CONFIG_MAP_CLIST, "exclude_path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, exclude_list),
+ "Set one or multiple shell patterns separated by commas to exclude "
+ "files matching a certain criteria, e.g: 'exclude_path *.gz,*.zip'"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "key", "log",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, key),
+ "when a message is unstructured (no parser applied), it's appended "
+ "as a string under the key name log. This option allows to define an "
+ "alternative name for that key."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "read_from_head", "false",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, read_from_head),
+ "For new discovered files on start (without a database offset/position), read the "
+ "content from the head of the file, not tail."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "refresh_interval", "60",
+ 0, FLB_FALSE, 0,
+ "interval to refresh the list of watched files expressed in seconds."
+ },
+ {
+ FLB_CONFIG_MAP_TIME, "watcher_interval", "2s",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, watcher_interval),
+ },
+ {
+ FLB_CONFIG_MAP_TIME, "progress_check_interval", "2s",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, progress_check_interval),
+ },
+ {
+ FLB_CONFIG_MAP_INT, "progress_check_interval_nsec", "0",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, progress_check_interval_nsec),
+ },
+ {
+ FLB_CONFIG_MAP_TIME, "rotate_wait", FLB_TAIL_ROTATE_WAIT,
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, rotate_wait),
+ "specify the number of extra time in seconds to monitor a file once is "
+ "rotated in case some pending data is flushed."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "docker_mode", "false",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, docker_mode),
+ "If enabled, the plugin will recombine split Docker log lines before "
+ "passing them to any parser as configured above. This mode cannot be "
+ "used at the same time as Multiline."
+ },
+ {
+ FLB_CONFIG_MAP_INT, "docker_mode_flush", "4",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, docker_mode_flush),
+ "wait period time in seconds to flush queued unfinished split lines."
+
+ },
+#ifdef FLB_HAVE_REGEX
+ {
+ FLB_CONFIG_MAP_STR, "docker_mode_parser", NULL,
+ 0, FLB_FALSE, 0,
+ "specify the parser name to fetch log first line for muliline log"
+ },
+#endif
+ {
+ FLB_CONFIG_MAP_STR, "path_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, path_key),
+ "set the 'key' name where the name of monitored file will be appended."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "offset_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, offset_key),
+ "set the 'key' name where the offset of monitored file will be appended."
+ },
+ {
+ FLB_CONFIG_MAP_TIME, "ignore_older", "0",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, ignore_older),
+ "ignore files older than 'ignore_older'. Supports m,h,d (minutes, "
+ "hours, days) syntax. Default behavior is to read all the files."
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_chunk_size", FLB_TAIL_CHUNK,
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, buf_chunk_size),
+ "set the initial buffer size to read data from files. This value is "
+ "used too to increase buffer size."
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_max_size", FLB_TAIL_CHUNK,
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, buf_max_size),
+ "set the limit of the buffer size per monitored file. When a buffer "
+ "needs to be increased (e.g: very long lines), this value is used to "
+ "restrict how much the memory buffer can grow. If reading a file exceed "
+ "this limit, the file is removed from the monitored file list."
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "static_batch_size", FLB_TAIL_STATIC_BATCH_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, static_batch_size),
+ "On start, Fluent Bit might process files which already contains data, "
+ "these files are called 'static' files. The configuration property "
+ "in question set's the maximum number of bytes to process per iteration "
+ "for the static files monitored."
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "event_batch_size", FLB_TAIL_EVENT_BATCH_SIZE,
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, event_batch_size),
+ "When Fluent Bit is processing files in event based mode the amount of"
+ "data available for consumption could be too much and cause the input plugin "
+ "to over extend and smother other plugins"
+ "The configuration property sets the maximum number of bytes to process per iteration "
+ "for the files monitored (in event mode)."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "skip_long_lines", "false",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, skip_long_lines),
+ "if a monitored file reach it buffer capacity due to a very long line "
+ "(buffer_max_size), the default behavior is to stop monitoring that "
+ "file. This option alter that behavior and instruct Fluent Bit to skip "
+ "long lines and continue processing other lines that fits into the buffer."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "exit_on_eof", "false",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, exit_on_eof),
+ "exit Fluent Bit when reaching EOF on a monitored file."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "skip_empty_lines", "false",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, skip_empty_lines),
+ "Allows to skip empty lines."
+ },
+
+#ifdef FLB_HAVE_INOTIFY
+ {
+ FLB_CONFIG_MAP_BOOL, "inotify_watcher", "true",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, inotify_watcher),
+ "set to false to use file stat watcher instead of inotify."
+ },
+#endif
+#ifdef FLB_HAVE_REGEX
+ {
+ FLB_CONFIG_MAP_STR, "parser", NULL,
+ 0, FLB_FALSE, 0,
+ "specify the parser name to process an unstructured message."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "tag_regex", NULL,
+ 0, FLB_FALSE, 0,
+ "set a regex to extract fields from the file name and use them later to "
+ "compose the Tag."
+ },
+#endif
+
+#ifdef FLB_HAVE_SQLDB
+ {
+ FLB_CONFIG_MAP_STR, "db", NULL,
+ 0, FLB_FALSE, 0,
+ "set a database file to keep track of monitored files and it offsets."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "db.sync", "normal",
+ 0, FLB_FALSE, 0,
+ "set a database sync method. values: extra, full, normal and off."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "db.locking", "false",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, db_locking),
+ "set exclusive locking mode, increase performance but don't allow "
+ "external connections to the database file."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "db.journal_mode", "WAL",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, db_journal_mode),
+ "Option to provide WAL configuration for Work Ahead Logging mechanism (WAL). Enabling WAL "
+ "provides higher performance. Note that WAL is not compatible with "
+ "shared network file systems."
+ },
+#endif
+
+ /* Multiline Options */
+#ifdef FLB_HAVE_PARSER
+ {
+ FLB_CONFIG_MAP_BOOL, "multiline", "false",
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, multiline),
+ "if enabled, the plugin will try to discover multiline messages and use "
+ "the proper parsers to compose the outgoing messages. Note that when this "
+ "option is enabled the Parser option is not used."
+ },
+ {
+ FLB_CONFIG_MAP_TIME, "multiline_flush", FLB_TAIL_MULT_FLUSH,
+ 0, FLB_TRUE, offsetof(struct flb_tail_config, multiline_flush),
+ "wait period time in seconds to process queued multiline messages."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "parser_firstline", NULL,
+ 0, FLB_FALSE, 0,
+ "name of the parser that matches the beginning of a multiline message. "
+ "Note that the regular expression defined in the parser must include a "
+ "group name (named capture)."
+ },
+ {
+ FLB_CONFIG_MAP_STR_PREFIX, "parser_", NULL,
+ 0, FLB_FALSE, 0,
+ "optional extra parser to interpret and structure multiline entries. This "
+ "option can be used to define multiple parsers, e.g: Parser_1 ab1, "
+ "Parser_2 ab2, Parser_N abN."
+ },
+
+ /* Multiline Core Engine based API */
+ {
+ FLB_CONFIG_MAP_CLIST, "multiline.parser", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_tail_config, multiline_parsers),
+ "specify one or multiple multiline parsers: docker, cri, go, java, etc."
+ },
+#endif
+
+ /* EOF */
+ {0}
+};
+
+struct flb_input_plugin in_tail_plugin = {
+ .name = "tail",
+ .description = "Tail files",
+ .cb_init = in_tail_init,
+ .cb_pre_run = in_tail_pre_run,
+ .cb_collect = NULL,
+ .cb_flush_buf = NULL,
+ .cb_pause = in_tail_pause,
+ .cb_resume = in_tail_resume,
+ .cb_exit = in_tail_exit,
+ .config_map = config_map,
+ .flags = 0
+};
diff --git a/src/fluent-bit/plugins/in_tail/tail.h b/src/fluent-bit/plugins/in_tail/tail.h
new file mode 100644
index 000000000..074f7a49f
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TAIL_H
+#define FLB_TAIL_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+
+/* Internal return values */
+#define FLB_TAIL_ERROR -1
+#define FLB_TAIL_OK 0
+#define FLB_TAIL_WAIT 1
+#define FLB_TAIL_BUSY 2
+
+/* Consuming mode */
+#define FLB_TAIL_STATIC 0 /* Data is being consumed through read(2) */
+#define FLB_TAIL_EVENT 1 /* Data is being consumed through inotify */
+
+/* Config */
+#define FLB_TAIL_CHUNK "32768" /* buffer chunk = 32KB */
+#define FLB_TAIL_REFRESH 60 /* refresh every 60 seconds */
+#define FLB_TAIL_ROTATE_WAIT "5" /* time to monitor after rotation */
+#define FLB_TAIL_STATIC_BATCH_SIZE "50M" /* static batch size */
+#define FLB_TAIL_EVENT_BATCH_SIZE "50M" /* event batch size */
+
+int in_tail_collect_event(void *file, struct flb_config *config);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/tail_config.c b/src/fluent-bit/plugins/in_tail/tail_config.c
new file mode 100644
index 000000000..360b3dbea
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_config.c
@@ -0,0 +1,472 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/multiline/flb_ml.h>
+#include <fluent-bit/multiline/flb_ml_parser.h>
+
+#include <stdlib.h>
+#include <fcntl.h>
+
+#include "tail_fs.h"
+#include "tail_db.h"
+#include "tail_config.h"
+#include "tail_scan.h"
+#include "tail_sql.h"
+#include "tail_dockermode.h"
+
+#ifdef FLB_HAVE_PARSER
+#include "tail_multiline.h"
+#endif
+
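+/*
+ * Create the multiline engine context and instantiate every parser listed in
+ * the 'multiline.parser' configuration entries.
+ */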
+static int multiline_load_parsers(struct flb_tail_config *ctx)
+{
+ struct mk_list *head;
+ struct mk_list *head_p;
+ struct flb_config_map_val *mv;
+ struct flb_slist_entry *val = NULL;
+ struct flb_ml_parser_ins *parser_i;
+
+ if (!ctx->multiline_parsers) {
+ return 0;
+ }
+
+ /* Create Multiline context using the plugin instance name */
+ ctx->ml_ctx = flb_ml_create(ctx->config, ctx->ins->name);
+ if (!ctx->ml_ctx) {
+ return -1;
+ }
+
+ /*
+ * Iterate all 'multiline.parser' entries. Every entry is considered
+ * a group which can have multiple multiline parser instances.
+ */
+ flb_config_map_foreach(head, mv, ctx->multiline_parsers) {
+ mk_list_foreach(head_p, mv->val.list) {
+ val = mk_list_entry(head_p, struct flb_slist_entry, _head);
+
+ /* Create an instance of the defined parser */
+ parser_i = flb_ml_parser_instance_create(ctx->ml_ctx, val->str);
+ if (!parser_i) {
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
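+
+/*
+ * Illustration only: multiline_load_parsers() consumes the
+ * 'multiline.parser' property, which takes a list of parser names,
+ * e.g. (assuming the built-in multiline parsers are available):
+ *
+ *   [INPUT]
+ *       Name              tail
+ *       Path              /var/log/containers/*.log
+ *       multiline.parser  docker, cri
+ *
+ * Each name in the list becomes one flb_ml_parser_ins attached to
+ * ctx->ml_ctx.
+ */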
+
+struct flb_tail_config *flb_tail_config_create(struct flb_input_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ int sec;
+ int i;
+ long nsec;
+ const char *tmp;
+ struct flb_tail_config *ctx;
+
+ ctx = flb_calloc(1, sizeof(struct flb_tail_config));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->config = config;
+ ctx->ins = ins;
+ ctx->ignore_older = 0;
+ ctx->skip_long_lines = FLB_FALSE;
+#ifdef FLB_HAVE_SQLDB
+ ctx->db_sync = 1; /* sqlite sync 'normal' */
+#endif
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Create the channel manager */
+ ret = flb_pipe_create(ctx->ch_manager);
+ if (ret == -1) {
+ flb_errno();
+ flb_free(ctx);
+ return NULL;
+ }
+ ctx->ch_reads = 0;
+ ctx->ch_writes = 0;
+
+ /* Create the pending channel */
+ ret = flb_pipe_create(ctx->ch_pending);
+ if (ret == -1) {
+ flb_errno();
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+ /* Make pending channel non-blocking */
+ for (i = 0; i <= 1; i++) {
+ ret = flb_pipe_set_nonblocking(ctx->ch_pending[i]);
+ if (ret == -1) {
+ flb_errno();
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* Config: path/pattern to read files */
+ if (!ctx->path_list || mk_list_size(ctx->path_list) == 0) {
+ flb_plg_error(ctx->ins, "no input 'path' was given");
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+
+    /* Config: interval in seconds before re-scanning the path */
+ tmp = flb_input_get_property("refresh_interval", ins);
+ if (!tmp) {
+ ctx->refresh_interval_sec = FLB_TAIL_REFRESH;
+ ctx->refresh_interval_nsec = 0;
+ }
+ else {
+ ret = flb_utils_time_split(tmp, &sec, &nsec);
+ if (ret == 0) {
+ ctx->refresh_interval_sec = sec;
+ ctx->refresh_interval_nsec = nsec;
+
+ if (sec == 0 && nsec == 0) {
+ flb_plg_error(ctx->ins, "invalid 'refresh_interval' config "
+ "value (%s)", tmp);
+ flb_free(ctx);
+ return NULL;
+ }
+
+ if (sec == 0 && nsec <= 1000000) {
+ flb_plg_warn(ctx->ins, "very low refresh_interval "
+ "(%i.%lu nanoseconds) might cause high CPU usage",
+ sec, nsec);
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "invalid 'refresh_interval' config value (%s)",
+ tmp);
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+ }
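+
+    /*
+     * Example (sketch): 'refresh_interval 10' re-scans the configured
+     * paths every 10 seconds (sec=10, nsec=0). Values that split into a
+     * zero interval are rejected above; intervals of one millisecond or
+     * less only trigger the CPU-usage warning.
+     */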
+
+ /* Config: seconds interval to monitor file after rotation */
+ if (ctx->rotate_wait <= 0) {
+ flb_plg_error(ctx->ins, "invalid 'rotate_wait' config value");
+ flb_free(ctx);
+ return NULL;
+ }
+
+#ifdef FLB_HAVE_PARSER
+ /* Config: multi-line support */
+ if (ctx->multiline == FLB_TRUE) {
+ ret = flb_tail_mult_create(ctx, ins, config);
+ if (ret == -1) {
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+ }
+#endif
+
+ /* Config: Docker mode */
+ if(ctx->docker_mode == FLB_TRUE) {
+ ret = flb_tail_dmode_create(ctx, ins, config);
+ if (ret == -1) {
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* Validate buffer limit */
+ if (ctx->buf_chunk_size > ctx->buf_max_size) {
+ flb_plg_error(ctx->ins, "buffer_max_size must be >= buffer_chunk");
+ flb_free(ctx);
+ return NULL;
+ }
+
+#ifdef FLB_HAVE_REGEX
+ /* Parser / Format */
+ tmp = flb_input_get_property("parser", ins);
+ if (tmp) {
+ ctx->parser = flb_parser_get(tmp, config);
+ if (!ctx->parser) {
+ flb_plg_error(ctx->ins, "parser '%s' is not registered", tmp);
+ }
+ }
+#endif
+
+ mk_list_init(&ctx->files_static);
+ mk_list_init(&ctx->files_event);
+ mk_list_init(&ctx->files_rotated);
+
+ /* hash table for files lookups */
+ ctx->static_hash = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE, 1000, 0);
+ if (!ctx->static_hash) {
+ flb_plg_error(ctx->ins, "could not create static hash");
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+
+ ctx->event_hash = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE, 1000, 0);
+ if (!ctx->event_hash) {
+ flb_plg_error(ctx->ins, "could not create event hash");
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+
+#ifdef FLB_HAVE_SQLDB
+ ctx->db = NULL;
+#endif
+
+#ifdef FLB_HAVE_REGEX
+ tmp = flb_input_get_property("tag_regex", ins);
+ if (tmp) {
+ ctx->tag_regex = flb_regex_create(tmp);
+ if (ctx->tag_regex) {
+ ctx->dynamic_tag = FLB_TRUE;
+ }
+ else {
+ flb_plg_error(ctx->ins, "invalid 'tag_regex' config value");
+ }
+ }
+ else {
+ ctx->tag_regex = NULL;
+ }
+#endif
+
+ /* Check if it should use dynamic tags */
+ tmp = strchr(ins->tag, '*');
+ if (tmp) {
+ ctx->dynamic_tag = FLB_TRUE;
+ }
+
+#ifdef FLB_HAVE_SQLDB
+    /* Database options (need to be read before opening the database) */
+ tmp = flb_input_get_property("db.sync", ins);
+ if (tmp) {
+ if (strcasecmp(tmp, "extra") == 0) {
+ ctx->db_sync = 3;
+ }
+ else if (strcasecmp(tmp, "full") == 0) {
+ ctx->db_sync = 2;
+ }
+ else if (strcasecmp(tmp, "normal") == 0) {
+ ctx->db_sync = 1;
+ }
+ else if (strcasecmp(tmp, "off") == 0) {
+ ctx->db_sync = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "invalid database 'db.sync' value");
+ }
+ }
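+
+    /*
+     * Note: these values follow SQLite's 'PRAGMA synchronous' levels
+     * (0=OFF, 1=NORMAL, 2=FULL, 3=EXTRA), so e.g. 'db.sync full' maps to
+     * PRAGMA synchronous = 2 when the database is opened.
+     */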
+
+ /* Initialize database */
+ tmp = flb_input_get_property("db", ins);
+ if (tmp) {
+ ctx->db = flb_tail_db_open(tmp, ins, ctx, config);
+ if (!ctx->db) {
+ flb_plg_error(ctx->ins, "could not open/create database");
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* Journal mode check */
+ tmp = flb_input_get_property("db.journal_mode", ins);
+ if (tmp) {
+ if (strcasecmp(tmp, "DELETE") != 0 &&
+ strcasecmp(tmp, "TRUNCATE") != 0 &&
+ strcasecmp(tmp, "PERSIST") != 0 &&
+ strcasecmp(tmp, "MEMORY") != 0 &&
+ strcasecmp(tmp, "WAL") != 0 &&
+ strcasecmp(tmp, "OFF") != 0) {
+
+ flb_plg_error(ctx->ins, "invalid db.journal_mode=%s", tmp);
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* Prepare Statement */
+ if (ctx->db) {
+ /* SQL_GET_FILE */
+ ret = sqlite3_prepare_v2(ctx->db->handler,
+ SQL_GET_FILE,
+ -1,
+ &ctx->stmt_get_file,
+ 0);
+ if (ret != SQLITE_OK) {
+ flb_plg_error(ctx->ins, "error preparing database SQL statement");
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+
+ /* SQL_INSERT_FILE */
+ ret = sqlite3_prepare_v2(ctx->db->handler,
+ SQL_INSERT_FILE,
+ -1,
+ &ctx->stmt_insert_file,
+ 0);
+ if (ret != SQLITE_OK) {
+ flb_plg_error(ctx->ins, "error preparing database SQL statement");
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+
+ /* SQL_ROTATE_FILE */
+ ret = sqlite3_prepare_v2(ctx->db->handler,
+ SQL_ROTATE_FILE,
+ -1,
+ &ctx->stmt_rotate_file,
+ 0);
+ if (ret != SQLITE_OK) {
+ flb_plg_error(ctx->ins, "error preparing database SQL statement");
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+
+ /* SQL_UPDATE_OFFSET */
+ ret = sqlite3_prepare_v2(ctx->db->handler,
+ SQL_UPDATE_OFFSET,
+ -1,
+ &ctx->stmt_offset,
+ 0);
+ if (ret != SQLITE_OK) {
+ flb_plg_error(ctx->ins, "error preparing database SQL statement");
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+
+ /* SQL_DELETE_FILE */
+ ret = sqlite3_prepare_v2(ctx->db->handler,
+ SQL_DELETE_FILE,
+ -1,
+ &ctx->stmt_delete_file,
+ 0);
+ if (ret != SQLITE_OK) {
+ flb_plg_error(ctx->ins, "error preparing database SQL statement");
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+
+ }
+#endif
+
+#ifdef FLB_HAVE_PARSER
+ /* Multiline core API */
+ if (ctx->multiline_parsers && mk_list_size(ctx->multiline_parsers) > 0) {
+ ret = multiline_load_parsers(ctx);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "could not load multiline parsers");
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+
+ /* Enable auto-flush routine */
+ ret = flb_ml_auto_flush_init(ctx->ml_ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not start multiline auto-flush");
+ flb_tail_config_destroy(ctx);
+ return NULL;
+ }
+ flb_plg_info(ctx->ins, "multiline core started");
+ }
+#endif
+
+#ifdef FLB_HAVE_METRICS
+ ctx->cmt_files_opened = cmt_counter_create(ins->cmt,
+ "fluentbit", "input",
+ "files_opened_total",
+ "Total number of opened files",
+ 1, (char *[]) {"name"});
+
+ ctx->cmt_files_closed = cmt_counter_create(ins->cmt,
+ "fluentbit", "input",
+ "files_closed_total",
+ "Total number of closed files",
+ 1, (char *[]) {"name"});
+
+ ctx->cmt_files_rotated = cmt_counter_create(ins->cmt,
+ "fluentbit", "input",
+ "files_rotated_total",
+ "Total number of rotated files",
+ 1, (char *[]) {"name"});
+
+ /* OLD metrics */
+ flb_metrics_add(FLB_TAIL_METRIC_F_OPENED,
+ "files_opened", ctx->ins->metrics);
+ flb_metrics_add(FLB_TAIL_METRIC_F_CLOSED,
+ "files_closed", ctx->ins->metrics);
+ flb_metrics_add(FLB_TAIL_METRIC_F_ROTATED,
+ "files_rotated", ctx->ins->metrics);
+#endif
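+
+    /*
+     * With the cmetrics namespace/subsystem used above, these counters are
+     * typically exposed as fluentbit_input_files_opened_total,
+     * fluentbit_input_files_closed_total and
+     * fluentbit_input_files_rotated_total, labeled by the instance 'name'
+     * (the exact exposition depends on the configured metrics exporter).
+     */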
+
+ return ctx;
+}
+
+int flb_tail_config_destroy(struct flb_tail_config *config)
+{
+
+#ifdef FLB_HAVE_PARSER
+ flb_tail_mult_destroy(config);
+
+ if (config->ml_ctx) {
+ flb_ml_destroy(config->ml_ctx);
+ }
+#endif
+
+ /* Close pipe ends */
+ flb_pipe_close(config->ch_manager[0]);
+ flb_pipe_close(config->ch_manager[1]);
+ flb_pipe_close(config->ch_pending[0]);
+ flb_pipe_close(config->ch_pending[1]);
+
+#ifdef FLB_HAVE_REGEX
+ if (config->tag_regex) {
+ flb_regex_destroy(config->tag_regex);
+ }
+#endif
+
+#ifdef FLB_HAVE_SQLDB
+ if (config->db != NULL) {
+ sqlite3_finalize(config->stmt_get_file);
+ sqlite3_finalize(config->stmt_insert_file);
+ sqlite3_finalize(config->stmt_delete_file);
+ sqlite3_finalize(config->stmt_rotate_file);
+ sqlite3_finalize(config->stmt_offset);
+ flb_tail_db_close(config->db);
+ }
+#endif
+
+ if (config->static_hash) {
+ flb_hash_table_destroy(config->static_hash);
+ }
+ if (config->event_hash) {
+ flb_hash_table_destroy(config->event_hash);
+ }
+
+ flb_free(config);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_tail/tail_config.h b/src/fluent-bit/plugins/in_tail/tail_config.h
new file mode 100644
index 000000000..dcfa54e02
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_config.h
@@ -0,0 +1,168 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TAIL_CONFIG_H
+#define FLB_TAIL_CONFIG_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_parser.h>
+#include <fluent-bit/flb_macros.h>
+#include <fluent-bit/flb_sqldb.h>
+#include <fluent-bit/flb_metrics.h>
+#include <fluent-bit/flb_log_event.h>
+#ifdef FLB_HAVE_REGEX
+#include <fluent-bit/flb_regex.h>
+#endif
+#ifdef FLB_HAVE_PARSER
+#include <fluent-bit/multiline/flb_ml.h>
+#endif
+
+#include <xxhash.h>
+
+/* Metrics */
+#ifdef FLB_HAVE_METRICS
+#define FLB_TAIL_METRIC_F_OPENED 100 /* number of opened files */
+#define FLB_TAIL_METRIC_F_CLOSED 101 /* number of closed files */
+#define FLB_TAIL_METRIC_F_ROTATED 102 /* number of rotated files */
+#endif
+
+struct flb_tail_config {
+ int fd_notify; /* inotify fd */
+ flb_pipefd_t ch_manager[2]; /* pipe: channel manager */
+ flb_pipefd_t ch_pending[2]; /* pipe: pending events */
+    int ch_reads;               /* count number of signal reads */
+ int ch_writes; /* count number of signal writes */
+
+ /* Buffer Config */
+ size_t buf_chunk_size; /* allocation chunks */
+ size_t buf_max_size; /* max size of a buffer */
+
+ /* Static files processor */
+ size_t static_batch_size;
+
+ /* Event files processor */
+ size_t event_batch_size;
+
+ /* Collectors */
+ int coll_fd_static;
+ int coll_fd_scan;
+ int coll_fd_watcher;
+ int coll_fd_rotated;
+ int coll_fd_pending;
+ int coll_fd_inactive;
+ int coll_fd_dmode_flush;
+ int coll_fd_mult_flush;
+ int coll_fd_progress_check;
+
+ /* Backend collectors */
+ int coll_fd_fs1; /* used by fs_inotify & fs_stat */
+ int coll_fd_fs2; /* only used by fs_stat */
+
+ /* Configuration */
+ int dynamic_tag; /* dynamic tag ? e.g: abc.* */
+#ifdef FLB_HAVE_REGEX
+ struct flb_regex *tag_regex;/* path to tag regex */
+#endif
+ int refresh_interval_sec; /* seconds to re-scan */
+ long refresh_interval_nsec;/* nanoseconds to re-scan */
+ int read_from_head; /* read new files from head */
+ int rotate_wait; /* sec to wait on rotated files */
+ int watcher_interval; /* watcher interval */
+    int ignore_older;           /* ignore files older than X seconds */
+    time_t last_pending;        /* last time a 'pending signal' was emitted */
+ struct mk_list *path_list; /* list of paths to scan (glob) */
+ flb_sds_t path_key; /* key name of file path */
+ flb_sds_t key; /* key for unstructured record */
+ int skip_long_lines; /* skip long lines */
+ int skip_empty_lines; /* skip empty lines (off) */
+ int exit_on_eof; /* exit fluent-bit on EOF, test */
+
+    int progress_check_interval;        /* progress check interval (seconds) */
+    int progress_check_interval_nsec;   /* progress check interval (nanoseconds) */
+
+#ifdef FLB_HAVE_INOTIFY
+ int inotify_watcher; /* enable/disable inotify monitor */
+#endif
+ flb_sds_t offset_key; /* key name of file offset */
+
+ /* Database */
+#ifdef FLB_HAVE_SQLDB
+ struct flb_sqldb *db;
+ int db_sync;
+ int db_locking;
+ flb_sds_t db_journal_mode;
+ sqlite3_stmt *stmt_get_file;
+ sqlite3_stmt *stmt_insert_file;
+ sqlite3_stmt *stmt_delete_file;
+ sqlite3_stmt *stmt_rotate_file;
+ sqlite3_stmt *stmt_offset;
+#endif
+
+ /* Parser / Format */
+ struct flb_parser *parser;
+
+ /* Multiline */
+ int multiline; /* multiline enabled ? */
+ int multiline_flush; /* multiline flush/wait */
+ struct flb_parser *mult_parser_firstline;
+ struct mk_list mult_parsers;
+
+ /* Docker mode */
+ int docker_mode; /* Docker mode enabled ? */
+ int docker_mode_flush; /* Docker mode flush/wait */
+ struct flb_parser *docker_mode_parser; /* Parser for separate multiline logs */
+
+ /* Multiline core engine */
+ struct flb_ml *ml_ctx;
+ struct mk_list *multiline_parsers;
+
+ uint64_t files_static_count; /* number of items in the static file list */
+ struct mk_list files_static;
+ struct mk_list files_event;
+
+ /* List of rotated files that needs to be removed after 'rotate_wait' */
+ struct mk_list files_rotated;
+
+ /* List of shell patterns used to exclude certain file names */
+ struct mk_list *exclude_list;
+
+ /* Plugin input instance */
+ struct flb_input_instance *ins;
+
+ struct flb_log_event_encoder log_event_encoder;
+ struct flb_log_event_decoder log_event_decoder;
+
+ /* Metrics */
+ struct cmt_counter *cmt_files_opened;
+ struct cmt_counter *cmt_files_closed;
+ struct cmt_counter *cmt_files_rotated;
+
+    /* Hash: hash tables for quick access to registered files */
+ struct flb_hash_table *static_hash;
+ struct flb_hash_table *event_hash;
+
+ struct flb_config *config;
+};
+
+struct flb_tail_config *flb_tail_config_create(struct flb_input_instance *ins,
+ struct flb_config *config);
+int flb_tail_config_destroy(struct flb_tail_config *config);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/tail_db.c b/src/fluent-bit/plugins/in_tail/tail_db.c
new file mode 100644
index 000000000..664963b6d
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_db.c
@@ -0,0 +1,277 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_sqldb.h>
+
+#include "tail_db.h"
+#include "tail_sql.h"
+#include "tail_file.h"
+
+struct query_status {
+ int id;
+ int rows;
+ int64_t offset;
+};
+
+/* Open or create database required by tail plugin */
+struct flb_sqldb *flb_tail_db_open(const char *path,
+ struct flb_input_instance *in,
+ struct flb_tail_config *ctx,
+ struct flb_config *config)
+{
+ int ret;
+ char tmp[64];
+ struct flb_sqldb *db;
+
+ /* Open/create the database */
+ db = flb_sqldb_open(path, in->name, config);
+ if (!db) {
+ return NULL;
+ }
+
+    /* Create the table schema if it doesn't exist */
+ ret = flb_sqldb_query(db, SQL_CREATE_FILES, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "db: could not create 'in_tail_files' table");
+ flb_sqldb_close(db);
+ return NULL;
+ }
+
+ if (ctx->db_sync >= 0) {
+ snprintf(tmp, sizeof(tmp) - 1, SQL_PRAGMA_SYNC,
+ ctx->db_sync);
+ ret = flb_sqldb_query(db, tmp, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "db could not set pragma 'sync'");
+ flb_sqldb_close(db);
+ return NULL;
+ }
+ }
+
+ if (ctx->db_locking == FLB_TRUE) {
+ ret = flb_sqldb_query(db, SQL_PRAGMA_LOCKING_MODE, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "db: could not set pragma 'locking_mode'");
+ flb_sqldb_close(db);
+ return NULL;
+ }
+ }
+
+ if (ctx->db_journal_mode) {
+ snprintf(tmp, sizeof(tmp) - 1, SQL_PRAGMA_JOURNAL_MODE,
+ ctx->db_journal_mode);
+ ret = flb_sqldb_query(db, tmp, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "db could not set pragma 'journal_mode'");
+ flb_sqldb_close(db);
+ return NULL;
+ }
+ }
+
+ return db;
+}
+
+int flb_tail_db_close(struct flb_sqldb *db)
+{
+ flb_sqldb_close(db);
+ return 0;
+}
+
+/*
+ * Check if a file inode exists in the database. Returns FLB_TRUE or
+ * FLB_FALSE.
+ */
+static int db_file_exists(struct flb_tail_file *file,
+ struct flb_tail_config *ctx,
+ uint64_t *id, uint64_t *inode, off_t *offset)
+{
+ int ret;
+ int exists = FLB_FALSE;
+
+ /* Bind parameters */
+ sqlite3_bind_int64(ctx->stmt_get_file, 1, file->inode);
+ ret = sqlite3_step(ctx->stmt_get_file);
+
+ if (ret == SQLITE_ROW) {
+ exists = FLB_TRUE;
+
+ /* id: column 0 */
+ *id = sqlite3_column_int64(ctx->stmt_get_file, 0);
+
+ /* offset: column 2 */
+ *offset = sqlite3_column_int64(ctx->stmt_get_file, 2);
+
+ /* inode: column 3 */
+ *inode = sqlite3_column_int64(ctx->stmt_get_file, 3);
+ }
+ else if (ret == SQLITE_DONE) {
+ /* all good */
+ }
+ else {
+ exists = -1;
+ }
+
+ sqlite3_clear_bindings(ctx->stmt_get_file);
+ sqlite3_reset(ctx->stmt_get_file);
+
+ return exists;
+
+}
+
+static int db_file_insert(struct flb_tail_file *file, struct flb_tail_config *ctx)
+
+{
+ int ret;
+ time_t created;
+
+ /* Register the file */
+ created = time(NULL);
+
+ /* Bind parameters */
+ sqlite3_bind_text(ctx->stmt_insert_file, 1, file->name, -1, 0);
+ sqlite3_bind_int64(ctx->stmt_insert_file, 2, file->offset);
+ sqlite3_bind_int64(ctx->stmt_insert_file, 3, file->inode);
+ sqlite3_bind_int64(ctx->stmt_insert_file, 4, created);
+
+ /* Run the insert */
+ ret = sqlite3_step(ctx->stmt_insert_file);
+ if (ret != SQLITE_DONE) {
+ sqlite3_clear_bindings(ctx->stmt_insert_file);
+ sqlite3_reset(ctx->stmt_insert_file);
+ flb_plg_error(ctx->ins, "cannot execute insert file %s inode=%lu",
+ file->name, file->inode);
+ return -1;
+ }
+
+ sqlite3_clear_bindings(ctx->stmt_insert_file);
+ sqlite3_reset(ctx->stmt_insert_file);
+
+ /* Get the database ID for this file */
+ return flb_sqldb_last_id(ctx->db);
+}
+
+int flb_tail_db_file_set(struct flb_tail_file *file,
+ struct flb_tail_config *ctx)
+{
+ int ret;
+ uint64_t id = 0;
+ off_t offset = 0;
+ uint64_t inode = 0;
+
+ /* Check if the file exists */
+ ret = db_file_exists(file, ctx, &id, &inode, &offset);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "cannot execute query to check inode: %lu",
+ file->inode);
+ return -1;
+ }
+
+ if (ret == FLB_FALSE) {
+ /* Get the database ID for this file */
+ file->db_id = db_file_insert(file, ctx);
+ }
+ else {
+ file->db_id = id;
+ file->offset = offset;
+ }
+
+ return 0;
+}
+
+/* Update Offset v2 */
+int flb_tail_db_file_offset(struct flb_tail_file *file,
+ struct flb_tail_config *ctx)
+{
+ int ret;
+
+ /* Bind parameters */
+ sqlite3_bind_int64(ctx->stmt_offset, 1, file->offset);
+ sqlite3_bind_int64(ctx->stmt_offset, 2, file->db_id);
+
+ ret = sqlite3_step(ctx->stmt_offset);
+
+ if (ret != SQLITE_DONE) {
+ sqlite3_clear_bindings(ctx->stmt_offset);
+ sqlite3_reset(ctx->stmt_offset);
+ return -1;
+ }
+
+ /* Verify number of updated rows */
+ ret = sqlite3_changes(ctx->db->handler);
+ if (ret == 0) {
+ /*
+         * The entry was deleted externally (by the user or another
+         * process), so restore it.
+ */
+ file->db_id = db_file_insert(file, ctx);
+ }
+
+ sqlite3_clear_bindings(ctx->stmt_offset);
+ sqlite3_reset(ctx->stmt_offset);
+
+ return 0;
+}
+
+/* Mark a file as rotated v2 */
+int flb_tail_db_file_rotate(const char *new_name,
+ struct flb_tail_file *file,
+ struct flb_tail_config *ctx)
+{
+ int ret;
+
+ /* Bind parameters */
+ sqlite3_bind_text(ctx->stmt_rotate_file, 1, new_name, -1, 0);
+ sqlite3_bind_int64(ctx->stmt_rotate_file, 2, file->db_id);
+
+ ret = sqlite3_step(ctx->stmt_rotate_file);
+
+ sqlite3_clear_bindings(ctx->stmt_rotate_file);
+ sqlite3_reset(ctx->stmt_rotate_file);
+
+ if (ret != SQLITE_DONE) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Delete file entry from the database */
+int flb_tail_db_file_delete(struct flb_tail_file *file,
+ struct flb_tail_config *ctx)
+{
+ int ret;
+
+ /* Bind parameters */
+ sqlite3_bind_int64(ctx->stmt_delete_file, 1, file->db_id);
+ ret = sqlite3_step(ctx->stmt_delete_file);
+
+ sqlite3_clear_bindings(ctx->stmt_delete_file);
+ sqlite3_reset(ctx->stmt_delete_file);
+
+ if (ret != SQLITE_DONE) {
+ flb_plg_error(ctx->ins, "db: error deleting entry from database: %s",
+ file->name);
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "db: file deleted from database: %s", file->name);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_tail/tail_db.h b/src/fluent-bit/plugins/in_tail/tail_db.h
new file mode 100644
index 000000000..7b5355d22
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_db.h
@@ -0,0 +1,43 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TAIL_DB_H
+#define FLB_TAIL_DB_H
+
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_sqldb.h>
+
+#include "tail_file.h"
+
+struct flb_sqldb *flb_tail_db_open(const char *path,
+ struct flb_input_instance *in,
+ struct flb_tail_config *ctx,
+ struct flb_config *config);
+
+int flb_tail_db_close(struct flb_sqldb *db);
+int flb_tail_db_file_set(struct flb_tail_file *file,
+ struct flb_tail_config *ctx);
+int flb_tail_db_file_offset(struct flb_tail_file *file,
+ struct flb_tail_config *ctx);
+int flb_tail_db_file_rotate(const char *new_name,
+ struct flb_tail_file *file,
+ struct flb_tail_config *ctx);
+int flb_tail_db_file_delete(struct flb_tail_file *file,
+ struct flb_tail_config *ctx);
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/tail_dockermode.c b/src/fluent-bit/plugins/in_tail/tail_dockermode.c
new file mode 100644
index 000000000..a8f20f9cd
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_dockermode.c
@@ -0,0 +1,459 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_unescape.h>
+
+#include "tail_config.h"
+#include "tail_dockermode.h"
+#include "tail_file_internal.h"
+
+int flb_tail_dmode_create(struct flb_tail_config *ctx,
+ struct flb_input_instance *ins,
+ struct flb_config *config)
+{
+ const char *tmp;
+
+ if (ctx->multiline == FLB_TRUE) {
+ flb_plg_error(ctx->ins, "Docker mode cannot be enabled when multiline "
+ "is enabled");
+ return -1;
+ }
+
+#ifdef FLB_HAVE_REGEX
+ /* First line Parser */
+ tmp = flb_input_get_property("docker_mode_parser", ins);
+ if (tmp) {
+ ctx->docker_mode_parser = flb_parser_get(tmp, config);
+ if (!ctx->docker_mode_parser) {
+ flb_plg_error(ctx->ins, "parser '%s' is not registered", tmp);
+ }
+ }
+ else {
+ ctx->docker_mode_parser = NULL;
+ }
+#endif
+
+ tmp = flb_input_get_property("docker_mode_flush", ins);
+ if (!tmp) {
+ ctx->docker_mode_flush = FLB_TAIL_DMODE_FLUSH;
+ }
+ else {
+ ctx->docker_mode_flush = atoi(tmp);
+ if (ctx->docker_mode_flush <= 0) {
+ ctx->docker_mode_flush = 1;
+ }
+ }
+
+ return 0;
+}
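+
+/*
+ * Illustration only (property names as commonly documented for in_tail;
+ * treat the exact spelling as an assumption): Docker mode is driven by
+ *
+ *   [INPUT]
+ *       Name                tail
+ *       Path                /var/lib/docker/containers/*/*.log
+ *       Docker_Mode         On
+ *       Docker_Mode_Flush   4
+ *       Docker_Mode_Parser  first_line_parser
+ *
+ * where 'first_line_parser' is a hypothetical parser name that matches the
+ * first line of a multi-line entry.
+ */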
+
+static int modify_json_cond(char *js, size_t js_len,
+ char **val, size_t *val_len,
+ char **out, size_t *out_len,
+ int cond(char*, size_t),
+ int mod(char*, size_t, char**, size_t*, void*), void *data)
+{
+ int ret;
+ struct flb_pack_state state;
+ jsmntok_t *t;
+ jsmntok_t *t_val = NULL;
+ int i;
+ int i_root = -1;
+ int i_key = -1;
+ char *old_val;
+ size_t old_val_len;
+ char *new_val = NULL;
+ size_t new_val_len = 0;
+ size_t mod_len;
+
+ ret = flb_pack_state_init(&state);
+ if (ret != 0) {
+ ret = -1;
+ goto modify_json_cond_end;
+ }
+
+ ret = flb_json_tokenise(js, js_len, &state);
+ if (ret != 0 || state.tokens_count == 0) {
+ ret = -1;
+ goto modify_json_cond_end;
+ }
+
+ for (i = 0; i < state.tokens_count; i++) {
+ t = &state.tokens[i];
+
+ if (i_key >= 0) {
+ if (t->parent == i_key) {
+ if (t->type == JSMN_STRING) {
+ t_val = t;
+ }
+ break;
+ }
+ continue;
+ }
+
+ if (t->start == 0 && t->parent == -1 && t->type == JSMN_OBJECT) {
+ i_root = i;
+ continue;
+ }
+ if (i_root == -1) {
+ continue;
+ }
+
+ if (t->parent == i_root && t->type == JSMN_STRING && t->end - t->start == 3 && strncmp(js + t->start, "log", 3) == 0) {
+ i_key = i;
+ }
+ }
+
+ if (!t_val) {
+ ret = -1;
+ goto modify_json_cond_end;
+ }
+
+ *out = js;
+ *out_len = js_len;
+
+ if (val) {
+ *val = js + t_val->start;
+ }
+ if (val_len) {
+ *val_len = t_val->end - t_val->start;
+ }
+
+ if (!cond || cond(js + t_val->start, t_val->end - t_val->start)) {
+ old_val = js + t_val->start;
+ old_val_len = t_val->end - t_val->start;
+ ret = mod(old_val, old_val_len, &new_val, &new_val_len, data);
+ if (ret != 0) {
+ ret = -1;
+ goto modify_json_cond_end;
+ }
+
+ ret = 1;
+
+ if (new_val == old_val) {
+ goto modify_json_cond_end;
+ }
+
+ mod_len = js_len + new_val_len - old_val_len;
+ *out = flb_malloc(mod_len);
+ if (!*out) {
+ flb_errno();
+ flb_free(new_val);
+ ret = -1;
+ goto modify_json_cond_end;
+ }
+ *out_len = mod_len;
+
+ memcpy(*out, js, t_val->start);
+ memcpy(*out + t_val->start, new_val, new_val_len);
+ memcpy(*out + t_val->start + new_val_len, js + t_val->end, js_len - t_val->end);
+
+ flb_free(new_val);
+ }
+
+ modify_json_cond_end:
+ flb_pack_state_reset(&state);
+ if (ret < 0) {
+ *out = NULL;
+ }
+ return ret;
+}
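+
+/*
+ * Sketch of what modify_json_cond() operates on: a Docker json-file log
+ * record such as
+ *
+ *   {"log":"some partial line","stream":"stdout","time":"2022-01-01T00:00:00Z"}
+ *
+ * It locates the top-level "log" value and, when cond() accepts it, replaces
+ * it in-place (or in a newly allocated buffer) with the output of mod().
+ */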
+
+static int unesc_ends_with_nl(char *str, size_t len)
+{
+ char* unesc;
+ int unesc_len;
+ int nl;
+
+ unesc = flb_malloc(len + 1);
+ if (!unesc) {
+ flb_errno();
+ return FLB_FALSE;
+ }
+ unesc_len = flb_unescape_string(str, len, &unesc);
+    /* guard against empty values to avoid reading before the buffer */
+    nl = unesc_len > 0 && unesc[unesc_len - 1] == '\n';
+ flb_free(unesc);
+ return nl;
+}
+
+static int prepend_sds_to_str(char *str, size_t len, char **out, size_t *out_len, void *data)
+{
+ flb_sds_t sds = data;
+
+ if (flb_sds_len(sds) == 0) {
+ *out = str;
+ *out_len = len;
+ return 0;
+ }
+
+ size_t mod_len = flb_sds_len(sds) + len;
+ *out = flb_malloc(mod_len);
+ if (!*out) {
+ flb_errno();
+ return -1;
+ }
+ *out_len = mod_len;
+
+ memcpy(*out, sds, flb_sds_len(sds));
+ memcpy(*out + flb_sds_len(sds), str, len);
+ return 0;
+}
+
+static int use_sds(char *str, size_t len, char **out, size_t *out_len, void *data)
+{
+ flb_sds_t sds = data;
+ size_t mod_len = flb_sds_len(sds);
+ *out = flb_malloc(mod_len);
+ if (!*out) {
+ flb_errno();
+ return -1;
+ }
+ *out_len = mod_len;
+
+ memcpy(*out, sds, flb_sds_len(sds));
+ return 0;
+}
+
+int flb_tail_dmode_process_content(time_t now,
+ char* line, size_t line_len,
+ char **repl_line, size_t *repl_line_len,
+ struct flb_tail_file *file,
+ struct flb_tail_config *ctx)
+{
+ char* val = NULL;
+ size_t val_len;
+ int ret;
+ void *out_buf = NULL;
+ size_t out_size;
+ struct flb_time out_time = {0};
+ *repl_line = NULL;
+ *repl_line_len = 0;
+ flb_sds_t tmp;
+ flb_sds_t tmp_copy;
+
+#ifdef FLB_HAVE_REGEX
+ if (ctx->docker_mode_parser) {
+ ret = flb_parser_do(ctx->docker_mode_parser, line, line_len,
+ &out_buf, &out_size, &out_time);
+ flb_free(out_buf);
+
+ /*
+ * Set dmode_firstline if the line meets the first-line requirement
+ */
+ if(ret >= 0) {
+ file->dmode_firstline = true;
+ }
+
+ /*
+ * Process if buffer contains full log line
+ */
+ if (flb_sds_len(file->dmode_lastline) > 0 && file->dmode_complete) {
+ /*
+ * Buffered log should be flushed out
+ * as current line meets first-line requirement
+ */
+ if(ret >= 0) {
+ flb_tail_dmode_flush(file, ctx);
+ }
+
+ /*
+ * Flush the buffer if multiline has not been detected yet
+ */
+ if (!file->dmode_firstline) {
+ flb_tail_dmode_flush(file, ctx);
+ }
+ }
+ }
+#endif
+
+ ret = modify_json_cond(line, line_len,
+ &val, &val_len,
+ repl_line, repl_line_len,
+ unesc_ends_with_nl,
+ prepend_sds_to_str, file->dmode_buf);
+ if (ret >= 0) {
+ /* line is a valid json */
+ flb_sds_len_set(file->dmode_lastline, 0);
+
+ /* concatenate current log line with buffered one */
+ tmp = flb_sds_cat(file->dmode_buf, val, val_len);
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+ file->dmode_buf = tmp;
+
+ tmp_copy = flb_sds_copy(file->dmode_lastline, line, line_len);
+ if (!tmp_copy) {
+ flb_errno();
+ return -1;
+ }
+
+ file->dmode_lastline = tmp_copy;
+ file->dmode_flush_timeout = now + (ctx->docker_mode_flush - 1);
+
+ if (ret == 0) {
+ /* Line not ended with newline */
+ file->dmode_complete = false;
+ }
+ else {
+ /* Line ended with newline */
+ file->dmode_complete = true;
+#ifdef FLB_HAVE_REGEX
+ if (!ctx->docker_mode_parser) {
+ flb_tail_dmode_flush(file, ctx);
+ }
+#else
+ flb_tail_dmode_flush(file, ctx);
+#endif
+ }
+ }
+ return ret;
+}
+
+void flb_tail_dmode_flush(struct flb_tail_file *file, struct flb_tail_config *ctx)
+{
+ int ret;
+ char *repl_line = NULL;
+ size_t repl_line_len = 0;
+ void *out_buf = NULL;
+ size_t out_size;
+ struct flb_time out_time = {0};
+ time_t now = time(NULL);
+
+ if (flb_sds_len(file->dmode_lastline) == 0) {
+ return;
+ }
+
+ flb_time_zero(&out_time);
+
+ ret = modify_json_cond(file->dmode_lastline,
+ flb_sds_len(file->dmode_lastline),
+ NULL, NULL,
+ &repl_line, &repl_line_len,
+ NULL,
+ use_sds, file->dmode_buf);
+ if (ret < 0) {
+ return;
+ }
+
+ flb_sds_len_set(file->dmode_buf, 0);
+ flb_sds_len_set(file->dmode_lastline, 0);
+ file->dmode_flush_timeout = 0;
+
+#ifdef FLB_HAVE_REGEX
+ if (ctx->parser) {
+ ret = flb_parser_do(ctx->parser, repl_line, repl_line_len,
+ &out_buf, &out_size, &out_time);
+ if (ret >= 0) {
+ if (flb_time_to_double(&out_time) == 0) {
+ flb_time_get(&out_time);
+ }
+ if (ctx->ignore_older > 0 && (now - ctx->ignore_older) > out_time.tm.tv_sec) {
+ goto dmode_flush_end;
+ }
+
+ flb_tail_pack_line_map(&out_time, (char**) &out_buf, &out_size, file, 0);
+
+ goto dmode_flush_end;
+ }
+ }
+#endif
+
+ flb_tail_file_pack_line(NULL, repl_line, repl_line_len, file, 0);
+
+ dmode_flush_end:
+ flb_free(repl_line);
+ flb_free(out_buf);
+}
+
+static void file_pending_flush(struct flb_tail_config *ctx,
+ struct flb_tail_file *file, time_t now)
+{
+ if (file->dmode_flush_timeout > now) {
+ return;
+ }
+
+ if (flb_sds_len(file->dmode_lastline) == 0) {
+ return;
+ }
+
+ flb_tail_dmode_flush(file, ctx);
+
+ if (file->sl_log_event_encoder->output_length > 0) {
+ flb_input_log_append(ctx->ins,
+ file->tag_buf,
+ file->tag_len,
+ file->sl_log_event_encoder->output_buffer,
+ file->sl_log_event_encoder->output_length);
+
+ flb_log_event_encoder_reset(file->sl_log_event_encoder);
+ }
+}
+
+int flb_tail_dmode_pending_flush_all(struct flb_tail_config *ctx)
+{
+ time_t expired;
+ struct mk_list *head;
+ struct flb_tail_file *file;
+
+ expired = time(NULL) + 3600;
+
+    /* Iterate static files with pending bytes */
+ mk_list_foreach(head, &ctx->files_static) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+ file_pending_flush(ctx, file, expired);
+ }
+
+ /* Iterate promoted event files with pending bytes */
+ mk_list_foreach(head, &ctx->files_event) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+ file_pending_flush(ctx, file, expired);
+ }
+
+ return 0;
+}
+
+int flb_tail_dmode_pending_flush(struct flb_input_instance *ins,
+ struct flb_config *config, void *context)
+{
+ time_t now;
+ struct mk_list *head;
+ struct flb_tail_file *file;
+ struct flb_tail_config *ctx = context;
+
+ now = time(NULL);
+
+ /* Iterate static event files with pending bytes */
+ mk_list_foreach(head, &ctx->files_static) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+ file_pending_flush(ctx, file, now);
+ }
+
+ /* Iterate promoted event files with pending bytes */
+ mk_list_foreach(head, &ctx->files_event) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+ file_pending_flush(ctx, file, now);
+ }
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_tail/tail_dockermode.h b/src/fluent-bit/plugins/in_tail/tail_dockermode.h
new file mode 100644
index 000000000..50869ff62
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_dockermode.h
@@ -0,0 +1,38 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TAIL_DOCKERMODE_H
+#define FLB_TAIL_DOCKERMODE_H
+
+#include "tail_file.h"
+#define FLB_TAIL_DMODE_FLUSH 4
+
+int flb_tail_dmode_create(struct flb_tail_config *ctx,
+ struct flb_input_instance *ins, struct flb_config *config);
+int flb_tail_dmode_process_content(time_t now,
+ char* line, size_t line_len,
+ char **repl_line, size_t *repl_line_len,
+ struct flb_tail_file *file,
+ struct flb_tail_config *ctx);
+void flb_tail_dmode_flush(struct flb_tail_file *file, struct flb_tail_config *ctx);
+int flb_tail_dmode_pending_flush(struct flb_input_instance *ins,
+ struct flb_config *config, void *context);
+int flb_tail_dmode_pending_flush_all(struct flb_tail_config *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/tail_file.c b/src/fluent-bit/plugins/in_tail/tail_file.c
new file mode 100644
index 000000000..2385f0626
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_file.c
@@ -0,0 +1,1860 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <time.h>
+#ifdef FLB_SYSTEM_FREEBSD
+#include <sys/user.h>
+#include <libutil.h>
+#endif
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_parser.h>
+#ifdef FLB_HAVE_REGEX
+#include <fluent-bit/flb_regex.h>
+#include <fluent-bit/flb_hash_table.h>
+#endif
+
+#include "tail.h"
+#include "tail_file.h"
+#include "tail_config.h"
+#include "tail_db.h"
+#include "tail_signal.h"
+#include "tail_dockermode.h"
+#include "tail_multiline.h"
+#include "tail_scan.h"
+
+#ifdef FLB_SYSTEM_WINDOWS
+#include "win32.h"
+#endif
+
+#include <cfl/cfl.h>
+
+static inline void consume_bytes(char *buf, int bytes, int length)
+{
+ memmove(buf, buf + bytes, length - bytes);
+}
+
+static uint64_t stat_get_st_dev(struct stat *st)
+{
+#ifdef FLB_SYSTEM_WINDOWS
+ /* do you want to contribute with a way to extract volume serial number ? */
+ return 0;
+#else
+ return st->st_dev;
+#endif
+}
+
+static int stat_to_hash_bits(struct flb_tail_config *ctx, struct stat *st,
+ uint64_t *out_hash)
+{
+ int len;
+ uint64_t st_dev;
+ char tmp[64];
+
+ st_dev = stat_get_st_dev(st);
+
+ len = snprintf(tmp, sizeof(tmp) - 1, "%" PRIu64 ":%" PRIu64,
+ st_dev, (uint64_t)st->st_ino);
+
+ *out_hash = cfl_hash_64bits(tmp, len);
+ return 0;
+}
+
+static int stat_to_hash_key(struct flb_tail_config *ctx, struct stat *st,
+ flb_sds_t *key)
+{
+ uint64_t st_dev;
+ flb_sds_t tmp;
+ flb_sds_t buf;
+
+ buf = flb_sds_create_size(64);
+ if (!buf) {
+ return -1;
+ }
+
+ st_dev = stat_get_st_dev(st);
+ tmp = flb_sds_printf(&buf, "%" PRIu64 ":%" PRIu64,
+ st_dev, (uint64_t)st->st_ino);
+ if (!tmp) {
+ flb_sds_destroy(buf);
+ return -1;
+ }
+
+ *key = buf;
+ return 0;
+}
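+
+/*
+ * Both helpers above derive a file identity from "st_dev:st_ino"; for
+ * example, a file on device 2049 with inode 131213 hashes the string
+ * "2049:131213" (numbers made up for illustration).
+ */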
+
+/* Append custom keys and report the number of records processed */
+static int record_append_custom_keys(struct flb_tail_file *file,
+ char *in_data, size_t in_size,
+ char **out_data, size_t *out_size)
+{
+ int i;
+ int ret;
+ int records = 0;
+ msgpack_object k;
+ msgpack_object v;
+ struct flb_log_event event;
+ struct flb_tail_config *ctx;
+ struct flb_log_event_encoder encoder;
+ struct flb_log_event_decoder decoder;
+
+ ctx = (struct flb_tail_config *) file->config;
+
+ ret = flb_log_event_decoder_init(&decoder, in_data, in_size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ return -1;
+ }
+
+ ret = flb_log_event_encoder_init(&encoder, FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_log_event_decoder_destroy(&decoder);
+
+ return -2;
+ }
+
+ while (flb_log_event_decoder_next(&decoder, &event) ==
+ FLB_EVENT_DECODER_SUCCESS) {
+
+ ret = flb_log_event_encoder_begin_record(&encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_timestamp(&encoder, &event.timestamp);
+ }
+
+ /* append previous map keys */
+ for (i = 0; i < event.body->via.map.size; i++) {
+ k = event.body->via.map.ptr[i].key;
+ v = event.body->via.map.ptr[i].val;
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_msgpack_object(
+ &encoder,
+ &k);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_msgpack_object(
+ &encoder,
+ &v);
+ }
+ }
+
+ /* path_key */
+ if (ctx->path_key != NULL) {
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_cstring(
+ &encoder,
+ file->config->path_key);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_cstring(
+ &encoder,
+ file->orig_name);
+ }
+ }
+
+ /* offset_key */
+ if (ctx->offset_key != NULL) {
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_cstring(
+ &encoder,
+ file->config->offset_key);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_uint64(
+ &encoder,
+ file->offset +
+ file->last_processed_bytes);
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(&encoder);
+ }
+ else {
+ flb_plg_error(file->config->ins, "error packing event : %d", ret);
+
+ flb_log_event_encoder_rollback_record(&encoder);
+ }
+
+ /* counter */
+ records++;
+ }
+
+ *out_data = encoder.output_buffer;
+ *out_size = encoder.output_length;
+
+    /* This call transfers ownership of the encoder's internal buffer
+     * (released with msgpack_sbuffer_release), which means the caller
+     * is responsible for freeing that memory.
+ */
+ flb_log_event_encoder_claim_internal_buffer_ownership(&encoder);
+
+ flb_log_event_decoder_destroy(&decoder);
+ flb_log_event_encoder_destroy(&encoder);
+
+ return records;
+}
+
+static int flb_tail_repack_map(struct flb_log_event_encoder *encoder,
+ char *data,
+ size_t data_size)
+{
+ msgpack_unpacked source_map;
+ size_t offset;
+ int result;
+ size_t index;
+ msgpack_object value;
+ msgpack_object key;
+
+ result = FLB_EVENT_ENCODER_SUCCESS;
+
+ if (data_size > 0) {
+ msgpack_unpacked_init(&source_map);
+
+ offset = 0;
+ result = msgpack_unpack_next(&source_map,
+ data,
+ data_size,
+ &offset);
+
+ if (result == MSGPACK_UNPACK_SUCCESS) {
+ result = FLB_EVENT_ENCODER_SUCCESS;
+ }
+ else {
+ result = FLB_EVENT_DECODER_ERROR_DESERIALIZATION_FAILURE;
+ }
+
+ for (index = 0;
+ index < source_map.data.via.map.size &&
+ result == FLB_EVENT_ENCODER_SUCCESS;
+ index++) {
+ key = source_map.data.via.map.ptr[index].key;
+ value = source_map.data.via.map.ptr[index].val;
+
+ result = flb_log_event_encoder_append_body_msgpack_object(
+ encoder,
+ &key);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_msgpack_object(
+ encoder,
+ &value);
+ }
+ }
+
+ msgpack_unpacked_destroy(&source_map);
+ }
+
+ return result;
+}
+
+int flb_tail_pack_line_map(struct flb_time *time, char **data,
+ size_t *data_size, struct flb_tail_file *file,
+ size_t processed_bytes)
+{
+ int result;
+
+ result = flb_log_event_encoder_begin_record(file->sl_log_event_encoder);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_timestamp(
+ file->sl_log_event_encoder, time);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_tail_repack_map(file->sl_log_event_encoder,
+ *data,
+ *data_size);
+ }
+
+ /* path_key */
+ if (file->config->path_key != NULL) {
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_values(
+ file->sl_log_event_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(file->config->path_key),
+ FLB_LOG_EVENT_STRING_VALUE(file->orig_name,
+ file->orig_name_len));
+ }
+ }
+
+ /* offset_key */
+ if (file->config->offset_key != NULL) {
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_values(
+ file->sl_log_event_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(file->config->offset_key),
+ FLB_LOG_EVENT_UINT64_VALUE(file->offset + processed_bytes));
+ }
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_commit_record(file->sl_log_event_encoder);
+ }
+ else {
+ flb_log_event_encoder_rollback_record(file->sl_log_event_encoder);
+ }
+
+ if (result != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(file->config->ins, "error packing event");
+
+ return -1;
+ }
+
+ return 0;
+}
+
+int flb_tail_file_pack_line(struct flb_time *time, char *data, size_t data_size,
+ struct flb_tail_file *file, size_t processed_bytes)
+{
+ int result;
+
+ result = flb_log_event_encoder_begin_record(file->sl_log_event_encoder);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_timestamp(
+ file->sl_log_event_encoder, time);
+ }
+
+ /* path_key */
+ if (file->config->path_key != NULL) {
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_values(
+ file->sl_log_event_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(file->config->path_key),
+ FLB_LOG_EVENT_STRING_VALUE(file->orig_name,
+ file->orig_name_len));
+ }
+ }
+
+ /* offset_key */
+ if (file->config->offset_key != NULL) {
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_values(
+ file->sl_log_event_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(file->config->offset_key),
+ FLB_LOG_EVENT_UINT64_VALUE(file->offset + processed_bytes));
+ }
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_append_body_values(
+ file->sl_log_event_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE(file->config->key),
+ FLB_LOG_EVENT_STRING_VALUE(data,
+ data_size));
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_commit_record(file->sl_log_event_encoder);
+ }
+
+ if (result != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(file->config->ins, "error packing event : %d", result);
+
+ return -1;
+ }
+
+ return 0;
+}
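+
+/*
+ * Rough shape of a record produced by flb_tail_file_pack_line(), assuming
+ * path_key and offset_key are configured and 'key' keeps its usual default
+ * of "log":
+ *
+ *   [timestamp, {"<path_key>": "/var/log/app.log", "<offset_key>": 1024, "log": "<raw line>"}]
+ *
+ * The path/offset pairs are omitted when the corresponding options are unset.
+ */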
+
+static int ml_stream_buffer_append(struct flb_tail_file *file, char *buf_data, size_t buf_size)
+{
+ int result;
+
+ result = flb_log_event_encoder_emit_raw_record(
+ file->ml_log_event_encoder,
+ buf_data, buf_size);
+
+ if (result != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(file->config->ins,
+ "log event raw append error : %d",
+ result);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int ml_stream_buffer_flush(struct flb_tail_config *ctx, struct flb_tail_file *file)
+{
+ if (file->ml_log_event_encoder->output_length > 0) {
+ flb_input_log_append(ctx->ins,
+ file->tag_buf,
+ file->tag_len,
+ file->ml_log_event_encoder->output_buffer,
+ file->ml_log_event_encoder->output_length);
+
+ flb_log_event_encoder_reset(file->ml_log_event_encoder);
+ }
+
+ return 0;
+}
+
+static int process_content(struct flb_tail_file *file, size_t *bytes)
+{
+ size_t len;
+ int lines = 0;
+ int ret;
+ size_t processed_bytes = 0;
+ char *data;
+ char *end;
+ char *p;
+ void *out_buf;
+ size_t out_size;
+ int crlf;
+ char *line;
+ size_t line_len;
+ char *repl_line;
+ size_t repl_line_len;
+ time_t now = time(NULL);
+ struct flb_time out_time = {0};
+ struct flb_tail_config *ctx;
+
+ ctx = (struct flb_tail_config *) file->config;
+
+ /* Parse the data content */
+ data = file->buf_data;
+ end = data + file->buf_len;
+
+ /* reset last processed bytes */
+ file->last_processed_bytes = 0;
+
+ /* Skip null characters from the head (sometimes introduced by copy-truncate log rotation) */
+ while (data < end && *data == '\0') {
+ data++;
+ processed_bytes++;
+ }
+
+ while (data < end && (p = memchr(data, '\n', end - data))) {
+ len = (p - data);
+ crlf = 0;
+ if (file->skip_next == FLB_TRUE) {
+ data += len + 1;
+ processed_bytes += len + 1;
+ file->skip_next = FLB_FALSE;
+ continue;
+ }
+
+ /*
+ * Empty line (just breakline)
+ * ---------------------------
+         * [NOTE] with the new Multiline core feature and Multiline Filter in
+         * Fluent Bit v1.8.2, there are a couple of cases where stack traces
+         * or multiline patterns expect an empty line (meaning only the
+         * breakline); skipping empty lines in this plugin would break that
+         * functionality.
+ *
+ * We are introducing 'skip_empty_lines=off' configuration
+ * property to revert this behavior if some user is affected by
+ * this change.
+ */
+
+ if (len == 0 && ctx->skip_empty_lines) {
+ data++;
+ processed_bytes++;
+ continue;
+ }
+
+ /* Process '\r\n' */
+ if (len >= 2) {
+ crlf = (data[len-1] == '\r');
+ if (len == 1 && crlf) {
+ data += 2;
+ processed_bytes += 2;
+ continue;
+ }
+ }
+
+ /* Reset time for each line */
+ flb_time_zero(&out_time);
+
+ line = data;
+ line_len = len - crlf;
+ repl_line = NULL;
+
+ if (ctx->ml_ctx) {
+ ret = flb_ml_append_text(ctx->ml_ctx,
+ file->ml_stream_id,
+ &out_time,
+ line,
+ line_len);
+ goto go_next;
+ }
+ else if (ctx->docker_mode) {
+ ret = flb_tail_dmode_process_content(now, line, line_len,
+ &repl_line, &repl_line_len,
+ file, ctx);
+ if (ret >= 0) {
+ if (repl_line == line) {
+ repl_line = NULL;
+ }
+ else {
+ line = repl_line;
+ line_len = repl_line_len;
+ }
+ /* Skip normal parsers flow */
+ goto go_next;
+ }
+ else {
+ flb_tail_dmode_flush(file, ctx);
+ }
+ }
+
+#ifdef FLB_HAVE_PARSER
+ if (ctx->parser) {
+ /* Common parser (non-multiline) */
+ ret = flb_parser_do(ctx->parser, line, line_len,
+ &out_buf, &out_size, &out_time);
+ if (ret >= 0) {
+ if (flb_time_to_nanosec(&out_time) == 0L) {
+ flb_time_get(&out_time);
+ }
+
+ /* If multiline is enabled, flush any buffered data */
+ if (ctx->multiline == FLB_TRUE) {
+ flb_tail_mult_flush(file, ctx);
+ }
+
+ flb_tail_pack_line_map(&out_time,
+ (char**) &out_buf, &out_size, file,
+ processed_bytes);
+
+ flb_free(out_buf);
+ }
+ else {
+ /* Parser failed, pack raw text */
+ flb_tail_file_pack_line(NULL, data, len, file, processed_bytes);
+ }
+ }
+ else if (ctx->multiline == FLB_TRUE) {
+ ret = flb_tail_mult_process_content(now,
+ line, line_len,
+ file, ctx, processed_bytes);
+
+ /* No multiline */
+ if (ret == FLB_TAIL_MULT_NA) {
+ flb_tail_mult_flush(file, ctx);
+
+ flb_tail_file_pack_line(NULL,
+ line, line_len, file, processed_bytes);
+ }
+ else if (ret == FLB_TAIL_MULT_MORE) {
+ /* we need more data, do nothing */
+ goto go_next;
+ }
+ else if (ret == FLB_TAIL_MULT_DONE) {
+ /* Finalized */
+ }
+ }
+ else {
+ flb_tail_file_pack_line(NULL,
+ line, line_len, file, processed_bytes);
+ }
+#else
+ flb_tail_file_pack_line(NULL,
+ line, line_len, file, processed_bytes);
+#endif
+
+ go_next:
+ flb_free(repl_line);
+ repl_line = NULL;
+ /* Adjust counters */
+ data += len + 1;
+ processed_bytes += len + 1;
+ lines++;
+ file->parsed = 0;
+ file->last_processed_bytes += processed_bytes;
+ }
+ file->parsed = file->buf_len;
+
+ if (lines > 0) {
+ /* Append buffer content to a chunk */
+ *bytes = processed_bytes;
+
+ if (file->sl_log_event_encoder->output_length > 0) {
+ flb_input_log_append_records(ctx->ins,
+ lines,
+ file->tag_buf,
+ file->tag_len,
+ file->sl_log_event_encoder->output_buffer,
+ file->sl_log_event_encoder->output_length);
+
+ flb_log_event_encoder_reset(file->sl_log_event_encoder);
+ }
+ }
+ else if (file->skip_next) {
+ *bytes = file->buf_len;
+ }
+ else {
+ *bytes = processed_bytes;
+ }
+
+ if (ctx->ml_ctx) {
+ ml_stream_buffer_flush(ctx, file);
+ }
+
+ return lines;
+}
+
+static inline void drop_bytes(char *buf, size_t len, int pos, int bytes)
+{
+ memmove(buf + pos,
+ buf + pos + bytes,
+ len - pos - bytes);
+}
+
+#ifdef FLB_HAVE_REGEX
+static void cb_results(const char *name, const char *value,
+ size_t vlen, void *data)
+{
+ struct flb_hash_table *ht = data;
+
+ if (vlen == 0) {
+ return;
+ }
+
+ flb_hash_table_add(ht, name, strlen(name), (void *) value, vlen);
+}
+#endif
+
+#ifdef FLB_HAVE_REGEX
+static int tag_compose(char *tag, struct flb_regex *tag_regex, char *fname,
+ char *out_buf, size_t *out_size,
+ struct flb_tail_config *ctx)
+#else
+static int tag_compose(char *tag, char *fname, char *out_buf, size_t *out_size,
+ struct flb_tail_config *ctx)
+#endif
+{
+ int i;
+ size_t len;
+ char *p;
+ size_t buf_s = 0;
+#ifdef FLB_HAVE_REGEX
+ ssize_t n;
+ struct flb_regex_search result;
+ struct flb_hash_table *ht;
+ char *beg;
+ char *end;
+ int ret;
+ const char *tmp;
+ size_t tmp_s;
+#endif
+
+#ifdef FLB_HAVE_REGEX
+ if (tag_regex) {
+ n = flb_regex_do(tag_regex, fname, strlen(fname), &result);
+ if (n <= 0) {
+ flb_plg_error(ctx->ins, "invalid tag_regex pattern for file %s",
+ fname);
+ return -1;
+ }
+ else {
+ ht = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE,
+ FLB_HASH_TABLE_SIZE, FLB_HASH_TABLE_SIZE);
+ flb_regex_parse(tag_regex, &result, cb_results, ht);
+
+ for (p = tag, beg = p; (beg = strchr(p, '<')); p = end + 2) {
+ if (beg != p) {
+ len = (beg - p);
+ memcpy(out_buf + buf_s, p, len);
+ buf_s += len;
+ }
+
+ beg++;
+
+ end = strchr(beg, '>');
+ if (end && !memchr(beg, '<', end - beg)) {
+ end--;
+
+ len = end - beg + 1;
+ ret = flb_hash_table_get(ht, beg, len, (void *) &tmp, &tmp_s);
+ if (ret != -1) {
+ memcpy(out_buf + buf_s, tmp, tmp_s);
+ buf_s += tmp_s;
+ }
+ else {
+ memcpy(out_buf + buf_s, "_", 1);
+ buf_s++;
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "missing closing angle bracket in tag %s "
+ "at position %lu", tag, beg - tag);
+ flb_hash_table_destroy(ht);
+ return -1;
+ }
+ }
+
+ flb_hash_table_destroy(ht);
+ if (*p) {
+ len = strlen(p);
+ memcpy(out_buf + buf_s, p, len);
+ buf_s += len;
+ }
+ }
+ }
+ else {
+#endif
+ p = strchr(tag, '*');
+ if (!p) {
+ return -1;
+ }
+
+ /* Copy tag prefix if any */
+ len = (p - tag);
+ if (len > 0) {
+ memcpy(out_buf, tag, len);
+ buf_s += len;
+ }
+
+ /* Append file name */
+ len = strlen(fname);
+ memcpy(out_buf + buf_s, fname, len);
+ buf_s += len;
+
+ /* Tag suffix (if any) */
+ p++;
+ if (*p) {
+ len = strlen(tag);
+ memcpy(out_buf + buf_s, p, (len - (p - tag)));
+ buf_s += (len - (p - tag));
+ }
+
+ /* Sanitize buffer */
+ for (i = 0; i < buf_s; i++) {
+ if (out_buf[i] == '/' || out_buf[i] == '\\' || out_buf[i] == ':') {
+ if (i > 0) {
+ out_buf[i] = '.';
+ }
+ else {
+ drop_bytes(out_buf, buf_s, i, 1);
+ buf_s--;
+ i--;
+ }
+ }
+
+ if (i > 0 && out_buf[i] == '.') {
+ if (out_buf[i - 1] == '.') {
+ drop_bytes(out_buf, buf_s, i, 1);
+ buf_s--;
+ i--;
+ }
+ }
+ else if (out_buf[i] == '*') {
+ drop_bytes(out_buf, buf_s, i, 1);
+ buf_s--;
+ i--;
+ }
+ }
+
+ /* Check for an ending '.' */
+ if (out_buf[buf_s - 1] == '.') {
+ drop_bytes(out_buf, buf_s, buf_s - 1, 1);
+ buf_s--;
+ }
+#ifdef FLB_HAVE_REGEX
+ }
+#endif
+
+ out_buf[buf_s] = '\0';
+ *out_size = buf_s;
+
+ return 0;
+}
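+
+/*
+ * Example (wildcard path, no tag_regex): with Tag 'kube.*' and the file
+ * /var/log/containers/app.log, the composed tag becomes roughly
+ * 'kube.var.log.containers.app.log' (slashes turned into dots, leading
+ * separators dropped, duplicate dots collapsed).
+ */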
+
+static inline int flb_tail_file_exists(struct stat *st,
+ struct flb_tail_config *ctx)
+{
+ int ret;
+ uint64_t hash;
+
+ ret = stat_to_hash_bits(ctx, st, &hash);
+ if (ret != 0) {
+ return -1;
+ }
+
+ /* static hash */
+ if (flb_hash_table_exists(ctx->static_hash, hash)) {
+ return FLB_TRUE;
+ }
+
+ /* event hash */
+ if (flb_hash_table_exists(ctx->event_hash, hash)) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+/*
+ * Based on the configuration or the database offset, set the proper
+ * 'offset' for the file in question.
+ */
+static int set_file_position(struct flb_tail_config *ctx,
+ struct flb_tail_file *file)
+{
+ int64_t ret;
+
+#ifdef FLB_HAVE_SQLDB
+ /*
+ * If the database option is enabled, try to gather the file position. The
+ * database function updates the file->offset entry.
+ */
+ if (ctx->db) {
+ ret = flb_tail_db_file_set(file, ctx);
+ if (ret == 0) {
+ if (file->offset > 0) {
+ ret = lseek(file->fd, file->offset, SEEK_SET);
+ if (ret == -1) {
+ flb_errno();
+ return -1;
+ }
+ }
+ else if (ctx->read_from_head == FLB_FALSE) {
+ ret = lseek(file->fd, 0, SEEK_END);
+ if (ret == -1) {
+ flb_errno();
+ return -1;
+ }
+ file->offset = ret;
+ flb_tail_db_file_offset(file, ctx);
+ }
+ return 0;
+ }
+ }
+#endif
+
+ if (ctx->read_from_head == FLB_TRUE) {
+ /* no need to seek, offset position is already zero */
+ return 0;
+ }
+
+ /* tail... */
+ ret = lseek(file->fd, 0, SEEK_END);
+ if (ret == -1) {
+ flb_errno();
+ return -1;
+ }
+ file->offset = ret;
+
+ return 0;
+}
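+
+/*
+ * In short: with a database configured, the stored offset wins; otherwise
+ * 'read_from_head on' starts at byte 0 and the default behavior seeks to
+ * the end of the file (classic tail semantics).
+ */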
+
+/* Multiline flush callback: invoked every time some content is complete */
+static int ml_flush_callback(struct flb_ml_parser *parser,
+ struct flb_ml_stream *mst,
+ void *data, char *buf_data, size_t buf_size)
+{
+ int result;
+ size_t mult_size = 0;
+ char *mult_buf = NULL;
+ struct flb_tail_file *file = data;
+ struct flb_tail_config *ctx = file->config;
+
+ if (ctx->path_key == NULL && ctx->offset_key == NULL) {
+ ml_stream_buffer_append(file, buf_data, buf_size);
+ }
+ else {
+ /* adjust the records in a new buffer */
+ result = record_append_custom_keys(file,
+ buf_data,
+ buf_size,
+ &mult_buf,
+ &mult_size);
+
+ if (result < 0) {
+ ml_stream_buffer_append(file, buf_data, buf_size);
+ }
+ else {
+ ml_stream_buffer_append(file, mult_buf, mult_size);
+
+ flb_free(mult_buf);
+ }
+ }
+
+ if (mst->forced_flush) {
+ ml_stream_buffer_flush(ctx, file);
+ }
+
+ return 0;
+}
+
+int flb_tail_file_append(char *path, struct stat *st, int mode,
+ struct flb_tail_config *ctx)
+{
+ int fd;
+ int ret;
+ uint64_t stream_id;
+ uint64_t ts;
+ uint64_t hash_bits;
+ flb_sds_t hash_key;
+ size_t len;
+ char *tag;
+ char *name;
+ size_t tag_len;
+ struct flb_tail_file *file;
+ struct stat lst;
+ flb_sds_t inode_str;
+
+ if (!S_ISREG(st->st_mode)) {
+ return -1;
+ }
+
+ if (flb_tail_file_exists(st, ctx) == FLB_TRUE) {
+ return -1;
+ }
+
+ fd = open(path, O_RDONLY);
+ if (fd == -1) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "cannot open %s", path);
+ return -1;
+ }
+
+ file = flb_calloc(1, sizeof(struct flb_tail_file));
+ if (!file) {
+ flb_errno();
+ goto error;
+ }
+
+ /* Initialize */
+ file->watch_fd = -1;
+ file->fd = fd;
+
+ /* On non-windows environments check if the original path is a link */
+ ret = lstat(path, &lst);
+ if (ret == 0) {
+ if (S_ISLNK(lst.st_mode)) {
+ file->is_link = FLB_TRUE;
+ file->link_inode = lst.st_ino;
+ }
+ }
+
+ /* get unique hash for this file */
+ ret = stat_to_hash_bits(ctx, st, &hash_bits);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "error procesisng hash bits for file %s", path);
+ goto error;
+ }
+ file->hash_bits = hash_bits;
+
+ /* store the hash key used for hash_bits */
+ ret = stat_to_hash_key(ctx, st, &hash_key);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "error procesisng hash key for file %s", path);
+ goto error;
+ }
+ file->hash_key = hash_key;
+
+ file->inode = st->st_ino;
+ file->offset = 0;
+ file->size = st->st_size;
+ file->buf_len = 0;
+ file->parsed = 0;
+ file->config = ctx;
+ file->tail_mode = mode;
+ file->tag_len = 0;
+ file->tag_buf = NULL;
+ file->rotated = 0;
+ file->pending_bytes = 0;
+ file->mult_firstline = FLB_FALSE;
+ file->mult_keys = 0;
+ file->mult_flush_timeout = 0;
+ file->mult_skipping = FLB_FALSE;
+
+ /*
+     * Duplicate the path into the 'file' structure; the called function
+     * takes care of resolving the real name of the file in case we are
+     * running on a non-Linux system.
+     *
+     * Depending on the operating system, the way to obtain the file
+     * name associated with a file descriptor can behave differently,
+     * specifically when the root path is under a symbolic link. On Linux
+     * we can trust the file name, but on other systems it is better to
+     * resolve it with some extra calls.
+ */
+ ret = flb_tail_file_name_dup(path, file);
+ if (!file->name) {
+ flb_errno();
+ goto error;
+ }
+
+ /* We keep a copy of the initial filename in orig_name. This is required
+ * for path_key to continue working after rotation. */
+ file->orig_name = flb_strdup(file->name);
+ if (!file->orig_name) {
+ flb_free(file->name);
+ flb_errno();
+ goto error;
+ }
+ file->orig_name_len = file->name_len;
+
+ /* multiline msgpack buffers */
+ file->mult_records = 0;
+ msgpack_sbuffer_init(&file->mult_sbuf);
+ msgpack_packer_init(&file->mult_pck, &file->mult_sbuf,
+ msgpack_sbuffer_write);
+
+ /* docker mode */
+ file->dmode_flush_timeout = 0;
+ file->dmode_complete = true;
+ file->dmode_buf = flb_sds_create_size(ctx->docker_mode == FLB_TRUE ? 65536 : 0);
+ file->dmode_lastline = flb_sds_create_size(ctx->docker_mode == FLB_TRUE ? 20000 : 0);
+ file->dmode_firstline = false;
+#ifdef FLB_HAVE_SQLDB
+ file->db_id = 0;
+#endif
+ file->skip_next = FLB_FALSE;
+ file->skip_warn = FLB_FALSE;
+
+ /* Multiline core mode */
+ if (ctx->ml_ctx) {
+ /*
+ * Create inode str to get stream_id.
+ *
+ * If stream_id is created by filename,
+         * it will be the same after file rotation, which causes an invalid destruction:
+ *
+ * - https://github.com/fluent/fluent-bit/issues/4190
+ */
+ inode_str = flb_sds_create_size(64);
+ flb_sds_printf(&inode_str, "%"PRIu64, file->inode);
+
+ /* Create a stream for this file */
+ ret = flb_ml_stream_create(ctx->ml_ctx,
+ inode_str, flb_sds_len(inode_str),
+ ml_flush_callback, file,
+ &stream_id);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins,
+ "could not create multiline stream for file: %s",
+ inode_str);
+ flb_sds_destroy(inode_str);
+ goto error;
+ }
+ file->ml_stream_id = stream_id;
+ flb_sds_destroy(inode_str);
+
+ /*
+         * Multiline core file buffer: the multiline core functionality invokes a callback every time a message is ready
+         * to be processed by the caller; this can be a multiline message or a message that is considered 'complete'. In
+         * the previous version of Tail, every received message was automatically ingested into the pipeline
+         * without any prior buffering, which led to performance degradation.
+         *
+         * The msgpack buffer 'ml_sbuf' keeps all records provided by the multiline engine and is flushed only when the
+         * file processor finishes processing the "read() bytes".
+ */
+ }
+
+ /* Local buffer */
+ file->buf_size = ctx->buf_chunk_size;
+ file->buf_data = flb_malloc(file->buf_size);
+ if (!file->buf_data) {
+ flb_errno();
+ goto error;
+ }
+
+ /* Initialize (optional) dynamic tag */
+ if (ctx->dynamic_tag == FLB_TRUE) {
+ len = ctx->ins->tag_len + strlen(path) + 1;
+ tag = flb_malloc(len);
+ if (!tag) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "failed to allocate tag buffer");
+ goto error;
+ }
+#ifdef FLB_HAVE_REGEX
+ ret = tag_compose(ctx->ins->tag, ctx->tag_regex, path, tag, &tag_len, ctx);
+#else
+ ret = tag_compose(ctx->ins->tag, path, tag, &tag_len, ctx);
+#endif
+ if (ret == 0) {
+ file->tag_len = tag_len;
+ file->tag_buf = flb_strdup(tag);
+ }
+ flb_free(tag);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "failed to compose tag for file: %s", path);
+ goto error;
+ }
+ }
+ else {
+ file->tag_len = strlen(ctx->ins->tag);
+ file->tag_buf = flb_strdup(ctx->ins->tag);
+ }
+ if (!file->tag_buf) {
+ flb_plg_error(ctx->ins, "failed to set tag for file: %s", path);
+ flb_errno();
+ goto error;
+ }
+
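+    /*
+     * Register the file in the list and hash table that match its mode:
+     * 'static' files are consumed until EOF and later promoted, while
+     * 'event' files are watched through the filesystem event backend.
+     */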
+ if (mode == FLB_TAIL_STATIC) {
+ mk_list_add(&file->_head, &ctx->files_static);
+ ctx->files_static_count++;
+ flb_hash_table_add(ctx->static_hash, file->hash_key, flb_sds_len(file->hash_key),
+ file, sizeof(file));
+ tail_signal_manager(file->config);
+ }
+ else if (mode == FLB_TAIL_EVENT) {
+ mk_list_add(&file->_head, &ctx->files_event);
+ flb_hash_table_add(ctx->event_hash, file->hash_key, flb_sds_len(file->hash_key),
+ file, sizeof(file));
+
+ /* Register this file into the fs_event monitoring */
+ ret = flb_tail_fs_add(ctx, file);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not register file into fs_events");
+ goto error;
+ }
+ }
+
+ /* Set the file position (database offset, head or tail) */
+ ret = set_file_position(ctx, file);
+ if (ret == -1) {
+ flb_tail_file_remove(file);
+ goto error;
+ }
+
+ /* Remaining bytes to read */
+ file->pending_bytes = file->size - file->offset;
+
+#ifdef FLB_HAVE_METRICS
+ name = (char *) flb_input_name(ctx->ins);
+ ts = cfl_time_now();
+ cmt_counter_inc(ctx->cmt_files_opened, ts, 1, (char *[]) {name});
+
+ /* Old api */
+ flb_metrics_sum(FLB_TAIL_METRIC_F_OPENED, 1, ctx->ins->metrics);
+#endif
+
+ file->sl_log_event_encoder = flb_log_event_encoder_create(
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (file->sl_log_event_encoder == NULL) {
+ flb_tail_file_remove(file);
+
+ goto error;
+ }
+
+ file->ml_log_event_encoder = flb_log_event_encoder_create(
+ FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (file->ml_log_event_encoder == NULL) {
+ flb_tail_file_remove(file);
+
+ goto error;
+ }
+
+ flb_plg_debug(ctx->ins,
+ "inode=%"PRIu64" with offset=%"PRId64" appended as %s",
+ file->inode, file->offset, path);
+ return 0;
+
+error:
+ if (file) {
+ if (file->buf_data) {
+ flb_free(file->buf_data);
+ }
+ if (file->name) {
+ flb_free(file->name);
+ }
+ flb_free(file);
+ }
+ close(fd);
+
+ return -1;
+}
+
+void flb_tail_file_remove(struct flb_tail_file *file)
+{
+ uint64_t ts;
+ char *name;
+ struct flb_tail_config *ctx;
+
+ ctx = file->config;
+
+ flb_plg_debug(ctx->ins, "inode=%"PRIu64" removing file name %s",
+ file->inode, file->name);
+
+ /* remove the multiline.core stream */
+ if (ctx->ml_ctx && file->ml_stream_id > 0) {
+ /* destroy ml stream */
+ flb_ml_stream_id_destroy_all(ctx->ml_ctx, file->ml_stream_id);
+ }
+
+ if (file->rotated > 0) {
+#ifdef FLB_HAVE_SQLDB
+ /*
+         * Make sure to remove the file entry from the database if the file
+         * was rotated and it is no longer being monitored.
+ */
+ if (ctx->db) {
+ flb_tail_db_file_delete(file, file->config);
+ }
+#endif
+ mk_list_del(&file->_rotate_head);
+ }
+
+ msgpack_sbuffer_destroy(&file->mult_sbuf);
+
+ if (file->sl_log_event_encoder != NULL) {
+ flb_log_event_encoder_destroy(file->sl_log_event_encoder);
+ }
+
+ if (file->ml_log_event_encoder != NULL) {
+ flb_log_event_encoder_destroy(file->ml_log_event_encoder);
+ }
+
+ flb_sds_destroy(file->dmode_buf);
+ flb_sds_destroy(file->dmode_lastline);
+ mk_list_del(&file->_head);
+ flb_tail_fs_remove(ctx, file);
+
+ /* avoid deleting file with -1 fd */
+ if (file->fd != -1) {
+ close(file->fd);
+ }
+ if (file->tag_buf) {
+ flb_free(file->tag_buf);
+ }
+
+ /* remove any potential entry from the hash tables */
+ flb_hash_table_del(ctx->static_hash, file->hash_key);
+ flb_hash_table_del(ctx->event_hash, file->hash_key);
+
+ flb_free(file->buf_data);
+ flb_free(file->name);
+ flb_free(file->orig_name);
+ flb_free(file->real_name);
+ flb_sds_destroy(file->hash_key);
+
+#ifdef FLB_HAVE_METRICS
+ name = (char *) flb_input_name(ctx->ins);
+ ts = cfl_time_now();
+ cmt_counter_inc(ctx->cmt_files_closed, ts, 1, (char *[]) {name});
+
+ /* old api */
+ flb_metrics_sum(FLB_TAIL_METRIC_F_CLOSED, 1, ctx->ins->metrics);
+#endif
+
+ flb_free(file);
+}
+
+int flb_tail_file_remove_all(struct flb_tail_config *ctx)
+{
+ int count = 0;
+ struct mk_list *head;
+ struct mk_list *tmp;
+ struct flb_tail_file *file;
+
+ mk_list_foreach_safe(head, tmp, &ctx->files_static) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+ flb_tail_file_remove(file);
+ count++;
+ }
+
+ mk_list_foreach_safe(head, tmp, &ctx->files_event) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+ flb_tail_file_remove(file);
+ count++;
+ }
+
+ return count;
+}
+
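+/*
+ * Refresh the file size and pending bytes using fstat(2); if the file was
+ * truncated, rewind the read offset to zero and reset the local buffer.
+ */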
+static int adjust_counters(struct flb_tail_config *ctx, struct flb_tail_file *file)
+{
+ int ret;
+ int64_t offset;
+ struct stat st;
+
+ ret = fstat(file->fd, &st);
+ if (ret == -1) {
+ flb_errno();
+ return FLB_TAIL_ERROR;
+ }
+
+ /* Check if the file was truncated */
+ if (file->offset > st.st_size) {
+ offset = lseek(file->fd, 0, SEEK_SET);
+ if (offset == -1) {
+ flb_errno();
+ return FLB_TAIL_ERROR;
+ }
+
+ flb_plg_debug(ctx->ins, "inode=%"PRIu64" file truncated %s",
+ file->inode, file->name);
+ file->offset = offset;
+ file->buf_len = 0;
+
+ /* Update offset in the database file */
+#ifdef FLB_HAVE_SQLDB
+ if (ctx->db) {
+ flb_tail_db_file_offset(file, ctx);
+ }
+#endif
+ }
+ else {
+ file->size = st.st_size;
+ file->pending_bytes = (st.st_size - file->offset);
+ }
+
+ return FLB_TAIL_OK;
+}
+
+int flb_tail_file_chunk(struct flb_tail_file *file)
+{
+ int ret;
+ char *tmp;
+ size_t size;
+ size_t capacity;
+ size_t processed_bytes;
+ ssize_t bytes;
+ struct flb_tail_config *ctx;
+
+    /* Check if the engine issued a pause */
+ ctx = file->config;
+ if (flb_input_buf_paused(ctx->ins) == FLB_TRUE) {
+ return FLB_TAIL_BUSY;
+ }
+
+ capacity = (file->buf_size - file->buf_len) - 1;
+ if (capacity < 1) {
+ /*
+         * If there is no room for more data, try to increase the
+         * buffer size up to the limit of buffer_max_size.
+ */
+ if (file->buf_size >= ctx->buf_max_size) {
+ if (ctx->skip_long_lines == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "file=%s requires a larger buffer size, "
+ "lines are too long. Skipping file.", file->name);
+ return FLB_TAIL_ERROR;
+ }
+
+ /* Warn the user */
+ if (file->skip_warn == FLB_FALSE) {
+ flb_plg_warn(ctx->ins, "file=%s have long lines. "
+ "Skipping long lines.", file->name);
+ file->skip_warn = FLB_TRUE;
+ }
+
+ /* Do buffer adjustments */
+ file->offset += file->buf_len;
+ file->buf_len = 0;
+ file->skip_next = FLB_TRUE;
+ }
+ else {
+ size = file->buf_size + ctx->buf_chunk_size;
+ if (size > ctx->buf_max_size) {
+ size = ctx->buf_max_size;
+ }
+
+ /* Increase the buffer size */
+ tmp = flb_realloc(file->buf_data, size);
+ if (tmp) {
+ flb_plg_trace(ctx->ins, "file=%s increase buffer size "
+ "%lu => %lu bytes",
+ file->name, file->buf_size, size);
+ file->buf_data = tmp;
+ file->buf_size = size;
+ }
+ else {
+ flb_errno();
+ flb_plg_error(ctx->ins, "cannot increase buffer size for %s, "
+ "skipping file.", file->name);
+ return FLB_TAIL_ERROR;
+ }
+ }
+ capacity = (file->buf_size - file->buf_len) - 1;
+ }
+
+ bytes = read(file->fd, file->buf_data + file->buf_len, capacity);
+ if (bytes > 0) {
+ /* we read some data, let the content processor take care of it */
+ file->buf_len += bytes;
+ file->buf_data[file->buf_len] = '\0';
+
+ /* Now that we have some data in the buffer, call the data processor
+ * which aims to cut lines and register the entries into the engine.
+ *
+         * The returned value is the absolute offset the file must seek to
+         * now. It may need to go back a few bytes to the beginning of a new
+         * line.
+ */
+ ret = process_content(file, &processed_bytes);
+ if (ret < 0) {
+ flb_plg_debug(ctx->ins, "inode=%"PRIu64" file=%s process content ERROR",
+ file->inode, file->name);
+ return FLB_TAIL_ERROR;
+ }
+
+ /* Adjust the file offset and buffer */
+ file->offset += processed_bytes;
+ consume_bytes(file->buf_data, processed_bytes, file->buf_len);
+ file->buf_len -= processed_bytes;
+ file->buf_data[file->buf_len] = '\0';
+
+#ifdef FLB_HAVE_SQLDB
+ if (file->config->db) {
+ flb_tail_db_file_offset(file, file->config);
+ }
+#endif
+
+ /* adjust file counters, returns FLB_TAIL_OK or FLB_TAIL_ERROR */
+ ret = adjust_counters(ctx, file);
+
+ /* Data was consumed but likely some bytes still remain */
+ return ret;
+ }
+ else if (bytes == 0) {
+ /* We reached the end of file, let's wait for some incoming data */
+ ret = adjust_counters(ctx, file);
+ if (ret == FLB_TAIL_OK) {
+ return FLB_TAIL_WAIT;
+ }
+ else {
+ return FLB_TAIL_ERROR;
+ }
+ }
+ else {
+ /* error */
+ flb_errno();
+ flb_plg_error(ctx->ins, "error reading %s", file->name);
+ return FLB_TAIL_ERROR;
+ }
+
+ return FLB_TAIL_ERROR;
+}
+
+/* Returns FLB_TRUE if a file has been rotated, otherwise FLB_FALSE */
+int flb_tail_file_is_rotated(struct flb_tail_config *ctx,
+ struct flb_tail_file *file)
+{
+ int ret;
+ char *name;
+ struct stat st;
+
+ /*
+ * Do not double-check already rotated files since the caller of this
+ * function will trigger a rotation.
+ */
+ if (file->rotated != 0) {
+ return FLB_FALSE;
+ }
+
+ /* Check if the 'original monitored file' is a link and rotated */
+ if (file->is_link == FLB_TRUE) {
+ ret = lstat(file->name, &st);
+ if (ret == -1) {
+ /* Broken link or missing file */
+ if (errno == ENOENT) {
+ flb_plg_info(ctx->ins, "inode=%"PRIu64" link_rotated: %s",
+ file->link_inode, file->name);
+ return FLB_TRUE;
+ }
+ else {
+ flb_errno();
+ flb_plg_error(ctx->ins,
+ "link_inode=%"PRIu64" cannot detect if file: %s",
+ file->link_inode, file->name);
+ return -1;
+ }
+ }
+ else {
+            /* The file name is there, check if it is the same one we have */
+ if (st.st_ino != file->link_inode) {
+ return FLB_TRUE;
+ }
+ }
+ }
+
+ /* Retrieve the real file name, operating system lookup */
+ name = flb_tail_file_name(file);
+ if (!name) {
+ flb_plg_error(ctx->ins,
+ "inode=%"PRIu64" cannot detect if file was rotated: %s",
+ file->inode, file->name);
+ return -1;
+ }
+
+
+ /* Get stats from the file name */
+ ret = stat(name, &st);
+ if (ret == -1) {
+ flb_errno();
+ flb_free(name);
+ return -1;
+ }
+
+ /* Compare inodes and names */
+ if (file->inode == st.st_ino &&
+ flb_tail_target_file_name_cmp(name, file) == 0) {
+ flb_free(name);
+ return FLB_FALSE;
+ }
+
+ flb_plg_debug(ctx->ins, "inode=%"PRIu64" rotated: %s => %s",
+ file->inode, file->name, name);
+
+ flb_free(name);
+ return FLB_TRUE;
+}
+
+/* Promote a file from the static list to the dynamic 'events' interface */
+int flb_tail_file_to_event(struct flb_tail_file *file)
+{
+ int ret;
+ struct stat st;
+ struct flb_tail_config *ctx = file->config;
+
+    /* Check if the promoted file has pending bytes */
+ ret = fstat(file->fd, &st);
+ if (ret != 0) {
+ flb_errno();
+ return -1;
+ }
+
+ if (file->offset < st.st_size) {
+ file->pending_bytes = (st.st_size - file->offset);
+ tail_signal_pending(file->config);
+ }
+ else {
+ file->pending_bytes = 0;
+ }
+
+ /* Check if the file has been rotated */
+ ret = flb_tail_file_is_rotated(ctx, file);
+ if (ret == FLB_TRUE) {
+ flb_tail_file_rotated(file);
+ }
+
+ /* Notify the fs-event handler that we will start monitoring this 'file' */
+ ret = flb_tail_fs_add(ctx, file);
+ if (ret == -1) {
+ return -1;
+ }
+
+ /* List swap: change from 'static' to 'event' list */
+ mk_list_del(&file->_head);
+ ctx->files_static_count--;
+ flb_hash_table_del(ctx->static_hash, file->hash_key);
+
+ mk_list_add(&file->_head, &file->config->files_event);
+ flb_hash_table_add(ctx->event_hash, file->hash_key, flb_sds_len(file->hash_key),
+ file, sizeof(file));
+
+ file->tail_mode = FLB_TAIL_EVENT;
+
+ return 0;
+}
+
+/*
+ * Given an open file descriptor, return the filename. This function is a
+ * bit slow, so it is intended to be used only when a file is rotated.
+ */
+char *flb_tail_file_name(struct flb_tail_file *file)
+{
+ int ret;
+ char *buf;
+#ifdef __linux__
+ ssize_t s;
+ char tmp[128];
+#elif defined(__APPLE__)
+ char path[PATH_MAX];
+#elif defined(FLB_SYSTEM_WINDOWS)
+ HANDLE h;
+#elif defined(FLB_SYSTEM_FREEBSD)
+ struct kinfo_file *file_entries;
+ int file_count;
+ int file_index;
+#endif
+
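+    /* The returned buffer is PATH_MAX bytes and must be released by the
+     * caller with flb_free() */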
+ buf = flb_malloc(PATH_MAX);
+ if (!buf) {
+ flb_errno();
+ return NULL;
+ }
+
+#ifdef __linux__
+ ret = snprintf(tmp, sizeof(tmp) - 1, "/proc/%i/fd/%i", getpid(), file->fd);
+ if (ret == -1) {
+ flb_errno();
+ flb_free(buf);
+ return NULL;
+ }
+
+ s = readlink(tmp, buf, PATH_MAX);
+ if (s == -1) {
+ flb_free(buf);
+ flb_errno();
+ return NULL;
+ }
+ buf[s] = '\0';
+
+#elif __APPLE__
+ int len;
+
+ ret = fcntl(file->fd, F_GETPATH, path);
+ if (ret == -1) {
+ flb_errno();
+ flb_free(buf);
+ return NULL;
+ }
+
+ len = strlen(path);
+ memcpy(buf, path, len);
+ buf[len] = '\0';
+
+#elif defined(FLB_SYSTEM_WINDOWS)
+ int len;
+
+ h = (HANDLE) _get_osfhandle(file->fd);
+ if (h == INVALID_HANDLE_VALUE) {
+ flb_errno();
+ flb_free(buf);
+ return NULL;
+ }
+
+ /* This function returns the length of the string excluding "\0"
+ * and the resulting path has a "\\?\" prefix.
+ */
+ len = GetFinalPathNameByHandleA(h, buf, PATH_MAX, FILE_NAME_NORMALIZED);
+ if (len == 0 || len >= PATH_MAX) {
+ flb_free(buf);
+ return NULL;
+ }
+
+ if (strstr(buf, "\\\\?\\")) {
+ memmove(buf, buf + 4, len + 1);
+ }
+#elif defined(FLB_SYSTEM_FREEBSD)
+ if ((file_entries = kinfo_getfile(getpid(), &file_count)) == NULL) {
+ flb_free(buf);
+ return NULL;
+ }
+
+ for (file_index=0; file_index < file_count; file_index++) {
+ if (file_entries[file_index].kf_fd == file->fd) {
+ strncpy(buf, file_entries[file_index].kf_path, PATH_MAX - 1);
+ buf[PATH_MAX - 1] = 0;
+ break;
+ }
+ }
+ free(file_entries);
+#endif
+ return buf;
+}
+
+int flb_tail_file_name_dup(char *path, struct flb_tail_file *file)
+{
+ file->name = flb_strdup(path);
+ if (!file->name) {
+ flb_errno();
+ return -1;
+ }
+ file->name_len = strlen(file->name);
+
+ if (file->real_name) {
+ flb_free(file->real_name);
+ }
+
+ file->real_name = flb_tail_file_name(file);
+ if (!file->real_name) {
+ flb_errno();
+ flb_free(file->name);
+ file->name = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Invoked every time a file was rotated */
+int flb_tail_file_rotated(struct flb_tail_file *file)
+{
+ int ret;
+ uint64_t ts;
+ char *name;
+ char *i_name;
+ char *tmp;
+ struct stat st;
+ struct flb_tail_config *ctx = file->config;
+
+ /* Get the new file name */
+ name = flb_tail_file_name(file);
+ if (!name) {
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "inode=%"PRIu64" rotated %s -> %s",
+ file->inode, file->name, name);
+
+ /* Update local file entry */
+ tmp = file->name;
+ flb_tail_file_name_dup(name, file);
+ flb_plg_info(ctx->ins, "inode=%"PRIu64" handle rotation(): %s => %s",
+ file->inode, tmp, file->name);
+ if (file->rotated == 0) {
+ file->rotated = time(NULL);
+ mk_list_add(&file->_rotate_head, &file->config->files_rotated);
+
+ /* Rotate the file in the database */
+#ifdef FLB_HAVE_SQLDB
+ if (file->config->db) {
+ ret = flb_tail_db_file_rotate(name, file, file->config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not rotate file %s->%s in database",
+ file->name, name);
+ }
+ }
+#endif
+
+#ifdef FLB_HAVE_METRICS
+ i_name = (char *) flb_input_name(ctx->ins);
+ ts = cfl_time_now();
+ cmt_counter_inc(ctx->cmt_files_rotated, ts, 1, (char *[]) {i_name});
+
+ /* OLD api */
+ flb_metrics_sum(FLB_TAIL_METRIC_F_ROTATED,
+ 1, file->config->ins->metrics);
+#endif
+
+ /* Check if a new file has been created */
+ ret = stat(tmp, &st);
+ if (ret == 0 && st.st_ino != file->inode) {
+ if (flb_tail_file_exists(&st, ctx) == FLB_FALSE) {
+ ret = flb_tail_file_append(tmp, &st, FLB_TAIL_STATIC, ctx);
+ if (ret == -1) {
+ flb_tail_scan(ctx->path_list, ctx);
+ }
+ else {
+ tail_signal_manager(file->config);
+ }
+ }
+ }
+ }
+ flb_free(tmp);
+ flb_free(name);
+
+ return 0;
+}
+
+static int check_purge_deleted_file(struct flb_tail_config *ctx,
+ struct flb_tail_file *file, time_t ts)
+{
+ int ret;
+ struct stat st;
+
+ ret = fstat(file->fd, &st);
+ if (ret == -1) {
+ flb_plg_debug(ctx->ins, "error stat(2) %s, removing", file->name);
+ flb_tail_file_remove(file);
+ return FLB_TRUE;
+ }
+
+ if (st.st_nlink == 0) {
+ flb_plg_debug(ctx->ins, "purge: monitored file has been deleted: %s",
+ file->name);
+#ifdef FLB_HAVE_SQLDB
+ if (ctx->db) {
+ /* Remove file entry from the database */
+ flb_tail_db_file_delete(file, file->config);
+ }
+#endif
+ /* Remove file from the monitored list */
+ flb_tail_file_remove(file);
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+/* Purge rotated and deleted files */
+int flb_tail_file_purge(struct flb_input_instance *ins,
+ struct flb_config *config, void *context)
+{
+ int ret;
+ int count = 0;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_tail_file *file;
+ struct flb_tail_config *ctx = context;
+ time_t now;
+ struct stat st;
+
+ /* Rotated files */
+ now = time(NULL);
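+    /* a rotated file is purged only after 'rotate_wait' seconds have elapsed
+     * since the rotation was detected */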
+ mk_list_foreach_safe(head, tmp, &ctx->files_rotated) {
+ file = mk_list_entry(head, struct flb_tail_file, _rotate_head);
+ if ((file->rotated + ctx->rotate_wait) <= now) {
+ ret = fstat(file->fd, &st);
+ if (ret == 0) {
+ flb_plg_debug(ctx->ins,
+ "inode=%"PRIu64" purge rotated file %s " \
+ "(offset=%"PRId64" / size = %"PRIu64")",
+ file->inode, file->name, file->offset, (uint64_t)st.st_size);
+ if (file->pending_bytes > 0 && flb_input_buf_paused(ins)) {
+ flb_plg_warn(ctx->ins, "purged rotated file while data "
+ "ingestion is paused, consider increasing "
+ "rotate_wait");
+ }
+ }
+ else {
+ flb_plg_debug(ctx->ins,
+ "inode=%"PRIu64" purge rotated file %s (offset=%"PRId64")",
+ file->inode, file->name, file->offset);
+ }
+
+ flb_tail_file_remove(file);
+ count++;
+ }
+ }
+
+ /*
+     * Deleted files: under high load scenarios there is a chance that our
+     * event loop misses some notifications about a file. In order to
+     * sanitize our list of monitored files we iterate all of them and check
+     * whether they have been deleted or not.
+ */
+ mk_list_foreach_safe(head, tmp, &ctx->files_static) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+ check_purge_deleted_file(ctx, file, now);
+ }
+ mk_list_foreach_safe(head, tmp, &ctx->files_event) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+ check_purge_deleted_file(ctx, file, now);
+ }
+
+ return count;
+}
diff --git a/src/fluent-bit/plugins/in_tail/tail_file.h b/src/fluent-bit/plugins/in_tail/tail_file.h
new file mode 100644
index 000000000..796224c37
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_file.h
@@ -0,0 +1,137 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TAIL_FILE_H
+#define FLB_TAIL_FILE_H
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_input.h>
+
+#include "tail.h"
+#include "tail_fs.h"
+#include "tail_config.h"
+#include "tail_file_internal.h"
+
+#ifdef FLB_SYSTEM_WINDOWS
+#include "win32.h"
+#endif
+
+#ifdef FLB_HAVE_REGEX
+#define FLB_HASH_TABLE_SIZE 50
+#endif
+
+/* return the file modification time in seconds since epoch */
+static inline int64_t flb_tail_stat_mtime(struct stat *st)
+{
+#if defined(FLB_HAVE_WINDOWS)
+ return (int64_t) st->st_mtime;
+#elif defined(__APPLE__) && !defined(_POSIX_C_SOURCE)
+ return (int64_t) st->st_mtimespec.tv_sec;
+#elif (_POSIX_C_SOURCE >= 200809L || \
+ defined(_BSD_SOURCE) || defined(_SVID_SOURCE) || \
+ defined(__BIONIC__) || (defined (__SVR4) && defined (__sun)) || \
+ defined(__FreeBSD__) || defined (__linux__))
+ return (int64_t) st->st_mtim.tv_sec;
+#elif defined(_AIX)
+ return (int64_t) st->st_mtime;
+#else
+ return (int64_t) st->st_mtime;
+#endif
+
+ /* backend unsupported: submit a PR :) */
+ return -1;
+}
+
+static inline int flb_tail_target_file_name_cmp(char *name,
+ struct flb_tail_file *file)
+{
+ int ret;
+ char *name_a = NULL;
+ char *name_b = NULL;
+ char *base_a = NULL;
+ char *base_b = NULL;
+
+ name_a = flb_strdup(name);
+ if (!name_a) {
+ flb_errno();
+ ret = -1;
+ goto out;
+ }
+
+ base_a = flb_strdup(basename(name_a));
+ if (!base_a) {
+ flb_errno();
+ ret = -1;
+ goto out;
+ }
+
+#if defined(FLB_SYSTEM_WINDOWS)
+ name_b = flb_strdup(file->real_name);
+ if (!name_b) {
+ flb_errno();
+ ret = -1;
+ goto out;
+ }
+
+ base_b = basename(name_b);
+ ret = _stricmp(base_a, base_b);
+#else
+ name_b = flb_strdup(file->real_name);
+ if (!name_b) {
+ flb_errno();
+ ret = -1;
+ goto out;
+ }
+ base_b = basename(name_b);
+ ret = strcmp(base_a, base_b);
+#endif
+
+ out:
+ flb_free(name_a);
+ flb_free(name_b);
+ flb_free(base_a);
+
+ /* FYI: 'base_b' never points to a new allocation, no flb_free is needed */
+
+ return ret;
+}
+
+int flb_tail_file_name_dup(char *path, struct flb_tail_file *file);
+int flb_tail_file_to_event(struct flb_tail_file *file);
+int flb_tail_file_chunk(struct flb_tail_file *file);
+int flb_tail_file_append(char *path, struct stat *st, int mode,
+ struct flb_tail_config *ctx);
+void flb_tail_file_remove(struct flb_tail_file *file);
+int flb_tail_file_remove_all(struct flb_tail_config *ctx);
+char *flb_tail_file_name(struct flb_tail_file *file);
+int flb_tail_file_is_rotated(struct flb_tail_config *ctx,
+ struct flb_tail_file *file);
+int flb_tail_file_rotated(struct flb_tail_file *file);
+int flb_tail_file_purge(struct flb_input_instance *ins,
+ struct flb_config *config, void *context);
+int flb_tail_pack_line_map(struct flb_time *time, char **data,
+ size_t *data_size, struct flb_tail_file *file,
+ size_t processed_bytes);
+int flb_tail_file_pack_line(struct flb_time *time, char *data, size_t data_size,
+ struct flb_tail_file *file, size_t processed_bytes);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/tail_file_internal.h b/src/fluent-bit/plugins/in_tail/tail_file_internal.h
new file mode 100644
index 000000000..6d95c87c1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_file_internal.h
@@ -0,0 +1,130 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TAIL_INTERNAL_H
+#define FLB_TAIL_INTERNAL_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#ifdef FLB_HAVE_PARSER
+#include <fluent-bit/multiline/flb_ml.h>
+#endif
+
+#include "tail.h"
+#include "tail_config.h"
+
+struct flb_tail_file {
+ /* Inotify */
+ int watch_fd;
+ /* file lookup info */
+ int fd;
+ int64_t size;
+ int64_t offset;
+ int64_t last_line;
+ uint64_t dev_id;
+ uint64_t inode;
+ uint64_t link_inode;
+ int is_link;
+ char *name; /* target file name given by scan routine */
+ char *real_name; /* real file name in the file system */
+ char *orig_name; /* original file name (before rotation) */
+ size_t name_len;
+ size_t orig_name_len;
+ time_t rotated;
+ int64_t pending_bytes;
+
+ /* dynamic tag for this file */
+ int tag_len;
+ char *tag_buf;
+
+ /* OLD multiline */
+ time_t mult_flush_timeout; /* time when multiline started */
+ int mult_firstline; /* bool: mult firstline found ? */
+ int mult_firstline_append; /* bool: mult firstline appendable ? */
+    int mult_skipping;             /* skipping because ignore_older than ? */
+ int mult_keys; /* total number of buffered keys */
+
+ int mult_records; /* multiline records counter mult_sbuf */
+ msgpack_sbuffer mult_sbuf; /* temporary msgpack buffer */
+ msgpack_packer mult_pck; /* temporary msgpack packer */
+ struct flb_time mult_time; /* multiline time parsed from first line */
+
+ /* OLD docker mode */
+ time_t dmode_flush_timeout; /* time when docker mode started */
+ flb_sds_t dmode_buf; /* buffer for docker mode */
+ flb_sds_t dmode_lastline; /* last incomplete line */
+ bool dmode_complete; /* buffer contains completed log */
+ bool dmode_firstline; /* dmode mult firstline found ? */
+
+ /* multiline engine: file stream_id and local buffers */
+ uint64_t ml_stream_id;
+
+ /* content parsing, positions and buffer */
+ size_t parsed;
+ size_t buf_len;
+ size_t buf_size;
+ char *buf_data;
+
+ /*
+     * This value represents the number of bytes processed by process_content()
+ * in the last iteration.
+ */
+ size_t last_processed_bytes;
+
+ /*
+ * Long-lines handling: this flag is enabled when a previous line was
+ * too long and the buffer did not contain a \n, so when reaching the
+ * missing \n, skip that content and move forward.
+ *
+ * This flag is only set when Skip_Long_Lines is On.
+ */
+ int skip_next;
+
+ /* Did the plugin already warn the user about long lines ? */
+ int skip_warn;
+
+ /* Opaque data type for specific fs-event backend data */
+ void *fs_backend;
+
+ /* database reference */
+ uint64_t db_id;
+
+ uint64_t hash_bits;
+ flb_sds_t hash_key;
+
+ /* There are dedicated log event encoders for
+ * single and multi line events because I am respecting
+ * the old behavior which resulted in grouping both types
+ * of logs in tail_file.c but I don't know if this is
+ * strictly necessary.
+ */
+ struct flb_log_event_encoder *ml_log_event_encoder;
+ struct flb_log_event_encoder *sl_log_event_encoder;
+
+ /* reference */
+ int tail_mode;
+ struct flb_tail_config *config;
+ struct mk_list _head;
+ struct mk_list _rotate_head;
+};
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/tail_fs.h b/src/fluent-bit/plugins/in_tail/tail_fs.h
new file mode 100644
index 000000000..948954333
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_fs.h
@@ -0,0 +1,96 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TAIL_FS_H
+#define FLB_TAIL_FS_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+
+#include "tail_config.h"
+#include "tail_file_internal.h"
+
+#include "tail_fs_stat.h"
+#ifdef FLB_HAVE_INOTIFY
+#include "tail_fs_inotify.h"
+#endif
+
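+/*
+ * Inline dispatchers: when inotify support is compiled in and
+ * ctx->inotify_watcher is enabled, route to the inotify backend, otherwise
+ * fall back to the stat(2) polling backend.
+ */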
+static inline int flb_tail_fs_init(struct flb_input_instance *in,
+ struct flb_tail_config *ctx, struct flb_config *config)
+{
+#ifdef FLB_HAVE_INOTIFY
+ if (ctx->inotify_watcher) {
+ return flb_tail_fs_inotify_init(in, ctx, config);
+ }
+#endif
+ return flb_tail_fs_stat_init(in, ctx, config);
+}
+
+static inline void flb_tail_fs_pause(struct flb_tail_config *ctx)
+{
+#ifdef FLB_HAVE_INOTIFY
+ if (ctx->inotify_watcher) {
+ return flb_tail_fs_inotify_pause(ctx);
+ }
+#endif
+ return flb_tail_fs_stat_pause(ctx);
+}
+
+static inline void flb_tail_fs_resume(struct flb_tail_config *ctx)
+{
+#ifdef FLB_HAVE_INOTIFY
+ if (ctx->inotify_watcher) {
+ return flb_tail_fs_inotify_resume(ctx);
+ }
+#endif
+ return flb_tail_fs_stat_resume(ctx);
+}
+
+static inline int flb_tail_fs_add(struct flb_tail_config *ctx, struct flb_tail_file *file)
+{
+#ifdef FLB_HAVE_INOTIFY
+ if (ctx->inotify_watcher) {
+ return flb_tail_fs_inotify_add(file);
+ }
+#endif
+ return flb_tail_fs_stat_add(file);
+}
+
+static inline int flb_tail_fs_remove(struct flb_tail_config *ctx, struct flb_tail_file *file)
+{
+#ifdef FLB_HAVE_INOTIFY
+ if (ctx->inotify_watcher) {
+ return flb_tail_fs_inotify_remove(file);
+ }
+#endif
+ return flb_tail_fs_stat_remove(file);
+}
+
+static inline int flb_tail_fs_exit(struct flb_tail_config *ctx)
+{
+#ifdef FLB_HAVE_INOTIFY
+ if (ctx->inotify_watcher) {
+ return flb_tail_fs_inotify_exit(ctx);
+ }
+#endif
+ return flb_tail_fs_stat_exit(ctx);
+}
+
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/tail_fs_inotify.c b/src/fluent-bit/plugins/in_tail/tail_fs_inotify.c
new file mode 100644
index 000000000..59d10ca08
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_fs_inotify.c
@@ -0,0 +1,433 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _DEFAULT_SOURCE
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/inotify.h>
+
+#include "tail_config.h"
+#include "tail_file.h"
+#include "tail_db.h"
+#include "tail_signal.h"
+
+#include <limits.h>
+#include <fcntl.h>
+
+#include <sys/ioctl.h>
+
+static int debug_event_mask(struct flb_tail_config *ctx,
+ struct flb_tail_file *file,
+ uint32_t mask)
+{
+ flb_sds_t buf;
+ int buf_size = 256;
+
+ /* Only enter this function if debug mode is allowed */
+ if (flb_log_check(FLB_LOG_DEBUG) == 0) {
+ return 0;
+ }
+
+ if (file) {
+ buf_size = file->name_len + 128;
+ }
+
+ if (buf_size < 256) {
+ buf_size = 256;
+ }
+
+ /* Create buffer */
+ buf = flb_sds_create_size(buf_size);
+ if (!buf) {
+ return -1;
+ }
+
+ /* Print info into sds */
+ if (file) {
+ flb_sds_printf(&buf, "inode=%"PRIu64", %s, events: ", file->inode, file->name);
+ }
+ else {
+ flb_sds_printf(&buf, "events: ");
+ }
+
+ if (mask & IN_ATTRIB) {
+ flb_sds_printf(&buf, "IN_ATTRIB ");
+ }
+ if (mask & IN_IGNORED) {
+ flb_sds_printf(&buf, "IN_IGNORED ");
+ }
+ if (mask & IN_MODIFY) {
+ flb_sds_printf(&buf, "IN_MODIFY ");
+ }
+ if (mask & IN_MOVE_SELF) {
+ flb_sds_printf(&buf, "IN_MOVE_SELF ");
+ }
+ if (mask & IN_Q_OVERFLOW) {
+ flb_sds_printf(&buf, "IN_Q_OVERFLOW ");
+ }
+
+ flb_plg_debug(ctx->ins, "%s", buf);
+ flb_sds_destroy(buf);
+
+ return 0;
+}
+
+static int tail_fs_add(struct flb_tail_file *file, int check_rotated)
+{
+ int flags;
+ int watch_fd;
+ char *name;
+ struct flb_tail_config *ctx = file->config;
+
+ /*
+ * If there is no watcher associated, we only want to monitor events if
+ * this file is rotated to somewhere. Note at this point we are polling
+ * lines from the file and once we reach EOF (and a watch_fd exists),
+ * we update the flags to receive notifications.
+ */
+ flags = IN_ATTRIB | IN_IGNORED | IN_MODIFY | IN_Q_OVERFLOW;
+
+ if (check_rotated == FLB_TRUE) {
+ flags |= IN_MOVE_SELF;
+ }
+
+ /*
+     * Double check the real name of the file associated with the inode:
+ *
+     * The inotify interface in the kernel uses the inode number as the real
+     * reference for the file we have opened. If for some reason the file we
+     * are pointing to in file->name has been rotated and not updated, we
+     * might not add the watch to the real file we aim for.
+ *
+ * A case like this can generate the issue:
+ *
+ * 1. inode=1 : file a.log is being watched
+ * 2. inode=1 : file a.log is rotated to a.log.1, but notification not
+ * delivered yet.
+ * 3. inode=2 : new file 'a.log' is created
+     * 4. inode=2 : the scan_path routine discovers the new 'a.log' file
+ * 5. inode=2 : add an inotify watcher for 'a.log'
+     * 6. conflict: inotify_add_watch() receives the path 'a.log', so the
+     *              watch would end up on the new inode=2 file instead of
+     *              the rotated inode=1 file this routine was called for.
+ */
+
+ name = flb_tail_file_name(file);
+ if (!name) {
+ flb_plg_error(ctx->ins, "inode=%"PRIu64" cannot get real filename for inotify",
+ file->inode);
+ return -1;
+ }
+
+ /* Register or update the flags */
+ watch_fd = inotify_add_watch(ctx->fd_notify, name, flags);
+ flb_free(name);
+
+ if (watch_fd == -1) {
+ flb_errno();
+ if (errno == ENOSPC) {
+ flb_plg_error(ctx->ins, "inotify: The user limit on the total "
+ "number of inotify watches was reached or the kernel "
+ "failed to allocate a needed resource (ENOSPC)");
+ }
+ return -1;
+ }
+ file->watch_fd = watch_fd;
+ flb_plg_info(ctx->ins, "inotify_fs_add(): inode=%"PRIu64" watch_fd=%i name=%s",
+ file->inode, watch_fd, file->name);
+ return 0;
+}
+
+static int flb_tail_fs_add_rotated(struct flb_tail_file *file)
+{
+ return tail_fs_add(file, FLB_FALSE);
+}
+
+static int tail_fs_event(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ int64_t offset;
+ struct mk_list *head;
+ struct mk_list *tmp;
+ struct flb_tail_config *ctx = in_context;
+ struct flb_tail_file *file = NULL;
+ struct inotify_event ev;
+ struct stat st;
+
+ /* Read the event */
+ ret = read(ctx->fd_notify, &ev, sizeof(struct inotify_event));
+ if (ret < 1) {
+ return -1;
+ }
+
+ /* Lookup watched file */
+ mk_list_foreach_safe(head, tmp, &ctx->files_event) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+ if (file->watch_fd != ev.wd) {
+ file = NULL;
+ continue;
+ }
+ break;
+ }
+
+ if (!file) {
+ return -1;
+ }
+
+ /* Debug event */
+ debug_event_mask(ctx, file, ev.mask);
+
+ if (ev.mask & IN_IGNORED) {
+ flb_plg_debug(ctx->ins, "inode=%"PRIu64" watch_fd=%i IN_IGNORED",
+ file->inode, ev.wd);
+ return -1;
+ }
+
+ /* Check file rotation (only if it has not been rotated before) */
+ if (ev.mask & IN_MOVE_SELF && file->rotated == 0) {
+ flb_plg_debug(ins, "inode=%"PRIu64" rotated IN_MOVE SELF '%s'",
+ file->inode, file->name);
+
+ /* A rotated file must be re-registered */
+ flb_tail_file_rotated(file);
+ flb_tail_fs_remove(ctx, file);
+ flb_tail_fs_add_rotated(file);
+ }
+
+ ret = fstat(file->fd, &st);
+ if (ret == -1) {
+ flb_plg_debug(ins, "inode=%"PRIu64" error stat(2) %s, removing",
+ file->inode, file->name);
+ flb_tail_file_remove(file);
+ return 0;
+ }
+ file->size = st.st_size;
+ file->pending_bytes = (file->size - file->offset);
+
+ /* File was removed ? */
+ if (ev.mask & IN_ATTRIB) {
+ /* Check if the file have been deleted */
+ if (st.st_nlink == 0) {
+ flb_plg_debug(ins, "inode=%"PRIu64" file has been deleted: %s",
+ file->inode, file->name);
+
+#ifdef FLB_HAVE_SQLDB
+ if (ctx->db) {
+ /* Remove file entry from the database */
+ flb_tail_db_file_delete(file, ctx);
+ }
+#endif
+ /* Remove file from the monitored list */
+ flb_tail_file_remove(file);
+ return 0;
+ }
+ }
+
+ if (ev.mask & IN_MODIFY) {
+ /*
+         * The file was modified; check how many new bytes
+         * we have.
+ */
+
+ /* Check if the file was truncated */
+ if (file->offset > st.st_size) {
+ offset = lseek(file->fd, 0, SEEK_SET);
+ if (offset == -1) {
+ flb_errno();
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "inode=%"PRIu64" file truncated %s",
+ file->inode, file->name);
+ file->offset = offset;
+ file->buf_len = 0;
+
+ /* Update offset in the database file */
+#ifdef FLB_HAVE_SQLDB
+ if (ctx->db) {
+ flb_tail_db_file_offset(file, ctx);
+ }
+#endif
+ }
+ }
+
+ /* Collect the data */
+ ret = in_tail_collect_event(file, config);
+ if (ret != FLB_TAIL_ERROR) {
+ /*
+         * Due to the read buffer size limit, there are some cases where the
+         * read operation cannot consume all new data available in one
+         * round; upon a successful read(2) some data can still remain.
+         *
+         * If that is the case, we record in the structure how many bytes
+         * are available 'now', so the routine that checks pending bytes can
+         * process the inotified file properly after an internal signal.
+         *
+         * The goal of deferring this routine is to avoid a blocking
+         * read(2) operation that might hurt performance. Let's just
+         * wait a second and do a good job.
+ */
+ tail_signal_pending(ctx);
+ }
+ else {
+ return ret;
+ }
+
+ return 0;
+}
+
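+/*
+ * Periodic progress check: walk the event-monitored files and signal the
+ * engine whenever any of them still has unread data pending.
+ */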
+static int in_tail_progress_check_callback(struct flb_input_instance *ins,
+ struct flb_config *config, void *context)
+{
+ int ret = 0;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_tail_config *ctx = context;
+ struct flb_tail_file *file;
+ int pending_data_detected;
+ struct stat st;
+
+ (void) config;
+
+ pending_data_detected = FLB_FALSE;
+
+ mk_list_foreach_safe(head, tmp, &ctx->files_event) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+
+ if (file->offset < file->size) {
+ pending_data_detected = FLB_TRUE;
+
+ continue;
+ }
+
+ ret = fstat(file->fd, &st);
+ if (ret == -1) {
+ flb_errno();
+ flb_plg_error(ins, "fstat error");
+
+ continue;
+ }
+
+ if (file->offset < st.st_size) {
+ file->size = st.st_size;
+ file->pending_bytes = (file->size - file->offset);
+
+ pending_data_detected = FLB_TRUE;
+ }
+ }
+
+ if (pending_data_detected) {
+ tail_signal_pending(ctx);
+ }
+
+ return 0;
+}
+
+/* File System events based on Inotify(2). Linux >= 2.6.32 is suggested */
+int flb_tail_fs_inotify_init(struct flb_input_instance *in,
+ struct flb_tail_config *ctx, struct flb_config *config)
+{
+ int fd;
+ int ret;
+
+ flb_plg_debug(ctx->ins, "flb_tail_fs_inotify_init() initializing inotify tail input");
+
+ /* Create inotify instance */
+ fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
+ if (fd == -1) {
+ flb_errno();
+ return -1;
+ }
+ flb_plg_debug(ctx->ins, "inotify watch fd=%i", fd);
+ ctx->fd_notify = fd;
+
+ /* This backend use Fluent Bit event-loop to trigger notifications */
+ ret = flb_input_set_collector_event(in, tail_fs_event,
+ ctx->fd_notify, config);
+ if (ret < 0) {
+ close(fd);
+ return -1;
+ }
+ ctx->coll_fd_fs1 = ret;
+
+ /* Register callback to check current tail offsets */
+ ret = flb_input_set_collector_time(in, in_tail_progress_check_callback,
+ ctx->progress_check_interval,
+ ctx->progress_check_interval_nsec,
+ config);
+ if (ret == -1) {
+ flb_tail_config_destroy(ctx);
+ return -1;
+ }
+ ctx->coll_fd_progress_check = ret;
+
+ return 0;
+}
+
+void flb_tail_fs_inotify_pause(struct flb_tail_config *ctx)
+{
+ flb_input_collector_pause(ctx->coll_fd_fs1, ctx->ins);
+}
+
+void flb_tail_fs_inotify_resume(struct flb_tail_config *ctx)
+{
+ flb_input_collector_resume(ctx->coll_fd_fs1, ctx->ins);
+}
+
+int flb_tail_fs_inotify_add(struct flb_tail_file *file)
+{
+ int ret;
+ struct flb_tail_config *ctx = file->config;
+
+ ret = tail_fs_add(file, FLB_TRUE);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "inode=%"PRIu64" cannot register file %s",
+ file->inode, file->name);
+ return -1;
+ }
+
+ return 0;
+}
+
+int flb_tail_fs_inotify_remove(struct flb_tail_file *file)
+{
+ struct flb_tail_config *ctx = file->config;
+
+ if (file->watch_fd == -1) {
+ return 0;
+ }
+
+ flb_plg_info(ctx->ins, "inotify_fs_remove(): inode=%"PRIu64" watch_fd=%i",
+ file->inode, file->watch_fd);
+
+ inotify_rm_watch(file->config->fd_notify, file->watch_fd);
+ file->watch_fd = -1;
+ return 0;
+}
+
+int flb_tail_fs_inotify_exit(struct flb_tail_config *ctx)
+{
+ return close(ctx->fd_notify);
+}
diff --git a/src/fluent-bit/plugins/in_tail/tail_fs_inotify.h b/src/fluent-bit/plugins/in_tail/tail_fs_inotify.h
new file mode 100644
index 000000000..128ab0624
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_fs_inotify.h
@@ -0,0 +1,37 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TAIL_FS_INOTIFY_H
+#define FLB_TAIL_FS_INOTIFY_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+
+#include "tail_config.h"
+#include "tail_file_internal.h"
+
+int flb_tail_fs_inotify_init(struct flb_input_instance *in,
+ struct flb_tail_config *ctx, struct flb_config *config);
+int flb_tail_fs_inotify_add(struct flb_tail_file *file);
+int flb_tail_fs_inotify_remove(struct flb_tail_file *file);
+int flb_tail_fs_inotify_exit(struct flb_tail_config *ctx);
+void flb_tail_fs_inotify_pause(struct flb_tail_config *ctx);
+void flb_tail_fs_inotify_resume(struct flb_tail_config *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/tail_fs_stat.c b/src/fluent-bit/plugins/in_tail/tail_fs_stat.c
new file mode 100644
index 000000000..6b312c9bd
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_fs_stat.c
@@ -0,0 +1,253 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _DEFAULT_SOURCE
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "tail_file.h"
+#include "tail_db.h"
+#include "tail_config.h"
+#include "tail_signal.h"
+
+#ifdef FLB_SYSTEM_WINDOWS
+#include "win32.h"
+#endif
+
+struct fs_stat {
+ /* last time check */
+ time_t checked;
+
+ /* previous status */
+ struct stat st;
+};
+
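+/*
+ * Timer-driven collector: compare the cached stat(2) info of each watched
+ * file against its current state and trigger collection when the mtime or
+ * size has changed.
+ */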
+static int tail_fs_event(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ struct mk_list *head;
+ struct mk_list *tmp;
+ struct flb_tail_config *ctx = in_context;
+ struct flb_tail_file *file = NULL;
+ struct fs_stat *fst;
+ struct stat st;
+ time_t t;
+
+ t = time(NULL);
+
+ /* Lookup watched file */
+ mk_list_foreach_safe(head, tmp, &ctx->files_event) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+ fst = file->fs_backend;
+
+ /* Check current status of the file */
+ ret = fstat(file->fd, &st);
+ if (ret == -1) {
+ flb_errno();
+ continue;
+ }
+
+ /* Check if the file was modified */
+ if ((fst->st.st_mtime != st.st_mtime) ||
+ (fst->st.st_size != st.st_size)) {
+ /* Update stat info and trigger the notification */
+ memcpy(&fst->st, &st, sizeof(struct stat));
+ fst->checked = t;
+ in_tail_collect_event(file, config);
+ }
+ }
+
+ return 0;
+}
+
+static int tail_fs_check(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ int64_t offset;
+ char *name;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_tail_config *ctx = in_context;
+ struct flb_tail_file *file = NULL;
+ struct fs_stat *fst;
+ struct stat st;
+
+ /* Lookup watched file */
+ mk_list_foreach_safe(head, tmp, &ctx->files_event) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+ fst = file->fs_backend;
+
+ ret = fstat(file->fd, &st);
+ if (ret == -1) {
+ flb_plg_debug(ctx->ins, "error stat(2) %s, removing", file->name);
+ flb_tail_file_remove(file);
+ continue;
+ }
+
+ /* Check if the file have been deleted */
+ if (st.st_nlink == 0) {
+ flb_plg_debug(ctx->ins, "file has been deleted: %s", file->name);
+#ifdef FLB_HAVE_SQLDB
+ if (ctx->db) {
+ /* Remove file entry from the database */
+ flb_tail_db_file_delete(file, ctx);
+ }
+#endif
+ flb_tail_file_remove(file);
+ continue;
+ }
+
+ /* Check if the file was truncated */
+ if (file->offset > st.st_size) {
+ offset = lseek(file->fd, 0, SEEK_SET);
+ if (offset == -1) {
+ flb_errno();
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "file truncated %s", file->name);
+ file->offset = offset;
+ file->buf_len = 0;
+ memcpy(&fst->st, &st, sizeof(struct stat));
+
+#ifdef FLB_HAVE_SQLDB
+ /* Update offset in database file */
+ if (ctx->db) {
+ flb_tail_db_file_offset(file, ctx);
+ }
+#endif
+ }
+
+ if (file->offset < st.st_size) {
+ file->pending_bytes = (st.st_size - file->offset);
+ tail_signal_pending(ctx);
+ }
+ else {
+ file->pending_bytes = 0;
+ }
+
+
+ /* Discover the current file name for the open file descriptor */
+ name = flb_tail_file_name(file);
+ if (!name) {
+ flb_plg_debug(ctx->ins, "could not resolve %s, removing", file->name);
+ flb_tail_file_remove(file);
+ continue;
+ }
+
+ /*
+         * Check if the file still exists. This method explicitly requires that
+         * the user is using an absolute path, otherwise we will be rotating the
+ * wrong file.
+ *
+ * flb_tail_target_file_name_cmp is a deeper compare than
+ * flb_tail_file_name_cmp. If applicable, it compares to the underlying
+ * real_name of the file.
+ */
+ if (flb_tail_file_is_rotated(ctx, file) == FLB_TRUE) {
+ flb_tail_file_rotated(file);
+ }
+ flb_free(name);
+
+ }
+
+ return 0;
+}
+
+/* File System events based on stat(2) */
+int flb_tail_fs_stat_init(struct flb_input_instance *in,
+ struct flb_tail_config *ctx, struct flb_config *config)
+{
+ int ret;
+
+ flb_plg_debug(ctx->ins, "flb_tail_fs_stat_init() initializing stat tail input");
+
+ /* Set a manual timer to collect events every 0.250 seconds */
+ ret = flb_input_set_collector_time(in, tail_fs_event,
+ 0, 250000000, config);
+ if (ret < 0) {
+ return -1;
+ }
+ ctx->coll_fd_fs1 = ret;
+
+ /* Set a manual timer to check deleted/rotated files every 2.5 seconds */
+ ret = flb_input_set_collector_time(in, tail_fs_check,
+ 2, 500000000, config);
+ if (ret < 0) {
+ return -1;
+ }
+ ctx->coll_fd_fs2 = ret;
+
+ return 0;
+}
+
+void flb_tail_fs_stat_pause(struct flb_tail_config *ctx)
+{
+ flb_input_collector_pause(ctx->coll_fd_fs1, ctx->ins);
+ flb_input_collector_pause(ctx->coll_fd_fs2, ctx->ins);
+}
+
+void flb_tail_fs_stat_resume(struct flb_tail_config *ctx)
+{
+ flb_input_collector_resume(ctx->coll_fd_fs1, ctx->ins);
+ flb_input_collector_resume(ctx->coll_fd_fs2, ctx->ins);
+}
+
+int flb_tail_fs_stat_add(struct flb_tail_file *file)
+{
+ int ret;
+ struct fs_stat *fst;
+
+ fst = flb_malloc(sizeof(struct fs_stat));
+ if (!fst) {
+ flb_errno();
+ return -1;
+ }
+
+ fst->checked = time(NULL);
+ ret = stat(file->name, &fst->st);
+ if (ret == -1) {
+ flb_errno();
+ flb_free(fst);
+ return -1;
+ }
+ file->fs_backend = fst;
+
+ return 0;
+}
+
+int flb_tail_fs_stat_remove(struct flb_tail_file *file)
+{
+ if (file->tail_mode == FLB_TAIL_EVENT) {
+ flb_free(file->fs_backend);
+ }
+ return 0;
+}
+
+int flb_tail_fs_stat_exit(struct flb_tail_config *ctx)
+{
+ (void) ctx;
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_tail/tail_fs_stat.h b/src/fluent-bit/plugins/in_tail/tail_fs_stat.h
new file mode 100644
index 000000000..21a0704cb
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_fs_stat.h
@@ -0,0 +1,37 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TAIL_FS_STAT_H
+#define FLB_TAIL_FS_STAT_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+
+#include "tail_config.h"
+#include "tail_file_internal.h"
+
+int flb_tail_fs_stat_init(struct flb_input_instance *in,
+ struct flb_tail_config *ctx, struct flb_config *config);
+int flb_tail_fs_stat_add(struct flb_tail_file *file);
+int flb_tail_fs_stat_remove(struct flb_tail_file *file);
+int flb_tail_fs_stat_exit(struct flb_tail_config *ctx);
+void flb_tail_fs_stat_pause(struct flb_tail_config *ctx);
+void flb_tail_fs_stat_resume(struct flb_tail_config *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/tail_multiline.c b/src/fluent-bit/plugins/in_tail/tail_multiline.c
new file mode 100644
index 000000000..71c031014
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_multiline.c
@@ -0,0 +1,606 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_kv.h>
+
+#include "tail_config.h"
+#include "tail_multiline.h"
+
+static int tail_mult_append(struct flb_parser *parser,
+ struct flb_tail_config *ctx)
+{
+ struct flb_tail_mult *mp;
+
+ mp = flb_malloc(sizeof(struct flb_tail_mult));
+ if (!mp) {
+ flb_errno();
+ return -1;
+ }
+
+ mp->parser = parser;
+ mk_list_add(&mp->_head, &ctx->mult_parsers);
+
+ return 0;
+}
+
+int flb_tail_mult_create(struct flb_tail_config *ctx,
+ struct flb_input_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ const char *tmp;
+ struct mk_list *head;
+ struct flb_parser *parser;
+ struct flb_kv *kv;
+
+ if (ctx->multiline_flush <= 0) {
+ ctx->multiline_flush = 1;
+ }
+
+ mk_list_init(&ctx->mult_parsers);
+
+ /* Get firstline parser */
+ tmp = flb_input_get_property("parser_firstline", ins);
+ if (!tmp) {
+ flb_plg_error(ctx->ins, "multiline: no parser defined for firstline");
+ return -1;
+ }
+ parser = flb_parser_get(tmp, config);
+ if (!parser) {
+ flb_plg_error(ctx->ins, "multiline: invalid parser '%s'", tmp);
+ return -1;
+ }
+
+ ctx->mult_parser_firstline = parser;
+
+ /* Read all multiline rules */
+ mk_list_foreach(head, &ins->properties) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+ if (strcasecmp("parser_firstline", kv->key) == 0) {
+ continue;
+ }
+
+ if (strncasecmp("parser_", kv->key, 7) == 0) {
+ parser = flb_parser_get(kv->val, config);
+ if (!parser) {
+ flb_plg_error(ctx->ins, "multiline: invalid parser '%s'", kv->val);
+ return -1;
+ }
+
+ ret = tail_mult_append(parser, ctx);
+ if (ret == -1) {
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int flb_tail_mult_destroy(struct flb_tail_config *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_tail_mult *mp;
+
+ if (ctx->multiline == FLB_FALSE) {
+ return 0;
+ }
+
+ mk_list_foreach_safe(head, tmp, &ctx->mult_parsers) {
+ mp = mk_list_entry(head, struct flb_tail_mult, _head);
+ mk_list_del(&mp->_head);
+ flb_free(mp);
+ }
+
+ return 0;
+}
+
+/* Process the result of a firstline match */
+int flb_tail_mult_process_first(time_t now,
+ char *buf, size_t size,
+ struct flb_time *out_time,
+ struct flb_tail_file *file,
+ struct flb_tail_config *ctx)
+{
+ int ret;
+ size_t off;
+ msgpack_object map;
+ msgpack_unpacked result;
+
+ /* If a previous multiline context already exists, flush first */
+ if (file->mult_firstline && !file->mult_skipping) {
+ flb_tail_mult_flush(file, ctx);
+ }
+
+ /* Mark this as the first line of a multiline message */
+ file->mult_firstline = FLB_TRUE;
+
+ /* Validate obtained time, if not set, set the current time */
+ if (flb_time_to_nanosec(out_time) == 0L) {
+ flb_time_get(out_time);
+ }
+
+ /* Should we skip this multiline record? */
+ if (ctx->ignore_older > 0) {
+ if ((now - ctx->ignore_older) > out_time->tm.tv_sec) {
+ flb_free(buf);
+ file->mult_skipping = FLB_TRUE;
+ file->mult_firstline = FLB_TRUE;
+
+ /* we expect more data to skip */
+ return FLB_TAIL_MULT_MORE;
+ }
+ }
+
+ /* Re-initiate buffers */
+ msgpack_sbuffer_init(&file->mult_sbuf);
+ msgpack_packer_init(&file->mult_pck, &file->mult_sbuf, msgpack_sbuffer_write);
+
+ /*
+ * flb_parser_do() always returns a msgpack buffer, so we tweak our
+ * local msgpack reference to avoid an extra allocation. The only
+ * concern is that we don't know the real size of the allocated
+ * memory, so we assume it's just 'out_size'.
+ */
+ file->mult_flush_timeout = now + (ctx->multiline_flush - 1);
+ file->mult_sbuf.data = buf;
+ file->mult_sbuf.size = size;
+ file->mult_sbuf.alloc = size;
+
+ /* Set multiline status */
+ file->mult_firstline = FLB_TRUE;
+ file->mult_skipping = FLB_FALSE;
+ flb_time_copy(&file->mult_time, out_time);
+
+ off = 0;
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, buf, size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ msgpack_sbuffer_destroy(&file->mult_sbuf);
+ msgpack_unpacked_destroy(&result);
+ return FLB_TAIL_MULT_NA;
+ }
+
+ map = result.data;
+ file->mult_keys = map.via.map.size;
+ msgpack_unpacked_destroy(&result);
+
+ /* We expect more data */
+ return FLB_TAIL_MULT_MORE;
+}
+
+/* Append a raw log entry to the last structured field in the mult buffer */
+static inline void flb_tail_mult_append_raw(char *buf, int size,
+ struct flb_tail_file *file,
+ struct flb_tail_config *config)
+{
+ /* Append the raw string */
+ msgpack_pack_str(&file->mult_pck, size);
+ msgpack_pack_str_body(&file->mult_pck, buf, size);
+}
+
+/* Check whether the value of the last key in a map is a string */
+static inline int is_last_key_val_string(char *buf, size_t size)
+{
+ int ret = FLB_FALSE;
+ size_t off;
+ msgpack_unpacked result;
+ msgpack_object v;
+ msgpack_object root;
+
+ off = 0;
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, buf, size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ return ret;
+ }
+
+ root = result.data;
+ if (root.type != MSGPACK_OBJECT_MAP) {
+ ret = FLB_FALSE;
+ }
+ else {
+ if (root.via.map.size == 0) {
+ ret = FLB_FALSE;
+ }
+ else {
+ v = root.via.map.ptr[root.via.map.size - 1].val;
+ if (v.type == MSGPACK_OBJECT_STR) {
+ ret = FLB_TRUE;
+ }
+ }
+ }
+
+ msgpack_unpacked_destroy(&result);
+ return ret;
+}
+
+int flb_tail_mult_process_content(time_t now,
+ char *buf, size_t len,
+ struct flb_tail_file *file,
+ struct flb_tail_config *ctx,
+ size_t processed_bytes)
+{
+ int ret;
+ size_t off;
+ void *out_buf;
+ size_t out_size = 0;
+ struct mk_list *head;
+ struct flb_tail_mult *mult_parser = NULL;
+ struct flb_time out_time = {0};
+ msgpack_object map;
+ msgpack_unpacked result;
+
+ /* Always check if this line is the beginning of a new multiline message */
+ ret = flb_parser_do(ctx->mult_parser_firstline,
+ buf, len,
+ &out_buf, &out_size, &out_time);
+ if (ret >= 0) {
+ /*
+ * The content is a candidate for a firstline, but we need to perform
+ * an extra mandatory check: the value of the last key must be
+ * a string, otherwise no string concatenation with continuation lines
+ * will be possible.
+ */
+ ret = is_last_key_val_string(out_buf, out_size);
+ if (ret == FLB_TRUE)
+ file->mult_firstline_append = FLB_TRUE;
+ else
+ file->mult_firstline_append = FLB_FALSE;
+
+ flb_tail_mult_process_first(now, out_buf, out_size, &out_time,
+ file, ctx);
+ return FLB_TAIL_MULT_MORE;
+ }
+
+ if (file->mult_skipping == FLB_TRUE) {
+ return FLB_TAIL_MULT_MORE;
+ }
+
+ /*
+ * Reaching this point means the data is a continuation line; iterate
+ * over the parsers trying to find a match.
+ */
+ out_buf = NULL;
+ mk_list_foreach(head, &ctx->mult_parsers) {
+ mult_parser = mk_list_entry(head, struct flb_tail_mult, _head);
+
+ /* Process line text with current parser */
+ out_buf = NULL;
+ out_size = 0;
+ ret = flb_parser_do(mult_parser->parser,
+ buf, len,
+ &out_buf, &out_size, &out_time);
+ if (ret < 0) {
+ mult_parser = NULL;
+ continue;
+ }
+
+ /* The line was processed, break the loop and buffer the data */
+ break;
+ }
+
+ if (!mult_parser) {
+ /*
+ * If no parser matched, the raw log line must be appended
+ * to the last structured field.
+ */
+ if (file->mult_firstline && file->mult_firstline_append) {
+ flb_tail_mult_append_raw(buf, len, file, ctx);
+ }
+ else {
+ flb_tail_file_pack_line(NULL, buf, len, file, processed_bytes);
+ }
+
+ return FLB_TAIL_MULT_MORE;
+ }
+
+ off = 0;
+ msgpack_unpacked_init(&result);
+ msgpack_unpack_next(&result, out_buf, out_size, &off);
+ map = result.data;
+
+ /* Append new map to our local msgpack buffer */
+ file->mult_keys += map.via.map.size;
+ msgpack_unpacked_destroy(&result);
+ msgpack_sbuffer_write(&file->mult_sbuf, out_buf, out_size);
+ flb_free(out_buf);
+
+ return FLB_TAIL_MULT_MORE;
+}
+
+static int flb_tail_mult_pack_line_body(
+ struct flb_log_event_encoder *context,
+ struct flb_tail_file *file)
+{
+ size_t adjacent_object_offset;
+ size_t continuation_length;
+ msgpack_unpacked adjacent_object;
+ msgpack_unpacked current_object;
+ size_t entry_index;
+ msgpack_object entry_value;
+ msgpack_object entry_key;
+ msgpack_object_map *data_map;
+ int map_size;
+ size_t offset;
+ struct flb_tail_config *config;
+ int result;
+
+ result = FLB_EVENT_ENCODER_SUCCESS;
+ config = (struct flb_tail_config *) file->config;
+
+ /* New Map size */
+ map_size = file->mult_keys;
+
+ if (file->config->path_key != NULL) {
+ map_size++;
+
+ result = flb_log_event_encoder_append_body_values(
+ context,
+ FLB_LOG_EVENT_CSTRING_VALUE(config->path_key),
+ FLB_LOG_EVENT_CSTRING_VALUE(file->name));
+ }
+
+
+ msgpack_unpacked_init(&current_object);
+ msgpack_unpacked_init(&adjacent_object);
+
+ offset = 0;
+
+ while (result == FLB_EVENT_ENCODER_SUCCESS &&
+ msgpack_unpack_next(&current_object,
+ file->mult_sbuf.data,
+ file->mult_sbuf.size,
+ &offset) == MSGPACK_UNPACK_SUCCESS) {
+ if (current_object.data.type != MSGPACK_OBJECT_MAP) {
+ continue;
+ }
+
+ data_map = &current_object.data.via.map;
+
+ continuation_length = 0;
+
+ for (entry_index = 0; entry_index < data_map->size; entry_index++) {
+ entry_key = data_map->ptr[entry_index].key;
+ entry_value = data_map->ptr[entry_index].val;
+
+ result = flb_log_event_encoder_append_body_msgpack_object(context,
+ &entry_key);
+
+ if (result != FLB_EVENT_ENCODER_SUCCESS) {
+ break;
+ }
+
+ /* Check if this is the last entry in the map; if so, and the
+ * value object is a string, add up the lengths of all trailing
+ * string objects that follow the map so they can be appended
+ * to the value.
+ */
+ if (entry_index + 1 == data_map->size &&
+ entry_value.type == MSGPACK_OBJECT_STR) {
+ adjacent_object_offset = offset;
+
+ while (msgpack_unpack_next(
+ &adjacent_object,
+ file->mult_sbuf.data,
+ file->mult_sbuf.size,
+ &adjacent_object_offset) == MSGPACK_UNPACK_SUCCESS) {
+ if (adjacent_object.data.type != MSGPACK_OBJECT_STR) {
+ break;
+ }
+
+ /* Sum total bytes to append */
+ continuation_length += adjacent_object.data.via.str.size + 1;
+ }
+
+ result = flb_log_event_encoder_append_body_string_length(
+ context,
+ entry_value.via.str.size +
+ continuation_length);
+
+ if (result != FLB_EVENT_ENCODER_SUCCESS) {
+ break;
+ }
+
+ result = flb_log_event_encoder_append_body_string_body(
+ context,
+ (char *) entry_value.via.str.ptr,
+ entry_value.via.str.size);
+
+ if (result != FLB_EVENT_ENCODER_SUCCESS) {
+ break;
+ }
+
+ if (continuation_length > 0) {
+ adjacent_object_offset = offset;
+
+ while (msgpack_unpack_next(
+ &adjacent_object,
+ file->mult_sbuf.data,
+ file->mult_sbuf.size,
+ &adjacent_object_offset) == MSGPACK_UNPACK_SUCCESS) {
+ if (adjacent_object.data.type != MSGPACK_OBJECT_STR) {
+ break;
+ }
+
+ result = flb_log_event_encoder_append_body_string_body(
+ context,
+ "\n",
+ 1);
+
+ if (result != FLB_EVENT_ENCODER_SUCCESS) {
+ break;
+ }
+
+ result = flb_log_event_encoder_append_body_string_body(
+ context,
+ (char *) adjacent_object.data.via.str.ptr,
+ adjacent_object.data.via.str.size);
+
+ if (result != FLB_EVENT_ENCODER_SUCCESS) {
+ break;
+ }
+ }
+ }
+ }
+ else {
+ result = flb_log_event_encoder_append_body_msgpack_object(context,
+ &entry_value);
+ }
+ }
+ }
+
+ msgpack_unpacked_destroy(&current_object);
+ msgpack_unpacked_destroy(&adjacent_object);
+
+ /* Reset status */
+ file->mult_firstline = FLB_FALSE;
+ file->mult_skipping = FLB_FALSE;
+ file->mult_keys = 0;
+ file->mult_flush_timeout = 0;
+
+ msgpack_sbuffer_destroy(&file->mult_sbuf);
+
+ file->mult_sbuf.data = NULL;
+
+ flb_time_zero(&file->mult_time);
+
+ return result;
+}
+
+/* Flush any multiline context data into outgoing buffers */
+int flb_tail_mult_flush(struct flb_tail_file *file, struct flb_tail_config *ctx)
+{
+ int result;
+
+ /* nothing to flush */
+ if (file->mult_firstline == FLB_FALSE) {
+ return -1;
+ }
+
+ if (file->mult_keys == 0) {
+ return -1;
+ }
+
+ result = flb_log_event_encoder_begin_record(file->ml_log_event_encoder);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_timestamp(
+ file->ml_log_event_encoder, &file->mult_time);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_tail_mult_pack_line_body(
+ file->ml_log_event_encoder,
+ file);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_commit_record(
+ file->ml_log_event_encoder);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ctx->ins,
+ file->tag_buf,
+ file->tag_len,
+ file->ml_log_event_encoder->output_buffer,
+ file->ml_log_event_encoder->output_length);
+ result = 0;
+ }
+ else {
+ flb_plg_error(file->config->ins, "error packing event: %d", result);
+
+ result = -1;
+ }
+
+ flb_log_event_encoder_reset(file->ml_log_event_encoder);
+
+ return result;
+}
+
+static void file_pending_flush(struct flb_tail_config *ctx,
+ struct flb_tail_file *file, time_t now)
+{
+ if (file->mult_flush_timeout > now) {
+ return;
+ }
+
+ if (file->mult_firstline == FLB_FALSE) {
+ if (file->mult_sbuf.data == NULL || file->mult_sbuf.size <= 0) {
+ return;
+ }
+ }
+
+ flb_tail_mult_flush(file, ctx);
+}
+
+int flb_tail_mult_pending_flush_all(struct flb_tail_config *ctx)
+{
+ time_t expired;
+ struct mk_list *head;
+ struct flb_tail_file *file;
+
+ expired = time(NULL) + 3600;
+
+ /* Iterate static files with pending bytes */
+ mk_list_foreach(head, &ctx->files_static) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+ file_pending_flush(ctx, file, expired);
+ }
+
+ /* Iterate promoted event files with pending bytes */
+ mk_list_foreach(head, &ctx->files_event) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+ file_pending_flush(ctx, file, expired);
+ }
+
+ return 0;
+}
+
+int flb_tail_mult_pending_flush(struct flb_input_instance *ins,
+ struct flb_config *config, void *context)
+{
+ time_t now;
+ struct mk_list *head;
+ struct flb_tail_file *file;
+ struct flb_tail_config *ctx = context;
+
+ now = time(NULL);
+
+ /* Iterate static files with pending bytes */
+ mk_list_foreach(head, &ctx->files_static) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+
+ file_pending_flush(ctx, file, now);
+ }
+
+ /* Iterate promoted event files with pending bytes */
+ mk_list_foreach(head, &ctx->files_event) {
+ file = mk_list_entry(head, struct flb_tail_file, _head);
+
+ file_pending_flush(ctx, file, now);
+ }
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_tail/tail_multiline.h b/src/fluent-bit/plugins/in_tail/tail_multiline.h
new file mode 100644
index 000000000..d7f7539b1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_multiline.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TAIL_TAIL_MULT_H
+#define FLB_TAIL_TAIL_MULT_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+
+#include "tail_config.h"
+#include "tail_file.h"
+
+#define FLB_TAIL_MULT_NA -1 /* not applicable as a multiline stream */
+#define FLB_TAIL_MULT_DONE 0 /* finished a multiline stream */
+#define FLB_TAIL_MULT_MORE 1 /* expect more lines to come */
+#define FLB_TAIL_MULT_FLUSH "4" /* max flush time for multiline: 4 seconds */
+
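+/*
+ * A minimal usage sketch of the return codes above (field names are
+ * hypothetical, not taken from the plugin): if parser_firstline matches
+ * the first line of a stack trace and yields a map whose last value is a
+ * string, e.g.
+ *
+ *   {"time": "...", "log": "Exception in thread \"main\" ..."}
+ *
+ * flb_tail_mult_process_content() returns FLB_TAIL_MULT_MORE and buffers
+ * the record. Each following "    at com.example.Foo(...)" line that
+ * matches no parser is appended as a raw string, and at flush time those
+ * continuation lines are concatenated to the last string value separated
+ * by '\n'.
+ */
+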
+struct flb_tail_mult {
+ struct flb_parser *parser;
+ struct mk_list _head;
+};
+
+int flb_tail_mult_create(struct flb_tail_config *ctx,
+ struct flb_input_instance *ins,
+ struct flb_config *config);
+
+int flb_tail_mult_destroy(struct flb_tail_config *ctx);
+
+int flb_tail_mult_process_content(time_t now,
+ char *buf, size_t len,
+ struct flb_tail_file *file,
+ struct flb_tail_config *ctx,
+ size_t processed_bytes);
+int flb_tail_mult_flush(struct flb_tail_file *file,
+ struct flb_tail_config *ctx);
+
+int flb_tail_mult_pending_flush(struct flb_input_instance *ins,
+ struct flb_config *config, void *context);
+int flb_tail_mult_pending_flush_all(struct flb_tail_config *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/tail_scan.c b/src/fluent-bit/plugins/in_tail/tail_scan.c
new file mode 100644
index 000000000..ccb8e070a
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_scan.c
@@ -0,0 +1,71 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include "tail.h"
+#include "tail_config.h"
+
+/*
+ * Include proper scan backend
+ */
+#ifdef FLB_SYSTEM_WINDOWS
+#include "tail_scan_win32.c"
+#else
+#include "tail_scan_glob.c"
+#endif
+
+int flb_tail_scan(struct mk_list *path_list, struct flb_tail_config *ctx)
+{
+ int ret;
+ struct mk_list *head;
+ struct flb_slist_entry *pattern;
+
+ mk_list_foreach(head, path_list) {
+ pattern = mk_list_entry(head, struct flb_slist_entry, _head);
+ ret = tail_scan_path(pattern->str, ctx);
+ if (ret == -1) {
+ flb_plg_warn(ctx->ins, "error scanning path: %s", pattern->str);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "%i new files found on path '%s'",
+ ret, pattern->str);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Triggered by refresh_interval, this re-scans the configured paths looking for new files
+ * that match the original path pattern.
+ */
+int flb_tail_scan_callback(struct flb_input_instance *ins,
+ struct flb_config *config, void *context)
+{
+ int ret;
+ struct flb_tail_config *ctx = context;
+ (void) config;
+
+ ret = flb_tail_scan(ctx->path_list, ctx);
+ if (ret > 0) {
+ flb_plg_debug(ins, "%i new files found", ret);
+ }
+
+ return ret;
+}
diff --git a/src/fluent-bit/plugins/in_tail/tail_scan.h b/src/fluent-bit/plugins/in_tail/tail_scan.h
new file mode 100644
index 000000000..ec3c96a2a
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_scan.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TAIL_SCAN_H
+#define FLB_TAIL_SCAN_H
+
+#include "tail_config.h"
+
+int flb_tail_scan(struct mk_list *path, struct flb_tail_config *ctx);
+int flb_tail_scan_callback(struct flb_input_instance *ins,
+ struct flb_config *config, void *context);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/tail_scan_glob.c b/src/fluent-bit/plugins/in_tail/tail_scan_glob.c
new file mode 100644
index 000000000..b330b7c3b
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_scan_glob.c
@@ -0,0 +1,278 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <glob.h>
+#include <fnmatch.h>
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+
+#include "tail.h"
+#include "tail_file.h"
+#include "tail_signal.h"
+#include "tail_scan.h"
+#include "tail_config.h"
+
+/* Define GLOB_TILDE if the libc does not provide it */
+#ifndef GLOB_TILDE
+#define GLOB_TILDE 1<<2 /* use GNU Libc value */
+#define UNSUP_TILDE 1
+
+/* we need these extra headers for path resolution */
+#include <limits.h>
+#include <sys/types.h>
+#include <pwd.h>
+
+static char *expand_tilde(const char *path)
+{
+ int len;
+ char user[256];
+ char *p = NULL;
+ char *dir = NULL;
+ char *tmp = NULL;
+ struct passwd *uinfo = NULL;
+
+ if (path[0] == '~') {
+ p = strchr(path, '/');
+
+ if (p) {
+ /* check case '~/' */
+ if ((p - path) == 1) {
+ dir = getenv("HOME");
+ if (!dir) {
+ return path;
+ }
+ }
+ else {
+ /*
+ * the path refers to a different user (~user/abc); as a first
+ * step, grab the user name.
+ */
+ len = (p - path) - 1;
+ memcpy(user, path + 1, len);
+ user[len] = '\0';
+
+ /* use getpwnam() to resolve user information */
+ uinfo = getpwnam(user);
+ if (!uinfo) {
+ return path;
+ }
+
+ dir = uinfo->pw_dir;
+ }
+ }
+ else {
+ dir = getenv("HOME");
+ if (!dir) {
+ return path;
+ }
+ }
+
+ if (p) {
+ tmp = flb_malloc(PATH_MAX);
+ if (!tmp) {
+ flb_errno();
+ return NULL;
+ }
+ snprintf(tmp, PATH_MAX - 1, "%s%s", dir, p);
+ }
+ else {
+ dir = getenv("HOME");
+ if (!dir) {
+ return path;
+ }
+
+ tmp = flb_strdup(dir);
+ if (!tmp) {
+ return path;
+ }
+ }
+
+ return tmp;
+ }
+
+ return path;
+}
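+
+/*
+ * A minimal sketch of the fallback above, assuming hypothetical home
+ * directories: "~/logs/*.log" expands to "/home/alice/logs/*.log" (via
+ * $HOME), and "~bob/app/*.log" expands to "/home/bob/app/*.log" (via
+ * getpwnam("bob")->pw_dir). The expanded copy is heap allocated and is
+ * released by do_glob() once glob() has been invoked.
+ */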
+#endif
+
+static int tail_is_excluded(char *path, struct flb_tail_config *ctx)
+{
+ struct mk_list *head;
+ struct flb_slist_entry *pattern;
+
+ if (!ctx->exclude_list) {
+ return FLB_FALSE;
+ }
+
+ mk_list_foreach(head, ctx->exclude_list) {
+ pattern = mk_list_entry(head, struct flb_slist_entry, _head);
+ if (fnmatch(pattern->str, path, 0) == 0) {
+ return FLB_TRUE;
+ }
+ }
+
+ return FLB_FALSE;
+}
+
+static inline int do_glob(const char *pattern, int flags,
+ void *not_used, glob_t *pglob)
+{
+ int ret;
+ int new_flags;
+ char *tmp = NULL;
+ int tmp_needs_free = FLB_FALSE;
+ (void) not_used;
+
+ /* Save current values */
+ new_flags = flags;
+
+ if (flags & GLOB_TILDE) {
+#ifdef UNSUP_TILDE
+ /*
+ * Some libc libraries like Musl do not support GLOB_TILDE for tilde
+ * expansion. A workaround would be to use wordexp(3), but looking at
+ * its implementation in Musl it looks quite expensive:
+ *
+ * http://git.musl-libc.org/cgit/musl/tree/src/misc/wordexp.c
+ *
+ * so the workaround is to do our own tilde expansion in a temporary buffer.
+ */
+
+ /* Look for a tilde */
+ tmp = expand_tilde(pattern);
+ if (tmp != pattern) {
+ /* the path was expanded */
+ pattern = tmp;
+ tmp_needs_free = FLB_TRUE;
+ }
+
+ /* remove unused flag */
+ new_flags &= ~GLOB_TILDE;
+#endif
+ }
+
+ /* invoke glob with new parameters */
+ ret = glob(pattern, new_flags, NULL, pglob);
+
+ /* remove temporary buffer, if allocated by expand_tilde above.
+ * Note that this buffer is only used for libc implementations
+ * that do not support the GLOB_TILDE flag, like musl. */
+ if ((tmp != NULL) && (tmp_needs_free == FLB_TRUE)) {
+ flb_free(tmp);
+ }
+
+ return ret;
+}
+
+/* Scan a path, register the entries and return how many */
+static int tail_scan_path(const char *path, struct flb_tail_config *ctx)
+{
+ int i;
+ int ret;
+ int count = 0;
+ glob_t globbuf;
+ time_t now;
+ int64_t mtime;
+ struct stat st;
+
+ flb_plg_debug(ctx->ins, "scanning path %s", path);
+
+ /* Safe reset for globfree() */
+ globbuf.gl_pathv = NULL;
+
+ /* Scan the given path */
+ ret = do_glob(path, GLOB_TILDE | GLOB_ERR, NULL, &globbuf);
+ if (ret != 0) {
+ switch (ret) {
+ case GLOB_NOSPACE:
+ flb_plg_error(ctx->ins, "no memory space available");
+ return -1;
+ case GLOB_ABORTED:
+ flb_plg_error(ctx->ins, "read error, check permissions: %s", path);
+ return -1;
+ case GLOB_NOMATCH:
+ ret = stat(path, &st);
+ if (ret == -1) {
+ flb_plg_debug(ctx->ins, "cannot read info from: %s", path);
+ }
+ else {
+ ret = access(path, R_OK);
+ if (ret == -1 && errno == EACCES) {
+ flb_plg_error(ctx->ins, "NO read access for path: %s", path);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "NO matches for path: %s", path);
+ }
+ }
+ return 0;
+ }
+ }
+
+
+ /* For every entry found, generate an output list */
+ now = time(NULL);
+ for (i = 0; i < globbuf.gl_pathc; i++) {
+ ret = stat(globbuf.gl_pathv[i], &st);
+ if (ret == 0 && S_ISREG(st.st_mode)) {
+ /* Check if this file is blacklisted */
+ if (tail_is_excluded(globbuf.gl_pathv[i], ctx) == FLB_TRUE) {
+ flb_plg_debug(ctx->ins, "excluded=%s", globbuf.gl_pathv[i]);
+ continue;
+ }
+
+ if (ctx->ignore_older > 0) {
+ mtime = flb_tail_stat_mtime(&st);
+ if (mtime > 0) {
+ if ((now - ctx->ignore_older) > mtime) {
+ flb_plg_debug(ctx->ins, "excluded=%s (ignore_older)",
+ globbuf.gl_pathv[i]);
+ continue;
+ }
+ }
+ }
+
+ /* Append file to list */
+ ret = flb_tail_file_append(globbuf.gl_pathv[i], &st,
+ FLB_TAIL_STATIC, ctx);
+ if (ret == 0) {
+ flb_plg_debug(ctx->ins, "scan_glob add(): %s, inode %li",
+ globbuf.gl_pathv[i], st.st_ino);
+ count++;
+ }
+ else {
+ flb_plg_debug(ctx->ins, "scan_blog add(): dismissed: %s, inode %li",
+ globbuf.gl_pathv[i], st.st_ino);
+ }
+ }
+ else {
+ flb_plg_debug(ctx->ins, "skip (invalid) entry=%s",
+ globbuf.gl_pathv[i]);
+ }
+ }
+
+ if (count > 0) {
+ tail_signal_manager(ctx);
+ }
+
+ globfree(&globbuf);
+ return count;
+}
diff --git a/src/fluent-bit/plugins/in_tail/tail_scan_win32.c b/src/fluent-bit/plugins/in_tail/tail_scan_win32.c
new file mode 100644
index 000000000..94733f065
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_scan_win32.c
@@ -0,0 +1,245 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file implements a glob-like path matching feature for Windows
+ * based on the Win32 API.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+
+#include <shlwapi.h>
+
+#include "tail.h"
+#include "tail_file.h"
+#include "tail_signal.h"
+#include "tail_config.h"
+
+#include "win32.h"
+
+static int tail_is_excluded(char *path, struct flb_tail_config *ctx)
+{
+ struct mk_list *head;
+ struct flb_slist_entry *pattern;
+
+ if (!ctx->exclude_list) {
+ return FLB_FALSE;
+ }
+
+ mk_list_foreach(head, ctx->exclude_list) {
+ pattern = mk_list_entry(head, struct flb_slist_entry, _head);
+ if (PathMatchSpecA(path, pattern->str)) {
+ return FLB_TRUE;
+ }
+ }
+
+ return FLB_FALSE;
+}
+
+/*
+ * This function is a thin wrapper over flb_tail_file_append(),
+ * adding normalization and sanity checks on top of it.
+ */
+static int tail_register_file(const char *target, struct flb_tail_config *ctx,
+ time_t ts)
+{
+ int64_t mtime;
+ struct stat st;
+ char path[MAX_PATH];
+
+ if (_fullpath(path, target, MAX_PATH) == NULL) {
+ flb_plg_error(ctx->ins, "cannot get absolute path of %s", target);
+ return -1;
+ }
+
+ if (stat(path, &st) != 0 || !S_ISREG(st.st_mode)) {
+ return -1;
+ }
+
+ if (ctx->ignore_older > 0) {
+ mtime = flb_tail_stat_mtime(&st);
+ if (mtime > 0) {
+ if ((ts - ctx->ignore_older) > mtime) {
+ flb_plg_debug(ctx->ins, "excluded=%s (ignore_older)",
+ target);
+ return -1;
+ }
+ }
+ }
+
+ if (tail_is_excluded(path, ctx) == FLB_TRUE) {
+ flb_plg_trace(ctx->ins, "skip '%s' (excluded)", path);
+ return -1;
+ }
+
+ return flb_tail_file_append(path, &st, FLB_TAIL_STATIC, ctx);
+}
+
+/*
+ * Perform pattern matching on the given path string. This function
+ * supports patterns with "nested" wildcards like below.
+ *
+ * tail_scan_pattern("C:\fluent-bit\*\*.txt", ctx);
+ *
+ * On success, the number of files found is returned (zero indicates
+ * "no file found"). On error, -1 is returned.
+ */
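+/*
+ * For the example pattern above the path is first truncated at the segment
+ * holding the first '*' ("C:\fluent-bit\*"); FindFirstFileA()/FindNextFileA()
+ * enumerate the matches, and for every match the remaining suffix ("\*.txt")
+ * is appended to rebuild a candidate path. Since that suffix still contains
+ * a wildcard, tail_scan_pattern() recurses on it until only concrete file
+ * names remain, which are handed to tail_register_file().
+ */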
+static int tail_scan_pattern(const char *path, struct flb_tail_config *ctx)
+{
+ char *star, *p0, *p1;
+ char pattern[MAX_PATH];
+ char buf[MAX_PATH];
+ int ret;
+ int n_added = 0;
+ time_t now;
+ int64_t mtime;
+ HANDLE h;
+ WIN32_FIND_DATA data;
+
+ if (strlen(path) > MAX_PATH - 1) {
+ flb_plg_error(ctx->ins, "path too long '%s'");
+ return -1;
+ }
+
+ star = strchr(path, '*');
+ if (star == NULL) {
+ return -1;
+ }
+
+ /*
+ * C:\data\tmp\input_*.conf
+ * 0<-----|
+ */
+ p0 = star;
+ while (path <= p0 && *p0 != '\\') {
+ p0--;
+ }
+
+ /*
+ * C:\data\tmp\input_*.conf
+ * |---->1
+ */
+ p1 = star;
+ while (*p1 && *p1 != '\\') {
+ p1++;
+ }
+
+ memcpy(pattern, path, (p1 - path));
+ pattern[p1 - path] = '\0';
+
+ h = FindFirstFileA(pattern, &data);
+ if (h == INVALID_HANDLE_VALUE) {
+ return 0; /* none matched */
+ }
+
+ now = time(NULL);
+ do {
+ /* Ignore the current and parent dirs */
+ if (!strcmp(".", data.cFileName) || !strcmp("..", data.cFileName)) {
+ continue;
+ }
+
+ /* Avoid an infinite loop */
+ if (strchr(data.cFileName, '*')) {
+ continue;
+ }
+
+ /* Create a path (prefix + filename + suffix) */
+ memcpy(buf, path, p0 - path + 1);
+ buf[p0 - path + 1] = '\0';
+
+ if (strlen(buf) + strlen(data.cFileName) + strlen(p1) > MAX_PATH - 1) {
+ flb_plg_warn(ctx->ins, "'%s%s%s' is too long", buf, data.cFileName, p1);
+ continue;
+ }
+ strcat(buf, data.cFileName);
+ strcat(buf, p1);
+
+ if (strchr(p1, '*')) {
+ ret = tail_scan_pattern(buf, ctx); /* recursive */
+ if (ret >= 0) {
+ n_added += ret;
+ }
+ continue;
+ }
+
+ /* Try to register the target file */
+ ret = tail_register_file(buf, ctx, now);
+ if (ret == 0) {
+ n_added++;
+ }
+ } while (FindNextFileA(h, &data) != 0);
+
+ FindClose(h);
+ return n_added;
+}
+
+static int tail_filepath(char *buf, int len, const char *basedir, const char *filename)
+{
+ char drive[_MAX_DRIVE];
+ char dir[_MAX_DIR];
+ char fname[_MAX_FNAME];
+ char ext[_MAX_EXT];
+ char tmp[MAX_PATH];
+ int ret;
+
+ ret = _splitpath_s(basedir, drive, _MAX_DRIVE, dir, _MAX_DIR, NULL, 0, NULL, 0);
+ if (ret) {
+ return -1;
+ }
+
+ ret = _splitpath_s(filename, NULL, 0, NULL, 0, fname, _MAX_FNAME, ext, _MAX_EXT);
+ if (ret) {
+ return -1;
+ }
+
+ ret = _makepath_s(tmp, MAX_PATH, drive, dir, fname, ext);
+ if (ret) {
+ return -1;
+ }
+
+ if (_fullpath(buf, tmp, len) == NULL) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static int tail_scan_path(const char *path, struct flb_tail_config *ctx)
+{
+ int ret;
+ int n_added = 0;
+ time_t now;
+
+ if (strchr(path, '*')) {
+ return tail_scan_pattern(path, ctx);
+ }
+
+ /* No wildcard involved. Let's just handle the file... */
+ now = time(NULL);
+ ret = tail_register_file(path, ctx, now);
+ if (ret == 0) {
+ n_added++;
+ }
+
+ return n_added;
+}
diff --git a/src/fluent-bit/plugins/in_tail/tail_signal.h b/src/fluent-bit/plugins/in_tail/tail_signal.h
new file mode 100644
index 000000000..1a81fec64
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_signal.h
@@ -0,0 +1,98 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TAIL_SIGNAL_H
+#define FLB_TAIL_SIGNAL_H
+
+#include "tail_config.h"
+
+static inline int tail_signal_manager(struct flb_tail_config *ctx)
+{
+ int n;
+ uint64_t val = 0xc001;
+
+ /*
+ * The number of signal reads might be less than the written signals, this
+ * means that some event is still pending in the queue. On that case we
+ * don't need to signal it again.
+ */
+ if (ctx->ch_reads < ctx->ch_writes) {
+ return 1;
+ }
+
+ /* Reset counters to prevent an overflow: unlikely, but let's stay safe */
+ if (ctx->ch_reads == ctx->ch_writes) {
+ ctx->ch_reads = 0;
+ ctx->ch_writes = 0;
+ }
+
+ /* Insert a dummy event into the channel manager */
+ n = flb_pipe_w(ctx->ch_manager[1], (const char *) &val, sizeof(val));
+ if (n == -1) {
+ flb_errno();
+ return -1;
+ }
+ else {
+ ctx->ch_writes++;
+ }
+
+ return n;
+}
+
+static inline int tail_signal_pending(struct flb_tail_config *ctx)
+{
+ int n;
+ uint64_t val = 0xc002;
+
+ /* Insert a dummy event into the 'pending' channel */
+ n = flb_pipe_w(ctx->ch_pending[1], (const char *) &val, sizeof(val));
+
+ /*
+ * If we get EAGAIN, it simply means the pending channel is full. Since
+ * a notification is already pending, it's safe to ignore.
+ */
+ if (n == -1 && !FLB_PIPE_WOULDBLOCK()) {
+ flb_errno();
+ return -1;
+ }
+
+ return n;
+}
+
+static inline int tail_consume_pending(struct flb_tail_config *ctx)
+{
+ int ret;
+ uint64_t val;
+
+ /*
+ * We need to consume the pending bytes. Loop until we would have
+ * blocked (pipe is empty).
+ */
+ do {
+ ret = flb_pipe_r(ctx->ch_pending[0], (char *) &val, sizeof(val));
+ if (ret <= 0 && !FLB_PIPE_WOULDBLOCK()) {
+ flb_errno();
+ return -1;
+ }
+ } while (!FLB_PIPE_WOULDBLOCK());
+
+ return 0;
+}
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/tail_sql.h b/src/fluent-bit/plugins/in_tail/tail_sql.h
new file mode 100644
index 000000000..855933a01
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/tail_sql.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TAIL_SQL_H
+#define FLB_TAIL_SQL_H
+
+/*
+ * In Fluent Bit we try to have a common convention for table names:
+ * if the table belongs to an input/output plugin, use the plugin name
+ * plus what the table is about, e.g.:
+ *
+ * in_tail plugin table to track files: in_tail_files
+ */
+#define SQL_CREATE_FILES \
+ "CREATE TABLE IF NOT EXISTS in_tail_files (" \
+ " id INTEGER PRIMARY KEY," \
+ " name TEXT NOT NULL," \
+ " offset INTEGER," \
+ " inode INTEGER," \
+ " created INTEGER," \
+ " rotated INTEGER DEFAULT 0" \
+ ");"
+
+#define SQL_GET_FILE \
+ "SELECT * from in_tail_files WHERE inode=@inode order by id desc;"
+
+#define SQL_INSERT_FILE \
+ "INSERT INTO in_tail_files (name, offset, inode, created)" \
+ " VALUES (@name, @offset, @inode, @created);"
+
+#define SQL_ROTATE_FILE \
+ "UPDATE in_tail_files set name=@name,rotated=1 WHERE id=@id;"
+
+#define SQL_UPDATE_OFFSET \
+ "UPDATE in_tail_files set offset=@offset WHERE id=@id;"
+
+#define SQL_DELETE_FILE \
+ "DELETE FROM in_tail_files WHERE id=@id;"
+
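+/*
+ * A minimal sketch of how the '@name' tokens above are bound, using the raw
+ * sqlite3 C API and assuming an already open 'sqlite3 *db' handle (the
+ * plugin itself drives these statements through Fluent Bit's own database
+ * helpers rather than calling sqlite3 directly):
+ *
+ *   sqlite3_stmt *stmt;
+ *   sqlite3_prepare_v2(db, SQL_GET_FILE, -1, &stmt, NULL);
+ *   sqlite3_bind_int64(stmt,
+ *                      sqlite3_bind_parameter_index(stmt, "@inode"),
+ *                      (sqlite3_int64) st.st_ino);
+ *   while (sqlite3_step(stmt) == SQLITE_ROW) {
+ *       ... read columns with sqlite3_column_text()/sqlite3_column_int64()
+ *   }
+ *   sqlite3_finalize(stmt);
+ */
+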
+#define SQL_PRAGMA_SYNC \
+ "PRAGMA synchronous=%i;"
+
+#define SQL_PRAGMA_JOURNAL_MODE \
+ "PRAGMA journal_mode=%s;"
+
+#define SQL_PRAGMA_LOCKING_MODE \
+ "PRAGMA locking_mode=EXCLUSIVE;"
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/win32.h b/src/fluent-bit/plugins/in_tail/win32.h
new file mode 100644
index 000000000..a9414f892
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/win32.h
@@ -0,0 +1,67 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This is the interface file that replaces POSIX functions
+ * with our own custom implementation.
+ */
+
+#ifndef FLB_TAIL_WIN32_H
+#define FLB_TAIL_WIN32_H
+
+#include "win32/interface.h"
+
+#undef open
+#undef stat
+#undef lstat
+#undef fstat
+#undef lseek
+
+#undef S_IFDIR
+#undef S_IFCHR
+#undef S_IFIFO
+#undef S_IFREG
+#undef S_IFLNK
+#undef S_IFMT
+#undef S_ISDIR
+#undef S_ISCHR
+#undef S_ISFIFO
+#undef S_ISREG
+#undef S_ISLNK
+
+#define open win32_open
+#define stat win32_stat
+#define lstat win32_lstat
+#define fstat win32_fstat
+
+#define lseek _lseeki64
+
+#define S_IFDIR WIN32_S_IFDIR
+#define S_IFCHR WIN32_S_IFCHR
+#define S_IFIFO WIN32_S_IFIFO
+#define S_IFREG WIN32_S_IFREG
+#define S_IFLNK WIN32_S_IFLNK
+#define S_IFMT WIN32_S_IFMT
+
+#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
+#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR)
+#define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO)
+#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
+#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK)
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/win32/interface.h b/src/fluent-bit/plugins/in_tail/win32/interface.h
new file mode 100644
index 000000000..73b2ef233
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/win32/interface.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TAIL_WIN32_INTERFACE_H
+#define FLB_TAIL_WIN32_INTERFACE_H
+
+struct win32_stat {
+ uint64_t st_ino;
+ uint16_t st_mode;
+ int64_t st_mtime;
+ int16_t st_nlink;
+ int64_t st_size;
+};
+
+int win32_stat(const char *path, struct win32_stat *wst);
+int win32_lstat(const char *path, struct win32_stat *wst);
+int win32_fstat(int fd, struct win32_stat *wst);
+
+int win32_open(const char *path, int flags);
+
+#define WIN32_S_IFDIR 0x1000
+#define WIN32_S_IFCHR 0x2000
+#define WIN32_S_IFIFO 0x4000
+#define WIN32_S_IFREG 0x8000
+#define WIN32_S_IFLNK 0xc000
+#define WIN32_S_IFMT 0xf000
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tail/win32/io.c b/src/fluent-bit/plugins/in_tail/win32/io.c
new file mode 100644
index 000000000..45928b04a
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/win32/io.c
@@ -0,0 +1,47 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <Windows.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <fcntl.h>
+#include <io.h>
+#include "interface.h"
+
+/*
+ * POSIX IO emulation tailored for in_tail's usage.
+ *
+ * open(2) that does not acquire an exclusive lock.
+ */
+
+int win32_open(const char *path, int flags)
+{
+ HANDLE h;
+ h = CreateFileA(path,
+ GENERIC_READ,
+ FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE,
+ NULL, /* lpSecurityAttributes */
+ OPEN_EXISTING, /* dwCreationDisposition */
+ 0, /* dwFlagsAndAttributes */
+ NULL); /* hTemplateFile */
+ if (h == INVALID_HANDLE_VALUE) {
+ return -1;
+ }
+ return _open_osfhandle((intptr_t) h, _O_RDONLY);
+}
diff --git a/src/fluent-bit/plugins/in_tail/win32/stat.c b/src/fluent-bit/plugins/in_tail/win32/stat.c
new file mode 100644
index 000000000..bce802749
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tail/win32/stat.c
@@ -0,0 +1,332 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <Windows.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <io.h>
+#include "interface.h"
+
+/*
+ * NTFS stat(2) emulation tailored for in_tail's usage.
+ *
+ * (1) Support st_ino (inode) for Windows NTFS.
+ * (2) Support NTFS symlinks.
+ * (3) Support large files >= 2GB.
+ *
+ * To use it, include "win32.h" and it will transparently
+ * replace stat(), lstat() and fstat().
+ */
+
+#define UINT64(high, low) ((uint64_t) (high) << 32 | (low))
+#define WINDOWS_TICKS_TO_SECONDS_RATIO 10000000
+#define WINDOWS_EPOCH_TO_UNIX_EPOCH_DELTA 11644473600
+
+/*
+ * FILETIME timestamps are expressed in 100-nanosecond intervals;
+ * because of this we need to divide the value by 10000000 in order
+ * to convert it to seconds.
+ *
+ * While UNIX timestamps use January 1, 1970 as their epoch, Windows
+ * FILETIME timestamps use January 1, 1601, so we need to subtract
+ * 11644473600 seconds to account for the difference.
+ *
+ * Note: Even though this does not account for leap seconds it should be
+ * accurate enough.
+ */
+
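+/*
+ * A worked example with an illustrative value: a FILETIME of
+ * 133000000000000000 ticks is 133000000000000000 / 10000000 = 13300000000
+ * seconds since 1601, and 13300000000 - 11644473600 = 1655526400 seconds
+ * since the UNIX epoch (mid June 2022).
+ */
+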
+static uint64_t filetime_to_epoch(FILETIME *ft)
+{
+ ULARGE_INTEGER timestamp;
+
+ if (ft == NULL) {
+ return 0;
+ }
+
+ timestamp.HighPart = ft->dwHighDateTime;
+ timestamp.LowPart = ft->dwLowDateTime;
+
+ timestamp.QuadPart /= WINDOWS_TICKS_TO_SECONDS_RATIO;
+ timestamp.QuadPart -= WINDOWS_EPOCH_TO_UNIX_EPOCH_DELTA;
+
+ return timestamp.QuadPart;
+}
+
+static void reset_errno()
+{
+ errno = 0;
+}
+
+static void propagate_last_error_to_errno()
+{
+ DWORD error_code;
+
+ error_code = GetLastError();
+
+ switch (error_code) {
+ case ERROR_INVALID_TARGET_HANDLE:
+ case ERROR_INVALID_HANDLE:
+ errno = EBADF;
+ break;
+
+ case ERROR_TOO_MANY_OPEN_FILES:
+ errno = EMFILE;
+ break;
+
+ case ERROR_INVALID_FLAG_NUMBER:
+ case ERROR_INVALID_PARAMETER:
+ errno = EINVAL;
+ break;
+
+ case ERROR_NOT_ENOUGH_MEMORY:
+ case ERROR_OUTOFMEMORY:
+ errno = ENOMEM;
+ break;
+
+ case ERROR_SHARING_VIOLATION:
+ case ERROR_LOCK_VIOLATION:
+ case ERROR_PATH_BUSY:
+ case ERROR_BUSY:
+ errno = EBUSY;
+ break;
+
+ case ERROR_HANDLE_DISK_FULL:
+ case ERROR_DISK_FULL:
+ errno = ENOSPC;
+ break;
+
+ case ERROR_INVALID_ADDRESS:
+ errno = EFAULT;
+ break;
+
+ case ERROR_FILE_TOO_LARGE:
+ errno = EFBIG;
+ break;
+
+ case ERROR_ALREADY_EXISTS:
+ case ERROR_FILE_EXISTS:
+ errno = EEXIST;
+ break;
+
+ case ERROR_FILE_NOT_FOUND:
+ case ERROR_PATH_NOT_FOUND:
+ case ERROR_INVALID_DRIVE:
+ case ERROR_BAD_PATHNAME:
+ case ERROR_INVALID_NAME:
+ case ERROR_BAD_UNIT:
+ errno = ENOENT;
+ break;
+
+ case ERROR_SEEK_ON_DEVICE:
+ case ERROR_NEGATIVE_SEEK:
+ errno = ESPIPE;
+ break;
+
+ case ERROR_ACCESS_DENIED:
+ errno = EACCES;
+ break;
+
+ case ERROR_DIR_NOT_EMPTY:
+ errno = ENOTEMPTY;
+ break;
+
+ case ERROR_BROKEN_PIPE:
+ errno = EPIPE;
+ break;
+
+ case ERROR_GEN_FAILURE:
+ errno = EIO;
+ break;
+
+ case ERROR_OPEN_FAILED:
+ errno = EIO;
+ break;
+
+ case ERROR_SUCCESS:
+ errno = 0;
+ break;
+
+ default:
+ /* This is just a canary: if you hit this
+ * error it means we need to expand the
+ * translation list.
+ */
+
+ errno = EOWNERDEAD;
+ break;
+ }
+}
+
+static int get_mode(unsigned int attr)
+{
+ if (attr & FILE_ATTRIBUTE_DIRECTORY) {
+ return WIN32_S_IFDIR;
+ }
+ return WIN32_S_IFREG;
+}
+
+
+
+static int is_symlink(const char *path)
+{
+ WIN32_FIND_DATA data;
+ HANDLE h;
+
+ SetLastError(0);
+ reset_errno();
+
+ h = FindFirstFileA(path, &data);
+
+ if (h == INVALID_HANDLE_VALUE) {
+ propagate_last_error_to_errno();
+
+ return 0;
+ }
+
+ FindClose(h);
+
+ /*
+ * An NTFS symlink is a file with a bit of metadata (a "reparse point"),
+ * so (1) check if the file has such metadata and then (2) confirm that
+ * it is indeed a symlink.
+ */
+ if (data.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) {
+ if (data.dwReserved0 == IO_REPARSE_TAG_SYMLINK) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int hstat(HANDLE h, struct win32_stat *wst)
+{
+ BY_HANDLE_FILE_INFORMATION info;
+ FILE_STANDARD_INFO std;
+
+ SetLastError(0);
+ reset_errno();
+
+ if (!GetFileInformationByHandle(h, &info)) {
+ propagate_last_error_to_errno();
+
+ return -1;
+ }
+
+ if (!GetFileInformationByHandleEx(h, FileStandardInfo,
+ &std, sizeof(std))) {
+ propagate_last_error_to_errno();
+
+ return -1;
+ }
+
+ wst->st_nlink = std.NumberOfLinks;
+ if (std.DeletePending) {
+ wst->st_nlink = 0;
+ }
+
+ wst->st_mode = get_mode(info.dwFileAttributes);
+ wst->st_size = UINT64(info.nFileSizeHigh, info.nFileSizeLow);
+ wst->st_ino = UINT64(info.nFileIndexHigh, info.nFileIndexLow);
+ wst->st_mtime = filetime_to_epoch(&info.ftLastWriteTime);
+
+ return 0;
+}
+
+int win32_stat(const char *path, struct win32_stat *wst)
+{
+ HANDLE h;
+
+ SetLastError(0);
+ reset_errno();
+
+ h = CreateFileA(path,
+ GENERIC_READ,
+ FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE,
+ NULL, /* lpSecurityAttributes */
+ OPEN_EXISTING, /* dwCreationDisposition */
+ 0, /* dwFlagsAndAttributes */
+ NULL); /* hTemplateFile */
+
+ if (h == INVALID_HANDLE_VALUE) {
+ propagate_last_error_to_errno();
+
+ return -1;
+ }
+
+ if (hstat(h, wst)) {
+ CloseHandle(h);
+ return -1;
+ }
+
+ CloseHandle(h);
+ return 0;
+}
+
+int win32_lstat(const char *path, struct win32_stat *wst)
+{
+ HANDLE h;
+
+ SetLastError(0);
+ reset_errno();
+
+ h = CreateFileA(path,
+ GENERIC_READ,
+ FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE,
+ NULL, /* lpSecurityAttributes */
+ OPEN_EXISTING, /* dwCreationDisposition */
+ FILE_FLAG_OPEN_REPARSE_POINT,
+ NULL); /* hTemplateFile */
+
+ if (h == INVALID_HANDLE_VALUE) {
+ propagate_last_error_to_errno();
+
+ return -1;
+ }
+
+ if (hstat(h, wst)) {
+ CloseHandle(h);
+ return -1;
+ }
+
+ if (is_symlink(path)) {
+ wst->st_mode = WIN32_S_IFLNK;
+ }
+
+ CloseHandle(h);
+ return 0;
+}
+
+int win32_fstat(int fd, struct win32_stat *wst)
+{
+ HANDLE h;
+
+ SetLastError(0);
+ reset_errno();
+
+ h = (HANDLE) _get_osfhandle(fd);
+
+ if (h == INVALID_HANDLE_VALUE) {
+ propagate_last_error_to_errno();
+
+ return -1;
+ }
+
+ return hstat(h, wst);
+}
diff --git a/src/fluent-bit/plugins/in_tcp/CMakeLists.txt b/src/fluent-bit/plugins/in_tcp/CMakeLists.txt
new file mode 100644
index 000000000..df6763cd6
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tcp/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ tcp.c
+ tcp_conn.c
+ tcp_config.c)
+
+FLB_PLUGIN(in_tcp "${src}" "")
diff --git a/src/fluent-bit/plugins/in_tcp/tcp.c b/src/fluent-bit/plugins/in_tcp/tcp.c
new file mode 100644
index 000000000..084ea6887
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tcp/tcp.c
@@ -0,0 +1,184 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_network.h>
+#include <msgpack.h>
+
+#include "tcp.h"
+#include "tcp_conn.h"
+#include "tcp_config.h"
+
+/*
+ * For a server event, the collection event means a new client has arrived; we
+ * accept the connection and create a new TCP connection instance which will wait for
+ * JSON map messages.
+ */
+static int in_tcp_collect(struct flb_input_instance *in,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_connection *connection;
+ struct tcp_conn *conn;
+ struct flb_in_tcp_config *ctx;
+
+ ctx = in_context;
+
+ connection = flb_downstream_conn_get(ctx->downstream);
+
+ if (connection == NULL) {
+ flb_plg_error(ctx->ins, "could not accept new connection");
+
+ return -1;
+ }
+
+ flb_plg_trace(ctx->ins, "new TCP connection arrived FD=%i", connection->fd);
+
+ conn = tcp_conn_add(connection, ctx);
+
+ if (conn == NULL) {
+ flb_plg_error(ctx->ins, "could not accept new connection");
+
+ flb_downstream_conn_release(connection);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Initialize plugin */
+static int in_tcp_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ unsigned short int port;
+ int ret;
+ struct flb_in_tcp_config *ctx;
+
+ (void) data;
+
+ /* Allocate space for the configuration */
+ ctx = tcp_config_init(in);
+ if (!ctx) {
+ return -1;
+ }
+ ctx->collector_id = -1;
+ ctx->ins = in;
+ mk_list_init(&ctx->connections);
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ port = (unsigned short int) strtoul(ctx->tcp_port, NULL, 10);
+
+ ctx->downstream = flb_downstream_create(FLB_TRANSPORT_TCP,
+ in->flags,
+ ctx->listen,
+ port,
+ in->tls,
+ config,
+ &in->net_setup);
+
+ if (ctx->downstream == NULL) {
+ flb_plg_error(ctx->ins,
+ "could not initialize downstream on %s:%s. Aborting",
+ ctx->listen, ctx->tcp_port);
+
+ tcp_config_destroy(ctx);
+
+ return -1;
+ }
+
+ flb_input_downstream_set(ctx->downstream, ctx->ins);
+
+ /* Collect upon data available on the server socket (new connections) */
+ ret = flb_input_set_collector_socket(in,
+ in_tcp_collect,
+ ctx->downstream->server_fd,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Could not set collector for IN_TCP input plugin");
+ tcp_config_destroy(ctx);
+
+ return -1;
+ }
+
+ ctx->collector_id = ret;
+
+ return 0;
+}
+
+static int in_tcp_exit(void *data, struct flb_config *config)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_in_tcp_config *ctx;
+ struct tcp_conn *conn;
+
+ (void) config;
+
+ ctx = data;
+
+ mk_list_foreach_safe(head, tmp, &ctx->connections) {
+ conn = mk_list_entry(head, struct tcp_conn, _head);
+
+ tcp_conn_del(conn);
+ }
+
+ tcp_config_destroy(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "format", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_tcp_config, format_name),
+ "Set the format: json or none"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "separator", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_tcp_config, raw_separator),
+ "Set separator"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "chunk_size", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_tcp_config, chunk_size_str),
+ "Set the chunk size"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "buffer_size", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_tcp_config, buffer_size_str),
+ "Set the buffer size"
+ },
+ /* EOF */
+ {0}
+};
+
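+/*
+ * A minimal configuration sketch for this input (values are illustrative);
+ * listen/port come from the generic listener settings, the rest map to the
+ * options declared above. chunk_size and buffer_size are interpreted as KB
+ * by tcp_config_init():
+ *
+ *   [INPUT]
+ *       name        tcp
+ *       listen      0.0.0.0
+ *       port        5170
+ *       format      none
+ *       separator   \n
+ *       chunk_size  32
+ *       buffer_size 64
+ */
+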
+/* Plugin reference */
+struct flb_input_plugin in_tcp_plugin = {
+ .name = "tcp",
+ .description = "TCP",
+ .cb_init = in_tcp_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_tcp_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_tcp_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET_SERVER | FLB_IO_OPT_TLS
+};
diff --git a/src/fluent-bit/plugins/in_tcp/tcp.h b/src/fluent-bit/plugins/in_tcp/tcp.h
new file mode 100644
index 000000000..3ddcbed06
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tcp/tcp.h
@@ -0,0 +1,50 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_TCP_H
+#define FLB_IN_TCP_H
+
+#define FLB_TCP_FMT_JSON 0 /* default */
+#define FLB_TCP_FMT_NONE 1 /* no format, use delimiters */
+
+#include <fluent-bit/flb_downstream.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+struct flb_in_tcp_config {
+ flb_sds_t format_name; /* Data format name */
+ int format; /* Data format */
+ size_t buffer_size; /* Buffer size for each reader */
+ flb_sds_t buffer_size_str; /* Buffer size in string form */
+ size_t chunk_size; /* Chunk allocation size */
+ flb_sds_t chunk_size_str; /* Chunk size in string form */
+ char *listen; /* Listen interface */
+ char *tcp_port; /* TCP Port */
+ flb_sds_t raw_separator; /* Unescaped string delimiter */
+ flb_sds_t separator; /* String delimiter */
+ int collector_id; /* Listener collector id */
+ struct flb_downstream *downstream; /* Client manager */
+ struct mk_list connections; /* List of active connections */
+ struct flb_input_instance *ins; /* Input plugin instance */
+ struct flb_log_event_encoder *log_encoder;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tcp/tcp_config.c b/src/fluent-bit/plugins/in_tcp/tcp_config.c
new file mode 100644
index 000000000..db9a36a01
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tcp/tcp_config.c
@@ -0,0 +1,155 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_unescape.h>
+
+#include "tcp.h"
+#include "tcp_conn.h"
+#include "tcp_config.h"
+
+#include <stdlib.h>
+
+struct flb_in_tcp_config *tcp_config_init(struct flb_input_instance *ins)
+{
+ int ret;
+ int len;
+ char port[16];
+ char *out;
+ struct flb_in_tcp_config *ctx;
+
+ /* Allocate plugin context */
+ ctx = flb_calloc(1, sizeof(struct flb_in_tcp_config));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ ctx->format = FLB_TCP_FMT_JSON;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to load configuration");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Data format (expected payload) */
+ if (ctx->format_name) {
+ if (strcasecmp(ctx->format_name, "json") == 0) {
+ ctx->format = FLB_TCP_FMT_JSON;
+ }
+ else if (strcasecmp(ctx->format_name, "none") == 0) {
+ ctx->format = FLB_TCP_FMT_NONE;
+ }
+ else {
+ flb_plg_error(ctx->ins, "unrecognized format value '%s'", ctx->format_name);
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+
+ /* String separator used to split records when using 'format none' */
+ if (ctx->raw_separator) {
+ len = strlen(ctx->raw_separator);
+ out = flb_malloc(len + 1);
+ if (!out) {
+ flb_errno();
+ flb_free(ctx);
+ return NULL;
+ }
+ ret = flb_unescape_string(ctx->raw_separator, len, &out);
+ if (ret <= 0) {
+ flb_plg_error(ctx->ins, "invalid separator");
+ flb_free(out);
+ flb_free(ctx);
+ return NULL;
+ }
+
+ ctx->separator = flb_sds_create_len(out, ret);
+ if (!ctx->separator) {
+ flb_free(out);
+ flb_free(ctx);
+ return NULL;
+ }
+ flb_free(out);
+ }
+ if (!ctx->separator) {
+ ctx->separator = flb_sds_create_len("\n", 1);
+ }
+
+ /* Listen interface (if not set, defaults to 0.0.0.0:5170) */
+ flb_input_net_default_listener("0.0.0.0", 5170, ins);
+ ctx->listen = ins->host.listen;
+ snprintf(port, sizeof(port) - 1, "%d", ins->host.port);
+ ctx->tcp_port = flb_strdup(port);
+
+ /* Chunk size */
+ if (ctx->chunk_size_str) {
+ /* Convert KB unit to Bytes */
+ ctx->chunk_size = (atoi(ctx->chunk_size_str) * 1024);
+ } else {
+ ctx->chunk_size = atoi(FLB_IN_TCP_CHUNK);
+ }
+
+ /* Buffer size */
+ if (!ctx->buffer_size_str) {
+ ctx->buffer_size = ctx->chunk_size;
+ }
+ else {
+ /* Convert KB unit to Bytes */
+ ctx->buffer_size = (atoi(ctx->buffer_size_str) * 1024);
+ }
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(ctx->ins, "could not initialize event encoder");
+ tcp_config_destroy(ctx);
+
+ ctx = NULL;
+ }
+
+ return ctx;
+}
+
+int tcp_config_destroy(struct flb_in_tcp_config *ctx)
+{
+ if (ctx->log_encoder != NULL) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ }
+
+ if (ctx->collector_id != -1) {
+ flb_input_collector_delete(ctx->collector_id, ctx->ins);
+
+ ctx->collector_id = -1;
+ }
+
+ if (ctx->downstream != NULL) {
+ flb_downstream_destroy(ctx->downstream);
+ }
+
+ flb_sds_destroy(ctx->separator);
+ flb_free(ctx->tcp_port);
+ flb_free(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_tcp/tcp_config.h b/src/fluent-bit/plugins/in_tcp/tcp_config.h
new file mode 100644
index 000000000..36df27873
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tcp/tcp_config.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_TCP_CONFIG_H
+#define FLB_IN_TCP_CONFIG_H
+
+#include "tcp.h"
+
+struct flb_in_tcp_config *tcp_config_init(struct flb_input_instance *i_ins);
+int tcp_config_destroy(struct flb_in_tcp_config *config);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_tcp/tcp_conn.c b/src/fluent-bit/plugins/in_tcp/tcp_conn.c
new file mode 100644
index 000000000..28b4b3222
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tcp/tcp_conn.c
@@ -0,0 +1,412 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_engine.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_error.h>
+
+#include "tcp.h"
+#include "tcp_conn.h"
+
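+/*
+ * Shift the unprocessed tail of the buffer to the front once 'bytes' have
+ * been consumed from it.
+ */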
+static inline void consume_bytes(char *buf, int bytes, int length)
+{
+ memmove(buf, buf + bytes, length - bytes);
+}
+
+static inline int process_pack(struct tcp_conn *conn,
+ char *pack, size_t size)
+{
+    int ret = FLB_EVENT_ENCODER_SUCCESS;
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object entry;
+ struct flb_in_tcp_config *ctx;
+
+ ctx = conn->ctx;
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ /* First pack the results, iterate concatenated messages */
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, pack, size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ entry = result.data;
+
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ if (entry.type == MSGPACK_OBJECT_MAP) {
+ ret = flb_log_event_encoder_set_body_from_msgpack_object(
+ ctx->log_encoder, &entry);
+ }
+ else if (entry.type == MSGPACK_OBJECT_ARRAY) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("msg"),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&entry));
+ }
+ else {
+ ret = FLB_EVENT_ENCODER_ERROR_INVALID_VALUE_TYPE;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ break;
+ }
+ }
+ }
+
+ msgpack_unpacked_destroy(&result);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(conn->ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "log event encoding error : %d", ret);
+
+ ret = -1;
+ }
+
+ return ret;
+}
+
+/* Process a JSON payload, return the number of processed bytes */
+static ssize_t parse_payload_json(struct tcp_conn *conn)
+{
+ int ret;
+ int out_size;
+ char *pack;
+
+ ret = flb_pack_json_state(conn->buf_data, conn->buf_len,
+ &pack, &out_size, &conn->pack_state);
+ if (ret == FLB_ERR_JSON_PART) {
+ flb_plg_debug(conn->ins, "JSON incomplete, waiting for more data...");
+ return 0;
+ }
+ else if (ret == FLB_ERR_JSON_INVAL) {
+ flb_plg_warn(conn->ins, "invalid JSON message, skipping");
+ conn->buf_len = 0;
+ conn->pack_state.multiple = FLB_TRUE;
+ return -1;
+ }
+ else if (ret == -1) {
+ return -1;
+ }
+
+ /* Process the packaged JSON and return the last byte used */
+ process_pack(conn, pack, out_size);
+ flb_free(pack);
+
+ return conn->pack_state.last_byte;
+}
+
+/*
+ * Process a raw text payload, using the configured separator string to split
+ * records; returns the number of processed bytes.
+ */
+static ssize_t parse_payload_none(struct tcp_conn *conn)
+{
+ int ret;
+ int len;
+ int sep_len;
+ size_t consumed = 0;
+ char *buf;
+ char *s;
+ char *separator;
+ struct flb_in_tcp_config *ctx;
+
+ ctx = conn->ctx;
+
+ separator = conn->ctx->separator;
+ sep_len = flb_sds_len(conn->ctx->separator);
+
+ buf = conn->buf_data;
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
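+    /* Emit one log record per separator-delimited segment found in the buffer */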
+ while ((s = strstr(buf, separator))) {
+ len = (s - buf);
+ if (len == 0) {
+ break;
+ }
+ else if (len > 0) {
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("log"),
+ FLB_LOG_EVENT_STRING_VALUE(buf, len));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ break;
+ }
+
+            consumed += len + sep_len;  /* account for the full separator length */
+ buf += len + sep_len;
+ }
+ else {
+ break;
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(conn->ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ }
+ else {
+ flb_plg_error(ctx->ins, "log event encoding error : %d", ret);
+ }
+
+ return consumed;
+}
+
+/* Callback invoked every time an event is triggered for a connection */
+int tcp_conn_event(void *data)
+{
+ int bytes;
+ int available;
+ int size;
+ ssize_t ret_payload = -1;
+ char *tmp;
+ struct mk_event *event;
+ struct tcp_conn *conn;
+ struct flb_connection *connection;
+ struct flb_in_tcp_config *ctx;
+
+ connection = (struct flb_connection *) data;
+
+ conn = connection->user_data;
+
+ ctx = conn->ctx;
+
+ event = &connection->event;
+
+ if (event->mask & MK_EVENT_READ) {
+ available = (conn->buf_size - conn->buf_len) - 1;
+ if (available < 1) {
+ if (conn->buf_size + ctx->chunk_size > ctx->buffer_size) {
+ flb_plg_warn(ctx->ins,
+ "fd=%i incoming data exceeds 'Buffer_Size' (%zu KB)",
+ event->fd, (ctx->buffer_size / 1024));
+ tcp_conn_del(conn);
+ return -1;
+ }
+
+ size = conn->buf_size + ctx->chunk_size;
+ tmp = flb_realloc(conn->buf_data, size);
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+ flb_plg_trace(ctx->ins, "fd=%i buffer realloc %i -> %i",
+ event->fd, conn->buf_size, size);
+
+ conn->buf_data = tmp;
+ conn->buf_size = size;
+ available = (conn->buf_size - conn->buf_len) - 1;
+ }
+
+ /* Read data */
+ bytes = flb_io_net_read(connection,
+ (void *) &conn->buf_data[conn->buf_len],
+ available);
+
+ if (bytes <= 0) {
+ flb_plg_trace(ctx->ins, "fd=%i closed connection", event->fd);
+ tcp_conn_del(conn);
+ return -1;
+ }
+
+ flb_plg_trace(ctx->ins, "read()=%i pre_len=%i now_len=%i",
+ bytes, conn->buf_len, conn->buf_len + bytes);
+ conn->buf_len += bytes;
+ conn->buf_data[conn->buf_len] = '\0';
+
+ /* Strip CR or LF if found at first byte */
+ if (conn->buf_data[0] == '\r' || conn->buf_data[0] == '\n') {
+ /* Skip message with one byte with CR or LF */
+ flb_plg_trace(ctx->ins, "skip one byte message with ASCII code=%i",
+ conn->buf_data[0]);
+ consume_bytes(conn->buf_data, 1, conn->buf_len);
+ conn->buf_len--;
+ conn->buf_data[conn->buf_len] = '\0';
+ }
+
+ /* JSON Format handler */
+ if (ctx->format == FLB_TCP_FMT_JSON) {
+ ret_payload = parse_payload_json(conn);
+ if (ret_payload == 0) {
+ /* Incomplete JSON message, we need more data */
+ return -1;
+ }
+ else if (ret_payload == -1) {
+ flb_pack_state_reset(&conn->pack_state);
+ flb_pack_state_init(&conn->pack_state);
+ conn->pack_state.multiple = FLB_TRUE;
+ return -1;
+ }
+ }
+ else if (ctx->format == FLB_TCP_FMT_NONE) {
+ ret_payload = parse_payload_none(conn);
+ if (ret_payload == 0) {
+ return -1;
+ }
+ else if (ret_payload == -1) {
+ conn->buf_len = 0;
+ return -1;
+ }
+ }
+
+
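+        /* Discard the processed bytes; any trailing partial record stays buffered */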
+ consume_bytes(conn->buf_data, ret_payload, conn->buf_len);
+ conn->buf_len -= ret_payload;
+ conn->buf_data[conn->buf_len] = '\0';
+
+ if (ctx->format == FLB_TCP_FMT_JSON) {
+ jsmn_init(&conn->pack_state.parser);
+ conn->pack_state.tokens_count = 0;
+ conn->pack_state.last_byte = 0;
+ conn->pack_state.buf_len = 0;
+ }
+
+ return bytes;
+ }
+
+ if (event->mask & MK_EVENT_CLOSE) {
+ flb_plg_trace(ctx->ins, "fd=%i hangup", event->fd);
+ tcp_conn_del(conn);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Create a new TCP connection instance */
+struct tcp_conn *tcp_conn_add(struct flb_connection *connection,
+ struct flb_in_tcp_config *ctx)
+{
+ struct tcp_conn *conn;
+ int ret;
+
+ conn = flb_malloc(sizeof(struct tcp_conn));
+ if (!conn) {
+ flb_errno();
+ return NULL;
+ }
+
+ conn->connection = connection;
+
+ /* Set data for the event-loop */
+ MK_EVENT_NEW(&connection->event);
+
+ connection->user_data = conn;
+ connection->event.type = FLB_ENGINE_EV_CUSTOM;
+ connection->event.handler = tcp_conn_event;
+
+ /* Connection info */
+ conn->ctx = ctx;
+ conn->buf_len = 0;
+ conn->rest = 0;
+ conn->status = TCP_NEW;
+
+ conn->buf_data = flb_malloc(ctx->chunk_size);
+ if (!conn->buf_data) {
+ flb_errno();
+
+ flb_plg_error(ctx->ins, "could not allocate new connection");
+ flb_free(conn);
+
+ return NULL;
+ }
+ conn->buf_size = ctx->chunk_size;
+ conn->ins = ctx->ins;
+
+ /* Initialize JSON parser */
+ if (ctx->format == FLB_TCP_FMT_JSON) {
+ flb_pack_state_init(&conn->pack_state);
+ conn->pack_state.multiple = FLB_TRUE;
+ }
+
+ /* Register instance into the event loop */
+ ret = mk_event_add(flb_engine_evl_get(),
+ connection->fd,
+ FLB_ENGINE_EV_CUSTOM,
+ MK_EVENT_READ,
+ &connection->event);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not register new connection");
+
+ flb_free(conn->buf_data);
+ flb_free(conn);
+
+ return NULL;
+ }
+
+ mk_list_add(&conn->_head, &ctx->connections);
+
+ return conn;
+}
+
+int tcp_conn_del(struct tcp_conn *conn)
+{
+ struct flb_in_tcp_config *ctx;
+
+ ctx = conn->ctx;
+
+ if (ctx->format == FLB_TCP_FMT_JSON) {
+ flb_pack_state_reset(&conn->pack_state);
+ }
+
+ /* The downstream unregisters the file descriptor from the event-loop
+ * so there's nothing to be done by the plugin
+ */
+ flb_downstream_conn_release(conn->connection);
+
+ /* Release resources */
+ mk_list_del(&conn->_head);
+
+ flb_free(conn->buf_data);
+ flb_free(conn);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_tcp/tcp_conn.h b/src/fluent-bit/plugins/in_tcp/tcp_conn.h
new file mode 100644
index 000000000..f9af869f2
--- /dev/null
+++ b/src/fluent-bit/plugins/in_tcp/tcp_conn.h
@@ -0,0 +1,59 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_TCP_CONN_H
+#define FLB_IN_TCP_CONN_H
+
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_connection.h>
+
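+/* Default chunk size in bytes, used when 'chunk_size' is not configured */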
+#define FLB_IN_TCP_CHUNK "32768"
+
+enum {
+ TCP_NEW = 1, /* it's a new connection */
+    TCP_CONNECTED = 2, /* connection established */
+};
+
+struct tcp_conn_stream {
+ char *tag;
+ size_t tag_len;
+};
+
+/* Represents a connection */
+struct tcp_conn {
+ int status; /* Connection status */
+
+ /* Buffer */
+ char *buf_data; /* Buffer data */
+ int buf_len; /* Data length */
+ int buf_size; /* Buffer size */
+ size_t rest; /* Unpacking offset */
+
+ struct flb_input_instance *ins; /* Parent plugin instance */
+ struct flb_in_tcp_config *ctx; /* Plugin configuration context */
+ struct flb_pack_state pack_state; /* Internal JSON parser */
+ struct flb_connection *connection;
+
+ struct mk_list _head;
+};
+
+struct tcp_conn *tcp_conn_add(struct flb_connection *connection, struct flb_in_tcp_config *ctx);
+int tcp_conn_del(struct tcp_conn *conn);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_thermal/CMakeLists.txt b/src/fluent-bit/plugins/in_thermal/CMakeLists.txt
new file mode 100644
index 000000000..693d0ed41
--- /dev/null
+++ b/src/fluent-bit/plugins/in_thermal/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ in_thermal.c)
+
+FLB_PLUGIN(in_thermal "${src}" "")
diff --git a/src/fluent-bit/plugins/in_thermal/in_thermal.c b/src/fluent-bit/plugins/in_thermal/in_thermal.c
new file mode 100644
index 000000000..2eb9267de
--- /dev/null
+++ b/src/fluent-bit/plugins/in_thermal/in_thermal.c
@@ -0,0 +1,372 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_pack.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <sys/types.h>
+#include <dirent.h>
+#include <unistd.h>
+
+#include <msgpack.h>
+
+#include "in_thermal.h"
+
+struct flb_input_plugin in_thermal_plugin;
+
+/* Default collection time: every 1 second (0 nanoseconds) */
+#define DEFAULT_INTERVAL_SEC "1"
+#define DEFAULT_INTERVAL_NSEC "0"
+
+#define IN_THERMAL_N_MAX 32
+#define IN_THERMAL_FILENAME_LEN 1024
+#define IN_THERMAL_TYPE_LEN 256
+
+struct temp_info
+{
+ char name[IN_THERMAL_FILENAME_LEN]; /* .../thermal_zoneX/... */
+ char type[IN_THERMAL_TYPE_LEN]; /* from /sys/class/thermal/thermal_zoneX/type */
+ double temp; /* from /sys/class/thermal/thermal_zoneX/temp */
+};
+
+/* Retrieve temperature(s) from the system (via /sys/class/thermal) */
+static inline int proc_temperature(struct flb_in_thermal_config *ctx,
+ struct temp_info *info, int n)
+{
+ int i, j;
+ DIR *d;
+ struct dirent *e;
+ char filename[IN_THERMAL_FILENAME_LEN];
+ FILE *f;
+ int temp;
+
+ d = opendir("/sys/class/thermal");
+ if (d == NULL) {
+ return -1;
+ }
+
+ i = 0;
+ while (i<n && (e = readdir(d))) {
+ if (!strcmp(e->d_name, ".") || !strcmp(e->d_name, "..")) {
+ continue;
+ }
+
+ if (e->d_type == DT_REG) {
+ continue;
+ }
+
+#ifdef FLB_HAVE_REGEX
+ if (ctx->name_regex && !flb_regex_match(ctx->name_regex,
+ (unsigned char *) e->d_name,
+ strlen(e->d_name))) {
+ continue;
+ }
+#endif
+
+ if (!strncmp(e->d_name, "thermal_zone", 12)) {
+ strncpy(info[i].name, e->d_name, IN_THERMAL_FILENAME_LEN);
+ if (snprintf(filename, IN_THERMAL_FILENAME_LEN,
+                         "/sys/class/thermal/%s/type", e->d_name) <= 0) {
+ continue;
+ }
+
+ f = fopen(filename, "r");
+ if (!f) {
+ flb_errno();
+ flb_error("[in_thermal] cannot read %s", filename);
+ continue;
+ }
+
+ if (f && fgets(info[i].type, IN_THERMAL_TYPE_LEN, f) &&
+ strlen(info[i].type) > 1) {
+ /* Remove trailing \n */
+ for (j = 0; info[i].type[j]; ++j) {
+ if (info[i].type[j] == '\n') {
+ info[i].type[j] = 0;
+ break;
+ }
+ }
+ fclose(f);
+
+#ifdef FLB_HAVE_REGEX
+ if (ctx->type_regex &&
+ !flb_regex_match(ctx->type_regex,
+ (unsigned char *) info[i].type,
+ strlen(info[i].type))) {
+ continue;
+ }
+#endif
+
+ if (snprintf(filename, IN_THERMAL_FILENAME_LEN,
+ "/sys/class/thermal/%s/temp", e->d_name) <= 0) {
+ continue;
+ }
+ f = fopen(filename, "r");
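+            /* The kernel reports the temperature in millidegrees Celsius */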
+ if (f && fscanf(f, "%d", &temp) == 1) {
+ info[i].temp = temp/1000.0;
+ ++i;
+ }
+ }
+
+ if (f) {
+ fclose(f);
+ }
+ }
+ }
+
+ closedir(d);
+ return i;
+}
+
+/* Init temperature input */
+static int in_thermal_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct flb_in_thermal_config *ctx;
+ struct temp_info info[IN_THERMAL_N_MAX];
+ (void) data;
+
+ /* Allocate space for the configuration */
+ ctx = flb_calloc(1, sizeof(struct flb_in_thermal_config));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = in;
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(in, "could not initialize event encoder");
+ flb_free(ctx);
+
+ return -1;
+ }
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *)ctx);
+ if (ret == -1) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ flb_free(ctx);
+ flb_plg_error(in, "unable to load configuration");
+ return -1;
+ }
+
+ /* Collection time setting */
+ if (ctx->interval_sec <= 0 && ctx->interval_nsec <= 0) {
+ /* Illegal settings. Override them. */
+ ctx->interval_sec = atoi(DEFAULT_INTERVAL_SEC);
+ ctx->interval_nsec = atoi(DEFAULT_INTERVAL_NSEC);
+ }
+
+#ifdef FLB_HAVE_REGEX
+ if (ctx->name_rgx && strcmp(ctx->name_rgx, "") != 0) {
+ ctx->name_regex = flb_regex_create(ctx->name_rgx);
+ if (!ctx->name_regex) {
+ flb_plg_error(ctx->ins, "invalid 'name_regex' config value");
+ }
+ }
+
+ if (ctx->type_rgx && strcmp(ctx->type_rgx, "") != 0) {
+ ctx->type_regex = flb_regex_create(ctx->type_rgx);
+ if (!ctx->type_regex) {
+ flb_plg_error(ctx->ins, "invalid 'type_regex' config value");
+ }
+ }
+#endif
+
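+    /* Probe the thermal zones once at startup to record how many are visible */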
+ ctx->prev_device_num = proc_temperature(ctx, info, IN_THERMAL_N_MAX);
+ if (!ctx->prev_device_num) {
+ flb_plg_warn(ctx->ins, "thermal device file not found");
+ }
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ /* Set our collector based on time, temperature every 1 second */
+ ret = flb_input_set_collector_time(in,
+ in_thermal_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "Could not set collector for temperature input plugin");
+
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ flb_free(ctx);
+
+ return -1;
+ }
+ ctx->coll_fd = ret;
+
+ return 0;
+}
+
+/* Callback to gather temperature */
+int in_thermal_collect(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context)
+{
+ int n;
+ int i;
+ int ret;
+ struct temp_info info[IN_THERMAL_N_MAX];
+ struct flb_in_thermal_config *ctx = in_context;
+
+ (void) config;
+
+ /* Get the current temperature(s) */
+ n = proc_temperature(ctx, info, IN_THERMAL_N_MAX);
+ if (n != ctx->prev_device_num) {
+ flb_plg_info(ctx->ins, "the number of thermal devices changed %d -> %d",
+ ctx->prev_device_num, n);
+ }
+ ctx->prev_device_num = n;
+ if (!n) {
+ return 0;
+ }
+
+ /*
+ * Store the new data into the MessagePack buffer
+ */
+
+ for (i = 0; i < n; ++i) {
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("name"),
+ FLB_LOG_EVENT_CSTRING_VALUE(info[i].name),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("type"),
+ FLB_LOG_EVENT_CSTRING_VALUE(info[i].type),
+
+ FLB_LOG_EVENT_CSTRING_VALUE("temp"),
+ FLB_LOG_EVENT_DOUBLE_VALUE(info[i].temp));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ flb_plg_trace(ctx->ins, "%s temperature %0.2f", info[i].name, info[i].temp);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "log event encoding error : %d", ret);
+
+ ret = -1;
+ }
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ return 0;
+}
+
+static void in_thermal_pause(void *data, struct flb_config *config)
+{
+ struct flb_in_thermal_config *ctx = data;
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+}
+
+static void in_thermal_resume(void *data, struct flb_config *config)
+{
+ struct flb_in_thermal_config *ctx = data;
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+}
+
+static int in_thermal_exit(void *data, struct flb_config *config)
+{
+    (void) config;
+ struct flb_in_thermal_config *ctx = data;
+
+ if (ctx->log_encoder != NULL) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ }
+
+#ifdef FLB_HAVE_REGEX
+ if (ctx && ctx->name_regex) {
+ flb_regex_destroy(ctx->name_regex);
+ }
+ if (ctx && ctx->type_regex) {
+ flb_regex_destroy(ctx->type_regex);
+ }
+#endif
+
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_INT, "interval_sec", DEFAULT_INTERVAL_SEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_thermal_config, interval_sec),
+ "Set the collector interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", DEFAULT_INTERVAL_NSEC,
+ 0, FLB_TRUE, offsetof(struct flb_in_thermal_config, interval_nsec),
+ "Set the collector interval (nanoseconds)"
+ },
+#ifdef FLB_HAVE_REGEX
+ {
+ FLB_CONFIG_MAP_STR, "name_regex", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_thermal_config, name_rgx),
+ "Set thermal name regular expression filter"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "type_regex", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_thermal_config, type_rgx),
+ "Set thermal type regular expression filter"
+ },
+#endif /* FLB_HAVE_REGEX */
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_thermal_plugin = {
+ .name = "thermal",
+ .description = "Thermal",
+ .cb_init = in_thermal_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_thermal_collect,
+ .cb_flush_buf = NULL,
+ .cb_pause = in_thermal_pause,
+ .cb_resume = in_thermal_resume,
+ .cb_exit = in_thermal_exit,
+ .config_map = config_map
+};
diff --git a/src/fluent-bit/plugins/in_thermal/in_thermal.h b/src/fluent-bit/plugins/in_thermal/in_thermal.h
new file mode 100644
index 000000000..17f14ca34
--- /dev/null
+++ b/src/fluent-bit/plugins/in_thermal/in_thermal.h
@@ -0,0 +1,55 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_THERMAL_H
+#define FLB_IN_THERMAL_H
+
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#ifdef FLB_HAVE_REGEX
+#include <fluent-bit/flb_regex.h>
+#endif
+
+/* Temperature Input configuration & context */
+struct flb_in_thermal_config {
+ /* setup */
+ int coll_fd; /* collector id/fd */
+ int interval_sec; /* interval collection time (Second) */
+ int interval_nsec; /* interval collection time (Nanosecond) */
+ int prev_device_num; /* number of thermal devices */
+#ifdef FLB_HAVE_REGEX
+ struct flb_regex *name_regex; /* compiled filter by name */
+ struct flb_regex *type_regex; /* compiled filter by type */
+ flb_sds_t name_rgx; /* optional filter by name */
+ flb_sds_t type_rgx; /* optional filter by type */
+#endif
+ struct flb_input_instance *ins;
+ struct flb_log_event_encoder *log_encoder;
+};
+
+int in_thermal_pre_run(void *in_context, struct flb_config *config);
+int in_thermal_collect(struct flb_input_instance *i_ins,
+ struct flb_config *config, void *in_context);
+void *in_thermal_flush(void *in_context, size_t *size);
+
+extern struct flb_input_plugin in_thermal_plugin;
+
+#endif
diff --git a/src/fluent-bit/plugins/in_udp/CMakeLists.txt b/src/fluent-bit/plugins/in_udp/CMakeLists.txt
new file mode 100644
index 000000000..0b623f169
--- /dev/null
+++ b/src/fluent-bit/plugins/in_udp/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ udp.c
+ udp_conn.c
+ udp_config.c)
+
+FLB_PLUGIN(in_udp "${src}" "")
diff --git a/src/fluent-bit/plugins/in_udp/udp.c b/src/fluent-bit/plugins/in_udp/udp.c
new file mode 100644
index 000000000..ad5a28497
--- /dev/null
+++ b/src/fluent-bit/plugins/in_udp/udp.c
@@ -0,0 +1,197 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_network.h>
+#include <msgpack.h>
+
+#include "udp.h"
+#include "udp_conn.h"
+#include "udp_config.h"
+
+static int in_udp_collect(struct flb_input_instance *in,
+ struct flb_config *config,
+ void *in_context)
+{
+ struct flb_connection *connection;
+ struct flb_in_udp_config *ctx;
+
+ ctx = in_context;
+
+ connection = flb_downstream_conn_get(ctx->downstream);
+
+ if (connection == NULL) {
+ flb_plg_error(ctx->ins, "could get UDP server dummy connection");
+
+ return -1;
+ }
+
+ return udp_conn_event(connection);
+}
+
+/* Initialize plugin */
+static int in_udp_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ struct flb_connection *connection;
+ unsigned short int port;
+ int ret;
+ struct flb_in_udp_config *ctx;
+
+ (void) data;
+
+ /* Allocate space for the configuration */
+ ctx = udp_config_init(in);
+
+ if (ctx == NULL) {
+ return -1;
+ }
+
+ ctx->collector_id = -1;
+ ctx->ins = in;
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ port = (unsigned short int) strtoul(ctx->port, NULL, 10);
+
+ ctx->downstream = flb_downstream_create(FLB_TRANSPORT_UDP,
+ in->flags,
+ ctx->listen,
+ port,
+ in->tls,
+ config,
+ &in->net_setup);
+
+ if (ctx->downstream == NULL) {
+ flb_plg_error(ctx->ins,
+ "could not initialize downstream on %s:%s. Aborting",
+ ctx->listen, ctx->port);
+
+ udp_config_destroy(ctx);
+
+ return -1;
+ }
+
+ flb_input_downstream_set(ctx->downstream, ctx->ins);
+
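+    /* UDP is connectionless, so the downstream exposes a single "dummy"
+     * connection that represents the bound socket.
+     */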
+ connection = flb_downstream_conn_get(ctx->downstream);
+
+ if (connection == NULL) {
+ flb_plg_error(ctx->ins, "could not get UDP server dummy connection");
+
+ udp_config_destroy(ctx);
+
+ return -1;
+ }
+
+ ctx->dummy_conn = udp_conn_add(connection, ctx);
+
+ if (ctx->dummy_conn == NULL) {
+ flb_plg_error(ctx->ins, "could not track UDP server dummy connection");
+
+ udp_config_destroy(ctx);
+
+ return -1;
+ }
+
+    /* Collect upon data available on the server socket */
+ ret = flb_input_set_collector_socket(in,
+ in_udp_collect,
+ ctx->downstream->server_fd,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Could not set collector for IN_UDP input plugin");
+ udp_config_destroy(ctx);
+
+ return -1;
+ }
+
+ ctx->collector_id = ret;
+ ctx->collector_event = flb_input_collector_get_event(ret, in);
+
+    if (ctx->collector_event == NULL) {
+ flb_plg_error(ctx->ins, "Could not get collector event");
+ udp_config_destroy(ctx);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int in_udp_exit(void *data, struct flb_config *config)
+{
+ struct flb_in_udp_config *ctx;
+
+    (void) config;
+
+ ctx = data;
+
+ if (ctx->dummy_conn != NULL) {
+ udp_conn_del(ctx->dummy_conn);
+ }
+
+ udp_config_destroy(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "format", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_udp_config, format_name),
+ "Set the format: json or none"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "separator", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_udp_config, raw_separator),
+ "Set separator"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "chunk_size", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_udp_config, chunk_size_str),
+ "Set the chunk size"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "buffer_size", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_udp_config, buffer_size_str),
+ "Set the buffer size"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "source_address_key", (char *) NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_udp_config, source_address_key),
+ "Key where the source address will be injected"
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_udp_plugin = {
+ .name = "udp",
+ .description = "UDP",
+ .cb_init = in_udp_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_udp_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_udp_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET_SERVER,
+};
diff --git a/src/fluent-bit/plugins/in_udp/udp.h b/src/fluent-bit/plugins/in_udp/udp.h
new file mode 100644
index 000000000..1a7bfce30
--- /dev/null
+++ b/src/fluent-bit/plugins/in_udp/udp.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_UDP_H
+#define FLB_IN_UDP_H
+
+#define FLB_UDP_FMT_JSON 0 /* default */
+#define FLB_UDP_FMT_NONE 1 /* no format, use delimiters */
+
+#include <fluent-bit/flb_downstream.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+struct udp_conn;
+
+struct flb_in_udp_config {
+ struct mk_event *collector_event;
+ flb_sds_t format_name; /* Data format name */
+ int format; /* Data format */
+ size_t buffer_size; /* Buffer size for each reader */
+ flb_sds_t buffer_size_str; /* Buffer size in string form */
+ size_t chunk_size; /* Chunk allocation size */
+ flb_sds_t chunk_size_str; /* Chunk size in string form */
+ char *listen; /* Listen interface */
+ char *port; /* Port */
+    flb_sds_t raw_separator;            /* Unescaped string delimiter       */
+ flb_sds_t separator; /* String delimiter */
+    flb_sds_t source_address_key;      /* Key used to inject the source address */
+ int collector_id; /* Listener collector id */
+ struct flb_downstream *downstream; /* Client manager */
+ struct udp_conn *dummy_conn; /* Datagram dummy connection */
+    struct flb_input_instance *ins;     /* Input plugin instance */
+ struct flb_log_event_encoder *log_encoder;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_udp/udp_config.c b/src/fluent-bit/plugins/in_udp/udp_config.c
new file mode 100644
index 000000000..ad2995490
--- /dev/null
+++ b/src/fluent-bit/plugins/in_udp/udp_config.c
@@ -0,0 +1,155 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_unescape.h>
+
+#include "udp.h"
+#include "udp_conn.h"
+#include "udp_config.h"
+
+#include <stdlib.h>
+
+struct flb_in_udp_config *udp_config_init(struct flb_input_instance *ins)
+{
+ int ret;
+ int len;
+ char port[16];
+ char *out;
+ struct flb_in_udp_config *ctx;
+
+ /* Allocate plugin context */
+ ctx = flb_calloc(1, sizeof(struct flb_in_udp_config));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ ctx->format = FLB_UDP_FMT_JSON;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to load configuration");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Data format (expected payload) */
+ if (ctx->format_name) {
+ if (strcasecmp(ctx->format_name, "json") == 0) {
+ ctx->format = FLB_UDP_FMT_JSON;
+ }
+ else if (strcasecmp(ctx->format_name, "none") == 0) {
+ ctx->format = FLB_UDP_FMT_NONE;
+ }
+ else {
+ flb_plg_error(ctx->ins, "unrecognized format value '%s'", ctx->format_name);
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+
+ /* String separator used to split records when using 'format none' */
+ if (ctx->raw_separator) {
+ len = strlen(ctx->raw_separator);
+ out = flb_malloc(len + 1);
+ if (!out) {
+ flb_errno();
+ flb_free(ctx);
+ return NULL;
+ }
+ ret = flb_unescape_string(ctx->raw_separator, len, &out);
+ if (ret <= 0) {
+ flb_plg_error(ctx->ins, "invalid separator");
+ flb_free(out);
+ flb_free(ctx);
+ return NULL;
+ }
+
+ ctx->separator = flb_sds_create_len(out, ret);
+ if (!ctx->separator) {
+ flb_free(out);
+ flb_free(ctx);
+ return NULL;
+ }
+ flb_free(out);
+ }
+ if (!ctx->separator) {
+ ctx->separator = flb_sds_create_len("\n", 1);
+ }
+
+ /* Listen interface (if not set, defaults to 0.0.0.0:5170) */
+ flb_input_net_default_listener("0.0.0.0", 5170, ins);
+ ctx->listen = ins->host.listen;
+ snprintf(port, sizeof(port) - 1, "%d", ins->host.port);
+ ctx->port = flb_strdup(port);
+
+ /* Chunk size */
+ if (ctx->chunk_size_str) {
+ /* Convert KB unit to Bytes */
+ ctx->chunk_size = (atoi(ctx->chunk_size_str) * 1024);
+ } else {
+ ctx->chunk_size = atoi(FLB_IN_UDP_CHUNK);
+ }
+
+ /* Buffer size */
+ if (!ctx->buffer_size_str) {
+ ctx->buffer_size = ctx->chunk_size;
+ }
+ else {
+ /* Convert KB unit to Bytes */
+ ctx->buffer_size = (atoi(ctx->buffer_size_str) * 1024);
+ }
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(ctx->ins, "could not initialize event encoder");
+ udp_config_destroy(ctx);
+
+ ctx = NULL;
+ }
+
+ return ctx;
+}
+
+int udp_config_destroy(struct flb_in_udp_config *ctx)
+{
+ if (ctx->log_encoder != NULL) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ }
+
+ if (ctx->collector_id != -1) {
+ flb_input_collector_delete(ctx->collector_id, ctx->ins);
+
+ ctx->collector_id = -1;
+ }
+
+ if (ctx->downstream != NULL) {
+ flb_downstream_destroy(ctx->downstream);
+ }
+
+ flb_sds_destroy(ctx->separator);
+ flb_free(ctx->port);
+ flb_free(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_udp/udp_config.h b/src/fluent-bit/plugins/in_udp/udp_config.h
new file mode 100644
index 000000000..dcddb74a7
--- /dev/null
+++ b/src/fluent-bit/plugins/in_udp/udp_config.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_UDP_CONFIG_H
+#define FLB_IN_UDP_CONFIG_H
+
+#include "udp.h"
+
+struct flb_in_udp_config *udp_config_init(struct flb_input_instance *i_ins);
+int udp_config_destroy(struct flb_in_udp_config *config);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_udp/udp_conn.c b/src/fluent-bit/plugins/in_udp/udp_conn.c
new file mode 100644
index 000000000..d8cc4d5e6
--- /dev/null
+++ b/src/fluent-bit/plugins/in_udp/udp_conn.c
@@ -0,0 +1,500 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_engine.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_error.h>
+
+#include "udp.h"
+#include "udp_conn.h"
+
+static inline void consume_bytes(char *buf, int bytes, int length)
+{
+ memmove(buf, buf + bytes, length - bytes);
+}
+
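+/*
+ * Expand a serialized msgpack map with one extra key/value pair; used here to
+ * inject the datagram source address under 'source_address_key'.
+ */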
+static int append_message_to_record_data(char **result_buffer,
+ size_t *result_size,
+ flb_sds_t message_key_name,
+ char *base_object_buffer,
+ size_t base_object_size,
+ char *message_buffer,
+ size_t message_size,
+ int message_type)
+{
+ int result = FLB_MAP_NOT_MODIFIED;
+ char *modified_data_buffer;
+ int modified_data_size;
+ msgpack_object_kv *new_map_entries[1];
+ msgpack_object_kv message_entry;
+ *result_buffer = NULL;
+ *result_size = 0;
+ modified_data_buffer = NULL;
+
+ if (message_key_name != NULL) {
+ new_map_entries[0] = &message_entry;
+
+ message_entry.key.type = MSGPACK_OBJECT_STR;
+ message_entry.key.via.str.size = flb_sds_len(message_key_name);
+ message_entry.key.via.str.ptr = message_key_name;
+
+ if (message_type == MSGPACK_OBJECT_BIN) {
+ message_entry.val.type = MSGPACK_OBJECT_BIN;
+ message_entry.val.via.bin.size = message_size;
+ message_entry.val.via.bin.ptr = message_buffer;
+ }
+ else if (message_type == MSGPACK_OBJECT_STR) {
+ message_entry.val.type = MSGPACK_OBJECT_STR;
+ message_entry.val.via.str.size = message_size;
+ message_entry.val.via.str.ptr = message_buffer;
+ }
+ else {
+ result = FLB_MAP_EXPANSION_INVALID_VALUE_TYPE;
+ }
+
+ if (result == FLB_MAP_NOT_MODIFIED) {
+ result = flb_msgpack_expand_map(base_object_buffer,
+ base_object_size,
+ new_map_entries, 1,
+ &modified_data_buffer,
+ &modified_data_size);
+ if (result == 0) {
+ result = FLB_MAP_EXPAND_SUCCESS;
+ }
+ else {
+ result = FLB_MAP_EXPANSION_ERROR;
+ }
+ }
+ }
+
+ if (result == FLB_MAP_EXPAND_SUCCESS) {
+ *result_buffer = modified_data_buffer;
+ *result_size = modified_data_size;
+ }
+
+ return result;
+}
+
+static inline int process_pack(struct udp_conn *conn,
+ char *pack, size_t size)
+{
+    int ret = FLB_EVENT_ENCODER_SUCCESS;
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object entry;
+ msgpack_sbuffer sbuf;
+ msgpack_packer pck;
+ struct flb_in_udp_config *ctx;
+ char *appended_address_buffer;
+ size_t appended_address_size;
+ char *source_address;
+ int i;
+ int len;
+
+ ctx = conn->ctx;
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ /* First pack the results, iterate concatenated messages */
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, pack, size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ entry = result.data;
+
+ appended_address_buffer = NULL;
+ source_address = NULL;
+
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ if (ctx->source_address_key != NULL) {
+ source_address = flb_connection_get_remote_address(conn->connection);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ if (entry.type == MSGPACK_OBJECT_MAP) {
+ if (source_address != NULL) {
+ msgpack_sbuffer_init(&sbuf);
+ msgpack_packer_init(&pck, &sbuf, msgpack_sbuffer_write);
+
+ len = entry.via.map.size;
+ msgpack_pack_map(&pck, len);
+
+ for (i=0; i<len; i++) {
+ msgpack_pack_object(&pck, entry.via.map.ptr[i].key);
+ msgpack_pack_object(&pck, entry.via.map.ptr[i].val);
+ }
+
+ ret = append_message_to_record_data(&appended_address_buffer,
+ &appended_address_size,
+ ctx->source_address_key,
+ sbuf.data,
+ sbuf.size,
+ source_address,
+ strlen(source_address),
+ MSGPACK_OBJECT_STR);
+ msgpack_sbuffer_destroy(&sbuf);
+ }
+
+ if (ret == FLB_MAP_EXPANSION_ERROR) {
+ flb_plg_debug(ctx->ins, "error expanding source_address : %d", ret);
+ }
+
+ if (appended_address_buffer != NULL) {
+ ret = flb_log_event_encoder_set_body_from_raw_msgpack(
+ ctx->log_encoder, appended_address_buffer, appended_address_size);
+ }
+ else {
+ ret = flb_log_event_encoder_set_body_from_msgpack_object(
+ ctx->log_encoder, &entry);
+ }
+ }
+ else if (entry.type == MSGPACK_OBJECT_ARRAY) {
+ if (source_address != NULL) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("msg"),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&entry),
+ FLB_LOG_EVENT_CSTRING_VALUE(ctx->source_address_key),
+ FLB_LOG_EVENT_CSTRING_VALUE(source_address));
+ }
+ else {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("msg"),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&entry));
+ }
+ }
+ else {
+ ret = FLB_EVENT_ENCODER_ERROR_INVALID_VALUE_TYPE;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ if (appended_address_buffer != NULL) {
+ flb_free(appended_address_buffer);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ break;
+ }
+ }
+ }
+
+ msgpack_unpacked_destroy(&result);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(conn->ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "log event encoding error : %d", ret);
+
+ ret = -1;
+ }
+
+ return ret;
+}
+
+/* Process a JSON payload, return the number of processed bytes */
+static ssize_t parse_payload_json(struct udp_conn *conn)
+{
+ int ret;
+ int out_size;
+ char *pack;
+
+ ret = flb_pack_json_state(conn->buf_data, conn->buf_len,
+ &pack, &out_size, &conn->pack_state);
+ if (ret == FLB_ERR_JSON_PART) {
+ flb_plg_debug(conn->ins, "JSON incomplete, waiting for more data...");
+ return 0;
+ }
+ else if (ret == FLB_ERR_JSON_INVAL) {
+ flb_plg_warn(conn->ins, "invalid JSON message, skipping");
+ conn->buf_len = 0;
+ conn->pack_state.multiple = FLB_TRUE;
+ return -1;
+ }
+ else if (ret == -1) {
+ return -1;
+ }
+
+ /* Process the packaged JSON and return the last byte used */
+ process_pack(conn, pack, out_size);
+ flb_free(pack);
+
+ return conn->pack_state.last_byte;
+}
+
+/*
+ * Process a raw text payload, using the configured separator string to split
+ * records; returns the number of processed bytes.
+ */
+static ssize_t parse_payload_none(struct udp_conn *conn)
+{
+ int ret;
+ int len;
+ int sep_len;
+ size_t consumed = 0;
+ char *buf;
+ char *s;
+ char *separator;
+ struct flb_in_udp_config *ctx;
+
+ ctx = conn->ctx;
+
+ separator = conn->ctx->separator;
+ sep_len = flb_sds_len(conn->ctx->separator);
+
+ buf = conn->buf_data;
+ ret = FLB_EVENT_ENCODER_SUCCESS;
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ while ((s = strstr(buf, separator))) {
+ len = (s - buf);
+ if (len == 0) {
+ break;
+ }
+ else if (len > 0) {
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("log"),
+ FLB_LOG_EVENT_STRING_VALUE(buf, len));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ break;
+ }
+
+            consumed += len + sep_len;  /* account for the full separator length */
+ buf += len + sep_len;
+ }
+ else {
+ break;
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(conn->ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ }
+ else {
+ flb_plg_error(ctx->ins, "log event encoding error : %d", ret);
+ }
+
+ return consumed;
+}
+
+/* Callback invoked every time an event is triggered for a connection */
+int udp_conn_event(void *data)
+{
+ int bytes;
+ int available;
+ int size;
+ ssize_t ret_payload = -1;
+ char *tmp;
+ struct udp_conn *conn;
+ struct flb_connection *connection;
+ struct flb_in_udp_config *ctx;
+
+ connection = (struct flb_connection *) data;
+
+ conn = connection->user_data;
+
+ ctx = conn->ctx;
+
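+    /* Each datagram stands on its own: discard any JSON parser state and
+     * buffered bytes left over from the previous event before reading.
+     */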
+ if (ctx->format == FLB_UDP_FMT_JSON &&
+ conn->buf_len > 0) {
+ flb_pack_state_reset(&conn->pack_state);
+ flb_pack_state_init(&conn->pack_state);
+
+ conn->pack_state.multiple = FLB_TRUE;
+ }
+
+ conn->buf_len = 0;
+
+ available = (conn->buf_size - conn->buf_len) - 1;
+ if (available < 1) {
+ if (conn->buf_size + ctx->chunk_size > ctx->buffer_size) {
+ flb_plg_trace(ctx->ins,
+                          "fd=%i incoming data exceeds limit (%zu KB)",
+ connection->fd, (ctx->buffer_size / 1024));
+ return -1;
+ }
+
+ size = conn->buf_size + ctx->chunk_size;
+ tmp = flb_realloc(conn->buf_data, size);
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+ flb_plg_trace(ctx->ins, "fd=%i buffer realloc %i -> %i",
+ connection->fd, conn->buf_size, size);
+
+ conn->buf_data = tmp;
+ conn->buf_size = size;
+ available = (conn->buf_size - conn->buf_len) - 1;
+ }
+
+ /* Read data */
+ bytes = flb_io_net_read(connection,
+ (void *) &conn->buf_data[conn->buf_len],
+ available);
+
+ if (bytes <= 0) {
+ return -1;
+ }
+
+ flb_plg_trace(ctx->ins, "read()=%i pre_len=%i now_len=%i",
+ bytes, conn->buf_len, conn->buf_len + bytes);
+ conn->buf_len += bytes;
+ conn->buf_data[conn->buf_len] = '\0';
+
+ /* Strip CR or LF if found at first byte */
+ if (conn->buf_data[0] == '\r' || conn->buf_data[0] == '\n') {
+ /* Skip message with one byte with CR or LF */
+ flb_plg_trace(ctx->ins, "skip one byte message with ASCII code=%i",
+ conn->buf_data[0]);
+ consume_bytes(conn->buf_data, 1, conn->buf_len);
+ conn->buf_len--;
+ conn->buf_data[conn->buf_len] = '\0';
+ }
+
+ /* JSON Format handler */
+ if (ctx->format == FLB_UDP_FMT_JSON) {
+ ret_payload = parse_payload_json(conn);
+ if (ret_payload == 0) {
+ /* Incomplete JSON message, we need more data */
+ return -1;
+ }
+ else if (ret_payload == -1) {
+ flb_pack_state_reset(&conn->pack_state);
+ flb_pack_state_init(&conn->pack_state);
+ conn->pack_state.multiple = FLB_TRUE;
+ return -1;
+ }
+ }
+ else if (ctx->format == FLB_UDP_FMT_NONE) {
+ ret_payload = parse_payload_none(conn);
+ if (ret_payload == 0) {
+ return -1;
+ }
+ else if (ret_payload == -1) {
+ conn->buf_len = 0;
+ return -1;
+ }
+ }
+
+ consume_bytes(conn->buf_data, ret_payload, conn->buf_len);
+ conn->buf_len -= ret_payload;
+ conn->buf_data[conn->buf_len] = '\0';
+
+ if (ctx->format == FLB_UDP_FMT_JSON) {
+ jsmn_init(&conn->pack_state.parser);
+ conn->pack_state.tokens_count = 0;
+ conn->pack_state.last_byte = 0;
+ conn->pack_state.buf_len = 0;
+ }
+
+ return bytes;
+}
+
+struct udp_conn *udp_conn_add(struct flb_connection *connection,
+ struct flb_in_udp_config *ctx)
+{
+ struct udp_conn *conn;
+
+ conn = flb_malloc(sizeof(struct udp_conn));
+ if (!conn) {
+ flb_errno();
+ return NULL;
+ }
+
+ conn->connection = connection;
+
+ /* Set data for the event-loop */
+
+ MK_EVENT_NEW(&connection->event);
+
+ connection->user_data = conn;
+ connection->event.type = FLB_ENGINE_EV_CUSTOM;
+ connection->event.handler = udp_conn_event;
+
+ /* Connection info */
+ conn->ctx = ctx;
+ conn->buf_len = 0;
+
+ conn->buf_data = flb_malloc(ctx->chunk_size);
+ if (!conn->buf_data) {
+ flb_errno();
+
+ flb_plg_error(ctx->ins, "could not allocate new connection");
+ flb_free(conn);
+
+ return NULL;
+ }
+ conn->buf_size = ctx->chunk_size;
+ conn->ins = ctx->ins;
+
+ /* Initialize JSON parser */
+ if (ctx->format == FLB_UDP_FMT_JSON) {
+ flb_pack_state_init(&conn->pack_state);
+ conn->pack_state.multiple = FLB_TRUE;
+ }
+
+ return conn;
+}
+
+int udp_conn_del(struct udp_conn *conn)
+{
+ struct flb_in_udp_config *ctx;
+
+ ctx = conn->ctx;
+
+ if (ctx->format == FLB_UDP_FMT_JSON) {
+ flb_pack_state_reset(&conn->pack_state);
+ }
+
+ flb_free(conn->buf_data);
+ flb_free(conn);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_udp/udp_conn.h b/src/fluent-bit/plugins/in_udp/udp_conn.h
new file mode 100644
index 000000000..25b8ef3ef
--- /dev/null
+++ b/src/fluent-bit/plugins/in_udp/udp_conn.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_UDP_CONN_H
+#define FLB_IN_UDP_CONN_H
+
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_connection.h>
+
+#define FLB_IN_UDP_CHUNK "32768"
+
+#define FLB_MAP_EXPAND_SUCCESS 0
+#define FLB_MAP_NOT_MODIFIED -1
+#define FLB_MAP_EXPANSION_ERROR -2
+#define FLB_MAP_EXPANSION_INVALID_VALUE_TYPE -3
+
+struct udp_conn_stream {
+ char *tag;
+ size_t tag_len;
+};
+
+/* Represents a connection */
+struct udp_conn {
+ /* Buffer */
+ char *buf_data; /* Buffer data */
+ int buf_len; /* Data length */
+ int buf_size; /* Buffer size */
+
+ struct flb_input_instance *ins; /* Parent plugin instance */
+ struct flb_in_udp_config *ctx; /* Plugin configuration context */
+ struct flb_pack_state pack_state; /* Internal JSON parser */
+ struct flb_connection *connection;
+
+ struct mk_list _head;
+};
+
+struct udp_conn *udp_conn_add(struct flb_connection *connection, struct flb_in_udp_config *ctx);
+int udp_conn_del(struct udp_conn *conn);
+int udp_conn_event(void *data);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_unix_socket/CMakeLists.txt b/src/fluent-bit/plugins/in_unix_socket/CMakeLists.txt
new file mode 100644
index 000000000..f07027449
--- /dev/null
+++ b/src/fluent-bit/plugins/in_unix_socket/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ unix_socket.c
+ unix_socket_conn.c
+ unix_socket_config.c)
+
+FLB_PLUGIN(in_unix_socket "${src}" "")
diff --git a/src/fluent-bit/plugins/in_unix_socket/unix_socket.c b/src/fluent-bit/plugins/in_unix_socket/unix_socket.c
new file mode 100644
index 000000000..9e7b19110
--- /dev/null
+++ b/src/fluent-bit/plugins/in_unix_socket/unix_socket.c
@@ -0,0 +1,320 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_network.h>
+#include <msgpack.h>
+
+#include <sys/stat.h>
+
+#include "unix_socket.h"
+#include "unix_socket_conn.h"
+#include "unix_socket_config.h"
+
+/*
+ * For a server event, a collection event means a new client has arrived; we
+ * accept the connection and create a new unix socket connection instance that
+ * will wait for JSON map messages.
+ */
+static int in_unix_socket_collect(struct flb_input_instance *in,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_connection *connection;
+ struct unix_socket_conn *conn;
+ struct flb_in_unix_socket_config *ctx;
+
+ ctx = in_context;
+
+ connection = flb_downstream_conn_get(ctx->downstream);
+
+ if (connection == NULL) {
+ flb_plg_error(ctx->ins, "could not accept new connection");
+
+ return -1;
+ }
+
+ if (ctx->dgram_mode_flag) {
+ return unix_socket_conn_event(connection);
+ }
+ else {
+ flb_plg_trace(ctx->ins, "new UNIX SOCKET connection arrived FD=%i", connection->fd);
+
+ conn = unix_socket_conn_add(connection, ctx);
+
+ if (conn == NULL) {
+ flb_plg_error(ctx->ins, "could not accept new connection");
+
+ flb_downstream_conn_release(connection);
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int remove_existing_socket_file(char *socket_path)
+{
+ struct stat file_data;
+ int result;
+
+ result = stat(socket_path, &file_data);
+
+ if (result == -1) {
+ if (errno == ENOENT) {
+ return 0;
+ }
+
+ flb_errno();
+
+ return -1;
+ }
+
+ if (S_ISSOCK(file_data.st_mode) == 0) {
+ return -2;
+ }
+
+ result = unlink(socket_path);
+
+ if (result != 0) {
+ return -3;
+ }
+
+ return 0;
+}
+
+/* Initialize plugin */
+static int in_unix_socket_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ struct flb_connection *connection;
+ int mode;
+ struct flb_in_unix_socket_config *ctx;
+ int ret;
+ struct flb_tls *tls;
+
+ (void) data;
+
+ ctx = unix_socket_config_init(in);
+
+ if (ctx == NULL) {
+ return -1;
+ }
+
+ ctx->collector_id = -1;
+ ctx->ins = in;
+
+ mk_list_init(&ctx->connections);
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ ret = remove_existing_socket_file(ctx->listen);
+
+ if (ret != 0) {
+ if (ret == -2) {
+ flb_plg_error(ctx->ins,
+ "%s exists and it is not a unix socket. Aborting",
+ ctx->listen);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "could not remove existing unix socket %s. Aborting",
+ ctx->listen);
+ }
+
+ unix_socket_config_destroy(ctx);
+
+ return -1;
+ }
+
+ mode = FLB_TRANSPORT_UNIX_STREAM;
+
+ if (ctx->socket_mode != NULL &&
+ strcasecmp(ctx->socket_mode, "DGRAM") == 0) {
+ mode = FLB_TRANSPORT_UNIX_DGRAM;
+ ctx->dgram_mode_flag = FLB_TRUE;
+ tls = NULL;
+ }
+ else {
+ tls = in->tls;
+ }
+
+ ctx->downstream = flb_downstream_create(mode,
+ in->flags,
+ ctx->listen,
+ 0,
+ tls,
+ config,
+ &in->net_setup);
+
+ if (ctx->downstream == NULL) {
+ flb_plg_error(ctx->ins,
+ "could not initialize downstream on unix://%s. Aborting",
+ ctx->listen);
+
+ unix_socket_config_destroy(ctx);
+
+ return -1;
+ }
+
+ flb_input_downstream_set(ctx->downstream, ctx->ins);
+
+ if (ctx->socket_permissions != NULL) {
+ ret = chmod(ctx->listen, ctx->socket_acl);
+
+ if (ret != 0) {
+ flb_errno();
+
+ flb_plg_error(ctx->ins, "cannot set permission on '%s' to %04o",
+ ctx->listen, ctx->socket_acl);
+
+ unix_socket_config_destroy(ctx);
+
+ return -1;
+ }
+ }
+
+ if (ctx->dgram_mode_flag) {
+ connection = flb_downstream_conn_get(ctx->downstream);
+
+ if (connection == NULL) {
+ flb_plg_error(ctx->ins, "could not get DGRAM server dummy "
+ "connection");
+
+ unix_socket_config_destroy(ctx);
+
+ return -1;
+ }
+
+ ctx->dummy_conn = unix_socket_conn_add(connection, ctx);
+
+ if (ctx->dummy_conn == NULL) {
+ flb_plg_error(ctx->ins, "could not track DGRAM server dummy "
+ "connection");
+
+ unix_socket_config_destroy(ctx);
+
+ return -1;
+ }
+ }
+
+ /* Collect upon data available on the server socket */
+ ret = flb_input_set_collector_socket(in,
+ in_unix_socket_collect,
+ ctx->downstream->server_fd,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "Could not set collector for IN_UNIX_SOCKET "
+ "input plugin");
+
+ unix_socket_config_destroy(ctx);
+
+ return -1;
+ }
+
+ ctx->collector_id = ret;
+ ctx->collector_event = flb_input_collector_get_event(ret, in);
+
+ if (ctx->collector_event == NULL) {
+ flb_plg_error(ctx->ins, "Could not get collector event");
+
+ unix_socket_config_destroy(ctx);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int in_unix_socket_exit(void *data, struct flb_config *config)
+{
+ struct mk_list *head;
+ struct unix_socket_conn *conn;
+ struct flb_in_unix_socket_config *ctx;
+ struct mk_list *tmp;
+
+ (void) config;
+
+ ctx = data;
+
+ mk_list_foreach_safe(head, tmp, &ctx->connections) {
+ conn = mk_list_entry(head, struct unix_socket_conn, _head);
+
+ unix_socket_conn_del(conn);
+ }
+
+ unix_socket_config_destroy(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "socket_mode", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_unix_socket_config, socket_mode),
+ "Unix socket mode : STREAM or DGRAM"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "socket_path", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_unix_socket_config, listen),
+ "Unix socket path"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "socket_permissions", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_unix_socket_config, socket_permissions),
+ "Set the permissions for the UNIX socket"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "format", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_unix_socket_config, format_name),
+ "Set the format: json or none"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "separator", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_unix_socket_config, raw_separator),
+ "Set separator"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "chunk_size", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_unix_socket_config, chunk_size_str),
+ "Set the chunk size"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "buffer_size", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_in_unix_socket_config, buffer_size_str),
+ "Set the buffer size"
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_input_plugin in_unix_socket_plugin = {
+ .name = "unix_socket",
+ .description = "UNIX_SOCKET",
+ .cb_init = in_unix_socket_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_unix_socket_collect,
+ .cb_flush_buf = NULL,
+ .cb_exit = in_unix_socket_exit,
+ .config_map = config_map,
+ .flags = FLB_INPUT_NET_SERVER | FLB_IO_OPT_TLS
+};
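For reference, a record can be pushed into this input by any client that writes to the configured socket_path. Below is a minimal sketch, assuming a hypothetical path /tmp/fluent-bit.sock and the default json format; it is illustrative only and not part of the patch above.

/* Illustrative client: writes one JSON record to the unix_socket input.
 * The socket path and payload are assumptions, not part of this patch. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
    struct sockaddr_un addr;
    const char *msg = "{\"log\": \"hello from client\"}\n";
    int fd;

    fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd == -1) {
        perror("socket");
        return 1;
    }

    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    strncpy(addr.sun_path, "/tmp/fluent-bit.sock", sizeof(addr.sun_path) - 1);

    if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) {
        perror("connect");
        close(fd);
        return 1;
    }

    /* the plugin parses this payload with its internal JSON state machine */
    if (write(fd, msg, strlen(msg)) < 0) {
        perror("write");
    }

    close(fd);
    return 0;
}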
diff --git a/src/fluent-bit/plugins/in_unix_socket/unix_socket.h b/src/fluent-bit/plugins/in_unix_socket/unix_socket.h
new file mode 100644
index 000000000..08642f4fa
--- /dev/null
+++ b/src/fluent-bit/plugins/in_unix_socket/unix_socket.h
@@ -0,0 +1,55 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_UNIX_SOCKET_H
+#define FLB_IN_UNIX_SOCKET_H
+
+#define FLB_UNIX_SOCKET_FMT_JSON 0 /* default */
+#define FLB_UNIX_SOCKET_FMT_NONE 1 /* no format, use delimiters */
+
+#include <fluent-bit/flb_downstream.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <msgpack.h>
+
+struct flb_in_unix_socket_config {
+ int dgram_mode_flag; /* Stateless mode flag (UDP-like) */
+ struct mk_event *collector_event;
+ flb_sds_t format_name; /* Data format name */
+ int format; /* Data format */
+ size_t buffer_size; /* Buffer size for each reader */
+ flb_sds_t buffer_size_str; /* Buffer size in string form */
+ size_t chunk_size; /* Chunk allocation size */
+ flb_sds_t chunk_size_str; /* Chunk size in string form */
+ char *listen; /* Unix socket path */
+ char *socket_permissions; /* Unix socket ACL as string */
+ flb_sds_t socket_mode; /* Unix socket mode (STREAM or DGRAM) */
+ int socket_acl; /* Unix socket ACL */
+ flb_sds_t raw_separator; /* Unescaped string delimiter */
+ flb_sds_t separator; /* String delimiter */
+ int collector_id; /* Listener collector id */
+ struct flb_downstream *downstream; /* Client manager */
+ struct unix_socket_conn *dummy_conn;/* Datagram dummy connection */
+ struct mk_list connections; /* List of active connections */
+ struct flb_input_instance *ins; /* Input plugin instance */
+ struct flb_log_event_encoder *log_encoder;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/in_unix_socket/unix_socket_config.c b/src/fluent-bit/plugins/in_unix_socket/unix_socket_config.c
new file mode 100644
index 000000000..e14d8aa1f
--- /dev/null
+++ b/src/fluent-bit/plugins/in_unix_socket/unix_socket_config.c
@@ -0,0 +1,153 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_unescape.h>
+
+#include "unix_socket.h"
+#include "unix_socket_conn.h"
+#include "unix_socket_config.h"
+
+#include <stdlib.h>
+
+struct flb_in_unix_socket_config *unix_socket_config_init(struct flb_input_instance *ins)
+{
+ int ret;
+ int len;
+ char *out;
+ struct flb_in_unix_socket_config *ctx;
+
+ /* Allocate plugin context */
+ ctx = flb_calloc(1, sizeof(struct flb_in_unix_socket_config));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ ctx->format = FLB_UNIX_SOCKET_FMT_JSON;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to load configuration");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ if (ctx->socket_permissions != NULL) {
+ ctx->socket_acl = strtol(ctx->socket_permissions, NULL, 8);
+ ctx->socket_acl &= 07777;
+ }
+
+
+ /* Data format (expected payload) */
+ if (ctx->format_name) {
+ if (strcasecmp(ctx->format_name, "json") == 0) {
+ ctx->format = FLB_UNIX_SOCKET_FMT_JSON;
+ }
+ else if (strcasecmp(ctx->format_name, "none") == 0) {
+ ctx->format = FLB_UNIX_SOCKET_FMT_NONE;
+ }
+ else {
+ flb_plg_error(ctx->ins, "unrecognized format value '%s'", ctx->format_name);
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+
+ /* String separator used to split records when using 'format none' */
+ if (ctx->raw_separator) {
+ len = strlen(ctx->raw_separator);
+ out = flb_malloc(len + 1);
+ if (!out) {
+ flb_errno();
+ flb_free(ctx);
+ return NULL;
+ }
+ ret = flb_unescape_string(ctx->raw_separator, len, &out);
+ if (ret <= 0) {
+ flb_plg_error(ctx->ins, "invalid separator");
+ flb_free(out);
+ flb_free(ctx);
+ return NULL;
+ }
+
+ ctx->separator = flb_sds_create_len(out, ret);
+ if (!ctx->separator) {
+ flb_free(out);
+ flb_free(ctx);
+ return NULL;
+ }
+ flb_free(out);
+ }
+ if (!ctx->separator) {
+ ctx->separator = flb_sds_create_len("\n", 1);
+ }
+
+ /* Chunk size */
+ if (ctx->chunk_size_str) {
+ /* Convert KB unit to Bytes */
+ ctx->chunk_size = (atoi(ctx->chunk_size_str) * 1024);
+ } else {
+ ctx->chunk_size = atoi(FLB_IN_UNIX_SOCKET_CHUNK);
+ }
+
+ /* Buffer size */
+ if (!ctx->buffer_size_str) {
+ ctx->buffer_size = ctx->chunk_size;
+ }
+ else {
+ /* Convert KB unit to Bytes */
+ ctx->buffer_size = (atoi(ctx->buffer_size_str) * 1024);
+ }
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(ctx->ins, "could not initialize event encoder");
+ unix_socket_config_destroy(ctx);
+
+ ctx = NULL;
+ }
+
+ return ctx;
+}
+
+int unix_socket_config_destroy(struct flb_in_unix_socket_config *ctx)
+{
+ if (ctx->log_encoder != NULL) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ }
+
+ if (ctx->collector_id != -1) {
+ flb_input_collector_delete(ctx->collector_id, ctx->ins);
+
+ ctx->collector_id = -1;
+ }
+
+ if (ctx->downstream != NULL) {
+ flb_downstream_destroy(ctx->downstream);
+ }
+
+ flb_sds_destroy(ctx->separator);
+ flb_free(ctx);
+
+ return 0;
+}
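The option handling above reduces to two small conversions: the socket_permissions string is parsed as octal and masked down to a chmod() mode, while chunk_size and buffer_size are given in KB and converted to bytes. A standalone sketch with illustrative values:

/* Standalone sketch of the conversions performed by unix_socket_config_init:
 * an octal permission string becomes a chmod() mode, and size options given
 * in KB become bytes. The values are illustrative. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *socket_permissions = "0660";
    const char *chunk_size_str = "64";    /* KB */

    int socket_acl = (int) strtol(socket_permissions, NULL, 8) & 07777;
    size_t chunk_size = atoi(chunk_size_str) * 1024;

    printf("mode=%04o chunk_size=%zu bytes\n",
           (unsigned int) socket_acl, chunk_size);
    return 0;
}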
diff --git a/src/fluent-bit/plugins/in_unix_socket/unix_socket_config.h b/src/fluent-bit/plugins/in_unix_socket/unix_socket_config.h
new file mode 100644
index 000000000..93e07075f
--- /dev/null
+++ b/src/fluent-bit/plugins/in_unix_socket/unix_socket_config.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_UNIX_SOCKET_CONFIG_H
+#define FLB_IN_UNIX_SOCKET_CONFIG_H
+
+#include "unix_socket.h"
+
+struct flb_in_unix_socket_config *unix_socket_config_init(struct flb_input_instance *i_ins);
+int unix_socket_config_destroy(struct flb_in_unix_socket_config *config);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_unix_socket/unix_socket_conn.c b/src/fluent-bit/plugins/in_unix_socket/unix_socket_conn.c
new file mode 100644
index 000000000..472cbbeb2
--- /dev/null
+++ b/src/fluent-bit/plugins/in_unix_socket/unix_socket_conn.c
@@ -0,0 +1,433 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_engine.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_error.h>
+
+#include "unix_socket.h"
+#include "unix_socket_conn.h"
+
+static inline void consume_bytes(char *buf, int bytes, int length)
+{
+ memmove(buf, buf + bytes, length - bytes);
+}
+
+static inline int process_pack(struct unix_socket_conn *conn,
+ char *pack, size_t size)
+{
+ int ret = FLB_EVENT_ENCODER_SUCCESS;
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object entry;
+ struct flb_in_unix_socket_config *ctx;
+
+ ctx = (struct flb_in_unix_socket_config *) conn->ctx;
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ /* First pack the results, iterate concatenated messages */
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, pack, size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ entry = result.data;
+
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ if (entry.type == MSGPACK_OBJECT_MAP) {
+ ret = flb_log_event_encoder_set_body_from_msgpack_object(
+ ctx->log_encoder, &entry);
+ }
+ else if (entry.type == MSGPACK_OBJECT_ARRAY) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("msg"),
+ FLB_LOG_EVENT_MSGPACK_OBJECT_VALUE(&entry));
+ }
+ else {
+ ret = FLB_EVENT_ENCODER_ERROR_INVALID_VALUE_TYPE;
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ break;
+ }
+ }
+ }
+
+ msgpack_unpacked_destroy(&result);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(conn->ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "log event encoding error : %d", ret);
+
+ ret = -1;
+ }
+
+ return ret;
+}
+
+/* Process a JSON payload, return the number of processed bytes */
+static ssize_t parse_payload_json(struct unix_socket_conn *conn)
+{
+ int ret;
+ int out_size;
+ char *pack;
+
+ ret = flb_pack_json_state(conn->buf_data, conn->buf_len,
+ &pack, &out_size, &conn->pack_state);
+ if (ret == FLB_ERR_JSON_PART) {
+ flb_plg_debug(conn->ins, "JSON incomplete, waiting for more data...");
+ return 0;
+ }
+ else if (ret == FLB_ERR_JSON_INVAL) {
+ flb_plg_warn(conn->ins, "invalid JSON message, skipping");
+ conn->buf_len = 0;
+ conn->pack_state.multiple = FLB_TRUE;
+ return -1;
+ }
+ else if (ret == -1) {
+ return -1;
+ }
+
+ /* Process the packaged JSON and return the last byte used */
+ process_pack(conn, pack, out_size);
+ flb_free(pack);
+
+ return conn->pack_state.last_byte;
+}
+
+/*
+ * Process a raw text payload, using the configured separator string to split
+ * records; returns the number of processed bytes.
+ */
+static ssize_t parse_payload_none(struct unix_socket_conn *conn)
+{
+ int ret = FLB_EVENT_ENCODER_SUCCESS;
+ int len;
+ int sep_len;
+ size_t consumed = 0;
+ char *buf;
+ char *s;
+ char *separator;
+ struct flb_in_unix_socket_config *ctx;
+
+ ctx = (struct flb_in_unix_socket_config *) conn->ctx;
+
+ separator = conn->ctx->separator;
+ sep_len = flb_sds_len(conn->ctx->separator);
+
+ buf = conn->buf_data;
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ while ((s = strstr(buf, separator))) {
+ len = (s - buf);
+ if (len == 0) {
+ break;
+ }
+ else if (len > 0) {
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_append_body_values(
+ ctx->log_encoder,
+ FLB_LOG_EVENT_CSTRING_VALUE("log"),
+ FLB_LOG_EVENT_STRING_VALUE(buf, len));
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+
+ if (ret != FLB_EVENT_ENCODER_SUCCESS) {
+ break;
+ }
+
+ consumed += len + sep_len;
+ buf += len + sep_len;
+ }
+ else {
+ break;
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ flb_input_log_append(conn->ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ }
+ else {
+ flb_plg_error(ctx->ins, "log event encoding error : %d", ret);
+ }
+
+ return consumed;
+}
+
+/* Callback invoked every time an event is triggered for a connection */
+int unix_socket_conn_event(void *data)
+{
+ int bytes;
+ int available;
+ int size;
+ ssize_t ret_payload = -1;
+ char *tmp;
+ struct mk_event *event;
+ struct unix_socket_conn *conn;
+ struct flb_connection *connection;
+ struct flb_in_unix_socket_config *ctx;
+
+ connection = (struct flb_connection *) data;
+
+ conn = connection->user_data;
+
+ ctx = conn->ctx;
+
+ if (ctx->dgram_mode_flag) {
+ if (ctx->format == FLB_UNIX_SOCKET_FMT_JSON &&
+ conn->buf_len > 0) {
+ flb_pack_state_reset(&conn->pack_state);
+ flb_pack_state_init(&conn->pack_state);
+
+ conn->pack_state.multiple = FLB_TRUE;
+ }
+
+ event = ctx->collector_event;
+ }
+ else {
+ event = &connection->event;
+ }
+
+ if (event->mask & MK_EVENT_READ) {
+ available = (conn->buf_size - conn->buf_len) - 1;
+ if (available < 1) {
+ if (conn->buf_size + ctx->chunk_size > ctx->buffer_size) {
+ flb_plg_trace(ctx->ins,
+ "fd=%i incoming data exceed limit (%zu KB)",
+ event->fd, (ctx->buffer_size / 1024));
+
+ if (!ctx->dgram_mode_flag) {
+ unix_socket_conn_del(conn);
+ }
+
+ return -1;
+ }
+
+ size = conn->buf_size + ctx->chunk_size;
+ tmp = flb_realloc(conn->buf_data, size);
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+ flb_plg_trace(ctx->ins, "fd=%i buffer realloc %i -> %i",
+ event->fd, conn->buf_size, size);
+
+ conn->buf_data = tmp;
+ conn->buf_size = size;
+ available = (conn->buf_size - conn->buf_len) - 1;
+ }
+
+ /* Read data */
+ bytes = flb_io_net_read(connection,
+ (void *) &conn->buf_data[conn->buf_len],
+ available);
+
+ if (bytes <= 0) {
+ if (!ctx->dgram_mode_flag) {
+ flb_plg_trace(ctx->ins, "fd=%i closed connection", event->fd);
+ unix_socket_conn_del(conn);
+ }
+
+ return -1;
+ }
+
+ flb_plg_trace(ctx->ins, "read()=%i pre_len=%i now_len=%i",
+ bytes, conn->buf_len, conn->buf_len + bytes);
+ conn->buf_len += bytes;
+ conn->buf_data[conn->buf_len] = '\0';
+
+ /* Strip CR or LF if found at first byte */
+ if (conn->buf_data[0] == '\r' || conn->buf_data[0] == '\n') {
+ /* Skip message with one byte with CR or LF */
+ flb_plg_trace(ctx->ins, "skip one byte message with ASCII code=%i",
+ conn->buf_data[0]);
+ consume_bytes(conn->buf_data, 1, conn->buf_len);
+ conn->buf_len--;
+ conn->buf_data[conn->buf_len] = '\0';
+ }
+
+ /* JSON Format handler */
+ if (ctx->format == FLB_UNIX_SOCKET_FMT_JSON) {
+ ret_payload = parse_payload_json(conn);
+ if (ret_payload == 0) {
+ /* Incomplete JSON message, we need more data */
+ return -1;
+ }
+ else if (ret_payload == -1) {
+ flb_pack_state_reset(&conn->pack_state);
+ flb_pack_state_init(&conn->pack_state);
+ conn->pack_state.multiple = FLB_TRUE;
+ return -1;
+ }
+ }
+ else if (ctx->format == FLB_UNIX_SOCKET_FMT_NONE) {
+ ret_payload = parse_payload_none(conn);
+ if (ret_payload == 0) {
+ return -1;
+ }
+ else if (ret_payload == -1) {
+ conn->buf_len = 0;
+ return -1;
+ }
+ }
+
+
+ consume_bytes(conn->buf_data, ret_payload, conn->buf_len);
+ conn->buf_len -= ret_payload;
+ conn->buf_data[conn->buf_len] = '\0';
+
+ if (ctx->format == FLB_UNIX_SOCKET_FMT_JSON) {
+ jsmn_init(&conn->pack_state.parser);
+ conn->pack_state.tokens_count = 0;
+ conn->pack_state.last_byte = 0;
+ conn->pack_state.buf_len = 0;
+ }
+
+ return bytes;
+ }
+
+ if (event->mask & MK_EVENT_CLOSE) {
+ flb_plg_trace(ctx->ins, "fd=%i hangup", event->fd);
+ unix_socket_conn_del(conn);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Create a new unix socket connection instance */
+struct unix_socket_conn *unix_socket_conn_add(struct flb_connection *connection,
+ struct flb_in_unix_socket_config *ctx)
+{
+ struct unix_socket_conn *conn;
+ int ret;
+
+ conn = flb_malloc(sizeof(struct unix_socket_conn));
+ if (!conn) {
+ flb_errno();
+ return NULL;
+ }
+
+ conn->connection = connection;
+
+ /* Set data for the event-loop */
+ MK_EVENT_NEW(&connection->event);
+
+ connection->user_data = conn;
+ connection->event.type = FLB_ENGINE_EV_CUSTOM;
+ connection->event.handler = unix_socket_conn_event;
+
+ /* Connection info */
+ conn->ctx = ctx;
+ conn->buf_len = 0;
+ conn->rest = 0;
+ conn->status = UNIX_SOCKET_NEW;
+
+ conn->buf_data = flb_malloc(ctx->chunk_size);
+ if (!conn->buf_data) {
+ flb_errno();
+
+ flb_plg_error(ctx->ins, "could not allocate new connection");
+ flb_free(conn);
+
+ return NULL;
+ }
+ conn->buf_size = ctx->chunk_size;
+ conn->ins = ctx->ins;
+
+ /* Initialize JSON parser */
+ if (ctx->format == FLB_UNIX_SOCKET_FMT_JSON) {
+ flb_pack_state_init(&conn->pack_state);
+ conn->pack_state.multiple = FLB_TRUE;
+ }
+
+ /* Register instance into the event loop */
+ ret = mk_event_add(flb_engine_evl_get(),
+ connection->fd,
+ FLB_ENGINE_EV_CUSTOM,
+ MK_EVENT_READ,
+ &connection->event);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not register new connection");
+
+ flb_free(conn->buf_data);
+ flb_free(conn);
+
+ return NULL;
+ }
+
+ mk_list_add(&conn->_head, &ctx->connections);
+
+ return conn;
+}
+
+int unix_socket_conn_del(struct unix_socket_conn *conn)
+{
+ struct flb_in_unix_socket_config *ctx;
+
+ ctx = conn->ctx;
+
+ if (ctx->format == FLB_UNIX_SOCKET_FMT_JSON) {
+ flb_pack_state_reset(&conn->pack_state);
+ }
+
+ /* The downstream unregisters the file descriptor from the event-loop
+ * so there's nothing to be done by the plugin
+ */
+ flb_downstream_conn_release(conn->connection);
+
+ /* Release resources */
+ mk_list_del(&conn->_head);
+
+ flb_free(conn->buf_data);
+ flb_free(conn);
+
+ return 0;
+}
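When format is none, parse_payload_none() splits the buffered payload on the configured separator and emits one record per chunk, leaving any trailing partial record in the buffer for the next read. A standalone sketch of that splitting logic, assuming an in-memory buffer and a newline separator:

/* Standalone sketch of the strstr()-based record splitting used by
 * parse_payload_none(); the buffer and separator are illustrative. */
#include <stdio.h>
#include <string.h>

static size_t split_records(const char *data, const char *separator)
{
    size_t sep_len = strlen(separator);
    size_t consumed = 0;
    const char *buf = data;
    const char *s;

    while ((s = strstr(buf, separator)) != NULL) {
        size_t len = (size_t) (s - buf);

        if (len == 0) {
            break;
        }

        printf("record: %.*s\n", (int) len, buf);

        consumed += len + sep_len;
        buf += len + sep_len;
    }

    /* bytes after the last separator stay buffered for the next read */
    return consumed;
}

int main(void)
{
    const char *payload = "first line\nsecond line\npartial";

    printf("consumed %zu bytes\n", split_records(payload, "\n"));
    return 0;
}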
diff --git a/src/fluent-bit/plugins/in_unix_socket/unix_socket_conn.h b/src/fluent-bit/plugins/in_unix_socket/unix_socket_conn.h
new file mode 100644
index 000000000..d46694b46
--- /dev/null
+++ b/src/fluent-bit/plugins/in_unix_socket/unix_socket_conn.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_IN_UNIX_SOCKET_CONN_H
+#define FLB_IN_UNIX_SOCKET_CONN_H
+
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_connection.h>
+
+#define FLB_IN_UNIX_SOCKET_CHUNK "32768"
+
+enum {
+ UNIX_SOCKET_NEW = 1, /* it's a new connection */
+ UNIX_SOCKET_CONNECTED = 2, /* connection established */
+};
+
+struct unix_socket_conn_stream {
+ char *tag;
+ size_t tag_len;
+};
+
+/* Represents a connection */
+struct unix_socket_conn {
+ int status; /* Connection status */
+
+ /* Buffer */
+ char *buf_data; /* Buffer data */
+ int buf_len; /* Data length */
+ int buf_size; /* Buffer size */
+ size_t rest; /* Unpacking offset */
+
+ struct flb_input_instance *ins; /* Parent plugin instance */
+ struct flb_in_unix_socket_config *ctx; /* Plugin configuration context */
+ struct flb_pack_state pack_state; /* Internal JSON parser */
+ struct flb_connection *connection;
+
+ struct mk_list _head;
+};
+
+struct unix_socket_conn *unix_socket_conn_add(struct flb_connection *connection, struct flb_in_unix_socket_config *ctx);
+int unix_socket_conn_del(struct unix_socket_conn *conn);
+int unix_socket_conn_event(void *data);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/CMakeLists.txt b/src/fluent-bit/plugins/in_windows_exporter_metrics/CMakeLists.txt
new file mode 100644
index 000000000..8cc7fe70a
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/CMakeLists.txt
@@ -0,0 +1,28 @@
+set(src
+ we_config.c
+ we.c
+ we_cpu.c
+ we_os.c
+ we_net.c
+ we_logical_disk.c
+ we_cs.c
+ we_wmi.c
+ we_util.c
+ we_metric.c
+ we_perflib.c
+ we_wmi_thermalzone.c
+ we_wmi_cpu_info.c
+ we_wmi_logon.c
+ we_wmi_system.c
+ we_wmi_service.c
+ we_wmi_memory.c
+ we_wmi_paging_file.c
+ we_wmi_process.c
+ )
+
+set(libs
+ wbemuuid
+ netapi32
+)
+
+FLB_PLUGIN(in_windows_exporter_metrics "${src}" "${libs}")
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we.c
new file mode 100644
index 000000000..0f99fb83f
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we.c
@@ -0,0 +1,1144 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "we.h"
+#include "we_wmi.h"
+#include "we_config.h"
+
+/* collectors */
+#include "we_cpu.h"
+#include "we_os.h"
+#include "we_net.h"
+#include "we_logical_disk.h"
+#include "we_cs.h"
+
+/* wmi collectors */
+#include "we_wmi_cpu_info.h"
+#include "we_wmi_logon.h"
+#include "we_wmi_system.h"
+#include "we_wmi_thermalzone.h"
+#include "we_wmi_service.h"
+#include "we_wmi_memory.h"
+#include "we_wmi_paging_file.h"
+#include "we_wmi_process.h"
+
+static int we_timer_cpu_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_we *ctx = in_context;
+
+ we_cpu_update(ctx);
+
+ return 0;
+}
+
+static int we_timer_os_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_we *ctx = in_context;
+
+ we_os_update(ctx);
+
+ return 0;
+}
+
+static int we_timer_net_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_we *ctx = in_context;
+
+ we_net_update(ctx);
+
+ return 0;
+}
+
+static int we_timer_logical_disk_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_we *ctx = in_context;
+
+ we_logical_disk_update(ctx);
+
+ return 0;
+}
+
+static int we_timer_cs_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_we *ctx = in_context;
+
+ we_cs_update(ctx);
+
+ return 0;
+}
+
+static int we_timer_wmi_thermalzone_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_we *ctx = in_context;
+
+ we_wmi_thermalzone_update(ctx);
+
+ return 0;
+}
+
+static int we_timer_wmi_cpu_info_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_we *ctx = in_context;
+
+ we_wmi_cpu_info_update(ctx);
+
+ return 0;
+}
+
+static int we_timer_wmi_logon_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_we *ctx = in_context;
+
+ we_wmi_logon_update(ctx);
+
+ return 0;
+}
+
+static int we_timer_wmi_system_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_we *ctx = in_context;
+
+ we_wmi_system_update(ctx);
+
+ return 0;
+}
+
+static int we_timer_wmi_service_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_we *ctx = in_context;
+
+ we_wmi_service_update(ctx);
+
+ return 0;
+}
+
+static int we_timer_wmi_memory_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_we *ctx = in_context;
+
+ we_wmi_memory_update(ctx);
+
+ return 0;
+}
+
+static int we_timer_wmi_paging_file_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_we *ctx = in_context;
+
+ we_wmi_paging_file_update(ctx);
+
+ return 0;
+}
+
+static int we_timer_wmi_process_metrics_cb(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct flb_we *ctx = in_context;
+
+ we_wmi_process_update(ctx);
+
+ return 0;
+}
+
+struct flb_we_callback {
+ char *name;
+ void (*func)(char *, void *, void *);
+};
+
+static int we_update_cb(struct flb_we *ctx, char *name);
+
+static void update_metrics(struct flb_input_instance *ins, struct flb_we *ctx)
+{
+ int ret;
+ struct mk_list *head;
+ struct flb_slist_entry *entry;
+
+ /* Update our metrics */
+ if (ctx->metrics) {
+ mk_list_foreach(head, ctx->metrics) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+ ret = flb_callback_exists(ctx->callback, entry->str);
+ if (ret == FLB_TRUE) {
+ we_update_cb(ctx, entry->str);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "Callback for metrics '%s' is not registered", entry->str);
+ }
+ }
+ }
+}
+
+/*
+ * Update the metrics, this function is invoked every time 'scrape_interval'
+ * expires.
+ */
+static int cb_we_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ int ret;
+ struct flb_we *ctx;
+
+ ctx = in_context;
+
+ update_metrics(ins, ctx);
+
+ /* Append the updated metrics */
+ ret = flb_input_metrics_append(ins, NULL, 0, ctx->cmt);
+
+ if (ret) {
+ flb_plg_error(ins, "could not append metrics");
+ }
+
+ return 0;
+}
+
+static void we_cpu_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_we *ctx = p1;
+
+ we_cpu_update(ctx);
+}
+
+static void we_os_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_we *ctx = p1;
+
+ we_os_update(ctx);
+}
+
+static void we_net_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_we *ctx = p1;
+
+ we_net_update(ctx);
+}
+
+static void we_logical_disk_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_we *ctx = p1;
+
+ we_logical_disk_update(ctx);
+}
+
+static void we_cs_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_we *ctx = p1;
+
+ we_cs_update(ctx);
+}
+
+static void we_wmi_thermalzone_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_we *ctx = p1;
+
+ we_wmi_thermalzone_update(ctx);
+}
+
+static void we_wmi_cpu_info_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_we *ctx = p1;
+
+ we_wmi_cpu_info_update(ctx);
+}
+
+static void we_wmi_logon_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_we *ctx = p1;
+
+ we_wmi_logon_update(ctx);
+}
+
+static void we_wmi_system_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_we *ctx = p1;
+
+ we_wmi_system_update(ctx);
+}
+
+static void we_wmi_service_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_we *ctx = p1;
+
+ we_wmi_service_update(ctx);
+}
+
+static void we_wmi_memory_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_we *ctx = p1;
+
+ we_wmi_memory_update(ctx);
+}
+
+static void we_wmi_paging_file_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_we *ctx = p1;
+
+ we_wmi_paging_file_update(ctx);
+}
+
+static void we_wmi_process_update_cb(char *name, void *p1, void *p2)
+{
+ struct flb_we *ctx = p1;
+
+ we_wmi_process_update(ctx);
+}
+
+static int we_update_cb(struct flb_we *ctx, char *name)
+{
+ int ret;
+
+ ret = flb_callback_do(ctx->callback, name, ctx, NULL);
+ return ret;
+}
+
+/*
+ * Callbacks Table
+ */
+struct flb_we_callback ne_callbacks[] = {
+ /* metrics */
+ { "cpu_info", we_wmi_cpu_info_update_cb },
+ { "cpu", we_cpu_update_cb },
+ { "os", we_os_update_cb },
+ { "net", we_net_update_cb },
+ { "logical_disk", we_logical_disk_update_cb },
+ { "cs", we_cs_update_cb },
+ { "thermalzone", we_wmi_thermalzone_update_cb },
+ { "logon", we_wmi_logon_update_cb },
+ { "system", we_wmi_system_update_cb },
+ { "service", we_wmi_service_update_cb },
+ { "memory", we_wmi_memory_update_cb },
+ { "paging_file", we_wmi_paging_file_update_cb },
+ { "process", we_wmi_process_update_cb },
+ { 0 }
+};
+
+static int in_we_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ int metric_idx = -1;
+ struct flb_we *ctx;
+ double windows_version = 0.0;
+ struct mk_list *head;
+ struct flb_slist_entry *entry;
+ struct flb_we_callback *cb;
+
+ /* Create plugin context */
+ ctx = flb_we_config_create(in, config);
+
+ if (ctx == NULL) {
+ flb_errno();
+
+ return -1;
+ }
+
+ /* Initialize fds */
+ ctx->coll_cpu_fd = -1;
+ ctx->coll_net_fd = -1;
+ ctx->coll_logical_disk_fd = -1;
+ ctx->coll_cs_fd = -1;
+ ctx->coll_os_fd = -1;
+ ctx->coll_wmi_thermalzone_fd = -1;
+ ctx->coll_wmi_cpu_info_fd = -1;
+ ctx->coll_wmi_logon_fd = -1;
+ ctx->coll_wmi_system_fd = -1;
+ ctx->coll_wmi_service_fd = -1;
+ ctx->coll_wmi_memory_fd = -1;
+ ctx->coll_wmi_paging_file_fd = -1;
+ ctx->coll_wmi_process_fd = -1;
+
+ ctx->callback = flb_callback_create(in->name);
+ if (!ctx->callback) {
+ flb_plg_error(ctx->ins, "Create callback failed");
+ return -1;
+ }
+
+ /* Associate context with the instance */
+ flb_input_set_context(in, ctx);
+
+ ret = we_get_windows_version(&windows_version);
+
+ if (ret == FLB_FALSE) {
+ flb_plg_error(in, "could not get windows version");
+
+ return -1;
+ }
+ ctx->windows_version = windows_version;
+
+ ret = we_perflib_init(ctx);
+
+ if (ret) {
+ flb_plg_error(in, "could not initialize PERFLIB");
+ return -1;
+ }
+
+ ret = we_wmi_init(ctx);
+
+ if (ret) {
+ flb_plg_error(in, "could not initialize WMI");
+
+ return -1;
+ }
+
+ /* Create the collector */
+ ret = flb_input_set_collector_time(in,
+ cb_we_collect,
+ ctx->scrape_interval, 0,
+ config);
+
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set collector for "
+ "Windows Exporter Metrics plugin");
+ return -1;
+ }
+
+ ctx->coll_fd = ret;
+
+ /* Check and initialize enabled metrics */
+ if (ctx->metrics) {
+ mk_list_foreach(head, ctx->metrics) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+ ret = flb_callback_exists(ctx->callback, entry->str);
+
+ if (ret == FLB_FALSE) {
+ if (strncmp(entry->str, "cpu_info", 8) == 0) {
+ if (ctx->wmi_cpu_info_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 0;
+ }
+ else {
+ /* Create the cpu_info collector */
+ ret = flb_input_set_collector_time(in,
+ we_timer_wmi_cpu_info_metrics_cb,
+ ctx->wmi_cpu_info_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set cpu_info collector for Windows Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_wmi_cpu_info_fd = ret;
+ }
+
+ /* Initialize cpu info metric collectors */
+ ret = we_wmi_cpu_info_init(ctx);
+ if (ret == -1) {
+ return -1;
+ }
+ }
+ else if (strncmp(entry->str, "cpu", 3) == 0) {
+ if (ctx->cpu_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 1;
+ }
+ else {
+ /* Create the cpu collector */
+ ret = flb_input_set_collector_time(in,
+ we_timer_cpu_metrics_cb,
+ ctx->cpu_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set cpu collector for Windows Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_cpu_fd = ret;
+ }
+
+ /* Initialize cpu metric collectors */
+ ret = we_cpu_init(ctx);
+ if (ret < 0) {
+ return -1;
+ }
+ }
+ else if (strncmp(entry->str, "os", 2) == 0) {
+ if (ctx->os_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 2;
+ } else {
+ /* Create the os collector */
+ ret = flb_input_set_collector_time(in,
+ we_timer_os_metrics_cb,
+ ctx->os_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set os collector for Windows Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_os_fd = ret;
+ }
+
+ /* Initialize os metric collectors */
+ ret = we_os_init(ctx);
+ if (ret) {
+ return -1;
+ }
+ }
+ else if (strncmp(entry->str, "net", 3) == 0) {
+ if (ctx->net_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 3;
+ }
+ else {
+ /* Create the net collector */
+ ret = flb_input_set_collector_time(in,
+ we_timer_net_metrics_cb,
+ ctx->net_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set net collector for Windows Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_net_fd = ret;
+ }
+
+ /* Initialize net metric collectors */
+ ret = we_net_init(ctx);
+ if (ret) {
+ return -1;
+ }
+ }
+ else if (strncmp(entry->str, "logical_disk", 12) == 0) {
+ if (ctx->logical_disk_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 4;
+ }
+ else {
+ /* Create the logical_disk collector */
+ ret = flb_input_set_collector_time(in,
+ we_timer_logical_disk_metrics_cb,
+ ctx->logical_disk_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set logical_disk collector for Windows Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_logical_disk_fd = ret;
+ }
+
+ /* Initialize logical_disk metric collectors */
+ ret = we_logical_disk_init(ctx);
+ if (ret) {
+ return -1;
+ }
+ }
+ else if (strncmp(entry->str, "cs", 2) == 0) {
+ if (ctx->cs_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 5;
+ }
+ else {
+ /* Create the cs collector */
+ ret = flb_input_set_collector_time(in,
+ we_timer_cs_metrics_cb,
+ ctx->cs_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set cs collector for Windows Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_cs_fd = ret;
+ }
+
+ /* Initialize cs metric collectors */
+ ret = we_cs_init(ctx);
+ if (ret) {
+ return -1;
+ }
+ }
+ else if (strncmp(entry->str, "thermalzone", 11) == 0) {
+ if (ctx->wmi_thermalzone_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 6;
+ }
+ else {
+ /* Create the thermalzone collector */
+ ret = flb_input_set_collector_time(in,
+ we_timer_wmi_thermalzone_metrics_cb,
+ ctx->wmi_thermalzone_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set thermalzone collector for Windows Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_wmi_thermalzone_fd = ret;
+ }
+
+ /* Initialize thermalzone metric collectors */
+ ret = we_wmi_thermalzone_init(ctx);
+ if (ret) {
+ return -1;
+ }
+ }
+ else if (strncmp(entry->str, "logon", 5) == 0) {
+ if (ctx->wmi_logon_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 7;
+ }
+ else {
+ /* Create the logon collector */
+ ret = flb_input_set_collector_time(in,
+ we_timer_wmi_logon_metrics_cb,
+ ctx->wmi_logon_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set logon collector for Windows Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_wmi_logon_fd = ret;
+ }
+
+ /* Initialize logon metric collectors */
+ ret = we_wmi_logon_init(ctx);
+ if (ret) {
+ return -1;
+ }
+ }
+ else if (strncmp(entry->str, "system", 6) == 0) {
+ if (ctx->wmi_system_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 8;
+ }
+ else {
+ /* Create the system collector */
+ ret = flb_input_set_collector_time(in,
+ we_timer_wmi_system_metrics_cb,
+ ctx->wmi_system_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set system collector for Windows Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_wmi_system_fd = ret;
+ }
+
+ /* Initialize system metric collectors */
+ ret = we_wmi_system_init(ctx);
+ if (ret) {
+ return -1;
+ }
+ }
+ else if (strncmp(entry->str, "service", 7) == 0) {
+ if (ctx->wmi_service_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 9;
+ }
+ else {
+ /* Create the service collector */
+ ret = flb_input_set_collector_time(in,
+ we_timer_wmi_service_metrics_cb,
+ ctx->wmi_service_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set service collector for Windows Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_wmi_service_fd = ret;
+ }
+
+ /* Initialize service metric collectors */
+ ret = we_wmi_service_init(ctx);
+ if (ret) {
+ return -1;
+ }
+ }
+ else if (strncmp(entry->str, "memory", 6) == 0) {
+ if (ctx->wmi_memory_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 10;
+ }
+ else {
+ /* Create the memory collector */
+ ret = flb_input_set_collector_time(in,
+ we_timer_wmi_memory_metrics_cb,
+ ctx->wmi_memory_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set memory collector for Windows Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_wmi_memory_fd = ret;
+ }
+
+ /* Initialize memory metric collectors */
+ ret = we_wmi_memory_init(ctx);
+ if (ret) {
+ return -1;
+ }
+ }
+ else if (strncmp(entry->str, "paging_file", 11) == 0) {
+ if (ctx->wmi_paging_file_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 11;
+ }
+ else {
+ /* Create the paging_file collector */
+ ret = flb_input_set_collector_time(in,
+ we_timer_wmi_paging_file_metrics_cb,
+ ctx->wmi_paging_file_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set paging_file collector for Windows Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_wmi_paging_file_fd = ret;
+ }
+
+ /* Initialize paging_file metric collectors */
+ ret = we_wmi_paging_file_init(ctx);
+ if (ret) {
+ return -1;
+ }
+ }
+ else if (strncmp(entry->str, "process", 7) == 0) {
+ if (ctx->wmi_process_scrape_interval == 0) {
+ flb_plg_debug(ctx->ins, "enabled metrics %s", entry->str);
+ metric_idx = 12;
+ }
+ else {
+ /* Create the process collector */
+ ret = flb_input_set_collector_time(in,
+ we_timer_wmi_process_metrics_cb,
+ ctx->wmi_process_scrape_interval, 0,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "could not set process collector for Windows Exporter Metrics plugin");
+ return -1;
+ }
+ ctx->coll_wmi_process_fd = ret;
+ }
+
+ /* Initialize process metric collectors */
+ ret = we_wmi_process_init(ctx);
+ if (ret) {
+ return -1;
+ }
+ }
+ else {
+ flb_plg_warn(ctx->ins, "Unknown metrics: %s", entry->str);
+ metric_idx = -1;
+ }
+
+ if (metric_idx >= 0) {
+ cb = &ne_callbacks[metric_idx];
+ ret = flb_callback_set(ctx->callback, cb->name, cb->func);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error setting up default "
+ "callback '%s'", cb->name);
+ }
+ }
+ }
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "No metrics specified");
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int in_we_exit(void *data, struct flb_config *config)
+{
+ int ret;
+ struct flb_we* ctx = data;
+ struct mk_list *head;
+ struct flb_slist_entry *entry;
+
+ if (data == NULL) {
+ return 0;
+ }
+
+ /* Tear down resources tied to callbacks */
+ if (ctx->metrics) {
+ mk_list_foreach(head, ctx->metrics) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+ ret = flb_callback_exists(ctx->callback, entry->str);
+
+ if (ret == FLB_TRUE) {
+ if (strncmp(entry->str, "cpu_info", 8) == 0) {
+ we_wmi_cpu_info_exit(ctx);
+ }
+ else if (strncmp(entry->str, "cpu", 3) == 0) {
+ /* nop */
+ }
+ else if (strncmp(entry->str, "os", 2) == 0) {
+ we_os_exit(ctx);
+ }
+ else if (strncmp(entry->str, "net", 3) == 0) {
+ we_net_exit(ctx);
+ }
+ else if (strncmp(entry->str, "logical_disk", 12) == 0) {
+ we_logical_disk_exit(ctx);
+ }
+ else if (strncmp(entry->str, "cs", 2) == 0) {
+ we_cs_exit(ctx);
+ }
+ else if (strncmp(entry->str, "thermalzone", 11) == 0) {
+ we_wmi_thermalzone_exit(ctx);
+ }
+ else if (strncmp(entry->str, "logon", 5) == 0) {
+ we_wmi_logon_exit(ctx);
+ }
+ else if (strncmp(entry->str, "system", 6) == 0) {
+ we_wmi_system_exit(ctx);
+ }
+ else if (strncmp(entry->str, "service", 7) == 0) {
+ we_wmi_service_exit(ctx);
+ }
+ else if (strncmp(entry->str, "memory", 6) == 0) {
+ we_wmi_memory_exit(ctx);
+ }
+ else if (strncmp(entry->str, "paging_file", 11) == 0) {
+ we_wmi_paging_file_exit(ctx);
+ }
+ else if (strncmp(entry->str, "process", 7) == 0) {
+ we_wmi_process_exit(ctx);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "Unknown metrics: %s", entry->str);
+ }
+ }
+ }
+ }
+
+ /* destroy callback context */
+ if (ctx->callback) {
+ flb_callback_destroy(ctx->callback);
+ }
+
+ /* Tear down resources tied to timer collectors */
+ if (ctx->coll_net_fd != -1) {
+ we_net_exit(ctx);
+ }
+ if (ctx->coll_logical_disk_fd != -1) {
+ we_logical_disk_exit(ctx);
+ }
+ if (ctx->coll_cs_fd != -1) {
+ we_cs_exit(ctx);
+ }
+ if (ctx->coll_os_fd != -1) {
+ we_os_exit(ctx);
+ }
+ if (ctx->coll_wmi_thermalzone_fd != -1) {
+ we_wmi_thermalzone_exit(ctx);
+ }
+ if (ctx->coll_wmi_cpu_info_fd != -1) {
+ we_wmi_cpu_info_exit(ctx);
+ }
+ if (ctx->coll_wmi_logon_fd != -1) {
+ we_wmi_logon_exit(ctx);
+ }
+ if (ctx->coll_wmi_system_fd != -1) {
+ we_wmi_system_exit(ctx);
+ }
+ if (ctx->coll_wmi_service_fd != -1) {
+ we_wmi_service_exit(ctx);
+ }
+ if (ctx->coll_wmi_memory_fd != -1) {
+ we_wmi_memory_exit(ctx);
+ }
+ if (ctx->coll_wmi_paging_file_fd != -1) {
+ we_wmi_paging_file_exit(ctx);
+ }
+ if (ctx->coll_wmi_process_fd != -1) {
+ we_wmi_process_exit(ctx);
+ }
+
+ flb_we_config_destroy(ctx);
+
+ return 0;
+}
+
+static void in_we_pause(void *data, struct flb_config *config)
+{
+ struct flb_we *ctx;
+
+ ctx = (struct flb_we *) data;
+
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+ if (ctx->coll_cpu_fd != -1) {
+ flb_input_collector_pause(ctx->coll_cpu_fd, ctx->ins);
+ }
+ if (ctx->coll_net_fd != -1) {
+ flb_input_collector_pause(ctx->coll_net_fd, ctx->ins);
+ }
+ if (ctx->coll_logical_disk_fd != -1) {
+ flb_input_collector_pause(ctx->coll_logical_disk_fd, ctx->ins);
+ }
+ if (ctx->coll_cs_fd != -1) {
+ flb_input_collector_pause(ctx->coll_cs_fd, ctx->ins);
+ }
+ if (ctx->coll_os_fd != -1) {
+ flb_input_collector_pause(ctx->coll_os_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_thermalzone_fd != -1) {
+ flb_input_collector_pause(ctx->coll_wmi_thermalzone_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_cpu_info_fd != -1) {
+ flb_input_collector_pause(ctx->coll_wmi_cpu_info_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_logon_fd != -1) {
+ flb_input_collector_pause(ctx->coll_wmi_logon_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_system_fd != -1) {
+ flb_input_collector_pause(ctx->coll_wmi_system_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_service_fd != -1) {
+ flb_input_collector_pause(ctx->coll_wmi_service_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_memory_fd != -1) {
+ flb_input_collector_pause(ctx->coll_wmi_memory_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_paging_file_fd != -1) {
+ flb_input_collector_pause(ctx->coll_wmi_paging_file_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_process_fd != -1) {
+ flb_input_collector_pause(ctx->coll_wmi_process_fd, ctx->ins);
+ }
+}
+
+static void in_we_resume(void *data, struct flb_config *config)
+{
+ struct flb_we *ctx;
+
+ ctx = (struct flb_we *) data;
+
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+ if (ctx->coll_cpu_fd != -1) {
+ flb_input_collector_resume(ctx->coll_cpu_fd, ctx->ins);
+ }
+ if (ctx->coll_net_fd != -1) {
+ flb_input_collector_resume(ctx->coll_net_fd, ctx->ins);
+ }
+ if (ctx->coll_logical_disk_fd != -1) {
+ flb_input_collector_resume(ctx->coll_logical_disk_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_process_fd != -1) {
+ flb_input_collector_resume(ctx->coll_wmi_process_fd, ctx->ins);
+ }
+ if (ctx->coll_cs_fd != -1) {
+ flb_input_collector_resume(ctx->coll_cs_fd, ctx->ins);
+ }
+ if (ctx->coll_os_fd != -1) {
+ flb_input_collector_resume(ctx->coll_os_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_thermalzone_fd != -1) {
+ flb_input_collector_resume(ctx->coll_wmi_thermalzone_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_cpu_info_fd != -1) {
+ flb_input_collector_resume(ctx->coll_wmi_cpu_info_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_logon_fd != -1) {
+ flb_input_collector_resume(ctx->coll_wmi_logon_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_system_fd != -1) {
+ flb_input_collector_resume(ctx->coll_wmi_system_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_service_fd != -1) {
+ flb_input_collector_resume(ctx->coll_wmi_service_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_memory_fd != -1) {
+ flb_input_collector_resume(ctx->coll_wmi_memory_fd, ctx->ins);
+ }
+ if (ctx->coll_wmi_paging_file_fd != -1) {
+ flb_input_collector_resume(ctx->coll_wmi_paging_file_fd, ctx->ins);
+ }
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_TIME, "scrape_interval", "1",
+ 0, FLB_TRUE, offsetof(struct flb_we, scrape_interval),
+ "scrape interval to collect metrics from the node."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "enable_collector", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_we, collectors),
+ "Collector to enable."
+ },
+ {
+ FLB_CONFIG_MAP_TIME, "collector.cpu.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_we, cpu_scrape_interval),
+ "scrape interval to collect cpu metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.net.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_we, net_scrape_interval),
+ "scrape interval to collect net metrics from the node."
+ },
+ {
+ FLB_CONFIG_MAP_TIME, "collector.logical_disk.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_we, logical_disk_scrape_interval),
+ "scrape interval to collect logical_disk metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.cs.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_we, cs_scrape_interval),
+ "scrape interval to collect cs metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.os.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_we, os_scrape_interval),
+ "scrape interval to collect os metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.thermalzone.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_we, wmi_thermalzone_scrape_interval),
+ "scrape interval to collect thermalzone metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.cpu_info.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_we, wmi_cpu_info_scrape_interval),
+ "scrape interval to collect cpu_info metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.logon.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_we, wmi_logon_scrape_interval),
+ "scrape interval to collect logon metrics from the node."
+ },
+ {
+ FLB_CONFIG_MAP_TIME, "collector.system.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_we, wmi_system_scrape_interval),
+ "scrape interval to collect system metrics from the node."
+ },
+ {
+ FLB_CONFIG_MAP_TIME, "collector.service.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_we, wmi_service_scrape_interval),
+ "scrape interval to collect service metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.memory.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_we, wmi_memory_scrape_interval),
+ "scrape interval to collect memory metrics from the node."
+ },
+ {
+ FLB_CONFIG_MAP_TIME, "collector.paging_file.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_we, wmi_paging_file_scrape_interval),
+ "scrape interval to collect paging_file metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "collector.process.scrape_interval", "0",
+ 0, FLB_TRUE, offsetof(struct flb_we, wmi_process_scrape_interval),
+ "scrape interval to collect process metrics from the node."
+ },
+
+ {
+ FLB_CONFIG_MAP_CLIST, "metrics",
+ "cpu,cpu_info,os,net,logical_disk,cs,thermalzone,logon,system,service",
+ 0, FLB_TRUE, offsetof(struct flb_we, metrics),
+ "Comma-separated list of metrics to enable."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "we.logical_disk.allow_disk_regex", "/.+/",
+ 0, FLB_TRUE, offsetof(struct flb_we, raw_allowing_disk),
+ "Specify a regex that selects which logical disks to collect metrics from."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "we.logical_disk.deny_disk_regex", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_we, raw_denying_disk),
+ "Specify a regex that selects which logical disks to exclude from metrics collection."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "we.net.allow_nic_regex", "/.+/",
+ 0, FLB_TRUE, offsetof(struct flb_we, raw_allowing_nic),
+ "Specify a regex, matched against the NIC name, that selects which network interfaces to collect metrics from."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "we.service.where", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_we, raw_where_clause),
+ "Specify the where clause for retrieving service metrics."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "we.service.include", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_we, raw_service_include),
+ "Specify key/value condition pairs used to build the WHERE clause that includes services in the service metrics."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "we.service.exclude", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_we, raw_service_exclude),
+ "Specify key/value condition pairs used to build the WHERE clause that excludes services from the service metrics."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "we.process.allow_process_regex", "/.+/",
+ 0, FLB_TRUE, offsetof(struct flb_we, raw_allowing_process),
+ "Specify a regex that selects which processes to collect metrics from."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "we.process.deny_process_regex", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_we, raw_denying_process),
+ "Specify a regex that selects which processes to exclude from metrics collection."
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_input_plugin in_windows_exporter_metrics_plugin = {
+ .name = "windows_exporter_metrics",
+ .description = "Windows Exporter Metrics (Prometheus Compatible)",
+ .cb_init = in_we_init,
+ .cb_pre_run = NULL,
+ .cb_collect = cb_we_collect,
+ .cb_flush_buf = NULL,
+ .config_map = config_map,
+ .cb_pause = in_we_pause,
+ .cb_resume = in_we_resume,
+ .cb_exit = in_we_exit,
+};
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we.h
new file mode 100644
index 000000000..1ec848893
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we.h
@@ -0,0 +1,332 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WINDOWS_EXPORTER_H
+#define FLB_WINDOWS_EXPORTER_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_hash_table.h>
+#include <fluent-bit/flb_metrics.h>
+
+#include <monkey/mk_core/mk_list.h>
+#include <fluent-bit/flb_sds.h>
+
+#include <windows.h>
+#include <wbemidl.h>
+
+#include "we_metric.h"
+
+#define PERFLIB_COUNTER_TYPE_COUNTER 0x400
+#define PERFLIB_COUNTER_FLAG_BASE_VALUE 0x00030000
+#define PERFLIB_COUNTER_FLAG_BASE_NANOSECONDS 0x00100000
+
+struct we_perflib_counter_definition {
+ char *name_index_str;
+ uint32_t name_index;
+ char *name;
+ uint32_t help_index;
+ char *help;
+
+ uint32_t type;
+ uint32_t size;
+ uint32_t offset;
+ uint32_t detail_level;
+
+ struct mk_list _head;
+};
+
+union we_perflib_value {
+ uint64_t as_qword;
+ double as_double;
+ uint32_t as_dword;
+ float as_float;
+};
+
+struct we_perflib_counter {
+ struct we_perflib_instance *parent;
+ struct we_perflib_counter_definition *definition;
+ union we_perflib_value primary_value;
+ union we_perflib_value secondary_value;
+ struct mk_list _head;
+};
+
+struct we_perflib_instance {
+ char *name;
+ struct we_perflib_object *parent;
+ struct flb_hash_table *counters;
+ struct mk_list _head;
+};
+
+struct we_perflib_object {
+ char *name;
+ int64_t time;
+ int64_t frequency;
+ int64_t hundred_ns_time;
+ size_t counter_count;
+ size_t instance_count;
+ struct flb_hash_table *instances;
+ struct mk_list counter_definitions;
+};
+
+
+struct we_perflib_context {
+ struct flb_hash_table *counter_indexes;
+};
+
+struct we_cpu_counters {
+ struct we_perflib_metric_source *metric_sources;
+ struct we_perflib_metric_spec *metric_specs;
+ int operational;
+ struct flb_hash_table *metrics;
+ char *query;
+};
+
+struct we_net_counters {
+ struct we_perflib_metric_source *metric_sources;
+ struct we_perflib_metric_spec *metric_specs;
+ int operational;
+ struct flb_hash_table *metrics;
+ char *query;
+};
+
+struct we_logical_disk_counters {
+ struct we_perflib_metric_source *metric_sources;
+ struct we_perflib_metric_spec *metric_specs;
+ int operational;
+ struct flb_hash_table *metrics;
+ char *query;
+};
+
+struct wmi_query_spec;
+
+struct we_wmi_thermal_counters {
+ struct wmi_query_spec *temperature_celsius;
+ struct wmi_query_spec *percent_passive_limit;
+ struct wmi_query_spec *throttle_reasons;
+ int operational;
+};
+
+struct we_wmi_cpu_info_counters {
+ struct wmi_query_spec *info;
+ int operational;
+};
+
+struct we_wmi_logon_counters {
+ struct wmi_query_spec *info;
+ int operational;
+};
+
+struct we_wmi_system_counters {
+ struct wmi_query_spec *info;
+ struct cmt_gauge *context_switches;
+ struct cmt_gauge *exception_dispatches;
+ struct cmt_gauge *processor_queue;
+ struct cmt_gauge *system_calls;
+ struct cmt_gauge *system_up_time;
+ struct cmt_gauge *threads;
+ int operational;
+};
+
+struct we_wmi_service_counters {
+ struct wmi_query_spec *info;
+ struct cmt_gauge *information;
+ struct cmt_gauge *state;
+ struct cmt_gauge *start_mode;
+ struct cmt_gauge *status;
+ int operational;
+};
+
+struct we_wmi_memory_counters {
+ struct wmi_query_spec *info;
+ struct cmt_gauge *available_bytes;
+ struct cmt_gauge *cache_bytes;
+ struct cmt_gauge *cache_bytes_peak;
+ struct cmt_gauge *cache_faults_total;
+ struct cmt_gauge *commit_limit;
+ struct cmt_gauge *committed_bytes;
+ struct cmt_gauge *demand_zero_faults_total;
+ struct cmt_gauge *free_and_zero_page_list_bytes;
+ struct cmt_gauge *free_system_page_table_entries;
+ struct cmt_gauge *modified_page_list_bytes;
+ struct cmt_gauge *page_faults_total;
+ struct cmt_gauge *swap_page_reads_total;
+ struct cmt_gauge *swap_pages_read_total;
+ struct cmt_gauge *swap_pages_written_total;
+ struct cmt_gauge *swap_page_operations_total;
+ struct cmt_gauge *swap_page_writes_total;
+ struct cmt_gauge *pool_nonpaged_allocs_total;
+ struct cmt_gauge *pool_nonpaged_bytes;
+ struct cmt_gauge *pool_paged_allocs_total;
+ struct cmt_gauge *pool_paged_bytes;
+ struct cmt_gauge *pool_paged_resident_bytes;
+ struct cmt_gauge *standby_cache_core_bytes;
+ struct cmt_gauge *standby_cache_normal_priority_bytes;
+ struct cmt_gauge *standby_cache_reserve_bytes;
+ struct cmt_gauge *system_cache_resident_bytes;
+ struct cmt_gauge *system_code_resident_bytes;
+ struct cmt_gauge *system_code_total_bytes;
+ struct cmt_gauge *system_driver_resident_bytes;
+ struct cmt_gauge *system_driver_total_bytes;
+ struct cmt_gauge *transition_faults_total;
+ struct cmt_gauge *transition_pages_repurposed_total;
+ struct cmt_gauge *write_copies_total;
+ int operational;
+};
+
+struct we_wmi_paging_file_counters {
+ struct wmi_query_spec *info;
+ struct cmt_gauge *allocated_base_size_megabytes;
+ struct cmt_gauge *current_usage_megabytes;
+ struct cmt_gauge *peak_usage_megabytes;
+ int operational;
+};
+
+struct we_wmi_process_counters {
+ struct wmi_query_spec *info;
+ struct cmt_gauge *start_time;
+ struct cmt_gauge *handles;
+ struct cmt_gauge *cpu_time_total;
+ struct cmt_gauge *io_bytes_total;
+ struct cmt_gauge *io_operations_total;
+ struct cmt_gauge *page_faults_total;
+ struct cmt_gauge *page_file_bytes;
+ struct cmt_gauge *pool_bytes;
+ struct cmt_gauge *priority_base;
+ struct cmt_gauge *thread_count;
+ struct cmt_gauge *private_bytes;
+ struct cmt_gauge *virtual_bytes;
+ struct cmt_gauge *working_set_private_bytes;
+ struct cmt_gauge *working_set_peak_bytes;
+ struct cmt_gauge *working_set_bytes;
+ int operational;
+};
+
+struct we_os_counters {
+ struct cmt_gauge *info;
+ struct cmt_gauge *users;
+ struct cmt_gauge *physical_memory_free_bytes;
+ struct cmt_gauge *time;
+ struct cmt_gauge *tz;
+ struct cmt_gauge *virtual_memory_free_bytes;
+ struct cmt_gauge *processes_limit;
+ struct cmt_gauge *process_memory_limit_bytes;
+ struct cmt_gauge *processes;
+ struct cmt_gauge *virtual_memory_bytes;
+ struct cmt_gauge *visible_memory_bytes;
+ int operational;
+};
+
+struct we_cs_counters {
+ struct cmt_gauge *logical_processors;
+ struct cmt_gauge *physical_memory_bytes;
+ struct cmt_gauge *hostname;
+ int operational;
+};
+
+struct flb_we {
+ /* configuration */
+ int scrape_interval;
+
+ int coll_fd; /* collector fd */
+ struct cmt *cmt; /* cmetrics context */
+ struct flb_input_instance *ins; /* input instance */
+ struct mk_list *collectors;
+ char *raw_allowing_disk;
+ char *raw_denying_disk;
+ char *raw_allowing_nic;
+ char *raw_where_clause;
+ char *raw_service_include;
+ char *raw_service_exclude;
+ char *raw_allowing_process;
+ char *raw_denying_process;
+ char *service_include_buffer;
+ int service_include_buffer_size;
+ char *service_exclude_buffer;
+ int service_exclude_buffer_size;
+
+ struct flb_regex *allowing_disk_regex;
+ struct flb_regex *denying_disk_regex;
+ struct flb_regex *allowing_nic_regex;
+ struct flb_regex *allowing_process_regex;
+ struct flb_regex *denying_process_regex;
+
+ struct we_perflib_context perflib_context;
+ /* WMI locator and service contexts */
+ IWbemLocator *locator;
+ IWbemServices *service;
+
+ float windows_version;
+
+ struct flb_callback *callback; /* metric callback */
+ struct mk_list *metrics; /* enabled metrics */
+
+ /* Individual intervals for metrics */
+ int cpu_scrape_interval;
+ int net_scrape_interval;
+ int logical_disk_scrape_interval;
+ int cs_scrape_interval;
+ int os_scrape_interval;
+ int wmi_thermalzone_scrape_interval;
+ int wmi_cpu_info_scrape_interval;
+ int wmi_logon_scrape_interval;
+ int wmi_system_scrape_interval;
+ int wmi_service_scrape_interval;
+ int wmi_memory_scrape_interval;
+ int wmi_paging_file_scrape_interval;
+ int wmi_process_scrape_interval;
+
+ int coll_cpu_fd; /* collector fd (cpu) */
+ int coll_net_fd; /* collector fd (net) */
+ int coll_logical_disk_fd; /* collector fd (logical_disk) */
+ int coll_cs_fd; /* collector fd (cs) */
+ int coll_os_fd; /* collector fd (os) */
+ int coll_wmi_thermalzone_fd; /* collector fd (wmi_thermalzone) */
+ int coll_wmi_cpu_info_fd; /* collector fd (wmi_cpu_info) */
+ int coll_wmi_logon_fd; /* collector fd (wmi_logon) */
+ int coll_wmi_system_fd; /* collector fd (wmi_system) */
+ int coll_wmi_service_fd; /* collector fd (wmi_service) */
+ int coll_wmi_memory_fd; /* collector fd (wmi_memory) */
+ int coll_wmi_paging_file_fd; /* collector fd (wmi_paging_file) */
+ int coll_wmi_process_fd; /* collector fd (wmi_process) */
+
+ /*
+ * Metrics Contexts
+ * ----------------
+ */
+
+ struct we_cpu_counters cpu;
+ struct we_net_counters net;
+ struct we_logical_disk_counters logical_disk;
+ struct we_cs_counters cs;
+ struct we_os_counters *os;
+ struct we_wmi_thermal_counters *wmi_thermals;
+ struct we_wmi_cpu_info_counters *wmi_cpu_info;
+ struct we_wmi_logon_counters *wmi_logon;
+ struct we_wmi_system_counters *wmi_system;
+ struct we_wmi_service_counters *wmi_service;
+ struct we_wmi_memory_counters *wmi_memory;
+ struct we_wmi_paging_file_counters *wmi_paging_file;
+ struct we_wmi_process_counters *wmi_process;
+};
+
+typedef int (*collector_cb)(struct flb_we *);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_config.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_config.c
new file mode 100644
index 000000000..14913b433
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_config.c
@@ -0,0 +1,154 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_pack.h>
+#include "we.h"
+
+struct flb_we *flb_we_config_create(struct flb_input_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ struct flb_we *ctx;
+ int root_type;
+
+ ctx = flb_calloc(1, sizeof(struct flb_we));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ ctx->allowing_disk_regex = NULL;
+ ctx->denying_disk_regex = NULL;
+ ctx->allowing_nic_regex = NULL;
+ ctx->service_include_buffer = NULL;
+ ctx->service_include_buffer_size = 0;
+ ctx->service_exclude_buffer = NULL;
+ ctx->service_exclude_buffer_size = 0;
+ ctx->allowing_process_regex = NULL;
+ ctx->denying_process_regex = NULL;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Process allow/deny regex rules */
+ if (ctx->raw_allowing_disk != NULL) {
+ ctx->allowing_disk_regex = flb_regex_create(ctx->raw_allowing_disk);
+ }
+
+ if (ctx->raw_denying_disk != NULL) {
+ ctx->denying_disk_regex = flb_regex_create(ctx->raw_denying_disk);
+ }
+
+ if (ctx->raw_allowing_nic != NULL) {
+ ctx->allowing_nic_regex = flb_regex_create(ctx->raw_allowing_nic);
+ }
+
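+ /*
+ * we.service.include / we.service.exclude are expected to carry a JSON
+ * object of key/value condition pairs (the exact keys depend on the WMI
+ * service fields consumed by the service collector). The raw string is
+ * only validated here by packing it to msgpack; if it is not valid JSON
+ * it is ignored with a warning.
+ */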
+ if (ctx->raw_service_include != NULL) {
+ ret = flb_pack_json(ctx->raw_service_include,
+ strlen(ctx->raw_service_include),
+ &ctx->service_include_buffer,
+ &ctx->service_include_buffer_size,
+ &root_type,
+ NULL);
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "we.service.include is incomplete. Ignored.");
+ ctx->service_include_buffer = NULL;
+ ctx->service_include_buffer_size = 0;
+ }
+ }
+
+ if (ctx->raw_service_exclude != NULL) {
+ ret = flb_pack_json(ctx->raw_service_exclude,
+ strlen(ctx->raw_service_exclude),
+ &ctx->service_exclude_buffer,
+ &ctx->service_exclude_buffer_size,
+ &root_type,
+ NULL);
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "we.service.exclude is incomplete. Ignored.");
+ ctx->service_exclude_buffer = NULL;
+ ctx->service_exclude_buffer_size = 0;
+ }
+ }
+
+ /* Process allow/deny regex rules for process metrics */
+ if (ctx->raw_allowing_process != NULL) {
+ ctx->allowing_process_regex = flb_regex_create(ctx->raw_allowing_process);
+ }
+
+ if (ctx->raw_denying_process != NULL) {
+ ctx->denying_process_regex = flb_regex_create(ctx->raw_denying_process);
+ }
+
+ ctx->cmt = cmt_create();
+ if (!ctx->cmt) {
+ flb_plg_error(ins, "could not initialize CMetrics");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ return ctx;
+}
+
+void flb_we_config_destroy(struct flb_we *ctx)
+{
+ if (!ctx) {
+ return;
+ }
+
+ if (ctx->allowing_disk_regex != NULL) {
+ flb_regex_destroy(ctx->allowing_disk_regex);
+ }
+
+ if (ctx->denying_disk_regex != NULL) {
+ flb_regex_destroy(ctx->denying_disk_regex);
+ }
+
+ if (ctx->allowing_nic_regex != NULL) {
+ flb_regex_destroy(ctx->allowing_nic_regex);
+ }
+
+ if (ctx->service_include_buffer != NULL) {
+ flb_free(ctx->service_include_buffer);
+ }
+
+ if (ctx->service_exclude_buffer != NULL) {
+ flb_free(ctx->service_exclude_buffer);
+ }
+
+ if (ctx->allowing_process_regex != NULL) {
+ flb_regex_destroy(ctx->allowing_process_regex);
+ }
+
+ if (ctx->denying_process_regex != NULL) {
+ flb_regex_destroy(ctx->denying_process_regex);
+ }
+
+ if (ctx->cmt) {
+ cmt_destroy(ctx->cmt);
+ }
+
+ flb_free(ctx);
+}
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_config.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_config.h
new file mode 100644
index 000000000..00eea3ebe
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_config.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_CONFIG_H
+#define FLB_WE_CONFIG_H
+
+#include <fluent-bit/flb_input_plugin.h>
+#include "we.h"
+
+struct flb_we *flb_we_config_create(struct flb_input_instance *ins,
+ struct flb_config *config);
+
+void flb_we_config_destroy(struct flb_we *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_cpu.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_cpu.c
new file mode 100644
index 000000000..d6013d797
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_cpu.c
@@ -0,0 +1,304 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include <float.h>
+#include <math.h>
+
+#include "we.h"
+#include "we_cpu.h"
+#include "we_util.h"
+#include "we_metric.h"
+#include "we_perflib.h"
+
+
+struct we_perflib_metric_source basic_metric_sources[] = {
+ WE_PERFLIB_METRIC_SOURCE("cstate_seconds_total",
+ "% C1 Time",
+ "c1"),
+
+ WE_PERFLIB_METRIC_SOURCE("cstate_seconds_total",
+ "% C2 Time",
+ "c2"),
+
+ WE_PERFLIB_METRIC_SOURCE("cstate_seconds_total",
+ "% C3 Time",
+ "c3"),
+
+ WE_PERFLIB_METRIC_SOURCE("time_total",
+ "% Idle Time",
+ "idle"),
+
+ WE_PERFLIB_METRIC_SOURCE("time_total",
+ "% Interrupt Time",
+ "interrupt"),
+
+ WE_PERFLIB_METRIC_SOURCE("time_total",
+ "% DPC Time",
+ "dpc"),
+
+ WE_PERFLIB_METRIC_SOURCE("time_total",
+ "% Privileged Time",
+ "privileged"),
+
+ WE_PERFLIB_METRIC_SOURCE("time_total",
+ "% User Time",
+ "user"),
+
+ WE_PERFLIB_METRIC_SOURCE("interrupts_total",
+ "Interrupts/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("dpcs_total",
+ "DPCs Queued/sec",
+ NULL),
+
+ WE_PERFLIB_TERMINATOR_SOURCE()
+ };
+
+
+struct we_perflib_metric_source full_metric_sources[] = {
+ WE_PERFLIB_METRIC_SOURCE("cstate_seconds_total",
+ "% C1 Time",
+ "c1"),
+
+ WE_PERFLIB_METRIC_SOURCE("cstate_seconds_total",
+ "% C2 Time",
+ "c2"),
+
+ WE_PERFLIB_METRIC_SOURCE("cstate_seconds_total",
+ "% C3 Time",
+ "c3"),
+
+ WE_PERFLIB_METRIC_SOURCE("time_total",
+ "% Idle Time",
+ "idle"),
+
+ WE_PERFLIB_METRIC_SOURCE("time_total",
+ "% Interrupt Time",
+ "interrupt"),
+
+ WE_PERFLIB_METRIC_SOURCE("time_total",
+ "% DPC Time",
+ "dpc"),
+
+ WE_PERFLIB_METRIC_SOURCE("time_total",
+ "% Privileged Time",
+ "privileged"),
+
+ WE_PERFLIB_METRIC_SOURCE("time_total",
+ "% User Time",
+ "user"),
+
+ WE_PERFLIB_METRIC_SOURCE("interrupts_total",
+ "Interrupts/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("dpcs_total",
+ "DPCs Queued/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("clock_interrupts_total",
+ "Clock Interrupts/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("idle_break_events_total",
+ "Idle Break Events/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("parkings_status",
+ "Parking Status",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("core_frequency_mhz",
+ "Processor Frequency",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("processor_performance",
+ "% Processor Performance",
+ NULL),
+
+ WE_PERFLIB_TERMINATOR_SOURCE()
+ };
+
+struct we_perflib_metric_spec full_metric_specs[] =
+ {
+ WE_PERFLIB_COUNTER_SPEC("cstate_seconds_total",
+ "Time spent in low-power idle state.",
+ "core,state"),
+
+ WE_PERFLIB_COUNTER_SPEC("time_total",
+ "Time that processor spent in different " \
+ "modes (idle, user, system, ...)",
+ "core,mode"),
+
+ WE_PERFLIB_COUNTER_SPEC("interrupts_total",
+ "Total number of received and serviced " \
+ "hardware interrupts",
+ "core"),
+
+ WE_PERFLIB_COUNTER_SPEC("dpcs_total",
+ "Total number of received and serviced " \
+ "deferred procedure calls (DPCs)",
+ "core"),
+
+ WE_PERFLIB_COUNTER_SPEC("clock_interrupts_total",
+ "Total number of received and serviced " \
+ "clock tick interrupts",
+ "core"),
+
+ WE_PERFLIB_COUNTER_SPEC("idle_break_events_total",
+ "Total number of time processor was woken " \
+ "from idle",
+ "core"),
+
+ WE_PERFLIB_GAUGE_SPEC("parkings_status",
+ "Parking Status represents whether a " \
+ "processor is parked or not",
+ "core"),
+
+ WE_PERFLIB_GAUGE_SPEC("core_frequency_mhz",
+ "Core frequency in megahertz",
+ "core"),
+
+ WE_PERFLIB_GAUGE_SPEC("processor_performance",
+ "Processor Performance is the average " \
+ "performance of the processor while it is " \
+ "executing instructions, as a percentage of" \
+ " the nominal performance of the processor." \
+ " On some processors, Processor Performance" \
+ " may exceed 100%",
+ "core"),
+
+ WE_PERFLIB_TERMINATOR_SPEC()
+ };
+
+
+int we_cpu_init(struct flb_we *ctx)
+{
+ struct we_perflib_metric_source *metric_sources;
+ int result;
+
+ ctx->cpu.operational = FLB_FALSE;
+
+ ctx->cpu.metrics = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE, 64, 128);
+
+ if (ctx->cpu.metrics == NULL) {
+ flb_plg_error(ctx->ins, "could not create metrics hash table");
+
+ return -1;
+ }
+
+ result = we_initialize_perflib_metric_specs(ctx->cmt,
+ ctx->cpu.metrics,
+ "windows",
+ "cpu",
+ &ctx->cpu.metric_specs,
+ full_metric_specs);
+
+ if (result != 0) {
+ flb_plg_error(ctx->ins, "could not initialize metric specs");
+
+ return -2;
+ }
+
+ if (fabsf(ctx->windows_version - 6.05) > FLT_EPSILON) {
+ metric_sources = full_metric_sources;
+ ctx->cpu.query = (char *) "Processor Information";
+ }
+ else {
+ metric_sources = basic_metric_sources;
+ ctx->cpu.query = (char *) "Processor";
+ }
+
+ result = we_initialize_perflib_metric_sources(ctx->cpu.metrics,
+ &ctx->cpu.metric_sources,
+ metric_sources);
+
+ if (result != 0) {
+ flb_plg_error(ctx->ins, "could not initialize metric sources");
+
+ we_deinitialize_perflib_metric_specs(ctx->cpu.metric_specs);
+ flb_free(ctx->cpu.metric_specs);
+
+ return -3;
+ }
+
+ ctx->cpu.operational = FLB_TRUE;
+
+ return 0;
+}
+
+int we_cpu_exit(struct flb_we *ctx)
+{
+ we_deinitialize_perflib_metric_sources(ctx->cpu.metric_sources);
+ we_deinitialize_perflib_metric_specs(ctx->cpu.metric_specs);
+
+ flb_free(ctx->cpu.metric_sources);
+ flb_free(ctx->cpu.metric_specs);
+
+ ctx->cpu.operational = FLB_FALSE;
+
+ return 0;
+}
+
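+/*
+ * Instance filter hook: a non-zero return value is treated as "skip this
+ * instance" by the perflib scanner, so the aggregate "_Total" instance is
+ * excluded from per-core metrics here.
+ */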
+int we_cpu_instance_hook(char *instance_name, struct flb_we *ctx)
+{
+ return (strcasestr(instance_name, "Total") != NULL);
+}
+
+int we_cpu_label_prepend_hook(char **label_list,
+ size_t label_list_size,
+ size_t *label_count,
+ struct we_perflib_metric_source *metric_source,
+ char *instance_name,
+ struct we_perflib_counter *counter)
+{
+ if (label_count == NULL) {
+ return -1;
+ }
+
+ if (*label_count >= label_list_size) {
+ return -2;
+ }
+
+ label_list[(*label_count)++] = instance_name;
+
+ return 0;
+}
+
+int we_cpu_update(struct flb_we *ctx)
+{
+ if (!ctx->cpu.operational) {
+ flb_plg_error(ctx->ins, "cpu collector not yet in operational state");
+
+ return -1;
+ }
+
+ return we_perflib_update_counters(ctx,
+ ctx->cpu.query,
+ ctx->cpu.metric_sources,
+ we_cpu_instance_hook,
+ we_cpu_label_prepend_hook);
+}
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_cpu.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_cpu.h
new file mode 100644
index 000000000..f2b040977
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_cpu.h
@@ -0,0 +1,30 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_CPU_H
+#define FLB_WE_CPU_H
+
+#include "we.h"
+
+int we_cpu_init(struct flb_we *ctx);
+int we_cpu_exit(struct flb_we *ctx);
+int we_cpu_update(struct flb_we *ctx);
+
+#endif
\ No newline at end of file
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_cs.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_cs.c
new file mode 100644
index 000000000..9ed4a1ca1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_cs.c
@@ -0,0 +1,112 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "we.h"
+#include "we_cs.h"
+#include "we_util.h"
+#include "we_metric.h"
+
+int we_cs_init(struct flb_we *ctx)
+{
+ ctx->cs.operational = FLB_FALSE;
+
+ struct cmt_gauge *g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "cs", "logical_processors",
+ "Number of logical processors",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->cs.logical_processors = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "cs", "physical_memory_bytes",
+ "Amount of bytes of physical memory",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->cs.physical_memory_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "cs", "hostname",
+ "Value of Local Time",
+ 3, (char *[]) {"hostname", "domain", "fqdn"});
+ if (!g) {
+ return -1;
+ }
+ ctx->cs.hostname = g;
+
+ ctx->cs.operational = FLB_TRUE;
+
+ return 0;
+}
+
+int we_cs_exit(struct flb_we *ctx)
+{
+ return 0;
+}
+
+int we_cs_update(struct flb_we *ctx)
+{
+ SYSTEM_INFO system_info;
+ MEMORYSTATUSEX statex;
+ char hostname[256] = "", domain[256] = "", fqdn[256] = "";
+ DWORD size = 0;
+ uint64_t timestamp = 0;
+
+ if (!ctx->cs.operational) {
+ flb_plg_error(ctx->ins, "cs collector not yet in operational state");
+
+ return -1;
+ }
+
+ timestamp = cfl_time_now();
+
+ statex.dwLength = sizeof (statex);
+ GlobalMemoryStatusEx(&statex);
+
+ GetSystemInfo(&system_info);
+
+ size = _countof(hostname);
+ if (!GetComputerNameExA(ComputerNameDnsHostname, hostname, &size)) {
+ flb_plg_warn(ctx->ins, "Failed to retrieve hostname info");
+ }
+ size = _countof(domain);
+ if (!GetComputerNameExA(ComputerNameDnsDomain, domain, &size)) {
+ flb_plg_warn(ctx->ins, "Failed to retrieve domain info");
+ }
+ size = _countof(fqdn);
+ if (!GetComputerNameExA(ComputerNameDnsFullyQualified, fqdn, &size)) {
+ flb_plg_warn(ctx->ins, "Failed to retrieve fqdn info");
+ }
+
+ cmt_gauge_set(ctx->cs.logical_processors, timestamp, (double)system_info.dwNumberOfProcessors, 0, NULL);
+ cmt_gauge_set(ctx->cs.physical_memory_bytes, timestamp, (double)statex.ullTotalPhys, 0, NULL);
+ cmt_gauge_set(ctx->cs.hostname, timestamp, 1.0, 3, (char *[]) { hostname, domain, fqdn });
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_cs.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_cs.h
new file mode 100644
index 000000000..111672891
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_cs.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_CS_H
+#define FLB_WE_CS_H
+
+#include "we.h"
+
+int we_cs_init(struct flb_we *ctx);
+int we_cs_exit(struct flb_we *ctx);
+int we_cs_update(struct flb_we *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_logical_disk.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_logical_disk.c
new file mode 100644
index 000000000..df2e09c41
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_logical_disk.c
@@ -0,0 +1,272 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "we.h"
+#include "we_logical_disk.h"
+#include "we_util.h"
+#include "we_metric.h"
+#include "we_perflib.h"
+
+
+struct we_perflib_metric_source logical_disk_metric_sources[] = {
+ WE_PERFLIB_METRIC_SOURCE("requests_queued",
+ "Current Disk Queue Length",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("read_bytes_total",
+ "Disk Read Bytes/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("read_total",
+ "Disk Reads/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("write_bytes_total",
+ "Disk Write Bytes/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("write_total",
+ "Disk Writes/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("read_seconds_total",
+ "% Disk Read Time",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("write_seconds_total",
+ "% Disk Write Time",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("free_megabytes",
+ "Free Megabytes",
+ NULL),
+
+ /* FIXME: Prometheus windows exporter uses '% Free Space_Base' as
+ * query for size_(mega)bytes metrics, but it does not work. */
+ /* WE_PERFLIB_METRIC_SOURCE("size_megabytes", */
+ /* "% Free Space_Base", */
+ /* NULL), */
+
+ WE_PERFLIB_METRIC_SOURCE("idle_seconds_total",
+ "% Idle Time",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("split_ios_total",
+ "Split IO/Sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("read_latency_seconds_total",
+ "Avg. Disk sec/Read",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("write_latency_seconds_total",
+ "Avg. Disk sec/Write",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("read_write_latency_seconds_total",
+ "Avg. Disk sec/Transfer",
+ NULL),
+
+ WE_PERFLIB_TERMINATOR_SOURCE()
+ };
+
+struct we_perflib_metric_spec logical_disk_metric_specs[] = {
+ WE_PERFLIB_GAUGE_SPEC("requests_queued",
+ "Number of queued requests on the disk",
+ "volume"),
+
+ WE_PERFLIB_COUNTER_SPEC("read_bytes_total",
+ "Number of read bytes from the disk",
+ "volume"),
+
+ WE_PERFLIB_COUNTER_SPEC("read_total",
+ "Number of read from the disk",
+ "volume"),
+
+ WE_PERFLIB_COUNTER_SPEC("write_bytes_total",
+ "Number of write bytes to the disk",
+ "volume"),
+
+ WE_PERFLIB_COUNTER_SPEC("write_total",
+ "Number of write from to disk",
+ "volume"),
+
+ WE_PERFLIB_COUNTER_SPEC("read_seconds_total",
+ "Total amount of reading time from the disk",
+ "volume"),
+
+ WE_PERFLIB_COUNTER_SPEC("write_seconds_total",
+ "Total amount of writeing time to the disk",
+ "volume"),
+
+ WE_PERFLIB_GAUGE_SPEC("free_megabytes",
+ "Free megabytes on the disk",
+ "volume"),
+
+ /* WE_PERFLIB_COUNTER_SPEC("size_megabytes", */
+ /* "Total amount of free megabytes on the disk", */
+ /* "volume"), */
+
+ WE_PERFLIB_COUNTER_SPEC("idle_seconds_total",
+ "Total amount of idling time on the disk",
+ "volume"),
+
+ WE_PERFLIB_COUNTER_SPEC("split_ios_total",
+ "Total amount of split I/O operations on the disk",
+ "volume"),
+
+ WE_PERFLIB_COUNTER_SPEC("read_latency_seconds_total",
+ "Average latency, in seconds, to read from the disk",
+ "volume"),
+
+ WE_PERFLIB_COUNTER_SPEC("write_latency_seconds_total",
+ "Average latency, in seconds, to write into the disk",
+ "volume"),
+
+ WE_PERFLIB_COUNTER_SPEC("read_write_latency_seconds_total",
+ "Average latency, in seconds, to transfer operations on the disk",
+ "volume"),
+
+ WE_PERFLIB_TERMINATOR_SPEC()
+ };
+
+
+int we_logical_disk_init(struct flb_we *ctx)
+{
+ struct we_perflib_metric_source *metric_sources;
+ int result;
+
+ ctx->logical_disk.operational = FLB_FALSE;
+
+ ctx->logical_disk.metrics = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE, 32, 128);
+
+ if (ctx->logical_disk.metrics == NULL) {
+ flb_plg_error(ctx->ins, "could not create metrics hash table for logical_disk metrics");
+
+ return -1;
+ }
+
+ result = we_initialize_perflib_metric_specs(ctx->cmt,
+ ctx->logical_disk.metrics,
+ "windows",
+ "logical_disk",
+ &ctx->logical_disk.metric_specs,
+ logical_disk_metric_specs);
+
+ if (result != 0) {
+ flb_plg_error(ctx->ins, "could not initialize logical_disk metric specs");
+
+ return -2;
+ }
+
+ ctx->logical_disk.query = (char *) "LogicalDisk";
+
+ result = we_initialize_perflib_metric_sources(ctx->logical_disk.metrics,
+ &ctx->logical_disk.metric_sources,
+ logical_disk_metric_sources);
+
+ if (result != 0) {
+ flb_plg_error(ctx->ins, "could not initialize logical_disk metric sources");
+
+ we_deinitialize_perflib_metric_specs(ctx->logical_disk.metric_specs);
+ flb_free(ctx->logical_disk.metric_specs);
+
+ return -3;
+ }
+
+ ctx->logical_disk.operational = FLB_TRUE;
+
+ return 0;
+}
+
+int we_logical_disk_exit(struct flb_we *ctx)
+{
+ we_deinitialize_perflib_metric_sources(ctx->logical_disk.metric_sources);
+ we_deinitialize_perflib_metric_specs(ctx->logical_disk.metric_specs);
+
+ flb_free(ctx->logical_disk.metric_sources);
+ flb_free(ctx->logical_disk.metric_specs);
+
+ ctx->logical_disk.operational = FLB_FALSE;
+
+ return 0;
+}
+
+static int logical_disk_regex_match(struct flb_regex *regex, char *instance_name)
+{
+ if (regex == NULL) {
+ return 0;
+ }
+ return flb_regex_match(regex, instance_name, strlen(instance_name));
+}
+
+
+int we_logical_disk_instance_hook(char *instance_name, struct flb_we *ctx)
+{
+ if (strcasestr(instance_name, "Total") != NULL) {
+ return 1;
+ }
+ if (logical_disk_regex_match(ctx->denying_disk_regex, instance_name) ||
+ !logical_disk_regex_match(ctx->allowing_disk_regex, instance_name)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+int we_logical_disk_label_prepend_hook(char **label_list,
+ size_t label_list_size,
+ size_t *label_count,
+ struct we_perflib_metric_source *metric_source,
+ char *instance_name,
+ struct we_perflib_counter *counter)
+{
+ if (label_count == NULL) {
+ return -1;
+ }
+
+ if (*label_count >= label_list_size) {
+ return -2;
+ }
+
+ label_list[(*label_count)++] = instance_name;
+
+ return 0;
+}
+
+int we_logical_disk_update(struct flb_we *ctx)
+{
+ if (!ctx->logical_disk.operational) {
+ flb_plg_error(ctx->ins, "logical_disk collector not yet in operational state");
+
+ return -1;
+ }
+
+ return we_perflib_update_counters(ctx,
+ ctx->logical_disk.query,
+ ctx->logical_disk.metric_sources,
+ we_logical_disk_instance_hook,
+ we_logical_disk_label_prepend_hook);
+}
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_logical_disk.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_logical_disk.h
new file mode 100644
index 000000000..d63678bae
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_logical_disk.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_LOGICAL_DISK_H
+#define FLB_WE_LOGICAL_DISK_H
+
+#include "we.h"
+
+int we_logical_disk_init(struct flb_we *ctx);
+int we_logical_disk_exit(struct flb_we *ctx);
+int we_logical_disk_update(struct flb_we *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_metric.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_metric.c
new file mode 100644
index 000000000..3475cdad3
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_metric.c
@@ -0,0 +1,368 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "we.h"
+#include "we_cpu.h"
+#include "we_util.h"
+#include "we_perflib.h"
+
+static int we_expand_perflib_label_set(char *input_raw_label_set,
+ char ***output_label_set,
+ size_t *output_label_set_size)
+{
+ flb_sds_t raw_label_set;
+ size_t label_index;
+ size_t label_count;
+ char *label_name;
+ char **label_set;
+ int result;
+
+ raw_label_set = flb_sds_create(input_raw_label_set);
+
+ if (raw_label_set == NULL) {
+ return -1;
+ }
+
+ label_count = 0;
+ label_name = (char *) raw_label_set;
+
+ while (label_name != NULL) {
+ result = mk_string_char_search(label_name, ',', -1);
+
+ if (result != -1) {
+ label_name[result] = '\0';
+ label_name = &label_name[result + 1];
+ }
+ else {
+ label_name = NULL;
+ }
+
+ label_count++;
+ }
+
+ label_set = (char **) flb_calloc(label_count, sizeof(char *));
+
+ if (label_set == NULL) {
+ flb_sds_destroy(raw_label_set);
+
+ return -2;
+ }
+
+ label_name = (char *) raw_label_set;
+
+ for (label_index = 0 ; label_index < label_count ; label_index++) {
+ label_set[label_index] = label_name;
+ label_name = &label_name[strlen(label_name) + 1];
+ }
+
+ *output_label_set = label_set;
+ *output_label_set_size = label_count;
+
+ return 0;
+}
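+
+/*
+ * Ownership note: the expanded label_set entries all point into a single
+ * flb_sds buffer whose commas were replaced in place by NUL terminators,
+ * so label_set[0] owns the whole allocation. This is why the deinitialize
+ * helpers below destroy label_set[0] and then free only the pointer array.
+ */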
+
+static int we_expand_perflib_metric_source_labels(
+ struct we_perflib_metric_source *source)
+{
+ source->label_set_size = 0;
+ source->label_set = NULL;
+
+ if (source->raw_label_set == NULL) {
+ return 0;
+ }
+
+ return we_expand_perflib_label_set(source->raw_label_set,
+ &source->label_set,
+ &source->label_set_size);
+}
+
+static int we_expand_perflib_metric_spec_labels(
+ struct we_perflib_metric_spec *spec)
+{
+ spec->label_set_size = 0;
+ spec->label_set = NULL;
+
+ if (spec->raw_label_set == NULL) {
+ return 0;
+ }
+
+ return we_expand_perflib_label_set(spec->raw_label_set,
+ &spec->label_set,
+ &spec->label_set_size);
+}
+
+static int we_match_perflib_metric_source_to_parent(
+ struct flb_hash_table *lookup_table,
+ struct we_perflib_metric_source *source)
+{
+ struct we_perflib_metric_spec *spec;
+
+ spec = flb_hash_table_get_ptr(lookup_table,
+ source->parent_name,
+ strlen(source->parent_name));
+
+ if (spec == NULL) {
+ return -1;
+ }
+
+ source->parent = spec;
+
+ return 0;
+}
+
+static int we_create_perflib_metric_instance(
+ struct cmt *context,
+ struct flb_hash_table *lookup_table,
+ char *namespace,
+ char *subsystem,
+ struct we_perflib_metric_spec *spec)
+{
+ void *metric_instance;
+ int result;
+
+ if (spec->type == CMT_COUNTER) {
+ metric_instance = (void *) cmt_counter_create(context,
+ namespace,
+ subsystem,
+ spec->name,
+ spec->description,
+ spec->label_set_size,
+ spec->label_set);
+ if (metric_instance == NULL) {
+ return -1;
+ }
+ }
+ else if (spec->type == CMT_GAUGE) {
+ metric_instance = (void *) cmt_gauge_create(context,
+ namespace,
+ subsystem,
+ spec->name,
+ spec->description,
+ spec->label_set_size,
+ spec->label_set);
+
+ if (metric_instance == NULL) {
+ return -2;
+ }
+ }
+ else {
+ return -3;
+ }
+
+ result = flb_hash_table_add(lookup_table,
+ spec->name,
+ strlen(spec->name),
+ spec,
+ 0);
+
+ if (result < 0) {
+ if (spec->type == CMT_COUNTER) {
+ cmt_counter_destroy(metric_instance);
+ }
+ else {
+ cmt_gauge_destroy(metric_instance);
+ }
+
+ return -4;
+ }
+
+ spec->metric_instance = metric_instance;
+
+ return 0;
+}
+
+void we_deinitialize_perflib_metric_sources(struct we_perflib_metric_source *sources)
+{
+ size_t source_index;
+
+ for (source_index = 0 ;
+ sources[source_index].name != NULL;
+ source_index++) {
+ if (sources[source_index].label_set_size) {
+ flb_sds_destroy(sources[source_index].label_set[0]);
+ flb_free(sources[source_index].label_set);
+ }
+ }
+}
+
+int we_initialize_perflib_metric_sources(
+ struct flb_hash_table *lookup_table,
+ struct we_perflib_metric_source **out_sources,
+ struct we_perflib_metric_source *in_sources)
+{
+ size_t source_array_size;
+ struct we_perflib_metric_source *source_array_copy;
+ struct we_perflib_metric_source *source_entry;
+ size_t source_index;
+ size_t source_count;
+ int result;
+
+ if (out_sources == NULL) {
+ return -1;
+ }
+
+ if (in_sources == NULL) {
+ return -2;
+ }
+
+ source_count = 0;
+
+ while (in_sources[source_count].name != NULL) {
+ source_count++;
+ }
+
+ if (source_count == 0) {
+ return -3;
+ }
+
+ source_array_size = sizeof(struct we_perflib_metric_source);
+ source_array_size *= (source_count + 1);
+
+ source_array_copy = (struct we_perflib_metric_source *) flb_calloc(1, source_array_size);
+
+ if (source_array_copy == NULL) {
+ return -4;
+ }
+
+ memcpy(source_array_copy, in_sources, source_array_size);
+
+ for (source_index = 0 ; source_index < source_count; source_index++) {
+ source_entry = &source_array_copy[source_index];
+
+ result = we_expand_perflib_metric_source_labels(source_entry);
+
+ if (result != 0) {
+ we_deinitialize_perflib_metric_sources(source_array_copy);
+ flb_free(source_array_copy);
+
+ return -5;
+ }
+
+ result = we_match_perflib_metric_source_to_parent(lookup_table,
+ source_entry);
+
+ if (result != 0) {
+ we_deinitialize_perflib_metric_sources(source_array_copy);
+ flb_free(source_array_copy);
+
+ return -6;
+ }
+ }
+
+ *out_sources = source_array_copy;
+
+ return 0;
+}
+
+void we_deinitialize_perflib_metric_specs(struct we_perflib_metric_spec *specs)
+{
+ size_t spec_index;
+
+ for (spec_index = 0 ;
+ specs[spec_index].name != NULL;
+ spec_index++) {
+ if (specs[spec_index].label_set_size) {
+ flb_sds_destroy(specs[spec_index].label_set[0]);
+ flb_free(specs[spec_index].label_set);
+ }
+ }
+}
+
+int we_initialize_perflib_metric_specs(
+ struct cmt *context,
+ struct flb_hash_table *lookup_table,
+ char *namespace,
+ char *subsystem,
+ struct we_perflib_metric_spec **out_specs,
+ struct we_perflib_metric_spec *in_specs)
+{
+ size_t spec_array_size;
+ struct we_perflib_metric_spec *spec_array_copy;
+ struct we_perflib_metric_spec *spec_entry;
+ size_t spec_index;
+ size_t spec_count;
+ int result;
+
+ if (out_specs == NULL) {
+ return -1;
+ }
+
+ if (in_specs == NULL) {
+ return -2;
+ }
+
+ spec_count = 0;
+
+ while (in_specs[spec_count].name != NULL) {
+ spec_count++;
+ }
+
+ if (spec_count == 0) {
+ return -3;
+ }
+
+ spec_array_size = sizeof(struct we_perflib_metric_spec);
+ spec_array_size *= spec_count + 1;
+
+ spec_array_copy = (struct we_perflib_metric_spec *) flb_calloc(1, spec_array_size);
+
+ if (spec_array_copy == NULL) {
+ return -4;
+ }
+
+ memcpy(spec_array_copy, in_specs, spec_array_size);
+
+ for (spec_index = 0 ; spec_index < spec_count; spec_index++) {
+ spec_entry = &spec_array_copy[spec_index];
+
+ result = we_expand_perflib_metric_spec_labels(spec_entry);
+
+ if (result) {
+ we_deinitialize_perflib_metric_specs(spec_array_copy);
+ flb_free(spec_array_copy);
+
+ return -5;
+ }
+
+ result = we_create_perflib_metric_instance(context,
+ lookup_table,
+ namespace,
+ subsystem,
+ spec_entry);
+
+ if (result) {
+ we_deinitialize_perflib_metric_specs(spec_array_copy);
+ flb_free(spec_array_copy);
+
+ return -6;
+ }
+ }
+
+ *out_specs = spec_array_copy;
+
+ return 0;
+}
+
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_metric.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_metric.h
new file mode 100644
index 000000000..7c9611a69
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_metric.h
@@ -0,0 +1,98 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_METRIC_H
+#define FLB_METRIC_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_hash_table.h>
+#include <fluent-bit/flb_metrics.h>
+
+struct we_perflib_metric_spec {
+ int type;
+ char *name;
+ char *description;
+ char *raw_label_set;
+ char **label_set;
+ size_t label_set_size;
+ void *metric_instance;
+};
+
+struct we_perflib_metric_source {
+ struct we_perflib_metric_spec *parent;
+ char *parent_name;
+ char *name;
+ char *raw_label_set;
+ char **label_set;
+ size_t label_set_size;
+};
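+
+/*
+ * A metric spec declares a single cmetrics counter or gauge, while a metric
+ * source maps one perflib counter name onto a spec through parent_name; the
+ * parent pointer is resolved at initialization time via the hash-table lookup
+ * performed in we_initialize_perflib_metric_sources().
+ */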
+
+#define WE_PERFLIB_SPEC(type_, name_, description_, raw_label_set_) \
+ { \
+ .type = type_, \
+ .name = name_, \
+ .description = description_, \
+ .raw_label_set = raw_label_set_, \
+ .label_set = NULL, \
+ .label_set_size = 0, \
+ .metric_instance = NULL \
+ }
+
+#define WE_PERFLIB_COUNTER_SPEC(name_, description_, raw_label_set_) \
+ WE_PERFLIB_SPEC(CMT_COUNTER, name_, description_, raw_label_set_)
+
+#define WE_PERFLIB_GAUGE_SPEC(name_, description_, raw_label_set_) \
+ WE_PERFLIB_SPEC(CMT_GAUGE, name_, description_, raw_label_set_)
+
+#define WE_PERFLIB_TERMINATOR_SPEC() \
+ WE_PERFLIB_SPEC(0, NULL, NULL, NULL)
+
+#define WE_PERFLIB_METRIC_SOURCE(parent_name_, name_, raw_label_set_) \
+ { \
+ .parent = NULL, \
+ .parent_name = parent_name_, \
+ .name = name_, \
+ .raw_label_set = raw_label_set_, \
+ .label_set = NULL, \
+ .label_set_size = 0 \
+ }
+
+#define WE_PERFLIB_TERMINATOR_SOURCE() \
+ WE_PERFLIB_METRIC_SOURCE(NULL, NULL, NULL)
+
+
+void we_deinitialize_perflib_metric_sources(struct we_perflib_metric_source *sources);
+int we_initialize_perflib_metric_sources(
+ struct flb_hash_table *lookup_table,
+ struct we_perflib_metric_source **out_sources,
+ struct we_perflib_metric_source *in_sources);
+
+
+void we_deinitialize_perflib_metric_specs(struct we_perflib_metric_spec *specs);
+int we_initialize_perflib_metric_specs(
+ struct cmt *context,
+ struct flb_hash_table *lookup_table,
+ char *namespace,
+ char *subsystem,
+ struct we_perflib_metric_spec **out_specs,
+ struct we_perflib_metric_spec *in_specs);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_net.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_net.c
new file mode 100644
index 000000000..673d665e8
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_net.c
@@ -0,0 +1,253 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "we.h"
+#include "we_net.h"
+#include "we_util.h"
+#include "we_metric.h"
+#include "we_perflib.h"
+
+
+struct we_perflib_metric_source net_metric_sources[] = {
+ WE_PERFLIB_METRIC_SOURCE("bytes_received_total",
+ "Bytes Received/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("bytes_sent_total",
+ "Bytes Sent/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("bytes_total",
+ "Bytes Total/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("packets_outbound_discarded_total",
+ "Packets Outbound Discarded",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("packets_outbound_errors_total",
+ "Packets Outbound Errors",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("packets_received_discarded_total",
+ "Packets Received Discarded",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("packets_received_errors_total",
+ "Packets Received Errors",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("packets_received_total",
+ "Packets Received/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("packets_received_unknown_total",
+ "Packets Received Unknown",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("packets_total",
+ "Packets/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("packets_sent_total",
+ "Packets Sent/sec",
+ NULL),
+
+ WE_PERFLIB_METRIC_SOURCE("current_bandwidth_bits",
+ "Current Bandwidth",
+ NULL),
+
+ WE_PERFLIB_TERMINATOR_SOURCE()
+ };
+
+struct we_perflib_metric_spec net_metric_specs[] = {
+ WE_PERFLIB_COUNTER_SPEC("bytes_received_total",
+ "Total amount of received bytes",
+ "nic"),
+
+ WE_PERFLIB_COUNTER_SPEC("bytes_sent_total",
+ "Total amount of sent bytes",
+ "nic"),
+
+ WE_PERFLIB_COUNTER_SPEC("bytes_total",
+ "Total amount of bytes",
+ "nic"),
+
+ WE_PERFLIB_COUNTER_SPEC("packets_outbound_discarded_total",
+ "Total amount of outbound discarded bytes",
+ "nic"),
+
+ WE_PERFLIB_COUNTER_SPEC("packets_outbound_errors_total",
+ "Total number of outbound errors",
+ "nic"),
+
+ WE_PERFLIB_COUNTER_SPEC("packets_received_discarded_total",
+ "Total amount of received discarded bytes",
+ "nic"),
+
+ WE_PERFLIB_COUNTER_SPEC("packets_received_errors_total",
+ "Total number of received packets' errors",
+ "nic"),
+
+ WE_PERFLIB_COUNTER_SPEC("packets_received_total",
+ "Total number of received packets",
+ "nic"),
+
+ WE_PERFLIB_COUNTER_SPEC("packets_received_unknown_total",
+ "Total number of received unknown",
+ "nic"),
+
+ WE_PERFLIB_COUNTER_SPEC("packets_total",
+ "Total amount of packets",
+ "nic"),
+
+ WE_PERFLIB_COUNTER_SPEC("packets_sent_total",
+ "Total amount of sent packets",
+ "nic"),
+
+ WE_PERFLIB_GAUGE_SPEC("current_bandwidth_bits",
+ "Current Bandwidth /bits",
+ "nic"),
+
+ WE_PERFLIB_TERMINATOR_SPEC()
+ };
+
+
+int we_net_init(struct flb_we *ctx)
+{
+ struct we_perflib_metric_source *metric_sources;
+ int result;
+
+ ctx->net.operational = FLB_FALSE;
+
+ ctx->net.metrics = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE, 32, 128);
+
+ if (ctx->net.metrics == NULL) {
+ flb_plg_error(ctx->ins, "could not create metrics hash table");
+
+ return -1;
+ }
+
+ result = we_initialize_perflib_metric_specs(ctx->cmt,
+ ctx->net.metrics,
+ "windows",
+ "net",
+ &ctx->net.metric_specs,
+ net_metric_specs);
+
+ if (result != 0) {
+ flb_plg_error(ctx->ins, "could not initialize net metric specs");
+
+ return -2;
+ }
+
+ ctx->net.query = (char *) "Network Interface";
+
+ result = we_initialize_perflib_metric_sources(ctx->net.metrics,
+ &ctx->net.metric_sources,
+ net_metric_sources);
+
+ if (result != 0) {
+ flb_plg_error(ctx->ins, "could not initialize net metric sources");
+
+ we_deinitialize_perflib_metric_specs(ctx->net.metric_specs);
+ flb_free(ctx->net.metric_specs);
+
+ return -3;
+ }
+
+ ctx->net.operational = FLB_TRUE;
+
+ return 0;
+}
+
+int we_net_exit(struct flb_we *ctx)
+{
+ we_deinitialize_perflib_metric_sources(ctx->net.metric_sources);
+ we_deinitialize_perflib_metric_specs(ctx->net.metric_specs);
+
+ flb_free(ctx->net.metric_sources);
+ flb_free(ctx->net.metric_specs);
+
+ ctx->net.operational = FLB_FALSE;
+
+ return 0;
+}
+
+static int net_regex_match(struct flb_regex *regex, char *instance_name)
+{
+ if (regex == NULL) {
+ return 0;
+ }
+ return flb_regex_match(regex, instance_name, strlen(instance_name));
+}
+
+int we_net_instance_hook(char *instance_name, struct flb_we *ctx)
+{
+ if (strcasestr(instance_name, "Total") != NULL) {
+ return 1;
+ }
+
+ if (!net_regex_match(ctx->allowing_nic_regex, instance_name)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+int we_net_label_prepend_hook(char **label_list,
+ size_t label_list_size,
+ size_t *label_count,
+ struct we_perflib_metric_source *metric_source,
+ char *instance_name,
+ struct we_perflib_counter *counter)
+{
+ if (label_count == NULL) {
+ return -1;
+ }
+
+ if (*label_count >= label_list_size) {
+ return -2;
+ }
+
+ label_list[(*label_count)++] = instance_name;
+
+ return 0;
+}
+
+int we_net_update(struct flb_we *ctx)
+{
+ if (!ctx->net.operational) {
+ flb_plg_error(ctx->ins, "net collector not yet in operational state");
+
+ return -1;
+ }
+
+ return we_perflib_update_counters(ctx,
+ ctx->net.query,
+ ctx->net.metric_sources,
+ we_net_instance_hook,
+ we_net_label_prepend_hook);
+}
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_net.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_net.h
new file mode 100644
index 000000000..52340ccfb
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_net.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_NET_H
+#define FLB_WE_NET_H
+
+#include "we.h"
+
+int we_net_init(struct flb_we *ctx);
+int we_net_exit(struct flb_we *ctx);
+int we_net_update(struct flb_we *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_os.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_os.c
new file mode 100644
index 000000000..299514d8c
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_os.c
@@ -0,0 +1,268 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#ifndef UNICODE
+#define UNICODE
+#endif
+#include <lm.h>
+#include <psapi.h>
+#include <timezoneapi.h>
+
+#include "we.h"
+#include "we_os.h"
+#include "we_util.h"
+#include "we_metric.h"
+
+int we_os_init(struct flb_we *ctx)
+{
+ ctx->os = flb_calloc(1, sizeof(struct we_os_counters));
+ if (!ctx->os) {
+ flb_errno();
+ return -1;
+ }
+ ctx->os->operational = FLB_FALSE;
+
+ struct cmt_gauge *g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "os", "info",
+ "Version information of OperatingSystem",
+ 5, (char *[]) {"product", "version", "major_version", "minor_version", "build_number"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->os->info = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "os", "physical_memory_free_bytes",
+ "Amount of free bytes of physical memory",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->os->physical_memory_free_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "os", "time",
+ "Value of Local Time",
+ 0, NULL);
+ if (!g) {
+ return -1;
+ }
+ ctx->os->time = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "os", "timezone",
+ "Name of Local Timezone",
+ 1, (char *[]) {"timezone"});
+ if (!g) {
+ return -1;
+ }
+ ctx->os->tz = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "os", "virtual_memory_bytes",
+ "Total amount of bytes of virtual memory",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->os->virtual_memory_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "os", "processes_limit",
+                          "Maximum number of processes",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->os->processes_limit = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "os", "process_memory_limit_bytes",
+                          "Maximum amount of memory available to a process",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->os->process_memory_limit_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "os", "processes",
+ "Number of processes",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->os->processes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "os", "users",
+ "Number of users",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->os->users = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "os", "visible_memory_bytes",
+                          "Total amount of bytes of visible memory",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->os->visible_memory_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "os", "virtual_memory_free_bytes",
+ "Amount of free bytes of virtual memory",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->os->virtual_memory_free_bytes = g;
+
+ ctx->os->operational = FLB_TRUE;
+
+ return 0;
+}
+
+int we_os_exit(struct flb_we *ctx)
+{
+ flb_free(ctx->os);
+ return 0;
+}
+
+int we_os_update(struct flb_we *ctx)
+{
+ DWORD level = 102;
+ LPWKSTA_INFO_102 wksta = NULL;
+ NET_API_STATUS status;
+ MEMORYSTATUSEX statex;
+ PERFORMANCE_INFORMATION perf;
+ DWORD size = 0;
+ char version[65] = {0}, major[32] = {0}, minor[32] = {0};
+ int users = 0;
+ LONG ret;
+ HKEY hkey;
+ char caption[80], build_number[32];
+ DWORD caption_len = sizeof(caption), build_len = sizeof(build_number);
+ uint64_t timestamp = 0;
+ char label_caption[90];
+ TIME_ZONE_INFORMATION tzi;
+ DWORD tztype = 0;
+ char *displaytz;
+
+ if (!ctx->os->operational) {
+ flb_plg_error(ctx->ins, "os collector not yet in operational state");
+
+ return -1;
+ }
+
+ timestamp = cfl_time_now();
+
+ ret = RegOpenKeyExA(HKEY_LOCAL_MACHINE, WE_OS_CURRENT_VERSION_PATH, 0, KEY_QUERY_VALUE, &hkey);
+ if (ret != ERROR_SUCCESS) {
+ return -1;
+ }
+ ret = RegQueryValueExA(hkey, "ProductName", NULL, NULL, (LPBYTE)caption, &caption_len);
+ if (ret != ERROR_SUCCESS) {
+ return -1;
+ }
+ ret = RegQueryValueExA(hkey, "CurrentBuildNumber", NULL, NULL, (LPBYTE)build_number, &build_len);
+ if (ret != ERROR_SUCCESS) {
+ return -1;
+ }
+ RegCloseKey(hkey);
+
+ status = NetWkstaGetInfo(NULL,
+ level,
+ (LPBYTE *)&wksta);
+
+ if (status == NERR_Success) {
+ snprintf(version, 65, "%d.%d", wksta->wki102_ver_major,
+ wksta->wki102_ver_minor);
+ snprintf(major, 32, "%d", wksta->wki102_ver_major);
+ snprintf(minor, 32, "%d", wksta->wki102_ver_minor);
+ snprintf(label_caption, 90, "Microsoft %s", caption);
+
+ users = wksta->wki102_logged_on_users;
+
+ cmt_gauge_set(ctx->os->info, timestamp, 1.0, 5,
+ (char *[]) { label_caption, version, major, minor, build_number});
+ cmt_gauge_set(ctx->os->users, timestamp, (double)users, 0, NULL);
+ }
+ else {
+ if (wksta != NULL) {
+ NetApiBufferFree(wksta);
+ }
+        flb_plg_error(ctx->ins, "A system error has occurred: %d", status);
+ return -1;
+ }
+
+ cmt_gauge_set(ctx->os->time, timestamp, (double)timestamp/1000000000L, 0, NULL);
+
+ tztype = GetTimeZoneInformation(&tzi);
+ switch (tztype) {
+ case TIME_ZONE_ID_STANDARD:
+ displaytz = we_convert_wstr(tzi.StandardName, CP_UTF8);
+ cmt_gauge_set(ctx->os->tz, timestamp, 1.0, 1, (char *[]) {displaytz});
+ flb_free(displaytz);
+ break;
+ case TIME_ZONE_ID_DAYLIGHT:
+ displaytz = we_convert_wstr(tzi.DaylightName, CP_UTF8);
+ cmt_gauge_set(ctx->os->tz, timestamp, 1.0, 1, (char *[]) {displaytz});
+ flb_free(displaytz);
+ break;
+ case TIME_ZONE_ID_UNKNOWN:
+ /* The current timezone does not use daylight saving time. */
+ displaytz = we_convert_wstr(tzi.StandardName, CP_UTF8);
+ cmt_gauge_set(ctx->os->tz, timestamp, 1.0, 1, (char *[]) {displaytz});
+ flb_free(displaytz);
+ break;
+ default:
+        flb_plg_error(ctx->ins, "Failed to retrieve timezone information. Error code = %d", GetLastError());
+ }
+
+ statex.dwLength = sizeof (statex);
+ GlobalMemoryStatusEx(&statex);
+
+ size = sizeof(perf);
+ GetPerformanceInfo(&perf, size);
+
+ cmt_gauge_set(ctx->os->physical_memory_free_bytes, timestamp, (double)statex.ullAvailPhys, 0, NULL);
+ cmt_gauge_set(ctx->os->virtual_memory_free_bytes, timestamp, (double)statex.ullAvailPageFile, 0, NULL);
+    /* Windows reports a fixed 4294967295 (2^32 - 1, i.e. no practical limit) here;
+     * the value matches $(Get-WMIObject Win32_OperatingSystem).MaxNumberOfProcesses. */
+ cmt_gauge_set(ctx->os->processes_limit, timestamp, (double)4294967295, 0, NULL);
+ cmt_gauge_set(ctx->os->process_memory_limit_bytes, timestamp, (double)statex.ullTotalVirtual, 0, NULL);
+ cmt_gauge_set(ctx->os->processes, timestamp, (double)perf.ProcessCount, 0, NULL);
+ cmt_gauge_set(ctx->os->virtual_memory_bytes, timestamp, (double)statex.ullTotalPageFile, 0, NULL);
+ cmt_gauge_set(ctx->os->visible_memory_bytes, timestamp, (double)statex.ullTotalPhys, 0, NULL);
+
+ if (wksta != NULL) {
+ NetApiBufferFree(wksta);
+ }
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_os.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_os.h
new file mode 100644
index 000000000..6a62e6064
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_os.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_OS_H
+#define FLB_WE_OS_H
+
+#include "we.h"
+
+#define WE_OS_CURRENT_VERSION_PATH \
+ "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion"
+
+int we_os_init(struct flb_we *ctx);
+int we_os_exit(struct flb_we *ctx);
+int we_os_update(struct flb_we *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_perflib.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_perflib.c
new file mode 100644
index 000000000..0c140661d
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_perflib.c
@@ -0,0 +1,1048 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <fluent-bit/flb_sds.h>
+
+#include "we.h"
+#include "we_util.h"
+#include "we_metric.h"
+#include "we_perflib.h"
+
+double we_perflib_get_adjusted_counter_value(struct we_perflib_counter *counter)
+{
+ double result;
+
+ result = (double) counter->primary_value.as_qword;
+
+ switch(counter->definition->type) {
+ case PERF_ELAPSED_TIME:
+ result -= counter->parent->parent->time;
+ result /= counter->parent->parent->frequency;
+ break;
+
+ case PERF_100NSEC_TIMER:
+ case PERF_PRECISION_100NS_TIMER:
+ result /= counter->parent->parent->frequency;
+ break;
+ }
+
+ return result;
+}
+
+char *we_perflib_get_counter_type_as_text(uint32_t counter_Type)
+{
+ switch (counter_Type) {
+ case PERF_100NSEC_TIMER:
+ return "PERF_100NSEC_TIMER";
+ case PERF_100NSEC_TIMER_INV:
+ return "PERF_100NSEC_TIMER_INV";
+ case PERF_100NSEC_MULTI_TIMER:
+ return "PERF_100NSEC_MULTI_TIMER";
+ case PERF_100NSEC_MULTI_TIMER_INV:
+ return "PERF_100NSEC_MULTI_TIMER_INV";
+ case PERF_AVERAGE_BASE:
+ return "PERF_AVERAGE_BASE";
+ case PERF_AVERAGE_BULK:
+ return "PERF_AVERAGE_BULK";
+ case PERF_AVERAGE_TIMER:
+ return "PERF_AVERAGE_TIMER";
+ case PERF_COUNTER_100NS_QUEUELEN_TYPE:
+ return "PERF_COUNTER_100NS_QUEUELEN_TYPE";
+ case PERF_COUNTER_BULK_COUNT:
+ return "PERF_COUNTER_BULK_COUNT";
+ case PERF_COUNTER_COUNTER:
+ return "PERF_COUNTER_COUNTER";
+ case PERF_COUNTER_DELTA:
+ return "PERF_COUNTER_DELTA";
+ case PERF_COUNTER_HISTOGRAM_TYPE:
+ return "PERF_COUNTER_HISTOGRAM_TYPE";
+ case PERF_COUNTER_LARGE_DELTA:
+ return "PERF_COUNTER_LARGE_DELTA";
+ case PERF_COUNTER_LARGE_QUEUELEN_TYPE:
+ return "PERF_COUNTER_LARGE_QUEUELEN_TYPE";
+ case PERF_COUNTER_LARGE_RAWCOUNT:
+ return "PERF_COUNTER_LARGE_RAWCOUNT";
+ case PERF_COUNTER_LARGE_RAWCOUNT_HEX:
+ return "PERF_COUNTER_LARGE_RAWCOUNT_HEX";
+ case PERF_COUNTER_MULTI_BASE:
+ return "PERF_COUNTER_MULTI_BASE";
+ case PERF_COUNTER_MULTI_TIMER:
+ return "PERF_COUNTER_MULTI_TIMER";
+ case PERF_COUNTER_MULTI_TIMER_INV:
+ return "PERF_COUNTER_MULTI_TIMER_INV";
+ case PERF_COUNTER_NODATA:
+ return "PERF_COUNTER_NODATA";
+ case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE:
+ return "PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE";
+ case PERF_COUNTER_QUEUELEN_TYPE:
+ return "PERF_COUNTER_QUEUELEN_TYPE";
+ case PERF_COUNTER_RAWCOUNT:
+ return "PERF_COUNTER_RAWCOUNT";
+ case PERF_COUNTER_RAWCOUNT_HEX:
+ return "PERF_COUNTER_RAWCOUNT_HEX";
+ case PERF_COUNTER_TEXT:
+ return "PERF_COUNTER_TEXT";
+ case PERF_COUNTER_TIMER:
+ return "PERF_COUNTER_TIMER";
+ case PERF_COUNTER_TIMER_INV:
+ return "PERF_COUNTER_TIMER_INV";
+ case PERF_ELAPSED_TIME:
+ return "PERF_ELAPSED_TIME";
+ case PERF_LARGE_RAW_BASE:
+ return "PERF_LARGE_RAW_BASE";
+ case PERF_LARGE_RAW_FRACTION:
+ return "PERF_LARGE_RAW_FRACTION";
+ case PERF_OBJ_TIME_TIMER:
+ return "PERF_OBJ_TIME_TIMER";
+ case PERF_PRECISION_100NS_TIMER:
+ return "PERF_PRECISION_100NS_TIMER";
+ case PERF_PRECISION_OBJECT_TIMER:
+ return "PERF_PRECISION_OBJECT_TIMER";
+ case PERF_PRECISION_SYSTEM_TIMER:
+ return "PERF_PRECISION_SYSTEM_TIMER";
+ case PERF_RAW_BASE:
+ return "PERF_RAW_BASE";
+ case PERF_RAW_FRACTION:
+ return "PERF_RAW_FRACTION";
+ case PERF_SAMPLE_BASE:
+ return "PERF_SAMPLE_BASE";
+ case PERF_SAMPLE_COUNTER:
+ return "PERF_SAMPLE_COUNTER";
+ case PERF_SAMPLE_FRACTION:
+ return "PERF_SAMPLE_FRACTION";
+ };
+
+ return "UNRECOGNIZED_COUNTER_TYPE";
+}
+
+void we_perflib_destroy_counter(struct we_perflib_counter *counter)
+{
+ flb_free(counter);
+}
+
+void we_perflib_destroy_instance(struct we_perflib_instance *instance)
+{
+ struct flb_hash_table_entry *counter_hash_entry;
+ struct mk_list *counter_iterator;
+ struct we_perflib_counter *counter;
+ struct mk_list *tmp;
+
+ mk_list_foreach_safe(counter_iterator,
+ tmp,
+ &instance->counters->entries) {
+ counter_hash_entry = mk_list_entry(counter_iterator,
+ struct flb_hash_table_entry,
+ _head_parent);
+
+ counter = (struct we_perflib_counter *) counter_hash_entry->val;
+
+ we_perflib_destroy_counter(counter);
+ }
+
+ if (instance->name != NULL) {
+ flb_free(instance->name);
+ }
+
+ flb_hash_table_destroy(instance->counters);
+
+ flb_free(instance);
+}
+
+void we_perflib_destroy_counter_definition(
+ struct we_perflib_counter_definition *definition)
+{
+ flb_sds_destroy(definition->name_index_str);
+
+ mk_list_del(&definition->_head);
+
+ flb_free(definition);
+}
+
+void we_perflib_destroy_object(struct we_perflib_object *object)
+{
+ struct mk_list *definition_iterator;
+ struct flb_hash_table_entry *instance_hash_entry;
+ struct mk_list *instance_iterator;
+ struct we_perflib_counter_definition *definition;
+ struct we_perflib_instance *instance;
+ struct mk_list *tmp;
+
+ mk_list_foreach_safe(definition_iterator, tmp, &object->counter_definitions) {
+ definition = mk_list_entry(definition_iterator,
+ struct we_perflib_counter_definition,
+ _head);
+
+ we_perflib_destroy_counter_definition(definition);
+ }
+
+ mk_list_foreach_safe(instance_iterator, tmp, &object->instances->entries) {
+ instance_hash_entry = mk_list_entry(instance_iterator,
+ struct flb_hash_table_entry,
+ _head_parent);
+
+ instance = (struct we_perflib_instance *) instance_hash_entry->val;
+
+ we_perflib_destroy_instance(instance);
+ }
+
+ flb_hash_table_destroy(object->instances);
+
+ flb_free(object);
+}
+
+static int get_string_list(char *source, flb_sds_t *out_result_buffer)
+{
+ DWORD result_buffer_size;
+ flb_sds_t result_buffer;
+ LSTATUS result;
+
+ result_buffer = NULL;
+ result_buffer_size = 0;
+
+ if (out_result_buffer == NULL) {
+ return -1;
+ }
+
+ result = RegQueryValueExA(HKEY_PERFORMANCE_TEXT,
+ source,
+ NULL,
+ NULL,
+ NULL,
+ &result_buffer_size);
+
+ if (result != ERROR_SUCCESS) {
+ return -2;
+ }
+
+ result_buffer = flb_sds_create_size(result_buffer_size);
+
+ if (result_buffer == NULL) {
+ return -3;
+ }
+
+ result = RegQueryValueExA(HKEY_PERFORMANCE_TEXT,
+ source,
+ NULL,
+ NULL,
+ (LPBYTE) result_buffer,
+ &result_buffer_size);
+
+ if (result != ERROR_SUCCESS)
+ {
+ flb_sds_destroy(result_buffer);
+
+ return -4;
+ }
+
+ *out_result_buffer = result_buffer;
+
+ return 0;
+}
+
+static int get_number_of_string_entries(uint32_t *result_count)
+{
+ DWORD argument_size;
+ DWORD entry_count;
+ HKEY key_handle;
+ LSTATUS result;
+
+ entry_count = 0;
+ argument_size = sizeof(DWORD);
+
+ result = RegOpenKeyExA(HKEY_LOCAL_MACHINE,
+ WE_PERFLIB_REGISTRY_PATH,
+ 0,
+ KEY_READ,
+ &key_handle);
+
+ if (result != ERROR_SUCCESS) {
+ return -1;
+ }
+
+ result = RegQueryValueExA(key_handle,
+ WE_PERFLIB_STRING_COUNT_KEY,
+ NULL,
+ 0,
+ (LPBYTE) &entry_count,
+ &argument_size);
+
+ RegCloseKey(key_handle);
+
+ if (result != ERROR_SUCCESS) {
+ return -2;
+ }
+
+ *result_count = (uint32_t) entry_count;
+
+ return 0;
+}
+
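+/*
+ * The "Counter 009" value is a list of NUL-separated strings in which a
+ * counter index and its English name alternate ("2", "System", "4", "Memory",
+ * ...; hypothetical excerpt), terminated by an empty string.  The loop below
+ * stores both directions (name -> index and index -> name) in one hash table.
+ */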
+static int get_text_mapping_table(struct flb_hash_table **out_mapping_table)
+{
+ char *current_counter_string;
+ flb_sds_t counter_strings;
+ char *counter_index;
+ char *counter_name;
+ uint32_t string_count;
+ int result;
+
+ if (out_mapping_table == NULL) {
+ return -1;
+ }
+
+ result = get_number_of_string_entries(&string_count);
+
+ if (result) {
+ return -2;
+ }
+
+ result = get_string_list(WE_PERFLIB_COUNTER_KEY_NAME, &counter_strings);
+
+ if (result) {
+ return -3;
+ }
+
+ *out_mapping_table = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE,
+ 512, string_count * 2);
+ if (*out_mapping_table == NULL) {
+ flb_sds_destroy(counter_strings);
+
+ return -4;
+ }
+
+ current_counter_string = (char *) counter_strings;
+
+ while (1) {
+ counter_index = current_counter_string;
+ current_counter_string = &current_counter_string[strlen(current_counter_string) + 1];
+
+ if (!current_counter_string[0]) {
+ break;
+ }
+
+ counter_name = current_counter_string;
+ current_counter_string = &current_counter_string[strlen(current_counter_string) + 1];
+
+ if (!current_counter_string[0]) {
+ break;
+ }
+
+ result = flb_hash_table_add(*out_mapping_table,
+ counter_name, strlen(counter_name),
+ counter_index, strlen(counter_index));
+
+ if (result < 0) {
+ flb_sds_destroy(counter_strings);
+ flb_hash_table_destroy(*out_mapping_table);
+
+ *out_mapping_table = NULL;
+
+ return -5;
+ }
+
+ result = flb_hash_table_add(*out_mapping_table,
+ counter_index, strlen(counter_index),
+ counter_name, strlen(counter_name));
+
+ if (result < 0) {
+ flb_sds_destroy(counter_strings);
+ flb_hash_table_destroy(*out_mapping_table);
+
+ *out_mapping_table = NULL;
+
+ return -5;
+ }
+ }
+
+ flb_sds_destroy(counter_strings);
+
+ return 0;
+}
+
+int we_perflib_query_raw_data(struct flb_we *ctx, char *source,
+ char **out_buffer, size_t *out_buffer_size)
+{
+ char *reallocated_buffer;
+ DWORD buffer_size;
+ DWORD data_size;
+ char *buffer;
+ LSTATUS result;
+
+ buffer_size = WE_PERFLIB_QUERY_BUFFER_INITIAL_SIZE;
+
+ result = ERROR_MORE_DATA;
+
+ buffer = (char *) flb_malloc(buffer_size);
+
+ if (buffer == NULL) {
+ return -1;
+ }
+
+ while (result == ERROR_MORE_DATA) {
+ data_size = buffer_size;
+
+ result = RegQueryValueExA(HKEY_PERFORMANCE_DATA,
+ source,
+ NULL,
+ NULL,
+ buffer,
+ &data_size);
+
+ RegCloseKey(HKEY_PERFORMANCE_DATA);
+
+ buffer_size += WE_PERFLIB_QUERY_BUFFER_INCREMENT_SIZE;
+
+ reallocated_buffer = (char *) flb_realloc(buffer, buffer_size);
+
+ if (reallocated_buffer == NULL) {
+ flb_free(buffer);
+
+ return -2;
+ }
+
+ buffer = reallocated_buffer;
+ }
+
+ *out_buffer = buffer;
+ *out_buffer_size = data_size;
+
+ return 0;
+}
+
+static char *we_perflib_lookup_counter_index(struct flb_hash_table *mapping_table,
+ char *name)
+{
+ return flb_hash_table_get_ptr(mapping_table,
+ name,
+ strlen(name));
+}
+
+static char *we_perflib_lookup_counter_name(struct flb_hash_table *mapping_table,
+ uint32_t index)
+{
+ char hash_table_index[11];
+
+ sprintf(hash_table_index, "%" PRIu32, index);
+
+ return flb_hash_table_get_ptr(mapping_table,
+ hash_table_index,
+ strlen(hash_table_index));
+}
+
+static int we_perflib_process_object_type(
+ struct we_perflib_context *context,
+ char *input_data_block,
+ struct we_perflib_object **out_perflib_object)
+{
+ char *input_object_block;
+ struct we_perflib_object *perflib_object;
+ PERF_OBJECT_TYPE *perf_object;
+ PERF_DATA_BLOCK *perf_data;
+ int result;
+
+ perf_data = (PERF_DATA_BLOCK *) input_data_block;
+
+ result = wcsncmp(perf_data->Signature, L"PERF", 4);
+
+ if (result) {
+ return -1;
+ }
+
+ input_object_block = &input_data_block[perf_data->HeaderLength];
+
+ perf_object = (PERF_OBJECT_TYPE *) input_object_block;
+
+ perflib_object = (struct we_perflib_object *) \
+ flb_calloc(1, sizeof(struct we_perflib_object));
+
+ if (perflib_object == NULL) {
+ return -2;
+ }
+
+ perflib_object->name = we_perflib_lookup_counter_name(
+ context->counter_indexes,
+ perf_object->ObjectNameTitleIndex);
+
+ if (perflib_object->name == NULL) {
+ flb_free(perflib_object);
+
+ return -3;
+ }
+
+ perflib_object->time = perf_data->PerfTime.QuadPart;
+ perflib_object->frequency = perf_data->PerfFreq.QuadPart;
+ perflib_object->hundred_ns_time = perf_data->PerfTime100nSec.QuadPart;
+
+ perflib_object->counter_count = perf_object->NumCounters;
+ perflib_object->instance_count = perf_object->NumInstances;
+
+
+ perflib_object->instances = flb_hash_table_create(
+ FLB_HASH_TABLE_EVICT_NONE,
+ 64,
+ perflib_object->instance_count + 1);
+
+ if (perflib_object->instances == NULL) {
+ flb_free(perflib_object);
+
+ return -4;
+ }
+
+ mk_list_init(&perflib_object->counter_definitions);
+
+ *out_perflib_object = perflib_object;
+
+ return perf_data->HeaderLength + perf_object->HeaderLength;
+}
+
+static int we_perflib_process_counter_definition(
+ struct we_perflib_context *context,
+ char *input_data_block,
+ struct we_perflib_counter_definition **out_counter_definition)
+{
+ PERF_COUNTER_DEFINITION *perf_counter_definition;
+ struct we_perflib_counter_definition *counter_definition;
+ char name_index_str[12];
+
+ perf_counter_definition = (PERF_COUNTER_DEFINITION *) input_data_block;
+
+ counter_definition = (struct we_perflib_counter_definition *) \
+ flb_calloc(1, sizeof(struct we_perflib_counter_definition));
+
+ if (counter_definition == NULL) {
+ return -1;
+ }
+
+ counter_definition->name_index = perf_counter_definition->CounterNameTitleIndex;
+
+ counter_definition->name = we_perflib_lookup_counter_name(
+ context->counter_indexes,
+ counter_definition->name_index);
+
+ snprintf(name_index_str,
+ sizeof(name_index_str),
+ "%" PRIu32,
+ counter_definition->name_index);
+
+ counter_definition->name_index_str = flb_sds_create(name_index_str);
+
+ if (counter_definition->name_index_str == NULL) {
+ flb_free(counter_definition);
+
+ return -2;
+ }
+
+ if (counter_definition->name == NULL) {
+ counter_definition->name = "";
+ }
+
+ if (counter_definition->name_index_str == NULL) {
+ counter_definition->name_index_str = flb_sds_create("");
+ }
+
+ counter_definition->help_index = perf_counter_definition->CounterHelpTitleIndex;
+
+ counter_definition->type = perf_counter_definition->CounterType;
+ counter_definition->size = perf_counter_definition->CounterSize;
+ counter_definition->offset = perf_counter_definition->CounterOffset;
+ counter_definition->detail_level = perf_counter_definition->DetailLevel;
+
+ *out_counter_definition = counter_definition;
+
+ return perf_counter_definition->ByteLength;
+}
+
+static int we_perflib_process_counter_definitions(
+ struct we_perflib_context *context,
+ struct we_perflib_object *perflib_object,
+ char *input_data_block)
+{
+ size_t counter_definition_index;
+ struct we_perflib_counter_definition *counter_definition;
+ size_t offset;
+ int result;
+
+ offset = 0;
+
+ for (counter_definition_index = 0 ;
+ counter_definition_index < perflib_object->counter_count ;
+ counter_definition_index++) {
+ result = we_perflib_process_counter_definition(context,
+ &input_data_block[offset],
+ &counter_definition);
+
+ if (result <= 0) {
+ return -1;
+ }
+
+ offset += result;
+
+ mk_list_add(&counter_definition->_head, &perflib_object->counter_definitions);
+ }
+
+ return offset;
+}
+
+static struct we_perflib_counter * we_perflib_create_counter(
+ struct we_perflib_counter_definition *counter_definition)
+{
+ struct we_perflib_counter *counter;
+
+ counter = (struct we_perflib_counter *) \
+ flb_calloc(1, sizeof(struct we_perflib_counter));
+
+ if (counter == NULL) {
+ return NULL;
+ }
+
+ counter->definition = counter_definition;
+
+ return counter;
+}
+
+static int we_perflib_process_counter(
+ struct we_perflib_context *context,
+ struct we_perflib_counter_definition *counter_definition,
+ char *input_data_block,
+ struct we_perflib_counter **out_counter)
+{
+ struct we_perflib_counter *perflib_instance_counter;
+
+ perflib_instance_counter = we_perflib_create_counter(counter_definition);
+
+ if (perflib_instance_counter == NULL) {
+ return -1;
+ }
+
+ memcpy(&perflib_instance_counter->primary_value,
+ &input_data_block[counter_definition->offset],
+ counter_definition->size);
+
+ if (counter_definition->size > sizeof(union we_perflib_value)) {
+ we_perflib_destroy_counter(perflib_instance_counter);
+
+ return -2;
+ }
+
+ *out_counter = perflib_instance_counter;
+
+ return 0;
+}
+
+static int we_perflib_process_counters(struct we_perflib_context *context,
+ struct we_perflib_object *perflib_object,
+ struct we_perflib_instance *instance,
+ char *input_data_block)
+{
+ struct mk_list *counter_definition_iterator;
+ struct we_perflib_counter *perflib_instance_counter;
+ PERF_COUNTER_BLOCK *perf_counter_block;
+ struct we_perflib_counter_definition *counter_definition;
+ int result;
+ int offset;
+
+ perf_counter_block = (PERF_COUNTER_BLOCK *) input_data_block;
+
+ mk_list_foreach(counter_definition_iterator,
+ &perflib_object->counter_definitions) {
+ counter_definition = mk_list_entry(counter_definition_iterator,
+ struct we_perflib_counter_definition,
+ _head);
+
+ if (!counter_definition->name_index) {
+ continue;
+ }
+
+ result = we_perflib_process_counter(context,
+ counter_definition,
+ input_data_block,
+ &perflib_instance_counter);
+
+ if (result < 0) {
+ return -1;
+ }
+
+ perflib_instance_counter->parent = instance;
+
+ result = -1;
+
+ if (counter_definition->name[0]) {
+ result = flb_hash_table_add(instance->counters,
+ counter_definition->name,
+ strlen(counter_definition->name),
+ perflib_instance_counter,
+ 0);
+ }
+ else
+ {
+ result = flb_hash_table_add(instance->counters,
+ counter_definition->name_index_str,
+ strlen(counter_definition->name_index_str),
+ perflib_instance_counter,
+ 0);
+ }
+
+ if (result < 0) {
+ we_perflib_destroy_counter(perflib_instance_counter);
+
+ return -2;
+ }
+ }
+
+ return perf_counter_block->ByteLength;
+}
+
+static struct we_perflib_instance *we_perflib_create_instance(size_t counter_count)
+{
+ struct we_perflib_instance *instance;
+
+ instance = (struct we_perflib_instance *) \
+ flb_calloc(1, sizeof(struct we_perflib_instance));
+
+ if (instance == NULL) {
+ return NULL;
+ }
+
+ instance->counters = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE,
+ 64,
+ counter_count + 1);
+
+ if (instance->counters == NULL) {
+ flb_free(instance);
+
+ return NULL;
+ }
+
+ return instance;
+}
+
+static int we_perflib_process_instance(struct we_perflib_context *context,
+ struct we_perflib_object *perflib_object,
+ char *input_data_block,
+ struct we_perflib_instance **out_instance)
+{
+ PERF_INSTANCE_DEFINITION *perf_instance_definition;
+ struct we_perflib_instance *perflib_instance;
+ int offset;
+ int result;
+
+ perflib_instance = we_perflib_create_instance(perflib_object->counter_count);
+
+ if (perflib_instance == NULL) {
+ return -1;
+ }
+
+ offset = 0;
+
+ if (perflib_object->instance_count >= 1) {
+ perf_instance_definition = (PERF_INSTANCE_DEFINITION *) input_data_block;
+
+ if (perf_instance_definition->NameLength > 0) {
+ perflib_instance->name = \
+ we_convert_wstr(&input_data_block[perf_instance_definition->NameOffset], CP_UTF8);
+ if (perflib_instance->name == NULL) {
+ we_perflib_destroy_instance(perflib_instance);
+
+ return -2;
+ }
+ }
+ else {
+ perflib_instance->name = flb_strdup("DEFAULT");
+ }
+
+ offset = perf_instance_definition->ByteLength;
+ }
+
+ perflib_instance->parent = perflib_object;
+
+ result = we_perflib_process_counters(context,
+ perflib_object,
+ perflib_instance,
+ &input_data_block[offset]);
+
+ if (result < 0) {
+ we_perflib_destroy_instance(perflib_instance);
+
+ return -3;
+ }
+
+ offset += result;
+
+ *out_instance = perflib_instance;
+
+ return offset;
+}
+
+static int we_perflib_process_instances(struct we_perflib_context *context,
+ struct we_perflib_object *perflib_object,
+ char *input_data_block)
+{
+ struct we_perflib_instance *perflib_instance;
+ size_t instance_index;
+ int result;
+ int offset;
+
+ offset = 0;
+
+ for (instance_index = 0 ;
+ instance_index < perflib_object->instance_count ;
+ instance_index++) {
+
+ result = we_perflib_process_instance(context,
+ perflib_object,
+ &input_data_block[offset],
+ &perflib_instance);
+
+ if (result <= 0) {
+ return -1;
+ }
+
+ offset += result;
+
+ result = flb_hash_table_add(perflib_object->instances,
+ perflib_instance->name,
+ strlen(perflib_instance->name),
+ perflib_instance,
+ 0);
+
+ if (result < 0) {
+ we_perflib_destroy_instance(perflib_instance);
+
+ return -2;
+ }
+ }
+
+ return offset;
+}
+
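+/*
+ * we_perflib_query() walks the raw HKEY_PERFORMANCE_DATA buffer in the order
+ * the perflib format lays it out: PERF_DATA_BLOCK header, one PERF_OBJECT_TYPE,
+ * its PERF_COUNTER_DEFINITION array, then the PERF_INSTANCE_DEFINITION /
+ * PERF_COUNTER_BLOCK pairs.  Each helper above returns the number of bytes it
+ * consumed so the offset can be advanced step by step.
+ */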
+int we_perflib_query(struct flb_we *ctx,
+ char *counter_name,
+ struct we_perflib_object **out_object)
+{
+ char *counter_name_index;
+ char *raw_data_buffer;
+ size_t raw_data_offset;
+ struct we_perflib_object *perflib_object;
+ size_t raw_data_size;
+ int result;
+
+
+ counter_name_index = we_perflib_lookup_counter_index(
+ ctx->perflib_context.counter_indexes, counter_name);
+
+ if (counter_name_index == NULL) {
+ return -1;
+ }
+
+ result = we_perflib_query_raw_data(ctx,
+ counter_name_index,
+ &raw_data_buffer,
+ &raw_data_size);
+
+ if (result) {
+ return -2;
+ }
+
+ raw_data_offset = 0;
+
+ result = we_perflib_process_object_type(&ctx->perflib_context,
+ &raw_data_buffer[raw_data_offset],
+ &perflib_object);
+
+ if (result < 0) {
+ flb_free(raw_data_buffer);
+
+ return -3;
+ }
+
+ raw_data_offset += result;
+
+ result = we_perflib_process_counter_definitions(&ctx->perflib_context,
+ perflib_object,
+ &raw_data_buffer[raw_data_offset]);
+
+ if (result < 0) {
+ we_perflib_destroy_object(perflib_object);
+ flb_free(raw_data_buffer);
+
+ return -4;
+ }
+
+ raw_data_offset += result;
+
+ result = we_perflib_process_instances(&ctx->perflib_context,
+ perflib_object,
+ &raw_data_buffer[raw_data_offset]);
+
+ if (result < 0) {
+ we_perflib_destroy_object(perflib_object);
+ flb_free(raw_data_buffer);
+
+ return -5;
+ }
+
+ flb_free(raw_data_buffer);
+
+ *out_object = perflib_object;
+
+ return 0;
+}
+
+int we_perflib_update_counters(struct flb_we *ctx,
+ char *query,
+ struct we_perflib_metric_source *metric_sources,
+ we_perflib_instance_filter filter_hook,
+ we_perflib_label_prepend_hook label_prepend_hook)
+{
+ char *metric_label_list[WE_PERFLIB_METRIC_LABEL_LIST_SIZE];
+ struct flb_hash_table_entry *instance_hash_entry;
+ size_t metric_label_count;
+ struct mk_list *instance_iterator;
+ struct we_perflib_metric_source *metric_source;
+ size_t metric_index;
+ void *metric_entry;
+ size_t label_index;
+ struct we_perflib_object *measurement;
+ uint64_t timestamp;
+ struct we_perflib_counter *counter;
+ int result;
+
+
+ timestamp = cfl_time_now();
+
+ result = we_perflib_query(ctx, query, &measurement);
+
+ if (result) {
+ return -1;
+ }
+
+ mk_list_foreach_r (instance_iterator, &measurement->instances->entries) {
+ instance_hash_entry = mk_list_entry(instance_iterator,
+ struct flb_hash_table_entry,
+ _head_parent);
+
+ if (filter_hook(instance_hash_entry->key, ctx) == 0) {
+ for (metric_index = 0 ;
+ metric_sources[metric_index].name != NULL ;
+ metric_index++) {
+
+ metric_source = &metric_sources[metric_index];
+
+ counter = we_perflib_get_counter(measurement,
+ instance_hash_entry->key,
+ metric_source->name);
+
+ if (counter == NULL) {
+ return -2;
+ }
+
+ metric_label_count = 0;
+
+ result = label_prepend_hook(metric_label_list,
+ WE_PERFLIB_METRIC_LABEL_LIST_SIZE,
+ &metric_label_count,
+ metric_source,
+ instance_hash_entry->key,
+ counter);
+
+ if (result != 0) {
+ return -3;
+ }
+
+ for (label_index = 0 ;
+ label_index < metric_source->label_set_size;
+ label_index++) {
+ metric_label_list[metric_label_count++] = \
+ metric_source->label_set[label_index];
+ }
+
+ metric_entry = metric_source->parent->metric_instance;
+
+ if (metric_source->parent->type == CMT_COUNTER) {
+ cmt_counter_set(metric_entry, timestamp,
+ we_perflib_get_adjusted_counter_value(counter),
+ metric_label_count, metric_label_list);
+ }
+ else if (metric_source->parent->type == CMT_GAUGE) {
+ cmt_gauge_set(metric_entry, timestamp,
+ we_perflib_get_adjusted_counter_value(counter),
+ metric_label_count, metric_label_list);
+ }
+ }
+ }
+ }
+
+ we_perflib_destroy_object(measurement);
+
+ return 0;
+}
+
+struct we_perflib_counter *we_perflib_get_counter(struct we_perflib_object *object,
+ char *instance_name,
+ char *counter_name)
+{
+ struct we_perflib_instance *instance;
+ struct we_perflib_counter *counter;
+
+ if (instance_name == NULL) {
+ instance_name = "DEFAULT";
+ }
+
+ instance = flb_hash_table_get_ptr(object->instances,
+ instance_name,
+ strlen(instance_name));
+
+ if (instance == NULL) {
+ return NULL;
+ }
+
+ counter = flb_hash_table_get_ptr(instance->counters,
+ counter_name,
+ strlen(counter_name));
+
+ return counter;
+}
+
+int we_perflib_init(struct flb_we *ctx)
+{
+ int result;
+
+ result = get_text_mapping_table(&ctx->perflib_context.counter_indexes);
+
+ if (result) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int we_perflib_exit(struct flb_we *ctx)
+{
+ if (ctx->perflib_context.counter_indexes != NULL) {
+ flb_hash_table_destroy(ctx->perflib_context.counter_indexes);
+ ctx->perflib_context.counter_indexes = NULL;
+ }
+
+ return 0;
+}
+
+/*
+https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2003/cc785636(v=ws.10)
+*/
+
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_perflib.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_perflib.h
new file mode 100644
index 000000000..46059bb8d
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_perflib.h
@@ -0,0 +1,72 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_PERFLIB_H
+#define FLB_WE_PERFLIB_H
+
+#include "we.h"
+#include "we_metric.h"
+
+#define WE_PERFLIB_REGISTRY_PATH \
+ "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Perflib"
+
+#define WE_PERFLIB_STRING_COUNT_KEY "Last Help"
+#define WE_PERFLIB_COUNTER_KEY_NAME "Counter 009"
+
+#define WE_PERFLIB_METRIC_LABEL_LIST_SIZE 64
+
+#define WE_PERFLIB_QUERY_BUFFER_INITIAL_SIZE (32 * 1024)
+#define WE_PERFLIB_QUERY_BUFFER_INCREMENT_SIZE (16 * 1024)
+
+#define WE_PERFLIB_WINDOWS_EPOCH ((double) 1 / 1e7)
+#define WE_PERFLIB_TICKS_TO_SECONDS_SCALE_FACTOR ((double) 116444736000000000)
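+
+/*
+ * Background for the two constants above: perflib timestamps are expressed in
+ * 100 ns ticks counted from 1601-01-01 (the Windows FILETIME epoch).  The
+ * conventional conversion to Unix epoch seconds is, as a sketch:
+ *
+ *     unix_seconds = (ticks - 116444736000000000.0) * (1.0 / 1e7);
+ *
+ * i.e. subtract the 1601 -> 1970 offset in ticks and scale by seconds per tick.
+ */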
+
+typedef int (*we_perflib_instance_filter)(char *, struct flb_we *);
+typedef int (*we_perflib_label_prepend_hook)(char **,
+ size_t,
+ size_t *,
+ struct we_perflib_metric_source *,
+ char *,
+ struct we_perflib_counter *);
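+
+/*
+ * The two callback types above are implemented per collector; in this patch
+ * we_net.c provides we_net_instance_hook() (filter: a non-zero return skips an
+ * instance) and we_net_label_prepend_hook() (pushes the instance name as the
+ * first metric label before the source's static label set is appended).
+ */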
+
+int we_perflib_init(struct flb_we *ctx);
+int we_perflib_exit(struct flb_we *ctx);
+
+int we_perflib_query(struct flb_we *ctx,
+ char *counter_name,
+ struct we_perflib_object **out_object);
+
+struct we_perflib_counter *we_perflib_get_counter(struct we_perflib_object *object,
+ char *instance_name,
+ char *counter_name);
+
+void we_perflib_destroy_object(struct we_perflib_object *object);
+
+char *we_perflib_get_counter_type_as_text(uint32_t counter_Type);
+
+int we_perflib_update_counters(struct flb_we *ctx,
+ char *query,
+ struct we_perflib_metric_source *metric_sources,
+ we_perflib_instance_filter filter_hook,
+ we_perflib_label_prepend_hook label_prepend_hook);
+
+double we_perflib_get_adjusted_counter_value(struct we_perflib_counter *counter);
+
+#endif \ No newline at end of file
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_util.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_util.c
new file mode 100644
index 000000000..625872709
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_util.c
@@ -0,0 +1,167 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_sds.h>
+
+#include "we.h"
+#include "we_util.h"
+
+int we_get_windows_version(double *version_number)
+{
+ LSTATUS result;
+ DWORD data_size;
+ HKEY key_handle;
+ char version_text[8];
+
+ data_size = sizeof(version_text);
+
+ result = RegOpenKeyExA(HKEY_LOCAL_MACHINE,
+ WE_VERSION_REGISTRY_PATH,
+ 0,
+ KEY_READ,
+ &key_handle);
+
+ if (result != ERROR_SUCCESS) {
+ return FLB_FALSE;
+ }
+
+ result = RegQueryValueExA(key_handle,
+ WE_VERSION_KEY_NAME,
+ NULL,
+ 0,
+ version_text,
+ &data_size);
+
+ RegCloseKey(key_handle);
+
+ if (result != ERROR_SUCCESS)
+ {
+ return FLB_FALSE;
+ }
+
+ *version_number = strtod(version_text, NULL);
+
+ return FLB_TRUE;
+}
+
+void we_hexdump(uint8_t *buffer, size_t buffer_length, size_t line_length) {
+ char *printable_line;
+ size_t buffer_index;
+ size_t filler_index;
+
+ if (40 < line_length)
+ {
+ line_length = 40;
+ }
+
+ printable_line = malloc(line_length + 1);
+
+ if (NULL == printable_line)
+ {
+        printf("malloc returned NULL\n");
+
+ return;
+ }
+
+ memset(printable_line, '\0', line_length + 1);
+
+ for (buffer_index = 0 ; buffer_index < buffer_length ; buffer_index++) {
+ if (0 != buffer_index &&
+ 0 == (buffer_index % line_length)) {
+
+ printf("%s\n", printable_line);
+
+ memset(printable_line, '\0', line_length + 1);
+ }
+
+ if (0 != isprint(buffer[buffer_index])) {
+ printable_line[(buffer_index % line_length)] = buffer[buffer_index];
+ }
+ else {
+ printable_line[(buffer_index % line_length)] = '.';
+ }
+
+ printf("%02X ", buffer[buffer_index]);
+ }
+
+ if (0 != buffer_index &&
+ 0 != (buffer_index % line_length)) {
+
+ for (filler_index = 0 ;
+ filler_index < (line_length - (buffer_index % line_length)) ;
+ filler_index++) {
+ printf(" ");
+ }
+
+ printf("%s\n", printable_line);
+
+ memset(printable_line, '.', line_length);
+ }
+
+ free(printable_line);
+}
+
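+/*
+ * Both conversion helpers below return a buffer allocated with flb_calloc();
+ * callers are expected to release it with flb_free(), e.g. (sketch):
+ *
+ *     wchar_t *w = we_convert_str("ROOT\\CIMV2");
+ *     ...
+ *     flb_free(w);
+ */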
+char* we_convert_wstr(wchar_t *wstr, UINT codePage)
+{
+ int size = 0;
+ char *buf = NULL;
+
+ size = WideCharToMultiByte(codePage, 0, wstr, -1, NULL, 0, NULL, NULL);
+ if (size == 0) {
+ return NULL;
+ }
+
+ buf = flb_calloc(1, size);
+ if (buf == NULL) {
+ flb_errno();
+ return NULL;
+ }
+ size = WideCharToMultiByte(codePage, 0, wstr, -1, buf, size, NULL, NULL);
+ if (size == 0) {
+ flb_free(buf);
+ return NULL;
+ }
+
+ return buf;
+}
+
+wchar_t* we_convert_str(char *str)
+{
+ int size = 0;
+ wchar_t *buf = NULL;
+
+ size = MultiByteToWideChar(CP_UTF8, 0, str, -1, NULL, 0);
+ if (size == 0) {
+ return NULL;
+ }
+
+    buf = flb_calloc(size, sizeof(wchar_t));
+ if (buf == NULL) {
+ flb_errno();
+ return NULL;
+ }
+ size = MultiByteToWideChar(CP_UTF8, 0, str, -1, buf, size);
+ if (size == 0) {
+ flb_free(buf);
+ return NULL;
+ }
+
+ return buf;
+}
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_util.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_util.h
new file mode 100644
index 000000000..1f556d2cb
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_util.h
@@ -0,0 +1,37 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_UTIL_H
+#define FLB_WE_UTIL_H
+
+#include "we.h"
+
+#define WE_VERSION_REGISTRY_PATH \
+ "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion"
+
+#define WE_VERSION_KEY_NAME "CurrentVersion"
+
+int we_get_windows_version(double *version_number);
+void we_hexdump(uint8_t *buffer, size_t buffer_length, size_t line_length);
+/* Utilities for char/wchar_t conversion */
+wchar_t* we_convert_str(char *str);
+char* we_convert_wstr(wchar_t *wstr, UINT codePage);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi.c
new file mode 100644
index 000000000..03505c4bc
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi.c
@@ -0,0 +1,572 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "we.h"
+#include "we_util.h"
+#include "we_wmi.h"
+
+static int wmi_coinitialize(struct flb_we *ctx, char* wmi_namespace)
+{
+ IWbemLocator *locator = 0;
+ IWbemServices *service = 0;
+ HRESULT hr;
+ wchar_t *wnamespace;
+
+ flb_plg_debug(ctx->ins, "initializing WMI instance....");
+
+ /* Initialize COM library */
+ hr = CoInitializeEx(0, COINIT_MULTITHREADED);
+ if (FAILED(hr)) {
+ flb_plg_error(ctx->ins, "Failed to initialize COM library. Error code = %x", hr);
+ return -1;
+ }
+
+ /* Initialize COM security */
+ hr = CoInitializeSecurity(NULL,
+ -1,
+ NULL,
+ NULL,
+ RPC_C_AUTHN_LEVEL_DEFAULT,
+ RPC_C_IMP_LEVEL_IMPERSONATE,
+ NULL,
+ EOAC_NONE,
+ NULL);
+
+ if (FAILED(hr)) {
+ return hr;
+ }
+
+ /* Create WMI instance */
+ hr = CoCreateInstance(&CLSID_WbemLocator, 0,
+ CLSCTX_INPROC_SERVER, &IID_IWbemLocator, (LPVOID *) &locator);
+ if (FAILED(hr)) {
+ flb_plg_error(ctx->ins, "Failed to create IWbemLocator object. Error code = %x", hr);
+ CoUninitialize();
+ return hr;
+ }
+ ctx->locator = locator;
+
+ if (wmi_namespace == NULL) {
+ wnamespace = we_convert_str("ROOT\\CIMV2");
+ }
+ else {
+ wnamespace = we_convert_str(wmi_namespace);
+ }
+ /* Connect WMI server */
+ hr = locator->lpVtbl->ConnectServer(locator,
+ wnamespace,
+ NULL,
+ NULL,
+ 0,
+ 0,
+ 0,
+ NULL,
+ &service);
+ flb_free(wnamespace);
+
+ if (FAILED(hr)) {
+ flb_plg_error(ctx->ins, "Could not connect. Error code = %x", hr);
+ locator->lpVtbl->Release(locator);
+ CoUninitialize();
+ return hr;
+ }
+ ctx->service = service;
+
+ /* Set up ProxyBlanket */
+ hr = CoSetProxyBlanket(service,
+ RPC_C_AUTHN_WINNT,
+ RPC_C_AUTHZ_NONE,
+ NULL,
+ RPC_C_AUTHN_LEVEL_CALL,
+ RPC_C_IMP_LEVEL_IMPERSONATE,
+ NULL,
+ EOAC_NONE
+ );
+ if (FAILED(hr)) {
+ flb_plg_error(ctx->ins, "Could not set proxy blanket. Error code = %x", hr);
+ service->lpVtbl->Release(service);
+ locator->lpVtbl->Release(locator);
+ CoUninitialize();
+ return hr;
+ }
+
+ return 0;
+}
+
+
+
+int wmi_utils_str_to_double(char *str, double *out_val)
+{
+ double val;
+ char *end;
+
+ errno = 0;
+ val = strtod(str, &end);
+ if (errno != 0 || *end != '\0') {
+ return -1;
+ }
+ *out_val = val;
+ return 0;
+}
+
+static int wmi_update_counters(struct wmi_query_spec *spec, uint64_t timestamp, double val, int metric_label_count, char **metric_label_set)
+{
+ val = spec->value_adjuster(val);
+
+ if (spec->type == CMT_GAUGE) {
+ cmt_gauge_set((struct cmt_gauge *)spec->metric_instance, timestamp,
+ val,
+ metric_label_count, metric_label_set);
+ }
+ else if (spec->type == CMT_COUNTER) {
+ cmt_counter_set((struct cmt_counter *)spec->metric_instance, timestamp,
+ val,
+ metric_label_count, metric_label_set);
+ }
+
+ return 0;
+}
+
+static char *convert_prop_to_str(VARIANT *prop, int handle_null)
+{
+ char *strlabel = NULL;
+ char *newstr = NULL;
+
+ if (handle_null == FLB_TRUE && prop->vt == VT_NULL) {
+ newstr = strdup("");
+ if (newstr == NULL) {
+ return NULL;
+ }
+ }
+ else {
+ if (VariantChangeType(prop, prop, 0, VT_BSTR) != S_OK) {
+ return NULL;
+ }
+ strlabel = we_convert_wstr(prop->bstrVal, CP_UTF8);
+ if (strlabel == NULL) {
+ return NULL;
+ }
+ newstr = strdup(strlabel);
+ if (newstr == NULL) {
+ free(strlabel);
+ return NULL;
+ }
+ free(strlabel);
+ }
+ return newstr;
+}
+
+static double wmi_get_value(struct flb_we *ctx, struct wmi_query_spec *spec, IWbemClassObject *class_obj)
+{
+ VARIANT prop;
+ char *strprop;
+ double val = 1.0;
+ HRESULT hr;
+ wchar_t *wproperty;
+
+ VariantInit(&prop);
+ wproperty = we_convert_str(spec->wmi_property);
+ hr = class_obj->lpVtbl->Get(class_obj, wproperty, 0, &prop, 0, 0);
+ if (FAILED(hr)) {
+        flb_plg_warn(ctx->ins, "Failed to retrieve property '%s'. Error code = %x", spec->wmi_property, hr);
+ }
+ strprop = convert_prop_to_str(&prop, FLB_FALSE);
+ if (strprop == NULL) {
+ return 0;
+ }
+ wmi_utils_str_to_double(strprop, &val);
+ flb_free(strprop);
+ VariantClear(&prop);
+ flb_free(wproperty);
+
+ return val;
+}
+
+static double wmi_get_property_value(struct flb_we *ctx, char *raw_property_key, IWbemClassObject *class_obj)
+{
+ VARIANT prop;
+ char *strprop;
+ double val = 1.0;
+ HRESULT hr;
+ wchar_t *wproperty;
+
+ VariantInit(&prop);
+ wproperty = we_convert_str(raw_property_key);
+ hr = class_obj->lpVtbl->Get(class_obj, wproperty, 0, &prop, 0, 0);
+ if (FAILED(hr)) {
+        flb_plg_warn(ctx->ins, "Failed to retrieve property '%s'. Error code = %x", raw_property_key, hr);
+ }
+ strprop = convert_prop_to_str(&prop, FLB_FALSE);
+ if (strprop == NULL) {
+ return 0;
+ }
+ wmi_utils_str_to_double(strprop, &val);
+ flb_free(strprop);
+ VariantClear(&prop);
+ flb_free(wproperty);
+
+ return val;
+}
+
+static char *wmi_get_property_str_value(struct flb_we *ctx, char *raw_property_key,
+ IWbemClassObject *class_obj)
+{
+ VARIANT prop;
+ char *strprop;
+ char *str_val = NULL;
+ HRESULT hr;
+ wchar_t *wproperty;
+
+
+ VariantInit(&prop);
+ wproperty = we_convert_str(raw_property_key);
+ hr = class_obj->lpVtbl->Get(class_obj, wproperty, 0, &prop, 0, 0);
+ if (FAILED(hr)) {
+        flb_plg_warn(ctx->ins, "Failed to retrieve property '%s'. Error code = %x", raw_property_key, hr);
+ }
+ str_val = convert_prop_to_str(&prop, FLB_TRUE);
+ VariantClear(&prop);
+ flb_free(wproperty);
+
+ return str_val;
+}
+
+static inline int wmi_update_metrics(struct flb_we *ctx, struct wmi_query_spec *spec,
+ double val, IWbemClassObject *class_obj, uint64_t timestamp)
+{
+
+ VARIANT prop;
+ int label_index = 0;
+ HRESULT hr;
+ char *metric_label_set[WE_WMI_METRIC_LABEL_LIST_SIZE];
+ int metric_label_count = 0;
+ char buf[16] = {0};
+ wchar_t *wlabel;
+ char *newstr = NULL;
+
+ VariantInit(&prop);
+ metric_label_count = 0;
+ for (label_index = 0; label_index < spec->label_property_count; label_index++) {
+ wlabel = we_convert_str(spec->label_property_keys[label_index]);
+ hr = class_obj->lpVtbl->Get(class_obj, wlabel, 0, &prop, 0, 0);
+ if (FAILED(hr)) {
+            flb_plg_warn(ctx->ins, "Failed to retrieve property. Error code = %x", hr);
+ }
+ newstr = convert_prop_to_str(&prop, FLB_TRUE);
+ if (newstr == NULL) {
+ continue;
+ }
+ metric_label_set[label_index] = newstr;
+ metric_label_count++;
+ VariantClear(&prop);
+ flb_free(wlabel);
+ }
+
+ wmi_update_counters(spec, timestamp, val, metric_label_count, metric_label_set);
+
+ VariantClear(&prop);
+
+ return 0;
+}
+
+static inline int wmi_execute_query(struct flb_we *ctx, struct wmi_query_spec *spec, IEnumWbemClassObject **out_enumerator)
+{
+ HRESULT hr;
+ wchar_t *wquery;
+ char *query = NULL;
+ IEnumWbemClassObject* enumerator = NULL;
+ size_t size;
+
+    /* "SELECT * FROM " is 14 characters; reserve one extra byte for the
+     * terminating NUL so snprintf() does not truncate the last character. */
+    size = 14 + strlen(spec->wmi_counter) + 1;
+    if (spec->where_clause != NULL) {
+        /* " WHERE " adds 7 characters */
+        size += 7 + strlen(spec->where_clause);
+    }
+    query = flb_calloc(size, sizeof(char));
+ if (!query) {
+ flb_errno();
+ return -1;
+ }
+ if (spec->where_clause != NULL) {
+ snprintf(query, size, "SELECT * FROM %s WHERE %s", spec->wmi_counter, spec->where_clause);
+ }
+ else {
+ snprintf(query, size, "SELECT * FROM %s", spec->wmi_counter);
+ }
+ flb_trace("[wmi] query = %s", query);
+ wquery = we_convert_str(query);
+ flb_free(query);
+
+ hr = ctx->service->lpVtbl->ExecQuery(
+ ctx->service,
+ L"WQL",
+ wquery,
+ WBEM_FLAG_FORWARD_ONLY | WBEM_FLAG_RETURN_IMMEDIATELY,
+ NULL,
+ &enumerator);
+
+ flb_free(wquery);
+
+ if (FAILED(hr)) {
+ flb_plg_error(ctx->ins, "Query for %s %s failed. Error code = %x",
+ spec->wmi_counter, spec->wmi_counter, hr);
+ ctx->service->lpVtbl->Release(ctx->service);
+ ctx->locator->lpVtbl->Release(ctx->locator);
+ CoUninitialize();
+ return -1;
+ }
+
+ *out_enumerator = enumerator;
+
+ return 0;
+}
+
+static int wmi_exec_query_fixed_val(struct flb_we *ctx, struct wmi_query_spec *spec)
+{
+ IEnumWbemClassObject* enumerator = NULL;
+ HRESULT hr;
+
+ IWbemClassObject *class_obj = NULL;
+ ULONG ret = 0;
+ uint64_t timestamp = 0;
+
+ timestamp = cfl_time_now();
+
+ if (FAILED(wmi_execute_query(ctx, spec, &enumerator))) {
+ return -1;
+ }
+
+ while (enumerator) {
+ hr = enumerator->lpVtbl->Next(enumerator, WBEM_INFINITE, 1,
+ &class_obj, &ret);
+
+ if(0 == ret) {
+ break;
+ }
+
+ wmi_update_metrics(ctx, spec, 1.0, class_obj, timestamp);
+
+ class_obj->lpVtbl->Release(class_obj);
+ }
+
+ enumerator->lpVtbl->Release(enumerator);
+
+ return 0;
+}
+
+static int wmi_exec_query(struct flb_we *ctx, struct wmi_query_spec *spec)
+{
+ IEnumWbemClassObject* enumerator = NULL;
+ HRESULT hr;
+
+ IWbemClassObject *class_obj = NULL;
+ ULONG ret = 0;
+ double val = 0;
+ uint64_t timestamp = 0;
+
+ timestamp = cfl_time_now();
+
+ if (FAILED(wmi_execute_query(ctx, spec, &enumerator))) {
+ return -1;
+ }
+
+ while (enumerator) {
+ hr = enumerator->lpVtbl->Next(enumerator, WBEM_INFINITE, 1,
+ &class_obj, &ret);
+
+ if(0 == ret) {
+ break;
+ }
+
+ val = wmi_get_value(ctx, spec, class_obj);
+
+ wmi_update_metrics(ctx, spec, val, class_obj, timestamp);
+
+ class_obj->lpVtbl->Release(class_obj);
+ }
+
+ enumerator->lpVtbl->Release(enumerator);
+
+ return 0;
+}
+
+static int wmi_cleanup(struct flb_we *ctx)
+{
+ flb_plg_debug(ctx->ins, "deinitializing WMI instance....");
+
+ /* Clean up */
+ if (ctx->service != NULL) {
+ ctx->service->lpVtbl->Release(ctx->service);
+ ctx->service = NULL;
+ }
+ if (ctx->locator != NULL) {
+ ctx->locator->lpVtbl->Release(ctx->locator);
+ ctx->locator = NULL;
+ }
+ CoUninitialize();
+
+ return 0;
+}
+
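+/*
+ * High-level flow of the helpers below (per query): wmi_coinitialize() sets up
+ * COM and connects to the requested namespace, wmi_exec_query*() runs the WQL
+ * SELECT and walks the result enumerator, and wmi_cleanup() releases the
+ * service and locator and uninitializes COM again.
+ */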
+static int wmi_query(struct flb_we *ctx, struct wmi_query_spec *spec)
+{
+ if (FAILED(wmi_coinitialize(ctx, NULL))) {
+ return -1;
+ }
+ if (FAILED(wmi_exec_query(ctx, spec))) {
+ return -1;
+ }
+
+ wmi_cleanup(ctx);
+
+ return 0;
+}
+
+static int wmi_query_namespace(struct flb_we *ctx, struct wmi_query_spec *spec, char *namespace)
+{
+ if (FAILED(wmi_coinitialize(ctx, namespace))) {
+ return -1;
+ }
+ if (FAILED(wmi_exec_query(ctx, spec))) {
+ return -1;
+ }
+
+ wmi_cleanup(ctx);
+
+ return 0;
+}
+
+static int wmi_query_fixed_val(struct flb_we *ctx, struct wmi_query_spec *spec)
+{
+ if (FAILED(wmi_coinitialize(ctx, NULL))) {
+ return -1;
+ }
+ if (FAILED(wmi_exec_query_fixed_val(ctx, spec))) {
+ return -1;
+ }
+
+ wmi_cleanup(ctx);
+
+ return 0;
+}
+
+int we_wmi_init(struct flb_we *ctx)
+{
+ ctx->locator = NULL;
+ ctx->service = NULL;
+
+ return 0;
+}
+
+int we_wmi_cleanup(struct flb_we *ctx)
+{
+ wmi_cleanup(ctx);
+
+ return 0;
+}
+
+int we_wmi_exit(struct flb_we *ctx)
+{
+ return 0;
+}
+
+/* Abstract APIs */
+int we_wmi_query_fixed_val(struct flb_we *ctx, struct wmi_query_spec *spec)
+{
+ if (FAILED(wmi_query_fixed_val(ctx, spec))) {
+ return -1;
+ }
+ return 0;
+}
+
+int we_wmi_query(struct flb_we *ctx, struct wmi_query_spec *spec)
+{
+ if (FAILED(wmi_query(ctx, spec))) {
+ return -1;
+ }
+ return 0;
+}
+
+int we_wmi_query_namespace(struct flb_we *ctx, struct wmi_query_spec *spec, char *namespace)
+{
+ if (FAILED(wmi_query_namespace(ctx, spec, namespace))) {
+ return -1;
+ }
+ return 0;
+}
+
+/* Concrete APIs */
+int we_wmi_coinitialize(struct flb_we *ctx)
+{
+ if (FAILED(wmi_coinitialize(ctx, NULL))) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int we_wmi_execute_query(struct flb_we *ctx, struct wmi_query_spec *spec, IEnumWbemClassObject **out_enumerator)
+{
+ IEnumWbemClassObject* enumerator = NULL;
+
+ if (FAILED(wmi_execute_query(ctx, spec, &enumerator))) {
+ return -1;
+ }
+
+ *out_enumerator = enumerator;
+
+ return 0;
+}
+
+double we_wmi_get_value(struct flb_we *ctx, struct wmi_query_spec *spec, IWbemClassObject *class_obj)
+{
+ return wmi_get_value(ctx, spec, class_obj);
+}
+
+double we_wmi_get_property_value(struct flb_we *ctx, char *raw_property_key, IWbemClassObject *class_obj)
+{
+ return wmi_get_property_value(ctx, raw_property_key, class_obj);
+}
+
+char *we_wmi_get_property_str_value(struct flb_we *ctx, char *raw_property_key,
+ IWbemClassObject *class_obj)
+{
+ return wmi_get_property_str_value(ctx, raw_property_key, class_obj);
+}
+
+int we_wmi_update_counters(struct flb_we *ctx, struct wmi_query_spec *spec, uint64_t timestamp, double val, int metric_label_count, char **metric_label_set)
+{
+ wmi_update_counters(spec, timestamp, val, metric_label_count, metric_label_set);
+
+ return 0;
+}
+
+/*
+https://stackoverflow.com/questions/33033111/create-com-object-using-plain-c
+https://stackoverflow.com/questions/1431103/how-to-obtain-data-from-wmi-using-a-c-application
+https://stackoverflow.com/questions/626674/wmi-queries-in-c
+https://github.com/MicrosoftDocs/win32/blob/docs/desktop-src/WmiSdk/example--getting-wmi-data-from-the-local-computer.md
+https://docs.microsoft.com/en-us/windows/win32/wmisdk/creating-wmi-clients
+*/
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi.h
new file mode 100644
index 000000000..2999f5764
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi.h
@@ -0,0 +1,59 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_WMI_H
+#define FLB_WE_WMI_H
+
+#include "we.h"
+
+#define WE_WMI_METRIC_LABEL_LIST_SIZE 64
+
+typedef double (*we_wmi_value_adjuster) (double);
+struct wmi_query_spec {
+ void *metric_instance;
+ int type;
+ we_wmi_value_adjuster value_adjuster;
+ char *wmi_counter;
+ char *wmi_property;
+ int label_property_count;
+ char **label_property_keys;
+ char *where_clause;
+};
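+
+/*
+ * Sketch of how a collector fills a wmi_query_spec (see we_wmi_cpu_info.c in
+ * this patch for the full initialization); values shown are illustrative:
+ *
+ *     spec->metric_instance      = (void *) gauge;     // cmt_gauge or cmt_counter
+ *     spec->type                 = CMT_GAUGE;
+ *     spec->value_adjuster       = nop_adjust;         // identity adjustment
+ *     spec->wmi_counter          = "Win32_Processor";  // WMI class to query
+ *     spec->wmi_property         = "";                 // empty: fixed value 1.0
+ *     spec->label_property_count = 7;
+ *     spec->label_property_keys  = keys;               // properties used as labels
+ *     spec->where_clause         = NULL;               // optional WQL WHERE clause
+ */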
+
+int we_wmi_init(struct flb_we *ctx);
+int we_wmi_cleanup(struct flb_we *ctx);
+int we_wmi_exit(struct flb_we *ctx);
+
+/* Abstract APIs */
+int we_wmi_query(struct flb_we *ctx, struct wmi_query_specs *spec);
+int we_wmi_query_fixed_val(struct flb_we *ctx, struct wmi_query_specs *spec);
+int we_wmi_query_namespace(struct flb_we *ctx, struct wmi_query_specs *spec, char *namespace);
+
+/* Concrete APIs */
+int we_wmi_coinitialize(struct flb_we *ctx);
+int we_wmi_execute_query(struct flb_we *ctx, struct wmi_query_spec *spec, IEnumWbemClassObject **out_enumerator);
+double we_wmi_get_value(struct flb_we *ctx, struct wmi_query_spec *spec, IWbemClassObject *class_obj);
+double we_wmi_get_property_value(struct flb_we *ctx, char *raw_property_key, IWbemClassObject *class_obj);
+char *we_wmi_get_property_str_value(struct flb_we *ctx, char *raw_property_key,
+ IWbemClassObject *class_obj);
+int we_wmi_update_counters(struct flb_we *ctx, struct wmi_query_spec *spec,
+ uint64_t timestamp, double val, int metric_label_count, char **metric_label_set);
+
+#endif
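
Every collector in the files below boils down to filling one of these wmi_query_spec structures at init time and handing it to the query helpers. A hypothetical example using designated initializers; the class, property and values are placeholders chosen for illustration only:

    struct wmi_query_spec example = {
        .metric_instance      = NULL,            /* a cmt_gauge or cmt_counter created at init               */
        .type                 = CMT_GAUGE,       /* tells the update path how to store samples               */
        .value_adjuster       = NULL,            /* collectors install an identity or unit-converting callback */
        .wmi_counter          = "Win32_PerfRawData_PerfOS_System",  /* WMI class to query                     */
        .wmi_property         = "Processes",     /* property read as the sample value                         */
        .label_property_count = 0,               /* extra properties exported as metric labels                */
        .label_property_keys  = NULL,
        .where_clause         = NULL             /* optional WQL WHERE filter                                 */
    };
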
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_cpu_info.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_cpu_info.c
new file mode 100644
index 000000000..9e8e96e1a
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_cpu_info.c
@@ -0,0 +1,116 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "we.h"
+#include "we_wmi.h"
+#include "we_wmi_cpu_info.h"
+#include "we_util.h"
+#include "we_metric.h"
+
+static double nop_adjust(double value)
+{
+ return value;
+}
+
+int we_wmi_cpu_info_init(struct flb_we *ctx)
+{
+ ctx->wmi_cpu_info = flb_calloc(1, sizeof(struct we_wmi_cpu_info_counters));
+ if (!ctx->wmi_cpu_info) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_cpu_info->operational = FLB_FALSE;
+
+ struct cmt_gauge *g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "", "cpu_info",
+ "Labeled CPU information provided by WMI Win32_Processor",
+ 7, (char *[]) {"architecture",
+ "device_id",
+ "description",
+ "family",
+ "l2_cache_size",
+ "l3_cache_size",
+ "name"});
+ if (!g) {
+ return -1;
+ }
+
+ ctx->wmi_cpu_info->info = flb_calloc(1, sizeof(struct wmi_query_spec));
+ if (!ctx->wmi_cpu_info->info) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_cpu_info->info->label_property_keys = (char **) flb_calloc(7, sizeof(char *));
+ if (!ctx->wmi_cpu_info->info->label_property_keys) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->wmi_cpu_info->info->metric_instance = (void *)g;
+ ctx->wmi_cpu_info->info->type = CMT_GAUGE;
+ ctx->wmi_cpu_info->info->value_adjuster = nop_adjust;
+ ctx->wmi_cpu_info->info->wmi_counter = "Win32_Processor";
+ /* This metric does not read a value from WMI; every matching
+ * instance is reported with a fixed value of 1.0. */
+ ctx->wmi_cpu_info->info->wmi_property = "";
+ ctx->wmi_cpu_info->info->label_property_count = 7;
+ ctx->wmi_cpu_info->info->label_property_keys[0] = "architecture" ;
+ ctx->wmi_cpu_info->info->label_property_keys[1] = "deviceid" ;
+ ctx->wmi_cpu_info->info->label_property_keys[2] = "description" ;
+ ctx->wmi_cpu_info->info->label_property_keys[3] = "family" ;
+ ctx->wmi_cpu_info->info->label_property_keys[4] = "l2cachesize" ;
+ ctx->wmi_cpu_info->info->label_property_keys[5] = "l3cachesize" ;
+ ctx->wmi_cpu_info->info->label_property_keys[6] = "name" ;
+ ctx->wmi_cpu_info->info->where_clause = NULL;
+
+ ctx->wmi_cpu_info->operational = FLB_TRUE;
+
+ return 0;
+}
+
+int we_wmi_cpu_info_exit(struct flb_we *ctx)
+{
+ flb_free(ctx->wmi_cpu_info->info->label_property_keys);
+ flb_free(ctx->wmi_cpu_info->info);
+ flb_free(ctx->wmi_cpu_info);
+
+ return 0;
+}
+
+int we_wmi_cpu_info_update(struct flb_we *ctx)
+{
+ if (!ctx->wmi_cpu_info->operational) {
+ flb_plg_error(ctx->ins, "cpu_info collector not yet in operational state");
+
+ return -1;
+ }
+
+ if (FAILED(we_wmi_query_fixed_val(ctx, ctx->wmi_cpu_info->info))) {
+ return -1;
+ }
+
+ return 0;
+}
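
cpu_info is an "info"-style metric: all of the interesting data travels in the labels and the sample value itself is constant. Assuming we_wmi_query_fixed_val() behaves as the comment in we_wmi_cpu_info_init() indicates, each Win32_Processor instance ends up roughly as the call sketched below; the label values are invented for illustration:

    /* One sample per processor package, value pinned to 1.0. */
    cmt_gauge_set(g, cfl_time_now(), 1.0, 7,
                  (char *[]) {"9",                /* architecture (9 = x64) */
                              "CPU0",             /* device_id              */
                              "AMD64 Family 25",  /* description            */
                              "107",              /* family                 */
                              "512",              /* l2_cache_size          */
                              "16384",            /* l3_cache_size          */
                              "Example CPU"});    /* name                   */
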
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_cpu_info.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_cpu_info.h
new file mode 100644
index 000000000..8120b4983
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_cpu_info.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_WMI_CPU_INFO_H
+#define FLB_WE_WMI_CPU_INFO_H
+
+#include "we.h"
+
+int we_wmi_cpu_info_init(struct flb_we *ctx);
+int we_wmi_cpu_info_exit(struct flb_we *ctx);
+int we_wmi_cpu_info_update(struct flb_we *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_logon.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_logon.c
new file mode 100644
index 000000000..761d4ac7a
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_logon.c
@@ -0,0 +1,198 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "we.h"
+#include "we_wmi.h"
+#include "we_wmi_logon.h"
+#include "we_util.h"
+#include "we_metric.h"
+
+static double nop_adjust(double value)
+{
+ return value;
+}
+
+int we_wmi_logon_init(struct flb_we *ctx)
+{
+ ctx->wmi_logon = flb_calloc(1, sizeof(struct we_wmi_logon_counters));
+ if (!ctx->wmi_logon) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_logon->operational = FLB_FALSE;
+
+ struct cmt_gauge *g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "logon", "logon_type",
+ "Number of active logon sessions, grouped by logon type, as reported by WMI Win32_LogonSession (LogonType)",
+ 1, (char *[]) {"status"});
+ if (!g) {
+ return -1;
+ }
+
+ ctx->wmi_logon->info = flb_calloc(1, sizeof(struct wmi_query_spec));
+ if (!ctx->wmi_logon->info) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_logon->info->label_property_keys = (char **) flb_calloc(1, sizeof(char *));
+ if (!ctx->wmi_logon->info->label_property_keys) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->wmi_logon->info->metric_instance = (void *)g;
+ ctx->wmi_logon->info->type = CMT_GAUGE;
+ ctx->wmi_logon->info->value_adjuster = nop_adjust;
+ ctx->wmi_logon->info->wmi_counter = "Win32_LogonSession";
+ ctx->wmi_logon->info->wmi_property = "LogonType";
+ ctx->wmi_logon->info->label_property_count = 1;
+ ctx->wmi_logon->info->label_property_keys[0] = "status" ;
+ ctx->wmi_logon->info->where_clause = NULL;
+
+ ctx->wmi_logon->operational = FLB_TRUE;
+
+ return 0;
+}
+
+int we_wmi_logon_exit(struct flb_we *ctx)
+{
+ flb_free(ctx->wmi_logon->info->label_property_keys);
+ flb_free(ctx->wmi_logon->info);
+ flb_free(ctx->wmi_logon);
+
+ return 0;
+}
+
+int we_wmi_logon_update(struct flb_we *ctx)
+{
+ IEnumWbemClassObject* enumerator = NULL;
+ HRESULT hr;
+
+ IWbemClassObject *class_obj = NULL;
+ ULONG ret = 0;
+ double val = 0;
+ int type = 0;
+ uint64_t timestamp = 0;
+ /* Init counters for logon */
+ uint64_t system = 0, interactive = 0, network = 0, batch = 0, service = 0,
+ proxy = 0, unlock = 0, networkcleartext = 0, newcredentials = 0, remoteinteractive = 0,
+ cachedinteractive = 0, cachedremoteinteractive = 0, cachedunlock = 0;
+ struct wmi_query_spec *spec;
+
+ if (!ctx->wmi_logon->operational) {
+ flb_plg_error(ctx->ins, "logon collector not yet in operational state");
+
+ return -1;
+ }
+
+ if (FAILED(we_wmi_coinitialize(ctx))) {
+ return -1;
+ }
+
+ timestamp = cfl_time_now();
+
+ if (FAILED(we_wmi_execute_query(ctx, ctx->wmi_logon->info, &enumerator))) {
+ return -1;
+ }
+
+ while(enumerator) {
+ hr = enumerator->lpVtbl->Next(enumerator, WBEM_INFINITE, 1,
+ &class_obj, &ret);
+
+ if(0 == ret) {
+ break;
+ }
+
+ val = we_wmi_get_value(ctx, ctx->wmi_logon->info, class_obj);
+ type = (int)val;
+
+ switch(type) {
+ case 0:
+ system++;
+ break;
+ case 2:
+ interactive++;
+ break;
+ case 3:
+ network++;
+ break;
+ case 4:
+ batch++;
+ break;
+ case 5:
+ service++;
+ break;
+ case 6:
+ proxy++;
+ break;
+ case 7:
+ unlock++;
+ break;
+ case 8:
+ networkcleartext++;
+ break;
+ case 9:
+ newcredentials++;
+ break;
+ case 10:
+ remoteinteractive++;
+ break;
+ case 11:
+ cachedinteractive++;
+ break;
+ case 12:
+ cachedremoteinteractive++;
+ break;
+ case 13:
+ cachedunlock++;
+ break;
+ }
+
+ class_obj->lpVtbl->Release(class_obj);
+ }
+
+ enumerator->lpVtbl->Release(enumerator);
+
+ spec = ctx->wmi_logon->info;
+
+ we_wmi_update_counters(ctx, spec, timestamp, (double)system, 1, (char *[]) {"system"} );
+ we_wmi_update_counters(ctx, spec, timestamp, (double)interactive, 1, (char *[]) {"interactive"} );
+ we_wmi_update_counters(ctx, spec, timestamp, (double)network, 1, (char *[]) {"network"} );
+ we_wmi_update_counters(ctx, spec, timestamp, (double)batch, 1, (char *[]) {"batch"} );
+ we_wmi_update_counters(ctx, spec, timestamp, (double)service, 1, (char *[]) {"service"} );
+ we_wmi_update_counters(ctx, spec, timestamp, (double)proxy, 1, (char *[]) {"proxy"} );
+ we_wmi_update_counters(ctx, spec, timestamp, (double)unlock, 1, (char *[]) {"unlock"} );
+ we_wmi_update_counters(ctx, spec, timestamp, (double)networkcleartext, 1, (char *[]) {"network_clear_text"} );
+ we_wmi_update_counters(ctx, spec, timestamp, (double)newcredentials, 1, (char *[]) {"new_credentials"} );
+ we_wmi_update_counters(ctx, spec, timestamp, (double)remoteinteractive, 1, (char *[]) {"remote_interactive"} );
+ we_wmi_update_counters(ctx, spec, timestamp, (double)cachedinteractive, 1, (char *[]) {"cached_interactive"} );
+ we_wmi_update_counters(ctx, spec, timestamp, (double)cachedremoteinteractive, 1, (char *[]) {"cached_remote_interactive"} );
+ we_wmi_update_counters(ctx, spec, timestamp, (double)cachedunlock, 1, (char *[]) {"cached_unlock"} );
+
+ we_wmi_cleanup(ctx);
+
+ return 0;
+}
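
The switch statement above is a direct mapping from the numeric Win32_LogonSession.LogonType codes to the status label reported on the gauge. The same mapping written as a small lookup helper, shown only to make the code-to-label relationship explicit; it is not part of the plugin:

    static const char *logon_type_label(int logon_type)
    {
        switch (logon_type) {
        case 0:  return "system";
        case 2:  return "interactive";
        case 3:  return "network";
        case 4:  return "batch";
        case 5:  return "service";
        case 6:  return "proxy";
        case 7:  return "unlock";
        case 8:  return "network_clear_text";
        case 9:  return "new_credentials";
        case 10: return "remote_interactive";
        case 11: return "cached_interactive";
        case 12: return "cached_remote_interactive";
        case 13: return "cached_unlock";
        default: return NULL;   /* code 1 and codes above 13 are not counted */
        }
    }
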
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_logon.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_logon.h
new file mode 100644
index 000000000..9fc8c5318
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_logon.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_WMI_LOGON_H
+#define FLB_WE_WMI_LOGON_H
+
+#include "we.h"
+
+int we_wmi_logon_init(struct flb_we *ctx);
+int we_wmi_logon_exit(struct flb_we *ctx);
+int we_wmi_logon_update(struct flb_we *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_memory.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_memory.c
new file mode 100644
index 000000000..ec1a13e96
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_memory.c
@@ -0,0 +1,557 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "we.h"
+#include "we_wmi.h"
+#include "we_wmi_memory.h"
+#include "we_util.h"
+#include "we_metric.h"
+
+static double nop_adjust(double value)
+{
+ return value;
+}
+
+int we_wmi_memory_init(struct flb_we *ctx)
+{
+ struct cmt_gauge *g;
+
+ ctx->wmi_memory = flb_calloc(1, sizeof(struct we_wmi_memory_counters));
+ if (!ctx->wmi_memory) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_memory->operational = FLB_FALSE;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "available_bytes",
+ "The amount of physical memory, in bytes, immediately available " \
+ "for allocation to a process or for system use. (AvailableBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->available_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "cache_bytes",
+ "The size, in bytes, of the portion of the system file cache " \
+ "which is currently resident and active in physical memory "\
+ "(CacheBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->cache_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "cache_bytes_peak",
+ "the maximum number of bytes used by the system file cache " \
+ "since the system was last restarted (CacheBytesPeak)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->cache_bytes_peak = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "cache_faults_total",
+ "The rate at which faults occur when a page sought in " \
+ "the file system cache is not found and must be retrieved " \
+ "from elsewhere in memory (a soft fault) or from disk (a hard fault)" \
+ "(CacheFaultsPersec)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->cache_faults_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "commit_limit",
+ "The amount of virtual memory that can be committed " \
+ "without having to extend the paging file(s) " \
+ "(CommitLimit)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->commit_limit = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "committed_bytes",
+ "The amount of committed virtual memory, in bytes " \
+ "(CommittedBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->committed_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "demand_zero_faults_total",
+ "The rate at which a zeroed page is required to satisfy the fault " \
+ "(DemandZeroFaultsPersec)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->demand_zero_faults_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "free_and_zero_page_list_bytes",
+ "the amount of physical memory, in bytes, that is assigned to " \
+ "the free and zero page lists " \
+ "(FreeAndZeroPageListBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->free_and_zero_page_list_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "free_system_page_table_entries",
+ "The number of page table entries not currently in use by the system " \
+ "(FreeSystemPageTableEntries)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->free_system_page_table_entries = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "modified_page_list_bytes",
+ "The amount of physical memory, in bytes, that is assigned to " \
+ "the modified page list " \
+ "(ModifiedPageListBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->modified_page_list_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "page_faults_total",
+ "The average number of pages faulted per second. " \
+ "It is measured in number of pages faulted per second " \
+ "because only one page is faulted in each fault operation, " \
+ "hence this is also equal to the number of page fault operations " \
+ "(PageFaultsPersec)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->page_faults_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "swap_page_reads_total",
+ "The rate at which the disk was read to resolve hard page faults " \
+ "(PageReadsPersec)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->swap_page_reads_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "swap_pages_read_total",
+ "The rate at which pages are read from disk to resolve hard page faults " \
+ "(PagesInputPersec)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->swap_pages_read_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "swap_pages_written_total",
+ "the rate at which pages are written to disk to free up space "\
+ "in physical memory (PagesOutputPersec)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->swap_pages_written_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "swap_page_operations_total",
+ "the rate at which pages are read from or written " \
+ "to disk to resolve hard page faults (PagesPersec)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->swap_page_operations_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "swap_page_writes_total",
+ "the rate at which pages are written to disk to free up space " \
+ "in physical memory (PageWritesPersec)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->swap_page_writes_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "pool_nonpaged_allocs_total",
+ "Number of calls to allocate space in the nonpaged pool (PoolNonpagedAllocs)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->pool_nonpaged_allocs_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "pool_nonpaged_bytes",
+ "the size, in bytes, of the nonpaged pool, an area of " \
+ "the system virtual memory that is used for objects " \
+ "that cannot be written to disk, but must remain " \
+ "in physical memory as long as they are allocated " \
+ "(PoolNonpagedBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->pool_nonpaged_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "pool_paged_allocs_total",
+ "Number of calls to allocate space in the paged pool (PoolPagedAllocs)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->pool_paged_allocs_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "pool_paged_bytes",
+ "the size, in bytes, of the paged pool, an area of the system " \
+ "virtual memory that is used for objects that can be written " \
+ "to disk when they are not being used (PoolPagedBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->pool_paged_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "pool_paged_resident_bytes",
+ "the size, in bytes, of the portion of the paged pool " \
+ "that is currently resident and active in physical memory " \
+ "(PoolPagedResidentBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->pool_paged_resident_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "standby_cache_core_bytes",
+ "The amount of physical memory, in bytes, that is assigned " \
+ "to the core standby cache page lists (StandbyCacheCoreBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->standby_cache_core_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "standby_cache_normal_priority_bytes",
+ "The amount of physical memory, in bytes, that is assigned " \
+ "to the normal priority standby cache page lists " \
+ "(StandbyCacheNormalPriorityBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->standby_cache_normal_priority_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "standby_cache_reserve_bytes",
+ "The amount of physical memory, in bytes, that is assigned to " \
+ "the reserve standby cache page lists (StandbyCacheReserveBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->standby_cache_reserve_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "system_cache_resident_bytes",
+ "The amount of physical memory, in bytes, of the portion of " \
+ "the system file cache which is currently resident and active " \
+ "(SystemCacheResidentBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->system_cache_resident_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "system_code_resident_bytes",
+ "The amount of physical memory, in bytes, of the pageable operating system code "\
+ "which is currently resident and active (SystemCodeResidentBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->system_code_resident_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "system_code_total_bytes",
+ "The amount of virtual memory, in bytes, of the pageable operating system code " \
+ "that is mapped into the virtual address space (SystemCodeTotalBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->system_code_total_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "system_driver_resident_bytes",
+ "The amount of pageable physical memory, in bytes, used by device drivers "\
+ "(SystemDriverResidentBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->system_driver_resident_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "system_driver_total_bytes",
+ "The amount of virtual memory, in bytes, used by device drivers " \
+ "(SystemDriverTotalBytes)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->system_driver_total_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "transition_faults_total",
+ "The rate at which page faults are resolved by recovering pages " \
+ "that were being used by another process sharing the page, " \
+ "or were on the modified page list or the standby list, " \
+ "or were being written to disk at the time of the page fault " \
+ "(TransitionFaultsPersec)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->transition_faults_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "transition_pages_repurposed_total",
+ "The rate at which transition cache " \
+ "pages were reused for a different purpose " \
+ "(TransitionPagesRePurposedPersec)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->transition_pages_repurposed_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "memory", "write_copies_total",
+ "The rate at which page faults caused by "\
+ "attempts to write are satisfied by copying " \
+ "the page from elsewhere in physical memory " \
+ "(WriteCopiesPersec)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_memory->write_copies_total = g;
+
+ ctx->wmi_memory->info = flb_calloc(1, sizeof(struct wmi_query_spec));
+ if (!ctx->wmi_memory->info) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_memory->info->metric_instance = (void *)g;
+ ctx->wmi_memory->info->type = CMT_GAUGE;
+ ctx->wmi_memory->info->value_adjuster = nop_adjust;
+ ctx->wmi_memory->info->wmi_counter = "Win32_PerfRawData_PerfOS_Memory";
+ ctx->wmi_memory->info->wmi_property = "";
+ ctx->wmi_memory->info->label_property_count = 0;
+ ctx->wmi_memory->info->label_property_keys = NULL;
+ ctx->wmi_memory->info->where_clause = NULL;
+
+ ctx->wmi_memory->operational = FLB_TRUE;
+
+ return 0;
+}
+
+int we_wmi_memory_exit(struct flb_we *ctx)
+{
+ ctx->wmi_memory->operational = FLB_FALSE;
+
+ flb_free(ctx->wmi_memory->info);
+ flb_free(ctx->wmi_memory);
+
+ return 0;
+}
+
+int we_wmi_memory_update(struct flb_we *ctx)
+{
+ uint64_t timestamp = 0;
+ IEnumWbemClassObject* enumerator = NULL;
+ HRESULT hr;
+
+ IWbemClassObject *class_obj = NULL;
+ ULONG ret = 0;
+ double val = 0;
+
+ if (!ctx->wmi_memory->operational) {
+ flb_plg_error(ctx->ins, "memory collector not yet in operational state");
+
+ return -1;
+ }
+
+ if (FAILED(we_wmi_coinitialize(ctx))) {
+ return -1;
+ }
+
+ timestamp = cfl_time_now();
+
+ if (FAILED(we_wmi_execute_query(ctx, ctx->wmi_memory->info, &enumerator))) {
+ return -1;
+ }
+
+ while(enumerator) {
+ hr = enumerator->lpVtbl->Next(enumerator, WBEM_INFINITE, 1, &class_obj, &ret);
+
+ if(ret == 0) {
+ break;
+ }
+
+ val = we_wmi_get_property_value(ctx, "AvailableBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->available_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "CacheBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->cache_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "CacheBytesPeak", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->cache_bytes_peak, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "CacheFaultsPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->cache_faults_total, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "CommitLimit", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->commit_limit, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "CommittedBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->committed_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "DemandZeroFaultsPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->demand_zero_faults_total, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "FreeAndZeroPageListBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->free_and_zero_page_list_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "FreeSystemPageTableEntries", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->free_system_page_table_entries, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "ModifiedPageListBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->modified_page_list_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "PageFaultsPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->page_faults_total, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "PageReadsPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->swap_page_reads_total, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "PagesInputPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->swap_pages_read_total, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "PagesOutputPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->swap_pages_written_total, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "PagesPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->swap_page_operations_total, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "PageWritesPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->swap_page_writes_total, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "PoolNonpagedAllocs", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->pool_nonpaged_allocs_total, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "PoolNonpagedBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->pool_nonpaged_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "PoolPagedAllocs", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->pool_paged_allocs_total, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "PoolPagedBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->pool_paged_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "PoolPagedResidentBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->pool_paged_resident_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "StandbyCacheCoreBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->standby_cache_core_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "StandbyCacheNormalPriorityBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->standby_cache_normal_priority_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "StandbyCacheReserveBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->standby_cache_reserve_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "SystemCacheResidentBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->system_cache_resident_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "SystemCodeResidentBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->system_code_resident_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "SystemCodeTotalBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->system_code_total_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "SystemDriverResidentBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->system_driver_resident_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "SystemDriverTotalBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->system_driver_total_bytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "TransitionFaultsPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->transition_faults_total, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "TransitionPagesRePurposedPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->transition_pages_repurposed_total, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "WriteCopiesPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_memory->write_copies_total, timestamp, val, 0, NULL);
+
+ class_obj->lpVtbl->Release(class_obj);
+ }
+
+ enumerator->lpVtbl->Release(enumerator);
+
+ we_wmi_cleanup(ctx);
+
+ return 0;
+}
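
we_wmi_memory_update() repeats the same two-line pattern, read one Win32_PerfRawData_PerfOS_Memory property and store it in the matching gauge, for every field it exposes. A hypothetical helper that captures that shape; the plugin writes the pairs out by hand, so this is only a sketch:

    static void gauge_from_property(struct flb_we *ctx, IWbemClassObject *obj,
                                    char *property, struct cmt_gauge *gauge,
                                    uint64_t timestamp)
    {
        double val;

        /* Read one numeric WMI property and publish it as an unlabeled gauge sample. */
        val = we_wmi_get_property_value(ctx, property, obj);
        cmt_gauge_set(gauge, timestamp, val, 0, NULL);
    }

    /* Inside the enumeration loop this would be used as:
     *   gauge_from_property(ctx, class_obj, "AvailableBytes",
     *                       ctx->wmi_memory->available_bytes, timestamp);
     */
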
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_memory.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_memory.h
new file mode 100644
index 000000000..fd6f08d54
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_memory.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_WMI_MEMORY_H
+#define FLB_WE_WMI_MEMORY_H
+
+#include "we.h"
+
+int we_wmi_memory_init(struct flb_we *ctx);
+int we_wmi_memory_exit(struct flb_we *ctx);
+int we_wmi_memory_update(struct flb_we *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_paging_file.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_paging_file.c
new file mode 100644
index 000000000..ed5853811
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_paging_file.c
@@ -0,0 +1,156 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "we.h"
+#include "we_wmi.h"
+#include "we_wmi_paging_file.h"
+#include "we_util.h"
+#include "we_metric.h"
+
+static double nop_adjust(double value)
+{
+ return value;
+}
+
+int we_wmi_paging_file_init(struct flb_we *ctx)
+{
+ struct cmt_gauge *g;
+
+ ctx->wmi_paging_file = flb_calloc(1, sizeof(struct we_wmi_paging_file_counters));
+ if (!ctx->wmi_paging_file) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_paging_file->operational = FLB_FALSE;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "paging_file", "allocated_base_size_megabytes",
+ "The value indicates the actual amount of disk space allocated "\
+ "for use with this page file (AllocatedBaseSize)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_paging_file->allocated_base_size_megabytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "paging_file", "current_usage_megabytes",
+ "The value indicates how much of the total reserved page file " \
+ "is currently in use (CurrentUsage)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_paging_file->current_usage_megabytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "paging_file", "peak_usage_megabytes",
+ "The value indicates the peak usage of the page file (PeakUsage)",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_paging_file->peak_usage_megabytes = g;
+
+ ctx->wmi_paging_file->info = flb_calloc(1, sizeof(struct wmi_query_spec));
+ if (!ctx->wmi_paging_file->info) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_paging_file->info->metric_instance = (void *)g;
+ ctx->wmi_paging_file->info->type = CMT_GAUGE;
+ ctx->wmi_paging_file->info->value_adjuster = nop_adjust;
+ ctx->wmi_paging_file->info->wmi_counter = "Win32_PageFileUsage";
+ ctx->wmi_paging_file->info->wmi_property = "";
+ ctx->wmi_paging_file->info->label_property_count = 0;
+ ctx->wmi_paging_file->info->label_property_keys = NULL;
+ ctx->wmi_paging_file->info->where_clause = NULL;
+
+ ctx->wmi_paging_file->operational = FLB_TRUE;
+
+ return 0;
+}
+
+int we_wmi_paging_file_exit(struct flb_we *ctx)
+{
+ ctx->wmi_paging_file->operational = FLB_FALSE;
+
+ flb_free(ctx->wmi_paging_file->info);
+ flb_free(ctx->wmi_paging_file);
+
+ return 0;
+}
+
+int we_wmi_paging_file_update(struct flb_we *ctx)
+{
+ uint64_t timestamp = 0;
+ IEnumWbemClassObject* enumerator = NULL;
+ HRESULT hr;
+
+ IWbemClassObject *class_obj = NULL;
+ ULONG ret = 0;
+ double val = 0;
+
+ if (!ctx->wmi_paging_file->operational) {
+ flb_plg_error(ctx->ins, "paging_file collector not yet in operational state");
+
+ return -1;
+ }
+
+ if (FAILED(we_wmi_coinitialize(ctx))) {
+ return -1;
+ }
+
+ timestamp = cfl_time_now();
+
+ if (FAILED(we_wmi_execute_query(ctx, ctx->wmi_paging_file->info, &enumerator))) {
+ return -1;
+ }
+
+ while(enumerator) {
+ hr = enumerator->lpVtbl->Next(enumerator, WBEM_INFINITE, 1, &class_obj, &ret);
+
+ if(ret == 0) {
+ break;
+ }
+
+ val = we_wmi_get_property_value(ctx, "AllocatedBaseSize", class_obj);
+ cmt_gauge_set(ctx->wmi_paging_file->allocated_base_size_megabytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "CurrentUsage", class_obj);
+ cmt_gauge_set(ctx->wmi_paging_file->current_usage_megabytes, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "PeakUsage", class_obj);
+ cmt_gauge_set(ctx->wmi_paging_file->peak_usage_megabytes, timestamp, val, 0, NULL);
+
+ class_obj->lpVtbl->Release(class_obj);
+ }
+
+ enumerator->lpVtbl->Release(enumerator);
+
+ we_wmi_cleanup(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_paging_file.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_paging_file.h
new file mode 100644
index 000000000..5ed74d292
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_paging_file.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_WMI_PAGING_FILE_H
+#define FLB_WE_WMI_PAGING_FILE_H
+
+#include "we.h"
+
+int we_wmi_paging_file_init(struct flb_we *ctx);
+int we_wmi_paging_file_exit(struct flb_we *ctx);
+int we_wmi_paging_file_update(struct flb_we *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_process.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_process.c
new file mode 100644
index 000000000..97226c274
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_process.c
@@ -0,0 +1,417 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "we.h"
+#include "we_wmi.h"
+#include "we_wmi_process.h"
+#include "we_util.h"
+#include "we_metric.h"
+
+static double nop_adjust(double value)
+{
+ return value;
+}
+
+int we_wmi_process_init(struct flb_we *ctx)
+{
+ struct cmt_gauge *g;
+
+ ctx->wmi_process = flb_calloc(1, sizeof(struct we_wmi_process_counters));
+ if (!ctx->wmi_process) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_process->operational = FLB_FALSE;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "start_time",
+ "Time of process start",
+ 3, (char *[]) {"process", "process_id", "creating_process_id"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->start_time = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "handles",
+ "Total number of handles the process has open. " \
+ "This number is the sum of the handles currently " \
+ "open by each thread in the process.",
+ 3, (char *[]) {"process", "process_id", "creating_process_id"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->handles = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "cpu_time_total",
+ "Returns elapsed time that all of the threads of this process " \
+ "used the processor to execute instructions by mode " \
+ "(privileged, user). An instruction is the basic unit " \
+ "of execution in a computer, a thread is the object " \
+ "that executes instructions, and a process is " \
+ "the object created when a program is run. " \
+ "Code executed to handle some hardware interrupts " \
+ "and trap conditions is included in this count.",
+ 4, (char *[]) {"process", "process_id", "creating_process_id", "mode"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->cpu_time_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "io_bytes_total",
+ "Bytes issued to I/O operations in different modes "\
+ "(read, write, other).",
+ 4, (char *[]) {"process", "process_id", "creating_process_id", "mode"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->io_bytes_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "io_operations_total",
+ "I/O operations issued in different modes (read, write, other).",
+ 4, (char *[]) {"process", "process_id", "creating_process_id", "mode"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->io_operations_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "page_faults_total",
+ "Page faults by the threads executing in this process.",
+ 3, (char *[]) {"process", "process_id", "creating_process_id"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->page_faults_total = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "page_file_bytes",
+ "Current number of bytes this process has used " \
+ "in the paging file(s).",
+ 3, (char *[]) {"process", "process_id", "creating_process_id"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->page_file_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "pool_bytes",
+ "Pool Bytes is the last observed number of bytes " \
+ "in the paged or nonpaged pool.",
+ 4, (char *[]) {"process", "process_id", "creating_process_id", "pool"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->pool_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "priority_base",
+ "Current base priority of this process. " \
+ "Threads within a process can raise and " \
+ "lower their own base priority relative to " \
+ "the process base priority of the process.",
+ 3, (char *[]) {"process", "process_id", "creating_process_id"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->priority_base = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "private_bytes",
+ "Current number of bytes this process has allocated " \
+ "that cannot be shared with other processes.",
+ 3, (char *[]) {"process", "process_id", "creating_process_id"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->private_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "thread_count",
+ "Number of threads currently active in this process.",
+ 3, (char *[]) {"process", "process_id", "creating_process_id"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->thread_count = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "virtual_bytes",
+ "Current size, in bytes, of the virtual address space " \
+ "that the process is using.",
+ 3, (char *[]) {"process", "process_id", "creating_process_id"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->virtual_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "working_set_private_bytes",
+ "Size of the working set, in bytes, that is " \
+ "used for this process only and not shared nor " \
+ "shareable by other processes.",
+ 3, (char *[]) {"process", "process_id", "creating_process_id"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->working_set_private_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "working_set_peak_bytes",
+ "Maximum size, in bytes, of the Working Set of " \
+ "this process at any point in time. " \
+ "The Working Set is the set of memory pages touched recently " \
+ "by the threads in the process.",
+ 3, (char *[]) {"process", "process_id", "creating_process_id"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->working_set_peak_bytes = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "process", "working_set_bytes",
+ "Maximum number of bytes in the working set of " \
+ "this process at any point in time. " \
+ "The working set is the set of memory pages touched recently " \
+ "by the threads in the process.",
+ 3, (char *[]) {"process", "process_id", "creating_process_id"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_process->working_set_bytes = g;
+
+ ctx->wmi_process->info = flb_calloc(1, sizeof(struct wmi_query_spec));
+ if (!ctx->wmi_process->info) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_process->info->metric_instance = (void *)g;
+ ctx->wmi_process->info->type = CMT_GAUGE;
+ ctx->wmi_process->info->value_adjuster = nop_adjust;
+ ctx->wmi_process->info->wmi_counter = "Win32_PerfRawData_PerfProc_Process";
+ ctx->wmi_process->info->wmi_property = "";
+ ctx->wmi_process->info->label_property_count = 0;
+ ctx->wmi_process->info->label_property_keys = NULL;
+ ctx->wmi_process->info->where_clause = NULL;
+
+ ctx->wmi_process->operational = FLB_TRUE;
+
+ return 0;
+}
+
+int we_wmi_process_exit(struct flb_we *ctx)
+{
+ ctx->wmi_process->operational = FLB_FALSE;
+
+ flb_free(ctx->wmi_process->info);
+ flb_free(ctx->wmi_process);
+
+ return 0;
+}
+
+static int wmi_process_regex_match(struct flb_regex *regex, char *name)
+{
+ if (regex == NULL) {
+ return 0;
+ }
+
+ if (name == NULL) {
+ return 0;
+ }
+
+ return flb_regex_match(regex, name, strlen(name));
+}
+
+int we_wmi_process_filter(char *name, struct flb_we *ctx)
+{
+ if (strcasestr(name, "_Total") != NULL) {
+ return 1;
+ }
+
+ if (wmi_process_regex_match(ctx->denying_process_regex, name) ||
+ !wmi_process_regex_match(ctx->allowing_process_regex, name)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+int we_wmi_process_update(struct flb_we *ctx)
+{
+ uint64_t timestamp = 0;
+ IEnumWbemClassObject* enumerator = NULL;
+ HRESULT hr;
+
+ IWbemClassObject *class_obj = NULL;
+ ULONG ret = 0;
+ double val = 0;
+ char *name = NULL;
+ char *process_name = NULL;
+ char *process_id = NULL;
+ char *creating_process_id = NULL;
+ double freq = 0;
+ double ticks_to_seconds = 1 / 1e7;
+ char *state;
+
+ if (!ctx->wmi_process->operational) {
+ flb_plg_error(ctx->ins, "process collector not yet in operational state");
+
+ return -1;
+ }
+
+ if (FAILED(we_wmi_coinitialize(ctx))) {
+ return -1;
+ }
+
+ timestamp = cfl_time_now();
+
+ if (FAILED(we_wmi_execute_query(ctx, ctx->wmi_process->info, &enumerator))) {
+ return -1;
+ }
+
+ while(enumerator) {
+ hr = enumerator->lpVtbl->Next(enumerator, WBEM_INFINITE, 1, &class_obj, &ret);
+
+ if(ret == 0) {
+ break;
+ }
+
+ name = we_wmi_get_property_str_value(ctx, "Name", class_obj);
+ if (!name) {
+ continue;
+ }
+ /* Remove # from the duplicated process names */
+ process_name = strtok_s(name, "#", &state);
+ if (we_wmi_process_filter(process_name, ctx) == 1) {
+ flb_free(name);
+
+ continue;
+ }
+
+ process_id = we_wmi_get_property_str_value(ctx, "IDProcess", class_obj);
+ creating_process_id = we_wmi_get_property_str_value(ctx, "CreatingProcessID", class_obj);
+ freq = we_wmi_get_property_value(ctx, "Frequency_Object", class_obj);
+
+ val = we_wmi_get_property_value(ctx, "ElapsedTime", class_obj);
+ cmt_gauge_set(ctx->wmi_process->start_time, timestamp,
+ (double)((val-116444736000000000)/freq),
+ 3, (char *[]) {process_name, process_id, creating_process_id});
+
+ val = we_wmi_get_property_value(ctx, "HandleCount", class_obj);
+ cmt_gauge_set(ctx->wmi_process->handles, timestamp, val,
+ 3, (char *[]) {process_name, process_id, creating_process_id});
+
+ val = we_wmi_get_property_value(ctx, "PercentUserTime", class_obj);
+ cmt_gauge_set(ctx->wmi_process->cpu_time_total, timestamp, val * ticks_to_seconds,
+ 4, (char *[]) {process_name, process_id, creating_process_id, "user"});
+
+ val = we_wmi_get_property_value(ctx, "PercentPrivilegedTime", class_obj);
+ cmt_gauge_set(ctx->wmi_process->cpu_time_total, timestamp, val * ticks_to_seconds,
+ 4, (char *[]) {process_name, process_id, creating_process_id, "privileged"});
+
+ val = we_wmi_get_property_value(ctx, "IOOtherBytesPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_process->io_bytes_total, timestamp, val,
+ 4, (char *[]) {process_name, process_id, creating_process_id, "other"});
+
+ val = we_wmi_get_property_value(ctx, "IOOtherOperationsPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_process->io_operations_total, timestamp, val,
+ 4, (char *[]) {process_name, process_id, creating_process_id, "other"});
+
+ val = we_wmi_get_property_value(ctx, "IOReadBytesPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_process->io_bytes_total, timestamp, val,
+ 4, (char *[]) {process_name, process_id, creating_process_id, "read"});
+
+ val = we_wmi_get_property_value(ctx, "IOReadOperationsPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_process->io_operations_total, timestamp, val,
+ 4, (char *[]) {process_name, process_id, creating_process_id, "read"});
+
+ val = we_wmi_get_property_value(ctx, "IOWriteBytesPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_process->io_bytes_total, timestamp, val,
+ 4, (char *[]) {process_name, process_id, creating_process_id, "write"});
+
+ val = we_wmi_get_property_value(ctx, "IOWriteOperationsPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_process->io_operations_total, timestamp, val,
+ 4, (char *[]) {process_name, process_id, creating_process_id, "write"});
+
+ val = we_wmi_get_property_value(ctx, "PageFaultsPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_process->page_faults_total, timestamp, val,
+ 3, (char *[]) {process_name, process_id, creating_process_id});
+
+ val = we_wmi_get_property_value(ctx, "PageFileBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_process->page_file_bytes, timestamp, val,
+ 3, (char *[]) {process_name, process_id, creating_process_id});
+
+ val = we_wmi_get_property_value(ctx, "PoolNonpagedBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_process->pool_bytes, timestamp, val,
+ 4, (char *[]) {process_name, process_id, creating_process_id, "nonpaged"});
+
+ val = we_wmi_get_property_value(ctx, "PoolPagedBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_process->pool_bytes, timestamp, val,
+ 4, (char *[]) {process_name, process_id, creating_process_id, "paged"});
+
+ val = we_wmi_get_property_value(ctx, "PriorityBase", class_obj);
+ cmt_gauge_set(ctx->wmi_process->priority_base, timestamp, val,
+ 3, (char *[]) {process_name, process_id, creating_process_id});
+
+ val = we_wmi_get_property_value(ctx, "ThreadCount", class_obj);
+ cmt_gauge_set(ctx->wmi_process->thread_count, timestamp, val,
+ 3, (char *[]) {process_name, process_id, creating_process_id});
+
+ val = we_wmi_get_property_value(ctx, "PrivateBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_process->private_bytes, timestamp, val,
+ 3, (char *[]) {process_name, process_id, creating_process_id});
+
+ val = we_wmi_get_property_value(ctx, "VirtualBytes", class_obj);
+ cmt_gauge_set(ctx->wmi_process->virtual_bytes, timestamp, val,
+ 3, (char *[]) {process_name, process_id, creating_process_id});
+
+ val = we_wmi_get_property_value(ctx, "WorkingSetPrivate", class_obj);
+ cmt_gauge_set(ctx->wmi_process->working_set_private_bytes, timestamp, val,
+ 3, (char *[]) {process_name, process_id, creating_process_id});
+
+ val = we_wmi_get_property_value(ctx, "WorkingSetPeak", class_obj);
+ cmt_gauge_set(ctx->wmi_process->working_set_peak_bytes, timestamp, val,
+ 3, (char *[]) {process_name, process_id, creating_process_id});
+
+ val = we_wmi_get_property_value(ctx, "WorkingSet", class_obj);
+ cmt_gauge_set(ctx->wmi_process->working_set_bytes, timestamp, val,
+ 3, (char *[]) {process_name, process_id, creating_process_id});
+
+ class_obj->lpVtbl->Release(class_obj);
+
+ flb_free(name);
+ flb_free(process_id);
+ flb_free(creating_process_id);
+ }
+
+ enumerator->lpVtbl->Release(enumerator);
+
+ we_wmi_cleanup(ctx);
+
+ return 0;
+}
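
A note on the start_time arithmetic above: ElapsedTime in Win32_PerfRawData_PerfProc_Process is a FILETIME-style reading counted in 100 ns ticks since 1601-01-01, and Frequency_Object reports the tick rate (10,000,000 per second). The constant 116444736000000000 equals 11,644,473,600 seconds times 10^7, the offset between 1601-01-01 and the Unix epoch, so (ElapsedTime - offset) / Frequency_Object yields the process start time in Unix seconds. A stand-alone check of the same conversion; the sample tick value is invented:

    #include <stdio.h>

    int main(void)
    {
        double elapsed_ticks = 133500000000000000.0;  /* sample raw ElapsedTime reading  */
        double freq          = 10000000.0;            /* Frequency_Object: ticks/second  */
        double epoch_offset  = 116444736000000000.0;  /* 1601-01-01 -> 1970-01-01        */

        /* Same expression used for the start_time gauge above. */
        double unix_start = (elapsed_ticks - epoch_offset) / freq;

        printf("start time: %.0f seconds since the Unix epoch\n", unix_start);
        return 0;
    }
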
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_process.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_process.h
new file mode 100644
index 000000000..0ad921ea6
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_process.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_WMI_PROCESS_INFO_H
+#define FLB_WE_WMI_PROCESS_INFO_H
+
+#include "we.h"
+
+int we_wmi_process_init(struct flb_we *ctx);
+int we_wmi_process_exit(struct flb_we *ctx);
+int we_wmi_process_update(struct flb_we *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_service.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_service.c
new file mode 100644
index 000000000..e31a4943b
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_service.c
@@ -0,0 +1,493 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "we.h"
+#include "we_wmi.h"
+#include "we_wmi_service.h"
+#include "we_util.h"
+#include "we_metric.h"
+
+static double nop_adjust(double value)
+{
+ return value;
+}
+
+static int construct_include_clause(struct flb_we *ctx, flb_sds_t *clause)
+{
+ int ret = -1;
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object key;
+ msgpack_object val;
+ msgpack_object map;
+ int map_size;
+ int i;
+ int idx = 0;
+ int use_like = FLB_FALSE;
+ char *key_str = NULL;
+ size_t key_str_size = 0;
+ char *val_str = NULL;
+ size_t val_str_size = 0;
+ flb_sds_t val_buf = NULL;
+
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result,
+ ctx->service_include_buffer,
+ ctx->service_include_buffer_size,
+ &off) == MSGPACK_UNPACK_SUCCESS) {
+ if (result.data.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "Invalid include buffer");
+ ret = -2;
+
+ goto cleanup;
+ }
+
+ map = result.data;
+ map_size = map.via.map.size;
+
+ for (i = 0; i < map_size; i++) {
+ use_like = FLB_FALSE;
+ if (idx == 0) {
+ flb_sds_cat_safe(clause, "(", 1);
+ }
+ else {
+ flb_sds_cat_safe(clause, " OR ", 4);
+ }
+
+ key = map.via.map.ptr[i].key;
+ val = map.via.map.ptr[i].val;
+ if (key.type == MSGPACK_OBJECT_BIN) {
+ key_str = (char *) key.via.bin.ptr;
+ key_str_size = key.via.bin.size;
+ }
+ else if (key.type == MSGPACK_OBJECT_STR) {
+ key_str = (char *) key.via.str.ptr;
+ key_str_size = key.via.str.size;
+ }
+ if (val.type == MSGPACK_OBJECT_BIN) {
+ val_str = (char *) val.via.bin.ptr;
+ val_str_size = val.via.bin.size;
+ val_buf = flb_sds_create_len(val_str, val_str_size);
+ if (val_buf == NULL) {
+ flb_plg_error(ctx->ins, "val_buf creation is failed");
+ ret = -3;
+
+ goto cleanup;
+ }
+ }
+ else if (val.type == MSGPACK_OBJECT_STR) {
+ val_str = (char *) val.via.str.ptr;
+ val_str_size = val.via.str.size;
+ val_buf = flb_sds_create_len(val_str, val_str_size);
+ if (val_buf == NULL) {
+ flb_plg_error(ctx->ins, "val_buf creation is failed");
+ ret = -3;
+
+ goto cleanup;
+ }
+ }
+
+            if (val_buf != NULL && strstr(val_buf, "%") != NULL) {
+                use_like = FLB_TRUE;
+            }
+            if (val_buf != NULL) {
+                flb_sds_destroy(val_buf);
+                val_buf = NULL;
+            }
+ flb_sds_cat_safe(clause, key_str, key_str_size);
+ if (use_like == FLB_TRUE) {
+ flb_sds_cat_safe(clause, " LIKE ", 6);
+ }
+ else {
+ flb_sds_cat_safe(clause, "=", 1);
+ }
+ flb_sds_cat_safe(clause, "'", 1);
+ flb_sds_cat_safe(clause, val_str, val_str_size);
+ flb_sds_cat_safe(clause, "'", 1);
+ idx++;
+ }
+ flb_sds_cat_safe(clause, ")", 1);
+ }
+ msgpack_unpacked_destroy(&result);
+
+ return 0;
+
+cleanup:
+ msgpack_unpacked_destroy(&result);
+
+ return ret;
+}
+
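+/* Build the WQL exclude condition from the msgpack-encoded service_exclude map:
+ * key!='value' terms joined with AND, switching to NOT key LIKE 'value' when
+ * the value contains a '%' wildcard. */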
+static int construct_exclude_clause(struct flb_we *ctx, flb_sds_t *clause)
+{
+ int ret = -1;
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object key;
+ msgpack_object val;
+ msgpack_object map;
+ int map_size;
+ int i;
+ int idx = 0;
+ int use_like = FLB_FALSE;
+ char *key_str = NULL;
+ size_t key_str_size = 0;
+ char *val_str = NULL;
+ size_t val_str_size = 0;
+    flb_sds_t val_buf = NULL;
+
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result,
+ ctx->service_exclude_buffer,
+ ctx->service_exclude_buffer_size,
+ &off) == MSGPACK_UNPACK_SUCCESS) {
+ if (result.data.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "Invalid exclude buffer");
+ ret = -2;
+
+ goto cleanup;
+ }
+
+ map = result.data;
+ map_size = map.via.map.size;
+
+ for (i = 0; i < map_size; i++) {
+ use_like = FLB_FALSE;
+ if (idx == 0) {
+ flb_sds_cat_safe(clause, "(", 1);
+ }
+ else {
+ flb_sds_cat_safe(clause, " AND ", 5);
+ }
+
+ key = map.via.map.ptr[i].key;
+ val = map.via.map.ptr[i].val;
+ if (key.type == MSGPACK_OBJECT_BIN) {
+ key_str = (char *) key.via.bin.ptr;
+ key_str_size = key.via.bin.size;
+ }
+ else if (key.type == MSGPACK_OBJECT_STR) {
+ key_str = (char *) key.via.str.ptr;
+ key_str_size = key.via.str.size;
+ }
+ if (val.type == MSGPACK_OBJECT_BIN) {
+ val_str = (char *) val.via.bin.ptr;
+ val_str_size = val.via.bin.size;
+ val_buf = flb_sds_create_len(val_str, val_str_size);
+ if (val_buf == NULL) {
+ flb_plg_error(ctx->ins, "val_buf creation is failed");
+ ret = -3;
+
+ goto cleanup;
+ }
+ }
+ else if (val.type == MSGPACK_OBJECT_STR) {
+ val_str = (char *) val.via.str.ptr;
+ val_str_size = val.via.str.size;
+ val_buf = flb_sds_create_len(val_str, val_str_size);
+ if (val_buf == NULL) {
+ flb_plg_error(ctx->ins, "val_buf creation is failed");
+ ret = -3;
+
+ goto cleanup;
+ }
+ }
+
+            if (val_buf != NULL && strstr(val_buf, "%") != NULL) {
+                use_like = FLB_TRUE;
+            }
+            if (val_buf != NULL) {
+                flb_sds_destroy(val_buf);
+                val_buf = NULL;
+            }
+ if (use_like == FLB_TRUE) {
+ flb_sds_cat_safe(clause, "NOT ", 4);
+ }
+ flb_sds_cat_safe(clause, key_str, key_str_size);
+ if (use_like == FLB_TRUE) {
+ flb_sds_cat_safe(clause, " LIKE ", 6);
+ }
+ else {
+ flb_sds_cat_safe(clause, "!=", 2);
+ }
+ flb_sds_cat_safe(clause, "'", 1);
+ flb_sds_cat_safe(clause, val_str, val_str_size);
+ flb_sds_cat_safe(clause, "'", 1);
+ idx++;
+ }
+ flb_sds_cat_safe(clause, ")", 1);
+ }
+ msgpack_unpacked_destroy(&result);
+
+ return 0;
+
+cleanup:
+ msgpack_unpacked_destroy(&result);
+
+ return ret;
+}
+
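+/* Combine the include map, the exclude map and the raw where_clause option into
+ * the WHERE clause stored on the Win32_Service query spec. */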
+static int construct_where_clause(struct flb_we *ctx)
+{
+ int ret;
+ flb_sds_t clause;
+
+ clause = flb_sds_create_size(256);
+ if (!clause) {
+ return -1;
+ }
+
+ if (ctx->service_include_buffer != NULL && ctx->service_include_buffer_size > 0) {
+ ret = construct_include_clause(ctx, &clause);
+ if (ret != 0) {
+ goto cleanup;
+ }
+ }
+
+ if (ctx->service_exclude_buffer != NULL && ctx->service_exclude_buffer_size > 0) {
+ if (flb_sds_len(clause) > 0) {
+ flb_sds_cat_safe(&clause, " AND ", 5);
+ }
+ ret = construct_exclude_clause(ctx, &clause);
+ if (ret != 0) {
+ goto cleanup;
+ }
+ }
+
+ if (ctx->raw_where_clause != NULL){
+ if (flb_sds_len(clause) > 0) {
+ flb_sds_cat_safe(&clause, " AND (", 6);
+ flb_sds_cat_safe(&clause, ctx->raw_where_clause, strlen(ctx->raw_where_clause));
+ flb_sds_cat_safe(&clause, ")", 1);
+ }
+ else {
+ flb_sds_cat_safe(&clause, ctx->raw_where_clause, strlen(ctx->raw_where_clause));
+ }
+ }
+
+ if (flb_sds_len(clause) > 0) {
+ ctx->wmi_service->info->where_clause = clause;
+ }
+
+ return 0;
+
+cleanup:
+ flb_sds_destroy(clause);
+
+ return ret;
+}
+
+int we_wmi_service_init(struct flb_we *ctx)
+{
+ int ret;
+ struct cmt_gauge *g;
+
+ ctx->wmi_service = flb_calloc(1, sizeof(struct we_wmi_service_counters));
+ if (!ctx->wmi_service) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_service->operational = FLB_FALSE;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "service", "info",
+ "A metric for Windows Service information",
+ 4, (char *[]) {"name", "display_name", "process_id", "run_as"});
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_service->information = g;
+
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "service", "state",
+ "A state of the service",
+ 2, (char *[]){"name", "state"});
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_service->state = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "service", "start_mode",
+ "A start mode of the service",
+ 2, (char *[]){"name", "start_mode"});
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_service->start_mode = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "service", "status",
+ "A status of the service",
+ 2, (char *[]){"name", "status"});
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_service->status = g;
+
+ ctx->wmi_service->info = flb_calloc(1, sizeof(struct wmi_query_spec));
+ if (!ctx->wmi_service->info) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_service->info->metric_instance = (void *)g;
+ ctx->wmi_service->info->type = CMT_GAUGE;
+ ctx->wmi_service->info->value_adjuster = nop_adjust;
+ ctx->wmi_service->info->wmi_counter = "Win32_Service";
+ ctx->wmi_service->info->wmi_property = "";
+ ctx->wmi_service->info->label_property_count = 0;
+ ctx->wmi_service->info->label_property_keys = NULL;
+ ctx->wmi_service->info->where_clause = NULL;
+ ret = construct_where_clause(ctx);
+ if (ret != 0) {
+ return ret;
+ }
+
+ ctx->wmi_service->operational = FLB_TRUE;
+
+ return 0;
+}
+
+int we_wmi_service_exit(struct flb_we *ctx)
+{
+ ctx->wmi_service->operational = FLB_FALSE;
+
+ if (ctx->wmi_service->info->where_clause != NULL) {
+ flb_sds_destroy(ctx->wmi_service->info->where_clause);
+ }
+ flb_free(ctx->wmi_service->info);
+ flb_free(ctx->wmi_service);
+
+ return 0;
+}
+
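+/* Query Win32_Service through WMI and emit the info gauge plus one-hot
+ * state/start_mode/status gauges for every returned service. */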
+int we_wmi_service_update(struct flb_we *ctx)
+{
+ IEnumWbemClassObject* enumerator = NULL;
+ HRESULT hr;
+
+ IWbemClassObject *class_obj = NULL;
+ ULONG ret = 0;
+ int i = 0;
+ uint64_t timestamp = 0;
+ char *service_name = NULL;
+ char *display_name = NULL;
+ char *pid = NULL;
+ char *run_as = NULL;
+ char *str_prop = NULL;
+ char *state = NULL;
+ char *start_mode = NULL;
+ char *status = NULL;
+ char **states = (char *[]){
+ "stopped", "start pending", "stop pending", "running",
+ "continue pending", "pause pending", "paused", "unknown", NULL
+ };
+ char **statuses = (char *[]){
+ "ok", "error", "degraded", "unknown",
+ "pred fail", "starting", "stopping", "service",
+ "stressed", "nonrecover", "no contact", "lost comm", NULL
+ };
+ char **start_modes = (char *[]) {
+ "boot", "system", "auto", "manual", "disabled", NULL
+ };
+
+ if (!ctx->wmi_service->operational) {
+ flb_plg_error(ctx->ins, "windows_service collector not yet in operational state");
+
+ return -1;
+ }
+
+ if (FAILED(we_wmi_coinitialize(ctx))) {
+ return -1;
+ }
+
+ timestamp = cfl_time_now();
+
+ if (FAILED(we_wmi_execute_query(ctx, ctx->wmi_service->info, &enumerator))) {
+ return -1;
+ }
+
+ while (enumerator) {
+ hr = enumerator->lpVtbl->Next(enumerator, WBEM_INFINITE, 1,
+ &class_obj, &ret);
+
+ if (0 == ret) {
+ break;
+ }
+
+ service_name = we_wmi_get_property_str_value(ctx, "Name", class_obj);
+ display_name = we_wmi_get_property_str_value(ctx, "DisplayName", class_obj);
+ pid = we_wmi_get_property_str_value(ctx, "ProcessID", class_obj);
+ run_as = we_wmi_get_property_str_value(ctx, "StartName", class_obj);
+ state = we_wmi_get_property_str_value(ctx, "State", class_obj);
+ start_mode = we_wmi_get_property_str_value(ctx, "StartMode", class_obj);
+ status = we_wmi_get_property_str_value(ctx, "Status", class_obj);
+
+ /* Information */
+ cmt_gauge_set(ctx->wmi_service->information, timestamp, 1.0,
+ 4, (char *[]){ service_name, display_name, pid, run_as});
+
+ /* State */
+ for (i = 0; states[i] != NULL; i++) {
+ if (strcasecmp(state, states[i]) == 0) {
+ cmt_gauge_set(ctx->wmi_service->state, timestamp, 1.0,
+ 2, (char *[]){ service_name, states[i]});
+ }
+ else {
+ cmt_gauge_set(ctx->wmi_service->state, timestamp, 0.0,
+ 2, (char *[]){ service_name, states[i]});
+ }
+ }
+ /* Start Mode */
+ for (i = 0; start_modes[i] != NULL; i++) {
+ if (strcasecmp(start_mode, start_modes[i]) == 0) {
+ cmt_gauge_set(ctx->wmi_service->start_mode, timestamp, 1.0,
+ 2, (char *[]){ service_name, start_modes[i]});
+ }
+ else {
+ cmt_gauge_set(ctx->wmi_service->start_mode, timestamp, 0.0,
+ 2, (char *[]){ service_name, start_modes[i]});
+ }
+ }
+
+ /* Status */
+ for (i = 0; statuses[i] != NULL; i++) {
+ if (strcasecmp(status, statuses[i]) == 0) {
+ cmt_gauge_set(ctx->wmi_service->status, timestamp, 1.0,
+ 2, (char *[]){ service_name, statuses[i]});
+ } else {
+ cmt_gauge_set(ctx->wmi_service->status, timestamp, 0.0,
+ 2, (char *[]){ service_name, statuses[i]});
+ }
+ }
+
+ class_obj->lpVtbl->Release(class_obj);
+
+ flb_free(service_name);
+ flb_free(display_name);
+ flb_free(pid);
+ flb_free(run_as);
+ flb_free(state);
+ flb_free(start_mode);
+ flb_free(status);
+ }
+
+ enumerator->lpVtbl->Release(enumerator);
+ we_wmi_cleanup(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_service.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_service.h
new file mode 100644
index 000000000..d9b3efea9
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_service.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_WMI_SERVICE_H
+#define FLB_WE_WMI_SERVICE_H
+
+#include "we.h"
+
+int we_wmi_service_init(struct flb_we *ctx);
+int we_wmi_service_exit(struct flb_we *ctx);
+int we_wmi_service_update(struct flb_we *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_system.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_system.c
new file mode 100644
index 000000000..0eb7fffaf
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_system.c
@@ -0,0 +1,190 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "we.h"
+#include "we_wmi.h"
+#include "we_wmi_system.h"
+#include "we_util.h"
+#include "we_metric.h"
+
+static double nop_adjust(double value)
+{
+ return value;
+}
+
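+/* Register the gauges backed by the Win32_PerfFormattedData_PerfOS_System class. */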
+int we_wmi_system_init(struct flb_we *ctx)
+{
+ ctx->wmi_system = flb_calloc(1, sizeof(struct we_wmi_system_counters));
+ if (!ctx->wmi_system) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_system->operational = FLB_FALSE;
+
+ struct cmt_gauge *g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "system", "context_switches_total",
+ "Total number of context switches",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_system->context_switches = g;
+
+    g = cmt_gauge_create(ctx->cmt, "windows", "system", "exception_dispatches_total",
+                           "Total number of exception dispatches",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_system->exception_dispatches = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "system", "processor_queue",
+ "Length of processor queues",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_system->processor_queue = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "system", "system_calls_total",
+ "Total number of system calls",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_system->system_calls = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "system", "system_up_time",
+ "System boot time",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_system->system_up_time = g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "system", "threads",
+ "Current number of threads",
+ 0, NULL);
+
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_system->threads = g;
+
+ ctx->wmi_system->info = flb_calloc(1, sizeof(struct wmi_query_spec));
+ if (!ctx->wmi_system->info) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_system->info->metric_instance = (void *)g;
+ ctx->wmi_system->info->type = CMT_GAUGE;
+ ctx->wmi_system->info->value_adjuster = nop_adjust;
+ ctx->wmi_system->info->wmi_counter = "Win32_PerfFormattedData_PerfOS_System";
+ ctx->wmi_system->info->wmi_property = "";
+ ctx->wmi_system->info->label_property_count = 0;
+ ctx->wmi_system->info->label_property_keys = NULL;
+ ctx->wmi_system->info->where_clause = NULL;
+
+ ctx->wmi_system->operational = FLB_TRUE;
+
+ return 0;
+}
+
+int we_wmi_system_exit(struct flb_we *ctx)
+{
+ ctx->wmi_system->operational = FLB_FALSE;
+
+ flb_free(ctx->wmi_system->info);
+ flb_free(ctx->wmi_system);
+
+ return 0;
+}
+
+int we_wmi_system_update(struct flb_we *ctx)
+{
+ uint64_t timestamp = 0;
+ IEnumWbemClassObject* enumerator = NULL;
+ HRESULT hr;
+
+ IWbemClassObject *class_obj = NULL;
+ ULONG ret = 0;
+ double val = 0;
+
+ if (!ctx->wmi_system->operational) {
+ flb_plg_error(ctx->ins, "system collector not yet in operational state");
+
+ return -1;
+ }
+
+ if (FAILED(we_wmi_coinitialize(ctx))) {
+ return -1;
+ }
+
+ timestamp = cfl_time_now();
+
+ if (FAILED(we_wmi_execute_query(ctx, ctx->wmi_system->info, &enumerator))) {
+ return -1;
+ }
+
+ while(enumerator) {
+ hr = enumerator->lpVtbl->Next(enumerator, WBEM_INFINITE, 1, &class_obj, &ret);
+
+ if(0 == ret) {
+ break;
+ }
+
+ val = we_wmi_get_property_value(ctx, "ContextSwitchesPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_system->context_switches, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "ExceptionDispatchesPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_system->exception_dispatches, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "ProcessorQueueLength", class_obj);
+ cmt_gauge_set(ctx->wmi_system->processor_queue, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "SystemCallsPersec", class_obj);
+ cmt_gauge_set(ctx->wmi_system->system_calls, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "SystemUpTime", class_obj);
+ cmt_gauge_set(ctx->wmi_system->system_up_time, timestamp, val, 0, NULL);
+
+ val = we_wmi_get_property_value(ctx, "Threads", class_obj);
+ cmt_gauge_set(ctx->wmi_system->threads, timestamp, val, 0, NULL);
+
+ class_obj->lpVtbl->Release(class_obj);
+ }
+
+ enumerator->lpVtbl->Release(enumerator);
+
+ we_wmi_cleanup(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_system.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_system.h
new file mode 100644
index 000000000..abc8ea6f5
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_system.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_WMI_SYSTEM_H
+#define FLB_WE_WMI_SYSTEM_H
+
+#include "we.h"
+
+int we_wmi_system_init(struct flb_we *ctx);
+int we_wmi_system_exit(struct flb_we *ctx);
+int we_wmi_system_update(struct flb_we *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_thermalzone.c b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_thermalzone.c
new file mode 100644
index 000000000..1766c5907
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_thermalzone.c
@@ -0,0 +1,171 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "we.h"
+#include "we_wmi.h"
+#include "we_wmi_thermalzone.h"
+#include "we_util.h"
+#include "we_metric.h"
+
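+/* HighPrecisionTemperature is reported in tenths of a kelvin;
+ * convert the raw value to degrees Celsius. */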
+static double adjust_celsius(double value)
+{
+ return (value/10.0) - 273.15;
+}
+
+static double nop_adjust(double value)
+{
+ return value;
+}
+
+int we_wmi_thermalzone_init(struct flb_we *ctx)
+{
+ ctx->wmi_thermals = flb_calloc(1, sizeof(struct we_wmi_thermal_counters));
+ if (!ctx->wmi_thermals) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_thermals->operational = FLB_FALSE;
+
+ struct cmt_gauge *g;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "thermalzone", "temperature_celsius",
+ "Temperature of the sensor device.",
+ 1, (char *[]) {"name"});
+ if (!g) {
+ return -1;
+ }
+
+ ctx->wmi_thermals->temperature_celsius = flb_calloc(1, sizeof(struct wmi_query_spec));
+ if (!ctx->wmi_thermals->temperature_celsius) {
+ return -1;
+ }
+ ctx->wmi_thermals->temperature_celsius->label_property_keys = (char **) flb_calloc(1, sizeof(char *));
+ if (!ctx->wmi_thermals->temperature_celsius->label_property_keys) {
+ return -1;
+ }
+
+ ctx->wmi_thermals->temperature_celsius->metric_instance = (void *)g;
+ ctx->wmi_thermals->temperature_celsius->type = CMT_GAUGE;
+ ctx->wmi_thermals->temperature_celsius->value_adjuster = adjust_celsius;
+ ctx->wmi_thermals->temperature_celsius->wmi_counter = "Win32_PerfRawData_Counters_ThermalZoneInformation";
+ ctx->wmi_thermals->temperature_celsius->wmi_property = "HighPrecisionTemperature";
+ ctx->wmi_thermals->temperature_celsius->label_property_count = 1;
+ ctx->wmi_thermals->temperature_celsius->label_property_keys[0] = "name" ;
+ ctx->wmi_thermals->temperature_celsius->where_clause = NULL;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "thermalzone", "percent_passive_limit",
+ "The limit of passive limit (percent).",
+ 1, (char *[]) {"name"});
+ if (!g) {
+ return -1;
+ }
+
+ ctx->wmi_thermals->percent_passive_limit = flb_calloc(1, sizeof(struct wmi_query_spec));
+ if (!ctx->wmi_thermals->percent_passive_limit) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->wmi_thermals->percent_passive_limit->label_property_keys = (char **) flb_calloc(1, sizeof(char *));
+ if (!ctx->wmi_thermals->percent_passive_limit->label_property_keys) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->wmi_thermals->percent_passive_limit->metric_instance = (void *)g;
+ ctx->wmi_thermals->percent_passive_limit->type = CMT_GAUGE;
+ ctx->wmi_thermals->percent_passive_limit->value_adjuster = nop_adjust;
+ ctx->wmi_thermals->percent_passive_limit->wmi_counter = "Win32_PerfRawData_Counters_ThermalZoneInformation";
+ ctx->wmi_thermals->percent_passive_limit->wmi_property = "PercentPassiveLimit";
+ ctx->wmi_thermals->percent_passive_limit->label_property_count = 1;
+ ctx->wmi_thermals->percent_passive_limit->label_property_keys[0] = "name";
+ ctx->wmi_thermals->percent_passive_limit->where_clause = NULL;
+
+ g = cmt_gauge_create(ctx->cmt, "windows", "thermalzone", "throttle_reasons",
+ "The reason of throttle.",
+ 1, (char *[]) {"name"});
+ if (!g) {
+ return -1;
+ }
+ ctx->wmi_thermals->throttle_reasons = flb_calloc(1, sizeof(struct wmi_query_spec));
+ if (!ctx->wmi_thermals->throttle_reasons) {
+ flb_errno();
+ return -1;
+ }
+ ctx->wmi_thermals->throttle_reasons->label_property_keys = (char **) flb_calloc(1, sizeof(char *));
+ if (!ctx->wmi_thermals->throttle_reasons->label_property_keys) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->wmi_thermals->throttle_reasons->metric_instance = (void *)g;
+ ctx->wmi_thermals->throttle_reasons->type = CMT_GAUGE;
+ ctx->wmi_thermals->throttle_reasons->value_adjuster = nop_adjust;
+ ctx->wmi_thermals->throttle_reasons->wmi_counter = "Win32_PerfRawData_Counters_ThermalZoneInformation";
+ ctx->wmi_thermals->throttle_reasons->wmi_property = "ThrottleReasons";
+ ctx->wmi_thermals->throttle_reasons->label_property_count = 1;
+ ctx->wmi_thermals->throttle_reasons->label_property_keys[0] = "name";
+ ctx->wmi_thermals->throttle_reasons->where_clause = NULL;
+
+ ctx->wmi_thermals->operational = FLB_TRUE;
+
+ return 0;
+}
+
+int we_wmi_thermalzone_exit(struct flb_we *ctx)
+{
+ flb_free(ctx->wmi_thermals->temperature_celsius->label_property_keys);
+ flb_free(ctx->wmi_thermals->temperature_celsius);
+ flb_free(ctx->wmi_thermals->percent_passive_limit->label_property_keys);
+ flb_free(ctx->wmi_thermals->percent_passive_limit);
+ flb_free(ctx->wmi_thermals->throttle_reasons->label_property_keys);
+ flb_free(ctx->wmi_thermals->throttle_reasons);
+ flb_free(ctx->wmi_thermals);
+
+ return 0;
+}
+
+int we_wmi_thermalzone_update(struct flb_we *ctx)
+{
+ if (!ctx->wmi_thermals->operational) {
+ flb_plg_error(ctx->ins, "thermalzone collector not yet in operational state");
+
+ return -1;
+ }
+
+ if (FAILED(we_wmi_query(ctx, ctx->wmi_thermals->temperature_celsius))) {
+ return -1;
+ }
+
+ if (FAILED(we_wmi_query(ctx, ctx->wmi_thermals->percent_passive_limit))) {
+ return -1;
+ }
+
+ if (FAILED(we_wmi_query(ctx, ctx->wmi_thermals->throttle_reasons))) {
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_thermalzone.h b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_thermalzone.h
new file mode 100644
index 000000000..a94d6dc36
--- /dev/null
+++ b/src/fluent-bit/plugins/in_windows_exporter_metrics/we_wmi_thermalzone.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WE_WMI_THERMAL_ZONE_H
+#define FLB_WE_WMI_THERMAL_ZONE_H
+
+#include "we.h"
+
+int we_wmi_thermalzone_init(struct flb_we *ctx);
+int we_wmi_thermalzone_exit(struct flb_we *ctx);
+int we_wmi_thermalzone_update(struct flb_we *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/in_winevtlog/CMakeLists.txt b/src/fluent-bit/plugins/in_winevtlog/CMakeLists.txt
new file mode 100644
index 000000000..8f4c0e233
--- /dev/null
+++ b/src/fluent-bit/plugins/in_winevtlog/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ in_winevtlog.c
+ pack.c
+ winevtlog.c)
+
+FLB_PLUGIN(in_winevtlog "${src}" "wevtapi")
diff --git a/src/fluent-bit/plugins/in_winevtlog/in_winevtlog.c b/src/fluent-bit/plugins/in_winevtlog/in_winevtlog.c
new file mode 100644
index 000000000..60c5bcecb
--- /dev/null
+++ b/src/fluent-bit/plugins/in_winevtlog/in_winevtlog.c
@@ -0,0 +1,279 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_kernel.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_sqldb.h>
+#include "winevtlog.h"
+
+#define DEFAULT_INTERVAL_SEC 1
+#define DEFAULT_INTERVAL_NSEC 0
+#define DEFAULT_THRESHOLD_SIZE 0x7ffff /* Default reading buffer size (512kb) */
+
+static int in_winevtlog_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context);
+
+static int in_winevtlog_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ const char *tmp;
+ int read_existing_events = FLB_FALSE;
+ struct mk_list *head;
+ struct winevtlog_channel *ch;
+ struct winevtlog_config *ctx;
+
+ /* Initialize context */
+ ctx = flb_calloc(1, sizeof(struct winevtlog_config));
+ if (ctx == NULL) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = in;
+
+ ctx->log_encoder = flb_log_event_encoder_create(FLB_LOG_EVENT_FORMAT_DEFAULT);
+
+ if (ctx->log_encoder == NULL) {
+ flb_plg_error(in, "could not initialize event encoder");
+ flb_free(ctx);
+
+        return -1;
+ }
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *) ctx);
+ if (ret == -1) {
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Set up total reading size threshold */
+ ctx->total_size_threshold = DEFAULT_THRESHOLD_SIZE;
+
+ /* Open channels */
+ tmp = flb_input_get_property("channels", in);
+ if (!tmp) {
+ flb_plg_debug(ctx->ins, "no channel provided. listening to 'Application'");
+ tmp = "Application";
+ }
+
+ ctx->active_channel = winevtlog_open_all(tmp, ctx);
+ if (!ctx->active_channel) {
+ flb_plg_error(ctx->ins, "failed to open channels");
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Initialize SQLite DB (optional) */
+ tmp = flb_input_get_property("db", in);
+ if (tmp) {
+ ctx->db = flb_sqldb_open(tmp, in->name, config);
+ if (!ctx->db) {
+ flb_plg_error(ctx->ins, "could not open/create database");
+ winevtlog_close_all(ctx->active_channel);
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ flb_free(ctx);
+ return -1;
+ }
+
+ ret = flb_sqldb_query(ctx->db, SQL_CREATE_CHANNELS, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "could not create 'channels' table");
+ flb_sqldb_close(ctx->db);
+ winevtlog_close_all(ctx->active_channel);
+ flb_log_event_encoder_destroy(ctx->log_encoder);
+ flb_free(ctx);
+ return -1;
+ }
+
+ mk_list_foreach(head, ctx->active_channel) {
+ ch = mk_list_entry(head, struct winevtlog_channel, _head);
+ winevtlog_sqlite_load(ch, ctx->db);
+ flb_plg_debug(ctx->ins, "load channel<%s time=%u>",
+ ch->name, ch->time_created);
+ }
+ }
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ /* Set the collector */
+ ret = flb_input_set_collector_time(in,
+ in_winevtlog_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not set up a collector");
+ }
+ ctx->coll_fd = ret;
+
+ return 0;
+}
+
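+/* Read pending records from one channel, persist its bookmark to the SQLite DB
+ * when configured, and append the encoded records to the pipeline. */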
+static int in_winevtlog_read_channel(struct flb_input_instance *ins,
+ struct winevtlog_config *ctx,
+ struct winevtlog_channel *ch)
+{
+ unsigned int read;
+
+ if (winevtlog_read(ch, ctx, &read)) {
+ flb_plg_error(ctx->ins, "failed to read '%s'", ch->name);
+ return -1;
+ }
+ if (read == 0) {
+ return 0;
+ }
+ flb_plg_debug(ctx->ins, "read %u bytes from '%s'", read, ch->name);
+
+ if (ctx->db) {
+ ch->time_updated = time(NULL);
+ flb_plg_debug(ctx->ins, "save channel<%s time=%u>",
+ ch->name, ch->time_updated);
+ winevtlog_sqlite_save(ch, ctx->db);
+ }
+
+ if (ctx->log_encoder->output_length > 0) {
+ flb_input_log_append(ctx->ins, NULL, 0,
+ ctx->log_encoder->output_buffer,
+ ctx->log_encoder->output_length);
+ }
+
+ flb_log_event_encoder_reset(ctx->log_encoder);
+
+ return 0;
+}
+
+static int in_winevtlog_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct winevtlog_config *ctx = in_context;
+ struct mk_list *head;
+ struct winevtlog_channel *ch;
+
+ mk_list_foreach(head, ctx->active_channel) {
+ ch = mk_list_entry(head, struct winevtlog_channel, _head);
+ in_winevtlog_read_channel(ins, ctx, ch);
+ }
+ return 0;
+}
+
+static void in_winevtlog_pause(void *data, struct flb_config *config)
+{
+ struct winevtlog_config *ctx = data;
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+}
+
+static void in_winevtlog_resume(void *data, struct flb_config *config)
+{
+ struct winevtlog_config *ctx = data;
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+}
+
+static int in_winevtlog_exit(void *data, struct flb_config *config)
+{
+ struct winevtlog_config *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ winevtlog_close_all(ctx->active_channel);
+
+ if (ctx->db) {
+ flb_sqldb_close(ctx->db);
+ }
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "channels", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify a comma-separated list of channels to read from"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "db", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify DB file to save read offsets"
+ },
+ {
+ FLB_CONFIG_MAP_TIME, "interval_sec", "1s",
+ 0, FLB_TRUE, offsetof(struct winevtlog_config, interval_sec),
+ "Set the polling interval for each channel"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", "0",
+ 0, FLB_TRUE, offsetof(struct winevtlog_config, interval_nsec),
+ "Set the polling interval for each channel (sub seconds)"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "string_inserts", "true",
+ 0, FLB_TRUE, offsetof(struct winevtlog_config, string_inserts),
+ "Whether to include StringInserts in output records"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "read_existing_events", "false",
+ 0, FLB_TRUE, offsetof(struct winevtlog_config, read_existing_events),
+ "Whether to consume at oldest records in channels"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "render_event_as_xml", "false",
+ 0, FLB_TRUE, offsetof(struct winevtlog_config, render_event_as_xml),
+ "Whether to consume at oldest records in channels"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "use_ansi", "false",
+ 0, FLB_TRUE, offsetof(struct winevtlog_config, use_ansi),
+ "Use ANSI encoding on eventlog messages"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "ignore_missing_channels", "false",
+ 0, FLB_TRUE, offsetof(struct winevtlog_config, ignore_missing_channels),
+ "Whether to ignore channels missing in eventlog"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "event_query", "*",
+ 0, FLB_TRUE, offsetof(struct winevtlog_config, event_query),
+ "Specify XML query for filtering events"
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_input_plugin in_winevtlog_plugin = {
+ .name = "winevtlog",
+ .description = "Windows EventLog using winevt.h API",
+ .cb_init = in_winevtlog_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_winevtlog_collect,
+ .cb_flush_buf = NULL,
+ .cb_pause = in_winevtlog_pause,
+ .cb_resume = in_winevtlog_resume,
+ .cb_exit = in_winevtlog_exit,
+ .config_map = config_map
+};
diff --git a/src/fluent-bit/plugins/in_winevtlog/pack.c b/src/fluent-bit/plugins/in_winevtlog/pack.c
new file mode 100644
index 000000000..28075436b
--- /dev/null
+++ b/src/fluent-bit/plugins/in_winevtlog/pack.c
@@ -0,0 +1,625 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <msgpack.h>
+#include <sddl.h>
+#include <locale.h>
+#include "winevtlog.h"
+
+#define FORMAT_ISO8601 "%Y-%m-%d %H:%M:%S %z"
+
+#define BINDATA(evt) ((unsigned char *) (evt) + (evt)->DataOffset)
+
+static int pack_nullstr(struct winevtlog_config *ctx)
+{
+    return flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "");
+}
+
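+/* Convert a UTF-16 string to the configured code page (UTF-8, or ANSI when
+ * use_ansi is set) and append it to the record body. */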
+static int pack_wstr(struct winevtlog_config *ctx, const wchar_t *wstr)
+{
+ int size;
+ char *buf;
+ UINT code_page = CP_UTF8;
+    LPCSTR defaultChar = " ";
+
+ if (ctx->use_ansi) {
+ code_page = CP_ACP;
+ }
+
+ /* Compute the buffer size first */
+ size = WideCharToMultiByte(code_page, 0, wstr, -1, NULL, 0, NULL, NULL);
+ if (size == 0) {
+ return -1;
+ }
+
+ buf = flb_malloc(size);
+ if (buf == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+    /* Convert UTF-16 into UTF-8 (or ANSI); CP_UTF8 requires a NULL default char */
+    size = WideCharToMultiByte(code_page, 0, wstr, -1, buf, size,
+                               ctx->use_ansi ? defaultChar : NULL, NULL);
+ if (size == 0) {
+ flb_free(buf);
+ return -1;
+ }
+
+ /* Pack buf except the trailing '\0' */
+ flb_log_event_encoder_append_body_string(ctx->log_encoder, buf, size - 1);
+
+ flb_free(buf);
+ return 0;
+}
+
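+/* Append a binary value encoded as an uppercase hexadecimal string. */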
+static int pack_binary(struct winevtlog_config *ctx, PBYTE bin, size_t length)
+{
+ const char *HEX_TABLE = "0123456789ABCDEF";
+ char *buffer;
+ int size = length * 2;
+ size_t i, j;
+ unsigned int idx = 0;
+
+ if (length == 0) {
+        pack_nullstr(ctx);
+ return 0;
+ }
+
+ buffer = flb_malloc(size);
+ if (buffer == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ for (i = 0; i < length; i++) {
+ for (j = 0; j < 2; j++) {
+ idx = (unsigned int)(bin[i] >> (j * 4) & 0x0F);
+ buffer[2*i+(1-j)] = HEX_TABLE[idx];
+ }
+ }
+
+ flb_log_event_encoder_append_body_string(ctx->log_encoder, buffer, size);
+
+ flb_free(buffer);
+
+ return 0;
+}
+
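+/* Append a GUID converted to its string form via StringFromCLSID. */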
+static int pack_guid(struct winevtlog_config *ctx, const GUID *guid)
+{
+ LPOLESTR p = NULL;
+
+ if (FAILED(StringFromCLSID(guid, &p))) {
+ return -1;
+ }
+ if (pack_wstr(ctx, p)) {
+ CoTaskMemFree(p);
+ return -1;
+ }
+
+ CoTaskMemFree(p);
+
+ return 0;
+}
+
+static int pack_hex32(struct winevtlog_config *ctx, int32_t hex)
+{
+ CHAR buffer[32];
+ size_t size = _countof(buffer);
+
+ _snprintf_s(buffer,
+ size,
+ _TRUNCATE,
+ "0x%lx",
+ hex);
+ size = strlen(buffer);
+ if (size > 0) {
+ flb_log_event_encoder_append_body_cstring(ctx->log_encoder, buffer);
+
+ return 0;
+ }
+
+ return -1;
+}
+
+static int pack_hex64(struct winevtlog_config *ctx, int64_t hex)
+{
+ CHAR buffer[32];
+ size_t size = _countof(buffer);
+
+ _snprintf_s(buffer,
+ size,
+ _TRUNCATE,
+ "0x%llx",
+ hex);
+
+ size = strlen(buffer);
+ if (size > 0) {
+ flb_log_event_encoder_append_body_cstring(ctx->log_encoder, buffer);
+
+ return 0;
+ }
+
+ return -1;
+}
+
+
+static int pack_keywords(struct winevtlog_config *ctx, uint64_t keywords)
+{
+ CHAR buffer[32];
+ size_t size = _countof(buffer);
+
+ _snprintf_s(buffer,
+ size,
+ _TRUNCATE,
+ "0x%llx",
+ keywords);
+
+ size = strlen(buffer);
+
+ flb_log_event_encoder_append_body_cstring(ctx->log_encoder, buffer);
+
+ return 0;
+}
+
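+/* Append a SYSTEMTIME converted to the local time zone and formatted
+ * with FORMAT_ISO8601. */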
+static int pack_systemtime(struct winevtlog_config *ctx, SYSTEMTIME *st)
+{
+ CHAR buf[64];
+ size_t len = 0;
+ _locale_t locale;
+ TIME_ZONE_INFORMATION tzi;
+ SYSTEMTIME st_local;
+
+ GetTimeZoneInformation(&tzi);
+
+ locale = _get_current_locale();
+ if (locale == NULL) {
+ return -1;
+ }
+ if (st != NULL) {
+ SystemTimeToTzSpecificLocalTime(&tzi, st, &st_local);
+
+ struct tm tm = {st_local.wSecond,
+ st_local.wMinute,
+ st_local.wHour,
+ st_local.wDay,
+ st_local.wMonth-1,
+ st_local.wYear-1900,
+ st_local.wDayOfWeek, 0, 0};
+ len = _strftime_l(buf, 64, FORMAT_ISO8601, &tm, locale);
+ if (len == 0) {
+ flb_errno();
+ _free_locale(locale);
+ return -1;
+ }
+ _free_locale(locale);
+
+ flb_log_event_encoder_append_body_string(ctx->log_encoder, buf, len);
+ }
+ else {
+ return -1;
+ }
+
+ return 0;
+}
+
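+/* Append a FILETIME (100-nanosecond intervals since January 1, 1601) converted
+ * to local time and formatted with FORMAT_ISO8601. */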
+static int pack_filetime(struct winevtlog_config *ctx, ULONGLONG filetime)
+{
+ LARGE_INTEGER timestamp;
+ CHAR buf[64];
+ size_t len = 0;
+ FILETIME ft, ft_local;
+ SYSTEMTIME st;
+ _locale_t locale;
+
+ locale = _get_current_locale();
+ if (locale == NULL) {
+ return -1;
+ }
+ timestamp.QuadPart = filetime;
+ ft.dwHighDateTime = timestamp.HighPart;
+ ft.dwLowDateTime = timestamp.LowPart;
+ FileTimeToLocalFileTime(&ft, &ft_local);
+ if (FileTimeToSystemTime(&ft_local, &st)) {
+ struct tm tm = {st.wSecond, st.wMinute, st.wHour, st.wDay, st.wMonth-1, st.wYear-1900, st.wDayOfWeek, 0, 0};
+ len = _strftime_l(buf, 64, FORMAT_ISO8601, &tm, locale);
+ if (len == 0) {
+ flb_errno();
+ _free_locale(locale);
+ return -1;
+ }
+ _free_locale(locale);
+
+ flb_log_event_encoder_append_body_string(ctx->log_encoder, buf, len);
+ }
+ else {
+ return -1;
+ }
+
+ return 0;
+}
+
+static int pack_sid(struct winevtlog_config *ctx, PSID sid)
+{
+ size_t size;
+ LPWSTR wide_sid = NULL;
+ int ret = -1;
+
+ if (ConvertSidToStringSidW(sid, &wide_sid)) {
+ ret = pack_wstr(ctx, wide_sid);
+
+ LocalFree(wide_sid);
+ return ret;
+ }
+
+ return ret;
+}
+
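+/* Append the event's string inserts as an array, dispatching on the EVT_VARIANT
+ * type; array-typed values are skipped and unknown types are encoded as "?". */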
+static void pack_string_inserts(struct winevtlog_config *ctx, PEVT_VARIANT values, DWORD count)
+{
+ int i;
+ int ret;
+
+ ret = flb_log_event_encoder_body_begin_array(ctx->log_encoder);
+
+ for (i = 0; i < count; i++) {
+ if (values[i].Type & EVT_VARIANT_TYPE_ARRAY) {
+ continue;
+ }
+
+ switch (values[i].Type & EVT_VARIANT_TYPE_MASK) {
+ case EvtVarTypeNull:
+ pack_nullstr(ctx);
+ break;
+ case EvtVarTypeString:
+ if (pack_wstr(ctx, values[i].StringVal)) {
+ pack_nullstr(ctx);
+ }
+ break;
+ case EvtVarTypeAnsiString:
+ if (pack_wstr(ctx, values[i].AnsiStringVal)) {
+ pack_nullstr(ctx);
+ }
+ break;
+ case EvtVarTypeSByte:
+ flb_log_event_encoder_append_body_int8(ctx->log_encoder, values[i].SByteVal);
+ break;
+ case EvtVarTypeByte:
+ flb_log_event_encoder_append_body_uint8(ctx->log_encoder, values[i].ByteVal);
+ break;
+ case EvtVarTypeInt16:
+ flb_log_event_encoder_append_body_int16(ctx->log_encoder, values[i].Int16Val);
+ break;
+ case EvtVarTypeUInt16:
+ flb_log_event_encoder_append_body_uint16(ctx->log_encoder, values[i].UInt16Val);
+ break;
+ case EvtVarTypeInt32:
+ flb_log_event_encoder_append_body_int32(ctx->log_encoder, values[i].Int32Val);
+ break;
+ case EvtVarTypeUInt32:
+ flb_log_event_encoder_append_body_uint32(ctx->log_encoder, values[i].UInt32Val);
+ break;
+ case EvtVarTypeInt64:
+ flb_log_event_encoder_append_body_int64(ctx->log_encoder, values[i].Int64Val);
+ break;
+ case EvtVarTypeUInt64:
+ flb_log_event_encoder_append_body_uint64(ctx->log_encoder, values[i].UInt64Val);
+ break;
+ case EvtVarTypeSingle:
+ flb_log_event_encoder_append_body_double(ctx->log_encoder, values[i].SingleVal);
+ break;
+ case EvtVarTypeDouble:
+ flb_log_event_encoder_append_body_double(ctx->log_encoder, values[i].DoubleVal);
+ break;
+ case EvtVarTypeBoolean:
+ flb_log_event_encoder_append_body_boolean(ctx->log_encoder, (int) values[i].BooleanVal);
+ break;
+ case EvtVarTypeGuid:
+ if (pack_guid(ctx, values[i].GuidVal)) {
+ pack_nullstr(ctx);
+ }
+ break;
+ case EvtVarTypeSizeT:
+ flb_log_event_encoder_append_body_uint64(ctx->log_encoder, values[i].SizeTVal);
+ break;
+ case EvtVarTypeFileTime:
+ if (pack_filetime(ctx, values[i].FileTimeVal)) {
+ pack_nullstr(ctx);
+ }
+ break;
+ case EvtVarTypeSysTime:
+ if (pack_systemtime(ctx, values[i].SysTimeVal)) {
+ pack_nullstr(ctx);
+ }
+ break;
+ case EvtVarTypeSid:
+ if (pack_sid(ctx, values[i].SidVal)) {
+ pack_nullstr(ctx);
+ }
+ break;
+ case EvtVarTypeHexInt32:
+ if (pack_hex32(ctx, values[i].Int32Val)) {
+ pack_nullstr(ctx);
+ }
+ break;
+ case EvtVarTypeHexInt64:
+ if (pack_hex64(ctx, values[i].Int64Val)) {
+ pack_nullstr(ctx);
+ }
+ break;
+ case EvtVarTypeEvtXml:
+            if (pack_wstr(ctx, values[i].XmlVal)) {
+ pack_nullstr(ctx);
+ }
+ break;
+ case EvtVarTypeBinary:
+ if (pack_binary(ctx, values[i].BinaryVal, values[i].Count)) {
+ pack_nullstr(ctx);
+ }
+ break;
+ default:
+ flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "?");
+ }
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_body_commit_array(ctx->log_encoder);
+ }
+
+}
+
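+/* Encode one event as a record holding the rendered System XML, the formatted
+ * Message and, when string_inserts is enabled, the StringInserts array. */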
+void winevtlog_pack_xml_event(WCHAR *system_xml, WCHAR *message,
+ PEVT_VARIANT string_inserts, UINT count_inserts, struct winevtlog_channel *ch,
+ struct winevtlog_config *ctx)
+{
+ int ret;
+
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "System");
+
+ if (pack_wstr(ctx, system_xml)) {
+ pack_nullstr(ctx);
+ }
+
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "Message");
+
+ if (pack_wstr(ctx, message)) {
+ pack_nullstr(ctx);
+ }
+
+ if (ctx->string_inserts) {
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "StringInserts");
+
+ pack_string_inserts(ctx, string_inserts, count_inserts);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+}
+
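+/* Encode one event as a flat record of System properties, the formatted Message
+ * and, when string_inserts is enabled, the StringInserts array. */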
+void winevtlog_pack_event(PEVT_VARIANT system, WCHAR *message,
+ PEVT_VARIANT string_inserts, UINT count_inserts, struct winevtlog_channel *ch,
+ struct winevtlog_config *ctx)
+{
+ int ret;
+ size_t len;
+ int count = 19;
+
+ if (ctx->string_inserts) {
+ count++;
+ }
+
+ ret = flb_log_event_encoder_begin_record(ctx->log_encoder);
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_set_current_timestamp(ctx->log_encoder);
+ }
+
+ /* ProviderName */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "ProviderName");
+
+ if (pack_wstr(ctx, system[EvtSystemProviderName].StringVal)) {
+ pack_nullstr(ctx);
+ }
+
+ /* ProviderGuid */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "ProviderGuid");
+
+ if (EvtVarTypeNull != system[EvtSystemProviderGuid].Type) {
+ if (pack_guid(ctx, system[EvtSystemProviderGuid].GuidVal)) {
+ pack_nullstr(ctx);
+ }
+ }
+ else {
+ pack_nullstr(ctx);
+ }
+
+ /* Qualifiers */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "Qualifiers");
+
+ if (EvtVarTypeNull != system[EvtSystemQualifiers].Type) {
+ flb_log_event_encoder_append_body_uint16(ctx->log_encoder, system[EvtSystemQualifiers].UInt16Val);
+ }
+ else {
+ pack_nullstr(ctx);
+ }
+
+ /* EventID */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "EventID");
+
+ if (EvtVarTypeNull != system[EvtSystemEventID].Type) {
+ flb_log_event_encoder_append_body_uint16(ctx->log_encoder, system[EvtSystemEventID].UInt16Val);
+ }
+ else {
+ pack_nullstr(ctx);
+ }
+
+ /* Version */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "Version");
+
+ if (EvtVarTypeNull != system[EvtSystemVersion].Type) {
+ flb_log_event_encoder_append_body_uint8(ctx->log_encoder, system[EvtSystemVersion].ByteVal);
+ }
+ else {
+ flb_log_event_encoder_append_body_uint8(ctx->log_encoder, 0);
+ }
+
+ /* Level */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "Level");
+
+ if (EvtVarTypeNull != system[EvtSystemLevel].Type) {
+ flb_log_event_encoder_append_body_uint8(ctx->log_encoder, system[EvtSystemLevel].ByteVal);
+ }
+ else {
+ flb_log_event_encoder_append_body_uint8(ctx->log_encoder, 0);
+ }
+
+ /* Task */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "Task");
+
+ if (EvtVarTypeNull != system[EvtSystemTask].Type) {
+ flb_log_event_encoder_append_body_uint16(ctx->log_encoder, system[EvtSystemTask].UInt16Val);
+ }
+ else {
+ flb_log_event_encoder_append_body_uint16(ctx->log_encoder, 0);
+ }
+
+ /* Opcode */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "Opcode");
+
+ if (EvtVarTypeNull != system[EvtSystemOpcode].Type) {
+ flb_log_event_encoder_append_body_uint8(ctx->log_encoder, system[EvtSystemOpcode].ByteVal);
+ }
+ else {
+ flb_log_event_encoder_append_body_uint8(ctx->log_encoder, 0);
+ }
+
+ /* Keywords */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "Keywords");
+
+ if (EvtVarTypeNull != system[EvtSystemKeywords].Type) {
+ pack_keywords(ctx, system[EvtSystemKeywords].UInt64Val);
+ }
+ else {
+ flb_log_event_encoder_append_body_uint64(ctx->log_encoder, 0);
+ }
+
+ /* TimeCreated */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "TimeCreated");
+
+ if (pack_filetime(ctx, system[EvtSystemTimeCreated].FileTimeVal)) {
+ pack_nullstr(ctx);
+ }
+
+ /* EventRecordID */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "EventRecordID");
+
+ if (EvtVarTypeNull != system[EvtSystemEventRecordId].Type) {
+ flb_log_event_encoder_append_body_uint64(ctx->log_encoder, system[EvtSystemEventRecordId].UInt64Val);
+ }
+ else {
+ flb_log_event_encoder_append_body_uint64(ctx->log_encoder, 0);
+ }
+
+ /* ActivityID */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "ActivityID");
+
+ if (pack_guid(ctx, system[EvtSystemActivityID].GuidVal)) {
+ pack_nullstr(ctx);
+ }
+
+ /* Related ActivityID */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "RelatedActivityID");
+
+ if (pack_guid(ctx, system[EvtSystemRelatedActivityID].GuidVal)) {
+ pack_nullstr(ctx);
+ }
+
+ /* ProcessID */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "ProcessID");
+
+ if (EvtVarTypeNull != system[EvtSystemProcessID].Type) {
+ flb_log_event_encoder_append_body_uint32(ctx->log_encoder, system[EvtSystemProcessID].UInt32Val);
+ }
+ else {
+ flb_log_event_encoder_append_body_uint32(ctx->log_encoder, 0);
+ }
+
+ /* ThreadID */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "ThreadID");
+
+ if (EvtVarTypeNull != system[EvtSystemThreadID].Type) {
+ flb_log_event_encoder_append_body_uint32(ctx->log_encoder, system[EvtSystemThreadID].UInt32Val);
+ }
+ else {
+ flb_log_event_encoder_append_body_uint32(ctx->log_encoder, 0);
+ }
+
+ /* Channel */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "Channel");
+
+ if (pack_wstr(ctx, system[EvtSystemChannel].StringVal)) {
+ pack_nullstr(ctx);
+ }
+
+ /* Computer */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "Computer");
+
+ if (pack_wstr(ctx, system[EvtSystemComputer].StringVal)) {
+ pack_nullstr(ctx);
+ }
+
+ /* UserID */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "UserID");
+
+ if (pack_sid(ctx, system[EvtSystemUserID].SidVal)) {
+ pack_nullstr(ctx);
+ }
+
+ /* Message */
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "Message");
+
+ if (pack_wstr(ctx, message)) {
+ pack_nullstr(ctx);
+ }
+
+ /* String Inserts */
+ if (ctx->string_inserts) {
+ ret = flb_log_event_encoder_append_body_cstring(ctx->log_encoder, "StringInserts");
+
+ pack_string_inserts(ctx, string_inserts, count_inserts);
+ }
+
+ if (ret == FLB_EVENT_ENCODER_SUCCESS) {
+ ret = flb_log_event_encoder_commit_record(ctx->log_encoder);
+ }
+}
diff --git a/src/fluent-bit/plugins/in_winevtlog/winevtlog.c b/src/fluent-bit/plugins/in_winevtlog/winevtlog.c
new file mode 100644
index 000000000..09d3f9624
--- /dev/null
+++ b/src/fluent-bit/plugins/in_winevtlog/winevtlog.c
@@ -0,0 +1,840 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_sqldb.h>
+#include <fluent-bit/flb_input.h>
+#include "winevtlog.h"
+
+#define EVENT_PROVIDER_NAME_LENGTH 256
+
+static char* convert_wstr(wchar_t *wstr, UINT codePage);
+static wchar_t* convert_str(char *str);
+
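+/* Open a subscription on a channel with EvtSubscribe, resuming from a stored
+ * bookmark when available, otherwise starting at the oldest or only future records. */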
+struct winevtlog_channel *winevtlog_subscribe(const char *channel, int read_existing_events,
+ EVT_HANDLE stored_bookmark, const char *query)
+{
+ struct winevtlog_channel *ch;
+ EVT_HANDLE bookmark = NULL;
+ HANDLE signal_event = NULL;
+ DWORD len;
+ DWORD flags = 0L;
+ PWSTR wide_channel = NULL;
+ PWSTR wide_query = NULL;
+ void *buf;
+
+ ch = flb_calloc(1, sizeof(struct winevtlog_channel));
+ if (ch == NULL) {
+ flb_errno();
+ return NULL;
+ }
+
+ ch->name = flb_strdup(channel);
+ if (!ch->name) {
+ flb_errno();
+ flb_free(ch);
+ return NULL;
+ }
+ ch->query = NULL;
+
+ signal_event = CreateEvent(NULL, FALSE, FALSE, NULL);
+
+ // channel : To wide char
+ len = MultiByteToWideChar(CP_UTF8, 0, channel, -1, NULL, 0);
+    wide_channel = flb_malloc(sizeof(WCHAR) * len);
+ MultiByteToWideChar(CP_UTF8, 0, channel, -1, wide_channel, len);
+ if (query != NULL) {
+ // query : To wide char
+ len = MultiByteToWideChar(CP_UTF8, 0, query, -1, NULL, 0);
+        wide_query = flb_malloc(sizeof(WCHAR) * len);
+ MultiByteToWideChar(CP_UTF8, 0, query, -1, wide_query, len);
+ ch->query = flb_strdup(query);
+ }
+
+ if (stored_bookmark) {
+ flags |= EvtSubscribeStartAfterBookmark;
+ } else if (read_existing_events) {
+ flags |= EvtSubscribeStartAtOldestRecord;
+ } else {
+ flags |= EvtSubscribeToFutureEvents;
+ }
+
+ /* The wide_query parameter can handle NULL as `*` for retrieving all events.
+ * ref. https://learn.microsoft.com/en-us/windows/win32/api/winevt/nf-winevt-evtsubscribe
+ */
+ ch->subscription = EvtSubscribe(NULL, signal_event, wide_channel, wide_query,
+ stored_bookmark, NULL, NULL, flags);
+ if (!ch->subscription) {
+ flb_error("[in_winevtlog] cannot subscribe '%s' (%i)", channel, GetLastError());
+ flb_free(ch->name);
+ if (ch->query != NULL) {
+ flb_free(ch->query);
+ }
+ flb_free(ch);
+ return NULL;
+ }
+ ch->signal_event = signal_event;
+
+ if (stored_bookmark) {
+ ch->bookmark = stored_bookmark;
+ }
+ else {
+ bookmark = EvtCreateBookmark(NULL);
+ if (bookmark) {
+ ch->bookmark = bookmark;
+ }
+ else {
+ if (ch->subscription) {
+ EvtClose(ch->subscription);
+ }
+ if (signal_event) {
+ CloseHandle(signal_event);
+ }
+ flb_error("[in_winevtlog] cannot subscribe '%s' (%i)", channel, GetLastError());
+ flb_free(wide_channel);
+ flb_free(ch->name);
+ if (ch->query != NULL) {
+ flb_free(ch->query);
+ }
+ flb_free(ch);
+ return NULL;
+ }
+ }
+
+ flb_free(wide_channel);
+ if (wide_query != NULL) {
+ flb_free(wide_query);
+ }
+
+ return ch;
+}
+
+BOOL cancel_subscription(struct winevtlog_channel *ch)
+{
+ return EvtCancel(ch->subscription);
+}
+
+static void close_handles(struct winevtlog_channel *ch)
+{
+ int i;
+
+ if (ch->subscription) {
+ EvtClose(ch->subscription);
+ ch->subscription = NULL;
+ }
+ if (ch->signal_event) {
+ CloseHandle(ch->signal_event);
+ ch->signal_event = NULL;
+ }
+ if (ch->bookmark) {
+ EvtClose(ch->bookmark);
+ ch->bookmark = NULL;
+ }
+ for (i = 0; i < ch->count; i++) {
+ if (ch->events[i]) {
+ EvtClose(ch->events[i]);
+ ch->events[i] = NULL;
+ }
+ }
+ ch->count = 0;
+}
+
+
+void winevtlog_close(struct winevtlog_channel *ch)
+{
+ flb_free(ch->name);
+ if (ch->query != NULL) {
+ flb_free(ch->query);
+ }
+ close_handles(ch);
+
+ flb_free(ch);
+}
+
+// Render the event (or a bookmark) as an XML string and return it.
+PWSTR render_event(EVT_HANDLE hEvent, DWORD flags, unsigned int *event_size)
+{
+ DWORD status = ERROR_SUCCESS;
+ DWORD buffer_size = 0;
+ DWORD buffer_used = 0;
+ DWORD count = 0;
+ LPWSTR event_xml = NULL;
+
+ if (flags != EvtRenderEventXml && flags != EvtRenderBookmark) {
+ flb_error("Invalid flags is specified: %d", flags);
+ return NULL;
+ }
+
+ if (!EvtRender(NULL, hEvent, flags, buffer_size, event_xml, &buffer_used, &count)) {
+ status = GetLastError();
+ if (status == ERROR_INSUFFICIENT_BUFFER) {
+ buffer_size = buffer_used;
+ /* return buffer size */
+ *event_size = buffer_size;
+ event_xml = (LPWSTR)flb_malloc(buffer_size);
+ if (event_xml) {
+ EvtRender(NULL, hEvent, flags, buffer_size, event_xml, &buffer_used, &count);
+ }
+ else {
+ flb_error("malloc failed");
+ goto cleanup;
+ }
+ }
+
+ status = GetLastError();
+ if (status != ERROR_SUCCESS) {
+ flb_error("EvtRender failed with %d", GetLastError());
+ goto cleanup;
+ }
+ }
+
+ return event_xml;
+
+cleanup:
+
+ if (event_xml) {
+ flb_free(event_xml);
+ }
+
+ return NULL;
+}
+
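+/* Render the System values of an event into an EVT_VARIANT array using the usual
+ * two-pass EvtRender call: probe the required size, then render into the buffer. */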
+DWORD render_system_event(EVT_HANDLE event, PEVT_VARIANT *system, unsigned int *system_size)
+{
+ DWORD status = ERROR_SUCCESS;
+ EVT_HANDLE context = NULL;
+ DWORD buffer_size = 0;
+ DWORD buffer_used = 0;
+ DWORD count = 0;
+ PEVT_VARIANT rendered_system = NULL;
+
+ context = EvtCreateRenderContext(0, NULL, EvtRenderContextSystem);
+ if (NULL == context) {
+ status = GetLastError();
+ flb_error("failed to create RenderContext with %d", status);
+
+ goto cleanup;
+ }
+ if (!EvtRender(context,
+ event,
+ EvtRenderEventValues,
+ buffer_size,
+ rendered_system,
+ &buffer_used,
+ &count)) {
+ status = GetLastError();
+
+ if (status == ERROR_INSUFFICIENT_BUFFER) {
+ buffer_size = buffer_used;
+ rendered_system = (PEVT_VARIANT)flb_malloc(buffer_size);
+ if (rendered_system) {
+ EvtRender(context,
+ event,
+ EvtRenderEventValues,
+ buffer_size,
+ rendered_system,
+ &buffer_used,
+ &count);
+ status = GetLastError();
+ *system_size = buffer_used;
+ } else {
+ if (rendered_system)
+ flb_free(rendered_system);
+
+ flb_error("failed to malloc memory with %d", status);
+
+ goto cleanup;
+ }
+ }
+
+ if (ERROR_SUCCESS != status) {
+ EvtClose(context);
+ flb_free(rendered_system);
+
+ return status;
+ }
+ }
+
+ *system = rendered_system;
+
+cleanup:
+
+ if (context) {
+ EvtClose(context);
+ }
+
+ return status;
+}
+
+
+PWSTR get_message(EVT_HANDLE metadata, EVT_HANDLE handle, unsigned int *message_size)
+{
+ WCHAR* buffer = NULL;
+ DWORD status = ERROR_SUCCESS;
+ DWORD buffer_size = 0;
+ DWORD buffer_used = 0;
+ LPVOID format_message_buffer;
+ WCHAR* message = NULL;
+ char *error_message = NULL;
+
+ // Get the size of the buffer
+ if (!EvtFormatMessage(metadata, handle, 0, 0, NULL,
+ EvtFormatMessageEvent, buffer_size, buffer, &buffer_used)) {
+ status = GetLastError();
+ if (ERROR_INSUFFICIENT_BUFFER == status) {
+ buffer_size = buffer_used;
+ buffer = flb_malloc(sizeof(WCHAR) * buffer_size);
+ if (!buffer) {
+ flb_error("failed to malloc message buffer");
+
+ goto cleanup;
+ }
+ if (!EvtFormatMessage(metadata,
+ handle,
+ 0xffffffff,
+ 0,
+ NULL,
+ EvtFormatMessageEvent,
+ buffer_size,
+ buffer,
+ &buffer_used)) {
+ status = GetLastError();
+ *message_size = buffer_used;
+
+ if (status != ERROR_EVT_UNRESOLVED_VALUE_INSERT) {
+ switch (status) {
+ case ERROR_EVT_MESSAGE_NOT_FOUND:
+ case ERROR_EVT_MESSAGE_ID_NOT_FOUND:
+ case ERROR_EVT_MESSAGE_LOCALE_NOT_FOUND:
+ case ERROR_RESOURCE_DATA_NOT_FOUND:
+ case ERROR_RESOURCE_TYPE_NOT_FOUND:
+ case ERROR_RESOURCE_NAME_NOT_FOUND:
+ case ERROR_RESOURCE_LANG_NOT_FOUND:
+ case ERROR_MUI_FILE_NOT_FOUND:
+ case ERROR_EVT_UNRESOLVED_PARAMETER_INSERT:
+ {
+ if (FormatMessageW(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ status,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (WCHAR*)(&format_message_buffer),
+ 0,
+ NULL) == 0)
+ FormatMessageW(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ status,
+ MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
+ (WCHAR*)(&format_message_buffer),
+ 0,
+ NULL);
+ error_message = convert_wstr((WCHAR*)format_message_buffer, CP_ACP);
+ flb_error("Failed to get message with %d, err = %s", status, error_message);
+ flb_free(error_message);
+
+ message = _wcsdup((WCHAR*)format_message_buffer);
+ LocalFree(format_message_buffer);
+
+ goto cleanup;
+ }
+ }
+
+ if (status != ERROR_INSUFFICIENT_BUFFER) {
+ flb_error("failed with %d", status);
+ goto cleanup;
+ }
+ }
+ }
+ }
+ }
+
+ message = _wcsdup(buffer);
+
+cleanup:
+ if (buffer) {
+ flb_free(buffer);
+ }
+
+ return message;
+}
+
+PWSTR get_description(EVT_HANDLE handle, LANGID langID, unsigned int *message_size)
+{
+ WCHAR *buffer[EVENT_PROVIDER_NAME_LENGTH];
+ PEVT_VARIANT values = NULL;
+ DWORD buffer_used = 0;
+ DWORD status = ERROR_SUCCESS;
+ DWORD count = 0;
+ WCHAR *message = NULL;
+ EVT_HANDLE metadata = NULL;
+
+ PCWSTR properties[] = { L"Event/System/Provider/@Name" };
+ EVT_HANDLE context =
+ EvtCreateRenderContext(1, properties, EvtRenderContextValues);
+ if (context == NULL) {
+ flb_error("Failed to create renderContext");
+ goto cleanup;
+ }
+
+ if (EvtRender(context,
+ handle,
+ EvtRenderEventValues,
+ EVENT_PROVIDER_NAME_LENGTH,
+ buffer,
+ &buffer_used,
+ &count) != FALSE){
+ status = ERROR_SUCCESS;
+ }
+ else {
+ status = GetLastError();
+ }
+
+ if (status != ERROR_SUCCESS) {
+ flb_error("failed to query RenderContextValues");
+ goto cleanup;
+ }
+ values = (PEVT_VARIANT)buffer;
+
+ metadata = EvtOpenPublisherMetadata(
+ NULL, // TODO: Remote handle
+ values[0].StringVal,
+ NULL,
+ MAKELCID(langID, SORT_DEFAULT),
+ 0);
+ if (metadata == NULL) {
+ goto cleanup;
+ }
+
+ message = get_message(metadata, handle, message_size);
+
+cleanup:
+ if (context) {
+ EvtClose(context);
+ }
+
+ if (metadata) {
+ EvtClose(metadata);
+ }
+
+ return message;
+}
+
+int get_string_inserts(EVT_HANDLE handle, PEVT_VARIANT *string_inserts_values,
+ UINT *prop_count, unsigned int *string_inserts_size)
+{
+ PEVT_VARIANT values;
+ DWORD buffer_size = 0;
+ DWORD buffer_size_used = 0;
+ DWORD count = 0;
+ BOOL succeeded = FLB_TRUE;
+
+ EVT_HANDLE context = EvtCreateRenderContext(0, NULL, EvtRenderContextUser);
+ if (context == NULL) {
+ flb_error("Failed to create renderContext");
+ succeeded = FLB_FALSE;
+ goto cleanup;
+ }
+
+ // Get the size of the buffer
+ EvtRender(context, handle, EvtRenderEventValues, 0, NULL, &buffer_size, &count);
+ values = (PEVT_VARIANT)flb_malloc(buffer_size);
+
+ succeeded = EvtRender(context,
+ handle,
+ EvtRenderContextValues,
+ buffer_size,
+ values,
+ &buffer_size_used,
+ &count);
+
+ if (!succeeded) {
+ flb_error("Failed to get string inserts with %d\n", GetLastError());
+ goto cleanup;
+ }
+
+ *prop_count = count;
+ *string_inserts_values = values;
+ *string_inserts_size = buffer_size;
+
+cleanup:
+
+ if (context != NULL) {
+ EvtClose(context);
+ }
+
+ return succeeded;
+}
+
+static int winevtlog_next(struct winevtlog_channel *ch, int hit_threshold)
+{
+ EVT_HANDLE events[SUBSCRIBE_ARRAY_SIZE];
+ DWORD count = 0;
+ DWORD status = ERROR_SUCCESS;
+ BOOL has_next = FALSE;
+ int i;
+
+    /* If the subscription handle is NULL, return false. */
+    if (!ch->subscription) {
+        flb_error("Invalid subscription handle passed");
+ return FLB_FALSE;
+ }
+
+ if (hit_threshold) {
+ return FLB_FALSE;
+ }
+
+ has_next = EvtNext(ch->subscription, SUBSCRIBE_ARRAY_SIZE,
+ events, INFINITE, 0, &count);
+
+ if (!has_next) {
+ status = GetLastError();
+ if (ERROR_CANCELLED == status) {
+ return FLB_FALSE;
+ }
+ if (ERROR_NO_MORE_ITEMS != status) {
+ return FLB_FALSE;
+ }
+ }
+
+ if (status == ERROR_SUCCESS) {
+ ch->count = count;
+ for (i = 0; i < count; i++) {
+ ch->events[i] = events[i];
+ EvtUpdateBookmark(ch->bookmark, ch->events[i]);
+ }
+
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+/*
+ * Read from an open Windows Event Log channel.
+ */
+int winevtlog_read(struct winevtlog_channel *ch, struct winevtlog_config *ctx,
+ unsigned int *read)
+{
+ DWORD status = ERROR_SUCCESS;
+ PWSTR system_xml = NULL;
+ unsigned int system_size = 0;
+ unsigned int message_size = 0;
+ unsigned int string_inserts_size = 0;
+ int hit_threshold = FLB_FALSE;
+ unsigned int read_size = 0;
+ PWSTR message = NULL;
+ PEVT_VARIANT rendered_system = NULL;
+ PEVT_VARIANT string_inserts = NULL;
+ UINT count_inserts = 0;
+ DWORD i = 0;
+
+ while (winevtlog_next(ch, hit_threshold)) {
+ for (i = 0; i < ch->count; i++) {
+ if (ctx->render_event_as_xml) {
+ system_xml = render_event(ch->events[i], EvtRenderEventXml, &system_size);
+ message = get_description(ch->events[i], LANG_NEUTRAL, &message_size);
+ get_string_inserts(ch->events[i], &string_inserts, &count_inserts, &string_inserts_size);
+ if (system_xml) {
+                /* Calculate total allocated size: system + message + string_inserts */
+ read_size += (system_size + message_size + string_inserts_size);
+ winevtlog_pack_xml_event(system_xml, message, string_inserts,
+ count_inserts, ch, ctx);
+
+ flb_free(string_inserts);
+ flb_free(system_xml);
+ if (message)
+ flb_free(message);
+ }
+ }
+ else {
+ render_system_event(ch->events[i], &rendered_system, &system_size);
+ message = get_description(ch->events[i], LANG_NEUTRAL, &message_size);
+ get_string_inserts(ch->events[i], &string_inserts, &count_inserts, &string_inserts_size);
+ if (rendered_system) {
+                /* Calculate total allocated size: system + message + string_inserts */
+ read_size += (system_size + message_size + string_inserts_size);
+ winevtlog_pack_event(rendered_system, message, string_inserts,
+ count_inserts, ch, ctx);
+
+ flb_free(string_inserts);
+ flb_free(rendered_system);
+ if (message)
+ flb_free(message);
+ }
+ }
+ }
+
+        /* Close any remaining event handles in case an error occurred above. */
+ for (i = 0; i < ch->count; i++) {
+ if (NULL != ch->events[i]) {
+ EvtClose(ch->events[i]);
+ ch->events[i] = NULL;
+ }
+ }
+
+ if (read_size > ctx->total_size_threshold) {
+ hit_threshold = FLB_TRUE;
+            /* Reading threshold reached; stop reading for now. */
+ break;
+ }
+ }
+
+ *read = read_size;
+
+ return 0;
+}
+
+/*
+ * Open multiple channels at once. The return value is a linked
+ * list of winevtlog_channel objects.
+ *
+ * "channels" are comma-separated names like "Setup,Security".
+ */
+struct mk_list *winevtlog_open_all(const char *channels, struct winevtlog_config *ctx)
+{
+ char *tmp;
+ char *channel;
+ char *state;
+ struct winevtlog_channel *ch;
+ struct mk_list *list;
+
+ tmp = flb_strdup(channels);
+ if (!tmp) {
+ flb_errno();
+ return NULL;
+ }
+
+ list = flb_malloc(sizeof(struct mk_list));
+ if (!list) {
+ flb_errno();
+ flb_free(tmp);
+ return NULL;
+ }
+ mk_list_init(list);
+
+ channel = strtok_s(tmp , ",", &state);
+ while (channel) {
+ ch = winevtlog_subscribe(channel, ctx->read_existing_events, NULL, ctx->event_query);
+ if (ch) {
+ mk_list_add(&ch->_head, list);
+ }
+ else {
+ if (ctx->ignore_missing_channels) {
+ flb_debug("[in_winevtlog] channel '%s' does not exist", channel);
+ }
+ else {
+ flb_free(tmp);
+ winevtlog_close_all(list);
+ return NULL;
+ }
+ }
+ channel = strtok_s(NULL, ",", &state);
+ }
+
+ if (mk_list_size(list) == 0) {
+ flb_free(tmp);
+ winevtlog_close_all(list);
+ return NULL;
+ }
+
+ flb_free(tmp);
+ return list;
+}
+
+void winevtlog_close_all(struct mk_list *list)
+{
+ struct winevtlog_channel *ch;
+ struct mk_list *head;
+ struct mk_list *tmp;
+
+ mk_list_foreach_safe(head, tmp, list) {
+ ch = mk_list_entry(head, struct winevtlog_channel, _head);
+ mk_list_del(&ch->_head);
+ winevtlog_close(ch);
+ }
+ flb_free(list);
+}
+
+/*
+ * Callback function for flb_sqldb_query().
+ */
+static int winevtlog_sqlite_callback(void *data, int argc, char **argv, char **cols)
+{
+ struct winevtlog_sqlite_record *p = data;
+
+ p->name = argv[0];
+ p->bookmark_xml = strdup(argv[1]);
+ p->time_updated = (unsigned int) strtoul(argv[2], NULL, 10);
+ p->created = (unsigned int) strtoul(argv[3], NULL, 10);
+ return 0;
+}
+
+static wchar_t* convert_str(char *str)
+{
+ int size = 0;
+ wchar_t *buf = NULL;
+
+ size = MultiByteToWideChar(CP_UTF8, 0, str, -1, NULL, 0);
+ if (size == 0) {
+ return NULL;
+ }
+
+ buf = flb_malloc(sizeof(PWSTR) * size);
+ if (buf == NULL) {
+ flb_errno();
+ return NULL;
+ }
+ size = MultiByteToWideChar(CP_UTF8, 0, str, -1, buf, size);
+ if (size == 0) {
+ flb_free(buf);
+ return NULL;
+ }
+
+ return buf;
+}
+
+static char* convert_wstr(wchar_t *wstr, UINT codePage)
+{
+ int size = 0;
+ char *buf = NULL;
+
+ size = WideCharToMultiByte(codePage, 0, wstr, -1, NULL, 0, NULL, NULL);
+ if (size == 0) {
+ return NULL;
+ }
+
+ buf = flb_malloc(size);
+ if (buf == NULL) {
+ flb_errno();
+ return NULL;
+ }
+ size = WideCharToMultiByte(codePage, 0, wstr, -1, buf, size, NULL, NULL);
+ if (size == 0) {
+ flb_free(buf);
+ return NULL;
+ }
+
+ return buf;
+}
+
+/*
+ * Load the bookmark from SQLite DB.
+ */
+int winevtlog_sqlite_load(struct winevtlog_channel *ch, struct flb_sqldb *db)
+{
+ int ret;
+ char query[1024];
+ struct winevtlog_sqlite_record record = {0};
+ EVT_HANDLE bookmark = NULL;
+ PWSTR bookmark_xml = NULL;
+ struct winevtlog_channel *re_ch = NULL;
+
+ snprintf(query, sizeof(query) - 1, SQL_GET_CHANNEL, ch->name);
+
+ ret = flb_sqldb_query(db, query, winevtlog_sqlite_callback, &record);
+ if (ret == FLB_ERROR) {
+ return -1;
+ }
+
+ if (record.created) {
+ ch->time_created = record.created;
+ }
+ if (record.time_updated) {
+ ch->time_updated = record.time_updated;
+ }
+
+ if (record.name) {
+ bookmark_xml = convert_str(record.bookmark_xml);
+ if (bookmark_xml) {
+ bookmark = EvtCreateBookmark(bookmark_xml);
+ if (bookmark) {
+ /* re-create subscription handles */
+ re_ch = winevtlog_subscribe(ch->name, FLB_FALSE, bookmark, ch->query);
+ if (re_ch != NULL) {
+ close_handles(ch);
+
+ ch->bookmark = re_ch->bookmark;
+ ch->subscription = re_ch->subscription;
+ ch->signal_event = re_ch->signal_event;
+ }
+ else {
+ flb_error("Failed to subscribe with bookmark XML: %s\n", record.bookmark_xml);
+ ch->bookmark = EvtCreateBookmark(NULL);
+ }
+ }
+ else {
+ flb_error("Failed to load bookmark XML with %d\n", GetLastError());
+ ch->bookmark = EvtCreateBookmark(NULL);
+ }
+ }
+ if (bookmark_xml) {
+ flb_free(bookmark_xml);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Save the bookmark into SQLite DB.
+ */
+int winevtlog_sqlite_save(struct winevtlog_channel *ch, struct flb_sqldb *db)
+{
+ int ret;
+ char query[1024];
+ PWSTR wide_bookmark_xml = NULL;
+ char *bookmark_xml;
+ int used_size = 0;
+
+ wide_bookmark_xml = render_event(ch->bookmark, EvtRenderBookmark, &used_size);
+ if (wide_bookmark_xml == NULL) {
+ flb_error("failed to render bookmark with %d", GetLastError());
+ flb_free(wide_bookmark_xml);
+
+ return -1;
+ }
+ bookmark_xml = convert_wstr(wide_bookmark_xml, CP_UTF8);
+ if (bookmark_xml == NULL) {
+        flb_error("failed to convert wide string with %d", GetLastError());
+ flb_free(wide_bookmark_xml);
+ flb_free(bookmark_xml);
+
+ return -1;
+ }
+
+ snprintf(query, sizeof(query) - 1, SQL_UPDATE_CHANNEL,
+ ch->name, bookmark_xml, ch->time_updated, time(NULL));
+
+ ret = flb_sqldb_query(db, query, NULL, NULL);
+ if (ret == FLB_ERROR) {
+ flb_error("failed to save db with %d", GetLastError());
+ flb_free(wide_bookmark_xml);
+ flb_free(bookmark_xml);
+
+ return -1;
+ }
+
+ flb_free(wide_bookmark_xml);
+ flb_free(bookmark_xml);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_winevtlog/winevtlog.h b/src/fluent-bit/plugins/in_winevtlog/winevtlog.h
new file mode 100644
index 000000000..10ef3e457
--- /dev/null
+++ b/src/fluent-bit/plugins/in_winevtlog/winevtlog.h
@@ -0,0 +1,134 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2019-2021 The Fluent Bit Authors
+ * Copyright (C) 2015-2018 Treasure Data Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WINEVTLOG_H
+#define FLB_WINEVTLOG_H
+
+#include <winevt.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+struct winevtlog_config {
+ unsigned int interval_sec;
+ unsigned int interval_nsec;
+ unsigned int total_size_threshold;
+ int string_inserts;
+ int read_existing_events;
+ int render_event_as_xml;
+ int use_ansi;
+ int ignore_missing_channels;
+ flb_sds_t event_query;
+
+ struct mk_list *active_channel;
+ struct flb_sqldb *db;
+ flb_pipefd_t coll_fd;
+ struct flb_input_instance *ins;
+ struct flb_log_event_encoder *log_encoder;
+};
+
+/* Some channels have very heavy content when fetching 10 events at a time.
+ * For now, limit the simultaneous subscribe size to 5.
+ */
+#define SUBSCRIBE_ARRAY_SIZE 5
+
+struct winevtlog_channel {
+ EVT_HANDLE subscription;
+ EVT_HANDLE bookmark;
+ HANDLE signal_event;
+ EVT_HANDLE events[SUBSCRIBE_ARRAY_SIZE];
+ int count;
+
+ char *name;
+ char *query;
+ unsigned int time_updated;
+ unsigned int time_created;
+ struct mk_list _head;
+};
+
+struct winevtlog_sqlite_record {
+ char *name;
+ char *bookmark_xml;
+ unsigned int time_updated;
+ unsigned int created;
+};
+
+/*
+ * Open a Windows Event Log channel.
+ */
+struct winevtlog_channel *winevtlog_open(const char *channel);
+void winevtlog_close(struct winevtlog_channel *ch);
+
+/*
+ * Read records from a channel.
+ */
+int winevtlog_read(struct winevtlog_channel *ch,
+ struct winevtlog_config *ctx, unsigned int *read);
+
+/*
+ * A bulk API to handle multiple channels at once using mk_list.
+ *
+ * "channels" are comma-separated names like "Setup,Security".
+ */
+struct mk_list *winevtlog_open_all(const char *channels, struct winevtlog_config *ctx);
+void winevtlog_close_all(struct mk_list *list);
+
+void winevtlog_pack_xml_event(WCHAR *system_xml, WCHAR *message,
+ PEVT_VARIANT string_inserts, UINT count_inserts, struct winevtlog_channel *ch,
+ struct winevtlog_config *ctx);
+void winevtlog_pack_event(PEVT_VARIANT system, WCHAR *message,
+ PEVT_VARIANT string_inserts, UINT count_inserts, struct winevtlog_channel *ch,
+ struct winevtlog_config *ctx);
+
+/*
+ * Load/save the bookmark (read offset) from/to the SQLite DB.
+ */
+int winevtlog_sqlite_load(struct winevtlog_channel *ch, struct flb_sqldb *db);
+int winevtlog_sqlite_save(struct winevtlog_channel *ch, struct flb_sqldb *db);
+
+/*
+ * SQL templates
+ */
+#define SQL_CREATE_CHANNELS \
+ "CREATE TABLE IF NOT EXISTS in_winevtlog_channels (" \
+ " name TEXT PRIMARY KEY," \
+ " bookmark_xml TEXT," \
+ " time_updated INTEGER," \
+ " created INTEGER" \
+ ");"
+
+#define SQL_GET_CHANNEL \
+ "SELECT name, bookmark_xml, time_updated, created" \
+ " FROM in_winevtlog_channels WHERE name = '%s';"
+
+/*
+ * This uses UPSERT, i.e. it executes INSERT first and falls back to
+ * UPDATE if the entry already exists. It saves the trouble of
+ * doing an existence check manually.
+ *
+ * https://www.sqlite.org/lang_UPSERT.html
+ */
+#define SQL_UPDATE_CHANNEL \
+ "INSERT INTO in_winevtlog_channels" \
+ " (name, bookmark_xml, time_updated, created)" \
+ " VALUES ('%s', \"%s\", %u, %llu)" \
+ " ON CONFLICT(name) DO UPDATE" \
+ " SET bookmark_xml = excluded.bookmark_xml," \
+    "      time_updated = excluded.time_updated"
+
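+/*
+ * Illustrative expansion only (channel name, bookmark XML and timestamps are
+ * hypothetical): saving a bookmark for the "Application" channel yields a
+ * statement roughly like
+ *
+ *   INSERT INTO in_winevtlog_channels
+ *     (name, bookmark_xml, time_updated, created)
+ *     VALUES ('Application', "<BookmarkList>...</BookmarkList>", 0, 1700000000)
+ *     ON CONFLICT(name) DO UPDATE
+ *     SET bookmark_xml = excluded.bookmark_xml,
+ *         time_updated = excluded.time_updated;
+ */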
+#endif
diff --git a/src/fluent-bit/plugins/in_winlog/CMakeLists.txt b/src/fluent-bit/plugins/in_winlog/CMakeLists.txt
new file mode 100644
index 000000000..7b8b31563
--- /dev/null
+++ b/src/fluent-bit/plugins/in_winlog/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ in_winlog.c
+ pack.c
+ winlog.c)
+
+FLB_PLUGIN(in_winlog "${src}" "advapi32")
diff --git a/src/fluent-bit/plugins/in_winlog/in_winlog.c b/src/fluent-bit/plugins/in_winlog/in_winlog.c
new file mode 100644
index 000000000..1d5398635
--- /dev/null
+++ b/src/fluent-bit/plugins/in_winlog/in_winlog.c
@@ -0,0 +1,267 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_kernel.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_sqldb.h>
+#include "winlog.h"
+
+#define DEFAULT_INTERVAL_SEC 1
+#define DEFAULT_INTERVAL_NSEC 0
+#define DEFAULT_BUFFER_SIZE 0x7ffff /* Max size allowed by Win32 (512kb) */
+
+static int in_winlog_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context);
+
+static int in_winlog_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ const char *tmp;
+ struct mk_list *head;
+ struct winlog_channel *ch;
+ struct winlog_config *ctx;
+
+ /* Initialize context */
+ ctx = flb_calloc(1, sizeof(struct winlog_config));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = in;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Read Buffer */
+ ctx->bufsize = DEFAULT_BUFFER_SIZE;
+ ctx->buf = flb_malloc(ctx->bufsize);
+ if (!ctx->buf) {
+ flb_errno();
+        flb_free(ctx);
+        return -1;
+    }
+
+ /* Open channels */
+ tmp = flb_input_get_property("channels", in);
+ if (!tmp) {
+ flb_plg_debug(ctx->ins, "no channel provided. listening to 'Application'");
+ tmp = "Application";
+ }
+
+ ctx->active_channel = winlog_open_all(tmp);
+ if (!ctx->active_channel) {
+ flb_plg_error(ctx->ins, "failed to open channels");
+ flb_free(ctx->buf);
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Initialize SQLite DB (optional) */
+ tmp = flb_input_get_property("db", in);
+ if (tmp) {
+ ctx->db = flb_sqldb_open(tmp, in->name, config);
+ if (!ctx->db) {
+ flb_plg_error(ctx->ins, "could not open/create database");
+ winlog_close_all(ctx->active_channel);
+ flb_free(ctx->buf);
+ flb_free(ctx);
+ return -1;
+ }
+
+ ret = flb_sqldb_query(ctx->db, SQL_CREATE_CHANNELS, NULL, NULL);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "could not create 'channels' table");
+ flb_sqldb_close(ctx->db);
+ winlog_close_all(ctx->active_channel);
+ flb_free(ctx->buf);
+ flb_free(ctx);
+ return -1;
+ }
+
+ mk_list_foreach(head, ctx->active_channel) {
+ ch = mk_list_entry(head, struct winlog_channel, _head);
+ winlog_sqlite_load(ch, ctx->db);
+ flb_plg_debug(ctx->ins, "load channel<%s record=%u time=%u>",
+ ch->name, ch->record_number, ch->time_written);
+ }
+ }
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ /* Set the collector */
+ ret = flb_input_set_collector_time(in,
+ in_winlog_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not set up a collector");
+ }
+ ctx->coll_fd = ret;
+
+ return 0;
+}
+
+static int in_winlog_read_channel(struct flb_input_instance *ins,
+ struct winlog_config *ctx,
+ struct winlog_channel *ch)
+{
+ unsigned int read;
+ char *ptr;
+ PEVENTLOGRECORD evt;
+ msgpack_packer mp_pck;
+ msgpack_sbuffer mp_sbuf;
+
+ if (winlog_read(ch, ctx->buf, ctx->bufsize, &read)) {
+ flb_plg_error(ctx->ins, "failed to read '%s'", ch->name);
+ return -1;
+ }
+ if (read == 0) {
+ return 0;
+ }
+ flb_plg_debug(ctx->ins, "read %u bytes from '%s'", read, ch->name);
+
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ ptr = ctx->buf;
+ while (ptr < ctx->buf + read) {
+ evt = (PEVENTLOGRECORD) ptr;
+
+ winlog_pack_event(&mp_pck, evt, ch, ctx);
+
+ ch->record_number = evt->RecordNumber;
+ ch->time_written = evt->TimeWritten;
+
+ ptr += evt->Length;
+ }
+
+ if (ctx->db) {
+ flb_plg_debug(ctx->ins, "save channel<%s record=%u time=%u>",
+ ch->name, ch->record_number, ch->time_written);
+ winlog_sqlite_save(ch, ctx->db);
+ }
+
+ flb_input_log_append(ins, NULL, 0, mp_sbuf.data, mp_sbuf.size);
+
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return 0;
+}
+
+static int in_winlog_collect(struct flb_input_instance *ins,
+ struct flb_config *config, void *in_context)
+{
+ struct winlog_config *ctx = in_context;
+ struct mk_list *head;
+ struct winlog_channel *ch;
+
+ mk_list_foreach(head, ctx->active_channel) {
+ ch = mk_list_entry(head, struct winlog_channel, _head);
+ in_winlog_read_channel(ins, ctx, ch);
+ }
+ return 0;
+}
+
+static void in_winlog_pause(void *data, struct flb_config *config)
+{
+ struct winlog_config *ctx = data;
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+}
+
+static void in_winlog_resume(void *data, struct flb_config *config)
+{
+ struct winlog_config *ctx = data;
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+}
+
+static int in_winlog_exit(void *data, struct flb_config *config)
+{
+ struct winlog_config *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ winlog_close_all(ctx->active_channel);
+
+ if (ctx->db) {
+ flb_sqldb_close(ctx->db);
+ }
+ flb_free(ctx->buf);
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "channels", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify a comma-separated list of channels to read from"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "db", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify DB file to save read offsets"
+ },
+ {
+ FLB_CONFIG_MAP_TIME, "interval_sec", "1s",
+ 0, FLB_TRUE, offsetof(struct winlog_config, interval_sec),
+ "Set the polling interval for each channel"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", "0",
+ 0, FLB_TRUE, offsetof(struct winlog_config, interval_nsec),
+ "Set the polling interval for each channel (sub seconds)"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "string_inserts", "true",
+ 0, FLB_TRUE, offsetof(struct winlog_config, string_inserts),
+ "Whether to include StringInserts in output records"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "use_ansi", "false",
+ 0, FLB_TRUE, offsetof(struct winlog_config, use_ansi),
+ "Use ANSI encoding on eventlog messages"
+ },
+
+ /* EOF */
+ {0}
+};
+
+struct flb_input_plugin in_winlog_plugin = {
+ .name = "winlog",
+ .description = "Windows Event Log",
+ .cb_init = in_winlog_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_winlog_collect,
+ .cb_flush_buf = NULL,
+ .cb_pause = in_winlog_pause,
+ .cb_resume = in_winlog_resume,
+ .cb_exit = in_winlog_exit,
+ .config_map = config_map
+};
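+
+/*
+ * Illustrative configuration sketch (classic Fluent Bit config syntax; the
+ * channel list and DB path below are hypothetical examples):
+ *
+ *   [INPUT]
+ *       Name          winlog
+ *       Channels      Setup,Security
+ *       Interval_Sec  5
+ *       DB            winlog.sqlite
+ */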
diff --git a/src/fluent-bit/plugins/in_winlog/pack.c b/src/fluent-bit/plugins/in_winlog/pack.c
new file mode 100644
index 000000000..4547d5514
--- /dev/null
+++ b/src/fluent-bit/plugins/in_winlog/pack.c
@@ -0,0 +1,451 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <msgpack.h>
+#include <sddl.h>
+#include <locale.h>
+#include "winlog.h"
+
+#define REGKEY_MAXLEN 256
+#define FMT_ISO8601 "%Y-%m-%d %H:%M:%S %z"
+#define FMT_EVTLOG L"SYSTEM\\CurrentControlSet\\Services\\EventLog\\%S\\%s"
+#define FMT_EVTALT L"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\WINEVT\\Publishers\\%s"
+
+/* 127 is the max number of function params */
+#define PARAM_MAXNUM 127
+
+#define SRCNAME(evt) ((wchar_t *) ((char *) (evt) + sizeof(EVENTLOGRECORD)))
+#define BINDATA(evt) ((unsigned char *) (evt) + (evt)->DataOffset)
+
+static void pack_nullstr(msgpack_packer *mp_pck)
+{
+ msgpack_pack_str(mp_pck, 0);
+ msgpack_pack_str_body(mp_pck, "", 0);
+}
+
+static int pack_wstr(msgpack_packer *mp_pck, wchar_t *wstr, int use_ansi)
+{
+ int size;
+ char *buf;
+ UINT codePage = CP_UTF8;
+ if (use_ansi) {
+ codePage = CP_ACP;
+ }
+
+ /* Compute the buffer size first */
+ size = WideCharToMultiByte(codePage, 0, wstr, -1, NULL, 0, NULL, NULL);
+ if (size == 0) {
+ return -1;
+ }
+
+ buf = flb_malloc(size);
+ if (buf == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ /* Convert UTF-16 into UTF-8/System code Page encoding */
+ size = WideCharToMultiByte(codePage, 0, wstr, -1, buf, size, NULL, NULL);
+ if (size == 0) {
+ flb_free(buf);
+ return -1;
+ }
+
+ /* Pack buf except the trailing '\0' */
+ msgpack_pack_str(mp_pck, size - 1);
+ msgpack_pack_str_body(mp_pck, buf, size - 1);
+ flb_free(buf);
+ return 0;
+}
+
+static int pack_time(msgpack_packer *mp_pck, int time)
+{
+ size_t len;
+ struct tm tm;
+ char buf[64];
+ _locale_t locale;
+
+ if (_localtime32_s(&tm, &time)) {
+ flb_errno();
+ return -1;
+ }
+
+ locale = _get_current_locale();
+ if (locale == NULL) {
+ return -1;
+ }
+
+ len = _strftime_l(buf, 64, FMT_ISO8601, &tm, locale);
+ if (len == 0) {
+ flb_errno();
+ _free_locale(locale);
+ return -1;
+ }
+ _free_locale(locale);
+ msgpack_pack_str(mp_pck, len);
+ msgpack_pack_str_body(mp_pck, buf, len);
+
+ return 0;
+}
+
+static int pack_event_type(msgpack_packer *mp_pck, int type)
+{
+ switch (type) {
+ case EVENTLOG_SUCCESS:
+ msgpack_pack_str(mp_pck, 7);
+ msgpack_pack_str_body(mp_pck, "Success", 7);
+ break;
+ case EVENTLOG_INFORMATION_TYPE:
+ msgpack_pack_str(mp_pck, 11);
+ msgpack_pack_str_body(mp_pck, "Information", 11);
+ break;
+ case EVENTLOG_WARNING_TYPE:
+ msgpack_pack_str(mp_pck, 7);
+ msgpack_pack_str_body(mp_pck, "Warning", 7);
+ break;
+ case EVENTLOG_ERROR_TYPE:
+ msgpack_pack_str(mp_pck, 5);
+ msgpack_pack_str_body(mp_pck, "Error", 5);
+ break;
+ case EVENTLOG_AUDIT_SUCCESS:
+ msgpack_pack_str(mp_pck, 12);
+ msgpack_pack_str_body(mp_pck, "SuccessAudit", 12);
+ break;
+ case EVENTLOG_AUDIT_FAILURE:
+ msgpack_pack_str(mp_pck, 12);
+ msgpack_pack_str_body(mp_pck, "FailureAudit", 12);
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+static int pack_binary(msgpack_packer *mp_pck, unsigned char *bin, int len)
+{
+ const char *hex = "0123456789abcdef";
+ char *buf;
+ int size = len * 2;
+ int i;
+
+ if (len == 0) {
+ pack_nullstr(mp_pck);
+ return 0;
+ }
+
+ buf = flb_malloc(size);
+ if (buf == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ for (i = 0; i < len; i++) {
+ buf[2*i] = hex[bin[i] / 16];
+ buf[2*i+1] = hex[bin[i] % 16];
+ }
+ msgpack_pack_str(mp_pck, size);
+ msgpack_pack_str_body(mp_pck, buf, size);
+ flb_free(buf);
+ return 0;
+}
+
+static int pack_sid(msgpack_packer *mp_pck, PEVENTLOGRECORD evt,
+ struct winlog_config *ctx)
+{
+ size_t size;
+ char *buf;
+ char *sid = (char *) evt + evt->UserSidOffset;
+
+ if (evt->UserSidLength == 0) {
+ pack_nullstr(mp_pck);
+ return 0;
+ }
+
+ if (!ConvertSidToStringSidA(sid, &buf)) {
+        flb_plg_error(ctx->ins, "failed to convert SID: %i", GetLastError());
+ return -1;
+ }
+
+ size = strlen(buf);
+ msgpack_pack_str(mp_pck, size);
+ msgpack_pack_str_body(mp_pck, buf, size);
+
+ LocalFree(buf);
+ return 0;
+}
+
+static wchar_t *read_registry(HKEY hkey, wchar_t *key, wchar_t *val)
+{
+ int ret;
+ int size;
+ wchar_t *buf;
+ unsigned int flags = RRF_RT_REG_EXPAND_SZ | RRF_RT_REG_SZ;
+
+ /* Get the buffer size first */
+ ret = RegGetValueW(hkey, key, val, flags, NULL, NULL, &size);
+ if (ret != ERROR_SUCCESS) {
+ return NULL;
+ }
+
+ buf = flb_malloc(size);
+ if (buf == NULL) {
+ flb_errno();
+ return NULL;
+ }
+
+ /* Read data into buffer */
+ ret = RegGetValueW(hkey, key, val, flags, NULL, buf, &size);
+ if (ret != ERROR_SUCCESS) {
+ flb_free(buf);
+ return NULL;
+ }
+ return buf;
+}
+
+static wchar_t *query_guid(wchar_t *guid)
+{
+ int ret;
+ wchar_t key[REGKEY_MAXLEN];
+
+ ret = swprintf_s(key, REGKEY_MAXLEN, FMT_EVTALT, guid);
+ if (ret == -1) {
+ flb_errno();
+ return NULL;
+ }
+
+ return read_registry(HKEY_LOCAL_MACHINE, key, L"MessageFileName");
+}
+
+static int pack_message(msgpack_packer *mp_pck, PEVENTLOGRECORD evt,
+ struct winlog_channel *ch, struct winlog_config *ctx)
+{
+ int ret;
+ int i;
+ HMODULE hfile;
+ wchar_t key[REGKEY_MAXLEN];
+ wchar_t *msg;
+ wchar_t *paths;
+ wchar_t *path;
+ wchar_t *guid;
+ wchar_t *state;
+ wchar_t *tmp;
+ DWORD_PTR *args = NULL;
+
+ ret = swprintf_s(key, REGKEY_MAXLEN, FMT_EVTLOG, ch->name, SRCNAME(evt));
+ if (ret == -1) {
+ flb_errno();
+ return -1;
+ }
+
+ guid = read_registry(HKEY_LOCAL_MACHINE, key, L"ProviderGuid");
+ if (guid) {
+ paths = query_guid(guid);
+ flb_free(guid);
+ }
+ else {
+ paths = read_registry(HKEY_LOCAL_MACHINE, key, L"EventMessageFile");
+ }
+
+ if (paths == NULL) {
+ return -1;
+ }
+
+ if (evt->NumStrings) {
+ args = flb_calloc(PARAM_MAXNUM, sizeof(DWORD_PTR));
+ if (args == NULL) {
+ flb_errno();
+ flb_free(paths);
+ return -1;
+ }
+
+ tmp = (wchar_t *) ((char *) evt + evt->StringOffset);
+ for (i = 0; i < evt->NumStrings; i++) {
+ args[i] = (DWORD_PTR) tmp;
+ tmp += wcslen(tmp) + 1;
+ }
+ }
+
+ path = paths;
+ wcstok_s(path, L";", &state);
+ while (path) {
+ hfile = LoadLibraryExW(path, NULL, LOAD_LIBRARY_AS_DATAFILE);
+ if (hfile == NULL) {
+ path = wcstok_s(NULL , L";", &state);
+ continue;
+ }
+
+ ret = FormatMessageW(FORMAT_MESSAGE_FROM_HMODULE |
+ FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_ARGUMENT_ARRAY,
+ hfile, /* lpSource */
+ evt->EventID, /* dwMessageId */
+ 0, /* dwLanguageId */
+ (LPWSTR) &msg,/* lpBuffer */
+ 0, /* nSize */
+ (va_list *) args);
+ if (ret > 0) {
+ ret = pack_wstr(mp_pck, msg, ctx->use_ansi);
+ LocalFree(msg);
+ FreeLibrary(hfile);
+ flb_free(paths);
+ flb_free(args);
+ return ret;
+ }
+ FreeLibrary(hfile);
+ path = wcstok_s(NULL , L";", &state);
+ }
+
+ flb_free(paths);
+ flb_free(args);
+ return -1;
+}
+
+static void pack_strings(msgpack_packer *mp_pck, PEVENTLOGRECORD evt, int use_ansi)
+{
+ int i;
+ int len;
+ wchar_t *wstr = (wchar_t *) ((char *) evt + evt->StringOffset);
+
+ msgpack_pack_array(mp_pck, evt->NumStrings);
+
+ for (i = 0; i < evt->NumStrings; i++) {
+ if (pack_wstr(mp_pck, wstr, use_ansi)) {
+ pack_nullstr(mp_pck);
+ }
+ wstr += wcslen(wstr) + 1;
+ }
+}
+
+void winlog_pack_event(msgpack_packer *mp_pck, PEVENTLOGRECORD evt,
+ struct winlog_channel *ch, struct winlog_config *ctx)
+{
+ wchar_t *source_name = SRCNAME(evt);
+ wchar_t *computer_name = source_name + wcslen(source_name) + 1;
+ size_t len;
+ int count = 13;
+
+ if (ctx->string_inserts) {
+ count++;
+ }
+
+ msgpack_pack_array(mp_pck, 2);
+ flb_pack_time_now(mp_pck);
+
+ msgpack_pack_map(mp_pck, count);
+
+ /* RecordNumber */
+ msgpack_pack_str(mp_pck, 12);
+ msgpack_pack_str_body(mp_pck, "RecordNumber", 12);
+ msgpack_pack_uint32(mp_pck, evt->RecordNumber);
+
+ /* TimeGenerated */
+ msgpack_pack_str(mp_pck, 13);
+ msgpack_pack_str_body(mp_pck, "TimeGenerated", 13);
+ if (pack_time(mp_pck, evt->TimeGenerated)) {
+ flb_plg_error(ctx->ins, "invalid TimeGenerated %i", evt->TimeGenerated);
+ pack_nullstr(mp_pck);
+ }
+
+ /* TimeWritten */
+ msgpack_pack_str(mp_pck, 11);
+ msgpack_pack_str_body(mp_pck, "TimeWritten", 11);
+ if (pack_time(mp_pck, evt->TimeWritten)) {
+ flb_plg_error(ctx->ins, "invalid TimeWritten %i", evt->TimeWritten);
+ pack_nullstr(mp_pck);
+ }
+
+ /* EventId */
+ msgpack_pack_str(mp_pck, 7);
+ msgpack_pack_str_body(mp_pck, "EventID", 7);
+ msgpack_pack_uint16(mp_pck, evt->EventID & 0xffff);
+
+ /* Qualifiers */
+ msgpack_pack_str(mp_pck, 10);
+ msgpack_pack_str_body(mp_pck, "Qualifiers", 10);
+ msgpack_pack_uint16(mp_pck, evt->EventID >> 16);
+
+ /* EventType */
+ msgpack_pack_str(mp_pck, 9);
+ msgpack_pack_str_body(mp_pck, "EventType", 9);
+ if (pack_event_type(mp_pck, evt->EventType)) {
+ flb_plg_error(ctx->ins, "invalid EventType %i", evt->EventType);
+ pack_nullstr(mp_pck);
+ }
+
+ /* EventCategory */
+ msgpack_pack_str(mp_pck, 13);
+ msgpack_pack_str_body(mp_pck, "EventCategory", 13);
+ msgpack_pack_uint16(mp_pck, evt->EventCategory);
+
+ /* Channel */
+ len = strlen(ch->name);
+ msgpack_pack_str(mp_pck, 7);
+ msgpack_pack_str_body(mp_pck, "Channel", 7);
+ msgpack_pack_str(mp_pck, len);
+ msgpack_pack_str_body(mp_pck, ch->name, len);
+
+ /* Source Name */
+ msgpack_pack_str(mp_pck, 10);
+ msgpack_pack_str_body(mp_pck, "SourceName", 10);
+ if (pack_wstr(mp_pck, source_name, ctx->use_ansi)) {
+ flb_plg_error(ctx->ins, "invalid SourceName '%ls'", source_name);
+ pack_nullstr(mp_pck);
+ }
+
+ /* Computer Name */
+ msgpack_pack_str(mp_pck, 12);
+ msgpack_pack_str_body(mp_pck, "ComputerName", 12);
+ if (pack_wstr(mp_pck, computer_name, ctx->use_ansi)) {
+ flb_plg_error(ctx->ins, "invalid ComputerName '%ls'", computer_name);
+ pack_nullstr(mp_pck);
+ }
+
+ /* Event-specific Data */
+ msgpack_pack_str(mp_pck, 4);
+ msgpack_pack_str_body(mp_pck, "Data", 4);
+ if (pack_binary(mp_pck, BINDATA(evt), evt->DataLength)) {
+ pack_nullstr(mp_pck);
+ }
+
+ /* Sid */
+ msgpack_pack_str(mp_pck, 3);
+ msgpack_pack_str_body(mp_pck, "Sid", 3);
+ if (pack_sid(mp_pck, evt, ctx)) {
+ pack_nullstr(mp_pck);
+ }
+
+ /* Message */
+ msgpack_pack_str(mp_pck, 7);
+ msgpack_pack_str_body(mp_pck, "Message", 7);
+ if (pack_message(mp_pck, evt, ch, ctx)) {
+ pack_nullstr(mp_pck);
+ }
+
+ /* StringInserts (optional) */
+ if (ctx->string_inserts) {
+ msgpack_pack_str(mp_pck, 13);
+ msgpack_pack_str_body(mp_pck, "StringInserts", 13);
+ pack_strings(mp_pck, evt, ctx->use_ansi);
+ }
+}
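+
+/*
+ * For reference, each packed record is a map with the keys below; the values
+ * shown are hypothetical and rendered as JSON only for readability:
+ *
+ *   {"RecordNumber": 1234, "TimeGenerated": "2022-01-01 00:00:00 +0000",
+ *    "TimeWritten": "2022-01-01 00:00:00 +0000", "EventID": 7036,
+ *    "Qualifiers": 16384, "EventType": "Information", "EventCategory": 0,
+ *    "Channel": "System", "SourceName": "Service Control Manager",
+ *    "ComputerName": "HOSTNAME", "Data": "", "Sid": "", "Message": "...",
+ *    "StringInserts": ["..."]}
+ */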
diff --git a/src/fluent-bit/plugins/in_winlog/winlog.c b/src/fluent-bit/plugins/in_winlog/winlog.c
new file mode 100644
index 000000000..b6064b1f1
--- /dev/null
+++ b/src/fluent-bit/plugins/in_winlog/winlog.c
@@ -0,0 +1,300 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_sqldb.h>
+#include <fluent-bit/flb_input.h>
+#include "winlog.h"
+
+struct winlog_channel *winlog_open(const char *channel)
+{
+ struct winlog_channel *ch;
+
+ ch = flb_calloc(1, sizeof(struct winlog_channel));
+ if (!ch) {
+ flb_errno();
+ return NULL;
+ }
+
+ ch->name = flb_strdup(channel);
+ if (!ch->name) {
+ flb_errno();
+ flb_free(ch);
+ return NULL;
+ }
+
+ ch->h = OpenEventLogA(NULL, channel);
+ if (!ch->h) {
+ flb_error("[in_winlog] cannot open '%s' (%i)", channel, GetLastError());
+ flb_free(ch->name);
+ flb_free(ch);
+ return NULL;
+ }
+
+ return ch;
+}
+
+void winlog_close(struct winlog_channel *ch)
+{
+ flb_free(ch->name);
+ CloseEventLog(ch->h);
+ flb_free(ch);
+}
+
+/*
+ * This routine is called when the Windows Event Log is cleared
+ * while reading (e.g. by running Clear-EventLog in PowerShell).
+ *
+ * In such a case, the only sensible thing to do is to reopen the
+ * channel and start reading from the beginning.
+ */
+int winlog_on_cleared(struct winlog_channel *ch)
+{
+ HANDLE h;
+
+ h = OpenEventLogA(NULL, ch->name);
+ if (!h) {
+ flb_error("[in_winlog] cannot open '%s' (%i)", ch->name, GetLastError());
+ return -1;
+ }
+
+ if (ch->h) {
+ CloseEventLog(ch->h);
+ }
+
+ ch->h = h;
+ ch->seek = 0;
+ return 0;
+}
+
+
+/*
+ * ReadEventLog() has a known bug where SEEK_READ fails when the log file
+ * is too big.
+ *
+ * winlog_seek() works around the issue by emulating a seek: it reads the
+ * stream sequentially until it reaches the target record.
+ *
+ * https://support.microsoft.com/en-hk/help/177199/
+ */
+static int winlog_seek(struct winlog_channel *ch, char *buf,
+ unsigned int size, unsigned int *read)
+{
+ char *p;
+ char *end;
+ PEVENTLOGRECORD evt;
+
+ ch->seek = 0;
+ while (1) {
+ if (winlog_read(ch, buf, size, read)) {
+ return -1;
+ }
+ if (*read == 0) {
+ flb_trace("[in_winlog] seek '%s' to EOF", ch->name);
+ return 0;
+ }
+
+ p = buf;
+ end = buf + *read;
+ while (p < end) {
+ evt = (PEVENTLOGRECORD) p;
+
+ /* If the record is newer than the last record we've read,
+ * stop immediately.
+ */
+ if (evt->TimeWritten > ch->time_written) {
+ *read = (end - p);
+ memmove(buf, p, *read);
+ flb_trace("[in_winlog] seek '%s' to RecordNumber=%u (time)",
+ ch->name, evt->RecordNumber);
+ return 0;
+ }
+ if (evt->TimeWritten == ch->time_written) {
+
+ /* If the record was written at the same time, compare
+ * the record number.
+ *
+                 * Note: since Windows occasionally resets RecordNumber,
+                 * this comparison is not completely reliable.
+ */
+ if (evt->RecordNumber > ch->record_number) {
+ *read = (end - p);
+ memmove(buf, p, *read);
+ flb_trace("[in_winlog] seek '%s' to RecordNumber=%u",
+ ch->name, evt->RecordNumber);
+ return 0;
+ }
+ }
+ p += evt->Length;
+ }
+ }
+}
+
+/*
+ * Read from an open Windows Event Log channel.
+ */
+int winlog_read(struct winlog_channel *ch, char *buf, unsigned int size,
+ unsigned int *read)
+{
+ unsigned int flags;
+ unsigned int req;
+ unsigned int err;
+
+ if (ch->seek) {
+ flags = EVENTLOG_SEEK_READ;
+ } else {
+ flags = EVENTLOG_SEQUENTIAL_READ | EVENTLOG_FORWARDS_READ;
+ }
+
+ /*
+ * Note: ReadEventLogW() ignores `ch->record_number` (dwRecordOffset)
+ * if EVENTLOG_SEEK_READ is not set.
+ */
+ if (!ReadEventLogW(ch->h, flags, ch->record_number, buf, size, read, &req)) {
+ switch (err = GetLastError()) {
+ case ERROR_HANDLE_EOF:
+ break;
+ case ERROR_INVALID_PARAMETER:
+ return winlog_seek(ch, buf, size, read);
+ case ERROR_EVENTLOG_FILE_CHANGED:
+            flb_info("[in_winlog] channel '%s' was cleared, reopening it", ch->name);
+ return winlog_on_cleared(ch);
+ default:
+ flb_error("[in_winlog] cannot read '%s' (%i)", ch->name, err);
+ return -1;
+ }
+ }
+ ch->seek = 0;
+ return 0;
+}
+
+/*
+ * Open multiple channels at once. The return value is a linked
+ * list of winlog_channel objects.
+ *
+ * "channels" are comma-separated names like "Setup,Security".
+ */
+struct mk_list *winlog_open_all(const char *channels)
+{
+ char *tmp;
+ char *channel;
+ char *state;
+ struct winlog_channel *ch;
+ struct mk_list *list;
+
+ tmp = flb_strdup(channels);
+ if (!tmp) {
+ flb_errno();
+ return NULL;
+ }
+
+ list = flb_malloc(sizeof(struct mk_list));
+ if (!list) {
+ flb_errno();
+ flb_free(tmp);
+ return NULL;
+ }
+ mk_list_init(list);
+
+ channel = strtok_s(tmp , ",", &state);
+ while (channel) {
+ ch = winlog_open(channel);
+ if (!ch) {
+ flb_free(tmp);
+ winlog_close_all(list);
+ return NULL;
+ }
+ mk_list_add(&ch->_head, list);
+ channel = strtok_s(NULL, ",", &state);
+ }
+ flb_free(tmp);
+ return list;
+}
+
+void winlog_close_all(struct mk_list *list)
+{
+ struct winlog_channel *ch;
+ struct mk_list *head;
+ struct mk_list *tmp;
+
+ mk_list_foreach_safe(head, tmp, list) {
+ ch = mk_list_entry(head, struct winlog_channel, _head);
+ mk_list_del(&ch->_head);
+ winlog_close(ch);
+ }
+ flb_free(list);
+}
+
+/*
+ * Callback function for flb_sqldb_query().
+ */
+static int winlog_sqlite_callback(void *data, int argc, char **argv, char **cols)
+{
+ struct winlog_sqlite_record *p = data;
+
+ p->name = argv[0];
+ p->record_number = (unsigned int) strtoul(argv[1], NULL, 10);
+ p->time_written = (unsigned int) strtoul(argv[2], NULL, 10);
+ p->created = (unsigned int) strtoul(argv[3], NULL, 10);
+ return 0;
+}
+
+/*
+ * Load the read offset from SQLite DB.
+ */
+int winlog_sqlite_load(struct winlog_channel *ch, struct flb_sqldb *db)
+{
+ int ret;
+ char query[1024];
+ struct winlog_sqlite_record record = {0};
+
+ snprintf(query, sizeof(query) - 1, SQL_GET_CHANNEL, ch->name);
+
+ ret = flb_sqldb_query(db, query, winlog_sqlite_callback, &record);
+ if (ret == FLB_ERROR) {
+ return -1;
+ }
+
+ if (record.name) {
+ ch->record_number = record.record_number;
+ ch->time_written = record.time_written;
+ ch->seek = 1;
+ }
+ return 0;
+}
+
+/*
+ * Save the read offset into SQLite DB.
+ */
+int winlog_sqlite_save(struct winlog_channel *ch, struct flb_sqldb *db)
+{
+ int ret;
+ char query[1024];
+
+ snprintf(query, sizeof(query) - 1, SQL_UPDATE_CHANNEL,
+ ch->name, ch->record_number, ch->time_written, time(NULL));
+
+ ret = flb_sqldb_query(db, query, NULL, NULL);
+ if (ret == FLB_ERROR) {
+ return -1;
+ }
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/in_winlog/winlog.h b/src/fluent-bit/plugins/in_winlog/winlog.h
new file mode 100644
index 000000000..007a996ed
--- /dev/null
+++ b/src/fluent-bit/plugins/in_winlog/winlog.h
@@ -0,0 +1,110 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_WINLOG_H
+#define FLB_WINLOG_H
+
+struct winlog_config {
+ unsigned int interval_sec;
+ unsigned int interval_nsec;
+ unsigned int bufsize;
+ int string_inserts;
+ int use_ansi;
+ char *buf;
+ struct mk_list *active_channel;
+ struct flb_sqldb *db;
+ flb_pipefd_t coll_fd;
+ struct flb_input_instance *ins;
+};
+
+struct winlog_channel {
+ HANDLE h;
+ char *name;
+ unsigned int record_number;
+ unsigned int time_written;
+ unsigned int seek;
+ struct mk_list _head;
+};
+
+struct winlog_sqlite_record {
+ char *name;
+ unsigned int record_number;
+ unsigned int time_written;
+ unsigned int created;
+};
+
+/*
+ * Open a Windows Event Log channel.
+ */
+struct winlog_channel *winlog_open(const char *channel);
+void winlog_close(struct winlog_channel *ch);
+
+/*
+ * Read records from a channel.
+ */
+int winlog_read(struct winlog_channel *ch, char *buf, unsigned int size, unsigned int *read);
+
+/*
+ * A bulk API to handle multiple channels at once using mk_list.
+ *
+ * "channels" are comma-separated names like "Setup,Security".
+ */
+struct mk_list *winlog_open_all(const char *channels);
+void winlog_close_all(struct mk_list *list);
+
+void winlog_pack_event(msgpack_packer *mp_pck, PEVENTLOGRECORD evt,
+ struct winlog_channel *ch, struct winlog_config *ctx);
+
+/*
+ * Save the read offset to disk.
+ */
+int winlog_sqlite_load(struct winlog_channel *ch, struct flb_sqldb *db);
+int winlog_sqlite_save(struct winlog_channel *ch, struct flb_sqldb *db);
+
+/*
+ * SQL templates
+ */
+#define SQL_CREATE_CHANNELS \
+ "CREATE TABLE IF NOT EXISTS in_winlog_channels (" \
+ " name TEXT PRIMARY KEY," \
+ " record_number INTEGER," \
+ " time_written INTEGER," \
+ " created INTEGER" \
+ ");"
+
+#define SQL_GET_CHANNEL \
+ "SELECT name, record_number, time_written, created" \
+ " FROM in_winlog_channels WHERE name = '%s';"
+
+/*
+ * This uses UPSERT, i.e. it executes INSERT first and falls back to
+ * UPDATE if the entry already exists. It saves the trouble of
+ * doing an existence check manually.
+ *
+ * https://www.sqlite.org/lang_UPSERT.html
+ */
+#define SQL_UPDATE_CHANNEL \
+ "INSERT INTO in_winlog_channels" \
+ " (name, record_number, time_written, created)" \
+ " VALUES ('%s', %u, %u, %llu)" \
+ " ON CONFLICT(name) DO UPDATE" \
+ " SET record_number = excluded.record_number," \
+ " time_written = excluded.time_written"
+
+#endif
diff --git a/src/fluent-bit/plugins/in_winstat/CMakeLists.txt b/src/fluent-bit/plugins/in_winstat/CMakeLists.txt
new file mode 100644
index 000000000..ed36a5615
--- /dev/null
+++ b/src/fluent-bit/plugins/in_winstat/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ winstat.c)
+
+FLB_PLUGIN(in_winstat "${src}" "")
diff --git a/src/fluent-bit/plugins/in_winstat/winstat.c b/src/fluent-bit/plugins/in_winstat/winstat.c
new file mode 100644
index 000000000..b6fd2e173
--- /dev/null
+++ b/src/fluent-bit/plugins/in_winstat/winstat.c
@@ -0,0 +1,340 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_input.h>
+#include <fluent-bit/flb_input_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_pack.h>
+
+#include <psapi.h>
+
+struct stat_cache {
+ int64_t processes;
+ int64_t threads;
+ int64_t handles;
+ int64_t commit_total;
+ int64_t commit_limit;
+ int64_t kernel_total;
+ int64_t kernel_paged;
+ int64_t kernel_nonpaged;
+ int64_t physical_available;
+ int64_t physical_total;
+ int64_t physical_used;
+ uint64_t idletime;
+ uint64_t kerneltime;
+ uint64_t usertime;
+ uint64_t cpu_idle;
+ uint64_t cpu_user;
+ uint64_t cpu_kernel;
+ float cpu_utilization;
+ char uptime_human[32];
+ uint64_t uptime_msec;
+};
+
+struct flb_winstat {
+ int coll_fd;
+ int interval_sec;
+ int interval_nsec;
+ struct flb_input_instance *ins;
+ struct stat_cache cache;
+};
+
+#define filetime64(ft) \
+ ((((uint64_t) (ft)->dwHighDateTime) << 32) + (ft)->dwLowDateTime)
+
+#define KB(n, page) ((n) * (page) / 1024)
+
+static int query_processor(struct stat_cache *cache)
+{
+ uint64_t prev_idletime = cache->idletime;
+ uint64_t prev_usertime = cache->usertime;
+ uint64_t prev_kerneltime = cache->kerneltime;
+ FILETIME idletime;
+ FILETIME kerneltime;
+ FILETIME usertime;
+ uint64_t total;
+
+ if (!GetSystemTimes(&idletime, &kerneltime, &usertime)) {
+ return -1;
+ }
+ cache->idletime = filetime64(&idletime);
+ cache->kerneltime = filetime64(&kerneltime) - cache->idletime;
+ cache->usertime = filetime64(&usertime);
+
+ cache->cpu_idle = cache->idletime - prev_idletime;
+ cache->cpu_user = cache->usertime - prev_usertime;
+ cache->cpu_kernel = cache->kerneltime - prev_kerneltime;
+
+ total = cache->cpu_user + cache->cpu_kernel + cache->cpu_idle;
+ cache->cpu_utilization = 100 - 100.0 * cache->cpu_idle / total;
+
+ return 0;
+}
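+
+/*
+ * Worked example (hypothetical deltas, in 100-ns FILETIME units): if between
+ * two samples cpu_idle = 700, cpu_kernel = 200 and cpu_user = 100, then
+ * total = 1000 and cpu_utilization = 100 - 100.0 * 700 / 1000 = 30.0 (%).
+ */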
+
+static int query_performance_info(struct stat_cache *cache)
+{
+ PERFORMANCE_INFORMATION perf;
+
+ if (!GetPerformanceInfo(&perf, sizeof(perf))) {
+ return -1;
+ }
+
+ cache->processes = perf.ProcessCount;
+ cache->threads = perf.ThreadCount;
+ cache->handles = perf.HandleCount;
+
+ cache->physical_total = KB(perf.PhysicalTotal, perf.PageSize);
+ cache->physical_available = KB(perf.PhysicalAvailable, perf.PageSize);
+ cache->physical_used = cache->physical_total - cache->physical_available;
+
+ cache->commit_total = KB(perf.CommitTotal, perf.PageSize);
+ cache->commit_limit = KB(perf.CommitLimit, perf.PageSize);
+
+ cache->kernel_total = KB(perf.KernelTotal, perf.PageSize);
+ cache->kernel_paged = KB(perf.KernelPaged, perf.PageSize);
+ cache->kernel_nonpaged = KB(perf.KernelNonpaged, perf.PageSize);
+ return 0;
+}
+
+static int query_uptime(struct stat_cache *cache)
+{
+ int ret;
+
+ cache->uptime_msec = GetTickCount64();
+
+ /* Emulate Windows Task Manager (DD:HH:MM:SS) */
+ ret = sprintf_s(cache->uptime_human, 32, "%d:%02d:%02d:%02d",
+ (int) (cache->uptime_msec / 1000 / 60 / 60 / 24),
+ (int) ((cache->uptime_msec / 1000 / 60 / 60) % 24),
+ (int) ((cache->uptime_msec / 1000 / 60) % 60),
+ (int) ((cache->uptime_msec / 1000) % 60));
+ if (ret == -1) {
+ return -1;
+ }
+ return 0;
+}
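+
+/*
+ * Worked example (hypothetical uptime): GetTickCount64() = 90061000 msec is
+ * 1 day, 1 hour, 1 minute and 1 second, so uptime_human becomes "1:01:01:01".
+ */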
+
+/*
+ * Input Plugin API
+ */
+static int in_winstat_collect(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ struct flb_winstat *ctx = data;
+ struct stat_cache *cache = &ctx->cache;
+ int uptime_len;
+
+ msgpack_packer mp_pck;
+ msgpack_sbuffer mp_sbuf;
+
+ /* Query Windows metrics */
+ if (query_performance_info(cache)) {
+ flb_plg_error(ctx->ins, "cannot query Performance info");
+ return -1;
+ }
+
+ if (query_processor(cache)) {
+ flb_plg_error(ctx->ins, "cannot query Processor info");
+ return -1;
+ }
+
+ if (query_uptime(cache)) {
+ flb_plg_error(ctx->ins, "cannot query uptime");
+ return -1;
+ }
+
+ /* Pack the data */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ msgpack_pack_array(&mp_pck, 2);
+ flb_pack_time_now(&mp_pck);
+ msgpack_pack_map(&mp_pck, 17);
+
+ /* Processes/Threads/Handles */
+ msgpack_pack_str(&mp_pck, 9);
+ msgpack_pack_str_body(&mp_pck, "processes", 9);
+ msgpack_pack_int64(&mp_pck, cache->processes);
+
+ msgpack_pack_str(&mp_pck, 7);
+ msgpack_pack_str_body(&mp_pck, "threads", 7);
+ msgpack_pack_int64(&mp_pck, cache->threads);
+
+ msgpack_pack_str(&mp_pck, 7);
+ msgpack_pack_str_body(&mp_pck, "handles", 7);
+ msgpack_pack_int64(&mp_pck, cache->handles);
+
+ /* System performance info */
+ msgpack_pack_str(&mp_pck, 14);
+ msgpack_pack_str_body(&mp_pck, "physical_total", 14);
+ msgpack_pack_int64(&mp_pck, cache->physical_total);
+
+ msgpack_pack_str(&mp_pck, 13);
+ msgpack_pack_str_body(&mp_pck, "physical_used", 13);
+ msgpack_pack_int64(&mp_pck, cache->physical_used);
+
+ msgpack_pack_str(&mp_pck, 18);
+ msgpack_pack_str_body(&mp_pck, "physical_available", 18);
+ msgpack_pack_int64(&mp_pck, cache->physical_available);
+
+ msgpack_pack_str(&mp_pck, 12);
+ msgpack_pack_str_body(&mp_pck, "commit_total", 12);
+ msgpack_pack_int64(&mp_pck, cache->commit_total);
+
+ msgpack_pack_str(&mp_pck, 12);
+ msgpack_pack_str_body(&mp_pck, "commit_limit", 12);
+ msgpack_pack_int64(&mp_pck, cache->commit_limit);
+
+ msgpack_pack_str(&mp_pck, 12);
+ msgpack_pack_str_body(&mp_pck, "kernel_total", 12);
+ msgpack_pack_int64(&mp_pck, cache->kernel_total);
+
+ msgpack_pack_str(&mp_pck, 12);
+ msgpack_pack_str_body(&mp_pck, "kernel_paged", 12);
+ msgpack_pack_int64(&mp_pck, cache->kernel_paged);
+
+ msgpack_pack_str(&mp_pck, 15);
+ msgpack_pack_str_body(&mp_pck, "kernel_nonpaged", 15);
+ msgpack_pack_int64(&mp_pck, cache->kernel_nonpaged);
+
+ /* Processors */
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "cpu_user", 8);
+ msgpack_pack_uint64(&mp_pck, cache->cpu_user);
+
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "cpu_idle", 8);
+ msgpack_pack_uint64(&mp_pck, cache->cpu_idle);
+
+ msgpack_pack_str(&mp_pck, 10);
+ msgpack_pack_str_body(&mp_pck, "cpu_kernel", 10);
+ msgpack_pack_uint64(&mp_pck, cache->cpu_kernel);
+
+ msgpack_pack_str(&mp_pck, 15);
+ msgpack_pack_str_body(&mp_pck, "cpu_utilization", 15);
+ msgpack_pack_float(&mp_pck, cache->cpu_utilization);
+
+ /* Uptime */
+ msgpack_pack_str(&mp_pck, 11);
+ msgpack_pack_str_body(&mp_pck, "uptime_msec", 11);
+ msgpack_pack_uint64(&mp_pck, cache->uptime_msec);
+
+ uptime_len = strlen(cache->uptime_human);
+ msgpack_pack_str(&mp_pck, 12);
+ msgpack_pack_str_body(&mp_pck, "uptime_human", 12);
+ msgpack_pack_str(&mp_pck, uptime_len);
+ msgpack_pack_str_body(&mp_pck, cache->uptime_human, uptime_len);
+
+ flb_input_log_append(in, NULL, 0, mp_sbuf.data, mp_sbuf.size);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ return 0;
+}
+
+static int in_winstat_init(struct flb_input_instance *in,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct flb_winstat *ctx;
+
+ /* Initialize context */
+ ctx = flb_calloc(1, sizeof(struct flb_winstat));
+ if (!ctx) {
+ return -1;
+ }
+ ctx->ins = in;
+
+ /* Load the config map */
+ ret = flb_input_config_map_set(in, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Preload CPU usage */
+ if (query_processor(&ctx->cache)) {
+ flb_plg_warn(ctx->ins, "cannot preload CPU times.");
+ }
+
+ /* Set the context */
+ flb_input_set_context(in, ctx);
+
+ /* Set the collector */
+ ret = flb_input_set_collector_time(in,
+ in_winstat_collect,
+ ctx->interval_sec,
+ ctx->interval_nsec,
+ config);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not set up a collector");
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->coll_fd = ret;
+
+ return 0;
+}
+
+static int in_winstat_exit(void *data, struct flb_config *config)
+{
+ struct flb_winstat *ctx = data;
+ flb_free(ctx);
+ return 0;
+}
+
+static void in_winstat_pause(void *data, struct flb_config *config)
+{
+ struct flb_winstat *ctx = data;
+ flb_input_collector_pause(ctx->coll_fd, ctx->ins);
+}
+
+static void in_winstat_resume(void *data, struct flb_config *config)
+{
+ struct flb_winstat *ctx = data;
+ flb_input_collector_resume(ctx->coll_fd, ctx->ins);
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_TIME, "interval_sec", "1s",
+ 0, FLB_TRUE, offsetof(struct flb_winstat, interval_sec),
+ "Set the emitter interval"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "interval_nsec", "0",
+ 0, FLB_TRUE, offsetof(struct flb_winstat, interval_nsec),
+ "Set the emitter interval (sub seconds)"
+ },
+ {0}
+};
+
+struct flb_input_plugin in_winstat_plugin = {
+ .name = "winstat",
+ .description = "Windows System Statistics",
+ .cb_init = in_winstat_init,
+ .cb_pre_run = NULL,
+ .cb_collect = in_winstat_collect,
+ .cb_flush_buf = NULL,
+ .cb_pause = in_winstat_pause,
+ .cb_resume = in_winstat_resume,
+ .cb_exit = in_winstat_exit,
+ .config_map = config_map
+};
diff --git a/src/fluent-bit/plugins/out_azure/CMakeLists.txt b/src/fluent-bit/plugins/out_azure/CMakeLists.txt
new file mode 100644
index 000000000..d8a5ba5e7
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ azure_conf.c
+ azure.c
+ )
+
+FLB_PLUGIN(out_azure "${src}" "")
diff --git a/src/fluent-bit/plugins/out_azure/azure.c b/src/fluent-bit/plugins/out_azure/azure.c
new file mode 100644
index 000000000..d4322fb65
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure/azure.c
@@ -0,0 +1,452 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_base64.h>
+#include <fluent-bit/flb_crypto.h>
+#include <fluent-bit/flb_hmac.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_ra_key.h>
+#include <msgpack.h>
+
+#include "azure.h"
+#include "azure_conf.h"
+
+static int cb_azure_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct flb_azure *ctx;
+
+ ctx = flb_azure_conf_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "configuration failed");
+ return -1;
+ }
+
+ flb_output_set_context(ins, ctx);
+ return 0;
+}
+
+static int azure_format(const void *in_buf, size_t in_bytes,
+ flb_sds_t tag, flb_sds_t *tag_val_out,
+ char **out_buf, size_t *out_size,
+ struct flb_azure *ctx)
+{
+ int i;
+ int array_size = 0;
+ int map_size;
+ double t;
+ msgpack_object map;
+ msgpack_object k;
+ msgpack_object v;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ msgpack_sbuffer tmp_sbuf;
+ msgpack_packer tmp_pck;
+ flb_sds_t record;
+ char time_formatted[32];
+ size_t s;
+ struct tm tms;
+ int len;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ flb_sds_t tmp = NULL;
+ int ret;
+
+ /* Count number of items */
+ array_size = flb_mp_count(in_buf, in_bytes);
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) in_buf, in_bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+ msgpack_pack_array(&mp_pck, array_size);
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&tmp_sbuf);
+ msgpack_packer_init(&tmp_pck, &tmp_sbuf, msgpack_sbuffer_write);
+
+ map = *log_event.body;
+ map_size = map.via.map.size;
+
+ if (ctx->log_type_key) {
+ tmp = flb_ra_translate(ctx->ra_prefix_key,
+ tag, flb_sds_len(tag),
+ map, NULL);
+ if (!tmp) {
+ flb_plg_error(ctx->ins, "Tagged record translation failed!");
+ }
+ else if (flb_sds_is_empty(tmp)) {
+ flb_plg_warn(ctx->ins, "Record accessor key not matched");
+ flb_sds_destroy(tmp);
+ }
+ else {
+ /* tag_val_out must be destroyed by the caller */
+ *tag_val_out = tmp;
+ }
+ }
+
+ msgpack_pack_map(&mp_pck, map_size + 1);
+
+ /* Append the time key */
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->time_key));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->time_key,
+ flb_sds_len(ctx->time_key));
+
+ if (ctx->time_generated == FLB_TRUE) {
+ /* Append the time value as ISO 8601 */
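+ /* e.g. "2024-03-01T12:34:56.789Z" (UTC with millisecond precision) */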
+ gmtime_r(&log_event.timestamp.tm.tv_sec, &tms);
+
+ s = strftime(time_formatted, sizeof(time_formatted) - 1,
+ FLB_PACK_JSON_DATE_ISO8601_FMT, &tms);
+
+ len = snprintf(time_formatted + s,
+ sizeof(time_formatted) - 1 - s,
+ ".%03" PRIu64 "Z",
+ (uint64_t) log_event.timestamp.tm.tv_nsec / 1000000);
+
+ s += len;
+ msgpack_pack_str(&mp_pck, s);
+ msgpack_pack_str_body(&mp_pck, time_formatted, s);
+ }
+ else {
+ /* Append the time value as a Unix timestamp with fractional seconds */
+ t = flb_time_to_double(&log_event.timestamp);
+
+ msgpack_pack_double(&mp_pck, t);
+ }
+
+ /* Append original map k/v */
+ for (i = 0; i < map_size; i++) {
+ k = map.via.map.ptr[i].key;
+ v = map.via.map.ptr[i].val;
+
+ msgpack_pack_object(&tmp_pck, k);
+ msgpack_pack_object(&tmp_pck, v);
+ }
+ msgpack_sbuffer_write(&mp_sbuf, tmp_sbuf.data, tmp_sbuf.size);
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ }
+
+ record = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+ if (!record) {
+ flb_errno();
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ return -1;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ *out_buf = record;
+ *out_size = flb_sds_len(record);
+
+ return 0;
+}
+
+static int build_headers(struct flb_http_client *c,
+ flb_sds_t log_type,
+ size_t content_length,
+ struct flb_azure *ctx)
+{
+ int len;
+ char *auth;
+ char tmp[256];
+ time_t t;
+ size_t size;
+ size_t olen;
+ flb_sds_t rfc1123date;
+ flb_sds_t str_hash;
+ struct tm tm = {0};
+ unsigned char hmac_hash[32] = {0};
+ int result;
+
+ /* Format Date */
+ rfc1123date = flb_sds_create_size(32);
+ if (!rfc1123date) {
+ flb_errno();
+ return -1;
+ }
+
+ t = time(NULL);
+ if (!gmtime_r(&t, &tm)) {
+ flb_errno();
+ flb_sds_destroy(rfc1123date);
+ return -1;
+ }
+ size = strftime(rfc1123date,
+ flb_sds_alloc(rfc1123date) - 1,
+ "%a, %d %b %Y %H:%M:%S GMT", &tm);
+ if (size <= 0) {
+ flb_errno();
+ flb_sds_destroy(rfc1123date);
+ return -1;
+ }
+ flb_sds_len_set(rfc1123date, size);
+
+ /* Compose source string for the hash */
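+ /*
+ * Sketch of the string to sign for the Log Analytics shared key scheme,
+ * mirroring the concatenation below:
+ *
+ *   POST\n{content_length}\napplication/json\nx-ms-date:{RFC 1123 date}\n/api/logs
+ *
+ * The string is signed with HMAC-SHA256 using the base64-decoded shared key
+ * and the base64-encoded result becomes the
+ * 'SharedKey {customer_id}:{signature}' Authorization header value.
+ */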
+ str_hash = flb_sds_create_size(256);
+ if (!str_hash) {
+ flb_errno();
+ flb_sds_destroy(rfc1123date);
+ return -1;
+ }
+
+ len = snprintf(tmp, sizeof(tmp) - 1, "%zu\n", content_length);
+ flb_sds_cat(str_hash, "POST\n", 5);
+ flb_sds_cat(str_hash, tmp, len);
+ flb_sds_cat(str_hash, "application/json\n", 17);
+ flb_sds_cat(str_hash, "x-ms-date:", 10);
+ flb_sds_cat(str_hash, rfc1123date, flb_sds_len(rfc1123date));
+ flb_sds_cat(str_hash, "\n", 1);
+ flb_sds_cat(str_hash, FLB_AZURE_RESOURCE, sizeof(FLB_AZURE_RESOURCE) - 1);
+
+ /* Authorization signature */
+ result = flb_hmac_simple(FLB_HASH_SHA256,
+ (unsigned char *) ctx->dec_shared_key,
+ flb_sds_len(ctx->dec_shared_key),
+ (unsigned char *) str_hash,
+ flb_sds_len(str_hash),
+ hmac_hash,
+ sizeof(hmac_hash));
+
+ if (result != FLB_CRYPTO_SUCCESS) {
+ flb_sds_destroy(rfc1123date);
+ flb_sds_destroy(str_hash);
+ return -1;
+ }
+
+ /* Encoded hash */
+ result = flb_base64_encode((unsigned char *) &tmp, sizeof(tmp) - 1, &olen,
+ hmac_hash, sizeof(hmac_hash));
+ tmp[olen] = '\0';
+
+ /* Append headers */
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ flb_http_add_header(c, "Log-Type", 8,
+ log_type, flb_sds_len(log_type));
+ flb_http_add_header(c, "Content-Type", 12, "application/json", 16);
+ flb_http_add_header(c, "x-ms-date", 9, rfc1123date,
+ flb_sds_len(rfc1123date));
+ if (ctx->time_generated == FLB_TRUE) {
+ /* Use time value as time-generated within azure */
+ flb_http_add_header(c, "time-generated-field", 20, ctx->time_key, flb_sds_len(ctx->time_key));
+ }
+
+ size = 32 + flb_sds_len(ctx->customer_id) + olen;
+ auth = flb_malloc(size);
+ if (!auth) {
+ flb_errno();
+ flb_sds_destroy(rfc1123date);
+ flb_sds_destroy(str_hash);
+ return -1;
+ }
+
+
+ len = snprintf(auth, size, "SharedKey %s:%s",
+ ctx->customer_id, tmp);
+ flb_http_add_header(c, "Authorization", 13, auth, len);
+
+ /* release resources */
+ flb_sds_destroy(rfc1123date);
+ flb_sds_destroy(str_hash);
+ flb_free(auth);
+
+ return 0;
+}
+
+static void cb_azure_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ size_t b_sent;
+ char *buf_data;
+ size_t buf_size;
+ struct flb_azure *ctx = out_context;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ flb_sds_t payload;
+ flb_sds_t final_log_type = NULL;
+ (void) i_ins;
+ (void) config;
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Convert binary logs into a JSON payload */
+ ret = azure_format(event_chunk->data, event_chunk->size,
+ event_chunk->tag, &final_log_type, &buf_data, &buf_size, ctx);
+ /* If no value could be resolved via log_type_key, fall back to log_type */
+ if (!final_log_type) {
+ final_log_type = ctx->log_type;
+ }
+
+ if (ret == -1) {
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+ payload = (flb_sds_t) buf_data;
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->uri,
+ buf_data, buf_size, NULL, 0, NULL, 0);
+ flb_http_buffer_size(c, FLB_HTTP_DATA_SIZE_MAX);
+
+ /* Append headers and Azure signature */
+ ret = build_headers(c, final_log_type, flb_sds_len(payload), ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error composing signature");
+ flb_sds_destroy(payload);
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ ret = flb_http_do(c, &b_sent);
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "http_do=%i", ret);
+ goto retry;
+ }
+ else {
+ if (c->resp.status >= 200 && c->resp.status <= 299) {
+ flb_plg_info(ctx->ins, "customer_id=%s, HTTP status=%i",
+ ctx->customer_id, c->resp.status);
+ }
+ else {
+ if (c->resp.payload_size > 0) {
+ flb_plg_warn(ctx->ins, "http_status=%i:\n%s",
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "http_status=%i", c->resp.status);
+ }
+ goto retry;
+ }
+ }
+
+ /* Cleanup */
+ if (final_log_type != ctx->log_type) {
+ flb_sds_destroy(final_log_type);
+ }
+ flb_http_client_destroy(c);
+ flb_sds_destroy(payload);
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_OK);
+
+ /* Issue a retry */
+ retry:
+ if (final_log_type != ctx->log_type) {
+ flb_sds_destroy(final_log_type);
+ }
+ flb_http_client_destroy(c);
+ flb_sds_destroy(payload);
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+}
+
+static int cb_azure_exit(void *data, struct flb_config *config)
+{
+ struct flb_azure *ctx = data;
+
+ flb_azure_conf_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "customer_id", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_azure, customer_id),
+ "Customer ID or WorkspaceID string."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "shared_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_azure, shared_key),
+ "The primary or the secondary Connected Sources client authentication key."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "log_type", FLB_AZURE_LOG_TYPE,
+ 0, FLB_TRUE, offsetof(struct flb_azure, log_type),
+ "The name of the event type."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "log_type_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_azure, log_type_key),
+ "If included, the value for this key will be looked upon in the record "
+ "and if present, will over-write the `log_type`. If the key/value "
+ "is not found in the record then the `log_type` option will be used. "
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "time_key", FLB_AZURE_TIME_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_azure, time_key),
+ "Optional parameter to specify the key name where the timestamp will be stored."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "time_generated", "false",
+ 0, FLB_TRUE, offsetof(struct flb_azure, time_generated),
+ "If enabled, the HTTP request header 'time-generated-field' will be included "
+ "so Azure can override the timestamp with the key specified by 'time_key' "
+ "option."
+ },
+
+ /* EOF */
+ {0}
+};
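+
+/*
+ * Minimal configuration sketch (values are illustrative placeholders, not
+ * real credentials):
+ *
+ *   [OUTPUT]
+ *       name        azure
+ *       match       *
+ *       customer_id 00000000-0000-0000-0000-000000000000
+ *       shared_key  <base64 encoded workspace key>
+ */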
+
+struct flb_output_plugin out_azure_plugin = {
+ .name = "azure",
+ .description = "Send events to Azure HTTP Event Collector",
+ .cb_init = cb_azure_init,
+ .cb_flush = cb_azure_flush,
+ .cb_exit = cb_azure_exit,
+
+ /* Configuration */
+ .config_map = config_map,
+
+ /* Plugin flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_azure/azure.h b/src/fluent-bit/plugins/out_azure/azure.h
new file mode 100644
index 000000000..192d41ac8
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure/azure.h
@@ -0,0 +1,62 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_AZURE
+#define FLB_OUT_AZURE
+
+#define FLB_AZURE_API_VERSION "?api-version=2016-04-01"
+#define FLB_AZURE_HOST ".ods.opinsights.azure.com"
+#define FLB_AZURE_PORT 443
+#define FLB_AZURE_RESOURCE "/api/logs"
+#define FLB_AZURE_LOG_TYPE "fluentbit"
+#define FLB_AZURE_TIME_KEY "@timestamp"
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_record_accessor.h>
+
+struct flb_azure {
+ /* account setup */
+ flb_sds_t customer_id;
+ flb_sds_t log_type;
+ flb_sds_t log_type_key;
+ flb_sds_t shared_key;
+ flb_sds_t dec_shared_key;
+
+ /* networking */
+ int port;
+ flb_sds_t host;
+ flb_sds_t uri;
+
+ /* records */
+ flb_sds_t time_key;
+ struct flb_record_accessor *ra_prefix_key;
+
+ /* time_generated: on/off */
+ int time_generated;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *u;
+
+ /* Plugin instance reference */
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_azure/azure_conf.c b/src/fluent-bit/plugins/out_azure/azure_conf.c
new file mode 100644
index 000000000..9f8f8a05f
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure/azure_conf.c
@@ -0,0 +1,219 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_base64.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "azure.h"
+#include "azure_conf.h"
+
+struct flb_azure *flb_azure_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ size_t size;
+ size_t olen;
+ const char *tmp;
+ struct flb_upstream *upstream;
+ struct flb_azure *ctx;
+ struct flb_record_accessor *ra_prefix_key = NULL;
+
+ /* Allocate config context */
+ ctx = flb_calloc(1, sizeof(struct flb_azure));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ /* Set context */
+ flb_output_set_context(ins, ctx);
+
+ /* Load config map */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ return NULL;
+ }
+
+ if (!ctx->customer_id) {
+ flb_plg_error(ctx->ins, "property 'customer_id' is not defined");
+ flb_azure_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* config: 'shared_key' */
+ if (!ctx->shared_key) {
+ flb_plg_error(ctx->ins, "property 'shared_key' is not defined");
+ flb_azure_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* decode shared key */
+ size = flb_sds_len(ctx->shared_key) * 1.2;
+ ctx->dec_shared_key = flb_sds_create_size(size);
+ if (!ctx->dec_shared_key) {
+ flb_errno();
+ flb_azure_conf_destroy(ctx);
+ return NULL;
+ }
+
+ ret = flb_base64_decode((unsigned char *) ctx->dec_shared_key, size,
+ &olen,
+ (unsigned char *) ctx->shared_key,
+ flb_sds_len(ctx->shared_key));
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "error decoding shared_key");
+ flb_azure_conf_destroy(ctx);
+ return NULL;
+ }
+ flb_sds_len_set(ctx->dec_shared_key, olen);
+
+ /* config: 'log_type_key' */
+ if (ctx->log_type_key) {
+ ra_prefix_key = flb_ra_create(ctx->log_type_key, FLB_TRUE);
+
+ if (!ra_prefix_key) {
+ flb_plg_error(ctx->ins, "invalid log_type_key pattern '%s'", ctx->log_type_key);
+ flb_azure_conf_destroy(ctx);
+ return NULL;
+ }
+ else {
+ ctx->ra_prefix_key = ra_prefix_key;
+ }
+ }
+
+ /* Validate hostname given by command line or 'Host' property */
+ if (!ins->host.name && !ctx->customer_id) {
+ flb_plg_error(ctx->ins, "property 'customer_id' is not defined");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Lookup customer id from given host name */
+ if (!ctx->customer_id) {
+ tmp = strchr(ins->host.name, '.');
+ if (!tmp) {
+ flb_plg_error(ctx->ins, "invalid hostname");
+ flb_free(ctx);
+ return NULL;
+ }
+ else {
+ ctx->customer_id = flb_sds_create_len(ins->host.name,
+ tmp - ins->host.name);
+ if (!ctx->customer_id) {
+ flb_errno();
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+ }
+
+ /* Compose real host */
+ ctx->host = flb_sds_create_size(256);
+ if (!ctx->host) {
+ flb_errno();
+ flb_free(ctx);
+ return NULL;
+ }
+
+ if (!ins->host.name) {
+ flb_sds_cat(ctx->host, ctx->customer_id,
+ flb_sds_len(ctx->customer_id));
+ flb_sds_cat(ctx->host, FLB_AZURE_HOST, sizeof(FLB_AZURE_HOST) - 1);
+ }
+ else {
+ if (!strstr(ins->host.name, ctx->customer_id)) {
+ flb_sds_cat(ctx->host, ctx->customer_id,
+ flb_sds_len(ctx->customer_id));
+ if (ins->host.name[0] != '.') {
+ flb_sds_cat(ctx->host, ".", 1);
+ }
+ }
+ flb_sds_cat(ctx->host, ins->host.name, strlen(ins->host.name));
+ }
+
+
+ /* TCP Port */
+ if (ins->host.port == 0) {
+ ctx->port = FLB_AZURE_PORT;
+ }
+ else {
+ ctx->port = ins->host.port;
+ }
+
+ /* Prepare an upstream handler */
+ upstream = flb_upstream_create(config,
+ ctx->host,
+ ctx->port,
+ FLB_IO_TLS,
+ ins->tls);
+ if (!upstream) {
+ flb_plg_error(ctx->ins, "cannot create Upstream context");
+ flb_azure_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->u = upstream;
+ flb_output_upstream_set(ctx->u, ins);
+
+ /* Compose uri */
+ ctx->uri = flb_sds_create_size(1024);
+ if (!ctx->uri) {
+ flb_errno();
+ flb_azure_conf_destroy(ctx);
+ return NULL;
+ }
+ flb_sds_cat(ctx->uri, FLB_AZURE_RESOURCE, sizeof(FLB_AZURE_RESOURCE) - 1);
+ flb_sds_cat(ctx->uri, FLB_AZURE_API_VERSION,
+ sizeof(FLB_AZURE_API_VERSION) - 1);
+
+ flb_plg_info(ctx->ins, "customer_id='%s' host='%s:%i'",
+ ctx->customer_id, ctx->host, ctx->port);
+
+ return ctx;
+}
+
+int flb_azure_conf_destroy(struct flb_azure *ctx)
+{
+ if (!ctx) {
+ return -1;
+ }
+
+ if (ctx->dec_shared_key) {
+ flb_sds_destroy(ctx->dec_shared_key);
+ }
+
+ if (ctx->host) {
+ flb_sds_destroy(ctx->host);
+ }
+ if (ctx->uri) {
+ flb_sds_destroy(ctx->uri);
+ }
+ if (ctx->ra_prefix_key) {
+ flb_ra_destroy(ctx->ra_prefix_key);
+ }
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+ flb_free(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/out_azure/azure_conf.h b/src/fluent-bit/plugins/out_azure/azure_conf.h
new file mode 100644
index 000000000..626b217ce
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure/azure_conf.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_AZURE_CONF_H
+#define FLB_OUT_AZURE_CONF_H
+
+#include "azure.h"
+
+struct flb_azure *flb_azure_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+int flb_azure_conf_destroy(struct flb_azure *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_azure_blob/CMakeLists.txt b/src/fluent-bit/plugins/out_azure_blob/CMakeLists.txt
new file mode 100644
index 000000000..3624480e6
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_blob/CMakeLists.txt
@@ -0,0 +1,10 @@
+set(src
+ azure_blob.c
+ azure_blob_uri.c
+ azure_blob_conf.c
+ azure_blob_http.c
+ azure_blob_appendblob.c
+ azure_blob_blockblob.c
+ )
+
+FLB_PLUGIN(out_azure_blob "${src}" "")
diff --git a/src/fluent-bit/plugins/out_azure_blob/azure_blob.c b/src/fluent-bit/plugins/out_azure_blob/azure_blob.c
new file mode 100644
index 000000000..3f539826d
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_blob/azure_blob.c
@@ -0,0 +1,594 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_base64.h>
+
+#include <msgpack.h>
+
+#include "azure_blob.h"
+#include "azure_blob_uri.h"
+#include "azure_blob_conf.h"
+#include "azure_blob_appendblob.h"
+#include "azure_blob_blockblob.h"
+#include "azure_blob_http.h"
+
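+/*
+ * Internal sentinel returned by send_blob() when the upload cannot proceed
+ * until the target blob is (re)created: the flush callback reacts by calling
+ * create_blob() and re-sending the data. Distinct from FLB_OK / FLB_RETRY /
+ * FLB_ERROR.
+ */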
+#define CREATE_BLOB 1337
+
+static int azure_blob_format(struct flb_config *config,
+ struct flb_input_instance *ins,
+ void *plugin_context,
+ void *flush_ctx,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_data, size_t *out_size)
+{
+ flb_sds_t out_buf;
+ struct flb_azure_blob *ctx = plugin_context;
+
+ out_buf = flb_pack_msgpack_to_json_format(data, bytes,
+ FLB_PACK_JSON_FORMAT_LINES,
+ FLB_PACK_JSON_DATE_ISO8601,
+ ctx->date_key);
+ if (!out_buf) {
+ return -1;
+ }
+
+ *out_data = out_buf;
+ *out_size = flb_sds_len(out_buf);
+ return 0;
+}
+
+static int send_blob(struct flb_config *config,
+ struct flb_input_instance *i_ins,
+ struct flb_azure_blob *ctx, char *name,
+ char *tag, int tag_len, void *data, size_t bytes)
+{
+ int ret;
+ int compressed = FLB_FALSE;
+ int content_encoding = FLB_FALSE;
+ int content_type = FLB_FALSE;
+ uint64_t ms = 0;
+ size_t b_sent;
+ void *out_buf;
+ size_t out_size;
+ flb_sds_t uri = NULL;
+ flb_sds_t blockid = NULL;
+ void *payload_buf;
+ size_t payload_size;
+ struct flb_http_client *c;
+ struct flb_connection *u_conn;
+
+ if (ctx->btype == AZURE_BLOB_APPENDBLOB) {
+ uri = azb_append_blob_uri(ctx, tag);
+ }
+ else if (ctx->btype == AZURE_BLOB_BLOCKBLOB) {
+ blockid = azb_block_blob_id(&ms);
+ if (!blockid) {
+ flb_plg_error(ctx->ins, "could not generate block id");
+ return FLB_RETRY;
+ }
+ uri = azb_block_blob_uri(ctx, tag, blockid, ms);
+ }
+
+ if (!uri) {
+ flb_free(blockid);
+ return FLB_RETRY;
+ }
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins,
+ "cannot create upstream connection for append_blob");
+ flb_sds_destroy(uri);
+ flb_free(blockid);
+ return FLB_RETRY;
+ }
+
+ /* Format the data */
+ ret = azure_blob_format(config, i_ins,
+ ctx, NULL,
+ FLB_EVENT_TYPE_LOGS,
+ tag, tag_len,
+ data, bytes,
+ &out_buf, &out_size);
+ if (ret != 0) {
+ flb_upstream_conn_release(u_conn);
+ flb_sds_destroy(uri);
+ flb_free(blockid);
+ return FLB_RETRY;
+ }
+
+ /* Map buffer */
+ payload_buf = out_buf;
+ payload_size = out_size;
+
+ if (ctx->compress_gzip == FLB_TRUE || ctx->compress_blob == FLB_TRUE) {
+ ret = flb_gzip_compress((void *) out_buf, out_size,
+ &payload_buf, &payload_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "cannot gzip payload, disabling compression");
+ }
+ else {
+ compressed = FLB_TRUE;
+ /* JSON buffer is no longer needed */
+ flb_sds_destroy(out_buf);
+ }
+ }
+
+ if (ctx->compress_blob == FLB_TRUE) {
+ content_encoding = AZURE_BLOB_CE_NONE;
+ content_type = AZURE_BLOB_CT_GZIP;
+ }
+ else if (compressed == FLB_TRUE) {
+ content_encoding = AZURE_BLOB_CE_GZIP;
+ content_type = AZURE_BLOB_CT_JSON;
+ }
+
+ /* Create HTTP client context */
+ c = flb_http_client(u_conn, FLB_HTTP_PUT,
+ uri,
+ payload_buf, payload_size, NULL, 0, NULL, 0);
+ if (!c) {
+ flb_plg_error(ctx->ins, "cannot create HTTP client context");
+ if (compressed == FLB_FALSE) {
+ flb_sds_destroy(out_buf);
+ }
+ else {
+ flb_free(payload_buf);
+ }
+ flb_upstream_conn_release(u_conn);
+ flb_free(blockid);
+ return FLB_RETRY;
+ }
+
+ /* Prepare headers and authentication */
+ azb_http_client_setup(ctx, c, (ssize_t) payload_size, FLB_FALSE,
+ content_type, content_encoding);
+
+ /* Send HTTP request */
+ ret = flb_http_do(c, &b_sent);
+ flb_sds_destroy(uri);
+
+ /* Release */
+ if (compressed == FLB_FALSE) {
+ flb_sds_destroy(out_buf);
+ }
+ else {
+ flb_free(payload_buf);
+ }
+
+ flb_upstream_conn_release(u_conn);
+
+ /* Validate HTTP status */
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error sending append_blob");
+ flb_free(blockid);
+ return FLB_RETRY;
+ }
+
+ if (c->resp.status == 201) {
+ flb_plg_info(ctx->ins, "content appended to blob successfully");
+ flb_http_client_destroy(c);
+
+ if (ctx->btype == AZURE_BLOB_BLOCKBLOB) {
+ ret = azb_block_blob_commit(ctx, blockid, tag, ms);
+ flb_free(blockid);
+ return ret;
+ }
+ flb_free(blockid);
+ return FLB_OK;
+ }
+ else if (c->resp.status == 404) {
+ flb_plg_info(ctx->ins, "blob not found: %s", c->uri);
+ flb_http_client_destroy(c);
+ return CREATE_BLOB;
+ }
+ else if (c->resp.payload_size > 0) {
+ flb_plg_error(ctx->ins, "cannot append content to blob\n%s",
+ c->resp.payload);
+ if (strstr(c->resp.payload, "must be 0 for Create Append")) {
+ flb_http_client_destroy(c);
+ return CREATE_BLOB;
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "cannot append content to blob");
+ }
+ flb_http_client_destroy(c);
+
+ return FLB_RETRY;
+}
+
+static int create_blob(struct flb_azure_blob *ctx, char *name)
+{
+ int ret;
+ size_t b_sent;
+ flb_sds_t uri = NULL;
+ struct flb_http_client *c;
+ struct flb_connection *u_conn;
+
+ uri = azb_uri_create_blob(ctx, name);
+ if (!uri) {
+ return FLB_RETRY;
+ }
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins,
+ "cannot create upstream connection for create_append_blob");
+ flb_sds_destroy(uri);
+ return FLB_RETRY;
+ }
+
+ /* Create HTTP client context */
+ c = flb_http_client(u_conn, FLB_HTTP_PUT,
+ uri,
+ NULL, 0, NULL, 0, NULL, 0);
+ if (!c) {
+ flb_plg_error(ctx->ins, "cannot create HTTP client context");
+ flb_upstream_conn_release(u_conn);
+ flb_sds_destroy(uri);
+ return FLB_RETRY;
+ }
+
+ /* Prepare headers and authentication */
+ azb_http_client_setup(ctx, c, -1, FLB_TRUE,
+ AZURE_BLOB_CT_NONE, AZURE_BLOB_CE_NONE);
+
+ /* Send HTTP request */
+ ret = flb_http_do(c, &b_sent);
+ flb_sds_destroy(uri);
+
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error sending append_blob");
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ return FLB_RETRY;
+ }
+
+ if (c->resp.status == 201) {
+ flb_plg_info(ctx->ins, "blob created successfully: %s", c->uri);
+ }
+ else {
+ if (c->resp.payload_size > 0) {
+ flb_plg_error(ctx->ins, "http_status=%i cannot create append blob\n%s",
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "http_status=%i cannot create append blob",
+ c->resp.status);
+ }
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ return FLB_RETRY;
+ }
+
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ return FLB_OK;
+}
+
+static int create_container(struct flb_azure_blob *ctx, char *name)
+{
+ int ret;
+ size_t b_sent;
+ flb_sds_t uri;
+ struct flb_http_client *c;
+ struct flb_connection *u_conn;
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins,
+ "cannot create upstream connection for container creation");
+ return FLB_FALSE;
+ }
+
+ /* URI */
+ uri = azb_uri_ensure_or_create_container(ctx);
+ if (!uri) {
+ flb_upstream_conn_release(u_conn);
+ return FLB_FALSE;
+ }
+
+ /* Create HTTP client context */
+ c = flb_http_client(u_conn, FLB_HTTP_PUT,
+ uri,
+ NULL, 0, NULL, 0, NULL, 0);
+ if (!c) {
+ flb_plg_error(ctx->ins, "cannot create HTTP client context");
+ flb_upstream_conn_release(u_conn);
+ return FLB_FALSE;
+ }
+
+ /* Prepare headers and authentication */
+ azb_http_client_setup(ctx, c, -1, FLB_FALSE,
+ AZURE_BLOB_CT_NONE, AZURE_BLOB_CE_NONE);
+
+ /* Send HTTP request */
+ ret = flb_http_do(c, &b_sent);
+
+ /* Release URI */
+ flb_sds_destroy(uri);
+
+ /* Validate http response */
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error requesting container creation");
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ return FLB_FALSE;
+ }
+
+ if (c->resp.status == 201) {
+ flb_plg_info(ctx->ins, "container '%s' created sucessfully", name);
+ }
+ else {
+ if (c->resp.payload_size > 0) {
+ flb_plg_error(ctx->ins, "cannot create container '%s'\n%s",
+ name, c->resp.payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "cannot create container '%s'", name);
+ }
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ return FLB_FALSE;
+ }
+
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ return FLB_TRUE;
+}
+
+/*
+ * Check that the container exists; if it does not and the configuration property
+ * auto_create_container is enabled, send a request to create it. If the container
+ * could not be created or auto_create_container is disabled, return FLB_FALSE.
+ */
+static int ensure_container(struct flb_azure_blob *ctx)
+{
+ int ret;
+ int status;
+ size_t b_sent;
+ flb_sds_t uri;
+ struct flb_http_client *c;
+ struct flb_connection *u_conn;
+
+ uri = azb_uri_ensure_or_create_container(ctx);
+ if (!uri) {
+ return FLB_FALSE;
+ }
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins,
+ "cannot create upstream connection for container check");
+ flb_sds_destroy(uri);
+ return FLB_FALSE;
+ }
+
+ /* Create HTTP client context */
+ c = flb_http_client(u_conn, FLB_HTTP_GET,
+ uri,
+ NULL, 0, NULL, 0, NULL, 0);
+ if (!c) {
+ flb_plg_error(ctx->ins, "cannot create HTTP client context");
+ flb_upstream_conn_release(u_conn);
+ return FLB_FALSE;
+ }
+ flb_http_strip_port_from_host(c);
+
+ /* Prepare headers and authentication */
+ azb_http_client_setup(ctx, c, -1, FLB_FALSE,
+ AZURE_BLOB_CT_NONE, AZURE_BLOB_CE_NONE);
+
+ /* Send HTTP request */
+ ret = flb_http_do(c, &b_sent);
+ flb_sds_destroy(uri);
+
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error requesting container properties");
+ flb_upstream_conn_release(u_conn);
+ return FLB_FALSE;
+ }
+
+ status = c->resp.status;
+ flb_http_client_destroy(c);
+
+ /* Release connection */
+ flb_upstream_conn_release(u_conn);
+
+ /* Request was successful, validate HTTP status code */
+ if (status == 404) {
+ /* The container was not found, try to create it */
+ flb_plg_info(ctx->ins, "container '%s' not found, trying to create it",
+ ctx->container_name);
+ ret = create_container(ctx, ctx->container_name);
+ return ret;
+ }
+ else if (status == 200) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+static int cb_azure_blob_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct flb_azure_blob *ctx = NULL;
+ (void) ins;
+ (void) config;
+ (void) data;
+
+ ctx = flb_azure_blob_conf_create(ins, config);
+ if (!ctx) {
+ return -1;
+ }
+
+ flb_output_set_http_debug_callbacks(ins);
+ return 0;
+}
+
+static void cb_azure_blob_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ struct flb_azure_blob *ctx = out_context;
+ (void) i_ins;
+ (void) config;
+
+ /* Validate the container exists, otherwise just create it */
+ ret = ensure_container(ctx);
+ if (ret == FLB_FALSE) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ ret = send_blob(config, i_ins, ctx,
+ (char *) event_chunk->tag, /* use tag as 'name' */
+ (char *) event_chunk->tag, flb_sds_len(event_chunk->tag),
+ (char *) event_chunk->data, event_chunk->size);
+
+ if (ret == CREATE_BLOB) {
+ ret = create_blob(ctx, event_chunk->tag);
+ if (ret == FLB_OK) {
+ ret = send_blob(config, i_ins, ctx,
+ (char *) event_chunk->tag, /* use tag as 'name' */
+ (char *) event_chunk->tag,
+ flb_sds_len(event_chunk->tag),
+ (char *) event_chunk->data, event_chunk->size);
+ }
+ }
+
+ /* FLB_RETRY, FLB_OK, FLB_ERROR */
+ FLB_OUTPUT_RETURN(ret);
+}
+
+static int cb_azure_blob_exit(void *data, struct flb_config *config)
+{
+ struct flb_azure_blob *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ flb_azure_blob_conf_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "account_name", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_azure_blob, account_name),
+ "Azure account name (mandatory)"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "container_name", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_azure_blob, container_name),
+ "Container name (mandatory)"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "auto_create_container", "true",
+ 0, FLB_TRUE, offsetof(struct flb_azure_blob, auto_create_container),
+ "Auto create container if it don't exists"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "blob_type", "appendblob",
+ 0, FLB_TRUE, offsetof(struct flb_azure_blob, blob_type),
+ "Set the block type: appendblob or blockblob"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "compress", NULL,
+ 0, FLB_FALSE, 0,
+ "Set payload compression in network transfer. Option available is 'gzip'"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "compress_blob", "false",
+ 0, FLB_TRUE, offsetof(struct flb_azure_blob, compress_blob),
+ "Enable block blob GZIP compression in the final blob file. This option is "
+ "not compatible with 'appendblob' block type"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "emulator_mode", "false",
+ 0, FLB_TRUE, offsetof(struct flb_azure_blob, emulator_mode),
+ "Use emulator mode, enable it if you want to use Azurite"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "shared_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_azure_blob, shared_key),
+ "Azure shared key"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "endpoint", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_azure_blob, endpoint),
+ "Custom full URL endpoint to use an emulator"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_azure_blob, path),
+ "Set a path for your blob"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "date_key", "@timestamp",
+ 0, FLB_TRUE, offsetof(struct flb_azure_blob, date_key),
+ "Name of the key that will have the record timestamp"
+ },
+
+ /* EOF */
+ {0}
+};
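+
+/*
+ * Minimal configuration sketch (account and key values are placeholders):
+ *
+ *   [OUTPUT]
+ *       name           azure_blob
+ *       match          *
+ *       account_name   myaccount
+ *       container_name logs
+ *       shared_key     <base64 encoded storage account key>
+ *       blob_type      appendblob
+ */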
+
+/* Plugin registration */
+struct flb_output_plugin out_azure_blob_plugin = {
+ .name = "azure_blob",
+ .description = "Azure Blob Storage",
+ .cb_init = cb_azure_blob_init,
+ .cb_flush = cb_azure_blob_flush,
+ .cb_exit = cb_azure_blob_exit,
+
+ /* Test */
+ .test_formatter.callback = azure_blob_format,
+
+ .config_map = config_map,
+
+ /* Plugin flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_azure_blob/azure_blob.h b/src/fluent-bit/plugins/out_azure_blob/azure_blob.h
new file mode 100644
index 000000000..5cf8a2927
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_blob/azure_blob.h
@@ -0,0 +1,74 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_AZURE_BLOB_H
+#define FLB_OUT_AZURE_BLOB_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_sds.h>
+
+/* Content-Type */
+#define AZURE_BLOB_CT "Content-Type"
+#define AZURE_BLOB_CT_NONE 0
+#define AZURE_BLOB_CT_JSON 1 /* application/json */
+#define AZURE_BLOB_CT_GZIP 2 /* application/gzip */
+
+/* Content-Encoding */
+#define AZURE_BLOB_CE "Content-Encoding"
+#define AZURE_BLOB_CE_NONE 0
+#define AZURE_BLOB_CE_GZIP 1 /* gzip */
+
+/* service endpoint */
+#define AZURE_ENDPOINT_PREFIX ".blob.core.windows.net"
+
+#define AZURE_BLOB_APPENDBLOB 0
+#define AZURE_BLOB_BLOCKBLOB 1
+
+struct flb_azure_blob {
+ int auto_create_container;
+ int emulator_mode;
+ int compress_gzip;
+ int compress_blob;
+ flb_sds_t account_name;
+ flb_sds_t container_name;
+ flb_sds_t blob_type;
+ flb_sds_t shared_key;
+ flb_sds_t endpoint;
+ flb_sds_t path;
+ flb_sds_t date_key;
+
+ /*
+ * Internal use
+ */
+ int btype; /* blob type */
+ flb_sds_t real_endpoint;
+ flb_sds_t base_uri;
+ flb_sds_t shared_key_prefix;
+
+ /* Shared key */
+ unsigned char *decoded_sk; /* decoded shared key */
+ size_t decoded_sk_size; /* size of decoded shared key */
+
+ /* Upstream connection */
+ struct flb_upstream *u;
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_azure_blob/azure_blob_appendblob.c b/src/fluent-bit/plugins/out_azure_blob/azure_blob_appendblob.c
new file mode 100644
index 000000000..2d9a82171
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_blob/azure_blob_appendblob.c
@@ -0,0 +1,44 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_sds.h>
+
+#include "azure_blob.h"
+#include "azure_blob_conf.h"
+#include "azure_blob_uri.h"
+
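+/*
+ * Compose the Append Block upload URI; based on the formatting below the
+ * result is the container URI followed by:
+ *
+ *   [/{path}]/{tag}?comp=appendblock
+ */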
+flb_sds_t azb_append_blob_uri(struct flb_azure_blob *ctx, char *tag)
+{
+ flb_sds_t uri;
+
+ uri = azb_uri_container(ctx);
+ if (!uri) {
+ return NULL;
+ }
+
+ if (ctx->path) {
+ flb_sds_printf(&uri, "/%s/%s?comp=appendblock", ctx->path, tag);
+ }
+ else {
+ flb_sds_printf(&uri, "/%s?comp=appendblock", tag);
+ }
+
+ return uri;
+}
diff --git a/src/fluent-bit/plugins/out_azure_blob/azure_blob_appendblob.h b/src/fluent-bit/plugins/out_azure_blob/azure_blob_appendblob.h
new file mode 100644
index 000000000..9ab103b0f
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_blob/azure_blob_appendblob.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AZURE_BLOB_APPENDBLOB_H
+#define AZURE_BLOB_APPENDBLOB_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include "azure_blob.h"
+
+flb_sds_t azb_append_blob_uri(struct flb_azure_blob *ctx, char *tag);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_azure_blob/azure_blob_blockblob.c b/src/fluent-bit/plugins/out_azure_blob/azure_blob_blockblob.c
new file mode 100644
index 000000000..a9b0e4a28
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_blob/azure_blob_blockblob.c
@@ -0,0 +1,238 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_base64.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_sds.h>
+
+#include <math.h>
+
+#include "azure_blob.h"
+#include "azure_blob_conf.h"
+#include "azure_blob_uri.h"
+#include "azure_blob_http.h"
+
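+/*
+ * Compose the Put Block URI; based on the formatting below the result is the
+ * container URI followed by (".gz" only when compress_blob is enabled):
+ *
+ *   [/{path}]/{tag}.{ms}[.gz]?blockid={url-encoded id}&comp=block
+ */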
+flb_sds_t azb_block_blob_uri(struct flb_azure_blob *ctx, char *tag,
+ char *blockid, uint64_t ms)
+{
+ int len;
+ flb_sds_t uri;
+ char *ext;
+ char *encoded_blockid;
+
+ len = strlen(blockid);
+ encoded_blockid = azb_uri_encode(blockid, len);
+ if (!encoded_blockid) {
+ return NULL;
+ }
+
+ uri = azb_uri_container(ctx);
+ if (!uri) {
+ flb_sds_destroy(encoded_blockid);
+ return NULL;
+ }
+
+ if (ctx->compress_blob == FLB_TRUE) {
+ ext = ".gz";
+ }
+ else {
+ ext = "";
+ }
+
+ if (ctx->path) {
+ flb_sds_printf(&uri, "/%s/%s.%" PRIu64 "%s?blockid=%s&comp=block",
+ ctx->path, tag, ms, ext, encoded_blockid);
+ }
+ else {
+ flb_sds_printf(&uri, "/%s.%" PRIu64 "%s?blockid=%s&comp=block",
+ tag, ms, ext, encoded_blockid);
+ }
+
+ flb_sds_destroy(encoded_blockid);
+ return uri;
+}
+
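+/*
+ * Compose the Put Block List (commit) URI; it uses the same naming as the
+ * block upload URI:
+ *
+ *   [/{path}]/{tag}.{ms}[.gz]?comp=blocklist
+ */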
+flb_sds_t azb_block_blob_uri_commit(struct flb_azure_blob *ctx,
+ char *tag, uint64_t ms)
+{
+ char *ext;
+ flb_sds_t uri;
+
+ uri = azb_uri_container(ctx);
+ if (!uri) {
+ return NULL;
+ }
+
+ if (ctx->compress_blob == FLB_TRUE) {
+ ext = ".gz";
+ }
+ else {
+ ext = "";
+ }
+
+ if (ctx->path) {
+ flb_sds_printf(&uri, "/%s/%s.%" PRIu64 "%s?comp=blocklist", ctx->path, tag,
+ ms, ext);
+ }
+ else {
+ flb_sds_printf(&uri, "/%s.%" PRIu64 "%s?comp=blocklist", tag, ms, ext);
+ }
+
+ return uri;
+}
+
+/* Generate a block id */
+char *azb_block_blob_id(uint64_t *ms)
+{
+ int len;
+ int ret;
+ double now;
+ char tmp[32];
+ size_t size;
+ size_t o_len;
+ char *b64;
+ struct flb_time tm;
+
+ /* Get current time */
+ flb_time_get(&tm);
+
+ /*
+ * Set outgoing time in milliseconds: this is used as a suffix for the
+ * block name
+ */
+ *ms = ((tm.tm.tv_sec * 1000) + (tm.tm.tv_nsec / 1000000));
+
+ /* Convert time to double to format the block id */
+ now = flb_time_to_double(&tm);
+ len = snprintf(tmp, sizeof(tmp), "flb-%.4f.id", now);
+
+ /* Allocate space for the outgoing base64 buffer */
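+ /* base64 emits 4 output bytes for every 3 input bytes (rounded up); the
+ * extra group of 4 leaves room for padding and a NUL terminator */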
+ size = (4 * ceil(((double) len / 3) + 1));
+ b64 = flb_malloc(size);
+ if (!b64) {
+ return NULL;
+ }
+
+ /* base64 encode block id */
+ ret = flb_base64_encode((unsigned char *) b64, size, &o_len,
+ (unsigned char *) tmp, len);
+ if (ret != 0) {
+ flb_free(b64);
+ return NULL;
+ }
+ return b64;
+}
+
+int azb_block_blob_commit(struct flb_azure_blob *ctx, char *blockid, char *tag,
+ uint64_t ms)
+{
+ int ret;
+ size_t b_sent;
+ flb_sds_t uri = NULL;
+ flb_sds_t payload;
+ struct flb_http_client *c;
+ struct flb_connection *u_conn;
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins,
+ "cannot create upstream connection for blockblob commit");
+ return FLB_RETRY;
+ }
+
+ /* Compose commit URI */
+ uri = azb_block_blob_uri_commit(ctx, tag, ms);
+ if (!uri) {
+ flb_upstream_conn_release(u_conn);
+ return FLB_ERROR;
+ }
+
+ payload = flb_sds_create_size(256);
+ if (!payload) {
+ flb_sds_destroy(uri);
+ flb_upstream_conn_release(u_conn);
+ return FLB_ERROR;
+ }
+
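+ /* Put Block List payload: commit just the single block uploaded above */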
+ flb_sds_printf(&payload,
+ "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
+ "<BlockList>"
+ " <Latest>%s</Latest>"
+ "</BlockList>",
+ blockid);
+
+ /* Create HTTP client context */
+ c = flb_http_client(u_conn, FLB_HTTP_PUT,
+ uri,
+ payload, flb_sds_len(payload), NULL, 0, NULL, 0);
+ if (!c) {
+ flb_plg_error(ctx->ins, "cannot create HTTP client context");
+ flb_sds_destroy(payload);
+ flb_sds_destroy(uri);
+ flb_upstream_conn_release(u_conn);
+ return FLB_RETRY;
+ }
+
+ /* Prepare headers and authentication */
+ azb_http_client_setup(ctx, c, flb_sds_len(payload),
+ FLB_FALSE,
+ AZURE_BLOB_CT_NONE, AZURE_BLOB_CE_NONE);
+
+ /* Send HTTP request */
+ ret = flb_http_do(c, &b_sent);
+ flb_sds_destroy(uri);
+ flb_sds_destroy(payload);
+
+ /* Validate HTTP status */
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error sending append_blob");
+ return FLB_RETRY;
+ }
+
+ if (c->resp.status == 201) {
+ flb_plg_info(ctx->ins, "blob id %s committed successfully", blockid);
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ return FLB_OK;
+ }
+ else if (c->resp.status == 404) {
+ flb_plg_info(ctx->ins, "blob not found: %s", c->uri);
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ return FLB_RETRY;
+ }
+ else if (c->resp.payload_size > 0) {
+ flb_plg_error(ctx->ins, "cannot commit blob id %s\n%s",
+ blockid, c->resp.payload);
+ if (strstr(c->resp.payload, "must be 0 for Create Append")) {
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ return FLB_RETRY;
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "cannot append content to blob");
+ }
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+
+ return FLB_OK;
+}
diff --git a/src/fluent-bit/plugins/out_azure_blob/azure_blob_blockblob.h b/src/fluent-bit/plugins/out_azure_blob/azure_blob_blockblob.h
new file mode 100644
index 000000000..ee210d138
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_blob/azure_blob_blockblob.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AZURE_BLOB_BLOCKBLOB_H
+#define AZURE_BLOB_BLOCKBLOB_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include "azure_blob.h"
+
+flb_sds_t azb_block_blob_uri(struct flb_azure_blob *ctx, char *tag, char *blockid,
+ uint64_t ms);
+char *azb_block_blob_id(uint64_t *ms);
+int azb_block_blob_commit(struct flb_azure_blob *ctx, char *blockid, char *tag,
+ uint64_t ms);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_azure_blob/azure_blob_conf.c b/src/fluent-bit/plugins/out_azure_blob/azure_blob_conf.c
new file mode 100644
index 000000000..4437a6d2d
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_blob/azure_blob_conf.c
@@ -0,0 +1,245 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_base64.h>
+
+#include "azure_blob.h"
+#include "azure_blob_conf.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+static int set_shared_key(struct flb_azure_blob *ctx)
+{
+ int s;
+ int ret;
+ size_t o_len = 0;
+
+ s = flb_sds_len(ctx->shared_key);
+
+ /* buffer for the decoded (binary) shared key */
+ ctx->decoded_sk = flb_malloc(s * 2);
+ if (!ctx->decoded_sk) {
+ return -1;
+ }
+
+ /* decode base64 */
+ ret = flb_base64_decode(ctx->decoded_sk, s * 2,
+ &o_len,
+ (unsigned char *)ctx->shared_key,
+ flb_sds_len(ctx->shared_key));
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "cannot decode shared_key");
+ return -1;
+ }
+
+ ctx->decoded_sk_size = o_len;
+ return 0;
+}
+
+struct flb_azure_blob *flb_azure_blob_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ int port;
+ int io_flags = 0;
+ flb_sds_t tmp;
+ struct flb_azure_blob *ctx;
+
+ ctx = flb_calloc(1, sizeof(struct flb_azure_blob));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ /* Set context */
+ flb_output_set_context(ins, ctx);
+
+ /* Load config map */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ return NULL;
+ }
+
+ if (!ctx->container_name) {
+ flb_plg_error(ctx->ins, "'container_name' has not been set");
+ return NULL;
+ }
+
+ /* If the shared key is set decode it */
+ if (ctx->shared_key) {
+ ret = set_shared_key(ctx);
+ if (ret == -1) {
+ return NULL;
+ }
+ }
+
+ /* Set Blob type */
+ tmp = (char *) flb_output_get_property("blob_type", ins);
+ if (!tmp) {
+ ctx->btype = AZURE_BLOB_APPENDBLOB;
+ }
+ else {
+ if (strcasecmp(tmp, "appendblob") == 0) {
+ ctx->btype = AZURE_BLOB_APPENDBLOB;
+ }
+ else if (strcasecmp(tmp, "blockblob") == 0) {
+ ctx->btype = AZURE_BLOB_BLOCKBLOB;
+ }
+ else {
+ flb_plg_error(ctx->ins, "invalid blob_type value '%s'", tmp);
+ return NULL;
+ }
+ }
+
+ /* Compress (gzip) */
+ tmp = (char *) flb_output_get_property("compress", ins);
+ ctx->compress_gzip = FLB_FALSE;
+ if (tmp) {
+ if (strcasecmp(tmp, "gzip") == 0) {
+ ctx->compress_gzip = FLB_TRUE;
+ }
+ }
+
+ /* Compress Blob: only available for the blockblob type */
+ if (ctx->compress_blob == FLB_TRUE && ctx->btype != AZURE_BLOB_BLOCKBLOB) {
+ flb_plg_error(ctx->ins,
+ "the option 'compress_blob' is not compatible with 'appendblob' "
+ "blob_type");
+ return NULL;
+ }
+
+ /*
+ * Setting up the real endpoint:
+ *
+ * If the user provided a custom endpoint, just parse it. Here we need to
+ * discover if a TLS connection is required, just use the protocol prefix.
+ */
+ if (ctx->endpoint) {
+ if (strncmp(ctx->endpoint, "https", 5) == 0) {
+ io_flags |= FLB_IO_TLS;
+ }
+ else {
+ io_flags |= FLB_IO_TCP;
+ }
+
+ ctx->u = flb_upstream_create_url(config, ctx->endpoint,
+ io_flags, ins->tls);
+ if (!ctx->u) {
+ flb_plg_error(ctx->ins, "invalid endpoint '%s'", ctx->endpoint);
+ return NULL;
+ }
+ ctx->real_endpoint = flb_sds_create(ctx->endpoint);
+ }
+ else {
+ ctx->real_endpoint = flb_sds_create_size(256);
+ if (!ctx->real_endpoint) {
+ flb_plg_error(ctx->ins, "cannot create endpoint");
+ return NULL;
+ }
+ flb_sds_printf(&ctx->real_endpoint, "%s%s",
+ ctx->account_name,
+ AZURE_ENDPOINT_PREFIX);
+
+ /* use TLS ? */
+ if (ins->use_tls == FLB_TRUE) {
+ port = 443;
+ io_flags = FLB_IO_TLS;
+ }
+ else {
+ port = 80;
+ io_flags = FLB_IO_TCP;
+ }
+
+ ctx->u = flb_upstream_create(config, ctx->real_endpoint, port, io_flags,
+ ins->tls);
+ if (!ctx->u) {
+ flb_plg_error(ctx->ins, "cannot create upstream for endpoint '%s'",
+ ctx->real_endpoint);
+ return NULL;
+ }
+ }
+ flb_output_upstream_set(ctx->u, ins);
+
+ /* Compose base uri */
+ ctx->base_uri = flb_sds_create_size(256);
+ if (!ctx->base_uri) {
+ flb_plg_error(ctx->ins, "cannot create base_uri for endpoint '%s'",
+ ctx->real_endpoint);
+ return NULL;
+ }
+
+ if (ctx->emulator_mode == FLB_TRUE) {
+ flb_sds_printf(&ctx->base_uri, "/%s/", ctx->account_name);
+ }
+ else {
+ flb_sds_printf(&ctx->base_uri, "/");
+ }
+
+ /* Prepare shared key buffer */
+ ctx->shared_key_prefix = flb_sds_create_size(256);
+ if (!ctx->shared_key_prefix) {
+ flb_plg_error(ctx->ins, "cannot create shared key prefix");
+ return NULL;
+ }
+ flb_sds_printf(&ctx->shared_key_prefix, "SharedKey %s:", ctx->account_name);
+
+ /* Sanitize path: remove any ending slash */
+ if (ctx->path) {
+ if (ctx->path[flb_sds_len(ctx->path) - 1] == '/') {
+ ctx->path[flb_sds_len(ctx->path) - 1] = '\0';
+ }
+ }
+
+ flb_plg_info(ctx->ins,
+ "account_name=%s, container_name=%s, blob_type=%s, emulator_mode=%s, endpoint=%s",
+ ctx->account_name, ctx->container_name,
+ ctx->btype == AZURE_BLOB_APPENDBLOB ? "appendblob": "blockblob",
+ ctx->emulator_mode ? "yes": "no",
+ ctx->real_endpoint ? ctx->real_endpoint: "no");
+ return ctx;
+}
+
+void flb_azure_blob_conf_destroy(struct flb_azure_blob *ctx)
+{
+ if (ctx->decoded_sk) {
+ flb_free(ctx->decoded_sk);
+ }
+
+ if (ctx->base_uri) {
+ flb_sds_destroy(ctx->base_uri);
+ }
+
+ if (ctx->real_endpoint) {
+ flb_sds_destroy(ctx->real_endpoint);
+ }
+
+ if (ctx->shared_key_prefix) {
+ flb_sds_destroy(ctx->shared_key_prefix);
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ flb_free(ctx);
+}
diff --git a/src/fluent-bit/plugins/out_azure_blob/azure_blob_conf.h b/src/fluent-bit/plugins/out_azure_blob/azure_blob_conf.h
new file mode 100644
index 000000000..32a85c678
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_blob/azure_blob_conf.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_AZURE_BLOB_CONF_H
+#define FLB_OUT_AZURE_BLOB_CONF_H
+
+#include <fluent-bit/flb_output_plugin.h>
+
+struct flb_azure_blob *flb_azure_blob_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+void flb_azure_blob_conf_destroy(struct flb_azure_blob *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_azure_blob/azure_blob_http.c b/src/fluent-bit/plugins/out_azure_blob/azure_blob_http.c
new file mode 100644
index 000000000..5ac81a9a1
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_blob/azure_blob_http.c
@@ -0,0 +1,361 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_base64.h>
+#include <fluent-bit/flb_crypto.h>
+#include <fluent-bit/flb_hmac.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_kv.h>
+
+#include "azure_blob.h"
+#include "azure_blob_uri.h"
+
+static int hmac_sha256_sign(unsigned char out[32],
+ unsigned char *key, size_t key_len,
+ unsigned char *msg, size_t msg_len)
+{
+ return flb_hmac_simple(FLB_HASH_SHA256,
+ key, key_len,
+ msg, msg_len,
+ out, 32);
+}
+
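+/*
+ * Collect every "x-ms-*" header already registered in the HTTP client and
+ * concatenate them as "key:value\n" pairs, in insertion order. As a rough
+ * illustration, with the headers added by azb_http_client_setup() below the
+ * result would look like (the date is a made-up example value):
+ *
+ *   x-ms-blob-type:AppendBlob
+ *   x-ms-date:Tue, 02 Jan 2024 03:04:05 GMT
+ *   x-ms-version:2019-12-12
+ */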
+static flb_sds_t canonical_headers(struct flb_http_client *c)
+{
+ flb_sds_t ch;
+ flb_sds_t tmp;
+ struct flb_kv *kv;
+ struct mk_list *head;
+
+ ch = flb_sds_create_size(mk_list_size(&c->headers) * 64);
+ if (!ch) {
+ return NULL;
+ }
+
+ mk_list_foreach(head, &c->headers) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+ if (strncmp(kv->key, "x-ms-", 5) != 0) {
+ continue;
+ }
+
+ /* key */
+ tmp = flb_sds_cat(ch, kv->key, flb_sds_len(kv->key));
+ if (!tmp) {
+ flb_sds_destroy(ch);
+ return NULL;
+ }
+ ch = tmp;
+
+ /* sep */
+ tmp = flb_sds_cat(ch, ":", 1);
+ if (!tmp) {
+ flb_sds_destroy(ch);
+ return NULL;
+ }
+ ch = tmp;
+
+ /* value */
+ tmp = flb_sds_cat(ch, kv->val, flb_sds_len(kv->val));
+ if (!tmp) {
+ flb_sds_destroy(ch);
+ return NULL;
+ }
+ ch = tmp;
+
+ tmp = flb_sds_cat(ch, "\n", 1);
+ if (!tmp) {
+ flb_sds_destroy(ch);
+ return NULL;
+ }
+ ch = tmp;
+ }
+
+ return ch;
+}
+
+static flb_sds_t canonical_resource(struct flb_azure_blob *ctx,
+ struct flb_http_client *c)
+{
+ int pos;
+ int len;
+ int kv_start;
+ char *p;
+ size_t size;
+ flb_sds_t cr;
+ flb_sds_t dec_uri;
+ flb_sds_t tmp;
+
+ len = strlen(c->uri);
+ size = flb_sds_len(ctx->account_name) + len + 64;
+
+ cr = flb_sds_create_size(size);
+ if (!cr) {
+ return NULL;
+ }
+
+    dec_uri = azb_uri_decode(c->uri, len);
+    if (!dec_uri) {
+        flb_sds_destroy(cr);
+        return NULL;
+    }
+    tmp = flb_sds_printf(&cr, "/%s%s", ctx->account_name, dec_uri);
+    if (!tmp) {
+        flb_sds_destroy(dec_uri);
+        flb_sds_destroy(cr);
+        return NULL;
+    }
+    flb_sds_destroy(dec_uri);
+
+ pos = 1 + flb_sds_len(ctx->account_name);
+
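+    /*
+     * Canonicalize the query string for signing: '?' and '&' become newlines
+     * and the first '=' of each key/value pair becomes ':'. Illustrative
+     * sketch with made-up account and container names:
+     *
+     *   /myaccount/mycontainer?comp=appendblock&timeout=30
+     * becomes
+     *   /myaccount/mycontainer
+     *   comp:appendblock
+     *   timeout:30
+     */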
+ p = strchr(cr + pos, '?');
+ if (p) {
+ kv_start = FLB_TRUE;
+ while (*p) {
+ if (*p == '?') {
+ *p = '\n';
+ }
+ else if (*p == '=' && kv_start == FLB_TRUE) {
+ *p = ':';
+ kv_start = FLB_FALSE;
+ }
+ else if (*p == '&') {
+ *p = '\n';
+ kv_start = FLB_TRUE;
+ }
+ p++;
+ }
+ }
+
+ return cr;
+}
+
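+/*
+ * Compose the Shared Key "string to sign" (HTTP verb, the standard header
+ * values in the fixed order written below, the canonicalized x-ms-* headers
+ * and the canonicalized resource), sign it with HMAC-SHA256 using the base64
+ * decoded account key and return the base64 encoded signature, or NULL on
+ * error. The caller is expected to prepend "SharedKey <account_name>:" to
+ * build the Authorization header value.
+ */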
+flb_sds_t azb_http_canonical_request(struct flb_azure_blob *ctx,
+ struct flb_http_client *c,
+ ssize_t content_length,
+ int content_type,
+ int content_encoding)
+{
+ int ret;
+ size_t size;
+ size_t o_len = 0;
+ flb_sds_t can_req;
+ flb_sds_t can_res;
+ flb_sds_t can_headers;
+ flb_sds_t tmp = NULL;
+ char *b64 = NULL;
+ char *encoding;
+ char *ctype = "";
+ unsigned char signature[32];
+
+ size = strlen(c->uri) + (mk_list_size(&c->headers) * 64) + 256;
+ can_req = flb_sds_create_size(size);
+ if (!can_req) {
+ flb_plg_error(ctx->ins, "cannot allocate buffer for canonical request");
+ return NULL;
+ }
+
+ switch (c->method) {
+ case FLB_HTTP_GET:
+ tmp = flb_sds_cat(can_req, "GET\n", 4);
+ break;
+ case FLB_HTTP_POST:
+ tmp = flb_sds_cat(can_req, "POST\n", 5);
+ break;
+ case FLB_HTTP_PUT:
+ tmp = flb_sds_cat(can_req, "PUT\n", 4);
+ break;
+    }
+
+ if (!tmp) {
+        flb_plg_error(ctx->ins, "unsupported HTTP method for canonical request");
+ flb_sds_destroy(can_req);
+ return NULL;
+ }
+
+ if (content_encoding == AZURE_BLOB_CE_GZIP) {
+ encoding = "gzip";
+ }
+ else {
+ encoding = "";
+ }
+
+ flb_sds_printf(&can_req,
+ "%s\n" /* Content-Encoding */
+ "\n", /* Content-Language */
+ encoding
+ );
+
+ if (content_length >= 0) {
+ flb_sds_printf(&can_req,
+ "%zi\n" /* Content-Length */,
+ content_length);
+ }
+ else {
+ flb_sds_printf(&can_req,
+ "\n" /* Content-Length */
+ );
+ }
+
+ if (content_type == AZURE_BLOB_CT_NONE) {
+ ctype = "";
+ }
+ else if (content_type == AZURE_BLOB_CT_JSON) {
+ ctype = "application/json";
+ }
+ else if (content_type == AZURE_BLOB_CT_GZIP) {
+ ctype = "application/gzip";
+ }
+
+ flb_sds_printf(&can_req,
+ "\n" /* Content-MD5 */
+ "%s\n" /* Content-Type */
+ "\n" /* Date */
+ "\n" /* If-Modified-Since */
+ "\n" /* If-Match */
+ "\n" /* If-None-Match */
+ "\n" /* If-Unmodified-Since */
+ "\n" /* Range */,
+ ctype);
+
+ /* Append canonicalized headers */
+ can_headers = canonical_headers(c);
+ if (!can_headers) {
+ flb_sds_destroy(can_req);
+ return NULL;
+ }
+ tmp = flb_sds_cat(can_req, can_headers, flb_sds_len(can_headers));
+ if (!tmp) {
+ flb_sds_destroy(can_req);
+ flb_sds_destroy(can_headers);
+ return NULL;
+ }
+ can_req = tmp;
+ flb_sds_destroy(can_headers);
+
+ /* Append canonical resource */
+ can_res = canonical_resource(ctx, c);
+ if (!can_res) {
+ flb_sds_destroy(can_req);
+ return NULL;
+ }
+ tmp = flb_sds_cat(can_req, can_res, flb_sds_len(can_res));
+ if (!tmp) {
+ flb_sds_destroy(can_res);
+ flb_sds_destroy(can_req);
+ return NULL;
+ }
+ can_req = tmp;
+ flb_sds_destroy(can_res);
+
+ flb_plg_trace(ctx->ins, "string to sign\n%s", can_req);
+
+ /* Signature */
+ hmac_sha256_sign(signature, ctx->decoded_sk, ctx->decoded_sk_size,
+ (unsigned char *) can_req, flb_sds_len(can_req));
+ flb_sds_destroy(can_req);
+
+    /* worst-case base64 encoded size for the signature */
+ size = ((4 * ((sizeof(signature) + 1)) / 3) + 1);
+ b64 = flb_sds_create_size(size);
+ if (!b64) {
+ return NULL;
+ }
+
+ ret = flb_base64_encode((unsigned char *) b64, size, &o_len,
+ signature, sizeof(signature));
+ if (ret != 0) {
+ flb_sds_destroy(b64);
+ return NULL;
+ }
+ flb_sds_len_set(b64, o_len);
+
+ return b64;
+}
+
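+/*
+ * Register the request headers required by the Blob service (Content-Type,
+ * Content-Encoding, x-ms-blob-type, x-ms-date, x-ms-version) plus the Shared
+ * Key Authorization header. With a hypothetical account name the final
+ * header would look like:
+ *
+ *   Authorization: SharedKey myaccount:<base64 HMAC-SHA256 signature>
+ */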
+int azb_http_client_setup(struct flb_azure_blob *ctx, struct flb_http_client *c,
+ ssize_t content_length, int blob_type,
+ int content_type, int content_encoding)
+{
+ int len;
+ time_t now;
+ struct tm tm;
+ char tmp[64];
+ flb_sds_t can_req;
+ flb_sds_t auth;
+
+ /* Header: User Agent */
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+
+ /* Header: Content-Type */
+ if (content_type == AZURE_BLOB_CT_JSON) {
+ flb_http_add_header(c,
+ AZURE_BLOB_CT, sizeof(AZURE_BLOB_CT) - 1,
+ "application/json", 16);
+ }
+ else if (content_type == AZURE_BLOB_CT_GZIP) {
+ flb_http_add_header(c,
+ AZURE_BLOB_CT, sizeof(AZURE_BLOB_CT) - 1,
+ "application/gzip", 16);
+ }
+
+ if (content_encoding == AZURE_BLOB_CE_GZIP) {
+ flb_http_add_header(c,
+ AZURE_BLOB_CE, sizeof(AZURE_BLOB_CE) - 1,
+ "gzip", 4);
+ }
+
+ /* Azure header: x-ms-blob-type */
+ if (blob_type == FLB_TRUE) {
+ if (ctx->btype == AZURE_BLOB_APPENDBLOB) {
+ flb_http_add_header(c, "x-ms-blob-type", 14, "AppendBlob", 10);
+ }
+ else if (ctx->btype == AZURE_BLOB_BLOCKBLOB) {
+ flb_http_add_header(c, "x-ms-blob-type", 14, "BlockBlob", 9);
+ }
+ }
+
+ /* Azure header: x-ms-date */
+ now = time(NULL);
+ gmtime_r(&now, &tm);
+ len = strftime(tmp, sizeof(tmp) - 1, "%a, %d %b %Y %H:%M:%S GMT", &tm);
+
+ flb_http_add_header(c, "x-ms-date", 9, tmp, len);
+
+ /* Azure header: x-ms-version */
+ flb_http_add_header(c, "x-ms-version", 12, "2019-12-12", 10);
+
+    can_req = azb_http_canonical_request(ctx, c, content_length, content_type,
+                                         content_encoding);
+    if (!can_req) {
+        return -1;
+    }
+
+    auth = flb_sds_create_size(64 + flb_sds_len(can_req));
+
+ flb_sds_cat(auth, ctx->shared_key_prefix, flb_sds_len(ctx->shared_key_prefix));
+ flb_sds_cat(auth, can_req, flb_sds_len(can_req));
+
+ /* Azure header: authorization */
+ flb_http_add_header(c, "Authorization", 13, auth, flb_sds_len(auth));
+
+ /* Release buffers */
+ flb_sds_destroy(can_req);
+ flb_sds_destroy(auth);
+
+ /* Set callback context to the HTTP client context */
+ flb_http_set_callback_context(c, ctx->ins->callback);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/out_azure_blob/azure_blob_http.h b/src/fluent-bit/plugins/out_azure_blob/azure_blob_http.h
new file mode 100644
index 000000000..04f7cfd98
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_blob/azure_blob_http.h
@@ -0,0 +1,36 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AZURE_BLOB_HTTP_H
+#define AZURE_BLOB_HTTP_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_http_client.h>
+#include "azure_blob.h"
+
+int azb_http_client_setup(struct flb_azure_blob *ctx, struct flb_http_client *c,
+ ssize_t content_length, int blob_type,
+ int content_type, int content_encoding);
+
+flb_sds_t azb_http_canonical_request(struct flb_azure_blob *ctx,
+                                     struct flb_http_client *c,
+                                     ssize_t content_length,
+                                     int content_type,
+                                     int content_encoding);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_azure_blob/azure_blob_uri.c b/src/fluent-bit/plugins/out_azure_blob/azure_blob_uri.c
new file mode 100644
index 000000000..c7a05e286
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_blob/azure_blob_uri.c
@@ -0,0 +1,150 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_sds.h>
+
+#include "azure_blob.h"
+
+static inline int to_encode(char c)
+{
+ if ((c >= 48 && c <= 57) || /* 0-9 */
+ (c >= 65 && c <= 90) || /* A-Z */
+ (c >= 97 && c <= 122) || /* a-z */
+ (c == '?' || c == '&' || c == '-' || c == '_' || c == '.' ||
+ c == '~' || c == '/')) {
+ return FLB_FALSE;
+ }
+
+ return FLB_TRUE;
+}
+
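+/*
+ * Percent-encode every byte flagged by to_encode(); alphanumerics and the
+ * characters ? & - _ . ~ / are copied verbatim. Illustrative example:
+ * "my blob#1.log" becomes "my%20blob%231.log".
+ */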
+flb_sds_t azb_uri_encode(const char *uri, size_t len)
+{
+ int i;
+ flb_sds_t buf = NULL;
+ flb_sds_t tmp = NULL;
+
+ buf = flb_sds_create_size(len * 2);
+ if (!buf) {
+ flb_error("[uri] cannot allocate buffer for URI encoding");
+ return NULL;
+ }
+
+ for (i = 0; i < len; i++) {
+ if (to_encode(uri[i]) == FLB_TRUE) {
+ tmp = flb_sds_printf(&buf, "%%%02X", (unsigned char) *(uri + i));
+ if (!tmp) {
+ flb_sds_destroy(buf);
+ return NULL;
+ }
+ continue;
+ }
+
+ /* Direct assignment, just copy the character */
+ if (buf) {
+ tmp = flb_sds_cat(buf, uri + i, 1);
+ if (!tmp) {
+ flb_sds_destroy(buf);
+ return NULL;
+ }
+ buf = tmp;
+ }
+ }
+
+ return buf;
+}
+
+flb_sds_t azb_uri_decode(const char *uri, size_t len)
+{
+ int i;
+ int hex_result;
+ int c = 0;
+ char hex[3];
+ flb_sds_t out;
+
+ out = flb_sds_create_size(len);
+ if (!out) {
+ return NULL;
+ }
+
+ for (i = 0; i < len; i++) {
+ if (uri[i] == '%') {
+ hex[0] = uri[i + 1];
+ hex[1] = uri[i + 2];
+ hex[2] = '\0';
+
+ hex_result = flb_utils_hex2int(hex, 2);
+ out[c++] = hex_result;
+ i += 2;
+ }
+ else {
+ out[c++] = uri[i];
+ }
+ }
+ out[c++] = '\0';
+
+ return out;
+}
+
+flb_sds_t azb_uri_container(struct flb_azure_blob *ctx)
+{
+ flb_sds_t uri;
+
+ uri = flb_sds_create_size(256);
+ if (!uri) {
+ return NULL;
+ }
+
+ flb_sds_printf(&uri, "%s%s", ctx->base_uri, ctx->container_name);
+ return uri;
+}
+
+flb_sds_t azb_uri_ensure_or_create_container(struct flb_azure_blob *ctx)
+{
+ flb_sds_t uri;
+
+ uri = azb_uri_container(ctx);
+ if (!uri) {
+ return NULL;
+ }
+
+ flb_sds_printf(&uri, "?restype=container");
+ return uri;
+}
+
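+/*
+ * Compose the blob object URI as "<base_uri><container>[/<path>]/<tag>".
+ * For example, with hypothetical values container_name=logs, path=fluent-bit
+ * and tag=app.access, the result is "/logs/fluent-bit/app.access".
+ */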
+flb_sds_t azb_uri_create_blob(struct flb_azure_blob *ctx, char *tag)
+{
+ flb_sds_t uri;
+
+ uri = azb_uri_container(ctx);
+ if (!uri) {
+ return NULL;
+ }
+
+ if (ctx->path) {
+ flb_sds_printf(&uri, "/%s/%s", ctx->path, tag);
+ }
+ else {
+ flb_sds_printf(&uri, "/%s", tag);
+ }
+
+ return uri;
+}
diff --git a/src/fluent-bit/plugins/out_azure_blob/azure_blob_uri.h b/src/fluent-bit/plugins/out_azure_blob/azure_blob_uri.h
new file mode 100644
index 000000000..ffeed7636
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_blob/azure_blob_uri.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_AZURE_BLOB_URI
+#define FLB_AZURE_BLOB_URI
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_sds.h>
+
+#include "azure_blob.h"
+
+flb_sds_t azb_uri_container(struct flb_azure_blob *ctx);
+flb_sds_t azb_uri_ensure_or_create_container(struct flb_azure_blob *ctx);
+flb_sds_t azb_uri_create_blob(struct flb_azure_blob *ctx, char *tag);
+flb_sds_t azb_uri_encode(const char *uri, size_t len);
+flb_sds_t azb_uri_decode(const char *uri, size_t len);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_azure_kusto/CMakeLists.txt b/src/fluent-bit/plugins/out_azure_kusto/CMakeLists.txt
new file mode 100644
index 000000000..6803bee09
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_kusto/CMakeLists.txt
@@ -0,0 +1,7 @@
+set(src
+ azure_kusto.c
+ azure_kusto_conf.c
+ azure_kusto_ingest.c
+ )
+
+FLB_PLUGIN(out_azure_kusto "${src}" "")
diff --git a/src/fluent-bit/plugins/out_azure_kusto/azure_kusto.c b/src/fluent-bit/plugins/out_azure_kusto/azure_kusto.c
new file mode 100644
index 000000000..4b8ad9b82
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_kusto/azure_kusto.c
@@ -0,0 +1,477 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_oauth2.h>
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_signv4.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include "azure_kusto.h"
+#include "azure_kusto_conf.h"
+#include "azure_kusto_ingest.h"
+
+/* Populate the oauth2 payload and retrieve an oauth2 access token */
+static int azure_kusto_get_oauth2_token(struct flb_azure_kusto *ctx)
+{
+ int ret;
+ char *token;
+
+ /* Clear any previous oauth2 payload content */
+ flb_oauth2_payload_clear(ctx->o);
+
+ ret = flb_oauth2_payload_append(ctx->o, "grant_type", 10, "client_credentials", 18);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error appending oauth2 params");
+ return -1;
+ }
+
+ ret = flb_oauth2_payload_append(ctx->o, "scope", 5, FLB_AZURE_KUSTO_SCOPE, 39);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error appending oauth2 params");
+ return -1;
+ }
+
+ ret = flb_oauth2_payload_append(ctx->o, "client_id", 9, ctx->client_id, -1);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error appending oauth2 params");
+ return -1;
+ }
+
+ ret = flb_oauth2_payload_append(ctx->o, "client_secret", 13, ctx->client_secret, -1);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error appending oauth2 params");
+ return -1;
+ }
+
+ /* Retrieve access token */
+ token = flb_oauth2_token_get(ctx->o);
+ if (!token) {
+ flb_plg_error(ctx->ins, "error retrieving oauth2 access token");
+ return -1;
+ }
+
+ return 0;
+}
+
+flb_sds_t get_azure_kusto_token(struct flb_azure_kusto *ctx)
+{
+ int ret = 0;
+ flb_sds_t output = NULL;
+
+ if (pthread_mutex_lock(&ctx->token_mutex)) {
+ flb_plg_error(ctx->ins, "error locking mutex");
+ return NULL;
+ }
+
+ if (flb_oauth2_token_expired(ctx->o) == FLB_TRUE) {
+ ret = azure_kusto_get_oauth2_token(ctx);
+ }
+
+ /* Copy string to prevent race conditions (get_oauth2 can free the string) */
+ if (ret == 0) {
+ output = flb_sds_create_size(flb_sds_len(ctx->o->token_type) +
+ flb_sds_len(ctx->o->access_token) + 2);
+ if (!output) {
+ flb_plg_error(ctx->ins, "error creating token buffer");
+ return NULL;
+ }
+ flb_sds_snprintf(&output, flb_sds_alloc(output), "%s %s", ctx->o->token_type,
+ ctx->o->access_token);
+ }
+
+ if (pthread_mutex_unlock(&ctx->token_mutex)) {
+ flb_plg_error(ctx->ins, "error unlocking mutex");
+ if (output) {
+ flb_sds_destroy(output);
+ }
+ return NULL;
+ }
+
+ return output;
+}
+
+/**
+ * Executes a control command against kusto's endpoint
+ *
+ * @param ctx Plugin's context
+ * @param csl Kusto's control command
+ * @return flb_sds_t Returns the response or NULL on error.
+ */
+flb_sds_t execute_ingest_csl_command(struct flb_azure_kusto *ctx, const char *csl)
+{
+ flb_sds_t token;
+ flb_sds_t body;
+ size_t b_sent;
+ int ret;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ flb_sds_t resp = NULL;
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+
+ if (u_conn) {
+ token = get_azure_kusto_token(ctx);
+
+ if (token) {
+ /* Compose request body */
+ body = flb_sds_create_size(sizeof(FLB_AZURE_KUSTO_MGMT_BODY_TEMPLATE) - 1 +
+ strlen(csl));
+
+ if (body) {
+ flb_sds_snprintf(&body, flb_sds_alloc(body),
+ FLB_AZURE_KUSTO_MGMT_BODY_TEMPLATE, csl);
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, FLB_AZURE_KUSTO_MGMT_URI_PATH,
+ body, flb_sds_len(body), NULL, 0, NULL, 0);
+
+ if (c) {
+ /* Add headers */
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ flb_http_add_header(c, "Content-Type", 12, "application/json", 16);
+ flb_http_add_header(c, "Accept", 6, "application/json", 16);
+ flb_http_add_header(c, "Authorization", 13, token,
+ flb_sds_len(token));
+ flb_http_buffer_size(c, FLB_HTTP_DATA_SIZE_MAX * 10);
+
+ /* Send HTTP request */
+ ret = flb_http_do(c, &b_sent);
+ flb_plg_debug(
+ ctx->ins,
+ "Kusto ingestion command request http_do=%i, HTTP Status: %i",
+ ret, c->resp.status);
+
+ if (ret == 0) {
+ if (c->resp.status == 200) {
+ /* Copy payload response to the response param */
+ resp =
+ flb_sds_create_len(c->resp.payload, c->resp.payload_size);
+ }
+ else if (c->resp.payload_size > 0) {
+ flb_plg_debug(ctx->ins, "Request failed and returned: \n%s",
+ c->resp.payload);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "Request failed");
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "cannot send HTTP request");
+ }
+
+ flb_http_client_destroy(c);
+ }
+ else {
+ flb_plg_error(ctx->ins, "cannot create HTTP client context");
+ }
+
+ flb_sds_destroy(body);
+ }
+ else {
+ flb_plg_error(ctx->ins, "cannot construct request body");
+ }
+
+ flb_sds_destroy(token);
+ }
+ else {
+ flb_plg_error(ctx->ins, "cannot retrieve oauth2 token");
+ }
+
+ flb_upstream_conn_release(u_conn);
+ }
+ else {
+ flb_plg_error(ctx->ins, "cannot create upstream connection");
+ }
+
+ return resp;
+}
+
+static int cb_azure_kusto_init(struct flb_output_instance *ins, struct flb_config *config,
+ void *data)
+{
+ int io_flags = FLB_IO_TLS;
+ struct flb_azure_kusto *ctx;
+
+ /* Create config context */
+ ctx = flb_azure_kusto_conf_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "configuration failed");
+ return -1;
+ }
+
+ flb_output_set_context(ins, ctx);
+
+ /* Network mode IPv6 */
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ /* Create mutex for acquiring oauth tokens and getting ingestion resources (they
+ * are shared across flush coroutines)
+ */
+ pthread_mutex_init(&ctx->token_mutex, NULL);
+ pthread_mutex_init(&ctx->resources_mutex, NULL);
+
+ /*
+ * Create upstream context for Kusto Ingestion endpoint
+ */
+ ctx->u = flb_upstream_create_url(config, ctx->ingestion_endpoint, io_flags, ins->tls);
+ if (!ctx->u) {
+ flb_plg_error(ctx->ins, "upstream creation failed");
+ return -1;
+ }
+
+ /* Create oauth2 context */
+ ctx->o =
+ flb_oauth2_create(ctx->config, ctx->oauth_url, FLB_AZURE_KUSTO_TOKEN_REFRESH);
+ if (!ctx->o) {
+ flb_plg_error(ctx->ins, "cannot create oauth2 context");
+ return -1;
+ }
+ flb_output_upstream_set(ctx->u, ins);
+
+ return 0;
+}
+
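+/*
+ * Repack the incoming msgpack records as a JSON array where each element
+ * holds the optional time/tag keys plus the record body under 'log_key'.
+ * With the default key names the payload sent to Kusto looks roughly like
+ * (values are made up for illustration):
+ *
+ *   [{"timestamp": "2024-01-02T03:04:05.000Z", "tag": "app.logs",
+ *     "log": {"message": "hello"}}]
+ */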
+static int azure_kusto_format(struct flb_azure_kusto *ctx, const char *tag, int tag_len,
+ const void *data, size_t bytes, void **out_data,
+ size_t *out_size)
+{
+ int records = 0;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ /* for sub msgpack objs */
+ int map_size;
+ struct tm tms;
+ char time_formatted[32];
+ size_t s;
+ int len;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+ /* output buffer */
+ flb_sds_t out_buf;
+
+ /* Create array for all records */
+ records = flb_mp_count(data, bytes);
+ if (records <= 0) {
+ flb_plg_error(ctx->ins, "error counting msgpack entries");
+ return -1;
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ msgpack_pack_array(&mp_pck, records);
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ map_size = 1;
+ if (ctx->include_time_key == FLB_TRUE) {
+ map_size++;
+ }
+
+ if (ctx->include_tag_key == FLB_TRUE) {
+ map_size++;
+ }
+
+ msgpack_pack_map(&mp_pck, map_size);
+
+ /* include_time_key */
+ if (ctx->include_time_key == FLB_TRUE) {
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->time_key));
+ msgpack_pack_str_body(&mp_pck, ctx->time_key, flb_sds_len(ctx->time_key));
+
+ /* Append the time value as ISO 8601 */
+ gmtime_r(&log_event.timestamp.tm.tv_sec, &tms);
+ s = strftime(time_formatted, sizeof(time_formatted) - 1,
+ FLB_PACK_JSON_DATE_ISO8601_FMT, &tms);
+
+ len = snprintf(time_formatted + s, sizeof(time_formatted) - 1 - s,
+ ".%03" PRIu64 "Z",
+ (uint64_t)log_event.timestamp.tm.tv_nsec / 1000000);
+ s += len;
+ msgpack_pack_str(&mp_pck, s);
+ msgpack_pack_str_body(&mp_pck, time_formatted, s);
+ }
+
+ /* include_tag_key */
+ if (ctx->include_tag_key == FLB_TRUE) {
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->tag_key));
+ msgpack_pack_str_body(&mp_pck, ctx->tag_key, flb_sds_len(ctx->tag_key));
+ msgpack_pack_str(&mp_pck, tag_len);
+ msgpack_pack_str_body(&mp_pck, tag, tag_len);
+ }
+
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->log_key));
+ msgpack_pack_str_body(&mp_pck, ctx->log_key, flb_sds_len(ctx->log_key));
+ msgpack_pack_object(&mp_pck, *log_event.body);
+ }
+
+ /* Convert from msgpack to JSON */
+ out_buf = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+
+ /* Cleanup */
+ flb_log_event_decoder_destroy(&log_decoder);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ if (!out_buf) {
+ flb_plg_error(ctx->ins, "error formatting JSON payload");
+ return -1;
+ }
+
+ *out_data = out_buf;
+ *out_size = flb_sds_len(out_buf);
+
+ return 0;
+}
+
+static void cb_azure_kusto_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins, void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ flb_sds_t json;
+ size_t json_size;
+ size_t tag_len;
+ struct flb_azure_kusto *ctx = out_context;
+
+ (void)i_ins;
+ (void)config;
+
+ flb_plg_trace(ctx->ins, "flushing bytes %zu", event_chunk->size);
+
+ tag_len = flb_sds_len(event_chunk->tag);
+
+ /* Load or refresh ingestion resources */
+ ret = azure_kusto_load_ingestion_resources(ctx, config);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "cannot load ingestion resources");
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Reformat msgpack to JSON payload */
+ ret = azure_kusto_format(ctx, event_chunk->tag, tag_len, event_chunk->data,
+ event_chunk->size, (void **)&json, &json_size);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "cannot reformat data into json");
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ ret = azure_kusto_queued_ingestion(ctx, event_chunk->tag, tag_len, json, json_size);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "cannot perform queued ingestion");
+ flb_sds_destroy(json);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Cleanup */
+ flb_sds_destroy(json);
+
+ /* Done */
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int cb_azure_kusto_exit(void *data, struct flb_config *config)
+{
+ struct flb_azure_kusto *ctx = data;
+
+ if (!ctx) {
+ return -1;
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ ctx->u = NULL;
+ }
+
+ flb_azure_kusto_conf_destroy(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {FLB_CONFIG_MAP_STR, "tenant_id", (char *)NULL, 0, FLB_TRUE,
+ offsetof(struct flb_azure_kusto, tenant_id),
+ "Set the tenant ID of the AAD application used for authentication"},
+ {FLB_CONFIG_MAP_STR, "client_id", (char *)NULL, 0, FLB_TRUE,
+ offsetof(struct flb_azure_kusto, client_id),
+ "Set the client ID (Application ID) of the AAD application used for authentication"},
+ {FLB_CONFIG_MAP_STR, "client_secret", (char *)NULL, 0, FLB_TRUE,
+ offsetof(struct flb_azure_kusto, client_secret),
+ "Set the client secret (Application Password) of the AAD application used for "
+ "authentication"},
+ {FLB_CONFIG_MAP_STR, "ingestion_endpoint", (char *)NULL, 0, FLB_TRUE,
+ offsetof(struct flb_azure_kusto, ingestion_endpoint),
+ "Set the Kusto cluster's ingestion endpoint URL (e.g. "
+ "https://ingest-mycluster.eastus.kusto.windows.net)"},
+ {FLB_CONFIG_MAP_STR, "database_name", (char *)NULL, 0, FLB_TRUE,
+ offsetof(struct flb_azure_kusto, database_name), "Set the database name"},
+ {FLB_CONFIG_MAP_STR, "table_name", (char *)NULL, 0, FLB_TRUE,
+ offsetof(struct flb_azure_kusto, table_name), "Set the table name"},
+ {FLB_CONFIG_MAP_STR, "ingestion_mapping_reference", (char *)NULL, 0, FLB_TRUE,
+ offsetof(struct flb_azure_kusto, ingestion_mapping_reference),
+ "Set the ingestion mapping reference"},
+ {FLB_CONFIG_MAP_STR, "log_key", FLB_AZURE_KUSTO_DEFAULT_LOG_KEY, 0, FLB_TRUE,
+     offsetof(struct flb_azure_kusto, log_key), "The key name of the event payload"},
+ {FLB_CONFIG_MAP_BOOL, "include_tag_key", "true", 0, FLB_TRUE,
+ offsetof(struct flb_azure_kusto, include_tag_key),
+ "If enabled, tag is appended to output. "
+ "The key name is used 'tag_key' property."},
+ {FLB_CONFIG_MAP_STR, "tag_key", FLB_AZURE_KUSTO_DEFAULT_TAG_KEY, 0, FLB_TRUE,
+ offsetof(struct flb_azure_kusto, tag_key),
+ "The key name of tag. If 'include_tag_key' is false, "
+ "This property is ignored"},
+ {FLB_CONFIG_MAP_BOOL, "include_time_key", "true", 0, FLB_TRUE,
+ offsetof(struct flb_azure_kusto, include_time_key),
+ "If enabled, time is appended to output. "
+ "The key name is used 'time_key' property."},
+ {FLB_CONFIG_MAP_STR, "time_key", FLB_AZURE_KUSTO_DEFAULT_TIME_KEY, 0, FLB_TRUE,
+ offsetof(struct flb_azure_kusto, time_key),
+ "The key name of the time. If 'include_time_key' is false, "
+ "This property is ignored"},
+ /* EOF */
+ {0}};
+
+struct flb_output_plugin out_azure_kusto_plugin = {
+ .name = "azure_kusto",
+ .description = "Send events to Kusto (Azure Data Explorer)",
+ .cb_init = cb_azure_kusto_init,
+ .cb_flush = cb_azure_kusto_flush,
+ .cb_exit = cb_azure_kusto_exit,
+ .config_map = config_map,
+ /* Plugin flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_azure_kusto/azure_kusto.h b/src/fluent-bit/plugins/out_azure_kusto/azure_kusto.h
new file mode 100644
index 000000000..ac4eedfd0
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_kusto/azure_kusto.h
@@ -0,0 +1,110 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_AZURE_KUSTO
+#define FLB_OUT_AZURE_KUSTO
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_oauth2.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_upstream_ha.h>
+
+/* refresh token every 50 minutes */
+#define FLB_AZURE_KUSTO_TOKEN_REFRESH 3000
+
+/* Kusto streaming inserts oauth scope */
+#define FLB_AZURE_KUSTO_SCOPE "https://help.kusto.windows.net/.default"
+
+/* MSAL authorization URL */
+#define FLB_MSAL_AUTH_URL_TEMPLATE \
+ "https://login.microsoftonline.com/%s/oauth2/v2.0/token"
+
+#define FLB_AZURE_KUSTO_MGMT_URI_PATH "/v1/rest/mgmt"
+#define FLB_AZURE_KUSTO_MGMT_BODY_TEMPLATE "{\"csl\":\"%s\", \"db\": \"NetDefaultDB\"}"
+
+#define FLB_AZURE_KUSTO_DEFAULT_TIME_KEY "timestamp"
+#define FLB_AZURE_KUSTO_DEFAULT_TAG_KEY "tag"
+#define FLB_AZURE_KUSTO_DEFAULT_LOG_KEY "log"
+
+#define AZURE_KUSTO_RESOURCE_STORAGE 0
+#define AZURE_KUSTO_RESOURCE_QUEUE 1
+
+#define AZURE_KUSTO_RESOURCE_UPSTREAM_URI "uri"
+#define AZURE_KUSTO_RESOURCE_UPSTREAM_SAS "sas"
+
+#define FLB_AZURE_KUSTO_RESOURCES_LOAD_INTERVAL_SEC 3600
+
+struct flb_azure_kusto_resources {
+ struct flb_upstream_ha *blob_ha;
+ struct flb_upstream_ha *queue_ha;
+ flb_sds_t identity_token;
+
+    /* used to reload resources after some time */
+ time_t load_time;
+};
+
+struct flb_azure_kusto {
+ /* azure_kusto configuration */
+ flb_sds_t tenant_id;
+ flb_sds_t client_id;
+ flb_sds_t client_secret;
+ flb_sds_t ingestion_endpoint;
+ flb_sds_t database_name;
+ flb_sds_t table_name;
+ flb_sds_t ingestion_mapping_reference;
+
+ /* records configuration */
+ flb_sds_t log_key;
+ int include_tag_key;
+ flb_sds_t tag_key;
+ int include_time_key;
+ flb_sds_t time_key;
+
+ /* --- internal data --- */
+
+ flb_sds_t ingestion_mgmt_endpoint;
+
+ /* oauth2 context */
+ flb_sds_t oauth_url;
+ struct flb_oauth2 *o;
+
+ /* mutex for acquiring oauth tokens */
+ pthread_mutex_t token_mutex;
+
+ /* ingestion resources */
+ struct flb_azure_kusto_resources *resources;
+
+    /* mutex for loading resources */
+ pthread_mutex_t resources_mutex;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *u;
+
+ /* Fluent Bit context */
+ struct flb_config *config;
+
+ /* Plugin output instance reference */
+ struct flb_output_instance *ins;
+};
+
+flb_sds_t get_azure_kusto_token(struct flb_azure_kusto *ctx);
+flb_sds_t execute_ingest_csl_command(struct flb_azure_kusto *ctx, const char *csl);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_azure_kusto/azure_kusto_conf.c b/src/fluent-bit/plugins/out_azure_kusto/azure_kusto_conf.c
new file mode 100644
index 000000000..5303fef67
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_kusto/azure_kusto_conf.c
@@ -0,0 +1,665 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_jsmn.h>
+#include <fluent-bit/flb_oauth2.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_unescape.h>
+#include <fluent-bit/flb_upstream_ha.h>
+#include <fluent-bit/flb_utils.h>
+
+#include "azure_kusto.h"
+#include "azure_kusto_conf.h"
+
+static struct flb_upstream_node *flb_upstream_node_create_url(struct flb_azure_kusto *ctx,
+ struct flb_config *config,
+ const char *url)
+{
+ int ret;
+ char *prot = NULL;
+ char *host = NULL;
+ char *port = NULL;
+ char *uri = NULL;
+ flb_sds_t sds_host = NULL;
+ flb_sds_t sds_port = NULL;
+ char *tmp;
+ struct flb_hash_table *kv = NULL;
+ struct flb_upstream_node *node = NULL;
+ int uri_length;
+ int sas_length;
+
+ /* Parse and split URL */
+ ret = flb_utils_url_split(url, &prot, &host, &port, &uri);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "invalid URL: %s", url);
+ return NULL;
+ }
+
+ /* find sas token in query */
+ tmp = strchr(uri, '?');
+
+ if (tmp) {
+ uri_length = tmp - uri;
+ sas_length = strnlen(tmp + 1, 256);
+
+ /* kv that will hold base uri, and sas token */
+ kv = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE, 2, 2);
+
+ if (kv) {
+ ret = flb_hash_table_add(kv, AZURE_KUSTO_RESOURCE_UPSTREAM_URI, 3, uri, uri_length);
+
+ if (ret != -1) {
+ ret = flb_hash_table_add(kv, AZURE_KUSTO_RESOURCE_UPSTREAM_SAS, 3, tmp + 1,
+ sas_length);
+
+ if (ret != -1) {
+                    /* if any of these creations fails, the node creation will fail and clean up */
+ sds_host = flb_sds_create(host);
+ sds_port = flb_sds_create(port);
+
+ node = flb_upstream_node_create(
+ NULL, sds_host, sds_port, FLB_TRUE, ctx->ins->tls->verify,
+ ctx->ins->tls->debug, ctx->ins->tls->vhost, NULL, NULL, NULL,
+ NULL, NULL, kv, config);
+
+ if (!node) {
+ flb_plg_error(ctx->ins, "error creating resource upstream node");
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "error storing resource sas token");
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "error storing resource uri");
+ }
+
+            /* avoid destroying the hash table if the node was created successfully */
+ if (!node) {
+ flb_hash_table_destroy(kv);
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "error creating upstream node hash table");
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "uri has no sas token query: %s", uri);
+ }
+
+ flb_free(prot);
+ flb_free(host);
+ flb_free(port);
+ flb_free(uri);
+
+ return node;
+}
+
+static int flb_azure_kusto_resources_clear(struct flb_azure_kusto_resources *resources)
+{
+ if (!resources) {
+ return -1;
+ }
+
+ if (resources->blob_ha) {
+ flb_upstream_ha_destroy(resources->blob_ha);
+ resources->blob_ha = NULL;
+ }
+
+ if (resources->queue_ha) {
+ flb_upstream_ha_destroy(resources->queue_ha);
+ resources->queue_ha = NULL;
+ }
+
+ if (resources->identity_token) {
+ flb_sds_destroy(resources->identity_token);
+ resources->identity_token = NULL;
+ }
+
+ resources->load_time = 0;
+
+ return 0;
+}
+
+/**
+ * Parses ".get ingestion resources" response into HA upstreams of the queue & blob
+ * resources in the response.
+ *
+ * @param ctx Pointer to the plugin's context
+ * @param config Pointer to the config
+ * @param response sds string containing the response body
+ * @param blob_ha Pointer to an HA upstream for the blob resources, that would be
+ * allocated here.
+ * @param queue_ha Pointer to an HA upstream for the queue resources, that would be
+ * allocated here.
+ * @return int 0 on success, -1 on failure
+ */
+static int parse_storage_resources(struct flb_azure_kusto *ctx, struct flb_config *config,
+ flb_sds_t response, struct flb_upstream_ha *blob_ha,
+ struct flb_upstream_ha *queue_ha)
+{
+ jsmn_parser parser;
+ jsmntok_t *t;
+ jsmntok_t *tokens;
+ int tok_size = 100;
+ int ret = -1;
+ int i;
+ int blob_count = 0;
+ int queue_count = 0;
+ char *token_str;
+ int token_str_len;
+ int resource_type;
+ struct flb_upstream_node *node;
+ struct flb_upstream_ha *ha;
+ flb_sds_t resource_uri;
+
+ /* Response is a json in the form of
+ * {
+ * "Tables": [
+ * {
+ * "TableName": "Table_0",
+ * "Columns": [...],
+ * "Rows": [
+ * [
+ * ("TempStorage" | "SecuredReadyForAggregationQueue" |
+ * "SuccessfulIngestionsQueue" | "FailedIngestionsQueue" | "IngestionsStatusTable"),
+ * <URI with SAS>
+ * ],
+ * ...
+ * ]
+ * }
+ * ]
+ * }
+ */
+
+ resource_uri = flb_sds_create(NULL);
+ if (!resource_uri) {
+ flb_plg_error(ctx->ins, "error allocating resource uri buffer");
+ return -1;
+ }
+
+ jsmn_init(&parser);
+ tokens = flb_calloc(1, sizeof(jsmntok_t) * tok_size);
+
+ if (tokens) {
+ ret = jsmn_parse(&parser, response, flb_sds_len(response), tokens, tok_size);
+
+ if (ret > 0) {
+ /* skip all tokens until we reach "Rows" */
+ for (i = 0; i < ret - 1; i++) {
+ t = &tokens[i];
+
+ if (t->type != JSMN_STRING) {
+ continue;
+ }
+
+ token_str = response + t->start;
+ token_str_len = (t->end - t->start);
+
+ /**
+             * if we found the "Rows" key, skip this token and the next one
+             * (the key and its wrapping array value)
+             */
+ if (token_str_len == 4 && strncmp(token_str, "Rows", 4) == 0) {
+ i += 2;
+ break;
+ }
+ }
+
+ /* iterating rows, each row will have 3 tokens: the array holding the column
+ * values, the first value containing the resource type, and the second value
+ * containing the resource uri */
+ for (; i < ret; i++) {
+ t = &tokens[i];
+
+ /**
+ * each token should be an array with 2 strings:
+ * First will be the resource type (TempStorage,
+ * SecuredReadyForAggregationQueue, etc...) Second will be the SAS URI
+ */
+ if (t->type != JSMN_ARRAY) {
+ break;
+ }
+
+ /* move to the next token, first item in the array - resource type */
+ i++;
+ t = &tokens[i];
+ if (t->type != JSMN_STRING) {
+ break;
+ }
+
+ token_str = response + t->start;
+ token_str_len = (t->end - t->start);
+
+ flb_plg_debug(ctx->ins, "found resource of type: %.*s ",
+ t->end - t->start, response + t->start);
+
+ if (token_str_len == 11 && strncmp(token_str, "TempStorage", 11) == 0) {
+ resource_type = AZURE_KUSTO_RESOURCE_STORAGE;
+ }
+ else if (token_str_len == 31 &&
+ strncmp(token_str, "SecuredReadyForAggregationQueue", 31) == 0) {
+ resource_type = AZURE_KUSTO_RESOURCE_QUEUE;
+ }
+ /* we don't care about other resources so we just skip the next token and
+ move on to the next pair */
+ else {
+ i++;
+ continue;
+ }
+
+ /* move to the next token, second item in the array - resource URI */
+ i++;
+ t = &tokens[i];
+
+ if (t->type != JSMN_STRING) {
+ break;
+ }
+
+ token_str = response + t->start;
+ token_str_len = (t->end - t->start);
+
+ resource_uri = flb_sds_copy(resource_uri, token_str, token_str_len);
+ if (resource_type == AZURE_KUSTO_RESOURCE_QUEUE) {
+ ha = queue_ha;
+ queue_count++;
+ }
+ else {
+ ha = blob_ha;
+ blob_count++;
+ }
+
+ if (!ha) {
+ flb_plg_error(ctx->ins, "error creating HA upstream");
+ ret = -1;
+ break;
+ }
+
+ node = flb_upstream_node_create_url(ctx, config, resource_uri);
+
+ if (!node) {
+ flb_plg_error(ctx->ins, "error creating HA upstream node");
+ ret = -1;
+ break;
+ }
+
+ flb_upstream_ha_node_add(ha, node);
+ }
+
+ if (ret != -1) {
+ if (queue_count > 0 && blob_count > 0) {
+ flb_plg_debug(ctx->ins,
+ "parsed %d blob resources and %d queue resources",
+ blob_count, queue_count);
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "error parsing resources: missing resources");
+ ret = -1;
+ }
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "error parsing JSON response: %s", response);
+ ret = -1;
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "error allocating tokens");
+ ret = -1;
+ }
+
+ flb_sds_destroy(resource_uri);
+ flb_free(tokens);
+
+ return ret;
+}
+
+/**
+ * Parses ".get kusto identity token" response and returns the token as an sds string
+ *
+ * @param ctx Pointer to the plugin's context
+ * @param response sds string containing the response body
+ * @return flb_sds_t The parsed token
+ */
+static flb_sds_t parse_ingestion_identity_token(struct flb_azure_kusto *ctx,
+ flb_sds_t response)
+{
+ flb_sds_t identity_token = NULL;
+ int tok_size = 19;
+ jsmn_parser parser;
+ jsmntok_t *t;
+ jsmntok_t *tokens;
+ int ret;
+ char *token_str;
+ int token_str_len;
+
+ /**
+ * Response is a json in the form of
+ * {
+ * "Tables": [
+ * {
+ * "TableName": "Table_0",
+ * "Columns": [{
+ * "ColumnName": "AuthorizationContext",
+ * "DataType": "String",
+ * "ColumnType": "string"
+ * }],
+ * "Rows": [
+ * [
+ * <value>,
+ * ]
+ * ]
+ * }
+ * ]
+ * }
+     * i.e. only one row and one column is expected (exactly 19 tokens) and the value
+     * should be the last token
+ */
+
+ jsmn_init(&parser);
+ tokens = flb_calloc(1, sizeof(jsmntok_t) * tok_size);
+ if (!tokens) {
+ flb_plg_error(ctx->ins, "error allocating tokens");
+ return NULL;
+ }
+
+ ret = jsmn_parse(&parser, response, flb_sds_len(response), tokens, tok_size);
+ if (ret > 0) {
+ t = &tokens[tok_size - 1];
+
+ if (t->type == JSMN_STRING) {
+ t = &tokens[tok_size - 1];
+ token_str = response + t->start;
+ token_str_len = (t->end - t->start);
+
+ identity_token = flb_sds_create_len(token_str, token_str_len);
+
+ if (identity_token) {
+ flb_plg_debug(ctx->ins, "parsed kusto identity token: '%s'",
+ identity_token);
+ }
+ else {
+ flb_plg_error(ctx->ins, "error parsing kusto identity token");
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "unexpected JSON response: %s", response);
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "error parsing JSON response: %s", response);
+ }
+
+ flb_free(tokens);
+
+ return identity_token;
+}
+
+int azure_kusto_load_ingestion_resources(struct flb_azure_kusto *ctx,
+ struct flb_config *config)
+{
+ int ret = -1;
+ flb_sds_t response = NULL;
+ flb_sds_t identity_token = NULL;
+ struct flb_upstream_ha *blob_ha = NULL;
+ struct flb_upstream_ha *queue_ha = NULL;
+ time_t now;
+
+ if (pthread_mutex_lock(&ctx->resources_mutex)) {
+ flb_plg_error(ctx->ins, "error locking mutex");
+ return -1;
+ }
+
+ now = time(NULL);
+
+ /* check if we have all resources and they are not stale */
+ if (ctx->resources->blob_ha && ctx->resources->queue_ha &&
+ ctx->resources->identity_token &&
+ now - ctx->resources->load_time < FLB_AZURE_KUSTO_RESOURCES_LOAD_INTERVAL_SEC) {
+ flb_plg_debug(ctx->ins, "resources are already loaded and are not stale");
+ ret = 0;
+ }
+ else {
+ flb_plg_info(ctx->ins, "loading kusto ingestion resourcs");
+ response = execute_ingest_csl_command(ctx, ".get ingestion resources");
+
+ if (response) {
+ queue_ha = flb_upstream_ha_create("azure_kusto_queue_ha");
+
+ if (queue_ha) {
+ blob_ha = flb_upstream_ha_create("azure_kusto_blob_ha");
+
+ if (blob_ha) {
+ ret =
+ parse_storage_resources(ctx, config, response, blob_ha, queue_ha);
+
+ if (ret == 0) {
+ flb_sds_destroy(response);
+ response = NULL;
+
+ response =
+ execute_ingest_csl_command(ctx, ".get kusto identity token");
+
+ if (response) {
+ identity_token =
+ parse_ingestion_identity_token(ctx, response);
+
+ if (identity_token) {
+ ret = flb_azure_kusto_resources_clear(ctx->resources);
+
+ if (ret != -1) {
+ ctx->resources->blob_ha = blob_ha;
+ ctx->resources->queue_ha = queue_ha;
+ ctx->resources->identity_token = identity_token;
+ ctx->resources->load_time = now;
+
+ ret = 0;
+ }
+ else {
+ flb_plg_error(
+ ctx->ins,
+ "error destroying previous ingestion resources");
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "error parsing ingestion identity token");
+ ret = -1;
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "error getting kusto identity token");
+ ret = -1;
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "error parsing ingestion storage resources");
+ ret = -1;
+ }
+
+ if (ret == -1) {
+ flb_upstream_ha_destroy(blob_ha);
+ }
+ }
+ else {
+                    flb_plg_error(ctx->ins, "error creating blob storage resources upstream");
+ ret = -1;
+ }
+
+ if (ret == -1) {
+ flb_upstream_ha_destroy(queue_ha);
+ }
+ }
+ else {
+                flb_plg_error(ctx->ins, "error creating queue storage resources upstream");
+ }
+
+ if (response) {
+ flb_sds_destroy(response);
+ }
+ }
+ if (!response) {
+ flb_plg_error(ctx->ins, "error getting ingestion storage resources");
+ }
+ }
+
+ if (pthread_mutex_unlock(&ctx->resources_mutex)) {
+ flb_plg_error(ctx->ins, "error unlocking mutex");
+ return -1;
+ }
+
+ return ret;
+}
+
+static int flb_azure_kusto_resources_destroy(struct flb_azure_kusto_resources *resources)
+{
+ int ret;
+
+ if (!resources) {
+ return -1;
+ }
+
+ ret = flb_azure_kusto_resources_clear(resources);
+ if (ret != 0) {
+ return -1;
+ }
+
+ flb_free(resources);
+
+ return 0;
+}
+
+struct flb_azure_kusto *flb_azure_kusto_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ struct flb_azure_kusto *ctx;
+
+ /* Allocate config context */
+ ctx = flb_calloc(1, sizeof(struct flb_azure_kusto));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ ctx->config = config;
+
+ ret = flb_output_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to load configuration");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* config: 'tenant_id' */
+ if (ctx->tenant_id == NULL) {
+ flb_plg_error(ctx->ins, "property 'tenant_id' is not defined.");
+ flb_azure_kusto_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* config: 'client_id' */
+ if (ctx->client_id == NULL) {
+ flb_plg_error(ctx->ins, "property 'client_id' is not defined");
+ flb_azure_kusto_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* config: 'client_secret' */
+ if (ctx->client_secret == NULL) {
+ flb_plg_error(ctx->ins, "property 'client_secret' is not defined");
+ flb_azure_kusto_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* config: 'ingestion_endpoint' */
+ if (ctx->ingestion_endpoint == NULL) {
+ flb_plg_error(ctx->ins, "property 'ingestion_endpoint' is not defined");
+ flb_azure_kusto_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* config: 'database_name' */
+ if (ctx->database_name == NULL) {
+ flb_plg_error(ctx->ins, "property 'database_name' is not defined");
+ flb_azure_kusto_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* config: 'table_name' */
+ if (ctx->table_name == NULL) {
+ flb_plg_error(ctx->ins, "property 'table_name' is not defined");
+ flb_azure_kusto_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* Create the auth URL */
+ ctx->oauth_url = flb_sds_create_size(sizeof(FLB_MSAL_AUTH_URL_TEMPLATE) - 1 +
+ flb_sds_len(ctx->tenant_id));
+ if (!ctx->oauth_url) {
+ flb_errno();
+ flb_azure_kusto_conf_destroy(ctx);
+ return NULL;
+ }
+ flb_sds_snprintf(&ctx->oauth_url, flb_sds_alloc(ctx->oauth_url),
+ FLB_MSAL_AUTH_URL_TEMPLATE, ctx->tenant_id);
+
+ ctx->resources = flb_calloc(1, sizeof(struct flb_azure_kusto_resources));
+ if (!ctx->resources) {
+ flb_errno();
+ flb_azure_kusto_conf_destroy(ctx);
+ return NULL;
+ }
+
+ flb_plg_info(ctx->ins, "endpoint='%s', database='%s', table='%s'",
+ ctx->ingestion_endpoint, ctx->database_name, ctx->table_name);
+
+ return ctx;
+}
+
+int flb_azure_kusto_conf_destroy(struct flb_azure_kusto *ctx)
+{
+ if (!ctx) {
+ return -1;
+ }
+
+ if (ctx->oauth_url) {
+ flb_sds_destroy(ctx->oauth_url);
+ ctx->oauth_url = NULL;
+ }
+
+ if (ctx->o) {
+ flb_oauth2_destroy(ctx->o);
+ ctx->o = NULL;
+ }
+
+ if (ctx->resources) {
+ flb_azure_kusto_resources_destroy(ctx->resources);
+ ctx->resources = NULL;
+ }
+
+ flb_free(ctx);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/out_azure_kusto/azure_kusto_conf.h b/src/fluent-bit/plugins/out_azure_kusto/azure_kusto_conf.h
new file mode 100644
index 000000000..b4b2e3a39
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_kusto/azure_kusto_conf.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_AZURE_KUSTO_CONF_H
+#define FLB_OUT_AZURE_KUSTO_CONF_H
+
+#include "azure_kusto.h"
+
+int azure_kusto_load_ingestion_resources(struct flb_azure_kusto *ctx,
+ struct flb_config *config);
+struct flb_azure_kusto *flb_azure_kusto_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+int flb_azure_kusto_conf_destroy(struct flb_azure_kusto *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_azure_kusto/azure_kusto_ingest.c b/src/fluent-bit/plugins/out_azure_kusto/azure_kusto_ingest.c
new file mode 100644
index 000000000..d38d92e7f
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_kusto/azure_kusto_ingest.c
@@ -0,0 +1,496 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_base64.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_random.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_utils.h>
+
+#include <math.h>
+#include <msgpack.h>
+
+#include "azure_kusto_ingest.h"
+
+/* not really a UUID, just a random string in the form 00000000-0000-0000-0000-000000000000 */
+static char *generate_uuid()
+{
+ char *chars = "0123456789abcdef";
+ char *uuid;
+ int i;
+ uint64_t rand;
+
+ uuid = flb_malloc(37);
+ if (!uuid) {
+ flb_errno();
+ return NULL;
+ }
+
+ for (i = 0; i < 36; i++) {
+ if (i == 8 || i == 13 || i == 18 || i == 23) {
+ uuid[i] = '-';
+ continue;
+ }
+
+ if (flb_random_bytes((unsigned char *)&rand, sizeof(uint64_t))) {
+ rand = time(NULL);
+ }
+ uuid[i] = chars[rand % 16];
+ }
+ uuid[36] = '\0';
+
+ return uuid;
+}
+
+static char *base64_encode(flb_sds_t s, size_t len, size_t *out_len)
+{
+ char *b64;
+ int ret;
+ size_t buffer_len = 4 * ceil(((double)len / 3) + 1);
+
+ b64 = flb_malloc(buffer_len);
+ if (!b64) {
+ flb_errno();
+ return NULL;
+ }
+
+ ret = flb_base64_encode((unsigned char *)b64, buffer_len, out_len, (unsigned char *)s,
+ len);
+ if (ret != 0) {
+ flb_error("cannot encode string %s into base64", s);
+ flb_free(b64);
+ return NULL;
+ }
+
+ return b64;
+}
+
+static flb_sds_t azure_kusto_create_blob_uri(struct flb_azure_kusto *ctx,
+ struct flb_upstream_node *u_node,
+ flb_sds_t blob_id)
+{
+ int ret;
+ flb_sds_t uri = NULL;
+ char *blob_uri;
+ size_t blob_uri_size;
+ char *blob_sas;
+ size_t blob_sas_size;
+
+ ret = flb_hash_table_get(u_node->ht, AZURE_KUSTO_RESOURCE_UPSTREAM_URI, 3,
+ (void **)&blob_uri, &blob_uri_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error getting blob uri");
+ return NULL;
+ }
+
+ ret = flb_hash_table_get(u_node->ht, AZURE_KUSTO_RESOURCE_UPSTREAM_SAS, 3,
+ (void **)&blob_sas, &blob_sas_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error getting blob sas token");
+ return NULL;
+ }
+
+ /* uri will be https://<blob_host>/<container_uri>/<blob_id>.multijson?<sas_token> */
+ uri = flb_sds_create_size(flb_sds_len(u_node->host) + blob_uri_size + blob_sas_size +
+ flb_sds_len(blob_id) + 21);
+
+ if (uri) {
+ flb_sds_snprintf(&uri, flb_sds_alloc(uri), "https://%s%s/%s.multijson?%s",
+ u_node->host, blob_uri, blob_id, blob_sas);
+ flb_plg_debug(ctx->ins, "created blob uri %s", uri);
+ }
+ else {
+ flb_plg_error(ctx->ins, "cannot create blob uri buffer");
+ }
+
+ return uri;
+}
+
+static flb_sds_t azure_kusto_create_blob(struct flb_azure_kusto *ctx, flb_sds_t blob_id,
+ flb_sds_t payload, size_t payload_size)
+{
+ int ret = -1;
+ flb_sds_t uri = NULL;
+ struct flb_upstream_node *u_node;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ size_t resp_size;
+ time_t now;
+ struct tm tm;
+ char tmp[64];
+ int len;
+
+ now = time(NULL);
+ gmtime_r(&now, &tm);
+ len = strftime(tmp, sizeof(tmp) - 1, "%a, %d %b %Y %H:%M:%S GMT", &tm);
+
+ u_node = flb_upstream_ha_node_get(ctx->resources->blob_ha);
+ if (!u_node) {
+ flb_plg_error(ctx->ins, "error getting blob upstream");
+ return NULL;
+ }
+
+ u_conn = flb_upstream_conn_get(u_node->u);
+
+ if (u_conn) {
+ uri = azure_kusto_create_blob_uri(ctx, u_node, blob_id);
+
+ if (uri) {
+ flb_plg_debug(ctx->ins, "uploading payload to blob uri: %s", uri);
+ c = flb_http_client(u_conn, FLB_HTTP_PUT, uri, payload, payload_size, NULL, 0,
+ NULL, 0);
+
+ if (c) {
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ flb_http_add_header(c, "Content-Type", 12, "application/json", 16);
+ flb_http_add_header(c, "x-ms-blob-type", 14, "BlockBlob", 9);
+ flb_http_add_header(c, "x-ms-date", 9, tmp, len);
+ flb_http_add_header(c, "x-ms-version", 12, "2019-12-12", 10);
+
+ ret = flb_http_do(c, &resp_size);
+ flb_plg_debug(ctx->ins,
+ "kusto blob upload request http_do=%i, HTTP Status: %i",
+ ret, c->resp.status);
+
+ if (ret == 0) {
+ /* Validate return status and HTTP status if set */
+ if (c->resp.status != 201) {
+ ret = -1;
+
+ if (c->resp.payload_size > 0) {
+ flb_plg_debug(ctx->ins, "Request failed and returned: \n%s",
+ c->resp.payload);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "Request failed");
+ }
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "cannot send HTTP request");
+ }
+
+ flb_http_client_destroy(c);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "cannot create HTTP client context for blob container");
+ }
+
+ if (ret != 0) {
+ flb_sds_destroy(uri);
+ uri = NULL;
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "error creating blob container uri buffer");
+ }
+
+ flb_upstream_conn_release(u_conn);
+ }
+ else {
+ flb_plg_error(ctx->ins, "error getting blob container upstream connection");
+ }
+
+ return uri;
+}
+
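+/*
+ * Build the queued ingestion message: a JSON document describing the blob
+ * (id, blob path, raw size, database, table, identity token and optional
+ * mapping reference), which is then base64 encoded and wrapped as
+ * "<QueueMessage><MessageText>...base64...</MessageText></QueueMessage>",
+ * following the format strings used below.
+ */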
+static flb_sds_t create_ingestion_message(struct flb_azure_kusto *ctx, flb_sds_t blob_uri,
+ size_t payload_size)
+{
+ flb_sds_t message = NULL;
+ int ret = 0;
+ char *uuid;
+ char *message_b64;
+ size_t b64_len;
+ size_t message_len;
+
+ uuid = generate_uuid();
+ if (uuid) {
+ message = flb_sds_create(NULL);
+
+ if (message) {
+ message_len =
+ flb_sds_snprintf(&message, 0,
+ "{\"Id\": \"%s\", \"BlobPath\": \"%s\", "
+ "\"RawDataSize\": %lu, \"DatabaseName\": "
+ "\"%s\", \"TableName\": \"%s\","
+ "\"AdditionalProperties\": { \"format\": \"multijson\", "
+ "\"authorizationContext\": "
+ "\"%s\", \"jsonMappingReference\": \"%s\" }}%c",
+ uuid, blob_uri, payload_size, ctx->database_name,
+ ctx->table_name, ctx->resources->identity_token,
+ ctx->ingestion_mapping_reference == NULL
+ ? ""
+ : ctx->ingestion_mapping_reference, 0);
+
+ if (message_len != -1) {
+ flb_plg_debug(ctx->ins, "created ingestion message:\n%s", message);
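+                /* the queue REST API expects the message text base64-encoded inside a <QueueMessage> XML envelope */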
+ message_b64 = base64_encode(message, message_len, &b64_len);
+
+ if (message_b64) {
+ ret = flb_sds_snprintf(
+ &message, flb_sds_alloc(message),
+ "<QueueMessage><MessageText>%s</MessageText></QueueMessage>%c",
+ message_b64, 0);
+
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error creating ingestion queue message");
+ }
+
+ flb_free(message_b64);
+ }
+ else {
+ flb_plg_error(ctx->ins, "error encoding ingestion message to base64");
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "error creating ingestion message");
+ ret = -1;
+ }
+
+ if (ret == -1) {
+ flb_sds_destroy(message);
+ message = NULL;
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "error creating ingestion message buffer");
+ }
+
+ flb_free(uuid);
+ }
+ else {
+ flb_plg_error(ctx->ins, "error generating unique ingestion UUID");
+ }
+
+ return message;
+}
+
+static flb_sds_t azure_kusto_create_queue_uri(struct flb_azure_kusto *ctx,
+ struct flb_upstream_node *u_node)
+{
+ int ret;
+ flb_sds_t uri = NULL;
+ char *queue_uri;
+ size_t queue_uri_size;
+ char *queue_sas;
+ size_t queue_sas_size;
+
+ ret = flb_hash_table_get(u_node->ht, AZURE_KUSTO_RESOURCE_UPSTREAM_URI, 3,
+ (void **)&queue_uri, &queue_uri_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error getting queue uri");
+ return NULL;
+ }
+
+ ret = flb_hash_table_get(u_node->ht, AZURE_KUSTO_RESOURCE_UPSTREAM_SAS, 3,
+ (void **)&queue_sas, &queue_sas_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error getting queue sas token");
+ return NULL;
+ }
+
+ /* uri will be <container_uri>/messages?<sas_token> */
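+    /* the extra 11 bytes account for "/messages?" (10) and the NUL terminator */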
+ uri = flb_sds_create_size(queue_uri_size + queue_sas_size + 11);
+
+ if (uri) {
+ flb_sds_snprintf(&uri, flb_sds_alloc(uri), "%s/messages?%s", queue_uri,
+ queue_sas);
+ flb_plg_debug(ctx->ins, "created queue uri %s", uri);
+ }
+ else {
+ flb_plg_error(ctx->ins, "cannot create queue uri buffer");
+ }
+
+ return uri;
+}
+
+static int azure_kusto_enqueue_ingestion(struct flb_azure_kusto *ctx, flb_sds_t blob_uri,
+ size_t payload_size)
+{
+ int ret = -1;
+ struct flb_upstream_node *u_node;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ flb_sds_t uri;
+ flb_sds_t payload;
+ size_t resp_size;
+ time_t now;
+ struct tm tm;
+ char tmp[64];
+ int len;
+
+ now = time(NULL);
+ gmtime_r(&now, &tm);
+ len = strftime(tmp, sizeof(tmp) - 1, "%a, %d %b %Y %H:%M:%S GMT", &tm);
+
+ u_node = flb_upstream_ha_node_get(ctx->resources->queue_ha);
+ if (!u_node) {
+ flb_plg_error(ctx->ins, "error getting queue upstream");
+ return -1;
+ }
+
+ u_conn = flb_upstream_conn_get(u_node->u);
+
+ if (u_conn) {
+ uri = azure_kusto_create_queue_uri(ctx, u_node);
+
+ if (uri) {
+ payload = create_ingestion_message(ctx, blob_uri, payload_size);
+
+ if (payload) {
+ c = flb_http_client(u_conn, FLB_HTTP_POST, uri, payload,
+ flb_sds_len(payload), NULL, 0, NULL, 0);
+
+ if (c) {
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ flb_http_add_header(c, "Content-Type", 12, "application/atom+xml",
+ 20);
+ flb_http_add_header(c, "x-ms-date", 9, tmp, len);
+ flb_http_add_header(c, "x-ms-version", 12, "2019-12-12", 10);
+
+ ret = flb_http_do(c, &resp_size);
+ flb_plg_debug(ctx->ins,
+ "kusto queue request http_do=%i, HTTP Status: %i", ret,
+ c->resp.status);
+
+ if (ret == 0) {
+ /* Validate return status and HTTP status if set */
+ if (c->resp.status != 201) {
+ ret = -1;
+
+ if (c->resp.payload_size > 0) {
+ flb_plg_debug(ctx->ins,
+ "Request failed and returned: \n%s",
+ c->resp.payload);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "Request failed");
+ }
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "cannot send HTTP request");
+ }
+
+ flb_http_client_destroy(c);
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "cannot create HTTP client context for queue");
+ }
+
+ flb_sds_destroy(payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "error creating payload buffer");
+ }
+
+ flb_sds_destroy(uri);
+ }
+ else {
+ flb_plg_error(ctx->ins, "error creating queue uri buffer");
+ }
+
+ flb_upstream_conn_release(u_conn);
+ }
+ else {
+ flb_plg_error(ctx->ins, "error getting queue upstream connection");
+ }
+
+ return ret;
+}
+
+static flb_sds_t azure_kusto_create_blob_id(struct flb_azure_kusto *ctx, flb_sds_t tag,
+ size_t tag_len)
+{
+ flb_sds_t blob_id = NULL;
+ struct flb_time tm;
+ uint64_t ms;
+ char *b64tag;
+ size_t b64_len;
+
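+    /* millisecond timestamp appended to the blob id to keep it unique */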
+ flb_time_get(&tm);
+ ms = ((tm.tm.tv_sec * 1000) + (tm.tm.tv_nsec / 1000000));
+
+ b64tag = base64_encode(tag, tag_len, &b64_len);
+
+ if (b64tag) {
+ /* remove trailing '=' */
+ while (b64_len && b64tag[b64_len - 1] == '=') {
+ b64tag[b64_len - 1] = '\0';
+ b64_len--;
+ }
+
+ blob_id = flb_sds_create_size(flb_sds_len(ctx->database_name) +
+ flb_sds_len(ctx->table_name) + b64_len + 24);
+ if (blob_id) {
+ flb_sds_snprintf(&blob_id, flb_sds_alloc(blob_id), "flb__%s__%s__%s__%lu",
+ ctx->database_name, ctx->table_name, b64tag, ms);
+ }
+ else {
+ flb_plg_error(ctx->ins, "cannot create blob id buffer");
+ }
+
+ flb_free(b64tag);
+ }
+ else {
+ flb_plg_error(ctx->ins, "error encoding tag '%s' to base64", tag);
+ }
+
+ return blob_id;
+}
+
+int azure_kusto_queued_ingestion(struct flb_azure_kusto *ctx, flb_sds_t tag,
+ size_t tag_len, flb_sds_t payload, size_t payload_size)
+{
+ int ret = -1;
+ flb_sds_t blob_id;
+ flb_sds_t blob_uri;
+
+ /* flb__<db>__<table>__<b64tag>__<timestamp> */
+ blob_id = azure_kusto_create_blob_id(ctx, tag, tag_len);
+
+ if (blob_id) {
+ blob_uri = azure_kusto_create_blob(ctx, blob_id, payload, payload_size);
+
+ if (blob_uri) {
+ ret = azure_kusto_enqueue_ingestion(ctx, blob_uri, payload_size);
+
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "failed to enqueue ingestion blob to queue");
+ ret = -1;
+ }
+
+ flb_sds_destroy(blob_uri);
+ }
+ else {
+ flb_plg_error(ctx->ins, "failed to create payload blob uri");
+ }
+
+ flb_sds_destroy(blob_id);
+ }
+ else {
+ flb_plg_error(ctx->ins, "cannot create blob id");
+ }
+
+ return ret;
+}
diff --git a/src/fluent-bit/plugins/out_azure_kusto/azure_kusto_ingest.h b/src/fluent-bit/plugins/out_azure_kusto/azure_kusto_ingest.h
new file mode 100644
index 000000000..60613919a
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_kusto/azure_kusto_ingest.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_AZURE_KUSTO_INGEST_H
+#define FLB_OUT_AZURE_KUSTO_INGEST_H
+
+#include "azure_kusto.h"
+
+int azure_kusto_queued_ingestion(struct flb_azure_kusto *ctx, flb_sds_t tag,
+ size_t tag_len, flb_sds_t payload, size_t payload_size);
+
+#endif
\ No newline at end of file
diff --git a/src/fluent-bit/plugins/out_azure_logs_ingestion/CMakeLists.txt b/src/fluent-bit/plugins/out_azure_logs_ingestion/CMakeLists.txt
new file mode 100644
index 000000000..b51308c70
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_logs_ingestion/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ azure_logs_ingestion.c
+ azure_logs_ingestion_conf.c
+ )
+
+FLB_PLUGIN(out_azure_logs_ingestion "${src}" "")
diff --git a/src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion.c b/src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion.c
new file mode 100644
index 000000000..9b839ef7e
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion.c
@@ -0,0 +1,445 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_oauth2.h>
+#include <fluent-bit/flb_base64.h>
+#include <fluent-bit/flb_crypto.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_hmac.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <msgpack.h>
+
+#include "azure_logs_ingestion.h"
+#include "azure_logs_ingestion_conf.h"
+
+static int cb_azure_logs_ingestion_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct flb_az_li *ctx;
+ (void) config;
+ (void) ins;
+ (void) data;
+
+ /* Allocate and initialize a context from configuration */
+ ctx = flb_az_li_ctx_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "configuration failed");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* A function duplicated from the azure log analytics plugin;
+   allocates an sds string */
+static int az_li_format(const void *in_buf, size_t in_bytes,
+ char **out_buf, size_t *out_size,
+ struct flb_az_li *ctx)
+{
+ int i;
+ int array_size = 0;
+ int map_size;
+ size_t off = 0;
+ double t;
+ struct flb_time tm;
+ msgpack_unpacked result;
+ msgpack_object root;
+ msgpack_object *obj;
+ msgpack_object map;
+ msgpack_object k;
+ msgpack_object v;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ msgpack_sbuffer tmp_sbuf;
+ msgpack_packer tmp_pck;
+ flb_sds_t record;
+ char time_formatted[32];
+ size_t s;
+ struct tm tms;
+ int len;
+
+ /* Count number of items */
+ array_size = flb_mp_count(in_buf, in_bytes);
+ msgpack_unpacked_init(&result);
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+ msgpack_pack_array(&mp_pck, array_size);
+
+ off = 0;
+ while (msgpack_unpack_next(&result, in_buf, in_bytes, &off) == MSGPACK_UNPACK_SUCCESS) {
+ root = result.data;
+
+ /* Get timestamp */
+ flb_time_pop_from_msgpack(&tm, &result, &obj);
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&tmp_sbuf);
+ msgpack_packer_init(&tmp_pck, &tmp_sbuf, msgpack_sbuffer_write);
+
+ map = root.via.array.ptr[1];
+ map_size = map.via.map.size;
+
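+        /* original record keys plus one extra slot for the injected time key */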
+ msgpack_pack_map(&mp_pck, map_size + 1);
+
+ /* Append the time key */
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->time_key));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->time_key,
+ flb_sds_len(ctx->time_key));
+
+ if (ctx->time_generated == FLB_TRUE) {
+ /* Append the time value as ISO 8601 */
+ gmtime_r(&tm.tm.tv_sec, &tms);
+ s = strftime(time_formatted, sizeof(time_formatted) - 1,
+ FLB_PACK_JSON_DATE_ISO8601_FMT, &tms);
+
+ len = snprintf(time_formatted + s,
+ sizeof(time_formatted) - 1 - s,
+ ".%03" PRIu64 "Z",
+ (uint64_t) tm.tm.tv_nsec / 1000000);
+ s += len;
+ msgpack_pack_str(&mp_pck, s);
+ msgpack_pack_str_body(&mp_pck, time_formatted, s);
+ }
+ else {
+            /* Append the time value as epoch seconds (double, with fractional part) */
+ t = flb_time_to_double(&tm);
+ msgpack_pack_double(&mp_pck, t);
+ }
+
+ /* Append original map k/v */
+ for (i = 0; i < map_size; i++) {
+ k = map.via.map.ptr[i].key;
+ v = map.via.map.ptr[i].val;
+
+ msgpack_pack_object(&tmp_pck, k);
+ msgpack_pack_object(&tmp_pck, v);
+ }
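+        /* append the re-packed record key/value bytes after the injected time key */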
+ msgpack_sbuffer_write(&mp_sbuf, tmp_sbuf.data, tmp_sbuf.size);
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ }
+
+ record = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+ if (!record) {
+ flb_errno();
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ msgpack_unpacked_destroy(&result);
+
+ *out_buf = record;
+ *out_size = flb_sds_len(record);
+
+ return 0;
+}
+
+/* Gets the OAuth token; allocates an sds string every time, the caller must deallocate it */
+flb_sds_t get_az_li_token(struct flb_az_li *ctx)
+{
+ int ret = 0;
+ char* token;
+ size_t token_len;
+ flb_sds_t token_return = NULL;
+
+ if (pthread_mutex_lock(&ctx->token_mutex)) {
+ flb_plg_error(ctx->ins, "error locking mutex");
+ return NULL;
+ }
+ /* Retrieve access token only if expired */
+ if (flb_oauth2_token_expired(ctx->u_auth) == FLB_TRUE) {
+ flb_plg_debug(ctx->ins, "token expired. getting new token");
+ /* Clear any previous oauth2 payload content */
+ flb_oauth2_payload_clear(ctx->u_auth);
+
+ ret = flb_oauth2_payload_append(ctx->u_auth, "grant_type", 10,
+ "client_credentials", 18);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error appending oauth2 params");
+ goto token_cleanup;
+ }
+
+ ret = flb_oauth2_payload_append(ctx->u_auth, "scope", 5, FLB_AZ_LI_AUTH_SCOPE,
+ sizeof(FLB_AZ_LI_AUTH_SCOPE) - 1);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error appending oauth2 params");
+ goto token_cleanup;
+ }
+
+ ret = flb_oauth2_payload_append(ctx->u_auth, "client_id", 9,
+ ctx->client_id, -1);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error appending oauth2 params");
+ goto token_cleanup;
+ }
+
+ ret = flb_oauth2_payload_append(ctx->u_auth, "client_secret", 13,
+ ctx->client_secret, -1);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error appending oauth2 params");
+ goto token_cleanup;
+ }
+
+ token = flb_oauth2_token_get(ctx->u_auth);
+
+ /* Copy string to prevent race conditions */
+ if (!token) {
+ flb_plg_error(ctx->ins, "error retrieving oauth2 access token");
+ goto token_cleanup;
+ }
+ flb_plg_debug(ctx->ins, "got azure token");
+ }
+
+    /* Reaching this point means we either obtained a new token or the current one is still valid; */
+    /* either way, copy the token into a new string */
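+    /* +2 accounts for the separating space and the NUL terminator */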
+ token_len = flb_sds_len(ctx->u_auth->token_type) + 2 +
+ flb_sds_len(ctx->u_auth->access_token);
+ flb_plg_debug(ctx->ins, "create token header string");
+ /* Now create */
+ token_return = flb_sds_create_size(token_len);
+ if (!token_return) {
+ flb_plg_error(ctx->ins, "error creating token buffer");
+ goto token_cleanup;
+ }
+ flb_sds_snprintf(&token_return, flb_sds_alloc(token_return), "%s %s",
+ ctx->u_auth->token_type, ctx->u_auth->access_token);
+
+token_cleanup:
+ if (pthread_mutex_unlock(&ctx->token_mutex)) {
+ flb_plg_error(ctx->ins, "error unlocking mutex");
+ return NULL;
+ }
+
+ return token_return;
+}
+
+static void cb_azure_logs_ingestion_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ int flush_status;
+ size_t b_sent;
+ size_t json_payload_size;
+ void* final_payload;
+ size_t final_payload_size;
+ flb_sds_t token;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c = NULL;
+ int is_compressed = FLB_FALSE;
+ flb_sds_t json_payload = NULL;
+ struct flb_az_li *ctx = out_context;
+ (void) i_ins;
+ (void) config;
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u_dce);
+ if (!u_conn) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Convert binary logs into a JSON payload */
+ ret = az_li_format(event_chunk->data, event_chunk->size,
+ &json_payload, &json_payload_size, ctx);
+ if (ret == -1) {
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ /* Get OAuth2 token */
+ token = get_az_li_token(ctx);
+ if (!token) {
+ flush_status = FLB_RETRY;
+ goto cleanup;
+ }
+
+ /* Map buffer */
+ final_payload = json_payload;
+ final_payload_size = json_payload_size;
+ if (ctx->compress_enabled == FLB_TRUE) {
+ ret = flb_gzip_compress((void *) json_payload, json_payload_size,
+ &final_payload, &final_payload_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "cannot gzip payload, disabling compression");
+ }
+ else {
+ is_compressed = FLB_TRUE;
+ flb_plg_debug(ctx->ins, "enabled payload gzip compression");
+            /* the original JSON buffer will be released at cleanup */
+ }
+ }
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->dce_u_url,
+ final_payload, final_payload_size, NULL, 0, NULL, 0);
+
+ if (!c) {
+ flb_plg_warn(ctx->ins, "retrying payload bytes=%lu", final_payload_size);
+ flush_status = FLB_RETRY;
+ goto cleanup;
+ }
+
+ /* Append headers */
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ flb_http_add_header(c, "Content-Type", 12, "application/json", 16);
+ if (is_compressed) {
+ flb_http_add_header(c, "Content-Encoding", 16, "gzip", 4);
+ }
+ flb_http_add_header(c, "Authorization", 13, token, flb_sds_len(token));
+ flb_http_buffer_size(c, FLB_HTTP_DATA_SIZE_MAX);
+
+ /* Execute rest call */
+ ret = flb_http_do(c, &b_sent);
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "http_do=%i", ret);
+ flush_status = FLB_RETRY;
+ goto cleanup;
+ }
+ else {
+ if (c->resp.status >= 200 && c->resp.status <= 299) {
+ flb_plg_info(ctx->ins, "http_status=%i, dcr_id=%s, table=%s",
+ c->resp.status, ctx->dcr_id, ctx->table_name);
+ flush_status = FLB_OK;
+ goto cleanup;
+ }
+ else {
+ if (c->resp.payload_size > 0) {
+ flb_plg_warn(ctx->ins, "http_status=%i:\n%s",
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "http_status=%i", c->resp.status);
+ }
+ flb_plg_debug(ctx->ins, "retrying payload bytes=%lu", final_payload_size);
+ flush_status = FLB_RETRY;
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ /* cleanup */
+ if (json_payload) {
+ flb_sds_destroy(json_payload);
+ }
+
+ /* release compressed payload */
+ if (is_compressed == FLB_TRUE) {
+ flb_free(final_payload);
+ }
+
+ if (c) {
+ flb_http_client_destroy(c);
+ }
+ if (u_conn) {
+ flb_upstream_conn_release(u_conn);
+ }
+
+    /* destroy the token last, after the HTTP call has finished */
+ if (token) {
+ flb_sds_destroy(token);
+ }
+ FLB_OUTPUT_RETURN(flush_status);
+}
+
+static int cb_azure_logs_ingestion_exit(void *data, struct flb_config *config)
+{
+ struct flb_az_li *ctx = data;
+ flb_plg_debug(ctx->ins, "exiting logs ingestion plugin");
+ flb_az_li_ctx_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "tenant_id", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_az_li, tenant_id),
+ "Set the tenant ID of the AAD application"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "client_id", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_az_li, client_id),
+ "Set the client/app ID of the AAD application"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "client_secret", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_az_li, client_secret),
+ "Set the client secret of the AAD application"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "dce_url", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_az_li, dce_url),
+     "Data Collection Endpoint (DCE) URI (e.g. "
+ "https://la-endpoint-q12a.eastus-1.ingest.monitor.azure.com)"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "dcr_id", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_az_li, dcr_id),
+ "Data Collection Rule (DCR) immutable ID"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "table_name", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_az_li, table_name),
+ "The name of the custom log table, including '_CL' suffix"
+ },
+ /* optional params */
+ {
+ FLB_CONFIG_MAP_STR, "time_key", FLB_AZ_LI_TIME_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_az_li, time_key),
+ "[Optional] Specify the key name where the timestamp will be stored."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "time_generated", "false",
+ 0, FLB_TRUE, offsetof(struct flb_az_li, time_generated),
+     "If enabled, a timestamp is generated and appended to the JSON payload. "
+ "The key name is set by the 'time_key' parameter"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "compress", "false",
+ 0, FLB_TRUE, offsetof(struct flb_az_li, compress_enabled),
+ "Enable HTTP payload compression (gzip)."
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_output_plugin out_azure_logs_ingestion_plugin = {
+ .name = "azure_logs_ingestion",
+ .description = "Send logs to Log Analytics with Log Ingestion API",
+ .cb_init = cb_azure_logs_ingestion_init,
+ .cb_flush = cb_azure_logs_ingestion_flush,
+ .cb_exit = cb_azure_logs_ingestion_exit,
+
+ /* Configuration */
+ .config_map = config_map,
+
+ /* Plugin flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion.h b/src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion.h
new file mode 100644
index 000000000..15b2420b8
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion.h
@@ -0,0 +1,74 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_AZURE_LOGS_INGESTION
+#define FLB_OUT_AZURE_LOGS_INGESTION
+
+#define FLB_AZ_LI_API_VERSION "api-version=2021-11-01-preview"
+#define FLB_AZ_LI_TIME_KEY "@timestamp"
+#define FLB_AZ_LI_AUTH_SCOPE "https://monitor.azure.com/.default"
+/* auth url needs tenant_id */
+#define FLB_AZ_LI_AUTH_URL_TMPLT "https://login.microsoftonline.com/"\
+ "%s/oauth2/v2.0/token"
+/* DCE Full URL needs: dce_url, dcr_id, Log Analytics custom table name */
+#define FLB_AZ_LI_DCE_URL_TMPLT "%s/dataCollectionRules/%s/streams/"\
+ "Custom-%s?"FLB_AZ_LI_API_VERSION
+/* TLS mode for the upstream connection: FLB_IO_TLS or FLB_IO_OPT_TLS */
+#define FLB_AZ_LI_TLS_MODE FLB_IO_TLS
+/* refresh token every 60 minutes */
+#define FLB_AZ_LI_TOKEN_TIMEOUT 3600
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_sds.h>
+
+/* Context structure for Azure Logs Ingestion API */
+struct flb_az_li {
+ /* log ingestion account setup */
+ flb_sds_t tenant_id;
+ flb_sds_t client_id;
+ flb_sds_t client_secret;
+ flb_sds_t dce_url;
+ flb_sds_t dcr_id;
+ flb_sds_t table_name;
+
+ /* time_generated: on/off */
+ int time_generated;
+ /* time key name */
+ flb_sds_t time_key;
+
+ /* compress payload */
+ int compress_enabled;
+
+    /* management auth */
+ flb_sds_t auth_url;
+ struct flb_oauth2 *u_auth;
+ /* mutex for acquiring tokens */
+ pthread_mutex_t token_mutex;
+
+ /* upstream connection to the data collection endpoint */
+ struct flb_upstream *u_dce;
+ flb_sds_t dce_u_url;
+
+ /* plugin output and config instance reference */
+ struct flb_output_instance *ins;
+ struct flb_config *config;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion_conf.c b/src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion_conf.c
new file mode 100644
index 000000000..344a7f5ff
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion_conf.c
@@ -0,0 +1,172 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_base64.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_oauth2.h>
+
+#include "azure_logs_ingestion.h"
+#include "azure_logs_ingestion_conf.h"
+
+struct flb_az_li* flb_az_li_ctx_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ struct flb_az_li *ctx;
+ (void) ins;
+ (void) config;
+
+ /* Allocate a new context object for this output instance */
+ ctx = flb_calloc(1, sizeof(struct flb_az_li));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+
+    /* Set the context in output_instance so that we can retrieve it later */
+ ctx->ins = ins;
+ ctx->config = config;
+ /* Set context */
+ flb_output_set_context(ins, ctx);
+
+ /* Load config map */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to load configuration");
+ return NULL;
+ }
+
+ /* config: 'client_id' */
+ if (!ctx->client_id) {
+ flb_plg_error(ins, "property 'client_id' is not defined");
+ flb_az_li_ctx_destroy(ctx);
+ return NULL;
+ }
+ /* config: 'tenant_id' */
+ if (!ctx->tenant_id) {
+ flb_plg_error(ins, "property 'tenant_id' is not defined");
+ flb_az_li_ctx_destroy(ctx);
+ return NULL;
+ }
+ /* config: 'client_secret' */
+ if (!ctx->client_secret) {
+ flb_plg_error(ins, "property 'client_secret' is not defined");
+ flb_az_li_ctx_destroy(ctx);
+ return NULL;
+ }
+ /* config: 'dce_url' */
+ if (!ctx->dce_url) {
+ flb_plg_error(ins, "property 'dce_url' is not defined");
+ flb_az_li_ctx_destroy(ctx);
+ return NULL;
+ }
+ /* config: 'dcr_id' */
+ if (!ctx->dcr_id) {
+ flb_plg_error(ins, "property 'dcr_id' is not defined");
+ flb_az_li_ctx_destroy(ctx);
+ return NULL;
+ }
+ /* config: 'table_name' */
+ if (!ctx->table_name) {
+ flb_plg_error(ins, "property 'table_name' is not defined");
+ flb_az_li_ctx_destroy(ctx);
+ return NULL;
+ }
+
+ /* Allocate and set auth url */
+ ctx->auth_url = flb_sds_create_size(sizeof(FLB_AZ_LI_AUTH_URL_TMPLT) - 1 +
+ flb_sds_len(ctx->tenant_id));
+ if (!ctx->auth_url) {
+ flb_errno();
+ flb_az_li_ctx_destroy(ctx);
+ return NULL;
+ }
+ flb_sds_snprintf(&ctx->auth_url, flb_sds_alloc(ctx->auth_url),
+ FLB_AZ_LI_AUTH_URL_TMPLT, ctx->tenant_id);
+
+ /* Allocate and set dce full url */
+ ctx->dce_u_url = flb_sds_create_size(sizeof(FLB_AZ_LI_DCE_URL_TMPLT) - 1 +
+ flb_sds_len(ctx->dce_url) +
+ flb_sds_len(ctx->dcr_id) +
+ flb_sds_len(ctx->table_name));
+ if (!ctx->dce_u_url) {
+ flb_errno();
+ flb_az_li_ctx_destroy(ctx);
+ return NULL;
+ }
+ flb_sds_snprintf(&ctx->dce_u_url, flb_sds_alloc(ctx->dce_u_url),
+ FLB_AZ_LI_DCE_URL_TMPLT, ctx->dce_url,
+ ctx->dcr_id, ctx->table_name);
+
+ /* Initialize the auth mutex */
+ pthread_mutex_init(&ctx->token_mutex, NULL);
+
+ /* Create oauth2 context */
+ ctx->u_auth = flb_oauth2_create(config, ctx->auth_url,
+ FLB_AZ_LI_TOKEN_TIMEOUT);
+ if (!ctx->u_auth) {
+ flb_plg_error(ins, "cannot create oauth2 context");
+ flb_az_li_ctx_destroy(ctx);
+ return NULL;
+ }
+
+    /* Create upstream context for the Log Ingestion endpoint */
+ ctx->u_dce = flb_upstream_create_url(config, ctx->dce_url,
+ FLB_AZ_LI_TLS_MODE, ins->tls);
+ if (!ctx->u_dce) {
+ flb_plg_error(ins, "upstream creation failed");
+ flb_az_li_ctx_destroy(ctx);
+ return NULL;
+ }
+ flb_output_upstream_set(ctx->u_dce, ins);
+
+ flb_plg_info(ins, "dce_url='%s', dcr='%s', table='%s', stream='Custom-%s'",
+ ctx->dce_url, ctx->dcr_id, ctx->table_name, ctx->table_name);
+
+ return ctx;
+}
+
+/* Free the context and created memory */
+int flb_az_li_ctx_destroy(struct flb_az_li *ctx)
+{
+ if (!ctx) {
+ return -1;
+ }
+
+ if (ctx->auth_url) {
+ flb_sds_destroy(ctx->auth_url);
+ }
+
+ if (ctx->dce_u_url) {
+ flb_sds_destroy(ctx->dce_u_url);
+ }
+
+ if (ctx->u_auth) {
+ flb_oauth2_destroy(ctx->u_auth);
+ }
+
+ if (ctx->u_dce) {
+ flb_upstream_destroy(ctx->u_dce);
+ }
+ flb_free(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion_conf.h b/src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion_conf.h
new file mode 100644
index 000000000..3886f75bc
--- /dev/null
+++ b/src/fluent-bit/plugins/out_azure_logs_ingestion/azure_logs_ingestion_conf.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_AZURE_LOGS_INGESTION_CONF_H
+#define FLB_OUT_AZURE_LOGS_INGESTION_CONF_H
+
+#include "azure_logs_ingestion.h"
+
+struct flb_az_li* flb_az_li_ctx_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+int flb_az_li_ctx_destroy(struct flb_az_li *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_bigquery/CMakeLists.txt b/src/fluent-bit/plugins/out_bigquery/CMakeLists.txt
new file mode 100644
index 000000000..042b71bec
--- /dev/null
+++ b/src/fluent-bit/plugins/out_bigquery/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ bigquery_conf.c
+ bigquery.c
+ )
+
+FLB_PLUGIN(out_bigquery "${src}" "")
diff --git a/src/fluent-bit/plugins/out_bigquery/bigquery.c b/src/fluent-bit/plugins/out_bigquery/bigquery.c
new file mode 100644
index 000000000..ab5b4657f
--- /dev/null
+++ b/src/fluent-bit/plugins/out_bigquery/bigquery.c
@@ -0,0 +1,1159 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_oauth2.h>
+#include <fluent-bit/flb_base64.h>
+#include <fluent-bit/flb_hash.h>
+#include <fluent-bit/flb_crypto.h>
+#include <fluent-bit/flb_signv4.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_kv.h>
+
+#include <msgpack.h>
+
+#include "bigquery.h"
+#include "bigquery_conf.h"
+
+// TODO: The following code is copied from the Stackdriver plugin and should be
+// factored into common library functions.
+
+/*
+ * Base64 Encoding in JWT must:
+ *
+ * - remove any trailing padding '=' character
+ * - replace '+' with '-'
+ * - replace '/' with '_'
+ *
+ * ref: https://www.rfc-editor.org/rfc/rfc7515.txt Appendix C
+ */
+int bigquery_jwt_base64_url_encode(unsigned char *out_buf, size_t out_size,
+ unsigned char *in_buf, size_t in_size,
+ size_t *olen)
+
+{
+ int i;
+ size_t len;
+ int result;
+
+ /* do normal base64 encoding */
+ result = flb_base64_encode((unsigned char *) out_buf, out_size - 1,
+ &len, in_buf, in_size);
+ if (result != 0) {
+ return -1;
+ }
+
+ /* Replace '+' and '/' characters */
+ for (i = 0; i < len && out_buf[i] != '='; i++) {
+ if (out_buf[i] == '+') {
+ out_buf[i] = '-';
+ }
+ else if (out_buf[i] == '/') {
+ out_buf[i] = '_';
+ }
+ }
+
+ /* Now 'i' becomes the new length */
+ *olen = i;
+ return 0;
+}
+
+static int bigquery_jwt_encode(struct flb_bigquery *ctx,
+ char *payload, char *secret,
+ char **out_signature, size_t *out_size)
+{
+ int ret;
+ int len;
+ int buf_size;
+ size_t olen;
+ char *buf;
+ char *sigd;
+ char *headers = "{\"alg\": \"RS256\", \"typ\": \"JWT\"}";
+ unsigned char sha256_buf[32] = {0};
+ flb_sds_t out;
+ unsigned char sig[256] = {0};
+ size_t sig_len;
+
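+    /* JWT layout: base64url(header) "." base64url(payload) "." base64url(signature) */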
+ buf_size = (strlen(payload) + strlen(secret)) * 2;
+ buf = flb_malloc(buf_size);
+ if (!buf) {
+ flb_errno();
+ return -1;
+ }
+
+ /* Encode header */
+ len = strlen(headers);
+ ret = flb_base64_encode((unsigned char *) buf, buf_size - 1,
+ &olen, (unsigned char *) headers, len);
+ if (ret != 0) {
+ flb_free(buf);
+
+ return ret;
+ }
+
+ /* Create buffer to store JWT */
+ out = flb_sds_create_size(2048);
+ if (!out) {
+ flb_errno();
+ flb_free(buf);
+ return -1;
+ }
+
+ /* Append header */
+ out = flb_sds_cat(out, buf, olen);
+ out = flb_sds_cat(out, ".", 1);
+
+ /* Encode Payload */
+ len = strlen(payload);
+ bigquery_jwt_base64_url_encode((unsigned char *) buf, buf_size,
+ (unsigned char *) payload, len, &olen);
+
+ /* Append Payload */
+ out = flb_sds_cat(out, buf, olen);
+
+ /* do sha256() of base64(header).base64(payload) */
+ ret = flb_hash_simple(FLB_HASH_SHA256,
+ (unsigned char *) out, flb_sds_len(out),
+ sha256_buf, sizeof(sha256_buf));
+
+ if (ret != FLB_CRYPTO_SUCCESS) {
+ flb_plg_error(ctx->ins, "error hashing token");
+ flb_free(buf);
+ flb_sds_destroy(out);
+ return -1;
+ }
+
+ /* In mbedTLS cert length must include the null byte */
+ len = strlen(secret) + 1;
+
+ sig_len = sizeof(sig);
+
+ ret = flb_crypto_sign_simple(FLB_CRYPTO_PRIVATE_KEY,
+ FLB_CRYPTO_PADDING_PKCS1,
+ FLB_HASH_SHA256,
+ (unsigned char *) secret, len,
+ sha256_buf, sizeof(sha256_buf),
+ sig, &sig_len);
+
+ if (ret != FLB_CRYPTO_SUCCESS) {
+ flb_plg_error(ctx->ins, "error creating RSA context");
+ flb_free(buf);
+ flb_sds_destroy(out);
+ return -1;
+ }
+
+ sigd = flb_malloc(2048);
+ if (!sigd) {
+ flb_errno();
+ flb_free(buf);
+ flb_sds_destroy(out);
+ return -1;
+ }
+
+ bigquery_jwt_base64_url_encode((unsigned char *) sigd, 2048, sig, 256, &olen);
+
+ out = flb_sds_cat(out, ".", 1);
+ out = flb_sds_cat(out, sigd, olen);
+
+ *out_signature = out;
+ *out_size = flb_sds_len(out);
+
+ flb_free(buf);
+ flb_free(sigd);
+
+ return 0;
+}
+
+/* Create a new oauth2 context and get a oauth2 token */
+static int bigquery_get_oauth2_token(struct flb_bigquery *ctx)
+{
+ int ret;
+ char *token;
+ char *sig_data;
+ size_t sig_size;
+ time_t issued;
+ time_t expires;
+ char payload[1024];
+
+ /* Clear any previous oauth2 payload content */
+ flb_oauth2_payload_clear(ctx->o);
+
+ /* JWT encode for oauth2 */
+ issued = time(NULL);
+ expires = issued + FLB_BIGQUERY_TOKEN_REFRESH;
+
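+    /* JWT claim set for the service account flow: issuer, scope, audience and validity window */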
+ snprintf(payload, sizeof(payload) - 1,
+ "{\"iss\": \"%s\", \"scope\": \"%s\", "
+ "\"aud\": \"%s\", \"exp\": %lu, \"iat\": %lu}",
+ ctx->oauth_credentials->client_email, FLB_BIGQUERY_SCOPE,
+ FLB_BIGQUERY_AUTH_URL,
+ expires, issued);
+
+ /* Compose JWT signature */
+ ret = bigquery_jwt_encode(ctx, payload, ctx->oauth_credentials->private_key,
+ &sig_data, &sig_size);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "JWT signature generation failed");
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "JWT signature:\n%s", sig_data);
+
+ ret = flb_oauth2_payload_append(ctx->o,
+ "grant_type", -1,
+ "urn%3Aietf%3Aparams%3Aoauth%3A"
+ "grant-type%3Ajwt-bearer", -1);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error appending oauth2 params");
+ flb_sds_destroy(sig_data);
+ return -1;
+ }
+
+ ret = flb_oauth2_payload_append(ctx->o,
+ "assertion", -1,
+ sig_data, sig_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error appending oauth2 params");
+ flb_sds_destroy(sig_data);
+ return -1;
+ }
+ flb_sds_destroy(sig_data);
+
+ /* Retrieve access token */
+ token = flb_oauth2_token_get(ctx->o);
+ if (!token) {
+ flb_plg_error(ctx->ins, "error retrieving oauth2 access token");
+ return -1;
+ }
+
+ return 0;
+}
+
+static flb_sds_t add_aws_signature(struct flb_http_client *c, struct flb_bigquery *ctx) {
+ flb_sds_t signature;
+
+ flb_plg_debug(ctx->ins, "Signing the request with AWS SigV4 using IMDS credentials");
+
+ signature = flb_signv4_do(c, FLB_TRUE, FLB_TRUE, time(NULL),
+ ctx->aws_region, "sts",
+ 0, NULL, ctx->aws_provider);
+ if (!signature) {
+ flb_plg_error(ctx->ins, "Could not sign the request with AWS SigV4");
+ return NULL;
+ }
+
+ return signature;
+}
+
+static inline int to_encode_path(char c)
+{
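+    /* RFC 3986 unreserved characters plus '/' are left as-is; everything else is percent-encoded */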
+ if ((c >= 48 && c <= 57) || /* 0-9 */
+ (c >= 65 && c <= 90) || /* A-Z */
+ (c >= 97 && c <= 122) || /* a-z */
+ (c == '-' || c == '_' || c == '.' || c == '~' || c == '/')) {
+ return FLB_FALSE;
+ }
+
+ return FLB_TRUE;
+}
+
+static flb_sds_t uri_encode(const char *uri, size_t len)
+{
+ int i;
+ flb_sds_t buf = NULL;
+ flb_sds_t tmp = NULL;
+
+ buf = flb_sds_create_size(len * 2);
+ if (!buf) {
+ flb_error("[uri_encode] cannot allocate buffer for URI encoding");
+ return NULL;
+ }
+
+ for (i = 0; i < len; i++) {
+ if (to_encode_path(uri[i]) == FLB_TRUE) {
+ tmp = flb_sds_printf(&buf, "%%%02X", (unsigned char) *(uri + i));
+ if (!tmp) {
+ flb_error("[uri_encode] error formatting special character");
+ flb_sds_destroy(buf);
+ return NULL;
+ }
+ continue;
+ }
+
+ /* Direct assignment, just copy the character */
+ if (buf) {
+ tmp = flb_sds_cat(buf, uri + i, 1);
+ if (!tmp) {
+ flb_error("[uri_encode] error composing outgoing buffer");
+ flb_sds_destroy(buf);
+ return NULL;
+ }
+ buf = tmp;
+ }
+ }
+
+ return buf;
+}
+
+/* https://cloud.google.com/iam/docs/using-workload-identity-federation */
+static int bigquery_exchange_aws_creds_for_google_oauth(struct flb_bigquery *ctx)
+{
+ struct flb_connection *aws_sts_conn;
+ struct flb_connection *google_sts_conn = NULL;
+ struct flb_connection *google_gen_access_token_conn = NULL;
+ struct flb_http_client *aws_sts_c = NULL;
+ struct flb_http_client *google_sts_c = NULL;
+ struct flb_http_client *google_gen_access_token_c = NULL;
+ int google_sts_ret;
+ int google_gen_access_token_ret;
+ size_t b_sent_google_sts;
+ size_t b_sent_google_gen_access_token;
+ flb_sds_t signature = NULL;
+ flb_sds_t sigv4_amz_date = NULL;
+ flb_sds_t sigv4_amz_sec_token = NULL;
+ flb_sds_t aws_gci_url = NULL;
+ flb_sds_t aws_gci_goog_target_resource = NULL;
+ flb_sds_t aws_gci_token = NULL;
+ flb_sds_t aws_gci_token_encoded = NULL;
+ flb_sds_t google_sts_token = NULL;
+ flb_sds_t google_gen_access_token_body = NULL;
+ flb_sds_t google_gen_access_token_url = NULL;
+ flb_sds_t google_federated_token = NULL;
+ flb_sds_t google_auth_header = NULL;
+
+ if (ctx->sa_token) {
+ flb_sds_destroy(ctx->sa_token);
+ ctx->sa_token = NULL;
+ }
+
+ /* Sign an AWS STS request with AWS SigV4 signature */
+ aws_sts_conn = flb_upstream_conn_get(ctx->aws_sts_upstream);
+ if (!aws_sts_conn) {
+ flb_plg_error(ctx->ins, "Failed to get upstream connection for AWS STS");
+ goto error;
+ }
+
+ aws_sts_c = flb_http_client(aws_sts_conn, FLB_HTTP_POST, FLB_BIGQUERY_AWS_STS_ENDPOINT,
+ NULL, 0, NULL, 0, NULL, 0);
+ if (!aws_sts_c) {
+ flb_plg_error(ctx->ins, "Failed to create HTTP client for AWS STS");
+ goto error;
+ }
+
+ signature = add_aws_signature(aws_sts_c, ctx);
+ if (!signature) {
+ flb_plg_error(ctx->ins, "Failed to sign AWS STS request");
+ goto error;
+ }
+
+ sigv4_amz_date = flb_sds_create(flb_kv_get_key_value("x-amz-date", &aws_sts_c->headers));
+ if (!sigv4_amz_date) {
+ flb_plg_error(ctx->ins, "Failed to extract `x-amz-date` header from AWS STS signed request");
+ goto error;
+ }
+
+ sigv4_amz_sec_token = flb_sds_create(flb_kv_get_key_value("x-amz-security-token", &aws_sts_c->headers));
+ if (!sigv4_amz_sec_token) {
+ flb_plg_error(ctx->ins, "Failed to extract `x-amz-security-token` header from AWS STS signed request");
+ goto error;
+ }
+
+ /* Create an AWS GetCallerIdentity token */
+
+ /* AWS STS endpoint URL */
+ aws_gci_url = flb_sds_create_size(128);
+ aws_gci_url = flb_sds_printf(&aws_gci_url,
+ "https://%s%s",
+ ctx->aws_sts_endpoint,
+ FLB_BIGQUERY_AWS_STS_ENDPOINT);
+
+ /* x-goog-cloud-target-resource header */
+ aws_gci_goog_target_resource = flb_sds_create_size(128);
+ aws_gci_goog_target_resource = flb_sds_printf(&aws_gci_goog_target_resource,
+ FLB_BIGQUERY_GOOGLE_CLOUD_TARGET_RESOURCE,
+ ctx->project_number, ctx->pool_id, ctx->provider_id);
+
+ aws_gci_token = flb_sds_create_size(2048);
+ aws_gci_token = flb_sds_printf(
+ &aws_gci_token,
+ "{\"url\":\"%s\",\"method\":\"POST\",\"headers\":["
+ "{\"key\":\"Authorization\",\"value\":\"%s\"},"
+ "{\"key\":\"host\",\"value\":\"%s\"},"
+ "{\"key\":\"x-amz-date\",\"value\":\"%s\"},"
+ "{\"key\":\"x-goog-cloud-target-resource\",\"value\":\"%s\"},"
+ "{\"key\":\"x-amz-security-token\",\"value\":\"%s\"}"
+ "]}",
+ aws_gci_url,
+ signature,
+ ctx->aws_sts_endpoint,
+ sigv4_amz_date,
+ aws_gci_goog_target_resource,
+ sigv4_amz_sec_token);
+
+ aws_gci_token_encoded = uri_encode(aws_gci_token, flb_sds_len(aws_gci_token));
+ if (!aws_gci_token_encoded) {
+ flb_plg_error(ctx->ins, "Failed to encode GetCallerIdentity token");
+ goto error;
+ }
+
+ /* To exchange the AWS credential for a federated access token,
+ * we need to pass the AWS GetCallerIdentity token to the Google Security Token Service's token() method */
+ google_sts_token = flb_sds_create_size(2048);
+ google_sts_token = flb_sds_printf(
+ &google_sts_token,
+ "{\"audience\":\"%s\","
+ "\"grantType\":\"%s\","
+ "\"requestedTokenType\":\"%s\","
+ "\"scope\":\"%s\","
+ "\"subjectTokenType\":\"%s\","
+ "\"subjectToken\":\"%s\"}",
+ aws_gci_goog_target_resource,
+ FLB_BIGQUERY_GOOGLE_STS_TOKEN_GRANT_TYPE,
+ FLB_BIGQUERY_GOOGLE_STS_TOKEN_REQUESTED_TOKEN_TYPE,
+ FLB_BIGQUERY_GOOGLE_STS_TOKEN_SCOPE,
+ FLB_BIGQUERY_GOOGLE_STS_TOKEN_SUBJECT_TOKEN_TYPE,
+ aws_gci_token_encoded);
+
+ google_sts_conn = flb_upstream_conn_get(ctx->google_sts_upstream);
+ if (!google_sts_conn) {
+ flb_plg_error(ctx->ins, "Google STS connection setup failed");
+ goto error;
+ }
+
+ google_sts_c = flb_http_client(google_sts_conn, FLB_HTTP_POST, FLB_BIGQUERY_GOOGLE_CLOUD_TOKEN_ENDPOINT,
+ google_sts_token, flb_sds_len(google_sts_token),
+ NULL, 0, NULL, 0);
+
+ google_sts_ret = flb_http_do(google_sts_c, &b_sent_google_sts);
+ if (google_sts_ret != 0) {
+ flb_plg_error(ctx->ins, "Google STS token request http_do=%i", google_sts_ret);
+ goto error;
+ }
+
+ if (google_sts_c->resp.status != 200) {
+ flb_plg_error(ctx->ins, "Google STS token response status: %i, payload:\n%s",
+ google_sts_c->resp.status, google_sts_c->resp.payload);
+ goto error;
+ }
+
+ /* To exchange the federated token for a service account access token,
+ * we need to call the Google Service Account Credentials API generateAccessToken() method */
+ google_federated_token = flb_json_get_val(google_sts_c->resp.payload,
+ google_sts_c->resp.payload_size,
+ "access_token");
+ if (!google_federated_token) {
+ flb_plg_error(ctx->ins, "Failed to extract Google federated access token from STS token() response");
+ goto error;
+ }
+
+ google_gen_access_token_conn = flb_upstream_conn_get(ctx->google_iam_upstream);
+ if (!google_gen_access_token_conn) {
+ flb_plg_error(ctx->ins, "Google Service Account Credentials API connection setup failed");
+ goto error;
+ }
+
+ google_gen_access_token_url = flb_sds_create_size(256);
+ google_gen_access_token_url = flb_sds_printf(&google_gen_access_token_url,
+ FLB_BIGQUERY_GOOGLE_GEN_ACCESS_TOKEN_URL,
+ ctx->google_service_account);
+
+ google_gen_access_token_body = flb_sds_create(FLB_BIGQUERY_GOOGLE_GEN_ACCESS_TOKEN_REQUEST_BODY);
+
+ google_gen_access_token_c = flb_http_client(google_gen_access_token_conn, FLB_HTTP_POST, google_gen_access_token_url,
+ google_gen_access_token_body, flb_sds_len(google_gen_access_token_body),
+ NULL, 0, NULL, 0);
+
+ google_auth_header = flb_sds_create_size(2048 + 7);
+ google_auth_header = flb_sds_printf(&google_auth_header, "%s%s",
+ "Bearer ", google_federated_token);
+
+ flb_http_add_header(google_gen_access_token_c, "Authorization", 13,
+ google_auth_header, flb_sds_len(google_auth_header));
+
+ flb_http_add_header(google_gen_access_token_c, "Content-Type", 12,
+ "application/json; charset=utf-8", 31);
+
+ google_gen_access_token_ret = flb_http_do(google_gen_access_token_c, &b_sent_google_gen_access_token);
+ if (google_gen_access_token_ret != 0) {
+ flb_plg_error(ctx->ins, "Google Service Account Credentials API generateAccessToken() request http_do=%i",
+ google_gen_access_token_ret);
+ goto error;
+ }
+
+ if (google_gen_access_token_c->resp.status != 200) {
+ flb_plg_error(ctx->ins, "Google Service Account Credentials API generateAccessToken() response "
+ "status: %i, payload:\n%s",
+ google_gen_access_token_c->resp.status, google_gen_access_token_c->resp.payload);
+ goto error;
+ }
+
+ ctx->sa_token = flb_json_get_val(google_gen_access_token_c->resp.payload,
+ google_gen_access_token_c->resp.payload_size,
+ "accessToken");
+ if (!ctx->sa_token) {
+ flb_plg_error(ctx->ins, "Failed to extract Google OAuth token "
+ "from Service Account Credentials API generateAccessToken() response");
+ goto error;
+ }
+
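+    /* cache the expiry time so later flushes renew the token only when needed */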
+ ctx->sa_token_expiry = time(NULL) + FLB_BIGQUERY_TOKEN_REFRESH;
+
+ flb_sds_destroy(signature);
+ flb_sds_destroy(sigv4_amz_date);
+ flb_sds_destroy(sigv4_amz_sec_token);
+ flb_sds_destroy(aws_gci_url);
+ flb_sds_destroy(aws_gci_goog_target_resource);
+ flb_sds_destroy(aws_gci_token);
+ flb_sds_destroy(aws_gci_token_encoded);
+ flb_sds_destroy(google_sts_token);
+ flb_sds_destroy(google_gen_access_token_body);
+ flb_sds_destroy(google_gen_access_token_url);
+ flb_sds_destroy(google_federated_token);
+ flb_sds_destroy(google_auth_header);
+
+ flb_http_client_destroy(aws_sts_c);
+ flb_http_client_destroy(google_sts_c);
+ flb_http_client_destroy(google_gen_access_token_c);
+
+ flb_upstream_conn_release(aws_sts_conn);
+ flb_upstream_conn_release(google_sts_conn);
+ flb_upstream_conn_release(google_gen_access_token_conn);
+
+ flb_plg_info(ctx->ins, "Retrieved Google service account OAuth token via Identity Federation");
+
+ return 0;
+
+error:
+ flb_sds_destroy(signature);
+ flb_sds_destroy(sigv4_amz_date);
+ flb_sds_destroy(sigv4_amz_sec_token);
+ flb_sds_destroy(aws_gci_url);
+ flb_sds_destroy(aws_gci_goog_target_resource);
+ flb_sds_destroy(aws_gci_token);
+ flb_sds_destroy(aws_gci_token_encoded);
+ flb_sds_destroy(google_sts_token);
+ flb_sds_destroy(google_gen_access_token_body);
+ flb_sds_destroy(google_gen_access_token_url);
+ flb_sds_destroy(google_federated_token);
+ flb_sds_destroy(google_auth_header);
+
+ if (aws_sts_c) {
+ flb_http_client_destroy(aws_sts_c);
+ }
+
+ if (google_sts_c) {
+ flb_http_client_destroy(google_sts_c);
+ }
+
+ if (google_gen_access_token_c) {
+ flb_http_client_destroy(google_gen_access_token_c);
+ }
+
+ if (aws_sts_conn) {
+ flb_upstream_conn_release(aws_sts_conn);
+ }
+
+ if (google_sts_conn) {
+ flb_upstream_conn_release(google_sts_conn);
+ }
+
+ if (google_gen_access_token_conn) {
+ flb_upstream_conn_release(google_gen_access_token_conn);
+ }
+
+ return -1;
+}
+
+static int flb_bigquery_google_token_expired(time_t expiry)
+{
+ time_t now;
+
+ now = time(NULL);
+ if (expiry <= now) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+static flb_sds_t get_google_service_account_token(struct flb_bigquery *ctx) {
+ int ret = 0;
+ flb_sds_t output;
+ flb_plg_trace(ctx->ins, "Getting Google service account token");
+
+ if (!ctx->sa_token) {
+ flb_plg_trace(ctx->ins, "Acquiring new token");
+ ret = bigquery_exchange_aws_creds_for_google_oauth(ctx);
+ }
+ else if (flb_bigquery_google_token_expired(ctx->sa_token_expiry) == FLB_TRUE) {
+ flb_plg_trace(ctx->ins, "Replacing expired token");
+ ret = bigquery_exchange_aws_creds_for_google_oauth(ctx);
+ }
+
+ if (ret != 0) {
+ return NULL;
+ }
+
+ output = flb_sds_create_size(2048 + 7);
+ output = flb_sds_printf(&output, "%s%s", "Bearer ", ctx->sa_token);
+ return output;
+}
+
+static flb_sds_t get_google_token(struct flb_bigquery *ctx)
+{
+ int ret = 0;
+ flb_sds_t output = NULL;
+
+ if (pthread_mutex_lock(&ctx->token_mutex)){
+ flb_plg_error(ctx->ins, "error locking mutex");
+ return NULL;
+ }
+
+ if (flb_oauth2_token_expired(ctx->o) == FLB_TRUE) {
+ ret = bigquery_get_oauth2_token(ctx);
+ }
+
+ /* Copy string to prevent race conditions (get_oauth2 can free the string) */
+ if (ret == 0) {
+ output = flb_sds_create(ctx->o->token_type);
+ flb_sds_printf(&output, " %s", ctx->o->access_token);
+ }
+
+ if (pthread_mutex_unlock(&ctx->token_mutex)){
+ flb_plg_error(ctx->ins, "error unlocking mutex");
+ if (output) {
+ flb_sds_destroy(output);
+ }
+ return NULL;
+ }
+
+ return output;
+}
+
+static int cb_bigquery_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ char *token;
+ int io_flags = FLB_IO_TLS;
+ struct flb_bigquery *ctx;
+
+ /* Create config context */
+ ctx = flb_bigquery_conf_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "configuration failed");
+ return -1;
+ }
+
+ flb_output_set_context(ins, ctx);
+
+ /* Network mode IPv6 */
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ /* Create mutex for acquiring oauth tokens (they are shared across flush coroutines) */
+ pthread_mutex_init(&ctx->token_mutex, NULL);
+
+ /*
+ * Create upstream context for BigQuery Streaming Inserts
+ * (no oauth2 service)
+ */
+ ctx->u = flb_upstream_create_url(config, FLB_BIGQUERY_URL_BASE,
+ io_flags, ins->tls);
+ if (!ctx->u) {
+ flb_plg_error(ctx->ins, "upstream creation failed");
+ return -1;
+ }
+
+ if (ctx->has_identity_federation) {
+ /* Configure AWS IMDS */
+ ctx->aws_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+
+ if (!ctx->aws_tls) {
+ flb_plg_error(ctx->ins, "Failed to create TLS context");
+ flb_bigquery_conf_destroy(ctx);
+ return -1;
+ }
+
+ ctx->aws_provider = flb_standard_chain_provider_create(config,
+ ctx->aws_tls,
+ NULL,
+ NULL,
+ NULL,
+ flb_aws_client_generator(),
+ NULL);
+
+ if (!ctx->aws_provider) {
+ flb_plg_error(ctx->ins, "Failed to create AWS Credential Provider");
+ flb_bigquery_conf_destroy(ctx);
+ return -1;
+ }
+
+ /* initialize credentials in sync mode */
+ ctx->aws_provider->provider_vtable->sync(ctx->aws_provider);
+ ctx->aws_provider->provider_vtable->init(ctx->aws_provider);
+
+ /* set back to async */
+ ctx->aws_provider->provider_vtable->async(ctx->aws_provider);
+ ctx->aws_provider->provider_vtable->upstream_set(ctx->aws_provider, ctx->ins);
+
+ /* Configure AWS STS */
+ ctx->aws_sts_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+
+ if (!ctx->aws_sts_tls) {
+ flb_plg_error(ctx->ins, "Failed to create TLS context");
+ flb_bigquery_conf_destroy(ctx);
+ return -1;
+ }
+
+ ctx->aws_sts_upstream = flb_upstream_create(config,
+ ctx->aws_sts_endpoint,
+ 443,
+ io_flags,
+ ctx->aws_sts_tls);
+
+ if (!ctx->aws_sts_upstream) {
+ flb_plg_error(ctx->ins, "AWS STS upstream creation failed");
+ flb_bigquery_conf_destroy(ctx);
+ return -1;
+ }
+
+ ctx->aws_sts_upstream->base.net.keepalive = FLB_FALSE;
+
+ /* Configure Google STS */
+ ctx->google_sts_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+
+ if (!ctx->google_sts_tls) {
+ flb_plg_error(ctx->ins, "Failed to create TLS context");
+ flb_bigquery_conf_destroy(ctx);
+ return -1;
+ }
+
+ ctx->google_sts_upstream = flb_upstream_create_url(config,
+ FLB_BIGQUERY_GOOGLE_STS_URL,
+ io_flags,
+ ctx->google_sts_tls);
+
+ if (!ctx->google_sts_upstream) {
+ flb_plg_error(ctx->ins, "Google STS upstream creation failed");
+ flb_bigquery_conf_destroy(ctx);
+ return -1;
+ }
+
+ /* Configure Google IAM */
+ ctx->google_iam_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+
+ if (!ctx->google_iam_tls) {
+ flb_plg_error(ctx->ins, "Failed to create TLS context");
+ flb_bigquery_conf_destroy(ctx);
+ return -1;
+ }
+
+ ctx->google_iam_upstream = flb_upstream_create_url(config,
+ FLB_BIGQUERY_GOOGLE_IAM_URL,
+ io_flags,
+ ctx->google_iam_tls);
+
+ if (!ctx->google_iam_upstream) {
+ flb_plg_error(ctx->ins, "Google IAM upstream creation failed");
+ flb_bigquery_conf_destroy(ctx);
+ return -1;
+ }
+
+ /* Remove async flag from upstream */
+ flb_stream_disable_async_mode(&ctx->aws_sts_upstream->base);
+ flb_stream_disable_async_mode(&ctx->google_sts_upstream->base);
+ flb_stream_disable_async_mode(&ctx->google_iam_upstream->base);
+ }
+
+ /* Create oauth2 context */
+ ctx->o = flb_oauth2_create(ctx->config, FLB_BIGQUERY_AUTH_URL, 3000);
+ if (!ctx->o) {
+ flb_plg_error(ctx->ins, "cannot create oauth2 context");
+ return -1;
+ }
+ flb_output_upstream_set(ctx->u, ins);
+
+ /* Get or renew the OAuth2 token */
+ if (ctx->has_identity_federation) {
+ token = get_google_service_account_token(ctx);
+ }
+ else {
+ token = get_google_token(ctx);
+ }
+
+ if (!token) {
+ flb_plg_warn(ctx->ins, "token retrieval failed");
+ }
+ else {
+ flb_sds_destroy(token);
+ }
+
+ return 0;
+}
+
+static int bigquery_format(const void *data, size_t bytes,
+ const char *tag, size_t tag_len,
+ char **out_data, size_t *out_size,
+ struct flb_bigquery *ctx)
+{
+ int array_size = 0;
+ flb_sds_t out_buf;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ array_size = flb_mp_count(data, bytes);
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ /*
+ * Pack root map (kind & rows):
+ *
+ * {
+ * "kind": "bigquery#tableDataInsertAllRequest",
+ * "skipInvalidRows": boolean,
+ * "ignoreUnknownValues": boolean,
+ * "rows": []
+ * }
+ */
+ msgpack_pack_map(&mp_pck, 4);
+
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "kind", 4);
+
+ msgpack_pack_str(&mp_pck, 34);
+ msgpack_pack_str_body(&mp_pck, "bigquery#tableDataInsertAllRequest", 34);
+
+ msgpack_pack_str(&mp_pck, 15);
+ msgpack_pack_str_body(&mp_pck, "skipInvalidRows", 15);
+
+ if (ctx->skip_invalid_rows) {
+ msgpack_pack_true(&mp_pck);
+ }
+ else {
+ msgpack_pack_false(&mp_pck);
+ }
+
+ msgpack_pack_str(&mp_pck, 19);
+ msgpack_pack_str_body(&mp_pck, "ignoreUnknownValues", 19);
+
+ if (ctx->ignore_unknown_values) {
+ msgpack_pack_true(&mp_pck);
+ }
+ else {
+ msgpack_pack_false(&mp_pck);
+ }
+
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "rows", 4);
+
+ /* Append entries */
+ msgpack_pack_array(&mp_pck, array_size);
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ /*
+ * Pack entry
+ *
+ * {
+ * "json": {...}
+ * }
+ *
+ * For now, we don't support the insertId that's required for duplicate detection.
+ */
+ msgpack_pack_map(&mp_pck, 1);
+
+ /* json */
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "json", 4);
+ msgpack_pack_object(&mp_pck, *log_event.body);
+ }
+
+ /* Convert from msgpack to JSON */
+ out_buf = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ if (!out_buf) {
+ flb_plg_error(ctx->ins, "error formatting JSON payload");
+ return -1;
+ }
+
+ *out_data = out_buf;
+ *out_size = flb_sds_len(out_buf);
+
+ return 0;
+}
+
+static void cb_bigquery_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ (void) i_ins;
+ (void) config;
+ int ret;
+ int ret_code = FLB_RETRY;
+ size_t b_sent;
+ flb_sds_t token;
+ flb_sds_t payload_buf;
+ size_t payload_size;
+ struct flb_bigquery *ctx = out_context;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+
+ flb_plg_trace(ctx->ins, "flushing bytes %zu", event_chunk->size);
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Get or renew Token */
+ if (ctx->has_identity_federation) {
+ token = get_google_service_account_token(ctx);
+ }
+ else {
+ token = get_google_token(ctx);
+ }
+
+ if (!token) {
+ flb_plg_error(ctx->ins, "cannot retrieve oauth2 token");
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Reformat msgpack to bigquery JSON payload */
+ ret = bigquery_format(event_chunk->data, event_chunk->size,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ &payload_buf, &payload_size, ctx);
+ if (ret != 0) {
+ flb_upstream_conn_release(u_conn);
+ flb_sds_destroy(token);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->uri,
+ payload_buf, payload_size, NULL, 0, NULL, 0);
+ if (!c) {
+ flb_plg_error(ctx->ins, "cannot create HTTP client context");
+ flb_upstream_conn_release(u_conn);
+ flb_sds_destroy(token);
+ flb_sds_destroy(payload_buf);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ flb_http_buffer_size(c, 4192);
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ flb_http_add_header(c, "Content-Type", 12, "application/json", 16);
+
+ /* Compose and append Authorization header */
+ flb_http_add_header(c, "Authorization", 13, token, flb_sds_len(token));
+
+ /* Send HTTP request */
+ ret = flb_http_do(c, &b_sent);
+
+ /* validate response */
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "http_do=%i", ret);
+ ret_code = FLB_RETRY;
+ }
+ else {
+ /* The request was issued successfully, validate the 'error' field */
+ flb_plg_debug(ctx->ins, "HTTP Status=%i", c->resp.status);
+ if (c->resp.status == 200) {
+ ret_code = FLB_OK;
+ }
+ else {
+ if (c->resp.payload && c->resp.payload_size > 0) {
+ /* we got an error */
+ flb_plg_warn(ctx->ins, "response\n%s", c->resp.payload);
+ }
+ ret_code = FLB_RETRY;
+ }
+ }
+
+ /* Cleanup */
+ flb_sds_destroy(payload_buf);
+ flb_sds_destroy(token);
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+
+ /* Done */
+ FLB_OUTPUT_RETURN(ret_code);
+}
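+
+/*
+ * The flush above issues a request of roughly this shape (identifiers are
+ * placeholders; see FLB_BIGQUERY_RESOURCE_TEMPLATE and FLB_BIGQUERY_URL_BASE):
+ *
+ *   POST /bigquery/v2/projects/my-project/datasets/my_dataset/tables/my_table/insertAll
+ *   User-Agent: Fluent-Bit
+ *   Content-Type: application/json
+ *   Authorization: Bearer <oauth2 access token>
+ */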
+
+static int cb_bigquery_exit(void *data, struct flb_config *config)
+{
+ struct flb_bigquery *ctx = data;
+
+ if (!ctx) {
+ return -1;
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ flb_bigquery_conf_destroy(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "google_service_credentials", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_bigquery, credentials_file),
+ "Set the path for the google service credentials file"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "enable_identity_federation", "false",
+ 0, FLB_TRUE, offsetof(struct flb_bigquery, has_identity_federation),
+ "Enable identity federation"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_region", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_bigquery, aws_region),
+ "Enable identity federation"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "project_number", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_bigquery, project_number),
+ "Set project number"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "pool_id", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_bigquery, pool_id),
+ "Set the pool id"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "provider_id", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_bigquery, provider_id),
+ "Set the provider id"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "google_service_account", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_bigquery, google_service_account),
+ "Set the google service account"
+ },
+ // set in flb_bigquery_oauth_credentials
+ {
+ FLB_CONFIG_MAP_STR, "service_account_email", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the service account email"
+ },
+ // set in flb_bigquery_oauth_credentials
+ {
+ FLB_CONFIG_MAP_STR, "service_account_secret", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the service account secret"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "project_id", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_bigquery, project_id),
+ "Set the project id"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "dataset_id", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_bigquery, dataset_id),
+ "Set the dataset id"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "table_id", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_bigquery, table_id),
+ "Set the table id"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "skip_invalid_rows", "false",
+ 0, FLB_TRUE, offsetof(struct flb_bigquery, skip_invalid_rows),
+ "Enable skipping of invalid rows",
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "ignore_unknown_values", "false",
+ 0, FLB_TRUE, offsetof(struct flb_bigquery, ignore_unknown_values),
+ "Enable ignoring unknown value",
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_output_plugin out_bigquery_plugin = {
+ .name = "bigquery",
+ .description = "Send events to BigQuery via streaming insert",
+ .cb_init = cb_bigquery_init,
+ .cb_flush = cb_bigquery_flush,
+ .cb_exit = cb_bigquery_exit,
+ .config_map = config_map,
+ /* Plugin flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_TLS,
+};
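+
+/*
+ * A minimal configuration sketch for this output (all values below are
+ * placeholders):
+ *
+ * [OUTPUT]
+ *     name                        bigquery
+ *     match                       *
+ *     google_service_credentials  /path/to/service-account.json
+ *     project_id                  my-gcp-project
+ *     dataset_id                  my_dataset
+ *     table_id                    my_table
+ */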
diff --git a/src/fluent-bit/plugins/out_bigquery/bigquery.h b/src/fluent-bit/plugins/out_bigquery/bigquery.h
new file mode 100644
index 000000000..c48d9ba41
--- /dev/null
+++ b/src/fluent-bit/plugins/out_bigquery/bigquery.h
@@ -0,0 +1,132 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_BIGQUERY
+#define FLB_OUT_BIGQUERY
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_oauth2.h>
+#include <fluent-bit/flb_sds.h>
+
+/* refresh token every 50 minutes */
+#define FLB_BIGQUERY_TOKEN_REFRESH 3000
+
+/* BigQuery streaming inserts oauth scope */
+#define FLB_BIGQUERY_SCOPE "https://www.googleapis.com/auth/bigquery.insertdata"
+
+/* BigQuery authorization URL */
+#define FLB_BIGQUERY_AUTH_URL "https://oauth2.googleapis.com/token"
+
+#define FLB_BIGQUERY_RESOURCE_TEMPLATE "/bigquery/v2/projects/%s/datasets/%s/tables/%s/insertAll"
+#define FLB_BIGQUERY_URL_BASE "https://www.googleapis.com"
+
+#define FLB_BIGQUERY_GOOGLE_STS_URL "https://sts.googleapis.com"
+#define FLB_BIGQUERY_GOOGLE_IAM_URL "https://iamcredentials.googleapis.com"
+#define FLB_BIGQUERY_AWS_STS_ENDPOINT "/?Action=GetCallerIdentity&Version=2011-06-15"
+
+#define FLB_BIGQUERY_GOOGLE_CLOUD_TARGET_RESOURCE \
+ "//iam.googleapis.com/projects/%s/locations/global/workloadIdentityPools/%s/providers/%s"
+
+#define FLB_BIGQUERY_GOOGLE_STS_TOKEN_GRANT_TYPE "urn:ietf:params:oauth:grant-type:token-exchange"
+#define FLB_BIGQUERY_GOOGLE_STS_TOKEN_REQUESTED_TOKEN_TYPE "urn:ietf:params:oauth:token-type:access_token"
+#define FLB_BIGQUERY_GOOGLE_STS_TOKEN_SCOPE "https://www.googleapis.com/auth/cloud-platform"
+#define FLB_BIGQUERY_GOOGLE_STS_TOKEN_SUBJECT_TOKEN_TYPE "urn:ietf:params:aws:token-type:aws4_request"
+#define FLB_BIGQUERY_GOOGLE_CLOUD_TOKEN_ENDPOINT "/v1/token"
+
+#define FLB_BIGQUERY_GOOGLE_GEN_ACCESS_TOKEN_REQUEST_BODY \
+ "{\"scope\": [\"https://www.googleapis.com/auth/cloud-platform\"]}"
+
+#define FLB_BIGQUERY_GOOGLE_GEN_ACCESS_TOKEN_URL \
+ "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/%s:generateAccessToken"
+
+struct flb_bigquery_oauth_credentials {
+ /* parsed credentials file */
+ flb_sds_t type;
+ flb_sds_t project_id;
+ flb_sds_t private_key_id;
+ flb_sds_t private_key;
+ flb_sds_t client_email;
+ flb_sds_t client_id;
+ flb_sds_t auth_uri;
+ flb_sds_t token_uri;
+};
+
+struct flb_bigquery {
+ /* credentials */
+ flb_sds_t credentials_file;
+
+ struct flb_bigquery_oauth_credentials *oauth_credentials;
+
+ /* Workload Identity Federation */
+ int has_identity_federation;
+ flb_sds_t project_number;
+ flb_sds_t pool_id;
+ flb_sds_t provider_id;
+ flb_sds_t aws_region;
+ flb_sds_t google_service_account;
+
+ /* AWS IMDS */
+ struct flb_tls *aws_tls;
+ struct flb_aws_provider *aws_provider;
+
+ /* AWS STS */
+ flb_sds_t aws_sts_endpoint;
+ struct flb_tls *aws_sts_tls;
+ struct flb_upstream *aws_sts_upstream;
+
+ /* Google STS API */
+ struct flb_tls *google_sts_tls;
+ struct flb_upstream *google_sts_upstream;
+
+ /* Google Service Account Credentials API */
+ struct flb_tls *google_iam_tls;
+ struct flb_upstream *google_iam_upstream;
+
+ /* Google OAuth access token for the service account, obtained in exchange for AWS credentials */
+ flb_sds_t sa_token;
+ time_t sa_token_expiry;
+
+ /* bigquery configuration */
+ flb_sds_t project_id;
+ flb_sds_t dataset_id;
+ flb_sds_t table_id;
+
+ int skip_invalid_rows;
+ int ignore_unknown_values;
+
+ flb_sds_t uri;
+
+ /* oauth2 context */
+ struct flb_oauth2 *o;
+
+ /* mutex for acquiring oauth tokens */
+ pthread_mutex_t token_mutex;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *u;
+
+ /* Fluent Bit context */
+ struct flb_config *config;
+
+ /* Plugin output instance reference */
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_bigquery/bigquery_conf.c b/src/fluent-bit/plugins/out_bigquery/bigquery_conf.c
new file mode 100644
index 000000000..a7855d92f
--- /dev/null
+++ b/src/fluent-bit/plugins/out_bigquery/bigquery_conf.c
@@ -0,0 +1,435 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_unescape.h>
+#include <fluent-bit/flb_jsmn.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_aws_credentials.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "bigquery.h"
+#include "bigquery_conf.h"
+
+
+static inline int key_cmp(char *str, int len, char *cmp) {
+
+ if (strlen(cmp) != len) {
+ return -1;
+ }
+
+ return strncasecmp(str, cmp, len);
+}
+
+static int flb_bigquery_read_credentials_file(struct flb_bigquery *ctx,
+ char *creds,
+ struct flb_bigquery_oauth_credentials *ctx_creds)
+{
+ int i;
+ int ret;
+ int len;
+ int key_len;
+ int val_len;
+ int tok_size = 32;
+ char *buf;
+ char *key;
+ char *val;
+ flb_sds_t tmp;
+ struct stat st;
+ jsmn_parser parser;
+ jsmntok_t *t;
+ jsmntok_t *tokens;
+
+ /* Validate credentials path */
+ ret = stat(creds, &st);
+ if (ret == -1) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "cannot open credentials file: %s",
+ creds);
+ return -1;
+ }
+
+ if (!S_ISREG(st.st_mode) && !S_ISLNK(st.st_mode)) {
+ flb_plg_error(ctx->ins, "credentials file "
+ "is not a valid file: %s", creds);
+ return -1;
+ }
+
+ /* Read file content */
+ buf = mk_file_to_buffer(creds);
+ if (!buf) {
+ flb_plg_error(ctx->ins, "error reading credentials file: %s",
+ creds);
+ return -1;
+ }
+
+ /* Parse content */
+ jsmn_init(&parser);
+ tokens = flb_calloc(1, sizeof(jsmntok_t) * tok_size);
+ if (!tokens) {
+ flb_errno();
+ flb_free(buf);
+ return -1;
+ }
+
+ ret = jsmn_parse(&parser, buf, st.st_size, tokens, tok_size);
+ if (ret <= 0) {
+ flb_plg_error(ctx->ins, "invalid JSON credentials file: %s",
+ creds);
+ flb_free(buf);
+ flb_free(tokens);
+ return -1;
+ }
+
+ t = &tokens[0];
+ if (t->type != JSMN_OBJECT) {
+ flb_plg_error(ctx->ins, "invalid JSON map on file: %s",
+ creds);
+ flb_free(buf);
+ flb_free(tokens);
+ return -1;
+ }
+
+ /* Parse JSON tokens */
+ for (i = 1; i < ret; i++) {
+ t = &tokens[i];
+ if (t->type != JSMN_STRING) {
+ continue;
+ }
+
+ if (t->start == -1 || t->end == -1 || (t->start == 0 && t->end == 0)){
+ break;
+ }
+
+ /* Key */
+ key = buf + t->start;
+ key_len = (t->end - t->start);
+
+ /* Value */
+ i++;
+ t = &tokens[i];
+ val = buf + t->start;
+ val_len = (t->end - t->start);
+
+ if (key_cmp(key, key_len, "type") == 0) {
+ ctx_creds->type = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "project_id") == 0) {
+ ctx_creds->project_id = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "private_key_id") == 0) {
+ ctx_creds->private_key_id = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "private_key") == 0) {
+ tmp = flb_sds_create_len(val, val_len);
+ if (tmp) {
+ /* Unescape private key */
+ len = flb_sds_len(tmp);
+ ctx_creds->private_key = flb_sds_create_size(len);
+ flb_unescape_string(tmp, len,
+ &ctx_creds->private_key);
+ flb_sds_destroy(tmp);
+ }
+ }
+ else if (key_cmp(key, key_len, "client_email") == 0) {
+ ctx_creds->client_email = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "client_id") == 0) {
+ ctx_creds->client_id = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "auth_uri") == 0) {
+ ctx_creds->auth_uri = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "token_uri") == 0) {
+ ctx_creds->token_uri = flb_sds_create_len(val, val_len);
+ }
+ }
+
+ flb_free(buf);
+ flb_free(tokens);
+
+ return 0;
+}
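+
+/*
+ * The credentials file parsed above is expected to be a Google service
+ * account JSON key; an abbreviated example with placeholder values:
+ *
+ * {
+ *   "type": "service_account",
+ *   "project_id": "my-gcp-project",
+ *   "private_key_id": "<key id>",
+ *   "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
+ *   "client_email": "my-sa@my-gcp-project.iam.gserviceaccount.com",
+ *   "client_id": "<client id>",
+ *   "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ *   "token_uri": "https://oauth2.googleapis.com/token"
+ * }
+ */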
+
+
+struct flb_bigquery *flb_bigquery_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ const char *tmp;
+ char *tmp_aws_region;
+ struct flb_bigquery *ctx;
+ struct flb_bigquery_oauth_credentials *creds;
+
+ /* Allocate config context */
+ ctx = flb_calloc(1, sizeof(struct flb_bigquery));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ ctx->config = config;
+
+ ret = flb_output_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to load configuration");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Lookup credentials file */
+ creds = flb_calloc(1, sizeof(struct flb_bigquery_oauth_credentials));
+ if (!creds) {
+ flb_errno();
+ flb_free(ctx);
+ return NULL;
+ }
+ ctx->oauth_credentials = creds;
+
+ if (ctx->credentials_file == NULL) {
+ tmp = getenv("GOOGLE_SERVICE_CREDENTIALS");
+ if (tmp) {
+ ctx->credentials_file = flb_sds_create(tmp);
+ }
+ }
+
+ if (ctx->credentials_file && ctx->has_identity_federation) {
+ flb_plg_error(ctx->ins, "Either `google_service_credentials` or `enable_identity_federation` should be set");
+ return NULL;
+ }
+
+ if (ctx->aws_region) {
+ tmp_aws_region = flb_aws_endpoint("sts", ctx->aws_region);
+ if (!tmp_aws_region) {
+ flb_plg_error(ctx->ins, "Could not create AWS STS regional endpoint");
+ return NULL;
+ }
+ ctx->aws_sts_endpoint = flb_sds_create(tmp_aws_region);
+ flb_free(tmp_aws_region);
+ }
+
+ if (ctx->has_identity_federation) {
+ if (!ctx->aws_region) {
+ flb_plg_error(ctx->ins, "`aws_region` is required when `enable_identity_federation` is true");
+ return NULL;
+ }
+
+ if (!ctx->project_number) {
+ flb_plg_error(ctx->ins, "`project_number` is required when `enable_identity_federation` is true");
+ return NULL;
+ }
+
+ if (!ctx->pool_id) {
+ flb_plg_error(ctx->ins, "`pool_id` is required when `enable_identity_federation` is true");
+ return NULL;
+ }
+
+ if (!ctx->provider_id) {
+ flb_plg_error(ctx->ins, "`provider_id` is required when `enable_identity_federation` is true");
+ return NULL;
+ }
+
+ if (!ctx->google_service_account) {
+ flb_plg_error(ctx->ins, "`google_service_account` is required when `enable_identity_federation` is true");
+ return NULL;
+ }
+ }
+
+ if (ctx->credentials_file) {
+ ret = flb_bigquery_read_credentials_file(ctx,
+ ctx->credentials_file,
+ ctx->oauth_credentials);
+ if (ret != 0) {
+ flb_bigquery_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+ else if (!ctx->credentials_file && !ctx->has_identity_federation) {
+ /*
+ * If no credentials file has been defined, do manual lookup of the
+ * client email and the private key.
+ */
+
+ /* Service Account Email */
+ tmp = flb_output_get_property("service_account_email", ins);
+ if (tmp) {
+ creds->client_email = flb_sds_create(tmp);
+ }
+ else {
+ tmp = getenv("SERVICE_ACCOUNT_EMAIL");
+ if (tmp) {
+ creds->client_email = flb_sds_create(tmp);
+ }
+ }
+
+ /* Service Account Secret */
+ tmp = flb_output_get_property("service_account_secret", ins);
+ if (tmp) {
+ creds->private_key = flb_sds_create(tmp);
+ }
+ else {
+ tmp = getenv("SERVICE_ACCOUNT_SECRET");
+ if (tmp) {
+ creds->private_key = flb_sds_create(tmp);
+ }
+ }
+
+ if (!creds->client_email) {
+ flb_plg_error(ctx->ins, "service_account_email/client_email is not defined");
+ flb_bigquery_conf_destroy(ctx);
+ return NULL;
+ }
+
+ if (!creds->private_key) {
+ flb_plg_error(ctx->ins, "service_account_secret/private_key is not defined");
+ flb_bigquery_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* config: 'project_id' */
+ if (ctx->project_id == NULL) {
+ if (creds->project_id) {
+ /* flb_config_map_destroy uses the pointer within the config_map struct to
+ * free the value, so if we assign it here it is safe to free it later with
+ * the creds struct. If we do not, we will leak here.
+ */
+ ctx->project_id = creds->project_id;
+ if (!ctx->project_id) {
+ flb_plg_error(ctx->ins,
+ "failed extracting 'project_id' from credentials.");
+ flb_bigquery_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "no 'project_id' configured or present in credentials.");
+ flb_bigquery_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* config: 'dataset_id' */
+ if (ctx->dataset_id == NULL) {
+ flb_plg_error(ctx->ins, "property 'dataset_id' is not defined");
+ flb_bigquery_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* config: 'table_id' */
+ if (ctx->table_id == NULL) {
+ flb_plg_error(ctx->ins, "property 'table_id' is not defined");
+ flb_bigquery_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* Create the target URI */
+ ctx->uri = flb_sds_create_size(sizeof(FLB_BIGQUERY_RESOURCE_TEMPLATE)-6 +
+ flb_sds_len(ctx->project_id) +
+ flb_sds_len(ctx->dataset_id) +
+ flb_sds_len(ctx->table_id));
+ if (!ctx->uri) {
+ flb_errno();
+ flb_bigquery_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->uri = flb_sds_printf(&ctx->uri, FLB_BIGQUERY_RESOURCE_TEMPLATE,
+ ctx->project_id, ctx->dataset_id, ctx->table_id);
+
+ flb_plg_info(ctx->ins, "project='%s' dataset='%s' table='%s'",
+ ctx->project_id, ctx->dataset_id, ctx->table_id);
+
+ return ctx;
+}
+
+
+int flb_bigquery_oauth_credentials_destroy(struct flb_bigquery_oauth_credentials *creds)
+{
+ if (!creds) {
+ return -1;
+ }
+ flb_sds_destroy(creds->type);
+ flb_sds_destroy(creds->project_id);
+ flb_sds_destroy(creds->private_key_id);
+ flb_sds_destroy(creds->private_key);
+ flb_sds_destroy(creds->client_email);
+ flb_sds_destroy(creds->client_id);
+ flb_sds_destroy(creds->auth_uri);
+ flb_sds_destroy(creds->token_uri);
+
+ flb_free(creds);
+
+ return 0;
+}
+
+int flb_bigquery_conf_destroy(struct flb_bigquery *ctx)
+{
+ if (!ctx) {
+ return -1;
+ }
+
+ flb_bigquery_oauth_credentials_destroy(ctx->oauth_credentials);
+
+ if (ctx->aws_sts_upstream) {
+ flb_upstream_destroy(ctx->aws_sts_upstream);
+ }
+
+ if (ctx->google_sts_upstream) {
+ flb_upstream_destroy(ctx->google_sts_upstream);
+ }
+
+ if (ctx->google_iam_upstream) {
+ flb_upstream_destroy(ctx->google_iam_upstream);
+ }
+
+ if (ctx->aws_provider) {
+ flb_aws_provider_destroy(ctx->aws_provider);
+ }
+
+ if (ctx->aws_tls) {
+ flb_tls_destroy(ctx->aws_tls);
+ }
+
+ if (ctx->aws_sts_tls) {
+ flb_tls_destroy(ctx->aws_sts_tls);
+ }
+
+ if (ctx->google_sts_tls) {
+ flb_tls_destroy(ctx->google_sts_tls);
+ }
+
+ if (ctx->google_iam_tls) {
+ flb_tls_destroy(ctx->google_iam_tls);
+ }
+
+ flb_sds_destroy(ctx->aws_sts_endpoint);
+ flb_sds_destroy(ctx->sa_token);
+ flb_sds_destroy(ctx->uri);
+
+ if (ctx->o) {
+ flb_oauth2_destroy(ctx->o);
+ }
+
+ flb_free(ctx);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/out_bigquery/bigquery_conf.h b/src/fluent-bit/plugins/out_bigquery/bigquery_conf.h
new file mode 100644
index 000000000..f06a86607
--- /dev/null
+++ b/src/fluent-bit/plugins/out_bigquery/bigquery_conf.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_BIGQUERY_CONF_H
+#define FLB_OUT_BIGQUERY_CONF_H
+
+#include "bigquery.h"
+
+struct flb_bigquery *flb_bigquery_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+int flb_bigquery_conf_destroy(struct flb_bigquery *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_calyptia/CMakeLists.txt b/src/fluent-bit/plugins/out_calyptia/CMakeLists.txt
new file mode 100644
index 000000000..064c4b835
--- /dev/null
+++ b/src/fluent-bit/plugins/out_calyptia/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ calyptia.c)
+
+FLB_PLUGIN(out_calyptia "${src}" "")
diff --git a/src/fluent-bit/plugins/out_calyptia/calyptia.c b/src/fluent-bit/plugins/out_calyptia/calyptia.c
new file mode 100644
index 000000000..19811dcc9
--- /dev/null
+++ b/src/fluent-bit/plugins/out_calyptia/calyptia.c
@@ -0,0 +1,1025 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_version.h>
+#include <fluent-bit/flb_metrics.h>
+#include <fluent-bit/flb_fstore.h>
+
+#include "calyptia.h"
+
+#include <cmetrics/cmetrics.h>
+#include <cmetrics/cmt_encode_influx.h>
+
+flb_sds_t custom_calyptia_pipeline_config_get(struct flb_config *ctx);
+
+static int get_io_flags(struct flb_output_instance *ins)
+{
+ int flags = 0;
+
+ if (ins->use_tls) {
+ flags = FLB_IO_TLS;
+ }
+ else {
+ flags = FLB_IO_TCP;
+ }
+
+ return flags;
+}
+
+static int config_add_labels(struct flb_output_instance *ins,
+ struct flb_calyptia *ctx)
+{
+ struct mk_list *head;
+ struct flb_config_map_val *mv;
+ struct flb_slist_entry *k = NULL;
+ struct flb_slist_entry *v = NULL;
+ struct flb_kv *kv;
+
+ if (!ctx->add_labels || mk_list_size(ctx->add_labels) == 0) {
+ return 0;
+ }
+
+ /* iterate all 'add_label' definitions */
+ flb_config_map_foreach(head, mv, ctx->add_labels) {
+ if (mk_list_size(mv->val.list) != 2) {
+ flb_plg_error(ins, "'add_label' expects a key and a value, "
+ "e.g: 'add_label version 1.8.x'");
+ return -1;
+ }
+
+ k = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ v = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ kv = flb_kv_item_create(&ctx->kv_labels, k->str, v->str);
+ if (!kv) {
+ flb_plg_error(ins, "could not append label %s=%s\n", k->str, v->str);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void append_labels(struct flb_calyptia *ctx, struct cmt *cmt)
+{
+ struct flb_kv *kv;
+ struct mk_list *head;
+
+ mk_list_foreach(head, &ctx->kv_labels) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+ cmt_label_add(cmt, kv->key, kv->val);
+ }
+}
+
+static void pack_str(msgpack_packer *mp_pck, char *str)
+{
+ int len;
+
+ len = strlen(str);
+ msgpack_pack_str(mp_pck, len);
+ msgpack_pack_str_body(mp_pck, str, len);
+}
+
+static void pack_env(struct flb_env *env, char *prefix, char *key,
+ struct flb_mp_map_header *h,
+ msgpack_packer *mp_pck)
+{
+ int len = 0;
+ char *val;
+
+ /* prefix set in the key, if set, adjust the key name */
+ if (prefix) {
+ len = strlen(prefix);
+ }
+
+ val = (char *) flb_env_get(env, key);
+ if (val) {
+ flb_mp_map_header_append(h);
+ pack_str(mp_pck, key + len);
+ pack_str(mp_pck, val);
+ }
+}
+
+static void pack_env_metadata(struct flb_env *env,
+ struct flb_mp_map_header *mh, msgpack_packer *mp_pck)
+{
+ char *tmp;
+ struct flb_mp_map_header h;
+ struct flb_mp_map_header meta;
+
+ /* Metadata */
+ flb_mp_map_header_append(mh);
+ pack_str(mp_pck, "metadata");
+
+ flb_mp_map_header_init(&meta, mp_pck);
+
+ /* Kubernetes */
+ tmp = (char *) flb_env_get(env, "k8s");
+ if (tmp && strcasecmp(tmp, "enabled") == 0) {
+ flb_mp_map_header_append(&meta);
+ pack_str(mp_pck, "k8s");
+
+ /* adding k8s map */
+ flb_mp_map_header_init(&h, mp_pck);
+
+ pack_env(env, "k8s.", "k8s.namespace", &h, mp_pck);
+ pack_env(env, "k8s.", "k8s.pod_name", &h, mp_pck);
+ pack_env(env, "k8s.", "k8s.node_name", &h, mp_pck);
+
+ flb_mp_map_header_end(&h);
+ }
+
+ /* AWS */
+ tmp = (char *) flb_env_get(env, "aws");
+ if (tmp && strcasecmp(tmp, "enabled") == 0) {
+ flb_mp_map_header_append(&meta);
+ pack_str(mp_pck, "aws");
+
+ /* adding aws map */
+ flb_mp_map_header_init(&h, mp_pck);
+
+ pack_env(env, "aws.", "aws.az", &h, mp_pck);
+ pack_env(env, "aws.", "aws.ec2_instance_id", &h, mp_pck);
+ pack_env(env, "aws.", "aws.ec2_instance_type", &h, mp_pck);
+ pack_env(env, "aws.", "aws.private_ip", &h, mp_pck);
+ pack_env(env, "aws.", "aws.vpc_id", &h, mp_pck);
+ pack_env(env, "aws.", "aws.ami_id", &h, mp_pck);
+ pack_env(env, "aws.", "aws.account_id", &h, mp_pck);
+ pack_env(env, "aws.", "aws.hostname", &h, mp_pck);
+
+ flb_mp_map_header_end(&h);
+ }
+ flb_mp_map_header_end(&meta);
+}
+
+static flb_sds_t get_agent_metadata(struct flb_calyptia *ctx)
+{
+ int len;
+ char *host;
+ flb_sds_t conf;
+ flb_sds_t meta;
+ struct flb_mp_map_header mh;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ struct flb_config *config = ctx->config;
+
+ /* init msgpack */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ /* pack map */
+ flb_mp_map_header_init(&mh, &mp_pck);
+
+ host = (char *) flb_env_get(ctx->env, "HOSTNAME");
+ if (!host) {
+ host = "unknown";
+ }
+ len = strlen(host);
+
+ /* name */
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "name", 4);
+ msgpack_pack_str(&mp_pck, len);
+ msgpack_pack_str_body(&mp_pck, host, len);
+
+ /* type */
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "type", 4);
+ msgpack_pack_str(&mp_pck, 9);
+ msgpack_pack_str_body(&mp_pck, "fluentbit", 9);
+
+ /* rawConfig */
+ conf = custom_calyptia_pipeline_config_get(ctx->config);
+ if (conf) {
+ flb_mp_map_header_append(&mh);
+ len = flb_sds_len(conf);
+ msgpack_pack_str(&mp_pck, 9);
+ msgpack_pack_str_body(&mp_pck, "rawConfig", 9);
+ msgpack_pack_str(&mp_pck, len);
+ msgpack_pack_str_body(&mp_pck, conf, len);
+ }
+ flb_sds_destroy(conf);
+
+ /* version */
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 7);
+ msgpack_pack_str_body(&mp_pck, "version", 7);
+ len = strlen(FLB_VERSION_STR);
+ msgpack_pack_str(&mp_pck, len);
+ msgpack_pack_str_body(&mp_pck, FLB_VERSION_STR, len);
+
+ /* edition */
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 7);
+ msgpack_pack_str_body(&mp_pck, "edition", 7);
+ msgpack_pack_str(&mp_pck, 9);
+ msgpack_pack_str_body(&mp_pck, "community", 9);
+
+ /* machineID */
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 9);
+ msgpack_pack_str_body(&mp_pck, "machineID", 9);
+ len = flb_sds_len(ctx->machine_id);
+ msgpack_pack_str(&mp_pck, len);
+ msgpack_pack_str_body(&mp_pck, ctx->machine_id, len);
+
+ /* fleetID */
+ if (ctx->fleet_id) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 7);
+ msgpack_pack_str_body(&mp_pck, "fleetID", 7);
+ len = flb_sds_len(ctx->fleet_id);
+ msgpack_pack_str(&mp_pck, len);
+ msgpack_pack_str_body(&mp_pck, ctx->fleet_id, len);
+ }
+
+ /* pack environment metadata */
+ pack_env_metadata(config->env, &mh, &mp_pck);
+
+ /* finalize */
+ flb_mp_map_header_end(&mh);
+
+ /* convert to json */
+ meta = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ return meta;
+}
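+
+/*
+ * Sketch of the JSON document produced by get_agent_metadata() (values are
+ * placeholders; "fleetID" appears only when a fleet_id is configured, and
+ * the "k8s"/"aws" metadata maps only when enabled in the environment):
+ *
+ * {
+ *   "name": "my-host",
+ *   "type": "fluentbit",
+ *   "rawConfig": "<current pipeline configuration>",
+ *   "version": "<FLB_VERSION_STR>",
+ *   "edition": "community",
+ *   "machineID": "<machine id>",
+ *   "fleetID": "<fleet id>",
+ *   "metadata": {
+ *     "k8s": {"namespace": "...", "pod_name": "...", "node_name": "..."},
+ *     "aws": {"az": "...", "ec2_instance_id": "...", "hostname": "..."}
+ *   }
+ * }
+ */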
+
+static int calyptia_http_do(struct flb_calyptia *ctx, struct flb_http_client *c,
+ int type)
+{
+ int ret;
+ size_t b_sent;
+
+ /* append headers */
+ if (type == CALYPTIA_ACTION_REGISTER) {
+ flb_http_add_header(c,
+ CALYPTIA_H_CTYPE, sizeof(CALYPTIA_H_CTYPE) - 1,
+ CALYPTIA_H_CTYPE_JSON, sizeof(CALYPTIA_H_CTYPE_JSON) - 1);
+
+ flb_http_add_header(c,
+ CALYPTIA_H_PROJECT, sizeof(CALYPTIA_H_PROJECT) - 1,
+ ctx->api_key, flb_sds_len(ctx->api_key));
+ }
+ else if (type == CALYPTIA_ACTION_PATCH) {
+ flb_http_add_header(c,
+ CALYPTIA_H_CTYPE, sizeof(CALYPTIA_H_CTYPE) - 1,
+ CALYPTIA_H_CTYPE_JSON, sizeof(CALYPTIA_H_CTYPE_JSON) - 1);
+
+ flb_http_add_header(c,
+ CALYPTIA_H_AGENT_TOKEN,
+ sizeof(CALYPTIA_H_AGENT_TOKEN) - 1,
+ ctx->agent_token, flb_sds_len(ctx->agent_token));
+ }
+ else if (type == CALYPTIA_ACTION_METRICS) {
+ flb_http_add_header(c,
+ CALYPTIA_H_CTYPE, sizeof(CALYPTIA_H_CTYPE) - 1,
+ CALYPTIA_H_CTYPE_MSGPACK,
+ sizeof(CALYPTIA_H_CTYPE_MSGPACK) - 1);
+
+ flb_http_add_header(c,
+ CALYPTIA_H_AGENT_TOKEN,
+ sizeof(CALYPTIA_H_AGENT_TOKEN) - 1,
+ ctx->agent_token, flb_sds_len(ctx->agent_token));
+ }
+#ifdef FLB_HAVE_CHUNK_TRACE
+ else if (type == CALYPTIA_ACTION_TRACE) {
+ flb_http_add_header(c,
+ CALYPTIA_H_CTYPE, sizeof(CALYPTIA_H_CTYPE) - 1,
+ CALYPTIA_H_CTYPE_JSON, sizeof(CALYPTIA_H_CTYPE_JSON) - 1);
+
+ flb_http_add_header(c,
+ CALYPTIA_H_AGENT_TOKEN,
+ sizeof(CALYPTIA_H_AGENT_TOKEN) - 1,
+ ctx->agent_token, flb_sds_len(ctx->agent_token));
+ }
+#endif
+
+ /* Map debug callbacks */
+ flb_http_client_debug(c, ctx->ins->callback);
+
+ /* Perform HTTP request */
+ ret = flb_http_do(c, &b_sent);
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "http_do=%i", ret);
+ return FLB_RETRY;
+ }
+
+ if (c->resp.status != 200 && c->resp.status != 201 && c->resp.status != 204) {
+ if (c->resp.payload_size > 0) {
+ flb_plg_warn(ctx->ins, "http_status=%i:\n%s",
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "http_status=%i", c->resp.status);
+ }
+
+ /* invalid metrics */
+ if (c->resp.status == 422) {
+ return FLB_ERROR;
+ }
+ return FLB_RETRY;
+ }
+
+ return FLB_OK;
+}
+
+static flb_sds_t get_agent_info(char *buf, size_t size, char *k)
+{
+ int i;
+ int ret;
+ int type;
+ int len;
+ char *out_buf;
+ flb_sds_t v = NULL;
+ size_t off = 0;
+ size_t out_size;
+ msgpack_unpacked result;
+ msgpack_object root;
+ msgpack_object key;
+ msgpack_object val;
+
+ len = strlen(k);
+
+ ret = flb_pack_json(buf, size, &out_buf, &out_size, &type, NULL);
+ if (ret != 0) {
+ return NULL;
+ }
+
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, out_buf, out_size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ flb_free(out_buf);
+ msgpack_unpacked_destroy(&result);
+ return NULL;
+ }
+
+ root = result.data;
+ if (root.type != MSGPACK_OBJECT_MAP) {
+ flb_free(out_buf);
+ msgpack_unpacked_destroy(&result);
+ return NULL;
+ }
+
+ for (i = 0; i < root.via.map.size; i++) {
+ key = root.via.map.ptr[i].key;
+ val = root.via.map.ptr[i].val;
+
+ if (key.type != MSGPACK_OBJECT_STR || val.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ if (key.via.str.size != len) {
+ continue;
+ }
+
+ if (strncmp(key.via.str.ptr, k, len) == 0) {
+ v = flb_sds_create_len(val.via.str.ptr, val.via.str.size);
+ break;
+ }
+ }
+
+ flb_free(out_buf);
+ msgpack_unpacked_destroy(&result);
+ return v;
+}
+
+/* Set the session content */
+static int store_session_set(struct flb_calyptia *ctx, char *buf, size_t size)
+{
+ int ret;
+ int type;
+ char *mp_buf;
+ size_t mp_size;
+
+ /* remove any previous session file */
+ if (ctx->fs_file) {
+ flb_fstore_file_delete(ctx->fs, ctx->fs_file);
+ }
+
+ /* create session file */
+ ctx->fs_file = flb_fstore_file_create(ctx->fs, ctx->fs_stream,
+ CALYPTIA_SESSION_FILE, 1024);
+ if (!ctx->fs_file) {
+ flb_plg_error(ctx->ins, "could not create new session file");
+ return -1;
+ }
+
+ /* store meta */
+ flb_fstore_file_meta_set(ctx->fs, ctx->fs_file,
+ FLB_VERSION_STR "\n", sizeof(FLB_VERSION_STR) - 1);
+
+ /* encode */
+ ret = flb_pack_json(buf, size, &mp_buf, &mp_size, &type, NULL);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "could not encode session information");
+ return -1;
+ }
+
+ /* store content */
+ ret = flb_fstore_file_append(ctx->fs_file, mp_buf, mp_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not store session information");
+ flb_free(mp_buf);
+ return -1;
+ }
+
+ flb_free(mp_buf);
+ return 0;
+}
+
+static int store_session_get(struct flb_calyptia *ctx,
+ void **out_buf, size_t *out_size)
+{
+ int ret;
+ void *buf;
+ size_t size;
+ flb_sds_t json;
+
+ ret = flb_fstore_file_content_copy(ctx->fs, ctx->fs_file,
+ &buf, &size);
+
+ if (size == 0) {
+ return -1;
+ }
+
+ /* decode */
+ json = flb_msgpack_raw_to_json_sds(buf, size);
+ flb_free(buf);
+ if (!json) {
+ return -1;
+ }
+
+ *out_buf = json;
+ *out_size = flb_sds_len(json);
+
+ return ret;
+}
+
+static int store_init(struct flb_calyptia *ctx)
+{
+ int ret;
+ struct flb_fstore *fs;
+ struct flb_fstore_file *fsf;
+ void *buf;
+ size_t size;
+
+ /* store context */
+ fs = flb_fstore_create(ctx->store_path, FLB_FSTORE_FS);
+ if (!fs) {
+ flb_plg_error(ctx->ins,
+ "could not initialize 'store_path': %s",
+ ctx->store_path);
+ return -1;
+ }
+ ctx->fs = fs;
+
+ /* stream */
+ ctx->fs_stream = flb_fstore_stream_create(ctx->fs, "calyptia");
+ if (!ctx->fs_stream) {
+ flb_plg_error(ctx->ins, "could not create storage stream");
+ return -1;
+ }
+
+ /* lookup any previous file */
+ fsf = flb_fstore_file_get(ctx->fs, ctx->fs_stream, CALYPTIA_SESSION_FILE,
+ sizeof(CALYPTIA_SESSION_FILE) - 1);
+ if (!fsf) {
+ flb_plg_debug(ctx->ins, "no session file was found");
+ return 0;
+ }
+ ctx->fs_file = fsf;
+
+ /* retrieve session info */
+ ret = store_session_get(ctx, &buf, &size);
+ if (ret == 0) {
+ /* agent id */
+ ctx->agent_id = get_agent_info(buf, size, "id");
+
+ /* agent token */
+ ctx->agent_token = get_agent_info(buf, size, "token");
+
+ if (ctx->agent_id && ctx->agent_token) {
+ flb_plg_info(ctx->ins, "session setup OK");
+ }
+ else {
+ if (ctx->agent_id) {
+ flb_sds_destroy(ctx->agent_id);
+ }
+ if (ctx->agent_token) {
+ flb_sds_destroy(ctx->agent_token);
+ }
+ }
+ flb_sds_destroy(buf);
+ }
+
+ return 0;
+}
+
+/* Agent creation is performed on initialization using a synchronous upstream connection */
+static int api_agent_create(struct flb_config *config, struct flb_calyptia *ctx)
+{
+ int ret;
+ int flb_ret;
+ int flags;
+ int action = CALYPTIA_ACTION_REGISTER;
+ char uri[1024];
+ flb_sds_t meta;
+ struct flb_upstream *u;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+
+ /* Meta */
+ meta = get_agent_metadata(ctx);
+ if (!meta) {
+ flb_plg_error(ctx->ins, "could not retrieve metadata");
+ return -1;
+ }
+
+ /* Upstream */
+ flags = get_io_flags(ctx->ins);
+ u = flb_upstream_create(ctx->config,
+ ctx->cloud_host, ctx->cloud_port,
+ flags, ctx->ins->tls);
+ if (!u) {
+ flb_plg_error(ctx->ins,
+ "could not create upstream connection on 'agent create'");
+ flb_sds_destroy(meta);
+ return -1;
+ }
+
+ /* Make it synchronous */
+ flb_stream_disable_async_mode(&u->base);
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(u);
+ if (!u_conn) {
+ flb_upstream_destroy(u);
+ flb_sds_destroy(meta);
+ return -1;
+ }
+
+ if (ctx->agent_id && ctx->agent_token) {
+ /* Patch */
+ action = CALYPTIA_ACTION_PATCH;
+ snprintf(uri, sizeof(uri) - 1, CALYPTIA_ENDPOINT_PATCH, ctx->agent_id);
+ c = flb_http_client(u_conn, FLB_HTTP_PATCH, uri,
+ meta, flb_sds_len(meta), NULL, 0, NULL, 0);
+ }
+ else {
+ /* Create */
+ action = CALYPTIA_ACTION_REGISTER;
+ c = flb_http_client(u_conn, FLB_HTTP_POST, CALYPTIA_ENDPOINT_CREATE,
+ meta, flb_sds_len(meta), NULL, 0, NULL, 0);
+ }
+
+ if (!c) {
+ flb_upstream_conn_release(u_conn);
+ flb_upstream_destroy(u);
+ return -1;
+ }
+
+ /* perform request */
+ flb_ret = calyptia_http_do(ctx, c, action);
+ if (flb_ret == FLB_OK &&
+ (c->resp.status == 200 || c->resp.status == 201 || c->resp.status == 204)) {
+ if (c->resp.payload_size > 0) {
+ if (action == CALYPTIA_ACTION_REGISTER) {
+ /* agent id */
+ ctx->agent_id = get_agent_info(c->resp.payload,
+ c->resp.payload_size,
+ "id");
+
+ /* agent token */
+ ctx->agent_token = get_agent_info(c->resp.payload,
+ c->resp.payload_size,
+ "token");
+
+ if (ctx->agent_id && ctx->agent_token) {
+ flb_plg_info(ctx->ins, "connected to Calyptia, agent_id='%s'",
+ ctx->agent_id);
+
+ if (ctx->store_path && ctx->fs) {
+ ret = store_session_set(ctx,
+ c->resp.payload,
+ c->resp.payload_size);
+ if (ret == -1) {
+ flb_plg_warn(ctx->ins,
+ "could not store Calyptia session");
+ }
+ }
+ }
+ }
+ }
+
+ if (action == CALYPTIA_ACTION_PATCH) {
+ flb_plg_info(ctx->ins, "known agent registration successful");
+ }
+ }
+
+ /* release resources */
+ flb_sds_destroy(meta);
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ flb_upstream_destroy(u);
+
+ return flb_ret;
+}
+
+static struct flb_calyptia *config_init(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ int flags;
+ struct flb_calyptia *ctx;
+
+ /* Calyptia plugin context */
+ ctx = flb_calloc(1, sizeof(struct flb_calyptia));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ ctx->config = config;
+ flb_kv_init(&ctx->kv_labels);
+
+ /* Load the config map */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* api_key */
+ if (!ctx->api_key) {
+ flb_plg_error(ctx->ins, "configuration 'api_key' is missing");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* parse 'add_label' */
+ ret = config_add_labels(ins, ctx);
+ if (ret == -1) {
+ return NULL;
+ }
+
+ /* env reader */
+ ctx->env = flb_env_create();
+
+ /* Set context */
+ flb_output_set_context(ins, ctx);
+
+ /* Initialize optional storage */
+ if (ctx->store_path) {
+ ret = store_init(ctx);
+ if (ret == -1) {
+ return NULL;
+ }
+ }
+
+ /* the machine-id is provided by custom calyptia, which invokes this plugin. */
+ if (!ctx->machine_id) {
+ flb_plg_error(ctx->ins, "machine_id has not been set");
+ return NULL;
+ }
+
+ flb_plg_debug(ctx->ins, "machine_id=%s", ctx->machine_id);
+
+ /* Upstream */
+ flags = get_io_flags(ctx->ins);
+ ctx->u = flb_upstream_create(ctx->config,
+ ctx->cloud_host, ctx->cloud_port,
+ flags, ctx->ins->tls);
+ if (!ctx->u) {
+ return NULL;
+ }
+
+ /* Set instance flags into upstream */
+ flb_output_upstream_set(ctx->u, ins);
+
+ return ctx;
+}
+
+static int cb_calyptia_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ struct flb_calyptia *ctx;
+ (void) data;
+
+ /* create config context */
+ ctx = config_init(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "could not initialize configuration");
+ return -1;
+ }
+
+ /*
+ * This plugin instance uses the HTTP client interface, so register
+ * its debugging callbacks.
+ */
+ flb_output_set_http_debug_callbacks(ins);
+
+ /* register/update agent */
+ ret = api_agent_create(config, ctx);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "agent registration failed");
+ return -1;
+ }
+
+ /* metrics endpoint */
+ ctx->metrics_endpoint = flb_sds_create_size(256);
+ flb_sds_printf(&ctx->metrics_endpoint, CALYPTIA_ENDPOINT_METRICS,
+ ctx->agent_id);
+
+#ifdef FLB_HAVE_CHUNK_TRACE
+ ctx->trace_endpoint = flb_sds_create_size(256);
+ flb_sds_printf(&ctx->trace_endpoint, CALYPTIA_ENDPOINT_TRACE,
+ ctx->pipeline_id);
+#endif /* FLB_HAVE_CHUNK_TRACE */
+ return 0;
+}
+
+static void debug_payload(struct flb_calyptia *ctx, void *data, size_t bytes)
+{
+ int ret;
+ size_t off = 0;
+ struct cmt *cmt;
+ cfl_sds_t out;
+
+ ret = cmt_decode_msgpack_create(&cmt, (char *) data, bytes, &off);
+ if (ret != CMT_DECODE_MSGPACK_SUCCESS) {
+ flb_plg_warn(ctx->ins, "could not unpack debug payload");
+ return;
+ }
+
+ out = cmt_encode_text_create(cmt);
+ flb_plg_info(ctx->ins, "debug payload:\n%s", out);
+ cmt_encode_text_destroy(out);
+ cmt_destroy(cmt);
+}
+
+static void cb_calyptia_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ size_t off = 0;
+ size_t out_size = 0;
+ char *out_buf = NULL;
+
+/* used to create records for reporting traces to the cloud. */
+#ifdef FLB_HAVE_CHUNK_TRACE
+ flb_sds_t json;
+#endif /* FLB_HAVE_CHUNK_TRACE */
+
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ struct flb_calyptia *ctx = out_context;
+ struct cmt *cmt;
+ (void) i_ins;
+ (void) config;
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ if (event_chunk->type == FLB_EVENT_TYPE_METRICS) {
+ /* if we have labels append them */
+ if (ctx->add_labels && mk_list_size(ctx->add_labels) > 0) {
+ ret = cmt_decode_msgpack_create(&cmt,
+ (char *) event_chunk->data,
+ event_chunk->size,
+ &off);
+ if (ret != CMT_DECODE_MSGPACK_SUCCESS) {
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ /* append labels set by config */
+ append_labels(ctx, cmt);
+
+ /* encode back to msgpack */
+ ret = cmt_encode_msgpack_create(cmt, &out_buf, &out_size);
+ if (ret != 0) {
+ cmt_destroy(cmt);
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+ cmt_destroy(cmt);
+ }
+ else {
+ out_buf = (char *) event_chunk->data;
+ out_size = event_chunk->size;
+ }
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->metrics_endpoint,
+ out_buf, out_size, NULL, 0, NULL, 0);
+ if (!c) {
+ if (out_buf != event_chunk->data) {
+ cmt_encode_msgpack_destroy(out_buf);
+ }
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* perform request: 'ret' might be FLB_OK, FLB_ERROR or FLB_RETRY */
+ ret = calyptia_http_do(ctx, c, CALYPTIA_ACTION_METRICS);
+ if (ret == FLB_OK) {
+ flb_plg_debug(ctx->ins, "metrics delivered OK");
+ }
+ else if (ret == FLB_ERROR) {
+ flb_plg_error(ctx->ins, "could not deliver metrics");
+ debug_payload(ctx, out_buf, out_size);
+ }
+
+ if (out_buf != event_chunk->data) {
+ cmt_encode_msgpack_destroy(out_buf);
+ }
+ }
+
+#ifdef FLB_HAVE_CHUNK_TRACE
+ if (event_chunk->type == (FLB_EVENT_TYPE_LOGS | FLB_EVENT_TYPE_HAS_TRACE)) {
+ json = flb_pack_msgpack_to_json_format(event_chunk->data,
+ event_chunk->size,
+ FLB_PACK_JSON_FORMAT_STREAM,
+ FLB_PACK_JSON_DATE_DOUBLE,
+ NULL);
+ if (json == NULL) {
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ out_buf = (char *)json;
+ out_size = flb_sds_len(json);
+
+ if (flb_sds_printf(&ctx->metrics_endpoint, CALYPTIA_ENDPOINT_METRICS,
+ ctx->agent_id) == NULL) {
+ flb_upstream_conn_release(u_conn);
+ flb_sds_destroy(json);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->trace_endpoint,
+ out_buf, out_size, NULL, 0, NULL, 0);
+ if (!c) {
+ flb_upstream_conn_release(u_conn);
+ flb_sds_destroy(json);
+ flb_sds_destroy(ctx->metrics_endpoint);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* perform request: 'ret' might be FLB_OK, FLB_ERROR or FLB_RETRY */
+ ret = calyptia_http_do(ctx, c, CALYPTIA_ACTION_TRACE);
+ if (ret == FLB_OK) {
+ flb_plg_debug(ctx->ins, "trace delivered OK");
+ }
+ else if (ret == FLB_ERROR) {
+ flb_plg_error(ctx->ins, "could not deliver trace");
+ debug_payload(ctx, out_buf, out_size);
+ }
+ flb_sds_destroy(json);
+ }
+#endif /* FLB_HAVE_CHUNK_TRACE */
+
+ flb_upstream_conn_release(u_conn);
+ flb_http_client_destroy(c);
+ FLB_OUTPUT_RETURN(ret);
+}
+
+static int cb_calyptia_exit(void *data, struct flb_config *config)
+{
+ struct flb_calyptia *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ if (ctx->agent_id) {
+ flb_sds_destroy(ctx->agent_id);
+ }
+
+ if (ctx->agent_token) {
+ flb_sds_destroy(ctx->agent_token);
+ }
+
+ if (ctx->env) {
+ flb_env_destroy(ctx->env);
+ }
+
+ if (ctx->metrics_endpoint) {
+ flb_sds_destroy(ctx->metrics_endpoint);
+ }
+
+#ifdef FLB_HAVE_CHUNK_TRACE
+ if (ctx->trace_endpoint) {
+ flb_sds_destroy(ctx->trace_endpoint);
+ }
+#endif /* FLB_HAVE_CHUNK_TRACE */
+
+ if (ctx->fs) {
+ flb_fstore_destroy(ctx->fs);
+ }
+
+ flb_kv_release(&ctx->kv_labels);
+ flb_free(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "cloud_host", CALYPTIA_HOST,
+ 0, FLB_TRUE, offsetof(struct flb_calyptia, cloud_host),
+ "",
+ },
+
+ {
+ FLB_CONFIG_MAP_INT, "cloud_port", CALYPTIA_PORT,
+ 0, FLB_TRUE, offsetof(struct flb_calyptia, cloud_port),
+ "",
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "api_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_calyptia, api_key),
+ "Calyptia Cloud API Key."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "machine_id", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_calyptia, machine_id),
+ "Custom machine_id to be used when registering agent"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "fleet_id", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_calyptia, fleet_id),
+ "Fleet ID for identifying as part of a managed fleet"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "store_path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_calyptia, store_path),
+ ""
+ },
+
+ {
+ FLB_CONFIG_MAP_SLIST_1, "add_label", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_calyptia, add_labels),
+ "Label to append to the generated metric."
+ },
+
+#ifdef FLB_HAVE_CHUNK_TRACE
+ {
+ FLB_CONFIG_MAP_STR, "pipeline_id", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_calyptia, pipeline_id),
+ "Pipeline ID for calyptia core traces."
+ },
+#endif
+
+ /* EOF */
+ {0}
+};
+
+struct flb_output_plugin out_calyptia_plugin = {
+ .name = "calyptia",
+ .description = "Calyptia Cloud",
+ .cb_init = cb_calyptia_init,
+ .cb_flush = cb_calyptia_flush,
+ .cb_exit = cb_calyptia_exit,
+ .config_map = config_map,
+ .flags = FLB_OUTPUT_NET | FLB_OUTPUT_PRIVATE | FLB_IO_OPT_TLS,
+ .event_type = FLB_OUTPUT_METRICS
+};
diff --git a/src/fluent-bit/plugins/out_calyptia/calyptia.h b/src/fluent-bit/plugins/out_calyptia/calyptia.h
new file mode 100644
index 000000000..db640ff10
--- /dev/null
+++ b/src/fluent-bit/plugins/out_calyptia/calyptia.h
@@ -0,0 +1,85 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_CALYPTIA_H
+#define FLB_OUT_CALYPTIA_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_env.h>
+#include <fluent-bit/flb_fstore.h>
+
+/* End point */
+#define CALYPTIA_HOST "cloud-api.calyptia.com"
+#define CALYPTIA_PORT "443"
+
+/* HTTP action types */
+#define CALYPTIA_ACTION_REGISTER 0
+#define CALYPTIA_ACTION_PATCH 1
+#define CALYPTIA_ACTION_METRICS 2
+#define CALYPTIA_ACTION_TRACE 3
+
+/* Endpoints */
+#define CALYPTIA_ENDPOINT_CREATE "/v1/agents"
+#define CALYPTIA_ENDPOINT_PATCH "/v1/agents/%s"
+#define CALYPTIA_ENDPOINT_METRICS "/v1/agents/%s/metrics"
+#define CALYPTIA_ENDPOINT_TRACE "/v1/traces/%s"
+
+/* Storage */
+#define CALYPTIA_SESSION_FILE "session.CALYPTIA"
+
+/* Headers */
+#define CALYPTIA_H_PROJECT "X-Project-Token"
+#define CALYPTIA_H_AGENT_TOKEN "X-Agent-Token"
+#define CALYPTIA_H_CTYPE "Content-Type"
+#define CALYPTIA_H_CTYPE_JSON "application/json"
+#define CALYPTIA_H_CTYPE_MSGPACK "application/x-msgpack"
+
+struct flb_calyptia {
+ /* config map */
+ int cloud_port;
+ flb_sds_t api_key;
+ flb_sds_t cloud_host;
+ flb_sds_t store_path;
+
+ /* config reader for 'add_label' */
+ struct mk_list *add_labels;
+
+ /* internal */
+ flb_sds_t agent_id;
+ flb_sds_t agent_token;
+ flb_sds_t machine_id; /* machine-id */
+ flb_sds_t fleet_id; /* fleet-id */
+ flb_sds_t metrics_endpoint; /* metrics endpoint */
+ struct flb_fstore *fs; /* fstore ctx */
+ struct flb_fstore_stream *fs_stream; /* fstore stream */
+ struct flb_fstore_file *fs_file; /* fstore session file */
+ struct flb_env *env; /* environment */
+ struct flb_upstream *u; /* upstream connection */
+ struct mk_list kv_labels; /* parsed add_labels */
+ struct flb_output_instance *ins; /* plugin instance */
+ struct flb_config *config; /* Fluent Bit context */
+/* used for reporting chunk trace records to calyptia cloud. */
+#ifdef FLB_HAVE_CHUNK_TRACE
+ flb_sds_t trace_endpoint;
+ flb_sds_t pipeline_id;
+#endif /* FLB_HAVE_CHUNK_TRACE */
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_chronicle/CMakeLists.txt b/src/fluent-bit/plugins/out_chronicle/CMakeLists.txt
new file mode 100644
index 000000000..ca9180305
--- /dev/null
+++ b/src/fluent-bit/plugins/out_chronicle/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ chronicle_conf.c
+ chronicle.c
+ )
+
+FLB_PLUGIN(out_chronicle "${src}" "")
diff --git a/src/fluent-bit/plugins/out_chronicle/chronicle.c b/src/fluent-bit/plugins/out_chronicle/chronicle.c
new file mode 100644
index 000000000..479dd8035
--- /dev/null
+++ b/src/fluent-bit/plugins/out_chronicle/chronicle.c
@@ -0,0 +1,962 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_oauth2.h>
+#include <fluent-bit/flb_base64.h>
+#include <fluent-bit/flb_hash.h>
+#include <fluent-bit/flb_crypto.h>
+#include <fluent-bit/flb_signv4.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include <msgpack.h>
+
+#include "chronicle.h"
+#include "chronicle_conf.h"
+
+// TODO: The following code is copied from the Stackdriver plugin and should be
+// factored into common library functions.
+
+/*
+ * Base64 Encoding in JWT must:
+ *
+ * - remove any trailing padding '=' character
+ * - replace '+' with '-'
+ * - replace '/' with '_'
+ *
+ * ref: https://www.rfc-editor.org/rfc/rfc7515.txt Appendix C
+ */
+int chronicle_jwt_base64_url_encode(unsigned char *out_buf, size_t out_size,
+ unsigned char *in_buf, size_t in_size,
+ size_t *olen)
+
+{
+ int i;
+ size_t len;
+ int result;
+
+ /* do normal base64 encoding */
+ result = flb_base64_encode((unsigned char *) out_buf, out_size - 1,
+ &len, in_buf, in_size);
+ if (result != 0) {
+ return -1;
+ }
+
+ /* Replace '+' and '/' characters */
+ for (i = 0; i < len && out_buf[i] != '='; i++) {
+ if (out_buf[i] == '+') {
+ out_buf[i] = '-';
+ }
+ else if (out_buf[i] == '/') {
+ out_buf[i] = '_';
+ }
+ }
+
+ /* Now 'i' becomes the new length */
+ *olen = i;
+ return 0;
+}
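+
+/*
+ * Example of the transformation (placeholder input): a standard base64
+ * output of "ab+cd/ef==" becomes "ab-cd_ef" after the URL-safe character
+ * substitution and removal of the trailing '=' padding.
+ */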
+
+static int chronicle_jwt_encode(struct flb_chronicle *ctx,
+ char *payload, char *secret,
+ char **out_signature, size_t *out_size)
+{
+ int ret;
+ int len;
+ int buf_size;
+ size_t olen;
+ char *buf;
+ char *sigd;
+ char *headers = "{\"alg\": \"RS256\", \"typ\": \"JWT\"}";
+ unsigned char sha256_buf[32] = {0};
+ flb_sds_t out;
+ unsigned char sig[256] = {0};
+ size_t sig_len;
+
+ buf_size = (strlen(payload) + strlen(secret)) * 2;
+ buf = flb_malloc(buf_size);
+ if (!buf) {
+ flb_errno();
+ return -1;
+ }
+
+ /* Encode header */
+ len = strlen(headers);
+ ret = flb_base64_encode((unsigned char *) buf, buf_size - 1,
+ &olen, (unsigned char *) headers, len);
+ if (ret != 0) {
+ flb_free(buf);
+
+ return ret;
+ }
+
+ /* Create buffer to store JWT */
+ out = flb_sds_create_size(2048);
+ if (!out) {
+ flb_errno();
+ flb_free(buf);
+ return -1;
+ }
+
+ /* Append header */
+ flb_sds_cat(out, buf, olen);
+ flb_sds_cat(out, ".", 1);
+
+ /* Encode Payload */
+ len = strlen(payload);
+ chronicle_jwt_base64_url_encode((unsigned char *) buf, buf_size,
+ (unsigned char *) payload, len, &olen);
+
+ /* Append Payload */
+ flb_sds_cat(out, buf, olen);
+
+ /* do sha256() of base64(header).base64(payload) */
+ ret = flb_hash_simple(FLB_HASH_SHA256,
+ (unsigned char *) out, flb_sds_len(out),
+ sha256_buf, sizeof(sha256_buf));
+
+ if (ret != FLB_CRYPTO_SUCCESS) {
+ flb_plg_error(ctx->ins, "error hashing token");
+ flb_free(buf);
+ flb_sds_destroy(out);
+ return -1;
+ }
+
+ len = strlen(secret);
+ sig_len = sizeof(sig);
+
+ ret = flb_crypto_sign_simple(FLB_CRYPTO_PRIVATE_KEY,
+ FLB_CRYPTO_PADDING_PKCS1,
+ FLB_HASH_SHA256,
+ (unsigned char *) secret, len,
+ sha256_buf, sizeof(sha256_buf),
+ sig, &sig_len);
+
+ if (ret != FLB_CRYPTO_SUCCESS) {
+ flb_plg_error(ctx->ins, "error creating RSA context");
+ flb_free(buf);
+ flb_sds_destroy(out);
+ return -1;
+ }
+
+ sigd = flb_malloc(2048);
+ if (!sigd) {
+ flb_errno();
+ flb_free(buf);
+ flb_sds_destroy(out);
+ return -1;
+ }
+
+ chronicle_jwt_base64_url_encode((unsigned char *) sigd, 2048, sig, 256, &olen);
+
+ flb_sds_cat(out, ".", 1);
+ flb_sds_cat(out, sigd, olen);
+
+ *out_signature = out;
+ *out_size = flb_sds_len(out);
+
+ flb_free(buf);
+ flb_free(sigd);
+
+ return 0;
+}
+
+/* Get a new oauth2 access token using the existing oauth2 context */
+static int chronicle_get_oauth2_token(struct flb_chronicle *ctx)
+{
+ int ret;
+ char *token;
+ char *sig_data;
+ size_t sig_size;
+ time_t issued;
+ time_t expires;
+ char payload[1024];
+
+ /* Clear any previous oauth2 payload content */
+ flb_oauth2_payload_clear(ctx->o);
+
+ /* JWT encode for oauth2 */
+ issued = time(NULL);
+ expires = issued + FLB_CHRONICLE_TOKEN_REFRESH;
+
+ snprintf(payload, sizeof(payload) - 1,
+ "{\"iss\": \"%s\", \"scope\": \"%s\", "
+ "\"aud\": \"%s\", \"exp\": %lu, \"iat\": %lu}",
+ ctx->oauth_credentials->client_email, FLB_CHRONICLE_SCOPE,
+ FLB_CHRONICLE_AUTH_URL,
+ expires, issued);
+
+ /* Compose JWT signature */
+ ret = chronicle_jwt_encode(ctx, payload, ctx->oauth_credentials->private_key,
+ &sig_data, &sig_size);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "JWT signature generation failed");
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "JWT signature:\n%s", sig_data);
+
+ ret = flb_oauth2_payload_append(ctx->o,
+ "grant_type", -1,
+ "urn%3Aietf%3Aparams%3Aoauth%3A"
+ "grant-type%3Ajwt-bearer", -1);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error appending oauth2 params");
+ flb_sds_destroy(sig_data);
+ return -1;
+ }
+
+ ret = flb_oauth2_payload_append(ctx->o,
+ "assertion", -1,
+ sig_data, sig_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error appending oauth2 params");
+ flb_sds_destroy(sig_data);
+ return -1;
+ }
+ flb_sds_destroy(sig_data);
+
+ /* Retrieve access token */
+ token = flb_oauth2_token_get(ctx->o);
+ if (!token) {
+ flb_plg_error(ctx->ins, "error retrieving oauth2 access token");
+ return -1;
+ }
+
+ return 0;
+}
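+
+/*
+ * For reference, the oauth2 payload assembled above produces a token request
+ * against FLB_CHRONICLE_AUTH_URL whose form body is roughly:
+ *
+ *   grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer&assertion=<signed JWT>
+ *
+ * The grant_type value is appended already URL-encoded; the assertion is the
+ * JWT produced by chronicle_jwt_encode().
+ */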
+
+static flb_sds_t get_google_token(struct flb_chronicle *ctx)
+{
+ int ret = 0;
+ flb_sds_t output = NULL;
+
+ if (pthread_mutex_lock(&ctx->token_mutex)){
+ flb_plg_error(ctx->ins, "error locking mutex");
+ return NULL;
+ }
+
+ if (flb_oauth2_token_expired(ctx->o) == FLB_TRUE) {
+ ret = chronicle_get_oauth2_token(ctx);
+ }
+
+ /* Copy string to prevent race conditions (get_oauth2 can free the string) */
+ if (ret == 0) {
+ output = flb_sds_create(ctx->o->token_type);
+ flb_sds_printf(&output, " %s", ctx->o->access_token);
+ }
+
+ if (pthread_mutex_unlock(&ctx->token_mutex)){
+ flb_plg_error(ctx->ins, "error unlocking mutex");
+ if (output) {
+ flb_sds_destroy(output);
+ }
+ return NULL;
+ }
+
+ return output;
+}
+
+static int validate_log_type(struct flb_chronicle *ctx, struct flb_config *config,
+ const char *body, size_t len)
+{
+ int ret = -1;
+ int root_type;
+ char *msgpack_buf = NULL;
+ size_t msgpack_size;
+ size_t off = 0;
+ msgpack_unpacked result;
+ int i, j, k;
+ msgpack_object key;
+ msgpack_object val;
+ msgpack_object root;
+ msgpack_object *array;
+ msgpack_object *supported_type;
+ int root_map_size;
+ int array_size = 0;
+
+
+ ret = flb_pack_json(body, len,
+ &msgpack_buf, &msgpack_size,
+ &root_type, NULL);
+
+ if (ret != 0 || root_type != JSMN_OBJECT) {
+ flb_plg_error(ctx->ins, "json to msgpack conversion error");
+ return -1;
+ }
+
+ ret = -1;
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, msgpack_buf, msgpack_size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ if (result.data.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "Invalid log_type payload");
+ ret = -2;
+
+ goto cleanup;
+ }
+
+ root = result.data;
+ root_map_size = root.via.map.size;
+
+ for (i = 0; i < root_map_size; i++) {
+ key = root.via.map.ptr[i].key;
+ val = root.via.map.ptr[i].val;
+
+ if (val.type != MSGPACK_OBJECT_ARRAY) {
+ flb_plg_error(ctx->ins, "Invalid inner array type of log_type payload");
+ ret = -2;
+
+ goto cleanup;
+ }
+
+ array = val.via.array.ptr;
+ array_size = val.via.array.size;
+
+ for (j = 0; j < array_size; j++) {
+ supported_type = &array[j];
+
+ if (supported_type->type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "Invalid inner maps of log_type payload");
+ ret = -2;
+
+ continue;
+ }
+
+ for (k = 0; k < supported_type->via.map.size; k++) {
+ key = supported_type->via.map.ptr[k].key;
+ val = supported_type->via.map.ptr[k].val;
+
+ if (strncmp("logType", key.via.str.ptr, key.via.str.size) == 0) {
+ if (strncmp(ctx->log_type, val.via.bin.ptr, val.via.str.size) == 0) {
+ ret = 0;
+ goto cleanup;
+ }
+ }
+ }
+ }
+ }
+ }
+
+cleanup:
+ msgpack_unpacked_destroy(&result);
+
+ /* release 'msgpack_buf' if it was allocated */
+ if (msgpack_buf) {
+ flb_free(msgpack_buf);
+ }
+
+ return ret;
+}
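+
+/*
+ * Note: the walk above only assumes the /v2/logtypes response is a JSON
+ * object whose values are arrays of objects carrying a "logType" field. An
+ * assumed, abbreviated example of such a response:
+ *
+ *   {
+ *     "logTypes": [
+ *       {"logType": "BIND_DNS", "description": "BIND DNS logs"},
+ *       {"logType": "..."}
+ *     ]
+ *   }
+ *
+ * Only the "logType" values are compared against the configured log_type;
+ * any other fields are ignored.
+ */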
+
+static int check_chronicle_log_type(struct flb_chronicle *ctx, struct flb_config *config)
+{
+ int ret;
+ size_t b_sent;
+ flb_sds_t token;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ return -1;
+ }
+
+ /* Get or renew Token */
+ token = get_google_token(ctx);
+
+ if (!token) {
+ flb_plg_error(ctx->ins, "cannot retrieve oauth2 token");
+ flb_upstream_conn_release(u_conn);
+ return -1;
+ }
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_GET, FLB_CHRONICLE_LOG_TYPE_ENDPOINT,
+ NULL, 0, NULL, 0, NULL, 0);
+ if (!c) {
+ flb_plg_error(ctx->ins, "cannot create HTTP client context");
+ flb_upstream_conn_release(u_conn);
+ flb_sds_destroy(token);
+
+ return -1;
+ }
+
+ /* Chronicle's list of supported log types keeps growing, so do not cap the response buffer size */
+ flb_http_buffer_size(c, 0);
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ flb_http_add_header(c, "Content-Type", 12, "application/json", 16);
+
+ /* Compose and append Authorization header */
+ flb_http_add_header(c, "Authorization", 13, token, flb_sds_len(token));
+
+ /* Send HTTP request */
+ ret = flb_http_do(c, &b_sent);
+
+ /* validate response */
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "http_do=%i", ret);
+ goto cleanup;
+ }
+ else {
+ /* The request was issued successfully, validate the 'error' field */
+ flb_plg_debug(ctx->ins, "HTTP Status=%i", c->resp.status);
+ if (c->resp.status == 200) {
+ ret = validate_log_type(ctx, config, c->resp.payload, c->resp.payload_size);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "Validate log_type is failed");
+ goto cleanup;
+ }
+ }
+ else {
+ if (c->resp.payload && c->resp.payload_size > 0) {
+ /* we got an error */
+ flb_plg_warn(ctx->ins, "response\n%s", c->resp.payload);
+ }
+
+ goto cleanup;
+ }
+ }
+
+cleanup:
+
+ /* Cleanup */
+ flb_sds_destroy(token);
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+
+ return ret;
+}
+
+static int cb_chronicle_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ char *token;
+ int io_flags = FLB_IO_TLS;
+ struct flb_chronicle *ctx;
+ int ret;
+
+ /* Create config context */
+ ctx = flb_chronicle_conf_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "configuration failed");
+ return -1;
+ }
+
+ flb_output_set_context(ins, ctx);
+
+ /* Network mode IPv6 */
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ /* Create mutex for acquiring oauth tokens (they are shared across flush coroutines) */
+ pthread_mutex_init(&ctx->token_mutex, NULL);
+
+ /*
+ * Create upstream context for Chronicle Streaming Inserts
+ * (no oauth2 service)
+ */
+ ctx->u = flb_upstream_create_url(config, ctx->uri,
+ io_flags, ins->tls);
+ if (!ctx->u) {
+ flb_plg_error(ctx->ins, "upstream creation failed");
+ return -1;
+ }
+
+ /* Create oauth2 context */
+ ctx->o = flb_oauth2_create(ctx->config, FLB_CHRONICLE_AUTH_URL, 3000);
+ if (!ctx->o) {
+ flb_plg_error(ctx->ins, "cannot create oauth2 context");
+ return -1;
+ }
+ flb_output_upstream_set(ctx->u, ins);
+
+ /* Get or renew the OAuth2 token */
+ token = get_google_token(ctx);
+
+ if (!token) {
+ flb_plg_warn(ctx->ins, "token retrieval failed");
+ }
+ else {
+ flb_sds_destroy(token);
+ }
+
+ ret = check_chronicle_log_type(ctx, config);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "Validate log_type failed. '%s' is not supported. ret = %d",
+ ctx->log_type, ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+static flb_sds_t flb_pack_msgpack_extract_log_key(void *out_context, uint64_t bytes, struct flb_log_event log_event)
+{
+ int i;
+ int map_size;
+ int check = FLB_FALSE;
+ int found = FLB_FALSE;
+ int log_key_missing = 0;
+ int ret;
+ struct flb_chronicle *ctx = out_context;
+ char *val_buf;
+ char *key_str = NULL;
+ size_t key_str_size = 0;
+ size_t msgpack_size = bytes + bytes / 4;
+ size_t val_offset = 0;
+ flb_sds_t out_buf;
+ msgpack_object map;
+ msgpack_object key;
+ msgpack_object val;
+
+ /* Allocate buffer to store log_key contents */
+ val_buf = flb_calloc(1, msgpack_size);
+ if (val_buf == NULL) {
+ flb_plg_error(ctx->ins, "Could not allocate enough "
+ "memory to read record");
+ flb_errno();
+ return NULL;
+ }
+
+ /* Get the record/map */
+ map = *log_event.body;
+
+ if (map.type != MSGPACK_OBJECT_MAP) {
+ flb_free(val_buf);
+ return NULL;
+ }
+
+ map_size = map.via.map.size;
+
+ /* Reset variables for found log_key and correct type */
+ found = FLB_FALSE;
+ check = FLB_FALSE;
+
+ /* Extract log_key from record and append to output buffer */
+ for (i = 0; i < map_size; i++) {
+ key = map.via.map.ptr[i].key;
+ val = map.via.map.ptr[i].val;
+
+ if (key.type == MSGPACK_OBJECT_BIN) {
+ key_str = (char *) key.via.bin.ptr;
+ key_str_size = key.via.bin.size;
+ check = FLB_TRUE;
+ }
+ if (key.type == MSGPACK_OBJECT_STR) {
+ key_str = (char *) key.via.str.ptr;
+ key_str_size = key.via.str.size;
+ check = FLB_TRUE;
+ }
+
+ if (check == FLB_TRUE) {
+ if (strncmp(ctx->log_key, key_str, key_str_size) == 0) {
+ found = FLB_TRUE;
+
+ /*
+ * Copy contents of value into buffer. Necessary to copy
+ * strings because flb_msgpack_to_json does not handle nested
+ * JSON gracefully and double escapes them.
+ */
+ if (val.type == MSGPACK_OBJECT_BIN) {
+ memcpy(val_buf + val_offset, val.via.bin.ptr, val.via.bin.size);
+ val_offset += val.via.bin.size;
+ val_buf[val_offset] = '\0';
+ val_offset++;
+ }
+ else if (val.type == MSGPACK_OBJECT_STR) {
+ memcpy(val_buf + val_offset, val.via.str.ptr, val.via.str.size);
+ val_offset += val.via.str.size;
+ val_buf[val_offset] = '\0';
+ val_offset++;
+ }
+ else {
+ ret = flb_msgpack_to_json(val_buf + val_offset,
+ msgpack_size - val_offset, &val);
+ if (ret < 0) {
+ break;
+ }
+ val_offset += ret;
+ val_buf[val_offset] = '\0';
+ val_offset++;
+ }
+ /* Exit early once log_key has been found for current record */
+ break;
+ }
+ }
+
+ /* If log_key was not found in the current record, mark log key as missing */
+ if (found == FLB_FALSE) {
+ log_key_missing++;
+ }
+ }
+
+ if (log_key_missing > 0) {
+ flb_plg_error(ctx->ins, "Could not find log_key '%s' in %d records",
+ ctx->log_key, log_key_missing);
+ }
+
+ /* If nothing was read, destroy buffer */
+ if (val_offset == 0) {
+ flb_free(val_buf);
+ return NULL;
+ }
+ val_buf[val_offset] = '\0';
+
+ /* Create output buffer to store contents */
+ out_buf = flb_sds_create(val_buf);
+ if (out_buf == NULL) {
+ flb_plg_error(ctx->ins, "Error creating buffer to store log_key contents.");
+ flb_errno();
+ }
+ flb_free(val_buf);
+
+ return out_buf;
+}
+
+static int chronicle_format(const void *data, size_t bytes,
+ const char *tag, size_t tag_len,
+ char **out_data, size_t *out_size,
+ struct flb_chronicle *ctx)
+{
+ int len;
+ int ret;
+ int array_size = 0;
+ size_t off = 0;
+ size_t last_off = 0;
+ size_t alloc_size = 0;
+ size_t s;
+ char time_formatted[255];
+ /* Parameters for Timestamp */
+ struct tm tm;
+ flb_sds_t out_buf;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ flb_sds_t log_text = NULL;
+ int log_text_size;
+
+ /* Count number of records */
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ array_size = flb_mp_count(data, bytes);
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ /*
+ * Pack root map (unstructured log):
+ * see: https://cloud.google.com/chronicle/docs/reference/ingestion-api#request_body_2
+ * {
+ * "customer_id": "c8c65bfa-5f2c-42d4-9189-64bb7b939f2c",
+ * "log_type": "BIND_DNS",
+ * "entries": [
+ * {
+ * "log_text": "26-Feb-2019 13:35:02.187 client 10.120.20.32#4238: query: altostrat.com IN A + (203.0.113.102)",
+ * "ts_epoch_microseconds": 1551188102187000
+ * },
+ * {
+ * "log_text": "26-Feb-2019 13:37:04.523 client 10.50.100.33#1116: query: examplepetstore.com IN A + (203.0.113.102)",
+ * "ts_rfc3339": "2019-26-02T13:37:04.523-08:00"
+ * },
+ * {
+ * "log_text": "26-Feb-2019 13:39:01.115 client 10.1.2.3#3333: query: www.example.com IN A + (203.0.113.102)"
+ * },
+ * ]
+ * }
+ */
+ msgpack_pack_map(&mp_pck, 3);
+
+ msgpack_pack_str(&mp_pck, 11);
+ msgpack_pack_str_body(&mp_pck, "customer_id", 11);
+
+ msgpack_pack_str(&mp_pck, strlen(ctx->customer_id));
+ msgpack_pack_str_body(&mp_pck, ctx->customer_id, strlen(ctx->customer_id));
+
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "log_type", 8);
+
+ msgpack_pack_str(&mp_pck, strlen(ctx->log_type));
+ msgpack_pack_str_body(&mp_pck, ctx->log_type, strlen(ctx->log_type));
+
+ msgpack_pack_str(&mp_pck, 7);
+ msgpack_pack_str_body(&mp_pck, "entries", 7);
+
+ /* Append entries */
+ msgpack_pack_array(&mp_pck, array_size);
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ off = log_decoder.offset;
+ alloc_size = (off - last_off) + 128; /* JSON is larger than msgpack */
+ last_off = off;
+
+ /*
+ * Pack entries
+ *
+ * {
+ * "log_text": {...},
+ * "ts_rfc3339": "..."
+ * }
+ *
+ */
+ msgpack_pack_map(&mp_pck, 2);
+
+ /* log_text */
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "log_text", 8);
+ if (ctx->log_key != NULL) {
+ log_text = flb_pack_msgpack_extract_log_key(ctx, bytes, log_event);
+ }
+ else {
+ log_text = flb_msgpack_to_json_str(alloc_size, log_event.body);
+ }
+
+ /* check for conversion failure before computing the length */
+ if (log_text == NULL) {
+ flb_plg_error(ctx->ins, "Could not marshal msgpack to output string");
+ flb_log_event_decoder_destroy(&log_decoder);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return -1;
+ }
+
+ if (ctx->log_key != NULL) {
+ log_text_size = flb_sds_len(log_text);
+ }
+ else {
+ log_text_size = strlen(log_text);
+ }
+
+ msgpack_pack_str(&mp_pck, log_text_size);
+ msgpack_pack_str_body(&mp_pck, log_text, log_text_size);
+
+ if (ctx->log_key != NULL) {
+ flb_sds_destroy(log_text);
+ }
+ else {
+ flb_free(log_text);
+ }
+ /* timestamp */
+ msgpack_pack_str(&mp_pck, 10);
+ msgpack_pack_str_body(&mp_pck, "ts_rfc3339", 10);
+
+ gmtime_r(&log_event.timestamp.tm.tv_sec, &tm);
+ s = strftime(time_formatted, sizeof(time_formatted) - 1,
+ FLB_STD_TIME_FMT, &tm);
+ len = snprintf(time_formatted + s, sizeof(time_formatted) - 1 - s,
+ ".%03" PRIu64 "Z",
+ (uint64_t) log_event.timestamp.tm.tv_nsec);
+ s += len;
+
+ msgpack_pack_str(&mp_pck, s);
+ msgpack_pack_str_body(&mp_pck, time_formatted, s);
+ }
+
+ /* Convert from msgpack to JSON */
+ out_buf = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ if (!out_buf) {
+ flb_plg_error(ctx->ins, "error formatting JSON payload");
+ return -1;
+ }
+
+ *out_data = out_buf;
+ *out_size = flb_sds_len(out_buf);
+
+ return 0;
+}
+
+static void cb_chronicle_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ (void) i_ins;
+ (void) config;
+ int ret;
+ int ret_code = FLB_RETRY;
+ size_t b_sent;
+ flb_sds_t token;
+ flb_sds_t payload_buf;
+ size_t payload_size;
+ struct flb_chronicle *ctx = out_context;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+
+ flb_plg_trace(ctx->ins, "flushing bytes %zu", event_chunk->size);
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Get or renew Token */
+ token = get_google_token(ctx);
+
+ if (!token) {
+ flb_plg_error(ctx->ins, "cannot retrieve oauth2 token");
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Reformat msgpack to chronicle JSON payload */
+ ret = chronicle_format(event_chunk->data, event_chunk->size,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ &payload_buf, &payload_size, ctx);
+ if (ret != 0) {
+ flb_upstream_conn_release(u_conn);
+ flb_sds_destroy(token);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->endpoint,
+ payload_buf, payload_size, NULL, 0, NULL, 0);
+ if (!c) {
+ flb_plg_error(ctx->ins, "cannot create HTTP client context");
+ flb_upstream_conn_release(u_conn);
+ flb_sds_destroy(token);
+ flb_sds_destroy(payload_buf);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ flb_http_buffer_size(c, 4192);
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ flb_http_add_header(c, "Content-Type", 12, "application/json", 16);
+
+ /* Compose and append Authorization header */
+ flb_http_add_header(c, "Authorization", 13, token, flb_sds_len(token));
+
+ /* Send HTTP request */
+ ret = flb_http_do(c, &b_sent);
+
+ /* validate response */
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "http_do=%i", ret);
+ ret_code = FLB_RETRY;
+ }
+ else {
+ /* The request was issued successfully, validate the 'error' field */
+ flb_plg_debug(ctx->ins, "HTTP Status=%i", c->resp.status);
+ if (c->resp.status == 200) {
+ ret_code = FLB_OK;
+ }
+ else {
+ if (c->resp.payload && c->resp.payload_size > 0) {
+ /* we got an error */
+ flb_plg_warn(ctx->ins, "response\n%s", c->resp.payload);
+ }
+ ret_code = FLB_RETRY;
+ }
+ }
+
+ /* Cleanup */
+ flb_sds_destroy(payload_buf);
+ flb_sds_destroy(token);
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+
+ /* Done */
+ FLB_OUTPUT_RETURN(ret_code);
+}
+
+static int cb_chronicle_exit(void *data, struct flb_config *config)
+{
+ struct flb_chronicle *ctx = data;
+
+ if (!ctx) {
+ return -1;
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ flb_chronicle_conf_destroy(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "google_service_credentials", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_chronicle, credentials_file),
+ "Set the path for the google service credentials file"
+ },
+ // set in flb_chronicle_oauth_credentials
+ {
+ FLB_CONFIG_MAP_STR, "service_account_email", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the service account email"
+ },
+ // set in flb_chronicle_oauth_credentials
+ {
+ FLB_CONFIG_MAP_STR, "service_account_secret", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the service account secret"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "project_id", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_chronicle, project_id),
+ "Set the project id"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "customer_id", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_chronicle, customer_id),
+ "Set the customer id"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "log_type", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_chronicle, log_type),
+ "Set the log type"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "region", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_chronicle, region),
+ "Set the region"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "log_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_chronicle, log_key),
+ "Set the log key"
+ },
+ /* EOF */
+ {0}
+};
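+
+/*
+ * Example output section using the options above (illustrative placeholder
+ * values only):
+ *
+ *   [OUTPUT]
+ *       name                        chronicle
+ *       match                       *
+ *       google_service_credentials  /path/to/service-account.json
+ *       customer_id                 c8c65bfa-5f2c-42d4-9189-64bb7b939f2c
+ *       log_type                    BIND_DNS
+ *       region                      US
+ */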
+
+struct flb_output_plugin out_chronicle_plugin = {
+ .name = "chronicle",
+ .description = "Send logs to Google Chronicle as unstructured log",
+ .cb_init = cb_chronicle_init,
+ .cb_flush = cb_chronicle_flush,
+ .cb_exit = cb_chronicle_exit,
+ .config_map = config_map,
+ /* Plugin flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_chronicle/chronicle.h b/src/fluent-bit/plugins/out_chronicle/chronicle.h
new file mode 100644
index 000000000..0243223f0
--- /dev/null
+++ b/src/fluent-bit/plugins/out_chronicle/chronicle.h
@@ -0,0 +1,96 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_CHRONICLE
+#define FLB_OUT_CHRONICLE
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_oauth2.h>
+#include <fluent-bit/flb_sds.h>
+
+/* refresh token every 50 minutes */
+#define FLB_CHRONICLE_TOKEN_REFRESH 3000
+
+/* Timestamp format */
+#define FLB_STD_TIME_FMT "%Y-%m-%dT%H:%M:%S"
+
+/* Chronicle unstructured logs oauth scope */
+#define FLB_CHRONICLE_SCOPE "https://www.googleapis.com/auth/malachite-ingestion"
+
+/* Chronicle authorization URL */
+#define FLB_CHRONICLE_AUTH_URL "https://oauth2.googleapis.com/token"
+
+#define FLB_CHRONICLE_UNSTRUCTURED_ENDPOINT "/v2/unstructuredlogentries:batchCreate"
+#define FLB_CHRONICLE_LOG_TYPE_ENDPOINT "/v2/logtypes"
+#define FLB_CHRONICLE_URL_BASE "https://malachiteingestion-pa.googleapis.com"
+#define FLB_CHRONICLE_URL_BASE_EU "https://europe-malachiteingestion-pa.googleapis.com"
+#define FLB_CHRONICLE_URL_BASE_UK "https://europe-west2-malachiteingestion-pa.googleapis.com"
+#define FLB_CHRONICLE_URL_BASE_ASIA "https://asia-southeast1-malachiteingestion-pa.googleapis.com"
+
+struct flb_chronicle_oauth_credentials {
+ /* parsed credentials file */
+ flb_sds_t type;
+ flb_sds_t project_id;
+ flb_sds_t private_key_id;
+ flb_sds_t private_key;
+ flb_sds_t client_email;
+ flb_sds_t client_id;
+ flb_sds_t auth_uri;
+ flb_sds_t token_uri;
+};
+
+struct flb_chronicle {
+ /* credentials */
+ flb_sds_t credentials_file;
+
+ struct flb_chronicle_oauth_credentials *oauth_credentials;
+
+ /* chronicle configuration */
+ flb_sds_t project_id;
+ flb_sds_t customer_id;
+ flb_sds_t log_type;
+
+ flb_sds_t uri;
+ flb_sds_t health_uri;
+ flb_sds_t endpoint;
+ flb_sds_t region;
+ flb_sds_t log_key;
+
+ int json_date_format;
+ flb_sds_t json_date_key;
+ flb_sds_t date_key;
+
+ /* oauth2 context */
+ struct flb_oauth2 *o;
+
+ /* mutex for acquiring oauth tokens */
+ pthread_mutex_t token_mutex;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *u;
+
+ /* Fluent Bit context */
+ struct flb_config *config;
+
+ /* Plugin output instance reference */
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_chronicle/chronicle_conf.c b/src/fluent-bit/plugins/out_chronicle/chronicle_conf.c
new file mode 100644
index 000000000..5d6cdf9b2
--- /dev/null
+++ b/src/fluent-bit/plugins/out_chronicle/chronicle_conf.c
@@ -0,0 +1,421 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_unescape.h>
+#include <fluent-bit/flb_jsmn.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_aws_credentials.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "chronicle.h"
+#include "chronicle_conf.h"
+
+
+static inline int key_cmp(char *str, int len, char *cmp) {
+
+ if (strlen(cmp) != len) {
+ return -1;
+ }
+
+ return strncasecmp(str, cmp, len);
+}
+
+static int flb_chronicle_read_credentials_file(struct flb_chronicle *ctx,
+ char *creds,
+ struct flb_chronicle_oauth_credentials *ctx_creds)
+{
+ int i;
+ int ret;
+ int len;
+ int key_len;
+ int val_len;
+ int tok_size = 32;
+ char *buf;
+ char *key;
+ char *val;
+ flb_sds_t tmp;
+ struct stat st;
+ jsmn_parser parser;
+ jsmntok_t *t;
+ jsmntok_t *tokens;
+
+ /* Validate credentials path */
+ ret = stat(creds, &st);
+ if (ret == -1) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "cannot open credentials file: %s",
+ creds);
+ return -1;
+ }
+
+ if (!S_ISREG(st.st_mode) && !S_ISLNK(st.st_mode)) {
+ flb_plg_error(ctx->ins, "credentials file "
+ "is not a valid file: %s", creds);
+ return -1;
+ }
+
+ /* Read file content */
+ buf = mk_file_to_buffer(creds);
+ if (!buf) {
+ flb_plg_error(ctx->ins, "error reading credentials file: %s",
+ creds);
+ return -1;
+ }
+
+ /* Parse content */
+ jsmn_init(&parser);
+ tokens = flb_calloc(1, sizeof(jsmntok_t) * tok_size);
+ if (!tokens) {
+ flb_errno();
+ flb_free(buf);
+ return -1;
+ }
+
+ ret = jsmn_parse(&parser, buf, st.st_size, tokens, tok_size);
+ if (ret <= 0) {
+ flb_plg_error(ctx->ins, "invalid JSON credentials file: %s",
+ creds);
+ flb_free(buf);
+ flb_free(tokens);
+ return -1;
+ }
+
+ t = &tokens[0];
+ if (t->type != JSMN_OBJECT) {
+ flb_plg_error(ctx->ins, "invalid JSON map on file: %s",
+ creds);
+ flb_free(buf);
+ flb_free(tokens);
+ return -1;
+ }
+
+ /* Parse JSON tokens */
+ for (i = 1; i < ret; i++) {
+ t = &tokens[i];
+ if (t->type != JSMN_STRING) {
+ continue;
+ }
+
+ if (t->start == -1 || t->end == -1 || (t->start == 0 && t->end == 0)){
+ break;
+ }
+
+ /* Key */
+ key = buf + t->start;
+ key_len = (t->end - t->start);
+
+ /* Value */
+ i++;
+ t = &tokens[i];
+ val = buf + t->start;
+ val_len = (t->end - t->start);
+
+ if (key_cmp(key, key_len, "type") == 0) {
+ ctx_creds->type = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "project_id") == 0) {
+ ctx_creds->project_id = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "private_key_id") == 0) {
+ ctx_creds->private_key_id = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "private_key") == 0) {
+ tmp = flb_sds_create_len(val, val_len);
+ if (tmp) {
+ /* Unescape private key */
+ len = flb_sds_len(tmp);
+ ctx_creds->private_key = flb_sds_create_size(len);
+ flb_unescape_string(tmp, len,
+ &ctx_creds->private_key);
+ flb_sds_destroy(tmp);
+ }
+ }
+ else if (key_cmp(key, key_len, "client_email") == 0) {
+ ctx_creds->client_email = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "client_id") == 0) {
+ ctx_creds->client_id = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "auth_uri") == 0) {
+ ctx_creds->auth_uri = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "token_uri") == 0) {
+ ctx_creds->token_uri = flb_sds_create_len(val, val_len);
+ }
+ }
+
+ flb_free(buf);
+ flb_free(tokens);
+
+ return 0;
+}
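+
+/*
+ * The parser above expects a regular Google service account credentials
+ * file; only the keys handled in the loop are consumed. Abbreviated example
+ * with placeholder values:
+ *
+ *   {
+ *     "type": "service_account",
+ *     "project_id": "my-project",
+ *     "private_key_id": "abc123...",
+ *     "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
+ *     "client_email": "logger@my-project.iam.gserviceaccount.com",
+ *     "client_id": "123456789012345678901",
+ *     "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ *     "token_uri": "https://oauth2.googleapis.com/token"
+ *   }
+ */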
+
+
+struct flb_chronicle *flb_chronicle_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ const char *tmp;
+ struct flb_chronicle *ctx;
+ struct flb_chronicle_oauth_credentials *creds;
+
+ /* Allocate config context */
+ ctx = flb_calloc(1, sizeof(struct flb_chronicle));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ ctx->config = config;
+
+ ret = flb_output_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to load configuration");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Lookup credentials file */
+ creds = flb_calloc(1, sizeof(struct flb_chronicle_oauth_credentials));
+ if (!creds) {
+ flb_errno();
+ flb_free(ctx);
+ return NULL;
+ }
+ ctx->oauth_credentials = creds;
+
+ if (ctx->credentials_file == NULL) {
+ tmp = getenv("GOOGLE_SERVICE_CREDENTIALS");
+ if (tmp) {
+ ctx->credentials_file = flb_sds_create(tmp);
+ }
+ }
+
+ if (ctx->credentials_file) {
+ ret = flb_chronicle_read_credentials_file(ctx,
+ ctx->credentials_file,
+ ctx->oauth_credentials);
+ if (ret != 0) {
+ flb_chronicle_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+ else {
+ /*
+ * If no credentials file has been defined, do manual lookup of the
+ * client email and the private key.
+ */
+
+ /* Service Account Email */
+ tmp = flb_output_get_property("service_account_email", ins);
+ if (tmp) {
+ creds->client_email = flb_sds_create(tmp);
+ }
+ else {
+ tmp = getenv("SERVICE_ACCOUNT_EMAIL");
+ if (tmp) {
+ creds->client_email = flb_sds_create(tmp);
+ }
+ }
+
+ /* Service Account Secret */
+ tmp = flb_output_get_property("service_account_secret", ins);
+ if (tmp) {
+ creds->private_key = flb_sds_create(tmp);
+ }
+ else {
+ tmp = getenv("SERVICE_ACCOUNT_SECRET");
+ if (tmp) {
+ creds->private_key = flb_sds_create(tmp);
+ }
+ }
+
+ if (!creds->client_email) {
+ flb_plg_error(ctx->ins, "service_account_email/client_email is not defined");
+ flb_chronicle_conf_destroy(ctx);
+ return NULL;
+ }
+
+ if (!creds->private_key) {
+ flb_plg_error(ctx->ins, "service_account_secret/private_key is not defined");
+ flb_chronicle_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* config: 'project_id' */
+ if (ctx->project_id == NULL) {
+ if (creds->project_id) {
+ /*
+ * flb_config_map_destroy() frees values through the pointers stored in
+ * the config_map struct, so the pointer assigned here can safely be
+ * released later together with the creds struct; skipping the assignment
+ * would leak the value.
+ */
+ ctx->project_id = creds->project_id;
+ if (!ctx->project_id) {
+ flb_plg_error(ctx->ins,
+ "failed extracting 'project_id' from credentials.");
+ flb_chronicle_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "no 'project_id' configured or present in credentials.");
+ flb_chronicle_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* config: 'customer_id' */
+ if (ctx->customer_id == NULL) {
+ flb_plg_error(ctx->ins, "property 'customer_id' is not defined");
+ flb_chronicle_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* config: 'log_type' */
+ if (ctx->log_type == NULL) {
+ flb_plg_error(ctx->ins, "property 'log_type' is not defined");
+ flb_chronicle_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* Date key */
+ ctx->date_key = ctx->json_date_key;
+ tmp = flb_output_get_property("json_date_key", ins);
+ if (tmp) {
+ /* Just check if we have to disable it */
+ if (flb_utils_bool(tmp) == FLB_FALSE) {
+ ctx->date_key = NULL;
+ }
+ }
+
+ /* Date format for JSON output */
+ ctx->json_date_format = FLB_PACK_JSON_DATE_ISO8601;
+ tmp = flb_output_get_property("json_date_format", ins);
+ if (tmp) {
+ ret = flb_pack_to_json_date_type(tmp);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "invalid json_date_format '%s'", tmp);
+ flb_chronicle_conf_destroy(ctx);
+ return NULL;
+ }
+ else {
+ ctx->json_date_format = ret;
+ }
+ }
+
+ /* Create the target endpoint URI */
+ ctx->endpoint = flb_sds_create_size(sizeof(FLB_CHRONICLE_UNSTRUCTURED_ENDPOINT));
+ if (!ctx->endpoint) {
+ flb_errno();
+ flb_chronicle_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->endpoint = flb_sds_printf(&ctx->endpoint, FLB_CHRONICLE_UNSTRUCTURED_ENDPOINT);
+
+ /* Create the base URI */
+ if (ctx->region == NULL || strncasecmp(ctx->region, "US", 2) == 0) {
+ ctx->uri = flb_sds_create_size(sizeof(FLB_CHRONICLE_URL_BASE));
+ if (!ctx->uri) {
+ flb_errno();
+ flb_chronicle_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->uri = flb_sds_printf(&ctx->uri, FLB_CHRONICLE_URL_BASE);
+ }
+ else if (strncasecmp(ctx->region, "EU", 2) == 0){
+ ctx->uri = flb_sds_create_size(sizeof(FLB_CHRONICLE_URL_BASE_EU));
+ if (!ctx->uri) {
+ flb_errno();
+ flb_chronicle_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->uri = flb_sds_printf(&ctx->uri, FLB_CHRONICLE_URL_BASE_EU);
+ }
+ else if (strncasecmp(ctx->region, "UK", 2) == 0) {
+ ctx->uri = flb_sds_create_size(sizeof(FLB_CHRONICLE_URL_BASE_UK));
+ if (!ctx->uri) {
+ flb_errno();
+ flb_chronicle_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->uri = flb_sds_printf(&ctx->uri, FLB_CHRONICLE_URL_BASE_UK);
+ }
+ else if (strncasecmp(ctx->region, "ASIA", 4) == 0) {
+ ctx->uri = flb_sds_create_size(sizeof(FLB_CHRONICLE_URL_BASE_ASIA));
+ if (!ctx->uri) {
+ flb_errno();
+ flb_chronicle_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->uri = flb_sds_printf(&ctx->uri, FLB_CHRONICLE_URL_BASE_ASIA);
+ }
+ else {
+ flb_plg_error(ctx->ins, "unsupported region");
+ flb_chronicle_conf_destroy(ctx);
+ return NULL;
+ }
+ flb_plg_info(ctx->ins, "project='%s' custumer_id='%s' region='%s'",
+ ctx->project_id, ctx->customer_id, ctx->region);
+
+ return ctx;
+}
+
+
+int flb_chronicle_oauth_credentials_destroy(struct flb_chronicle_oauth_credentials *creds)
+{
+ if (!creds) {
+ return -1;
+ }
+ flb_sds_destroy(creds->type);
+ flb_sds_destroy(creds->project_id);
+ flb_sds_destroy(creds->private_key_id);
+ flb_sds_destroy(creds->private_key);
+ flb_sds_destroy(creds->client_email);
+ flb_sds_destroy(creds->client_id);
+ flb_sds_destroy(creds->auth_uri);
+ flb_sds_destroy(creds->token_uri);
+
+ flb_free(creds);
+
+ return 0;
+}
+
+int flb_chronicle_conf_destroy(struct flb_chronicle *ctx)
+{
+ if (!ctx) {
+ return -1;
+ }
+
+ flb_chronicle_oauth_credentials_destroy(ctx->oauth_credentials);
+
+ flb_sds_destroy(ctx->endpoint);
+ flb_sds_destroy(ctx->uri);
+
+ if (ctx->o) {
+ flb_oauth2_destroy(ctx->o);
+ }
+
+ flb_free(ctx);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/out_chronicle/chronicle_conf.h b/src/fluent-bit/plugins/out_chronicle/chronicle_conf.h
new file mode 100644
index 000000000..76dcfb3d2
--- /dev/null
+++ b/src/fluent-bit/plugins/out_chronicle/chronicle_conf.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_CHRONICLE_CONF_H
+#define FLB_OUT_CHRONICLE_CONF_H
+
+#include "chronicle.h"
+
+struct flb_chronicle *flb_chronicle_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+int flb_chronicle_conf_destroy(struct flb_chronicle *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_cloudwatch_logs/CMakeLists.txt b/src/fluent-bit/plugins/out_cloudwatch_logs/CMakeLists.txt
new file mode 100644
index 000000000..9e48217aa
--- /dev/null
+++ b/src/fluent-bit/plugins/out_cloudwatch_logs/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ cloudwatch_logs.c
+ cloudwatch_api.c)
+
+FLB_PLUGIN(out_cloudwatch_logs "${src}" "")
diff --git a/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_api.c b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_api.c
new file mode 100644
index 000000000..8043968cf
--- /dev/null
+++ b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_api.c
@@ -0,0 +1,1564 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_slist.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_macros.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_aws_util.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_intermediate_metric.h>
+
+#include <monkey/mk_core.h>
+#include <msgpack.h>
+#include <string.h>
+#include <stdio.h>
+
+#ifndef FLB_SYSTEM_WINDOWS
+#include <unistd.h>
+#endif
+
+#include "cloudwatch_api.h"
+
+#define ERR_CODE_ALREADY_EXISTS "ResourceAlreadyExistsException"
+#define ERR_CODE_INVALID_SEQUENCE_TOKEN "InvalidSequenceTokenException"
+#define ERR_CODE_NOT_FOUND "ResourceNotFoundException"
+#define ERR_CODE_DATA_ALREADY_ACCEPTED "DataAlreadyAcceptedException"
+
+#define AMZN_REQUEST_ID_HEADER "x-amzn-RequestId"
+
+#define ONE_DAY_IN_MILLISECONDS 86400000
+#define FOUR_HOURS_IN_SECONDS 14400
+
+
+static struct flb_aws_header create_group_header = {
+ .key = "X-Amz-Target",
+ .key_len = 12,
+ .val = "Logs_20140328.CreateLogGroup",
+ .val_len = 28,
+};
+
+static struct flb_aws_header put_retention_policy_header = {
+ .key = "X-Amz-Target",
+ .key_len = 12,
+ .val = "Logs_20140328.PutRetentionPolicy",
+ .val_len = 32,
+};
+
+static struct flb_aws_header create_stream_header = {
+ .key = "X-Amz-Target",
+ .key_len = 12,
+ .val = "Logs_20140328.CreateLogStream",
+ .val_len = 29,
+};
+
+static struct flb_aws_header put_log_events_header[] = {
+ {
+ .key = "X-Amz-Target",
+ .key_len = 12,
+ .val = "Logs_20140328.PutLogEvents",
+ .val_len = 26,
+ },
+ {
+ .key = "x-amzn-logs-format",
+ .key_len = 18,
+ .val = "",
+ .val_len = 0,
+ },
+};
+
+int plugin_under_test()
+{
+ if (getenv("FLB_CLOUDWATCH_PLUGIN_UNDER_TEST") != NULL) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+char *mock_error_response(char *error_env_var)
+{
+ char *err_val = NULL;
+ char *error = NULL;
+ int len = 0;
+
+ err_val = getenv(error_env_var);
+ if (err_val != NULL && strlen(err_val) > 0) {
+ error = flb_malloc(strlen(err_val) + sizeof(char));
+ if (error == NULL) {
+ flb_errno();
+ return NULL;
+ }
+
+ len = strlen(err_val);
+ memcpy(error, err_val, len);
+ error[len] = '\0';
+ return error;
+ }
+
+ return NULL;
+}
+
+struct flb_http_client *mock_http_call(char *error_env_var, char *api)
+{
+ /* create an http client so that we can set the response */
+ struct flb_http_client *c = NULL;
+ char *error = mock_error_response(error_env_var);
+
+ c = flb_calloc(1, sizeof(struct flb_http_client));
+ if (!c) {
+ flb_errno();
+ flb_free(error);
+ return NULL;
+ }
+ mk_list_init(&c->headers);
+
+ if (error != NULL) {
+ c->resp.status = 400;
+ /* resp.data is freed on destroy, payload is supposed to reference it */
+ c->resp.data = error;
+ c->resp.payload = c->resp.data;
+ c->resp.payload_size = strlen(error);
+ }
+ else {
+ c->resp.status = 200;
+ c->resp.payload = "";
+ c->resp.payload_size = 0;
+ if (strcmp(api, "PutLogEvents") == 0) {
+ /* mocked success response */
+ c->resp.payload = "{\"nextSequenceToken\": \""
+ "49536701251539826331025683274032969384950891766572122113\"}";
+ c->resp.payload_size = strlen(c->resp.payload);
+ }
+ else {
+ c->resp.payload = "";
+ c->resp.payload_size = 0;
+ }
+ }
+
+ return c;
+}
+
+int compare_events(const void *a_arg, const void *b_arg)
+{
+ struct cw_event *r_a = (struct cw_event *) a_arg;
+ struct cw_event *r_b = (struct cw_event *) b_arg;
+
+ if (r_a->timestamp < r_b->timestamp) {
+ return -1;
+ }
+ else if (r_a->timestamp == r_b->timestamp) {
+ return 0;
+ }
+ else {
+ return 1;
+ }
+}
+
+static inline int try_to_write(char *buf, int *off, size_t left,
+ const char *str, size_t str_len)
+{
+ if (str_len <= 0){
+ str_len = strlen(str);
+ }
+ if (left <= *off+str_len) {
+ return FLB_FALSE;
+ }
+ memcpy(buf+*off, str, str_len);
+ *off += str_len;
+ return FLB_TRUE;
+}
+
+/*
+ * Writes the "header" for a put log events payload
+ */
+static int init_put_payload(struct flb_cloudwatch *ctx, struct cw_flush *buf,
+ struct log_stream *stream, int *offset)
+{
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "{\"logGroupName\":\"", 17)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ stream->group, 0)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "\",\"logStreamName\":\"", 19)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ stream->name, 0)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "\",", 2)) {
+ goto error;
+ }
+
+ if (stream->sequence_token) {
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "\"sequenceToken\":\"", 17)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ stream->sequence_token, 0)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "\",", 2)) {
+ goto error;
+ }
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "\"logEvents\":[", 13)) {
+ goto error;
+ }
+
+ return 0;
+
+error:
+ return -1;
+}
+
+/*
+ * Writes a log event to the output buffer
+ */
+static int write_event(struct flb_cloudwatch *ctx, struct cw_flush *buf,
+ struct cw_event *event, int *offset)
+{
+ char ts[50];
+
+ if (!snprintf(ts, 50, "%llu", event->timestamp)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "{\"timestamp\":", 13)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ ts, 0)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ ",\"message\":\"", 12)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ event->json, event->len)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "\"}", 2)) {
+ goto error;
+ }
+
+ return 0;
+
+error:
+ return -1;
+}
+
+/* Terminates a PutLogEvents payload */
+static int end_put_payload(struct flb_cloudwatch *ctx, struct cw_flush *buf,
+ int *offset)
+{
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "]}", 2)) {
+ return -1;
+ }
+ buf->out_buf[*offset] = '\0';
+
+ return 0;
+}
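+
+/*
+ * Taken together, init_put_payload(), write_event() and end_put_payload()
+ * build a PutLogEvents body of the following shape (illustrative values):
+ *
+ *   {"logGroupName":"my-group","logStreamName":"my-stream",
+ *    "sequenceToken":"49536...","logEvents":[
+ *      {"timestamp":1551188102187,"message":"escaped log line"},
+ *      {"timestamp":1551188224523,"message":"another escaped line"}]}
+ *
+ * The sequenceToken pair is only written when the stream already has one.
+ */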
+
+static unsigned long long stream_time_span(struct log_stream *stream,
+ struct cw_event *event)
+{
+ if (stream->oldest_event == 0 || stream->newest_event == 0) {
+ return 0;
+ }
+
+ if (stream->oldest_event > event->timestamp) {
+ return stream->newest_event - event->timestamp;
+ }
+ else if (stream->newest_event < event->timestamp) {
+ return event->timestamp - stream->oldest_event;
+ }
+
+ return stream->newest_event - stream->oldest_event;
+}
+
+/* returns FLB_TRUE if time span is less than 24 hours, FLB_FALSE if greater */
+static int check_stream_time_span(struct log_stream *stream,
+ struct cw_event *event)
+{
+ unsigned long long span = stream_time_span(stream, event);
+
+ if (span < ONE_DAY_IN_MILLISECONDS) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+/* sets the oldest_event and newest_event fields */
+static void set_stream_time_span(struct log_stream *stream, struct cw_event *event)
+{
+ if (stream->oldest_event == 0 || stream->oldest_event > event->timestamp) {
+ stream->oldest_event = event->timestamp;
+ }
+
+ if (stream->newest_event == 0 || stream->newest_event < event->timestamp) {
+ stream->newest_event = event->timestamp;
+ }
+}
+
+/*
+ * Truncate log if needed. If truncated, only `written` is modified
+ * returns FLB_TRUE if truncated
+ */
+static int truncate_log(const struct flb_cloudwatch *ctx, const char *log_buffer,
+ size_t *written) {
+ size_t trailing_backslash_count = 0;
+
+ if (*written > MAX_EVENT_LEN) {
+ flb_plg_warn(ctx->ins, "[size=%zu] Truncating event which is larger than "
+ "max size allowed by CloudWatch", *written);
+ *written = MAX_EVENT_LEN;
+
+ /* remove trailing unescaped backslash if inadvertently synthesized */
+ while (trailing_backslash_count < *written &&
+ log_buffer[(*written - 1) - trailing_backslash_count] == '\\') {
+ trailing_backslash_count++;
+ }
+ if (trailing_backslash_count % 2 == 1) {
+ /* odd number of trailing backslashes, remove unpaired backslash */
+ (*written)--;
+ }
+ return FLB_TRUE;
+ }
+ return FLB_FALSE;
+}
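+
+/*
+ * Note on the trailing-backslash handling above: if the truncation point
+ * lands inside an escape sequence, the buffer ends with an odd number of
+ * '\' characters and the last one would start an incomplete escape in the
+ * JSON payload, so one extra byte is trimmed to keep the message valid.
+ */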
+
+
+/*
+ * Processes the msgpack object. Return values:
+ *  -1 = failure, record not added
+ *   0 = success, record added
+ *   1 = ran out of buffer space; send the current batch, then retry
+ *   2 = record could not be processed (e.g. empty), discard it
+ */
+int process_event(struct flb_cloudwatch *ctx, struct cw_flush *buf,
+ const msgpack_object *obj, struct flb_time *tms)
+{
+ size_t written;
+ int ret;
+ size_t size;
+ int offset = 0;
+ struct cw_event *event;
+ char *tmp_buf_ptr;
+
+ tmp_buf_ptr = buf->tmp_buf + buf->tmp_buf_offset;
+ ret = flb_msgpack_to_json(tmp_buf_ptr,
+ buf->tmp_buf_size - buf->tmp_buf_offset,
+ obj);
+ if (ret <= 0) {
+ /*
+ * failure to write to buffer,
+ * which means we ran out of space, and must send the logs
+ */
+ return 1;
+ }
+ written = (size_t) ret;
+ /* Discard empty messages (written == 2 means '""') */
+ if (written <= 2) {
+ flb_plg_debug(ctx->ins, "Found empty log message");
+ return 2;
+ }
+
+ /* the json string must be escaped, unless the log_key option is used */
+ if (ctx->log_key == NULL) {
+ /*
+ * check if event_buf is initialized and big enough
+ * If all chars need to be hex encoded (impossible), 6x space would be
+ * needed
+ */
+ size = written * 6;
+ if (buf->event_buf == NULL || buf->event_buf_size < size) {
+ flb_free(buf->event_buf);
+ buf->event_buf = flb_malloc(size);
+ buf->event_buf_size = size;
+ if (buf->event_buf == NULL) {
+ flb_errno();
+ return -1;
+ }
+ }
+ offset = 0;
+ if (!flb_utils_write_str(buf->event_buf, &offset, size,
+ tmp_buf_ptr, written)) {
+ return -1;
+ }
+ written = offset;
+
+ tmp_buf_ptr = buf->tmp_buf + buf->tmp_buf_offset;
+ if ((buf->tmp_buf_size - buf->tmp_buf_offset) < written) {
+ /* not enough space, send logs */
+ return 1;
+ }
+
+ /* truncate log, if needed */
+ truncate_log(ctx, buf->event_buf, &written);
+
+ /* copy serialized json to tmp_buf */
+ if (!strncpy(tmp_buf_ptr, buf->event_buf, written)) {
+ return -1;
+ }
+ }
+ else {
+ /*
+ * flb_msgpack_to_json will encase the value in quotes
+ * We don't want that for log_key, so we ignore the first
+ * and last character
+ */
+ written -= 2;
+ tmp_buf_ptr++; /* pass over the opening quote */
+ buf->tmp_buf_offset++; /* advance tmp_buf past opening quote */
+
+ /* truncate log, if needed */
+ truncate_log(ctx, tmp_buf_ptr, &written);
+ }
+
+ /* add log to events list */
+ buf->tmp_buf_offset += written;
+ event = &buf->events[buf->event_index];
+ event->json = tmp_buf_ptr;
+ event->len = written;
+ event->timestamp = (unsigned long long) (tms->tm.tv_sec * 1000ull +
+ tms->tm.tv_nsec/1000000);
+
+ return 0;
+}
+
+/* Resets or inits a cw_flush struct */
+void reset_flush_buf(struct flb_cloudwatch *ctx, struct cw_flush *buf) {
+ buf->event_index = 0;
+ buf->tmp_buf_offset = 0;
+ buf->data_size = PUT_LOG_EVENTS_HEADER_LEN + PUT_LOG_EVENTS_FOOTER_LEN;
+ if (buf->current_stream != NULL) {
+ buf->data_size += strlen(buf->current_stream->name);
+ buf->data_size += strlen(buf->current_stream->group);
+ if (buf->current_stream->sequence_token) {
+ buf->data_size += strlen(buf->current_stream->sequence_token);
+ }
+ }
+}
+
+/* sorts events, constructs a put payload, and then sends */
+int send_log_events(struct flb_cloudwatch *ctx, struct cw_flush *buf) {
+ int ret;
+ int offset;
+ int i;
+ struct cw_event *event;
+
+ if (buf->event_index <= 0) {
+ return 0;
+ }
+
+ /* events must be sorted by timestamp in a put payload */
+ qsort(buf->events, buf->event_index, sizeof(struct cw_event), compare_events);
+
+retry:
+ buf->current_stream->newest_event = 0;
+ buf->current_stream->oldest_event = 0;
+
+ offset = 0;
+ ret = init_put_payload(ctx, buf, buf->current_stream, &offset);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to initialize PutLogEvents payload");
+ return -1;
+ }
+
+ for (i = 0; i < buf->event_index; i++) {
+ event = &buf->events[i];
+ ret = write_event(ctx, buf, event, &offset);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to write log event %d to "
+ "payload buffer", i);
+ return -1;
+ }
+ if (i != (buf->event_index - 1)) {
+ if (!try_to_write(buf->out_buf, &offset, buf->out_buf_size,
+ ",", 1)) {
+ flb_plg_error(ctx->ins, "Could not terminate log event with ','");
+ return -1;
+ }
+ }
+ }
+
+ ret = end_put_payload(ctx, buf, &offset);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Could not complete PutLogEvents payload");
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "cloudwatch:PutLogEvents: events=%d, payload=%d bytes", i, offset);
+ ret = put_log_events(ctx, buf, buf->current_stream, (size_t) offset);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to send log events");
+ return -1;
+ }
+ else if (ret > 0) {
+ goto retry;
+ }
+
+ return 0;
+}
+
+/*
+ * Processes the msgpack object and sends the current batch if needed.
+ * Return values:
+ *  -1 = failure, event not added
+ *   0 = success, event added
+ *   1 = event skipped (empty or oversized)
+ */
+int add_event(struct flb_cloudwatch *ctx, struct cw_flush *buf,
+ struct log_stream *stream,
+ const msgpack_object *obj, struct flb_time *tms)
+{
+ int ret;
+ struct cw_event *event;
+ int retry_add = FLB_FALSE;
+ int event_bytes = 0;
+
+ if (buf->event_index > 0 && buf->current_stream != stream) {
+ /* we already have events for a different stream, send them first */
+ retry_add = FLB_TRUE;
+ goto send;
+ }
+
+retry_add_event:
+ buf->current_stream = stream;
+ retry_add = FLB_FALSE;
+ if (buf->event_index == 0) {
+ /* init */
+ reset_flush_buf(ctx, buf);
+ }
+
+ ret = process_event(ctx, buf, obj, tms);
+ if (ret < 0) {
+ return -1;
+ }
+ else if (ret == 1) {
+ if (buf->event_index <= 0) {
+ /* somehow the record was larger than our entire request buffer */
+ flb_plg_warn(ctx->ins, "Discarding massive log record");
+ return 1; /* discard this record and return to caller */
+ }
+ /* send logs and then retry the add */
+ retry_add = FLB_TRUE;
+ goto send;
+ }
+ else if (ret == 2) {
+ /*
+ * discard this record and return to caller
+ * only happens for empty records in this plugin
+ */
+ return 1;
+ }
+
+ event = &buf->events[buf->event_index];
+ event_bytes = event->len + PUT_LOG_EVENTS_PER_EVENT_LEN;
+
+ if (check_stream_time_span(stream, event) == FLB_FALSE) {
+ /* do not send this event */
+ retry_add = FLB_TRUE;
+ goto send;
+ }
+
+ if ((buf->data_size + event_bytes) > PUT_LOG_EVENTS_PAYLOAD_SIZE) {
+ if (buf->event_index <= 0) {
+ /* somehow the record was larger than our entire request buffer */
+ flb_plg_warn(ctx->ins, "Discarding massive log record");
+ return 0; /* discard this record and return to caller */
+ }
+ /* do not send this event */
+ retry_add = FLB_TRUE;
+ goto send;
+ }
+
+ buf->data_size += event_bytes;
+ set_stream_time_span(stream, event);
+ buf->event_index++;
+
+ if (buf->event_index == MAX_EVENTS_PER_PUT) {
+ goto send;
+ }
+
+ /* send is not needed yet, return to caller */
+ return 0;
+
+send:
+ ret = send_log_events(ctx, buf);
+ reset_flush_buf(ctx, buf);
+ if (ret < 0) {
+ return -1;
+ }
+
+ if (retry_add == FLB_TRUE) {
+ goto retry_add_event;
+ }
+
+ return 0;
+}
+
+int should_add_to_emf(struct flb_intermediate_metric *an_item)
+{
+ /* Valid for cpu plugin */
+ if (strncmp(an_item->key.via.str.ptr, "cpu_", 4) == 0
+ || strncmp(an_item->key.via.str.ptr, "user_p", 6) == 0
+ || strncmp(an_item->key.via.str.ptr, "system_p", 8) == 0) {
+ return 1;
+ }
+
+ /* Valid for mem plugin */
+ if (strncmp(an_item->key.via.str.ptr, "Mem.total", 9) == 0
+ || strncmp(an_item->key.via.str.ptr, "Mem.used", 8) == 0
+ || strncmp(an_item->key.via.str.ptr, "Mem.free", 8) == 0
+ || strncmp(an_item->key.via.str.ptr, "Swap.total", 10) == 0
+ || strncmp(an_item->key.via.str.ptr, "Swap.used", 9) == 0
+ || strncmp(an_item->key.via.str.ptr, "Swap.free", 9) == 0) {
+ return 1;
+ }
+
+ return 0;
+}
+
+int pack_emf_payload(struct flb_cloudwatch *ctx,
+ struct mk_list *flb_intermediate_metrics,
+ const char *input_plugin,
+ struct flb_time tms,
+ msgpack_sbuffer *mp_sbuf,
+ msgpack_unpacked *mp_result,
+ msgpack_object *emf_payload)
+{
+ int total_items = mk_list_size(flb_intermediate_metrics) + 1;
+
+ struct mk_list *metric_temp;
+ struct mk_list *metric_head;
+ struct flb_intermediate_metric *an_item;
+ msgpack_unpack_return mp_ret;
+
+ /* Serialize values into the buffer using msgpack_sbuffer_write */
+ msgpack_packer mp_pck;
+ msgpack_packer_init(&mp_pck, mp_sbuf, msgpack_sbuffer_write);
+ msgpack_pack_map(&mp_pck, total_items);
+
+ /* Pack the _aws map */
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "_aws", 4);
+
+ msgpack_pack_map(&mp_pck, 2);
+
+ msgpack_pack_str(&mp_pck, 9);
+ msgpack_pack_str_body(&mp_pck, "Timestamp", 9);
+ msgpack_pack_long_long(&mp_pck, tms.tm.tv_sec * 1000L);
+
+ msgpack_pack_str(&mp_pck, 17);
+ msgpack_pack_str_body(&mp_pck, "CloudWatchMetrics", 17);
+ msgpack_pack_array(&mp_pck, 1);
+
+ msgpack_pack_map(&mp_pck, 3);
+
+ msgpack_pack_str(&mp_pck, 9);
+ msgpack_pack_str_body(&mp_pck, "Namespace", 9);
+
+ if (ctx->metric_namespace) {
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->metric_namespace));
+ msgpack_pack_str_body(&mp_pck, ctx->metric_namespace,
+ flb_sds_len(ctx->metric_namespace));
+ }
+ else {
+ msgpack_pack_str(&mp_pck, 18);
+ msgpack_pack_str_body(&mp_pck, "fluent-bit-metrics", 18);
+ }
+
+ msgpack_pack_str(&mp_pck, 10);
+ msgpack_pack_str_body(&mp_pck, "Dimensions", 10);
+
+ struct mk_list *head, *inner_head;
+ struct flb_split_entry *dimension_list, *entry;
+ struct mk_list *csv_values;
+ if (ctx->metric_dimensions) {
+ msgpack_pack_array(&mp_pck, mk_list_size(ctx->metric_dimensions));
+
+ mk_list_foreach(head, ctx->metric_dimensions) {
+ dimension_list = mk_list_entry(head, struct flb_split_entry, _head);
+ csv_values = flb_utils_split(dimension_list->value, ',', 256);
+ msgpack_pack_array(&mp_pck, mk_list_size(csv_values));
+
+ mk_list_foreach(inner_head, csv_values) {
+ entry = mk_list_entry(inner_head, struct flb_split_entry, _head);
+ msgpack_pack_str(&mp_pck, entry->len);
+ msgpack_pack_str_body(&mp_pck, entry->value, entry->len);
+ }
+ flb_utils_split_free(csv_values);
+ }
+ }
+ else {
+ msgpack_pack_array(&mp_pck, 0);
+ }
+
+ msgpack_pack_str(&mp_pck, 7);
+ msgpack_pack_str_body(&mp_pck, "Metrics", 7);
+
+ if (strcmp(input_plugin, "cpu") == 0) {
+ msgpack_pack_array(&mp_pck, 3);
+ }
+ else if (strcmp(input_plugin, "mem") == 0) {
+ msgpack_pack_array(&mp_pck, 6);
+ }
+ else {
+ msgpack_pack_array(&mp_pck, 0);
+ }
+
+ mk_list_foreach_safe(metric_head, metric_temp, flb_intermediate_metrics) {
+ an_item = mk_list_entry(metric_head, struct flb_intermediate_metric, _head);
+ if (should_add_to_emf(an_item) == 1) {
+ msgpack_pack_map(&mp_pck, 2);
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "Name", 4);
+ msgpack_pack_object(&mp_pck, an_item->key);
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "Unit", 4);
+ msgpack_pack_str(&mp_pck, strlen(an_item->metric_unit));
+ msgpack_pack_str_body(&mp_pck, an_item->metric_unit,
+ strlen(an_item->metric_unit));
+ }
+ }
+
+ /* Pack the metric values for each record */
+ mk_list_foreach_safe(metric_head, metric_temp, flb_intermediate_metrics) {
+ an_item = mk_list_entry(metric_head, struct flb_intermediate_metric, _head);
+ msgpack_pack_object(&mp_pck, an_item->key);
+ msgpack_pack_object(&mp_pck, an_item->value);
+ }
+
+ /*
+ * Deserialize the buffer into msgpack_object instance.
+ */
+
+ mp_ret = msgpack_unpack_next(mp_result, mp_sbuf->data, mp_sbuf->size, NULL);
+
+ if (mp_ret != MSGPACK_UNPACK_SUCCESS) {
+ flb_plg_error(ctx->ins, "msgpack_unpack returned non-success value %i", mp_ret);
+ return -1;
+ }
+
+ *emf_payload = mp_result->data;
+ return 0;
+}
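+
+/*
+ * Approximate shape of the EMF object packed above, rendered as JSON with
+ * placeholder values (cpu input, default namespace, no dimensions; the Unit
+ * strings come from the PERCENT/BYTES constants):
+ *
+ *   {
+ *     "_aws": {
+ *       "Timestamp": 1551188102000,
+ *       "CloudWatchMetrics": [
+ *         {
+ *           "Namespace": "fluent-bit-metrics",
+ *           "Dimensions": [],
+ *           "Metrics": [
+ *             {"Name": "cpu_p", "Unit": "..."},
+ *             {"Name": "user_p", "Unit": "..."},
+ *             {"Name": "system_p", "Unit": "..."}
+ *           ]
+ *         }
+ *       ]
+ *     },
+ *     "cpu_p": 1.25, "user_p": 0.75, "system_p": 0.5, ...
+ *   }
+ *
+ * Remaining record keys are appended as plain key/value pairs in the same
+ * way, even when they are not declared in the Metrics array.
+ */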
+
+/*
+ * Main routine: processes the msgpack records and sends them in batches,
+ * skipping empty records. The return value is the number of events
+ * processed and sent.
+ */
+int process_and_send(struct flb_cloudwatch *ctx, const char *input_plugin,
+ struct cw_flush *buf, flb_sds_t tag,
+ const char *data, size_t bytes)
+{
+ int i = 0;
+ size_t map_size;
+ msgpack_object map;
+ msgpack_object_kv *kv;
+ msgpack_object key;
+ msgpack_object val;
+ msgpack_unpacked mp_emf_result;
+ msgpack_object emf_payload;
+ /* msgpack::sbuffer is a simple buffer implementation. */
+ msgpack_sbuffer mp_sbuf;
+
+ struct log_stream *stream;
+
+ char *key_str = NULL;
+ size_t key_str_size = 0;
+ int j;
+ int ret;
+ int check = FLB_FALSE;
+ int found = FLB_FALSE;
+
+ /* Added for EMF support */
+ struct flb_intermediate_metric *metric;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_intermediate_metric *an_item;
+
+ int intermediate_metric_type;
+ char *intermediate_metric_unit;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ if (strncmp(input_plugin, "cpu", 3) == 0) {
+ intermediate_metric_type = GAUGE;
+ intermediate_metric_unit = PERCENT;
+ }
+ else if (strncmp(input_plugin, "mem", 3) == 0) {
+ intermediate_metric_type = GAUGE;
+ intermediate_metric_unit = BYTES;
+ }
+
+ /* unpack msgpack */
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+
+ /* Get the record/map */
+ map = *log_event.body;
+ map_size = map.via.map.size;
+
+ stream = get_log_stream(ctx, tag, map);
+ if (!stream) {
+ flb_plg_debug(ctx->ins, "Couldn't determine log group & stream for record with tag %s", tag);
+ goto error;
+ }
+
+ if (ctx->log_key) {
+ key_str = NULL;
+ key_str_size = 0;
+ check = FLB_FALSE;
+ found = FLB_FALSE;
+
+ kv = map.via.map.ptr;
+
+ for(j=0; j < map_size; j++) {
+ key = (kv+j)->key;
+ if (key.type == MSGPACK_OBJECT_BIN) {
+ key_str = (char *) key.via.bin.ptr;
+ key_str_size = key.via.bin.size;
+ check = FLB_TRUE;
+ }
+ if (key.type == MSGPACK_OBJECT_STR) {
+ key_str = (char *) key.via.str.ptr;
+ key_str_size = key.via.str.size;
+ check = FLB_TRUE;
+ }
+
+ if (check == FLB_TRUE) {
+ if (strncmp(ctx->log_key, key_str, key_str_size) == 0) {
+ found = FLB_TRUE;
+ val = (kv+j)->val;
+ ret = add_event(ctx, buf, stream, &val,
+ &log_event.timestamp);
+ if (ret < 0 ) {
+ goto error;
+ }
+ }
+ }
+
+ }
+ if (found == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Could not find log_key '%s' in record",
+ ctx->log_key);
+ }
+
+ if (ret == 0) {
+ i++;
+ }
+
+ continue;
+ }
+
+ if (strncmp(input_plugin, "cpu", 3) == 0
+ || strncmp(input_plugin, "mem", 3) == 0) {
+ /* Added for EMF support: Construct a list */
+ struct mk_list flb_intermediate_metrics;
+ mk_list_init(&flb_intermediate_metrics);
+
+ kv = map.via.map.ptr;
+
+ /*
+ * Iterate through the record map, extract intermediate metric data,
+ * and add to the list.
+ */
+ /* use 'j' here so that 'i', the processed-event counter, is not clobbered */
+ for (j = 0; j < map_size; j++) {
+ metric = flb_calloc(1, sizeof(struct flb_intermediate_metric));
+ if (!metric) {
+ goto error;
+ }
+
+ metric->key = (kv + j)->key;
+ metric->value = (kv + j)->val;
+ metric->metric_type = intermediate_metric_type;
+ metric->metric_unit = intermediate_metric_unit;
+ metric->timestamp = log_event.timestamp;
+
+ mk_list_add(&metric->_head, &flb_intermediate_metrics);
+
+ }
+
+ /* The msgpack object is only valid during the lifetime of the
+ * sbuffer & the unpacked result.
+ */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_unpacked_init(&mp_emf_result);
+
+ ret = pack_emf_payload(ctx,
+ &flb_intermediate_metrics,
+ input_plugin,
+ log_event.timestamp,
+ &mp_sbuf,
+ &mp_emf_result,
+ &emf_payload);
+
+ /* free the intermediate metric list */
+
+ mk_list_foreach_safe(head, tmp, &flb_intermediate_metrics) {
+ an_item = mk_list_entry(head, struct flb_intermediate_metric, _head);
+ mk_list_del(&an_item->_head);
+ flb_free(an_item);
+ }
+
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "Failed to convert EMF metrics to msgpack object. ret=%i", ret);
+ msgpack_unpacked_destroy(&mp_emf_result);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ goto error;
+ }
+ ret = add_event(ctx, buf, stream, &emf_payload,
+ &log_event.timestamp);
+
+ msgpack_unpacked_destroy(&mp_emf_result);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ } else {
+ ret = add_event(ctx, buf, stream, &map,
+ &log_event.timestamp);
+ }
+
+ if (ret < 0 ) {
+ goto error;
+ }
+
+ if (ret == 0) {
+ i++;
+ }
+ }
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ /* send any remaining events */
+ ret = send_log_events(ctx, buf);
+ reset_flush_buf(ctx, buf);
+ if (ret < 0) {
+ return -1;
+ }
+
+ /* return number of events */
+ return i;
+
+error:
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return -1;
+}
+
+struct log_stream *get_or_create_log_stream(struct flb_cloudwatch *ctx,
+ flb_sds_t stream_name,
+ flb_sds_t group_name)
+{
+ int ret;
+ struct log_stream *new_stream;
+ struct log_stream *stream;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ time_t now;
+
+ /* check if the stream already exists */
+ now = time(NULL);
+ mk_list_foreach_safe(head, tmp, &ctx->streams) {
+ stream = mk_list_entry(head, struct log_stream, _head);
+ if (strcmp(stream_name, stream->name) == 0 && strcmp(group_name, stream->group) == 0) {
+ return stream;
+ }
+ else {
+ /* check if stream is expired, if so, clean it up */
+ if (stream->expiration < now) {
+ mk_list_del(&stream->_head);
+ log_stream_destroy(stream);
+ }
+ }
+ }
+
+ /* create the new stream */
+ new_stream = flb_calloc(1, sizeof(struct log_stream));
+ if (!new_stream) {
+ flb_errno();
+ return NULL;
+ }
+ new_stream->name = flb_sds_create(stream_name);
+ if (new_stream->name == NULL) {
+ flb_errno();
+ return NULL;
+ }
+ new_stream->group = flb_sds_create(group_name);
+ if (new_stream->group == NULL) {
+ flb_errno();
+ return NULL;
+ }
+
+ ret = create_log_stream(ctx, new_stream, FLB_TRUE);
+ if (ret < 0) {
+ log_stream_destroy(new_stream);
+ return NULL;
+ }
+ new_stream->expiration = time(NULL) + FOUR_HOURS_IN_SECONDS;
+
+ mk_list_add(&new_stream->_head, &ctx->streams);
+ return new_stream;
+}
+
+struct log_stream *get_log_stream(struct flb_cloudwatch *ctx, flb_sds_t tag,
+ const msgpack_object map)
+{
+ flb_sds_t group_name = NULL;
+ flb_sds_t stream_name = NULL;
+ flb_sds_t tmp_s = NULL;
+ int free_group = FLB_FALSE;
+ int free_stream = FLB_FALSE;
+ struct log_stream *stream;
+
+ /* templates take priority */
+ if (ctx->ra_stream) {
+ stream_name = flb_ra_translate_check(ctx->ra_stream, tag, flb_sds_len(tag),
+ map, NULL, FLB_TRUE);
+ }
+
+ if (ctx->ra_group) {
+ group_name = flb_ra_translate_check(ctx->ra_group, tag, flb_sds_len(tag),
+ map, NULL, FLB_TRUE);
+ }
+
+ if (stream_name == NULL) {
+ if (ctx->stream_name) {
+ stream_name = ctx->stream_name;
+ } else {
+ free_stream = FLB_TRUE;
+ /* use log_stream_prefix */
+ stream_name = flb_sds_create(ctx->log_stream_prefix);
+ if (!stream_name) {
+ flb_errno();
+ if (group_name) {
+ flb_sds_destroy(group_name);
+ }
+ return NULL;
+ }
+
+ tmp_s = flb_sds_cat(stream_name, tag, flb_sds_len(tag));
+ if (!tmp_s) {
+ flb_errno();
+ flb_sds_destroy(stream_name);
+ if (group_name) {
+ flb_sds_destroy(group_name);
+ }
+ return NULL;
+ }
+ stream_name = tmp_s;
+ }
+ } else {
+ free_stream = FLB_TRUE;
+ }
+
+ if (group_name == NULL) {
+ group_name = ctx->group_name;
+ } else {
+ free_group = FLB_TRUE;
+ }
+
+ flb_plg_debug(ctx->ins, "Using stream=%s, group=%s", stream_name, group_name);
+
+ stream = get_or_create_log_stream(ctx, stream_name, group_name);
+
+ if (free_group == FLB_TRUE) {
+ flb_sds_destroy(group_name);
+ }
+ if (free_stream == FLB_TRUE) {
+ flb_sds_destroy(stream_name);
+ }
+ return stream;
+}
+
+
+static int set_log_group_retention(struct flb_cloudwatch *ctx, struct log_stream *stream)
+{
+ if (ctx->log_retention_days <= 0) {
+ /* no need to set */
+ return 0;
+ }
+
+ struct flb_http_client *c = NULL;
+ struct flb_aws_client *cw_client;
+ flb_sds_t body;
+ flb_sds_t tmp;
+ flb_sds_t error;
+
+ flb_plg_info(ctx->ins, "Setting retention policy on log group %s to %dd", stream->group, ctx->log_retention_days);
+
+ body = flb_sds_create_size(68 + strlen(stream->group));
+ if (!body) {
+ flb_sds_destroy(body);
+ flb_errno();
+ return -1;
+ }
+
+ /* construct PutRetentionPolicy request body */
+ tmp = flb_sds_printf(&body, "{\"logGroupName\":\"%s\",\"retentionInDays\":%d}", stream->group, ctx->log_retention_days);
+ if (!tmp) {
+ flb_sds_destroy(body);
+ flb_errno();
+ return -1;
+ }
+ body = tmp;
+
+ if (plugin_under_test() == FLB_TRUE) {
+ c = mock_http_call("TEST_PUT_RETENTION_POLICY_ERROR", "PutRetentionPolicy");
+ }
+ else {
+ cw_client = ctx->cw_client;
+ c = cw_client->client_vtable->request(cw_client, FLB_HTTP_POST,
+ "/", body, strlen(body),
+ &put_retention_policy_header, 1);
+ }
+
+ if (c) {
+ flb_plg_debug(ctx->ins, "PutRetentionPolicy http status=%d", c->resp.status);
+
+ if (c->resp.status == 200) {
+ /* success */
+ flb_plg_info(ctx->ins, "Set retention policy to %d", ctx->log_retention_days);
+ flb_sds_destroy(body);
+ flb_http_client_destroy(c);
+ return 0;
+ }
+
+ /* Check error */
+ if (c->resp.payload_size > 0) {
+ error = flb_aws_error(c->resp.payload, c->resp.payload_size);
+ if (error != NULL) {
+ /* some other error occurred; notify user */
+ flb_aws_print_error(c->resp.payload, c->resp.payload_size,
+ "PutRetentionPolicy", ctx->ins);
+ flb_sds_destroy(error);
+ }
+ else {
+ /* error can not be parsed, print raw response to debug */
+ flb_plg_debug(ctx->ins, "Raw response: %s", c->resp.payload);
+ }
+ }
+ }
+
+ flb_plg_error(ctx->ins, "Failed to putRetentionPolicy");
+ if (c) {
+ flb_http_client_destroy(c);
+ }
+ flb_sds_destroy(body);
+
+ return -1;
+}
+
+int create_log_group(struct flb_cloudwatch *ctx, struct log_stream *stream)
+{
+ struct flb_http_client *c = NULL;
+ struct flb_aws_client *cw_client;
+ flb_sds_t body;
+ flb_sds_t tmp;
+ flb_sds_t error;
+ int ret;
+
+ flb_plg_info(ctx->ins, "Creating log group %s", stream->group);
+
+ body = flb_sds_create_size(25 + strlen(stream->group));
+ if (!body) {
+ flb_sds_destroy(body);
+ flb_errno();
+ return -1;
+ }
+
+ /* construct CreateLogGroup request body */
+ tmp = flb_sds_printf(&body, "{\"logGroupName\":\"%s\"}", stream->group);
+ if (!tmp) {
+ flb_sds_destroy(body);
+ flb_errno();
+ return -1;
+ }
+ body = tmp;
+
+ if (plugin_under_test() == FLB_TRUE) {
+ c = mock_http_call("TEST_CREATE_LOG_GROUP_ERROR", "CreateLogGroup");
+ }
+ else {
+ cw_client = ctx->cw_client;
+ c = cw_client->client_vtable->request(cw_client, FLB_HTTP_POST,
+ "/", body, strlen(body),
+ &create_group_header, 1);
+ }
+
+ if (c) {
+ flb_plg_debug(ctx->ins, "CreateLogGroup http status=%d", c->resp.status);
+
+ if (c->resp.status == 200) {
+ /* success */
+ flb_plg_info(ctx->ins, "Created log group %s", stream->group);
+ flb_sds_destroy(body);
+ flb_http_client_destroy(c);
+ ret = set_log_group_retention(ctx, stream);
+ return ret;
+ }
+
+ /* Check error */
+ if (c->resp.payload_size > 0) {
+ error = flb_aws_error(c->resp.payload, c->resp.payload_size);
+ if (error != NULL) {
+ if (strcmp(error, ERR_CODE_ALREADY_EXISTS) == 0) {
+ flb_plg_info(ctx->ins, "Log Group %s already exists",
+ stream->group);
+ flb_sds_destroy(body);
+ flb_sds_destroy(error);
+ flb_http_client_destroy(c);
+ ret = set_log_group_retention(ctx, stream);
+ return ret;
+ }
+ /* some other error occurred; notify user */
+ flb_aws_print_error(c->resp.payload, c->resp.payload_size,
+ "CreateLogGroup", ctx->ins);
+ flb_sds_destroy(error);
+ }
+ else {
+ /* error can not be parsed, print raw response to debug */
+ flb_plg_debug(ctx->ins, "Raw response: %s", c->resp.payload);
+ }
+ }
+ }
+
+ flb_plg_error(ctx->ins, "Failed to create log group");
+ if (c) {
+ flb_http_client_destroy(c);
+ }
+ flb_sds_destroy(body);
+ return -1;
+}
+
+int create_log_stream(struct flb_cloudwatch *ctx, struct log_stream *stream,
+ int can_retry)
+{
+
+ struct flb_http_client *c = NULL;
+ struct flb_aws_client *cw_client;
+ flb_sds_t body;
+ flb_sds_t tmp;
+ flb_sds_t error;
+ int ret;
+
+ flb_plg_info(ctx->ins, "Creating log stream %s in log group %s",
+ stream->name, stream->group);
+
+ body = flb_sds_create_size(50 + strlen(stream->group) +
+ strlen(stream->name));
+ if (!body) {
+ flb_sds_destroy(body);
+ flb_errno();
+ return -1;
+ }
+
+ /* construct CreateLogStream request body */
+ tmp = flb_sds_printf(&body,
+ "{\"logGroupName\":\"%s\",\"logStreamName\":\"%s\"}",
+ stream->group,
+ stream->name);
+ if (!tmp) {
+ flb_sds_destroy(body);
+ flb_errno();
+ return -1;
+ }
+ body = tmp;
+
+ cw_client = ctx->cw_client;
+ if (plugin_under_test() == FLB_TRUE) {
+ c = mock_http_call("TEST_CREATE_LOG_STREAM_ERROR", "CreateLogStream");
+ }
+ else {
+ c = cw_client->client_vtable->request(cw_client, FLB_HTTP_POST,
+ "/", body, strlen(body),
+ &create_stream_header, 1);
+ }
+
+ if (c) {
+ flb_plg_debug(ctx->ins,"CreateLogStream http status=%d",
+ c->resp.status);
+
+ if (c->resp.status == 200) {
+ /* success */
+ flb_plg_info(ctx->ins, "Created log stream %s", stream->name);
+ flb_sds_destroy(body);
+ flb_http_client_destroy(c);
+ return 0;
+ }
+
+ /* Check error */
+ if (c->resp.payload_size > 0) {
+ error = flb_aws_error(c->resp.payload, c->resp.payload_size);
+ if (error != NULL) {
+ if (strcmp(error, ERR_CODE_ALREADY_EXISTS) == 0) {
+ flb_plg_info(ctx->ins, "Log Stream %s already exists",
+ stream->name);
+ flb_sds_destroy(body);
+ flb_sds_destroy(error);
+ flb_http_client_destroy(c);
+ return 0;
+ }
+
+ if (strcmp(error, ERR_CODE_NOT_FOUND) == 0) {
+ flb_sds_destroy(body);
+ flb_sds_destroy(error);
+ flb_http_client_destroy(c);
+
+ if (ctx->create_group == FLB_TRUE) {
+ flb_plg_info(ctx->ins, "Log Group %s not found. Will attempt to create it.",
+ stream->group);
+ ret = create_log_group(ctx, stream);
+ if (ret < 0) {
+ return -1;
+ } else {
+ if (can_retry == FLB_TRUE) {
+ /* retry stream creation */
+ return create_log_stream(ctx, stream, FLB_FALSE);
+ } else {
+ /* we failed to create the stream */
+ return -1;
+ }
+ }
+ } else {
+ flb_plg_error(ctx->ins, "Log Group %s not found and `auto_create_group` disabled.",
+ stream->group);
+ }
+ return -1;
+ }
+ /* some other error occurred; notify user */
+ flb_aws_print_error(c->resp.payload, c->resp.payload_size,
+ "CreateLogStream", ctx->ins);
+ flb_sds_destroy(error);
+ }
+ else {
+ /* error can not be parsed, print raw response to debug */
+ flb_plg_debug(ctx->ins, "Raw response: %s", c->resp.payload);
+ }
+ }
+ }
+
+ flb_plg_error(ctx->ins, "Failed to create log stream");
+ if (c) {
+ flb_http_client_destroy(c);
+ }
+ flb_sds_destroy(body);
+ return -1;
+}
+
+/*
+ * Returns -1 on failure, 0 on success, and 1 for a sequence token error,
+ * which means the caller can retry.
+ */
+int put_log_events(struct flb_cloudwatch *ctx, struct cw_flush *buf,
+ struct log_stream *stream, size_t payload_size)
+{
+
+ struct flb_http_client *c = NULL;
+ struct flb_aws_client *cw_client;
+ flb_sds_t tmp;
+ flb_sds_t error;
+ int num_headers = 1;
+ int retry = FLB_TRUE;
+
+ flb_plg_debug(ctx->ins, "Sending log events to log stream %s", stream->name);
+
+ /* stream is being used, update expiration */
+ stream->expiration = time(NULL) + FOUR_HOURS_IN_SECONDS;
+
+ if (ctx->log_format != NULL) {
+ put_log_events_header[1].val = (char *) ctx->log_format;
+ put_log_events_header[1].val_len = strlen(ctx->log_format);
+ num_headers = 2;
+ }
+
+retry_request:
+ if (plugin_under_test() == FLB_TRUE) {
+ c = mock_http_call("TEST_PUT_LOG_EVENTS_ERROR", "PutLogEvents");
+ }
+ else {
+ cw_client = ctx->cw_client;
+ c = cw_client->client_vtable->request(cw_client, FLB_HTTP_POST,
+ "/", buf->out_buf, payload_size,
+ put_log_events_header, num_headers);
+ }
+
+ if (c) {
+ flb_plg_debug(ctx->ins, "PutLogEvents http status=%d", c->resp.status);
+
+ if (c->resp.status == 200) {
+ if (c->resp.data == NULL || c->resp.data_len == 0 || strstr(c->resp.data, AMZN_REQUEST_ID_HEADER) == NULL) {
+ /* code was 200, but response is invalid, treat as failure */
+ if (c->resp.data != NULL) {
+ flb_plg_debug(ctx->ins, "Could not find sequence token in "
+ "response: response body is empty: full data: `%.*s`", c->resp.data_len, c->resp.data);
+ }
+ flb_http_client_destroy(c);
+
+ if (retry == FLB_TRUE) {
+ flb_plg_debug(ctx->ins, "issuing immediate retry for invalid response");
+ retry = FLB_FALSE;
+ goto retry_request;
+ }
+ flb_plg_error(ctx->ins, "Recieved code 200 but response was invalid, %s header not found",
+ AMZN_REQUEST_ID_HEADER);
+ return -1;
+ }
+
+
+ /* success */
+ if (c->resp.payload_size > 0) {
+ flb_plg_debug(ctx->ins, "Sent events to %s", stream->name);
+ tmp = flb_json_get_val(c->resp.payload, c->resp.payload_size,
+ "nextSequenceToken");
+ if (tmp) {
+ if (stream->sequence_token != NULL) {
+ flb_sds_destroy(stream->sequence_token);
+ }
+ stream->sequence_token = tmp;
+
+ flb_http_client_destroy(c);
+ return 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "Could not find sequence token in "
+ "response: %s", c->resp.payload);
+ }
+ }
+
+ flb_http_client_destroy(c);
+ return 0;
+ }
+
+ /* Check error */
+ if (c->resp.payload_size > 0) {
+ error = flb_aws_error(c->resp.payload, c->resp.payload_size);
+ if (error != NULL) {
+ if (strcmp(error, ERR_CODE_INVALID_SEQUENCE_TOKEN) == 0) {
+ /*
+ * This case will happen when we do not know the correct
+ * sequence token; we can find it in the error response
+ * and retry.
+ */
+ flb_plg_debug(ctx->ins, "Sequence token was invalid, "
+ "will retry");
+ tmp = flb_json_get_val(c->resp.payload, c->resp.payload_size,
+ "expectedSequenceToken");
+ if (tmp) {
+ if (stream->sequence_token != NULL) {
+ flb_sds_destroy(stream->sequence_token);
+ }
+ stream->sequence_token = tmp;
+ flb_sds_destroy(error);
+ flb_http_client_destroy(c);
+ /* tell the caller to retry */
+ return 1;
+ }
+ } else if (strcmp(error, ERR_CODE_DATA_ALREADY_ACCEPTED) == 0) {
+ /* not sure what causes this but it counts as success */
+ flb_plg_info(ctx->ins, "Got %s, a previous retry must have succeeded asychronously", ERR_CODE_DATA_ALREADY_ACCEPTED);
+ flb_sds_destroy(error);
+ flb_http_client_destroy(c);
+ /* success */
+ return 0;
+ }
+ /* some other error occurred; notify user */
+ flb_aws_print_error(c->resp.payload, c->resp.payload_size,
+ "PutLogEvents", ctx->ins);
+ flb_sds_destroy(error);
+ }
+ else {
+ /* error could not be parsed, print raw response to debug */
+ flb_plg_debug(ctx->ins, "Raw response: %s", c->resp.payload);
+ }
+ }
+ }
+
+ flb_plg_error(ctx->ins, "Failed to send log events");
+ if (c) {
+ flb_http_client_destroy(c);
+ }
+ return -1;
+}
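+
+/*
+ * Illustrative caller pattern for put_log_events() (a sketch only; the real
+ * caller is send_log_events(), defined elsewhere in this file):
+ *
+ * ret = put_log_events(ctx, buf, stream, payload_size);
+ * if (ret == 1) {
+ * ret = put_log_events(ctx, buf, stream, payload_size);
+ * }
+ *
+ * When 1 is returned, the stream's sequence_token has already been refreshed
+ * from the error response, so a single immediate retry is normally enough.
+ */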
+
+
+void cw_flush_destroy(struct cw_flush *buf)
+{
+ if (buf) {
+ flb_free(buf->tmp_buf);
+ flb_free(buf->out_buf);
+ flb_free(buf->events);
+ flb_free(buf->event_buf);
+ flb_free(buf);
+ }
+}
diff --git a/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_api.h b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_api.h
new file mode 100644
index 000000000..99919055b
--- /dev/null
+++ b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_api.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_CLOUDWATCH_API
+#define FLB_OUT_CLOUDWATCH_API
+
+/*
+ * The CloudWatch API documents that the maximum payload is 1,048,576 bytes
+ * For reasons that are under investigation, using that number in this plugin
+ * leads to API errors. No issues have been seen setting it to 1,000,000 bytes.
+ */
+#define PUT_LOG_EVENTS_PAYLOAD_SIZE 1048576
+#define MAX_EVENTS_PER_PUT 10000
+
+/* number of characters needed to 'start' a PutLogEvents payload */
+#define PUT_LOG_EVENTS_HEADER_LEN 72
+/* number of characters needed per event in a PutLogEvents payload */
+#define PUT_LOG_EVENTS_PER_EVENT_LEN 42
+/* number of characters needed to 'end' a PutLogEvents payload */
+#define PUT_LOG_EVENTS_FOOTER_LEN 4
+
+/* 256KiB minus 26 bytes for the event */
+#define MAX_EVENT_LEN 262118
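+
+/*
+ * Rough capacity math with the constants above (an illustration only; the
+ * actual accounting happens where events are buffered): a payload holding
+ * N events of average serialized size S is roughly
+ *
+ * PUT_LOG_EVENTS_HEADER_LEN + N * (PUT_LOG_EVENTS_PER_EVENT_LEN + S)
+ * + PUT_LOG_EVENTS_FOOTER_LEN
+ *
+ * e.g. 10,000 events of ~60 bytes each give about 72 + 10000 * 102 + 4
+ * ~= 1,020,076 bytes, so both the payload-size and event-count limits matter.
+ */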
+
+#include "cloudwatch_logs.h"
+
+void cw_flush_destroy(struct cw_flush *buf);
+
+int process_and_send(struct flb_cloudwatch *ctx, const char *input_plugin,
+ struct cw_flush *buf, flb_sds_t tag,
+ const char *data, size_t bytes);
+int create_log_stream(struct flb_cloudwatch *ctx, struct log_stream *stream, int can_retry);
+struct log_stream *get_log_stream(struct flb_cloudwatch *ctx, flb_sds_t tag,
+ const msgpack_object map);
+int put_log_events(struct flb_cloudwatch *ctx, struct cw_flush *buf,
+ struct log_stream *stream,
+ size_t payload_size);
+int create_log_group(struct flb_cloudwatch *ctx, struct log_stream *stream);
+int compare_events(const void *a_arg, const void *b_arg);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_logs.c b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_logs.c
new file mode 100644
index 000000000..f6aef2240
--- /dev/null
+++ b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_logs.c
@@ -0,0 +1,670 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_slist.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_output_plugin.h>
+
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_aws_util.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_utils.h>
+
+#include <monkey/mk_core.h>
+#include <msgpack.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "cloudwatch_logs.h"
+#include "cloudwatch_api.h"
+
+static struct flb_aws_header content_type_header = {
+ .key = "Content-Type",
+ .key_len = 12,
+ .val = "application/x-amz-json-1.1",
+ .val_len = 26,
+};
+
+static int cb_cloudwatch_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ const char *tmp;
+ char *session_name = NULL;
+ struct flb_cloudwatch *ctx = NULL;
+ struct cw_flush *buf = NULL;
+ int ret;
+ flb_sds_t tmp_sds = NULL;
+ (void) config;
+ (void) data;
+
+ ctx = flb_calloc(1, sizeof(struct flb_cloudwatch));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ mk_list_init(&ctx->streams);
+
+ ctx->ins = ins;
+
+ /* Populate context with config map defaults and incoming properties */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "configuration error");
+ goto error;
+ }
+
+ tmp = flb_output_get_property("log_group_name", ins);
+ if (tmp) {
+ ctx->log_group = tmp;
+ ctx->group_name = flb_sds_create(tmp);
+ if (!ctx->group_name) {
+ flb_plg_error(ctx->ins, "Could not create log group context property");
+ goto error;
+ }
+ } else {
+ flb_plg_error(ctx->ins, "'log_group_name' is a required field");
+ goto error;
+ }
+
+ tmp = flb_output_get_property("log_stream_name", ins);
+ if (tmp) {
+ ctx->log_stream_name = tmp;
+ ctx->stream_name = flb_sds_create(tmp);
+ if (!ctx->stream_name) {
+ flb_plg_error(ctx->ins, "Could not create log group context property");
+ goto error;
+ }
+ }
+
+ tmp = flb_output_get_property("log_stream_prefix", ins);
+ if (tmp) {
+ ctx->log_stream_prefix = tmp;
+ }
+
+ if (!ctx->log_stream_name && !ctx->log_stream_prefix) {
+ flb_plg_error(ctx->ins, "Either 'log_stream_name' or 'log_stream_prefix'"
+ " is required");
+ goto error;
+ }
+
+ if (ctx->log_stream_name && ctx->log_stream_prefix) {
+ flb_plg_error(ctx->ins, "Either 'log_stream_name' or 'log_stream_prefix'"
+ " is required");
+ goto error;
+ }
+
+ tmp = flb_output_get_property("log_group_template", ins);
+ if (tmp) {
+ ctx->ra_group = flb_ra_create((char *) tmp, FLB_FALSE);
+ if (ctx->ra_group == NULL) {
+ flb_plg_error(ctx->ins, "Could not parse `log_group_template`");
+ goto error;
+ }
+ }
+
+ tmp = flb_output_get_property("log_stream_template", ins);
+ if (tmp) {
+ ctx->ra_stream = flb_ra_create((char *) tmp, FLB_FALSE);
+ if (ctx->ra_stream == NULL) {
+ flb_plg_error(ctx->ins, "Could not parse `log_stream_template`");
+ goto error;
+ }
+ }
+
+ tmp = flb_output_get_property("log_format", ins);
+ if (tmp) {
+ ctx->log_format = tmp;
+ }
+
+ tmp = flb_output_get_property("endpoint", ins);
+ if (tmp) {
+ ctx->custom_endpoint = FLB_TRUE;
+ ctx->endpoint = removeProtocol((char *) tmp, "https://");
+ }
+ else {
+ ctx->custom_endpoint = FLB_FALSE;
+ }
+
+ tmp = flb_output_get_property("log_key", ins);
+ if (tmp) {
+ ctx->log_key = tmp;
+ }
+
+ tmp = flb_output_get_property("extra_user_agent", ins);
+ if (tmp) {
+ ctx->extra_user_agent = tmp;
+ }
+
+ tmp = flb_output_get_property("region", ins);
+ if (tmp) {
+ ctx->region = tmp;
+ } else {
+ flb_plg_error(ctx->ins, "'region' is a required field");
+ goto error;
+ }
+
+ tmp = flb_output_get_property("metric_namespace", ins);
+ if (tmp) {
+ flb_plg_info(ctx->ins, "Metric Namespace=%s", tmp);
+ ctx->metric_namespace = flb_sds_create(tmp);
+ }
+
+ tmp = flb_output_get_property("metric_dimensions", ins);
+ if (tmp) {
+ flb_plg_info(ctx->ins, "Metric Dimensions=%s", tmp);
+ ctx->metric_dimensions = flb_utils_split(tmp, ';', 256);
+ }
+
+ ctx->create_group = FLB_FALSE;
+ tmp = flb_output_get_property("auto_create_group", ins);
+ if (tmp) {
+ ctx->create_group = flb_utils_bool(tmp);
+ }
+
+ ctx->retry_requests = FLB_TRUE;
+ tmp = flb_output_get_property("auto_retry_requests", ins);
+ /* native plugins use On/Off as bool, the old Go plugin used true/false */
+ if (tmp && (strcasecmp(tmp, "Off") == 0 || strcasecmp(tmp, "false") == 0)) {
+ ctx->retry_requests = FLB_FALSE;
+ }
+
+ ctx->log_retention_days = 0;
+ tmp = flb_output_get_property("log_retention_days", ins);
+ if (tmp) {
+ ctx->log_retention_days = atoi(tmp);
+ }
+
+ tmp = flb_output_get_property("role_arn", ins);
+ if (tmp) {
+ ctx->role_arn = tmp;
+ }
+
+ tmp = flb_output_get_property("sts_endpoint", ins);
+ if (tmp) {
+ ctx->sts_endpoint = (char *) tmp;
+ }
+
+ /* one tls instance for provider, one for cw client */
+ ctx->cred_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+
+ if (!ctx->cred_tls) {
+ flb_plg_error(ctx->ins, "Failed to create tls context");
+ goto error;
+ }
+
+ ctx->client_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ ins->tls_verify,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+ if (!ctx->client_tls) {
+ flb_plg_error(ctx->ins, "Failed to create tls context");
+ goto error;
+ }
+
+ ctx->aws_provider = flb_standard_chain_provider_create(config,
+ ctx->cred_tls,
+ (char *) ctx->region,
+ (char *) ctx->sts_endpoint,
+ NULL,
+ flb_aws_client_generator(),
+ ctx->profile);
+ if (!ctx->aws_provider) {
+ flb_plg_error(ctx->ins, "Failed to create AWS Credential Provider");
+ goto error;
+ }
+
+ if(ctx->role_arn) {
+ /* set up sts assume role provider */
+ session_name = flb_sts_session_name();
+ if (!session_name) {
+ flb_plg_error(ctx->ins,
+ "Failed to generate random STS session name");
+ goto error;
+ }
+
+ /* STS provider needs yet another separate TLS instance */
+ ctx->sts_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+ if (!ctx->sts_tls) {
+ flb_errno();
+ goto error;
+ }
+
+ ctx->base_aws_provider = ctx->aws_provider;
+
+ ctx->aws_provider = flb_sts_provider_create(config,
+ ctx->sts_tls,
+ ctx->base_aws_provider,
+ (char *) ctx->external_id,
+ (char *) ctx->role_arn,
+ session_name,
+ (char *) ctx->region,
+ (char *) ctx->sts_endpoint,
+ NULL,
+ flb_aws_client_generator());
+ if (!ctx->aws_provider) {
+ flb_plg_error(ctx->ins,
+ "Failed to create AWS STS Credential Provider");
+ goto error;
+ }
+ /* session name can freed after provider is created */
+ flb_free(session_name);
+ session_name = NULL;
+ }
+
+ /* initialize credentials and set to sync mode */
+ ctx->aws_provider->provider_vtable->sync(ctx->aws_provider);
+ ctx->aws_provider->provider_vtable->init(ctx->aws_provider);
+ ctx->aws_provider->provider_vtable->upstream_set(ctx->aws_provider, ctx->ins);
+
+ if (ctx->endpoint == NULL) {
+ ctx->endpoint = flb_aws_endpoint("logs", (char *) ctx->region);
+ if (!ctx->endpoint) {
+ goto error;
+ }
+ }
+
+ struct flb_aws_client_generator *generator = flb_aws_client_generator();
+ ctx->cw_client = generator->create();
+ if (!ctx->cw_client) {
+ goto error;
+ }
+ ctx->cw_client->name = "cw_client";
+ ctx->cw_client->has_auth = FLB_TRUE;
+ ctx->cw_client->provider = ctx->aws_provider;
+ ctx->cw_client->region = (char *) ctx->region;
+ ctx->cw_client->service = "logs";
+ ctx->cw_client->port = (ins->host.port != 0) ? ins->host.port : 443;
+ ctx->cw_client->flags = (ins->use_tls) ? FLB_IO_TLS : FLB_IO_TCP;
+ ctx->cw_client->proxy = NULL;
+ ctx->cw_client->static_headers = &content_type_header;
+ ctx->cw_client->static_headers_len = 1;
+ tmp_sds = flb_sds_create(ctx->extra_user_agent);
+ if (!tmp_sds) {
+ flb_errno();
+ goto error;
+ }
+ ctx->cw_client->extra_user_agent = tmp_sds;
+ ctx->cw_client->retry_requests = ctx->retry_requests;
+
+ struct flb_upstream *upstream = flb_upstream_create(config, ctx->endpoint,
+ ctx->cw_client->port,
+ ctx->cw_client->flags,
+ ctx->client_tls);
+ if (!upstream) {
+ flb_plg_error(ctx->ins, "Connection initialization error");
+ goto error;
+ }
+
+ ctx->cw_client->upstream = upstream;
+ flb_output_upstream_set(upstream, ctx->ins);
+ ctx->cw_client->host = ctx->endpoint;
+
+ /* alloc the payload/processing buffer */
+ buf = flb_calloc(1, sizeof(struct cw_flush));
+ if (!buf) {
+ flb_errno();
+ goto error;
+ }
+
+ buf->out_buf = flb_malloc(PUT_LOG_EVENTS_PAYLOAD_SIZE);
+ if (!buf->out_buf) {
+ flb_errno();
+ cw_flush_destroy(buf);
+ goto error;
+ }
+ buf->out_buf_size = PUT_LOG_EVENTS_PAYLOAD_SIZE;
+
+ buf->tmp_buf = flb_malloc(sizeof(char) * PUT_LOG_EVENTS_PAYLOAD_SIZE);
+ if (!buf->tmp_buf) {
+ flb_errno();
+ cw_flush_destroy(buf);
+ goto error;
+ }
+ buf->tmp_buf_size = PUT_LOG_EVENTS_PAYLOAD_SIZE;
+
+ buf->events = flb_malloc(sizeof(struct cw_event) * MAX_EVENTS_PER_PUT);
+ if (!buf->events) {
+ flb_errno();
+ cw_flush_destroy(buf);
+ goto error;
+ }
+ buf->events_capacity = MAX_EVENTS_PER_PUT;
+
+ ctx->buf = buf;
+
+
+ /* Export context */
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+
+error:
+ flb_free(session_name);
+ flb_plg_error(ctx->ins, "Initialization failed");
+ flb_cloudwatch_ctx_destroy(ctx);
+ return -1;
+}
+
+static void cb_cloudwatch_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ struct flb_cloudwatch *ctx = out_context;
+ int event_count;
+ (void) i_ins;
+ (void) config;
+
+ event_count = process_and_send(ctx, i_ins->p->name, ctx->buf, event_chunk->tag,
+ event_chunk->data, event_chunk->size);
+ if (event_count < 0) {
+ flb_plg_error(ctx->ins, "Failed to send events");
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ // TODO: this message is inaccurate if events are skipped
+ flb_plg_debug(ctx->ins, "Sent %d events to CloudWatch", event_count);
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+void flb_cloudwatch_ctx_destroy(struct flb_cloudwatch *ctx)
+{
+ struct log_stream *stream;
+ struct mk_list *tmp;
+ struct mk_list *head;
+
+ if (ctx != NULL) {
+ if (ctx->base_aws_provider) {
+ flb_aws_provider_destroy(ctx->base_aws_provider);
+ }
+
+ if (ctx->buf) {
+ cw_flush_destroy(ctx->buf);
+ }
+
+ if (ctx->aws_provider) {
+ flb_aws_provider_destroy(ctx->aws_provider);
+ }
+
+ if (ctx->cred_tls) {
+ flb_tls_destroy(ctx->cred_tls);
+ }
+
+ if (ctx->sts_tls) {
+ flb_tls_destroy(ctx->sts_tls);
+ }
+
+ if (ctx->client_tls) {
+ flb_tls_destroy(ctx->client_tls);
+ }
+
+ if (ctx->cw_client) {
+ flb_aws_client_destroy(ctx->cw_client);
+ }
+
+ if (ctx->custom_endpoint == FLB_FALSE) {
+ flb_free(ctx->endpoint);
+ }
+
+ if (ctx->ra_group) {
+ flb_ra_destroy(ctx->ra_group);
+ }
+
+ if (ctx->ra_stream) {
+ flb_ra_destroy(ctx->ra_stream);
+ }
+
+ if (ctx->group_name) {
+ flb_sds_destroy(ctx->group_name);
+ }
+
+ if (ctx->stream_name) {
+ flb_sds_destroy(ctx->stream_name);
+ }
+
+ mk_list_foreach_safe(head, tmp, &ctx->streams) {
+ stream = mk_list_entry(head, struct log_stream, _head);
+ mk_list_del(&stream->_head);
+ log_stream_destroy(stream);
+ }
+ flb_free(ctx);
+ }
+}
+
+static int cb_cloudwatch_exit(void *data, struct flb_config *config)
+{
+ struct flb_cloudwatch *ctx = data;
+
+ flb_cloudwatch_ctx_destroy(ctx);
+ return 0;
+}
+
+void log_stream_destroy(struct log_stream *stream)
+{
+ if (stream) {
+ if (stream->name) {
+ flb_sds_destroy(stream->name);
+ }
+ if (stream->sequence_token) {
+ flb_sds_destroy(stream->sequence_token);
+ }
+ if (stream->group) {
+ flb_sds_destroy(stream->group);
+ }
+ flb_free(stream);
+ }
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "region", NULL,
+ 0, FLB_FALSE, 0,
+ "The AWS region to send logs to"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "log_group_name", NULL,
+ 0, FLB_FALSE, 0,
+ "CloudWatch Log Group Name"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "log_stream_name", NULL,
+ 0, FLB_FALSE, 0,
+ "CloudWatch Log Stream Name; not compatible with `log_stream_prefix`"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "log_stream_prefix", NULL,
+ 0, FLB_FALSE, 0,
+ "Prefix for CloudWatch Log Stream Name; the tag is appended to the prefix"
+ " to form the stream name"
+ },
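+
+ /*
+ * Example (illustrative): with log_stream_prefix set to "fluentbit-" and an
+ * incoming tag of "app.logs", the resulting stream name is
+ * "fluentbit-app.logs" (see get_log_stream()).
+ */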
+
+ {
+ FLB_CONFIG_MAP_STR, "log_group_template", NULL,
+ 0, FLB_FALSE, 0,
+ "Template for CW Log Group name using record accessor syntax. "
+ "Plugin falls back to the log_group_name configured if needed."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "log_stream_template", NULL,
+ 0, FLB_FALSE, 0,
+ "Template for CW Log Stream name using record accessor syntax. "
+ "Plugin falls back to the log_stream_name or log_stream_prefix configured if needed."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "log_key", NULL,
+ 0, FLB_FALSE, 0,
+ "By default, the whole log record will be sent to CloudWatch. "
+ "If you specify a key name with this option, then only the value of "
+ "that key will be sent to CloudWatch. For example, if you are using "
+ "the Fluentd Docker log driver, you can specify log_key log and only "
+ "the log message will be sent to CloudWatch."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "extra_user_agent", NULL,
+ 0, FLB_FALSE, 0,
+ "This option appends a string to the default user agent. "
+ "AWS asks that you not manually set this field yourself, "
+ "it is reserved for use in our vended configurations, "
+ "for example, EKS Container Insights."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "log_format", NULL,
+ 0, FLB_FALSE, 0,
+ "An optional parameter that can be used to tell CloudWatch the format "
+ "of the data. A value of json/emf enables CloudWatch to extract custom "
+ "metrics embedded in a JSON payload."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "role_arn", NULL,
+ 0, FLB_FALSE, 0,
+ "ARN of an IAM role to assume (ex. for cross account access)."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "auto_create_group", "false",
+ 0, FLB_FALSE, 0,
+ "Automatically create the log group (log streams will always automatically"
+ " be created)"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "auto_retry_requests", "true",
+ 0, FLB_FALSE, 0,
+ "Immediately retry failed requests to AWS services once. This option "
+ "does not affect the normal Fluent Bit retry mechanism with backoff. "
+ "Instead, it enables an immediate retry with no delay for networking "
+ "errors, which may help improve throughput when there are transient/random "
+ "networking issues."
+ },
+
+ {
+ FLB_CONFIG_MAP_INT, "log_retention_days", "0",
+ 0, FLB_FALSE, 0,
+ "If set to a number greater than zero, and newly create log group's "
+ "retention policy is set to this many days. "
+ "Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "endpoint", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify a custom endpoint for the CloudWatch Logs API"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "sts_endpoint", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify a custom endpoint for the STS API, can be used with the role_arn parameter"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "external_id", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_cloudwatch, external_id),
+ "Specify an external ID for the STS API, can be used with the role_arn parameter if your role "
+ "requires an external ID."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "metric_namespace", NULL,
+ 0, FLB_FALSE, 0,
+ "Metric namespace for CloudWatch EMF logs"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "metric_dimensions", NULL,
+ 0, FLB_FALSE, 0,
+ "Metric dimensions is a list of lists. If you have only one list of "
+ "dimensions, put the values as a comma seperated string. If you want to put "
+ "list of lists, use the list as semicolon seperated strings. If your value "
+ "is 'd1,d2;d3', we will consider it as [[d1, d2],[d3]]."
+ },
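+
+ /*
+ * Example configuration (illustrative values; property names as defined in
+ * this map):
+ *
+ * [OUTPUT]
+ * Name cloudwatch_logs
+ * Match *
+ * region us-east-1
+ * log_group_name my-group
+ * log_stream_prefix fluentbit-
+ * log_format json/emf
+ * metric_namespace my-namespace
+ * metric_dimensions d1,d2;d3
+ */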
+
+ {
+ FLB_CONFIG_MAP_STR, "profile", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_cloudwatch, profile),
+ "AWS Profile name. AWS Profiles can be configured with AWS CLI and are usually stored in "
+ "$HOME/.aws/ directory."
+ },
+
+ /* EOF */
+ {0}
+};
+
+/* Plugin registration */
+struct flb_output_plugin out_cloudwatch_logs_plugin = {
+ .name = "cloudwatch_logs",
+ .description = "Send logs to Amazon CloudWatch",
+ .cb_init = cb_cloudwatch_init,
+ .cb_flush = cb_cloudwatch_flush,
+ .cb_exit = cb_cloudwatch_exit,
+
+ /*
+ * Allow cloudwatch to use async network stack synchronously by opting into
+ * FLB_OUTPUT_SYNCHRONOUS synchronous task scheduler
+ */
+ .flags = FLB_OUTPUT_SYNCHRONOUS,
+ .workers = 1,
+
+ /* Configuration */
+ .config_map = config_map,
+};
diff --git a/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_logs.h b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_logs.h
new file mode 100644
index 000000000..7fe8bf0b7
--- /dev/null
+++ b/src/fluent-bit/plugins/out_cloudwatch_logs/cloudwatch_logs.h
@@ -0,0 +1,158 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_CLOUDWATCH_LOGS_H
+#define FLB_OUT_CLOUDWATCH_LOGS_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_aws_util.h>
+#include <fluent-bit/flb_signv4.h>
+
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/record_accessor/flb_ra_parser.h>
+
+/* buffers used for each flush */
+struct cw_flush {
+ /* temporary buffer for storing the serialized event messages */
+ char *tmp_buf;
+ size_t tmp_buf_size;
+ /* current index of tmp_buf */
+ size_t tmp_buf_offset;
+
+ /* projected final size of the payload for this flush */
+ size_t data_size;
+
+ /* log events; each has a pointer to its message in tmp_buf */
+ struct cw_event *events;
+ int events_capacity;
+ /* current event */
+ int event_index;
+
+ /* the payload of the API request */
+ char *out_buf;
+ size_t out_buf_size;
+
+ /* buffer used to temporarily hold an event during processing */
+ char *event_buf;
+ size_t event_buf_size;
+
+ /* current log stream that we are sending records to */
+ struct log_stream *current_stream;
+};
+
+struct cw_event {
+ char *json;
+ size_t len;
+ // TODO: re-usable in kinesis streams plugin if we make it timespec instead
+ // uint64_t?
+ unsigned long long timestamp;
+};
+
+struct log_stream {
+ flb_sds_t name;
+ flb_sds_t group;
+ flb_sds_t sequence_token;
+ /*
+ * log streams in CloudWatch do not expire, but our internal representations
+ * of them are periodically cleaned up if they have been unused for too long
+ */
+ time_t expiration;
+
+ /*
+ * Used to track the "time span" of a single PutLogEvents payload,
+ * which cannot exceed 24 hours.
+ */
+ unsigned long long oldest_event;
+ unsigned long long newest_event;
+
+ struct mk_list _head;
+};
+
+void log_stream_destroy(struct log_stream *stream);
+
+struct flb_cloudwatch {
+ /*
+ * TLS instances cannot be reused, so we have one for:
+ * - Base cred provider (needed for EKS provider)
+ * - STS Assume role provider
+ * - The CloudWatch Logs client for this plugin
+ */
+ struct flb_tls *cred_tls;
+ struct flb_tls *sts_tls;
+ struct flb_tls *client_tls;
+ struct flb_aws_provider *aws_provider;
+ struct flb_aws_provider *base_aws_provider;
+ struct flb_aws_client *cw_client;
+
+ /* configuration options */
+ const char *log_stream_name;
+ const char *log_stream_prefix;
+ const char *log_group;
+ const char *region;
+ const char *sts_endpoint;
+ const char *log_format;
+ const char *role_arn;
+ const char *log_key;
+ const char *extra_user_agent;
+ const char *external_id;
+ const char *profile;
+ int custom_endpoint;
+ /* Should the plugin create the log group */
+ int create_group;
+
+ flb_sds_t group_name;
+ flb_sds_t stream_name;
+
+ /* Should requests to AWS services be retried */
+ int retry_requests;
+
+ /* If set to a number greater than zero, any newly created log group's retention policy is set to this many days. */
+ int log_retention_days;
+
+ /* must be freed on shutdown if custom_endpoint is not set */
+ char *endpoint;
+
+ /* templates */
+ struct flb_record_accessor *ra_group;
+ struct flb_record_accessor *ra_stream;
+
+ /* stores log streams we're putting to */
+ struct mk_list streams;
+
+ /* buffers for data processing and request payload */
+ struct cw_flush *buf;
+ /* The namespace to use for the metric */
+ flb_sds_t metric_namespace;
+
+ /* Metric dimensions is a list of lists. If you have only one list of
+ dimensions, put the values as a comma-separated string. To specify
+ multiple lists, separate them with semicolons. If your value
+ is 'd1,d2;d3', we will consider it as [[d1, d2],[d3]] */
+ struct mk_list *metric_dimensions;
+
+ /* Plugin output instance reference */
+ struct flb_output_instance *ins;
+};
+
+void flb_cloudwatch_ctx_destroy(struct flb_cloudwatch *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_counter/CMakeLists.txt b/src/fluent-bit/plugins/out_counter/CMakeLists.txt
new file mode 100644
index 000000000..63f205699
--- /dev/null
+++ b/src/fluent-bit/plugins/out_counter/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ counter.c)
+
+FLB_PLUGIN(out_counter "${src}" "")
diff --git a/src/fluent-bit/plugins/out_counter/counter.c b/src/fluent-bit/plugins/out_counter/counter.c
new file mode 100644
index 000000000..812b96782
--- /dev/null
+++ b/src/fluent-bit/plugins/out_counter/counter.c
@@ -0,0 +1,106 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_mp.h>
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+struct flb_counter_ctx {
+ uint64_t total;
+};
+
+static int cb_counter_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ (void) ins;
+ (void) config;
+ (void) data;
+ struct flb_counter_ctx *ctx;
+
+ ctx = flb_malloc(sizeof(struct flb_counter_ctx));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->total = 0;
+ flb_output_set_context(ins, ctx);
+ if (flb_output_config_map_set(ins, (void *)ctx) == -1) {
+ flb_plg_error(ins, "unable to load configuration");
+ flb_free(ctx);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void cb_counter_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ (void) i_ins;
+ (void) out_context;
+ (void) config;
+ size_t cnt;
+ struct flb_counter_ctx *ctx = out_context;
+ struct flb_time tm;
+
+ /* Count number of parent items */
+ cnt = flb_mp_count(event_chunk->data, event_chunk->size);
+ ctx->total += cnt;
+
+ flb_time_get(&tm);
+ printf("%f,%lu (total = %"PRIu64")\n", flb_time_to_double(&tm), cnt,
+ ctx->total);
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int cb_counter_exit(void *data, struct flb_config *config)
+{
+ struct flb_counter_ctx *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ flb_free(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ /* EOF */
+ {0}
+};
+
+struct flb_output_plugin out_counter_plugin = {
+ .name = "counter",
+ .description = "Records counter",
+ .cb_init = cb_counter_init,
+ .cb_flush = cb_counter_flush,
+ .cb_exit = cb_counter_exit,
+ .config_map = config_map,
+ .flags = 0,
+};
diff --git a/src/fluent-bit/plugins/out_datadog/CMakeLists.txt b/src/fluent-bit/plugins/out_datadog/CMakeLists.txt
new file mode 100644
index 000000000..6c32b3961
--- /dev/null
+++ b/src/fluent-bit/plugins/out_datadog/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ datadog.c
+ datadog_conf.c
+ datadog_remap.c)
+
+FLB_PLUGIN(out_datadog "${src}" "")
diff --git a/src/fluent-bit/plugins/out_datadog/datadog.c b/src/fluent-bit/plugins/out_datadog/datadog.c
new file mode 100644
index 000000000..082ab0fac
--- /dev/null
+++ b/src/fluent-bit/plugins/out_datadog/datadog.c
@@ -0,0 +1,568 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_io.h>
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_version.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include <msgpack.h>
+
+#include "datadog.h"
+#include "datadog_conf.h"
+#include "datadog_remap.h"
+
+static int cb_datadog_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct flb_out_datadog *ctx = NULL;
+ (void) data;
+
+ ctx = flb_datadog_conf_create(ins, config);
+ if (!ctx) {
+ return -1;
+ }
+
+ /* Set the plugin context */
+ flb_output_set_context(ins, ctx);
+ return 0;
+}
+
+static int64_t timestamp_format(const struct flb_time* tms) {
+ int64_t timestamp = 0;
+
+ /* Format the time, use milliseconds precision not nanoseconds */
+ timestamp = tms->tm.tv_sec * 1000;
+ timestamp += tms->tm.tv_nsec / 1000000;
+
+ /* round to the nearest millisecond */
+ if (tms->tm.tv_nsec % 1000000 >= 500000) {
+ ++timestamp;
+ }
+ return timestamp;
+}
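+
+/*
+ * Example (illustrative): tv_sec = 1700000000 and tv_nsec = 7,600,000 give
+ * 1700000000000 + 7 = 1700000000007 ms; the 600,000 ns remainder rounds the
+ * result up to 1700000000008 ms.
+ */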
+
+static void dd_msgpack_pack_key_value_str(msgpack_packer* mp_pck,
+ const char *key, size_t key_size,
+ const char *val, size_t val_size)
+{
+ msgpack_pack_str(mp_pck, key_size);
+ msgpack_pack_str_body(mp_pck, key, key_size);
+ msgpack_pack_str(mp_pck, val_size);
+ msgpack_pack_str_body(mp_pck,val, val_size);
+}
+
+static int dd_compare_msgpack_obj_key_with_str(const msgpack_object obj, const char *key, size_t key_size) {
+
+ if (obj.via.str.size == key_size && memcmp(obj.via.str.ptr,key, key_size) == 0) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+static int datadog_format(struct flb_config *config,
+ struct flb_input_instance *ins,
+ void *plugin_context,
+ void *flush_ctx,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_data, size_t *out_size)
+{
+ int i;
+ int ind;
+ int byte_cnt = 64;
+ int remap_cnt;
+ int ret;
+ /* for msgpack global structs */
+ size_t array_size = 0;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ /* for sub msgpack objs */
+ int map_size;
+ int64_t timestamp;
+ msgpack_object map;
+ msgpack_object k;
+ msgpack_object v;
+ struct flb_out_datadog *ctx = plugin_context;
+ struct flb_event_chunk *event_chunk;
+
+ /* output buffer */
+ flb_sds_t out_buf;
+ flb_sds_t remapped_tags = NULL;
+ flb_sds_t tmp = NULL;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ /* in the normal flush callback we have the event_chunk set as flush context,
+ * so we don't need to count the events ourselves.
+ * But in test mode the formatter won't get the event_chunk as flush_ctx
+ */
+ if (flush_ctx != NULL) {
+ event_chunk = flush_ctx;
+ array_size = event_chunk->total_events;
+ } else {
+ array_size = flb_mp_count(data, bytes);
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ /* Prepare array for all entries */
+ msgpack_pack_array(&mp_pck, array_size);
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ timestamp = timestamp_format(&log_event.timestamp);
+
+ map = *log_event.body;
+ map_size = map.via.map.size;
+
+ /*
+ * msgpack requires knowing/allocating exact map size in advance, so we need to
+ * loop through the map twice. The first pass here counts how many attributes
+ * we can remap to tags; the second pass later actually performs the remapping.
+ */
+ remap_cnt = 0, byte_cnt = ctx->dd_tags ? flb_sds_len(ctx->dd_tags) : 0;
+ if (ctx->remap) {
+ for (i = 0; i < map_size; i++) {
+ if (dd_attr_need_remapping(map.via.map.ptr[i].key,
+ map.via.map.ptr[i].val) >= 0) {
+ remap_cnt++;
+ /*
+ * here we also *estimate* the size of the buffer needed to hold the
+ * remapped tags. We can't know the size for sure until we do the
+ * remapping; the estimate here is just for efficiency, so that
+ * appending tags won't cause repeated resizing/copying
+ */
+ byte_cnt += 2 * (map.via.map.ptr[i].key.via.str.size +
+ map.via.map.ptr[i].val.via.str.size);
+ }
+ }
+
+ if (!remapped_tags) {
+ remapped_tags = flb_sds_create_size(byte_cnt);
+ if (!remapped_tags) {
+ flb_errno();
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ flb_log_event_decoder_destroy(&log_decoder);
+ return -1;
+ }
+ }
+ else if (flb_sds_len(remapped_tags) < byte_cnt) {
+ tmp = flb_sds_increase(remapped_tags, byte_cnt - flb_sds_len(remapped_tags));
+ if (!tmp) {
+ flb_errno();
+ flb_sds_destroy(remapped_tags);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ flb_log_event_decoder_destroy(&log_decoder);
+ return -1;
+ }
+ remapped_tags = tmp;
+ }
+
+ /*
+ * we reuse this buffer across messages, which means we have to clear it
+ * for each message flb_sds doesn't have a clear function, so we copy a
+ * empty string to achieve the same effect
+ */
+ remapped_tags = flb_sds_copy(remapped_tags, "", 0);
+ }
+
+ /*
+ * build a new object (map) with additional space for the datadog entries;
+ * the remapped attributes are removed from the map. Note: if no dd_tags were
+ * specified AND there are remapped attributes, we need to add 1 to account
+ * for the newly added dd_tags entry
+ */
+ if (remap_cnt && (ctx->dd_tags == NULL)) {
+ msgpack_pack_map(&mp_pck,
+ ctx->nb_additional_entries + map_size + 1 - remap_cnt);
+ }
+ else {
+ msgpack_pack_map(&mp_pck, ctx->nb_additional_entries + map_size - remap_cnt);
+ }
+
+ /* timestamp */
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->json_date_key));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->json_date_key,
+ flb_sds_len(ctx->json_date_key));
+ msgpack_pack_int64(&mp_pck, timestamp);
+
+ /* include_tag_key */
+ if (ctx->include_tag_key == FLB_TRUE) {
+ dd_msgpack_pack_key_value_str(&mp_pck,
+ ctx->tag_key, flb_sds_len(ctx->tag_key),
+ tag, tag_len);
+ }
+
+ /* dd_source */
+ if (ctx->dd_source != NULL) {
+ dd_msgpack_pack_key_value_str(&mp_pck,
+ FLB_DATADOG_DD_SOURCE_KEY,
+ sizeof(FLB_DATADOG_DD_SOURCE_KEY) -1,
+ ctx->dd_source, flb_sds_len(ctx->dd_source));
+ }
+
+ /* dd_service */
+ if (ctx->dd_service != NULL) {
+ dd_msgpack_pack_key_value_str(&mp_pck,
+ FLB_DATADOG_DD_SERVICE_KEY,
+ sizeof(FLB_DATADOG_DD_SERVICE_KEY) -1,
+ ctx->dd_service, flb_sds_len(ctx->dd_service));
+ }
+
+ /* Append initial object k/v */
+ ind = 0;
+ for (i = 0; i < map_size; i++) {
+ k = map.via.map.ptr[i].key;
+ v = map.via.map.ptr[i].val;
+
+ /*
+ * actually perform the remapping here. For matched attr, we remap and
+ * append them to remapped_tags buffer, then skip the rest of processing
+ * (so they won't be packed as attr)
+ */
+ if (ctx->remap && (ind = dd_attr_need_remapping(k, v)) >=0 ) {
+ ret = remapping[ind].remap_to_tag(remapping[ind].remap_tag_name, v,
+ &remapped_tags);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to remap tag: %s, skipping", remapping[ind].remap_tag_name);
+ }
+ continue;
+ }
+
+ /* Mapping between input keys to specific datadog keys */
+ if (ctx->dd_message_key != NULL &&
+ dd_compare_msgpack_obj_key_with_str(k, ctx->dd_message_key,
+ flb_sds_len(ctx->dd_message_key)) == FLB_TRUE) {
+ msgpack_pack_str(&mp_pck, sizeof(FLB_DATADOG_DD_MESSAGE_KEY)-1);
+ msgpack_pack_str_body(&mp_pck, FLB_DATADOG_DD_MESSAGE_KEY,
+ sizeof(FLB_DATADOG_DD_MESSAGE_KEY)-1);
+ }
+ else {
+ msgpack_pack_object(&mp_pck, k);
+ }
+
+ msgpack_pack_object(&mp_pck, v);
+ }
+
+ /* here we concatenate ctx->dd_tags and remapped_tags, depending on their presence */
+ if (remap_cnt) {
+ if (ctx->dd_tags != NULL) {
+ tmp = flb_sds_cat(remapped_tags, FLB_DATADOG_TAG_SEPERATOR,
+ strlen(FLB_DATADOG_TAG_SEPERATOR));
+ if (!tmp) {
+ flb_errno();
+ flb_sds_destroy(remapped_tags);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ flb_log_event_decoder_destroy(&log_decoder);
+ return -1;
+ }
+ remapped_tags = tmp;
+ tmp = flb_sds_cat(remapped_tags, ctx->dd_tags, strlen(ctx->dd_tags));
+ if (!tmp) {
+ flb_errno();
+ flb_sds_destroy(remapped_tags);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ flb_log_event_decoder_destroy(&log_decoder);
+ return -1;
+ }
+ remapped_tags = tmp;
+ }
+ dd_msgpack_pack_key_value_str(&mp_pck,
+ FLB_DATADOG_DD_TAGS_KEY,
+ sizeof(FLB_DATADOG_DD_TAGS_KEY) -1,
+ remapped_tags, flb_sds_len(remapped_tags));
+ }
+ else if (ctx->dd_tags != NULL) {
+ dd_msgpack_pack_key_value_str(&mp_pck,
+ FLB_DATADOG_DD_TAGS_KEY,
+ sizeof(FLB_DATADOG_DD_TAGS_KEY) -1,
+ ctx->dd_tags, flb_sds_len(ctx->dd_tags));
+ }
+ }
+
+ /* Convert from msgpack to JSON */
+ out_buf = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ if (!out_buf) {
+ flb_plg_error(ctx->ins, "error formatting JSON payload");
+ if (remapped_tags) {
+ flb_sds_destroy(remapped_tags);
+ }
+ flb_log_event_decoder_destroy(&log_decoder);
+ return -1;
+ }
+
+ *out_data = out_buf;
+ *out_size = flb_sds_len(out_buf);
+
+ /* Cleanup */
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ if (remapped_tags) {
+ flb_sds_destroy(remapped_tags);
+ }
+
+ return 0;
+}
+
+static void cb_datadog_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ struct flb_out_datadog *ctx = out_context;
+ struct flb_connection *upstream_conn;
+ struct flb_http_client *client;
+ void *out_buf;
+ size_t out_size;
+ flb_sds_t payload_buf;
+ size_t payload_size = 0;
+ void *final_payload_buf = NULL;
+ size_t final_payload_size = 0;
+ size_t b_sent;
+ int ret = FLB_ERROR;
+ int compressed = FLB_FALSE;
+
+ /* Get upstream connection */
+ upstream_conn = flb_upstream_conn_get(ctx->upstream);
+ if (!upstream_conn) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Convert input data into a Datadog JSON payload */
+ ret = datadog_format(config, i_ins,
+ ctx, NULL,
+ event_chunk->type,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ event_chunk->data, event_chunk->size,
+ &out_buf, &out_size);
+ if (ret == -1) {
+ flb_upstream_conn_release(upstream_conn);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ payload_buf = (flb_sds_t) out_buf;
+ payload_size = out_size;
+
+ /* Should we compress the payload ? */
+ if (ctx->compress_gzip == FLB_TRUE) {
+ ret = flb_gzip_compress((void *) payload_buf, payload_size,
+ &final_payload_buf, &final_payload_size);
+ if (ret == -1) {
+ flb_error("[out_http] cannot gzip payload, disabling compression");
+ } else {
+ compressed = FLB_TRUE;
+ }
+ } else {
+ final_payload_buf = payload_buf;
+ final_payload_size = payload_size;
+ }
+
+ /* Create HTTP client context */
+ client = flb_http_client(upstream_conn, FLB_HTTP_POST, ctx->uri,
+ final_payload_buf, final_payload_size,
+ ctx->host, ctx->port,
+ ctx->proxy, 0);
+ if (!client) {
+ flb_upstream_conn_release(upstream_conn);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ /* Add the required headers to the URI */
+ flb_http_add_header(client, "User-Agent", 10, "Fluent-Bit", 10);
+ flb_http_add_header(client, FLB_DATADOG_API_HDR, sizeof(FLB_DATADOG_API_HDR) - 1, ctx->api_key, flb_sds_len(ctx->api_key));
+ flb_http_add_header(client, FLB_DATADOG_ORIGIN_HDR, sizeof(FLB_DATADOG_ORIGIN_HDR) - 1, "Fluent-Bit", 10);
+ flb_http_add_header(client, FLB_DATADOG_ORIGIN_VERSION_HDR, sizeof(FLB_DATADOG_ORIGIN_VERSION_HDR) - 1, FLB_VERSION_STR, sizeof(FLB_VERSION_STR) - 1);
+ flb_http_add_header(client,
+ FLB_DATADOG_CONTENT_TYPE, sizeof(FLB_DATADOG_CONTENT_TYPE) - 1,
+ FLB_DATADOG_MIME_JSON, sizeof(FLB_DATADOG_MIME_JSON) - 1);
+
+ /* Content Encoding: gzip */
+ if (compressed == FLB_TRUE) {
+ flb_http_set_content_encoding_gzip(client);
+ }
+ /* TODO: Append other headers if needed*/
+
+ /* finally send the query */
+ ret = flb_http_do(client, &b_sent);
+ if (ret == 0) {
+ if (client->resp.status < 200 || client->resp.status > 205) {
+ flb_plg_error(ctx->ins, "%s%s:%i HTTP status=%i",
+ ctx->scheme, ctx->host, ctx->port,
+ client->resp.status);
+ ret = FLB_RETRY;
+ }
+ else {
+ if (client->resp.payload) {
+ flb_plg_debug(ctx->ins, "%s%s, port=%i, HTTP status=%i payload=%s",
+ ctx->scheme, ctx->host, ctx->port,
+ client->resp.status, client->resp.payload);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "%s%s, port=%i, HTTP status=%i",
+ ctx->scheme, ctx->host, ctx->port,
+ client->resp.status);
+ }
+ ret = FLB_OK;
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "could not flush records to %s:%i (http_do=%i)",
+ ctx->host, ctx->port, ret);
+ ret = FLB_RETRY;
+ }
+
+ /*
+ * If the final_payload_buf buffer is different from payload_buf, it means
+ * we generated a new (compressed) payload that must be freed.
+ */
+ if (final_payload_buf != payload_buf) {
+ flb_free(final_payload_buf);
+ }
+ /* Destroy HTTP client context */
+ flb_sds_destroy(payload_buf);
+ flb_http_client_destroy(client);
+ flb_upstream_conn_release(upstream_conn);
+
+ FLB_OUTPUT_RETURN(ret);
+}
+
+
+static int cb_datadog_exit(void *data, struct flb_config *config)
+{
+ struct flb_out_datadog *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ flb_datadog_conf_destroy(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "compress", "false",
+ 0, FLB_FALSE, 0,
+ "compresses the payload in GZIP format, "
+ "Datadog supports and recommends setting this to 'gzip'."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "apikey", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_datadog, api_key),
+ "Datadog API key"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "dd_service", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_datadog, dd_service),
+ "The human readable name for your service generating the logs "
+ "- the name of your application or database."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "dd_source", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_datadog, dd_source),
+ "A human readable name for the underlying technology of your service. "
+ "For example, 'postgres' or 'nginx'."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "dd_tags", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_datadog, dd_tags),
+ "The tags you want to assign to your logs in Datadog."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "proxy", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_datadog, proxy),
+ "Specify an HTTP Proxy. The expected format of this value is http://host:port. "
+ "Note that https is not supported yet."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "include_tag_key", "false",
+ 0, FLB_TRUE, offsetof(struct flb_out_datadog, include_tag_key),
+ "If enabled, tag is appended to output. "
+ "The key name is used 'tag_key' property."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "tag_key", FLB_DATADOG_DEFAULT_TAG_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_out_datadog, tag_key),
+ "The key name of tag. If 'include_tag_key' is false, "
+ "This property is ignored"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "dd_message_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_datadog, dd_message_key),
+ "By default, the plugin searches for the key 'log' "
+ "and remap the value to the key 'message'. "
+ "If the property is set, the plugin will search the property name key."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "provider", NULL,
+ 0, FLB_FALSE, 0,
+ "To activate the remapping, specify configuration flag provider with value 'ecs'"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "json_date_key", FLB_DATADOG_DEFAULT_TIME_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_out_datadog, json_date_key),
+ "Date key name for output."
+ },
+
+ /* EOF */
+ {0}
+};
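+
+/*
+ * Illustration only (not part of the plugin source): a minimal classic-mode
+ * configuration exercising the properties defined in the map above; the
+ * apikey value is a placeholder.
+ *
+ *   [OUTPUT]
+ *       Name        datadog
+ *       Match       *
+ *       apikey      <YOUR_DD_API_KEY>
+ *       compress    gzip
+ *       dd_service  my-app
+ *       dd_source   nginx
+ *       dd_tags     env:prod,team:platform
+ *       provider    ecs
+ */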
+
+struct flb_output_plugin out_datadog_plugin = {
+ .name = "datadog",
+ .description = "Send events to DataDog HTTP Event Collector",
+ .cb_init = cb_datadog_init,
+ .cb_flush = cb_datadog_flush,
+ .cb_exit = cb_datadog_exit,
+
+ /* Test */
+ .test_formatter.callback = datadog_format,
+
+ /* Config map */
+ .config_map = config_map,
+
+ /* Plugin flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_datadog/datadog.h b/src/fluent-bit/plugins/out_datadog/datadog.h
new file mode 100644
index 000000000..1ca2d6f05
--- /dev/null
+++ b/src/fluent-bit/plugins/out_datadog/datadog.h
@@ -0,0 +1,81 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_DATADOG_H
+#define FLB_OUT_DATADOG_H
+
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_upstream.h>
+
+#define FLB_DATADOG_DEFAULT_HOST "http-intake.logs.datadoghq.com"
+#define FLB_DATADOG_DEFAULT_PORT 443
+#define FLB_DATADOG_DEFAULT_TIME_KEY "timestamp"
+#define FLB_DATADOG_DEFAULT_TAG_KEY "tagkey"
+#define FLB_DATADOG_DD_SOURCE_KEY "ddsource"
+#define FLB_DATADOG_DD_SERVICE_KEY "service"
+#define FLB_DATADOG_DD_TAGS_KEY "ddtags"
+#define FLB_DATADOG_DD_MESSAGE_KEY "message"
+#define FLB_DATADOG_DD_LOG_KEY "log"
+
+#define FLB_DATADOG_REMAP_PROVIDER "ecs"
+#define FLB_DATADOG_TAG_SEPERATOR ","
+
+#define FLB_DATADOG_API_HDR "DD-API-KEY"
+#define FLB_DATADOG_ORIGIN_HDR "DD-EVP-ORIGIN"
+#define FLB_DATADOG_ORIGIN_VERSION_HDR "DD-EVP-ORIGIN-VERSION"
+
+#define FLB_DATADOG_CONTENT_TYPE "Content-Type"
+#define FLB_DATADOG_MIME_JSON "application/json"
+
+struct flb_out_datadog {
+
+ /* Proxy */
+ flb_sds_t proxy;
+ char *proxy_host;
+ int proxy_port;
+
+ /* Configuration */
+ flb_sds_t scheme;
+ flb_sds_t host;
+ int port;
+ flb_sds_t uri;
+ flb_sds_t api_key;
+ int include_tag_key;
+ flb_sds_t tag_key;
+ bool remap;
+
+ /* final result */
+ flb_sds_t json_date_key;
+ int nb_additional_entries;
+ flb_sds_t dd_source;
+ flb_sds_t dd_service;
+ flb_sds_t dd_tags;
+ flb_sds_t dd_message_key;
+
+ /* Compression mode (gzip) */
+ int compress_gzip;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *upstream;
+
+ /* Plugin instance reference */
+ struct flb_output_instance *ins;
+};
+
+#endif // FLB_OUT_DATADOG_H
diff --git a/src/fluent-bit/plugins/out_datadog/datadog_conf.c b/src/fluent-bit/plugins/out_datadog/datadog_conf.c
new file mode 100644
index 000000000..68377386c
--- /dev/null
+++ b/src/fluent-bit/plugins/out_datadog/datadog_conf.c
@@ -0,0 +1,223 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_config_map.h>
+
+#include "datadog.h"
+#include "datadog_conf.h"
+
+struct flb_out_datadog *flb_datadog_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ struct flb_out_datadog *ctx = NULL;
+ int io_flags = 0;
+ struct flb_upstream *upstream;
+ const char *api_key;
+ const char *tmp;
+ flb_sds_t tmp_sds;
+
+ int ret;
+ char *protocol = NULL;
+ char *host = NULL;
+ char *port = NULL;
+ char *uri = NULL;
+
+ /* Start resource creation */
+ ctx = flb_calloc(1, sizeof(struct flb_out_datadog));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ ctx->nb_additional_entries = 0;
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "flb_output_config_map_set failed");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ tmp = flb_output_get_property("proxy", ins);
+ if (tmp) {
+ ret = flb_utils_url_split(tmp, &protocol, &host, &port, &uri);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not parse proxy parameter: '%s'", tmp);
+ flb_datadog_conf_destroy(ctx);
+ return NULL;
+ }
+
+ ctx->proxy_host = host;
+ ctx->proxy_port = atoi(port);
+ flb_free(protocol);
+ flb_free(port);
+ flb_free(uri);
+ }
+
+ /* use TLS ? */
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ tmp_sds = flb_sds_create("https://");
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ tmp_sds = flb_sds_create("http://");
+ }
+ if (!tmp_sds) {
+ flb_errno();
+ flb_datadog_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->scheme = tmp_sds;
+ flb_plg_debug(ctx->ins, "scheme: %s", ctx->scheme);
+
+ /* configure URI */
+ api_key = flb_output_get_property("apikey", ins);
+ if (api_key == NULL) {
+ flb_plg_error(ctx->ins, "no ApiKey configuration key defined");
+ flb_datadog_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* Tag Key */
+ if (ctx->include_tag_key == FLB_TRUE) {
+ ctx->nb_additional_entries++;
+ }
+
+ tmp = flb_output_get_property("dd_source", ins);
+ if (tmp) {
+ ctx->nb_additional_entries++;
+ }
+
+ tmp = flb_output_get_property("dd_service", ins);
+ if (tmp) {
+ ctx->nb_additional_entries++;
+ }
+
+ tmp = flb_output_get_property("dd_tags", ins);
+ if (tmp) {
+ ctx->nb_additional_entries++;
+ }
+
+ tmp = flb_output_get_property("provider", ins);
+ ctx->remap = tmp && (strlen(tmp) == strlen(FLB_DATADOG_REMAP_PROVIDER)) && \
+ (strncmp(tmp, FLB_DATADOG_REMAP_PROVIDER, strlen(tmp)) == 0);
+
+ ctx->uri = flb_sds_create("/api/v2/logs");
+ if (!ctx->uri) {
+ flb_plg_error(ctx->ins, "error on uri generation");
+ flb_datadog_conf_destroy(ctx);
+ return NULL;
+ }
+
+ flb_plg_debug(ctx->ins, "uri: %s", ctx->uri);
+
+ /* Get network configuration */
+ if (!ins->host.name) {
+ tmp_sds = flb_sds_create(FLB_DATADOG_DEFAULT_HOST);
+ }
+ else {
+ tmp_sds = flb_sds_create(ins->host.name);
+ }
+ if (!tmp_sds) {
+ flb_errno();
+ flb_datadog_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->host = tmp_sds;
+ flb_plg_debug(ctx->ins, "host: %s", ctx->host);
+
+ if (ins->host.port != 0) {
+ ctx->port = ins->host.port;
+ }
+ if (ctx->port == 0) {
+ ctx->port = FLB_DATADOG_DEFAULT_PORT;
+ if (ins->use_tls == FLB_FALSE) {
+ ctx->port = 80;
+ }
+ }
+ flb_plg_debug(ctx->ins, "port: %i", ctx->port);
+
+ /* Date tag for JSON output */
+ ctx->nb_additional_entries++;
+ flb_plg_debug(ctx->ins, "json_date_key: %s", ctx->json_date_key);
+
+ /* Compress (gzip) */
+ tmp = flb_output_get_property("compress", ins);
+ ctx->compress_gzip = FLB_FALSE;
+ if (tmp) {
+ if (strcasecmp(tmp, "gzip") == 0) {
+ ctx->compress_gzip = FLB_TRUE;
+ }
+ }
+ flb_plg_debug(ctx->ins, "compress_gzip: %i", ctx->compress_gzip);
+
+ /* Prepare an upstream handler */
+ if (ctx->proxy) {
+ flb_plg_trace(ctx->ins, "[out_datadog] Upstream Proxy=%s:%i",
+ ctx->proxy_host, ctx->proxy_port);
+ upstream = flb_upstream_create(config,
+ ctx->proxy_host,
+ ctx->proxy_port,
+ io_flags,
+ ins->tls);
+ }
+ else {
+ upstream = flb_upstream_create(config, ctx->host, ctx->port, io_flags, ins->tls);
+ }
+
+ if (!upstream) {
+ flb_plg_error(ctx->ins, "cannot create Upstream context");
+ flb_datadog_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->upstream = upstream;
+ flb_output_upstream_set(ctx->upstream, ins);
+
+ return ctx;
+}
+
+int flb_datadog_conf_destroy(struct flb_out_datadog *ctx)
+{
+ if (!ctx) {
+ return -1;
+ }
+
+ if (ctx->proxy_host) {
+ flb_free(ctx->proxy_host);
+ }
+ if (ctx->scheme) {
+ flb_sds_destroy(ctx->scheme);
+ }
+ if (ctx->host) {
+ flb_sds_destroy(ctx->host);
+ }
+ if (ctx->uri) {
+ flb_sds_destroy(ctx->uri);
+ }
+ if (ctx->upstream) {
+ flb_upstream_destroy(ctx->upstream);
+ }
+ flb_free(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/out_datadog/datadog_conf.h b/src/fluent-bit/plugins/out_datadog/datadog_conf.h
new file mode 100644
index 000000000..057a5e5f2
--- /dev/null
+++ b/src/fluent-bit/plugins/out_datadog/datadog_conf.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_DATADOG_CONF_H
+#define FLB_OUT_DATADOG_CONF_H
+
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_config.h>
+
+#include "datadog.h"
+
+struct flb_out_datadog *flb_datadog_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+
+int flb_datadog_conf_destroy(struct flb_out_datadog *ctx);
+
+#endif // FLB_OUT_DATADOG_CONF_H
diff --git a/src/fluent-bit/plugins/out_datadog/datadog_remap.c b/src/fluent-bit/plugins/out_datadog/datadog_remap.c
new file mode 100644
index 000000000..7599a8f80
--- /dev/null
+++ b/src/fluent-bit/plugins/out_datadog/datadog_remap.c
@@ -0,0 +1,277 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "datadog.h"
+#include "datadog_remap.h"
+
+const char *ECS_ARN_PREFIX = "arn:aws:ecs:";
+const char *ECS_CLUSTER_PREFIX = "cluster/";
+const char *ECS_TASK_PREFIX = "task/";
+
+static int dd_remap_append_kv_to_ddtags(const char *key,
+ const char *val, size_t val_len, flb_sds_t *dd_tags_buf)
+{
+ flb_sds_t tmp;
+
+ if (flb_sds_len(*dd_tags_buf) != 0) {
+ tmp = flb_sds_cat(*dd_tags_buf, FLB_DATADOG_TAG_SEPERATOR, strlen(FLB_DATADOG_TAG_SEPERATOR));
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+ *dd_tags_buf = tmp;
+ }
+
+ tmp = flb_sds_cat(*dd_tags_buf, key, strlen(key));
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+ *dd_tags_buf = tmp;
+
+ tmp = flb_sds_cat(*dd_tags_buf, ":", 1);
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+ *dd_tags_buf = tmp;
+
+ tmp = flb_sds_cat(*dd_tags_buf, val, val_len);
+ if (!tmp) {
+ flb_errno();
+ return -1;
+ }
+ *dd_tags_buf = tmp;
+
+ return 0;
+}
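+
+/*
+ * Illustration (hypothetical values): starting from a dd_tags buffer holding
+ * "cluster_name:my-cluster", appending key "region" with value "us-west-2"
+ * through the helper above yields "cluster_name:my-cluster,region:us-west-2";
+ * the separator is only inserted when the buffer is non-empty.
+ */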
+
+/* default remapping: just move the key/val pair under dd_tags */
+static int dd_remap_move_to_tags(const char *tag_name,
+ msgpack_object attr_value, flb_sds_t *dd_tags_buf)
+{
+ return dd_remap_append_kv_to_ddtags(tag_name, attr_value.via.str.ptr,
+ attr_value.via.str.size, dd_tags_buf);
+}
+
+/* remapping function for container_name */
+static int dd_remap_container_name(const char *tag_name,
+ msgpack_object attr_value, flb_sds_t *dd_tags_buf)
+{
+ /* remove the first / if present */
+ unsigned int adjust;
+ flb_sds_t buf = NULL;
+ int ret;
+
+ adjust = attr_value.via.str.ptr[0] == '/' ? 1 : 0;
+ buf = flb_sds_create_len(attr_value.via.str.ptr + adjust,
+ attr_value.via.str.size - adjust);
+ if (!buf) {
+ flb_errno();
+ return -1;
+ }
+ ret = dd_remap_append_kv_to_ddtags(tag_name, buf, strlen(buf), dd_tags_buf);
+ flb_sds_destroy(buf);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/* remapping function for ecs_cluster */
+static int dd_remap_ecs_cluster(const char *tag_name,
+ msgpack_object attr_value, flb_sds_t *dd_tags_buf)
+{
+ flb_sds_t buf = NULL;
+ char *cluster_name;
+ int ret;
+
+ buf = flb_sds_create_len(attr_value.via.str.ptr, attr_value.via.str.size);
+ if (!buf) {
+ flb_errno();
+ return -1;
+ }
+ cluster_name = strstr(buf, ECS_CLUSTER_PREFIX);
+
+ if (cluster_name != NULL) {
+ cluster_name += strlen(ECS_CLUSTER_PREFIX);
+ ret = dd_remap_append_kv_to_ddtags(tag_name, cluster_name, strlen(cluster_name), dd_tags_buf);
+ if (ret < 0) {
+ flb_sds_destroy(buf);
+ return -1;
+ }
+ }
+ else {
+ /*
+ * here the input is invalid: not in form of "XXXXXXcluster/"cluster-name
+ * we preverse the original value under tag "cluster_name".
+ */
+ ret = dd_remap_append_kv_to_ddtags(tag_name, buf, strlen(buf), dd_tags_buf);
+ if (ret < 0) {
+ flb_sds_destroy(buf);
+ return -1;
+ }
+ }
+ flb_sds_destroy(buf);
+ return 0;
+}
+
+/* remapping function for ecs_task_definition */
+static int dd_remap_ecs_task_definition(const char *tag_name,
+ msgpack_object attr_value, flb_sds_t *dd_tags_buf)
+{
+ flb_sds_t buf = NULL;
+ char *split;
+ int ret;
+
+ buf = flb_sds_create_len(attr_value.via.str.ptr, attr_value.via.str.size);
+ if (!buf) {
+ flb_errno();
+ return -1;
+ }
+ split = strchr(buf, ':');
+
+ if (split != NULL) {
+ ret = dd_remap_append_kv_to_ddtags("task_family", buf, split-buf, dd_tags_buf);
+ if (ret < 0) {
+ flb_sds_destroy(buf);
+ return -1;
+ }
+ ret = dd_remap_append_kv_to_ddtags("task_version", split+1, strlen(split+1), dd_tags_buf);
+ if (ret < 0) {
+ flb_sds_destroy(buf);
+ return -1;
+ }
+ }
+ else {
+ /*
+ * here the input is invalid: not of the form "task_name:task_version",
+ * so we preserve the original value under the tag "ecs_task_definition".
+ */
+ ret = dd_remap_append_kv_to_ddtags(tag_name, buf, strlen(buf), dd_tags_buf);
+ if (ret < 0) {
+ flb_sds_destroy(buf);
+ return -1;
+ }
+ }
+ flb_sds_destroy(buf);
+ return 0;
+}
+
+/* remapping function for ecs_task_arn */
+static int dd_remap_ecs_task_arn(const char *tag_name,
+ msgpack_object attr_value, flb_sds_t *dd_tags_buf)
+{
+ flb_sds_t buf;
+ char *remain;
+ char *split;
+ char *task_arn;
+ int ret;
+
+ buf = flb_sds_create_len(attr_value.via.str.ptr, attr_value.via.str.size);
+ if (!buf) {
+ flb_errno();
+ return -1;
+ }
+
+ /*
+ * if the input is invalid, not in the form of "arn:aws:ecs:region:XXXX"
+ * then we won't add the "region" in the dd_tags.
+ */
+ if ((strlen(buf) > strlen(ECS_ARN_PREFIX)) &&
+ (strncmp(buf, ECS_ARN_PREFIX, strlen(ECS_ARN_PREFIX)) == 0)) {
+
+ remain = buf + strlen(ECS_ARN_PREFIX);
+ split = strchr(remain, ':');
+
+ if (split != NULL) {
+ ret = dd_remap_append_kv_to_ddtags("region", remain, split-remain, dd_tags_buf);
+ if (ret < 0) {
+ flb_sds_destroy(buf);
+ return -1;
+ }
+ }
+ }
+
+ task_arn = strstr(buf, ECS_TASK_PREFIX);
+ if (task_arn != NULL) {
+ /* parse out the task_arn */
+ task_arn += strlen(ECS_TASK_PREFIX);
+ ret = dd_remap_append_kv_to_ddtags(tag_name, task_arn, strlen(task_arn), dd_tags_buf);
+ }
+ else {
+ /*
+ * if the input is invalid, not in the form of "XXXXXXXXtask/"task-arn
+ * then we preverse the original value under tag "task_arn".
+ */
+ ret = dd_remap_append_kv_to_ddtags(tag_name, buf, strlen(buf), dd_tags_buf);
+ }
+ flb_sds_destroy(buf);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Statically defines the set of remapping rules in the form of
+ * 1) original attr name 2) remapped tag name 3) remapping function.
+ * The remapping functions assume the input is valid, and will always
+ * produce one or more tags to be added to dd_tags.
+ */
+const struct dd_attr_tag_remapping remapping[] = {
+ {"container_id", "container_id", dd_remap_move_to_tags},
+ {"container_name", "container_name", dd_remap_container_name},
+ {"container_image", "container_image", dd_remap_move_to_tags},
+ {"ecs_cluster", "cluster_name", dd_remap_ecs_cluster},
+ {"ecs_task_definition", "ecs_task_definition", dd_remap_ecs_task_definition},
+ {"ecs_task_arn", "task_arn", dd_remap_ecs_task_arn}
+};
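+
+/*
+ * Illustration only (hypothetical ECS attribute values): with the table
+ * above, a record carrying
+ *
+ *   container_name      = "/my-app"
+ *   ecs_cluster         = "arn:aws:ecs:us-west-2:111122223333:cluster/my-cluster"
+ *   ecs_task_definition = "my-task:3"
+ *   ecs_task_arn        = "arn:aws:ecs:us-west-2:111122223333:task/abcd1234"
+ *
+ * has those attributes dropped from the JSON body and remapped into ddtags as
+ *
+ *   container_name:my-app,cluster_name:my-cluster,task_family:my-task,
+ *   task_version:3,region:us-west-2,task_arn:abcd1234
+ */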
+
+/*
+ * Check against dd_attr_tag_remapping to see if a given attribute key/val
+ * pair needs remapping. The key has to match origin_attr_name, and the val
+ * has to be of type string and non-empty.
+ * The return value is the index of the remapping rule in dd_attr_tag_remapping,
+ * or -1 if no remapping is needed.
+ */
+int dd_attr_need_remapping(const msgpack_object key, const msgpack_object val)
+{
+ int i;
+
+ if ((val.type != MSGPACK_OBJECT_STR) || (val.via.str.size == 0)) {
+ return -1;
+ }
+
+ for (i = 0; i < sizeof(remapping) / sizeof(struct dd_attr_tag_remapping); i++) {
+ if ((key.via.str.size == strlen(remapping[i].origin_attr_name) &&
+ memcmp(key.via.str.ptr,
+ remapping[i].origin_attr_name, key.via.str.size) == 0)) {
+ return i;
+ }
+ }
+
+ return -1;
+}
diff --git a/src/fluent-bit/plugins/out_datadog/datadog_remap.h b/src/fluent-bit/plugins/out_datadog/datadog_remap.h
new file mode 100644
index 000000000..f7061b0f2
--- /dev/null
+++ b/src/fluent-bit/plugins/out_datadog/datadog_remap.h
@@ -0,0 +1,37 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_DATADOG_REMAP_H
+#define FLB_OUT_DATADOG_REMAP_H
+
+#include "datadog.h"
+
+typedef int (*dd_attr_remap_to_tag_fn)(const char*, msgpack_object, flb_sds_t*);
+
+struct dd_attr_tag_remapping {
+ char* origin_attr_name; /* original attribute name */
+ char* remap_tag_name; /* tag name to remap to */
+ dd_attr_remap_to_tag_fn remap_to_tag; /* remapping function */
+};
+
+extern const struct dd_attr_tag_remapping remapping[];
+
+int dd_attr_need_remapping(const msgpack_object key, const msgpack_object val);
+
+#endif // FLB_OUT_DATADOG_REMAP_H
diff --git a/src/fluent-bit/plugins/out_es/CMakeLists.txt b/src/fluent-bit/plugins/out_es/CMakeLists.txt
new file mode 100644
index 000000000..4fad4f27c
--- /dev/null
+++ b/src/fluent-bit/plugins/out_es/CMakeLists.txt
@@ -0,0 +1,8 @@
+set(src
+ es_bulk.c
+ es_conf.c
+ es.c
+ murmur3.c)
+
+FLB_PLUGIN(out_es "${src}" "mk_core")
+target_link_libraries(flb-plugin-out_es)
diff --git a/src/fluent-bit/plugins/out_es/es.c b/src/fluent-bit/plugins/out_es/es.c
new file mode 100644
index 000000000..db2bcee5b
--- /dev/null
+++ b/src/fluent-bit/plugins/out_es/es.c
@@ -0,0 +1,1230 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_signv4.h>
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_ra_key.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <msgpack.h>
+
+#include <time.h>
+
+#include "es.h"
+#include "es_conf.h"
+#include "es_bulk.h"
+#include "murmur3.h"
+
+struct flb_output_plugin out_es_plugin;
+
+static int es_pack_array_content(msgpack_packer *tmp_pck,
+ msgpack_object array,
+ struct flb_elasticsearch *ctx);
+
+#ifdef FLB_HAVE_AWS
+static flb_sds_t add_aws_auth(struct flb_http_client *c,
+ struct flb_elasticsearch *ctx)
+{
+ flb_sds_t signature = NULL;
+ int ret;
+
+ flb_plg_debug(ctx->ins, "Signing request with AWS Sigv4");
+
+ /* Amazon OpenSearch Sigv4 does not allow the host header to include the port */
+ ret = flb_http_strip_port_from_host(c);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "could not strip port from host for sigv4");
+ return NULL;
+ }
+
+ /* AWS Fluent Bit user agent */
+ flb_http_add_header(c, "User-Agent", 10, "aws-fluent-bit-plugin", 21);
+
+ signature = flb_signv4_do(c, FLB_TRUE, FLB_TRUE, time(NULL),
+ ctx->aws_region, ctx->aws_service_name,
+ S3_MODE_SIGNED_PAYLOAD, ctx->aws_unsigned_headers,
+ ctx->aws_provider);
+ if (!signature) {
+ flb_plg_error(ctx->ins, "could not sign request with sigv4");
+ return NULL;
+ }
+ return signature;
+}
+#endif /* FLB_HAVE_AWS */
+
+static int es_pack_map_content(msgpack_packer *tmp_pck,
+ msgpack_object map,
+ struct flb_elasticsearch *ctx)
+{
+ int i;
+ char *ptr_key = NULL;
+ char buf_key[256];
+ msgpack_object *k;
+ msgpack_object *v;
+
+ for (i = 0; i < map.via.map.size; i++) {
+ k = &map.via.map.ptr[i].key;
+ v = &map.via.map.ptr[i].val;
+ ptr_key = NULL;
+
+ /* Store key */
+ const char *key_ptr = NULL;
+ size_t key_size = 0;
+
+ if (k->type == MSGPACK_OBJECT_BIN) {
+ key_ptr = k->via.bin.ptr;
+ key_size = k->via.bin.size;
+ }
+ else if (k->type == MSGPACK_OBJECT_STR) {
+ key_ptr = k->via.str.ptr;
+ key_size = k->via.str.size;
+ }
+
+ if (key_size < (sizeof(buf_key) - 1)) {
+ memcpy(buf_key, key_ptr, key_size);
+ buf_key[key_size] = '\0';
+ ptr_key = buf_key;
+ }
+ else {
+ /* Long map keys have a performance penalty */
+ ptr_key = flb_malloc(key_size + 1);
+ if (!ptr_key) {
+ flb_errno();
+ return -1;
+ }
+
+ memcpy(ptr_key, key_ptr, key_size);
+ ptr_key[key_size] = '\0';
+ }
+
+ /*
+ * Sanitize the key name; Elasticsearch 2.x doesn't allow dots
+ * in field names:
+ *
+ * https://goo.gl/R5NMTr
+ */
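+ /* e.g. (illustrative) a key "kubernetes.labels.app" becomes "kubernetes_labels_app" */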
+ if (ctx->replace_dots == FLB_TRUE) {
+ char *p = ptr_key;
+ char *end = ptr_key + key_size;
+ while (p != end) {
+ if (*p == '.') *p = '_';
+ p++;
+ }
+ }
+
+ /* Append the key */
+ msgpack_pack_str(tmp_pck, key_size);
+ msgpack_pack_str_body(tmp_pck, ptr_key, key_size);
+
+ /* Release temporary key if was allocated */
+ if (ptr_key && ptr_key != buf_key) {
+ flb_free(ptr_key);
+ }
+ ptr_key = NULL;
+
+ /*
+ * The value can be any data type, if it's a map we need to
+ * sanitize to avoid dots.
+ */
+ if (v->type == MSGPACK_OBJECT_MAP) {
+ msgpack_pack_map(tmp_pck, v->via.map.size);
+ es_pack_map_content(tmp_pck, *v, ctx);
+ }
+ /*
+ * The value can be any data type, if it's an array we need to
+ * pass it to es_pack_array_content.
+ */
+ else if (v->type == MSGPACK_OBJECT_ARRAY) {
+ msgpack_pack_array(tmp_pck, v->via.array.size);
+ es_pack_array_content(tmp_pck, *v, ctx);
+ }
+ else {
+ msgpack_pack_object(tmp_pck, *v);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Iterate through the array and sanitize elements.
+ * Mutual recursion with es_pack_map_content.
+ */
+static int es_pack_array_content(msgpack_packer *tmp_pck,
+ msgpack_object array,
+ struct flb_elasticsearch *ctx)
+{
+ int i;
+ msgpack_object *e;
+
+ for (i = 0; i < array.via.array.size; i++) {
+ e = &array.via.array.ptr[i];
+ if (e->type == MSGPACK_OBJECT_MAP)
+ {
+ msgpack_pack_map(tmp_pck, e->via.map.size);
+ es_pack_map_content(tmp_pck, *e, ctx);
+ }
+ else if (e->type == MSGPACK_OBJECT_ARRAY)
+ {
+ msgpack_pack_array(tmp_pck, e->via.array.size);
+ es_pack_array_content(tmp_pck, *e, ctx);
+ }
+ else
+ {
+ msgpack_pack_object(tmp_pck, *e);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Get the _id value from the incoming record.
+ * If it succeeds, return the value as an flb_sds_t.
+ * If it fails, return NULL.
+*/
+static flb_sds_t es_get_id_value(struct flb_elasticsearch *ctx,
+ msgpack_object *map)
+{
+ struct flb_ra_value *rval = NULL;
+ flb_sds_t tmp_str;
+ rval = flb_ra_get_value_object(ctx->ra_id_key, *map);
+ if (rval == NULL) {
+ flb_plg_warn(ctx->ins, "the value of %s is missing",
+ ctx->id_key);
+ return NULL;
+ }
+ else if(rval->o.type != MSGPACK_OBJECT_STR) {
+ flb_plg_warn(ctx->ins, "the value of %s is not string",
+ ctx->id_key);
+ flb_ra_key_value_destroy(rval);
+ return NULL;
+ }
+
+ tmp_str = flb_sds_create_len(rval->o.via.str.ptr,
+ rval->o.via.str.size);
+ if (tmp_str == NULL) {
+ flb_plg_warn(ctx->ins, "cannot create ID string from record");
+ flb_ra_key_value_destroy(rval);
+ return NULL;
+ }
+ flb_ra_key_value_destroy(rval);
+ return tmp_str;
+}
+
+static int compose_index_header(struct flb_elasticsearch *ctx,
+ int es_index_custom_len,
+ char *logstash_index, size_t logstash_index_size,
+ char *separator_str,
+ struct tm *tm)
+{
+ int ret;
+ int len;
+ char *p;
+ size_t s;
+
+ /* Compose Index header */
+ if (es_index_custom_len > 0) {
+ p = logstash_index + es_index_custom_len;
+ } else {
+ p = logstash_index + flb_sds_len(ctx->logstash_prefix);
+ }
+ len = p - logstash_index;
+ ret = snprintf(p, logstash_index_size - len, "%s",
+ separator_str);
+ if (ret > logstash_index_size - len) {
+ /* exceed limit */
+ return -1;
+ }
+ p += strlen(separator_str);
+ len += strlen(separator_str);
+
+ s = strftime(p, logstash_index_size - len,
+ ctx->logstash_dateformat, tm);
+ if (s==0) {
+ /* exceed limit */
+ return -1;
+ }
+ p += s;
+ *p++ = '\0';
+
+ return 0;
+}
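+
+/*
+ * Illustration (defaults assumed): with logstash_prefix "logstash", separator
+ * "-" and logstash_dateformat "%Y.%m.%d", a record timestamped 2024-05-05
+ * produces the index name "logstash-2024.05.05".
+ */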
+
+/*
+ * Convert the internal Fluent Bit data representation to the format
+ * required by Elasticsearch.
+ *
+ * 'Sadly' this process involves converting from msgpack to JSON.
+ */
+static int elasticsearch_format(struct flb_config *config,
+ struct flb_input_instance *ins,
+ void *plugin_context,
+ void *flush_ctx,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_data, size_t *out_size)
+{
+ int ret;
+ int len;
+ int map_size;
+ int index_len = 0;
+ size_t s = 0;
+ size_t off = 0;
+ size_t off_prev = 0;
+ char *es_index;
+ char logstash_index[256];
+ char time_formatted[256];
+ char index_formatted[256];
+ char es_uuid[37];
+ flb_sds_t out_buf;
+ size_t out_buf_len = 0;
+ flb_sds_t tmp_buf;
+ flb_sds_t id_key_str = NULL;
+ // msgpack_unpacked result;
+ // msgpack_object root;
+ msgpack_object map;
+ // msgpack_object *obj;
+ flb_sds_t j_index;
+ struct es_bulk *bulk;
+ struct tm tm;
+ struct flb_time tms;
+ msgpack_sbuffer tmp_sbuf;
+ msgpack_packer tmp_pck;
+ uint16_t hash[8];
+ int es_index_custom_len;
+ struct flb_elasticsearch *ctx = plugin_context;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ j_index = flb_sds_create_size(ES_BULK_HEADER);
+ if (j_index == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+ flb_sds_destroy(j_index);
+
+ return -1;
+ }
+
+ /* Create the bulk composer */
+ bulk = es_bulk_create(bytes);
+ if (!bulk) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_sds_destroy(j_index);
+ return -1;
+ }
+
+ /* Copy logstash prefix if logstash format is enabled */
+ if (ctx->logstash_format == FLB_TRUE) {
+ strncpy(logstash_index, ctx->logstash_prefix, sizeof(logstash_index));
+ logstash_index[sizeof(logstash_index) - 1] = '\0';
+ }
+
+ /*
+ * If logstash format and id generation are disabled, pre-generate
+ * the index line for all records.
+ *
+ * The header stored in 'j_index' will be used for all records in
+ * this payload.
+ */
+ if (ctx->logstash_format == FLB_FALSE && ctx->generate_id == FLB_FALSE) {
+ flb_time_get(&tms);
+ gmtime_r(&tms.tm.tv_sec, &tm);
+ strftime(index_formatted, sizeof(index_formatted) - 1,
+ ctx->index, &tm);
+ es_index = index_formatted;
+ if (ctx->suppress_type_name) {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ ES_BULK_INDEX_FMT_WITHOUT_TYPE,
+ ctx->es_action,
+ es_index);
+ }
+ else {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ ES_BULK_INDEX_FMT,
+ ctx->es_action,
+ es_index, ctx->type);
+ }
+ }
+
+ /*
+ * Some broken clients may have time drift back to the year 1970,
+ * which would generate a corresponding index in Elasticsearch.
+ * To prevent generating millions of indexes, we can opt to always
+ * use the current time for index generation.
+ */
+ if (ctx->current_time_index == FLB_TRUE) {
+ flb_time_get(&tms);
+ }
+
+ /* Iterate each record and do further formatting */
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+
+ /* Only pop time from record if current_time_index is disabled */
+ if (ctx->current_time_index == FLB_FALSE) {
+ flb_time_copy(&tms, &log_event.timestamp);
+ }
+
+ map = *log_event.body;
+ map_size = map.via.map.size;
+
+ es_index_custom_len = 0;
+ if (ctx->logstash_prefix_key) {
+ flb_sds_t v = flb_ra_translate(ctx->ra_prefix_key,
+ (char *) tag, tag_len,
+ map, NULL);
+ if (v) {
+ len = flb_sds_len(v);
+ if (len > 128) {
+ len = 128;
+ memcpy(logstash_index, v, 128);
+ }
+ else {
+ memcpy(logstash_index, v, len);
+ }
+ es_index_custom_len = len;
+ flb_sds_destroy(v);
+ }
+ }
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&tmp_sbuf);
+ msgpack_packer_init(&tmp_pck, &tmp_sbuf, msgpack_sbuffer_write);
+
+ if (ctx->include_tag_key == FLB_TRUE) {
+ map_size++;
+ }
+
+ /* Set the new map size */
+ msgpack_pack_map(&tmp_pck, map_size + 1);
+
+ /* Append the time key */
+ msgpack_pack_str(&tmp_pck, flb_sds_len(ctx->time_key));
+ msgpack_pack_str_body(&tmp_pck, ctx->time_key, flb_sds_len(ctx->time_key));
+
+ /* Format the time */
+ gmtime_r(&tms.tm.tv_sec, &tm);
+ s = strftime(time_formatted, sizeof(time_formatted) - 1,
+ ctx->time_key_format, &tm);
+ if (ctx->time_key_nanos) {
+ len = snprintf(time_formatted + s, sizeof(time_formatted) - 1 - s,
+ ".%09" PRIu64 "Z", (uint64_t) tms.tm.tv_nsec);
+ } else {
+ len = snprintf(time_formatted + s, sizeof(time_formatted) - 1 - s,
+ ".%03" PRIu64 "Z",
+ (uint64_t) tms.tm.tv_nsec / 1000000);
+ }
+
+ s += len;
+ msgpack_pack_str(&tmp_pck, s);
+ msgpack_pack_str_body(&tmp_pck, time_formatted, s);
+
+ es_index = ctx->index;
+ if (ctx->logstash_format == FLB_TRUE) {
+ ret = compose_index_header(ctx, es_index_custom_len,
+ &logstash_index[0], sizeof(logstash_index),
+ ctx->logstash_prefix_separator, &tm);
+ if (ret < 0) {
+ /* retry with default separator */
+ compose_index_header(ctx, es_index_custom_len,
+ &logstash_index[0], sizeof(logstash_index),
+ "-", &tm);
+ }
+
+ es_index = logstash_index;
+ if (ctx->generate_id == FLB_FALSE) {
+ if (ctx->suppress_type_name) {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ ES_BULK_INDEX_FMT_WITHOUT_TYPE,
+ ctx->es_action,
+ es_index);
+ }
+ else {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ ES_BULK_INDEX_FMT,
+ ctx->es_action,
+ es_index, ctx->type);
+ }
+ }
+ }
+ else if (ctx->current_time_index == FLB_TRUE) {
+ /* Make sure we handle index time format for index */
+ strftime(index_formatted, sizeof(index_formatted) - 1,
+ ctx->index, &tm);
+ es_index = index_formatted;
+ }
+
+ /* Tag Key */
+ if (ctx->include_tag_key == FLB_TRUE) {
+ msgpack_pack_str(&tmp_pck, flb_sds_len(ctx->tag_key));
+ msgpack_pack_str_body(&tmp_pck, ctx->tag_key, flb_sds_len(ctx->tag_key));
+ msgpack_pack_str(&tmp_pck, tag_len);
+ msgpack_pack_str_body(&tmp_pck, tag, tag_len);
+ }
+
+ /*
+ * The map_content routine iterates over each key/value pair found in
+ * the map and does some sanitization of the key names.
+ *
+ * Elasticsearch has a restriction that key names cannot contain
+ * a dot; if a dot is found, it's replaced with an underscore.
+ */
+ ret = es_pack_map_content(&tmp_pck, map, ctx);
+ if (ret == -1) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ es_bulk_destroy(bulk);
+ flb_sds_destroy(j_index);
+ return -1;
+ }
+
+ if (ctx->generate_id == FLB_TRUE) {
+ MurmurHash3_x64_128(tmp_sbuf.data, tmp_sbuf.size, 42, hash);
+ snprintf(es_uuid, sizeof(es_uuid),
+ "%04x%04x-%04x-%04x-%04x-%04x%04x%04x",
+ hash[0], hash[1], hash[2], hash[3],
+ hash[4], hash[5], hash[6], hash[7]);
+ if (ctx->suppress_type_name) {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ ES_BULK_INDEX_FMT_ID_WITHOUT_TYPE,
+ ctx->es_action,
+ es_index, es_uuid);
+ }
+ else {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ ES_BULK_INDEX_FMT_ID,
+ ctx->es_action,
+ es_index, ctx->type, es_uuid);
+ }
+ }
+ if (ctx->ra_id_key) {
+ id_key_str = es_get_id_value(ctx ,&map);
+ if (id_key_str) {
+ if (ctx->suppress_type_name) {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ ES_BULK_INDEX_FMT_ID_WITHOUT_TYPE,
+ ctx->es_action,
+ es_index, id_key_str);
+ }
+ else {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ ES_BULK_INDEX_FMT_ID,
+ ctx->es_action,
+ es_index, ctx->type, id_key_str);
+ }
+ flb_sds_destroy(id_key_str);
+ id_key_str = NULL;
+ }
+ }
+
+ /* Convert msgpack to JSON */
+ out_buf = flb_msgpack_raw_to_json_sds(tmp_sbuf.data, tmp_sbuf.size);
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ if (!out_buf) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ es_bulk_destroy(bulk);
+ flb_sds_destroy(j_index);
+ return -1;
+ }
+
+ out_buf_len = flb_sds_len(out_buf);
+ if (strcasecmp(ctx->write_operation, FLB_ES_WRITE_OP_UPDATE) == 0) {
+ tmp_buf = out_buf;
+ out_buf = flb_sds_create_len(NULL, out_buf_len = out_buf_len + sizeof(ES_BULK_UPDATE_OP_BODY) - 2);
+ out_buf_len = snprintf(out_buf, out_buf_len, ES_BULK_UPDATE_OP_BODY, tmp_buf);
+ flb_sds_destroy(tmp_buf);
+ }
+ else if (strcasecmp(ctx->write_operation, FLB_ES_WRITE_OP_UPSERT) == 0) {
+ tmp_buf = out_buf;
+ out_buf = flb_sds_create_len(NULL, out_buf_len = out_buf_len + sizeof(ES_BULK_UPSERT_OP_BODY) - 2);
+ out_buf_len = snprintf(out_buf, out_buf_len, ES_BULK_UPSERT_OP_BODY, tmp_buf);
+ flb_sds_destroy(tmp_buf);
+ }
+
+ ret = es_bulk_append(bulk, j_index, index_len,
+ out_buf, out_buf_len,
+ bytes, off_prev);
+ flb_sds_destroy(out_buf);
+
+ off_prev = off;
+ if (ret == -1) {
+ /* We likely ran out of memory, abort here */
+ flb_log_event_decoder_destroy(&log_decoder);
+ *out_size = 0;
+ es_bulk_destroy(bulk);
+ flb_sds_destroy(j_index);
+ return -1;
+ }
+ }
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ /* Set outgoing data */
+ *out_data = bulk->ptr;
+ *out_size = bulk->len;
+
+ /*
+ * Note: we don't destroy the bulk as we need to keep the allocated
+ * buffer with the data. Instead we just release the bulk context and
+ * return the bulk->ptr buffer
+ */
+ flb_free(bulk);
+ if (ctx->trace_output) {
+ fwrite(*out_data, 1, *out_size, stdout);
+ fflush(stdout);
+ }
+ flb_sds_destroy(j_index);
+ return 0;
+}
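+
+/*
+ * Illustration only (the exact format strings live in es_bulk.h): with
+ * default settings each record contributes an action line plus a source
+ * line to the NDJSON bulk body, roughly
+ *
+ *   {"create":{"_index":"fluent-bit","_type":"_doc"}}
+ *   {"@timestamp":"2024-05-05T12:08:03.000Z", ...original record keys...}
+ */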
+
+static int cb_es_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ struct flb_elasticsearch *ctx;
+
+ ctx = flb_es_conf_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "cannot initialize plugin");
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "host=%s port=%i uri=%s index=%s type=%s",
+ ins->host.name, ins->host.port, ctx->uri,
+ ctx->index, ctx->type);
+
+ flb_output_set_context(ins, ctx);
+
+ /*
+ * This plugin instance uses the HTTP client interface, let's register
+ * its debugging callbacks.
+ */
+ flb_output_set_http_debug_callbacks(ins);
+
+ return 0;
+}
+
+static int elasticsearch_error_check(struct flb_elasticsearch *ctx,
+ struct flb_http_client *c)
+{
+ int i, j, k;
+ int ret;
+ int check = FLB_FALSE;
+ int root_type;
+ char *out_buf;
+ size_t off = 0;
+ size_t out_size;
+ msgpack_unpacked result;
+ msgpack_object root;
+ msgpack_object key;
+ msgpack_object val;
+ msgpack_object item;
+ msgpack_object item_key;
+ msgpack_object item_val;
+
+ /*
+ * Check if our payload is complete: there are situations where
+ * the Elasticsearch HTTP response body is bigger than the HTTP client
+ * buffer, so the payload can be incomplete.
+ */
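+ /*
+ * Illustration (hypothetical response): the walk below expects a bulk reply
+ * shaped like {"took":1,"errors":true,"items":[{"create":{"status":409}}]};
+ * a 409 status (version conflict) is tolerated, anything else marks an error.
+ */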
+ /* Convert JSON payload to msgpack */
+ ret = flb_pack_json(c->resp.payload, c->resp.payload_size,
+ &out_buf, &out_size, &root_type, NULL);
+ if (ret == -1) {
+ /* Is this an incomplete HTTP Request ? */
+ if (c->resp.payload_size <= 0) {
+ return FLB_TRUE;
+ }
+
+ /* Lookup error field */
+ if (strstr(c->resp.payload, "\"errors\":false,\"items\":[")) {
+ return FLB_FALSE;
+ }
+
+ flb_plg_error(ctx->ins, "could not pack/validate JSON response\n%s",
+ c->resp.payload);
+ return FLB_TRUE;
+ }
+
+ /* Lookup error field */
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, out_buf, out_size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ flb_plg_error(ctx->ins, "Cannot unpack response to find error\n%s",
+ c->resp.payload);
+ return FLB_TRUE;
+ }
+
+ root = result.data;
+ if (root.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected payload type=%i",
+ root.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ for (i = 0; i < root.via.map.size; i++) {
+ key = root.via.map.ptr[i].key;
+ if (key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "unexpected key type=%i",
+ key.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ if (key.via.str.size == 6 && strncmp(key.via.str.ptr, "errors", 6) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_BOOLEAN) {
+ flb_plg_error(ctx->ins, "unexpected 'error' value type=%i",
+ val.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ /* If error == false, we are OK (no errors = FLB_FALSE) */
+ if (!val.via.boolean) {
+ /* no errors */
+ check = FLB_FALSE;
+ goto done;
+ }
+ }
+ else if (key.via.str.size == 5 && strncmp(key.via.str.ptr, "items", 5) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_ARRAY) {
+ flb_plg_error(ctx->ins, "unexpected 'items' value type=%i",
+ val.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ for (j = 0; j < val.via.array.size; j++) {
+ item = val.via.array.ptr[j];
+ if (item.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected 'item' outer value type=%i",
+ item.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ if (item.via.map.size != 1) {
+ flb_plg_error(ctx->ins, "unexpected 'item' size=%i",
+ item.via.map.size);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ item = item.via.map.ptr[0].val;
+ if (item.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected 'item' inner value type=%i",
+ item.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ for (k = 0; k < item.via.map.size; k++) {
+ item_key = item.via.map.ptr[k].key;
+ if (item_key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "unexpected key type=%i",
+ item_key.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ if (item_key.via.str.size == 6 && strncmp(item_key.via.str.ptr, "status", 6) == 0) {
+ item_val = item.via.map.ptr[k].val;
+
+ if (item_val.type != MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ flb_plg_error(ctx->ins, "unexpected 'status' value type=%i",
+ item_val.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+ /* Check for errors other than version conflict (document already exists) */
+ if (item_val.via.i64 != 409) {
+ check = FLB_TRUE;
+ goto done;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ done:
+ flb_free(out_buf);
+ msgpack_unpacked_destroy(&result);
+ return check;
+}
+
+static void cb_es_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *ins, void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ size_t pack_size;
+ char *pack;
+ void *out_buf;
+ size_t out_size;
+ size_t b_sent;
+ struct flb_elasticsearch *ctx = out_context;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ flb_sds_t signature = NULL;
+ int compressed = FLB_FALSE;
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Convert format */
+ ret = elasticsearch_format(config, ins,
+ ctx, NULL,
+ event_chunk->type,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ event_chunk->data, event_chunk->size,
+ &out_buf, &out_size);
+ if (ret != 0) {
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ pack = (char *) out_buf;
+ pack_size = out_size;
+
+ /* Should we compress the payload ? */
+ if (ctx->compress_gzip == FLB_TRUE) {
+ ret = flb_gzip_compress((void *) pack, pack_size,
+ &out_buf, &out_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "cannot gzip payload, disabling compression");
+ }
+ else {
+ compressed = FLB_TRUE;
+ }
+
+ /*
+ * If the payload buffer is different from pack, it means we must free it.
+ */
+ if (out_buf != pack) {
+ flb_free(pack);
+ }
+
+ pack = (char *) out_buf;
+ pack_size = out_size;
+ }
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->uri,
+ pack, pack_size, NULL, 0, NULL, 0);
+
+ flb_http_buffer_size(c, ctx->buffer_size);
+
+#ifndef FLB_HAVE_AWS
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+#endif
+
+ flb_http_add_header(c, "Content-Type", 12, "application/x-ndjson", 20);
+
+ if (ctx->http_user && ctx->http_passwd) {
+ flb_http_basic_auth(c, ctx->http_user, ctx->http_passwd);
+ }
+ else if (ctx->cloud_user && ctx->cloud_passwd) {
+ flb_http_basic_auth(c, ctx->cloud_user, ctx->cloud_passwd);
+ }
+
+#ifdef FLB_HAVE_AWS
+ if (ctx->has_aws_auth == FLB_TRUE) {
+ signature = add_aws_auth(c, ctx);
+ if (!signature) {
+ goto retry;
+ }
+ }
+ else {
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ }
+#endif
+
+ /* Content Encoding: gzip */
+ if (compressed == FLB_TRUE) {
+ flb_http_set_content_encoding_gzip(c);
+ }
+
+ /* Map debug callbacks */
+ flb_http_client_debug(c, ctx->ins->callback);
+
+ ret = flb_http_do(c, &b_sent);
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "http_do=%i URI=%s", ret, ctx->uri);
+ goto retry;
+ }
+ else {
+ /* The request was issued successfully, validate the 'error' field */
+ flb_plg_debug(ctx->ins, "HTTP Status=%i URI=%s", c->resp.status, ctx->uri);
+ if (c->resp.status != 200 && c->resp.status != 201) {
+ if (c->resp.payload_size > 0) {
+ flb_plg_error(ctx->ins, "HTTP status=%i URI=%s, response:\n%s\n",
+ c->resp.status, ctx->uri, c->resp.payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "HTTP status=%i URI=%s",
+ c->resp.status, ctx->uri);
+ }
+ goto retry;
+ }
+
+ if (c->resp.payload_size > 0) {
+ /*
+ * Elasticsearch payload should be JSON, we convert it to msgpack
+ * and lookup the 'error' field.
+ */
+ ret = elasticsearch_error_check(ctx, c);
+ if (ret == FLB_TRUE) {
+ /* we got an error */
+ if (ctx->trace_error) {
+ /*
+ * If trace_error is set, trace the actual
+ * response from Elasticsearch explaining the problem.
+ * Trace_Output can be used to see the request.
+ */
+ if (pack_size < 4000) {
+ flb_plg_debug(ctx->ins, "error caused by: Input\n%.*s\n",
+ (int) pack_size, pack);
+ }
+ if (c->resp.payload_size < 4000) {
+ flb_plg_error(ctx->ins, "error: Output\n%s",
+ c->resp.payload);
+ } else {
+ /*
+ * We must use fwrite since the flb_log functions
+ * will truncate data at 4KB
+ */
+ fwrite(c->resp.payload, 1, c->resp.payload_size, stderr);
+ fflush(stderr);
+ }
+ }
+ goto retry;
+ }
+ else {
+ flb_plg_debug(ctx->ins, "Elasticsearch response\n%s",
+ c->resp.payload);
+ }
+ }
+ else {
+ goto retry;
+ }
+ }
+
+ /* Cleanup */
+ flb_http_client_destroy(c);
+ flb_free(pack);
+ flb_upstream_conn_release(u_conn);
+ if (signature) {
+ flb_sds_destroy(signature);
+ }
+ FLB_OUTPUT_RETURN(FLB_OK);
+
+ /* Issue a retry */
+ retry:
+ flb_http_client_destroy(c);
+ flb_free(pack);
+
+ if (out_buf != pack) {
+ flb_free(out_buf);
+ }
+
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+}
+
+static int cb_es_exit(void *data, struct flb_config *config)
+{
+ struct flb_elasticsearch *ctx = data;
+
+ flb_es_conf_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "index", FLB_ES_DEFAULT_INDEX,
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, index),
+ "Set an index name"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "type", FLB_ES_DEFAULT_TYPE,
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, type),
+ "Set the document type property"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "suppress_type_name", "false",
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, suppress_type_name),
+ "If true, mapping types is removed. (for v7.0.0 or later)"
+ },
+
+ /* HTTP Authentication */
+ {
+ FLB_CONFIG_MAP_STR, "http_user", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, http_user),
+ "Optional username credential for Elastic X-Pack access"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "http_passwd", "",
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, http_passwd),
+ "Password for user defined in HTTP_User"
+ },
+
+ /* HTTP Compression */
+ {
+ FLB_CONFIG_MAP_STR, "compress", NULL,
+ 0, FLB_FALSE, 0,
+ "Set payload compression mechanism. Option available is 'gzip'"
+ },
+
+ /* Cloud Authentication */
+ {
+ FLB_CONFIG_MAP_STR, "cloud_id", NULL,
+ 0, FLB_FALSE, 0,
+ "Elastic cloud ID of the cluster to connect to"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "cloud_auth", NULL,
+ 0, FLB_FALSE, 0,
+ "Elastic cloud authentication credentials"
+ },
+
+ /* AWS Authentication */
+#ifdef FLB_HAVE_AWS
+ {
+ FLB_CONFIG_MAP_BOOL, "aws_auth", "false",
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, has_aws_auth),
+ "Enable AWS Sigv4 Authentication"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_region", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, aws_region),
+ "AWS Region of your Amazon OpenSearch Service cluster"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_sts_endpoint", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, aws_sts_endpoint),
+ "Custom endpoint for the AWS STS API, used with the AWS_Role_ARN option"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_role_arn", NULL,
+ 0, FLB_FALSE, 0,
+ "AWS IAM Role to assume to put records to your Amazon OpenSearch cluster"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_external_id", NULL,
+ 0, FLB_FALSE, 0,
+ "External ID for the AWS IAM Role specified with `aws_role_arn`"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_service_name", "es",
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, aws_service_name),
+ "AWS Service Name"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_profile", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, aws_profile),
+ "AWS Profile name. AWS Profiles can be configured with AWS CLI and are usually stored in "
+ "$HOME/.aws/ directory."
+ },
+#endif
+
+ /* Logstash compatibility */
+ {
+ FLB_CONFIG_MAP_BOOL, "logstash_format", "false",
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, logstash_format),
+ "Enable Logstash format compatibility"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "logstash_prefix", FLB_ES_DEFAULT_PREFIX,
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, logstash_prefix),
+ "When Logstash_Format is enabled, the Index name is composed using a prefix "
+ "and the date, e.g: If Logstash_Prefix is equals to 'mydata' your index will "
+ "become 'mydata-YYYY.MM.DD'. The last string appended belongs to the date "
+ "when the data is being generated"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "logstash_prefix_separator", "-",
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, logstash_prefix_separator),
+ "Set a separator between logstash_prefix and date."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "logstash_prefix_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, logstash_prefix_key),
+ "When included: the value in the record that belongs to the key will be looked "
+ "up and over-write the Logstash_Prefix for index generation. If the key/value "
+ "is not found in the record then the Logstash_Prefix option will act as a "
+ "fallback. Nested keys are supported through record accessor pattern"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "logstash_dateformat", FLB_ES_DEFAULT_TIME_FMT,
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, logstash_dateformat),
+ "Time format (based on strftime) to generate the second part of the Index name"
+ },
+
+ /* Custom Time and Tag keys */
+ {
+ FLB_CONFIG_MAP_STR, "time_key", FLB_ES_DEFAULT_TIME_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, time_key),
+ "When Logstash_Format is enabled, each record will get a new timestamp field. "
+ "The Time_Key property defines the name of that field"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "time_key_format", FLB_ES_DEFAULT_TIME_KEYF,
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, time_key_format),
+ "When Logstash_Format is enabled, this property defines the format of the "
+ "timestamp"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "time_key_nanos", "false",
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, time_key_nanos),
+ "When Logstash_Format is enabled, enabling this property sends nanosecond "
+ "precision timestamps"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "include_tag_key", "false",
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, include_tag_key),
+ "When enabled, it append the Tag name to the record"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "tag_key", FLB_ES_DEFAULT_TAG_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, tag_key),
+ "When Include_Tag_Key is enabled, this property defines the key name for the tag"
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_size", FLB_ES_DEFAULT_HTTP_MAX,
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, buffer_size),
+ "Specify the buffer size used to read the response from the Elasticsearch HTTP "
+ "service. This option is useful for debugging purposes where is required to read "
+ "full responses, note that response size grows depending of the number of records "
+ "inserted. To set an unlimited amount of memory set this value to 'false', "
+ "otherwise the value must be according to the Unit Size specification"
+ },
+
+ /* Elasticsearch specifics */
+ {
+ FLB_CONFIG_MAP_STR, "path", NULL,
+ 0, FLB_FALSE, 0,
+ "Elasticsearch accepts new data on HTTP query path '/_bulk'. But it is also "
+ "possible to serve Elasticsearch behind a reverse proxy on a subpath. This "
+ "option defines such path on the fluent-bit side. It simply adds a path "
+ "prefix in the indexing HTTP POST URI"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "pipeline", NULL,
+ 0, FLB_FALSE, 0,
+ "Newer versions of Elasticsearch allows to setup filters called pipelines. "
+ "This option allows to define which pipeline the database should use. For "
+ "performance reasons is strongly suggested to do parsing and filtering on "
+ "Fluent Bit side, avoid pipelines"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "generate_id", "false",
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, generate_id),
+ "When enabled, generate _id for outgoing records. This prevents duplicate "
+ "records when retrying ES"
+ },
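+    /*
+     * Note (see es_conf.c later in this patch): the accepted values are
+     * "index", "create", "update" and "upsert"; "upsert" is sent to
+     * Elasticsearch as an "update" action with "doc_as_upsert" set to true.
+     */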
+ {
+ FLB_CONFIG_MAP_STR, "write_operation", "create",
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, write_operation),
+ "Operation to use to write in bulk requests"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "id_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, id_key),
+ "If set, _id will be the value of the key from incoming record."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "replace_dots", "false",
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, replace_dots),
+ "When enabled, replace field name dots with underscore, required by Elasticsearch "
+ "2.0-2.3."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "current_time_index", "false",
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, current_time_index),
+ "Use current time for index generation instead of message record"
+ },
+
+ /* Trace */
+ {
+ FLB_CONFIG_MAP_BOOL, "trace_output", "false",
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, trace_output),
+ "When enabled print the Elasticsearch API calls to stdout (for diag only)"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "trace_error", "false",
+ 0, FLB_TRUE, offsetof(struct flb_elasticsearch, trace_error),
+ "When enabled print the Elasticsearch exception to stderr (for diag only)"
+ },
+
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_output_plugin out_es_plugin = {
+ .name = "es",
+ .description = "Elasticsearch",
+ .cb_init = cb_es_init,
+ .cb_pre_run = NULL,
+ .cb_flush = cb_es_flush,
+ .cb_exit = cb_es_exit,
+ .workers = 2,
+
+ /* Configuration */
+ .config_map = config_map,
+
+ /* Test */
+ .test_formatter.callback = elasticsearch_format,
+
+ /* Plugin flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_es/es.h b/src/fluent-bit/plugins/out_es/es.h
new file mode 100644
index 000000000..5d187049f
--- /dev/null
+++ b/src/fluent-bit/plugins/out_es/es.h
@@ -0,0 +1,140 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_ES_H
+#define FLB_OUT_ES_H
+
+#define FLB_ES_DEFAULT_HOST "127.0.0.1"
+#define FLB_ES_DEFAULT_PORT        9200
+#define FLB_ES_DEFAULT_INDEX "fluent-bit"
+#define FLB_ES_DEFAULT_TYPE "_doc"
+#define FLB_ES_DEFAULT_PREFIX "logstash"
+#define FLB_ES_DEFAULT_TIME_FMT "%Y.%m.%d"
+#define FLB_ES_DEFAULT_TIME_KEY "@timestamp"
+#define FLB_ES_DEFAULT_TIME_KEYF "%Y-%m-%dT%H:%M:%S"
+#define FLB_ES_DEFAULT_TAG_KEY "flb-key"
+#define FLB_ES_DEFAULT_HTTP_MAX "512k"
+#define FLB_ES_DEFAULT_HTTPS_PORT 443
+#define FLB_ES_WRITE_OP_INDEX "index"
+#define FLB_ES_WRITE_OP_CREATE "create"
+#define FLB_ES_WRITE_OP_UPDATE "update"
+#define FLB_ES_WRITE_OP_UPSERT "upsert"
+
+struct flb_elasticsearch {
+ /* Elasticsearch index (database) and type (table) */
+ char *index;
+ char *type;
+ char suppress_type_name;
+
+ /* HTTP Auth */
+ char *http_user;
+ char *http_passwd;
+
+ /* Elastic Cloud Auth */
+ char *cloud_user;
+ char *cloud_passwd;
+
+ /* AWS Auth */
+#ifdef FLB_HAVE_AWS
+ int has_aws_auth;
+ char *aws_region;
+ char *aws_sts_endpoint;
+ char *aws_profile;
+ struct flb_aws_provider *aws_provider;
+ struct flb_aws_provider *base_aws_provider;
+ /* tls instances can't be re-used; aws provider requires a separate one */
+ struct flb_tls *aws_tls;
+ /* one for the standard chain provider, one for sts assume role */
+ struct flb_tls *aws_sts_tls;
+ char *aws_session_name;
+ char *aws_service_name;
+ struct mk_list *aws_unsigned_headers;
+#endif
+
+ /* HTTP Client Setup */
+ size_t buffer_size;
+
+ /*
+ * If enabled, replace field name dots with underscore, required for
+ * Elasticsearch 2.0-2.3.
+ */
+ int replace_dots;
+
+ int trace_output;
+ int trace_error;
+
+ /*
+ * Logstash compatibility options
+ * ==============================
+ */
+
+ /* enabled/disabled */
+ int logstash_format;
+ int generate_id;
+ int current_time_index;
+
+ /* prefix */
+ flb_sds_t logstash_prefix;
+ flb_sds_t logstash_prefix_separator;
+
+ /* prefix key */
+ flb_sds_t logstash_prefix_key;
+
+ /* date format */
+ flb_sds_t logstash_dateformat;
+
+ /* time key */
+ flb_sds_t time_key;
+
+ /* time key format */
+ flb_sds_t time_key_format;
+
+ /* time key nanoseconds */
+ int time_key_nanos;
+
+
+ /* write operation */
+ flb_sds_t write_operation;
+ /* write operation elasticsearch operation */
+ flb_sds_t es_action;
+
+ /* id_key */
+ flb_sds_t id_key;
+ struct flb_record_accessor *ra_id_key;
+
+ /* include_tag_key */
+ int include_tag_key;
+ flb_sds_t tag_key;
+
+ /* Elasticsearch HTTP API */
+ char uri[256];
+
+ struct flb_record_accessor *ra_prefix_key;
+
+ /* Compression mode (gzip) */
+ int compress_gzip;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *u;
+
+ /* Plugin output instance reference */
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_es/es_bulk.c b/src/fluent-bit/plugins/out_es/es_bulk.c
new file mode 100644
index 000000000..221f45ebd
--- /dev/null
+++ b/src/fluent-bit/plugins/out_es/es_bulk.c
@@ -0,0 +1,113 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+#include <fluent-bit.h>
+#include "es_bulk.h"
+
+struct es_bulk *es_bulk_create(size_t estimated_size)
+{
+ struct es_bulk *b;
+
+ if (estimated_size < ES_BULK_CHUNK) {
+ estimated_size = ES_BULK_CHUNK;
+ }
+
+ b = flb_malloc(sizeof(struct es_bulk));
+ if (!b) {
+ perror("calloc");
+ return NULL;
+ }
+ b->ptr = flb_malloc(estimated_size);
+ if (b->ptr == NULL) {
+ perror("malloc");
+ flb_free(b);
+ return NULL;
+ }
+
+ b->size = estimated_size;
+ b->len = 0;
+
+ return b;
+}
+
+void es_bulk_destroy(struct es_bulk *bulk)
+{
+ if (bulk->size > 0) {
+ flb_free(bulk->ptr);
+ }
+ flb_free(bulk);
+}
+
+int es_bulk_append(struct es_bulk *bulk, char *index, int i_len,
+ char *json, size_t j_len,
+ size_t whole_size, size_t converted_size)
+{
+ int available;
+ int append_size;
+ int required;
+ int remaining_size;
+ char *ptr;
+
+ required = i_len + j_len + ES_BULK_HEADER + 1;
+ available = (bulk->size - bulk->len);
+
+ if (available < required) {
+ /*
+ * estimate a converted size of json
+ * calculate
+ * 1. rest of msgpack data size
+ * 2. ratio from bulk json size and processed msgpack size.
+ */
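+        /*
+         * Worked example (illustrative numbers): with whole_size=1000,
+         * converted_size=250 and bulk->size=2048, the remaining 750 msgpack
+         * bytes are estimated to expand to ceil(750 * 2048/250) = 6144 bytes
+         * of bulk JSON, so the buffer grows by max(required - available, 6144).
+         */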
+ append_size = required - available;
+ if (converted_size == 0) {
+ /* converted_size = 0 causes div/0 */
+ flb_debug("[out_es] converted_size is 0");
+ } else {
+ remaining_size = ceil((whole_size - converted_size) /* rest of size to convert */
+ * ((double)bulk->size / converted_size)); /* = json size / msgpack size */
+ append_size = fmax(append_size, remaining_size);
+ }
+ if (append_size < ES_BULK_CHUNK) {
+ /* append at least ES_BULK_CHUNK size */
+ append_size = ES_BULK_CHUNK;
+ }
+ ptr = flb_realloc(bulk->ptr, bulk->size + append_size);
+ if (!ptr) {
+ flb_errno();
+ return -1;
+ }
+ bulk->ptr = ptr;
+ bulk->size += append_size;
+ }
+
+ memcpy(bulk->ptr + bulk->len, index, i_len);
+ bulk->len += i_len;
+
+ memcpy(bulk->ptr + bulk->len, json, j_len);
+ bulk->len += j_len;
+ bulk->ptr[bulk->len] = '\n';
+ bulk->len++;
+
+ return 0;
+};
diff --git a/src/fluent-bit/plugins/out_es/es_bulk.h b/src/fluent-bit/plugins/out_es/es_bulk.h
new file mode 100644
index 000000000..7bb66dbbc
--- /dev/null
+++ b/src/fluent-bit/plugins/out_es/es_bulk.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_ES_BULK_H
+#define FLB_OUT_ES_BULK_H
+
+#include <inttypes.h>
+
+#define ES_BULK_CHUNK 4096 /* Size of buffer chunks */
+#define ES_BULK_HEADER 165 /* ES Bulk API prefix line */
+#define ES_BULK_INDEX_FMT "{\"%s\":{\"_index\":\"%s\",\"_type\":\"%s\"}}\n"
+#define ES_BULK_INDEX_FMT_ID "{\"%s\":{\"_index\":\"%s\",\"_type\":\"%s\",\"_id\":\"%s\"}}\n"
+#define ES_BULK_INDEX_FMT_WITHOUT_TYPE "{\"%s\":{\"_index\":\"%s\"}}\n"
+#define ES_BULK_INDEX_FMT_ID_WITHOUT_TYPE "{\"%s\":{\"_index\":\"%s\",\"_id\":\"%s\"}}\n"
+#define ES_BULK_UPDATE_OP_BODY "{\"doc\":%s}"
+#define ES_BULK_UPSERT_OP_BODY "{\"doc_as_upsert\":true,\"doc\":%s}"
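+
+/*
+ * Illustrative bulk payload (record content is made up): with the default
+ * "create" write operation, index and type, each record contributes an action
+ * line plus a document line, e.g.:
+ *
+ *   {"create":{"_index":"fluent-bit","_type":"_doc"}}
+ *   {"log":"hello", "stream":"stdout"}
+ */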
+
+struct es_bulk {
+ char *ptr;
+ uint32_t len;
+ uint32_t size;
+};
+
+struct es_bulk *es_bulk_create(size_t estimated_size);
+int es_bulk_append(struct es_bulk *bulk, char *index, int i_len,
+ char *json, size_t j_len,
+ size_t whole_size, size_t curr_size);
+void es_bulk_destroy(struct es_bulk *bulk);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_es/es_conf.c b/src/fluent-bit/plugins/out_es/es_conf.c
new file mode 100644
index 000000000..48c8c3e25
--- /dev/null
+++ b/src/fluent-bit/plugins/out_es/es_conf.c
@@ -0,0 +1,537 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_signv4.h>
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_base64.h>
+
+#include "es.h"
+#include "es_conf.h"
+
+/*
+ * extract_cloud_host extracts the public hostname
+ * of a deployment from a Cloud ID string.
+ *
+ * The Cloud ID string has the format "<deployment_name>:<base64_info>".
+ * Once decoded, the "base64_info" string has the format "<deployment_region>$<elasticsearch_hostname>$<kibana_hostname>"
+ * and the function returns "<elasticsearch_hostname>.<deployment_region>" token.
+ */
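+/*
+ * Illustrative example (values are made up): for
+ *   cloud_id = my-deployment:<base64 of us-east-1$abc123.es.io$def456.kb.io>
+ * the function returns "abc123.es.io.us-east-1"; if the hostname token embeds a
+ * port (e.g. "abc123.es.io:9243"), the port is kept: "abc123.es.io.us-east-1:9243".
+ */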
+static flb_sds_t extract_cloud_host(struct flb_elasticsearch *ctx,
+ const char *cloud_id)
+{
+
+ char *colon;
+ char *region;
+ char *host;
+ char *port = NULL;
+ char buf[256] = {0};
+ char cloud_host_buf[256] = {0};
+ const char dollar[2] = "$";
+ size_t len;
+ int ret;
+
+ /* keep only part after first ":" */
+ colon = strchr(cloud_id, ':');
+ if (colon == NULL) {
+ return NULL;
+ }
+ colon++;
+
+ /* decode base64 */
+ ret = flb_base64_decode((unsigned char *)buf, sizeof(buf), &len, (unsigned char *)colon, strlen(colon));
+ if (ret) {
+ flb_plg_error(ctx->ins, "cannot decode cloud_id");
+ return NULL;
+ }
+ region = strtok(buf, dollar);
+ if (region == NULL) {
+ return NULL;
+ }
+ host = strtok(NULL, dollar);
+ if (host == NULL) {
+ return NULL;
+ }
+
+ /*
+     * Some cloud_id strings have the format "<deployment_region>$<elasticsearch_hostname>:<port>$<kibana_hostname>".
+     * e.g. https://github.com/elastic/beats/blob/v8.4.1/libbeat/cloudid/cloudid_test.go#L60
+     *
+     * It means the variable "host" can contain ':' and a port number.
+ */
+ colon = strchr(host, ':');
+ if (colon != NULL) {
+        /* host contains a port number */
+ *colon = '\0'; /* remove port number from host */
+ port = colon+1;
+ }
+
+ strcpy(cloud_host_buf, host);
+ strcat(cloud_host_buf, ".");
+ strcat(cloud_host_buf, region);
+ if (port != NULL) {
+ strcat(cloud_host_buf, ":");
+ strcat(cloud_host_buf, port);
+ }
+ return flb_sds_create(cloud_host_buf);
+}
+
+/*
+ * set_cloud_credentials gets a cloud_auth
+ * and sets the context's cloud_user and cloud_passwd.
+ * Example:
+ * cloud_auth = elastic:ZXVyb3BxxxxxxZTA1Ng
+ * ---->
+ * cloud_user = elastic
+ * cloud_passwd = ZXVyb3BxxxxxxZTA1Ng
+ */
+static void set_cloud_credentials(struct flb_elasticsearch *ctx,
+ const char *cloud_auth)
+{
+ /* extract strings */
+ int items = 0;
+ struct mk_list *toks;
+ struct mk_list *head;
+ struct flb_split_entry *entry;
+ toks = flb_utils_split((const char *)cloud_auth, ':', -1);
+ mk_list_foreach(head, toks) {
+ items++;
+ entry = mk_list_entry(head, struct flb_split_entry, _head);
+ if (items == 1) {
+ ctx->cloud_user = flb_strdup(entry->value);
+ }
+ if (items == 2) {
+ ctx->cloud_passwd = flb_strdup(entry->value);
+ }
+ }
+ flb_utils_split_free(toks);
+}
+
+struct flb_elasticsearch *flb_es_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int len;
+ int io_flags = 0;
+ ssize_t ret;
+ char *buf;
+ const char *tmp;
+ const char *path;
+#ifdef FLB_HAVE_AWS
+ char *aws_role_arn = NULL;
+ char *aws_external_id = NULL;
+ char *aws_session_name = NULL;
+#endif
+ char *cloud_port_char;
+ char *cloud_host = NULL;
+ int cloud_host_port = 0;
+ int cloud_port = FLB_ES_DEFAULT_HTTPS_PORT;
+ struct flb_uri *uri = ins->host.uri;
+ struct flb_uri_field *f_index = NULL;
+ struct flb_uri_field *f_type = NULL;
+ struct flb_upstream *upstream;
+ struct flb_elasticsearch *ctx;
+
+ /* Allocate context */
+ ctx = flb_calloc(1, sizeof(struct flb_elasticsearch));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ if (uri) {
+ if (uri->count >= 2) {
+ f_index = flb_uri_get(uri, 0);
+ f_type = flb_uri_get(uri, 1);
+ }
+ }
+
+ /* handle cloud_id */
+ tmp = flb_output_get_property("cloud_id", ins);
+ if (tmp) {
+ cloud_host = extract_cloud_host(ctx, tmp);
+ if (cloud_host == NULL) {
+ flb_plg_error(ctx->ins, "cannot extract cloud_host");
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+ flb_plg_debug(ctx->ins, "extracted cloud_host: '%s'", cloud_host);
+
+ cloud_port_char = strchr(cloud_host, ':');
+
+ if (cloud_port_char == NULL) {
+ flb_plg_debug(ctx->ins, "cloud_host: '%s' does not contain a port: '%s'", cloud_host, cloud_host);
+ }
+ else {
+ cloud_port_char[0] = '\0';
+ cloud_port_char = &cloud_port_char[1];
+ flb_plg_debug(ctx->ins, "extracted cloud_port_char: '%s'", cloud_port_char);
+ cloud_host_port = (int) strtol(cloud_port_char, (char **) NULL, 10);
+ flb_plg_debug(ctx->ins, "converted cloud_port_char to port int: '%i'", cloud_host_port);
+ }
+
+ if (cloud_host_port == 0) {
+ cloud_host_port = cloud_port;
+ }
+
+ flb_plg_debug(ctx->ins,
+ "checked whether extracted port was null and set it to "
+ "default https port or not. Outcome: '%i' and cloud_host: '%s'.",
+ cloud_host_port, cloud_host);
+
+ if (ins->host.name != NULL) {
+ flb_sds_destroy(ins->host.name);
+ }
+
+ ins->host.name = cloud_host;
+ ins->host.port = cloud_host_port;
+ }
+
+ /* Set default network configuration */
+ flb_output_net_default("127.0.0.1", 9200, ins);
+
+ /* Populate context with config map defaults and incoming properties */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "configuration error");
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* handle cloud_auth */
+ tmp = flb_output_get_property("cloud_auth", ins);
+ if (tmp) {
+ set_cloud_credentials(ctx, tmp);
+ }
+
+ /* use TLS ? */
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ }
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ /* Compress (gzip) */
+ tmp = flb_output_get_property("compress", ins);
+ ctx->compress_gzip = FLB_FALSE;
+ if (tmp) {
+ if (strcasecmp(tmp, "gzip") == 0) {
+ ctx->compress_gzip = FLB_TRUE;
+ }
+ }
+
+ /* Prepare an upstream handler */
+ upstream = flb_upstream_create(config,
+ ins->host.name,
+ ins->host.port,
+ io_flags,
+ ins->tls);
+ if (!upstream) {
+ flb_plg_error(ctx->ins, "cannot create Upstream context");
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->u = upstream;
+
+ /* Set instance flags into upstream */
+ flb_output_upstream_set(ctx->u, ins);
+
+ /* Set manual Index and Type */
+ if (f_index) {
+ ctx->index = flb_strdup(f_index->value); /* FIXME */
+ }
+
+ if (f_type) {
+ ctx->type = flb_strdup(f_type->value); /* FIXME */
+ }
+
+ /* HTTP Payload (response) maximum buffer size (0 == unlimited) */
+ if (ctx->buffer_size == -1) {
+ ctx->buffer_size = 0;
+ }
+
+ /* Elasticsearch: Path */
+ path = flb_output_get_property("path", ins);
+ if (!path) {
+ path = "";
+ }
+
+ /* Elasticsearch: Pipeline */
+ tmp = flb_output_get_property("pipeline", ins);
+ if (tmp) {
+ snprintf(ctx->uri, sizeof(ctx->uri) - 1, "%s/_bulk/?pipeline=%s", path, tmp);
+ }
+ else {
+ snprintf(ctx->uri, sizeof(ctx->uri) - 1, "%s/_bulk", path);
+ }
+
+ if (ctx->id_key) {
+ ctx->ra_id_key = flb_ra_create(ctx->id_key, FLB_FALSE);
+ if (ctx->ra_id_key == NULL) {
+ flb_plg_error(ins, "could not create record accessor for Id Key");
+ }
+ if (ctx->generate_id == FLB_TRUE) {
+ flb_plg_warn(ins, "Generate_ID is ignored when ID_key is set");
+ ctx->generate_id = FLB_FALSE;
+ }
+ }
+
+ if (ctx->write_operation) {
+ if (strcasecmp(ctx->write_operation, FLB_ES_WRITE_OP_INDEX) == 0) {
+ ctx->es_action = flb_strdup(FLB_ES_WRITE_OP_INDEX);
+ }
+ else if (strcasecmp(ctx->write_operation, FLB_ES_WRITE_OP_CREATE) == 0) {
+ ctx->es_action = flb_strdup(FLB_ES_WRITE_OP_CREATE);
+ }
+ else if (strcasecmp(ctx->write_operation, FLB_ES_WRITE_OP_UPDATE) == 0
+ || strcasecmp(ctx->write_operation, FLB_ES_WRITE_OP_UPSERT) == 0) {
+ ctx->es_action = flb_strdup(FLB_ES_WRITE_OP_UPDATE);
+ }
+ else {
+ flb_plg_error(ins, "wrong Write_Operation (should be one of index, create, update, upsert)");
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+ if (strcasecmp(ctx->es_action, FLB_ES_WRITE_OP_UPDATE) == 0
+ && !ctx->ra_id_key && ctx->generate_id == FLB_FALSE) {
+ flb_plg_error(ins, "Id_Key or Generate_Id must be set when Write_Operation update or upsert");
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
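+    /*
+     * A plain key name is converted into a record accessor pattern by
+     * prepending '$'; e.g. a hypothetical logstash_prefix_key "kubernetes"
+     * becomes the pattern "$kubernetes". Values that already start with '$'
+     * are used as-is.
+     */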
+ if (ctx->logstash_prefix_key) {
+ if (ctx->logstash_prefix_key[0] != '$') {
+ len = flb_sds_len(ctx->logstash_prefix_key);
+ buf = flb_malloc(len + 2);
+ if (!buf) {
+ flb_errno();
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+ buf[0] = '$';
+ memcpy(buf + 1, ctx->logstash_prefix_key, len);
+ buf[len + 1] = '\0';
+
+ ctx->ra_prefix_key = flb_ra_create(buf, FLB_TRUE);
+ flb_free(buf);
+ }
+ else {
+ ctx->ra_prefix_key = flb_ra_create(ctx->logstash_prefix_key, FLB_TRUE);
+ }
+
+ if (!ctx->ra_prefix_key) {
+ flb_plg_error(ins, "invalid logstash_prefix_key pattern '%s'", tmp);
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+#ifdef FLB_HAVE_AWS
+ /* AWS Auth Unsigned Headers */
+    ctx->aws_unsigned_headers = flb_malloc(sizeof(struct mk_list));
+    if (ctx->aws_unsigned_headers == NULL) {
+        flb_errno();
+        flb_es_conf_destroy(ctx);
+        return NULL;
+    }
+    flb_slist_create(ctx->aws_unsigned_headers);
+ ret = flb_slist_add(ctx->aws_unsigned_headers, "Content-Length");
+ if (ret != 0) {
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* AWS Auth */
+ ctx->has_aws_auth = FLB_FALSE;
+ tmp = flb_output_get_property("aws_auth", ins);
+ if (tmp) {
+ if (strncasecmp(tmp, "On", 2) == 0) {
+ ctx->has_aws_auth = FLB_TRUE;
+ flb_debug("[out_es] Enabled AWS Auth");
+
+ /* AWS provider needs a separate TLS instance */
+ ctx->aws_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+ if (!ctx->aws_tls) {
+ flb_errno();
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+
+ tmp = flb_output_get_property("aws_region", ins);
+ if (!tmp) {
+ flb_error("[out_es] aws_auth enabled but aws_region not set");
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->aws_region = (char *) tmp;
+
+ tmp = flb_output_get_property("aws_sts_endpoint", ins);
+ if (tmp) {
+ ctx->aws_sts_endpoint = (char *) tmp;
+ }
+
+ ctx->aws_provider = flb_standard_chain_provider_create(config,
+ ctx->aws_tls,
+ ctx->aws_region,
+ ctx->aws_sts_endpoint,
+ NULL,
+ flb_aws_client_generator(),
+ ctx->aws_profile);
+ if (!ctx->aws_provider) {
+ flb_error("[out_es] Failed to create AWS Credential Provider");
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+
+ tmp = flb_output_get_property("aws_role_arn", ins);
+ if (tmp) {
+ /* Use the STS Provider */
+ ctx->base_aws_provider = ctx->aws_provider;
+ aws_role_arn = (char *) tmp;
+ aws_external_id = NULL;
+ tmp = flb_output_get_property("aws_external_id", ins);
+ if (tmp) {
+ aws_external_id = (char *) tmp;
+ }
+
+ aws_session_name = flb_sts_session_name();
+ if (!aws_session_name) {
+ flb_error("[out_es] Failed to create aws iam role "
+ "session name");
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* STS provider needs yet another separate TLS instance */
+ ctx->aws_sts_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+ if (!ctx->aws_sts_tls) {
+ flb_errno();
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+
+ ctx->aws_provider = flb_sts_provider_create(config,
+ ctx->aws_sts_tls,
+                                                        ctx->base_aws_provider,
+ aws_external_id,
+ aws_role_arn,
+ aws_session_name,
+ ctx->aws_region,
+ ctx->aws_sts_endpoint,
+ NULL,
+ flb_aws_client_generator());
+ /* Session name can be freed once provider is created */
+ flb_free(aws_session_name);
+ if (!ctx->aws_provider) {
+ flb_error("[out_es] Failed to create AWS STS Credential "
+ "Provider");
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+
+ }
+
+ /* initialize credentials in sync mode */
+ ctx->aws_provider->provider_vtable->sync(ctx->aws_provider);
+ ctx->aws_provider->provider_vtable->init(ctx->aws_provider);
+ /* set back to async */
+ ctx->aws_provider->provider_vtable->async(ctx->aws_provider);
+ ctx->aws_provider->provider_vtable->upstream_set(ctx->aws_provider, ctx->ins);
+ }
+ }
+#endif
+
+ return ctx;
+}
+
+int flb_es_conf_destroy(struct flb_elasticsearch *ctx)
+{
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+ if (ctx->ra_id_key) {
+ flb_ra_destroy(ctx->ra_id_key);
+ ctx->ra_id_key = NULL;
+ }
+ if (ctx->es_action) {
+ flb_free(ctx->es_action);
+ }
+
+#ifdef FLB_HAVE_AWS
+ if (ctx->base_aws_provider) {
+ flb_aws_provider_destroy(ctx->base_aws_provider);
+ }
+
+ if (ctx->aws_provider) {
+ flb_aws_provider_destroy(ctx->aws_provider);
+ }
+
+ if (ctx->aws_tls) {
+ flb_tls_destroy(ctx->aws_tls);
+ }
+
+ if (ctx->aws_sts_tls) {
+ flb_tls_destroy(ctx->aws_sts_tls);
+ }
+
+ if (ctx->aws_unsigned_headers) {
+ flb_slist_destroy(ctx->aws_unsigned_headers);
+ flb_free(ctx->aws_unsigned_headers);
+ }
+#endif
+
+ if (ctx->ra_prefix_key) {
+ flb_ra_destroy(ctx->ra_prefix_key);
+ }
+
+ flb_free(ctx->cloud_passwd);
+ flb_free(ctx->cloud_user);
+ flb_free(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/out_es/es_conf.h b/src/fluent-bit/plugins/out_es/es_conf.h
new file mode 100644
index 000000000..3c421becf
--- /dev/null
+++ b/src/fluent-bit/plugins/out_es/es_conf.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_ES_CONF_H
+#define FLB_OUT_ES_CONF_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_config.h>
+
+#include "es.h"
+
+struct flb_elasticsearch *flb_es_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+int flb_es_conf_destroy(struct flb_elasticsearch *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_es/murmur3.c b/src/fluent-bit/plugins/out_es/murmur3.c
new file mode 100644
index 000000000..8fccb6de7
--- /dev/null
+++ b/src/fluent-bit/plugins/out_es/murmur3.c
@@ -0,0 +1,314 @@
+//-----------------------------------------------------------------------------
+// MurmurHash3 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+// Note - The x86 and x64 versions do _not_ produce the same results, as the
+// algorithms are optimized for their respective platforms. You can still
+// compile and run any of them on any platform, but your performance with the
+// non-native version will be less than optimal.
+
+#include "murmur3.h"
+
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+
+#ifdef __GNUC__
+#define FORCE_INLINE __attribute__((always_inline)) inline
+#else
+#define FORCE_INLINE inline
+#endif
+
+static FORCE_INLINE uint32_t rotl32 ( uint32_t x, int8_t r )
+{
+ return (x << r) | (x >> (32 - r));
+}
+
+static FORCE_INLINE uint64_t rotl64 ( uint64_t x, int8_t r )
+{
+ return (x << r) | (x >> (64 - r));
+}
+
+#define ROTL32(x,y) rotl32(x,y)
+#define ROTL64(x,y) rotl64(x,y)
+
+#define BIG_CONSTANT(x) (x##LLU)
+
+//-----------------------------------------------------------------------------
+// Block read - if your platform needs to do endian-swapping or can only
+// handle aligned reads, do the conversion here
+
+#define getblock(p, i) (p[i])
+
+//-----------------------------------------------------------------------------
+// Finalization mix - force all bits of a hash block to avalanche
+
+static FORCE_INLINE uint32_t fmix32 ( uint32_t h )
+{
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
+
+ return h;
+}
+
+//----------
+
+static FORCE_INLINE uint64_t fmix64 ( uint64_t k )
+{
+ k ^= k >> 33;
+ k *= BIG_CONSTANT(0xff51afd7ed558ccd);
+ k ^= k >> 33;
+ k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
+ k ^= k >> 33;
+
+ return k;
+}
+
+//-----------------------------------------------------------------------------
+
+void MurmurHash3_x86_32 ( const void * key, int len,
+ uint32_t seed, void * out )
+{
+ const uint8_t * data = (const uint8_t*)key;
+ const int nblocks = len / 4;
+ int i;
+
+ uint32_t h1 = seed;
+
+ uint32_t c1 = 0xcc9e2d51;
+ uint32_t c2 = 0x1b873593;
+
+ //----------
+ // body
+
+ const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);
+
+ for(i = -nblocks; i; i++)
+ {
+ uint32_t k1 = getblock(blocks,i);
+
+ k1 *= c1;
+ k1 = ROTL32(k1,15);
+ k1 *= c2;
+
+ h1 ^= k1;
+ h1 = ROTL32(h1,13);
+ h1 = h1*5+0xe6546b64;
+ }
+
+ //----------
+ // tail
+
+ const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
+
+ uint32_t k1 = 0;
+
+ switch(len & 3)
+ {
+ case 3: k1 ^= tail[2] << 16;
+ case 2: k1 ^= tail[1] << 8;
+ case 1: k1 ^= tail[0];
+ k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+ };
+
+ //----------
+ // finalization
+
+ h1 ^= len;
+
+ h1 = fmix32(h1);
+
+ *(uint32_t*)out = h1;
+}
+
+//-----------------------------------------------------------------------------
+
+void MurmurHash3_x86_128 ( const void * key, const int len,
+ uint32_t seed, void * out )
+{
+ const uint8_t * data = (const uint8_t*)key;
+ const int nblocks = len / 16;
+ int i;
+
+ uint32_t h1 = seed;
+ uint32_t h2 = seed;
+ uint32_t h3 = seed;
+ uint32_t h4 = seed;
+
+ uint32_t c1 = 0x239b961b;
+ uint32_t c2 = 0xab0e9789;
+ uint32_t c3 = 0x38b34ae5;
+ uint32_t c4 = 0xa1e38b93;
+
+ //----------
+ // body
+
+ const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);
+
+ for(i = -nblocks; i; i++)
+ {
+ uint32_t k1 = getblock(blocks,i*4+0);
+ uint32_t k2 = getblock(blocks,i*4+1);
+ uint32_t k3 = getblock(blocks,i*4+2);
+ uint32_t k4 = getblock(blocks,i*4+3);
+
+ k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+
+ h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;
+
+ k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
+
+ h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;
+
+ k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
+
+ h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;
+
+ k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
+
+ h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
+ }
+
+ //----------
+ // tail
+
+ const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
+
+ uint32_t k1 = 0;
+ uint32_t k2 = 0;
+ uint32_t k3 = 0;
+ uint32_t k4 = 0;
+
+ switch(len & 15)
+ {
+ case 15: k4 ^= tail[14] << 16;
+ case 14: k4 ^= tail[13] << 8;
+ case 13: k4 ^= tail[12] << 0;
+ k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
+
+ case 12: k3 ^= tail[11] << 24;
+ case 11: k3 ^= tail[10] << 16;
+ case 10: k3 ^= tail[ 9] << 8;
+ case 9: k3 ^= tail[ 8] << 0;
+ k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
+
+ case 8: k2 ^= tail[ 7] << 24;
+ case 7: k2 ^= tail[ 6] << 16;
+ case 6: k2 ^= tail[ 5] << 8;
+ case 5: k2 ^= tail[ 4] << 0;
+ k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
+
+ case 4: k1 ^= tail[ 3] << 24;
+ case 3: k1 ^= tail[ 2] << 16;
+ case 2: k1 ^= tail[ 1] << 8;
+ case 1: k1 ^= tail[ 0] << 0;
+ k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+ };
+
+ //----------
+ // finalization
+
+ h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
+
+ h1 += h2; h1 += h3; h1 += h4;
+ h2 += h1; h3 += h1; h4 += h1;
+
+ h1 = fmix32(h1);
+ h2 = fmix32(h2);
+ h3 = fmix32(h3);
+ h4 = fmix32(h4);
+
+ h1 += h2; h1 += h3; h1 += h4;
+ h2 += h1; h3 += h1; h4 += h1;
+
+ ((uint32_t*)out)[0] = h1;
+ ((uint32_t*)out)[1] = h2;
+ ((uint32_t*)out)[2] = h3;
+ ((uint32_t*)out)[3] = h4;
+}
+
+//-----------------------------------------------------------------------------
+
+void MurmurHash3_x64_128 ( const void * key, const int len,
+ const uint32_t seed, void * out )
+{
+ const uint8_t * data = (const uint8_t*)key;
+ const int nblocks = len / 16;
+ int i;
+
+ uint64_t h1 = seed;
+ uint64_t h2 = seed;
+
+ uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
+ uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
+
+ //----------
+ // body
+
+ const uint64_t * blocks = (const uint64_t *)(data);
+
+ for(i = 0; i < nblocks; i++)
+ {
+ uint64_t k1 = getblock(blocks,i*2+0);
+ uint64_t k2 = getblock(blocks,i*2+1);
+
+ k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
+
+ h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;
+
+ k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
+
+ h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
+ }
+
+ //----------
+ // tail
+
+ const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
+
+ uint64_t k1 = 0;
+ uint64_t k2 = 0;
+
+ switch(len & 15)
+ {
+ case 15: k2 ^= (uint64_t)(tail[14]) << 48;
+ case 14: k2 ^= (uint64_t)(tail[13]) << 40;
+ case 13: k2 ^= (uint64_t)(tail[12]) << 32;
+ case 12: k2 ^= (uint64_t)(tail[11]) << 24;
+ case 11: k2 ^= (uint64_t)(tail[10]) << 16;
+ case 10: k2 ^= (uint64_t)(tail[ 9]) << 8;
+ case 9: k2 ^= (uint64_t)(tail[ 8]) << 0;
+ k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
+
+ case 8: k1 ^= (uint64_t)(tail[ 7]) << 56;
+ case 7: k1 ^= (uint64_t)(tail[ 6]) << 48;
+ case 6: k1 ^= (uint64_t)(tail[ 5]) << 40;
+ case 5: k1 ^= (uint64_t)(tail[ 4]) << 32;
+ case 4: k1 ^= (uint64_t)(tail[ 3]) << 24;
+ case 3: k1 ^= (uint64_t)(tail[ 2]) << 16;
+ case 2: k1 ^= (uint64_t)(tail[ 1]) << 8;
+ case 1: k1 ^= (uint64_t)(tail[ 0]) << 0;
+ k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
+ };
+
+ //----------
+ // finalization
+
+ h1 ^= len; h2 ^= len;
+
+ h1 += h2;
+ h2 += h1;
+
+ h1 = fmix64(h1);
+ h2 = fmix64(h2);
+
+ h1 += h2;
+ h2 += h1;
+
+ ((uint64_t*)out)[0] = h1;
+ ((uint64_t*)out)[1] = h2;
+}
+
+//-----------------------------------------------------------------------------
diff --git a/src/fluent-bit/plugins/out_es/murmur3.h b/src/fluent-bit/plugins/out_es/murmur3.h
new file mode 100644
index 000000000..c85395a14
--- /dev/null
+++ b/src/fluent-bit/plugins/out_es/murmur3.h
@@ -0,0 +1,29 @@
+//-----------------------------------------------------------------------------
+// MurmurHash3 was written by Austin Appleby, and is placed in the
+// public domain. The author hereby disclaims copyright to this source
+// code.
+
+#ifndef _MURMURHASH3_H_
+#define _MURMURHASH3_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//-----------------------------------------------------------------------------
+
+void MurmurHash3_x86_32 (const void *key, int len, uint32_t seed, void *out);
+
+void MurmurHash3_x86_128(const void *key, int len, uint32_t seed, void *out);
+
+void MurmurHash3_x64_128(const void *key, int len, uint32_t seed, void *out);
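+
+// Illustrative call (buffer, length and seed are arbitrary): hash a serialized
+// record into a 128-bit value, e.g. for document id generation:
+//
+//   uint64_t hash[2];
+//   MurmurHash3_x64_128(record_buf, record_len, 42, hash);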
+
+//-----------------------------------------------------------------------------
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // _MURMURHASH3_H_
\ No newline at end of file
diff --git a/src/fluent-bit/plugins/out_exit/CMakeLists.txt b/src/fluent-bit/plugins/out_exit/CMakeLists.txt
new file mode 100644
index 000000000..d9b7168e2
--- /dev/null
+++ b/src/fluent-bit/plugins/out_exit/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ exit.c)
+
+FLB_PLUGIN(out_exit "${src}" "")
diff --git a/src/fluent-bit/plugins/out_exit/exit.c b/src/fluent-bit/plugins/out_exit/exit.c
new file mode 100644
index 000000000..dc3532b2e
--- /dev/null
+++ b/src/fluent-bit/plugins/out_exit/exit.c
@@ -0,0 +1,108 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+
+#define FLB_EXIT_FLUSH_COUNT "1"
+
+struct flb_exit {
+ int is_running;
+ int count;
+
+ /* config */
+ int flush_count;
+};
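+
+/*
+ * Illustrative configuration (property names come from the config map below;
+ * 'match' is the generic Fluent Bit output selector):
+ *
+ *   [OUTPUT]
+ *       name        exit
+ *       match       *
+ *       flush_count 3
+ */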
+
+static int cb_exit_init(struct flb_output_instance *ins, struct flb_config *config,
+ void *data)
+{
+ int ret;
+ (void) config;
+ (void) data;
+ struct flb_exit *ctx;
+
+ ctx = flb_malloc(sizeof(struct flb_exit));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->count = 0;
+ ctx->is_running = FLB_TRUE;
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+}
+
+static void cb_exit_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ (void) i_ins;
+ (void) out_context;
+ struct flb_exit *ctx = out_context;
+
+ ctx->count++;
+ if (ctx->is_running == FLB_TRUE && ctx->count >= ctx->flush_count) {
+ flb_engine_exit(config);
+ ctx->is_running = FLB_FALSE;
+ }
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int cb_exit_exit(void *data, struct flb_config *config)
+{
+ struct flb_exit *ctx = data;
+ (void) config;
+
+ flb_free(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_INT, "flush_count", FLB_EXIT_FLUSH_COUNT,
+ 0, FLB_TRUE, offsetof(struct flb_exit, flush_count),
+ NULL
+ },
+
+ /* EOF */
+ {0}
+};
+
+struct flb_output_plugin out_exit_plugin = {
+ .name = "exit",
+ .description = "Exit after a number of flushes (test purposes)",
+ .cb_init = cb_exit_init,
+ .cb_flush = cb_exit_flush,
+ .cb_exit = cb_exit_exit,
+ .config_map = config_map,
+ .flags = 0,
+};
diff --git a/src/fluent-bit/plugins/out_file/CMakeLists.txt b/src/fluent-bit/plugins/out_file/CMakeLists.txt
new file mode 100644
index 000000000..8db7675a4
--- /dev/null
+++ b/src/fluent-bit/plugins/out_file/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ file.c)
+
+FLB_PLUGIN(out_file "${src}" "")
diff --git a/src/fluent-bit/plugins/out_file/file.c b/src/fluent-bit/plugins/out_file/file.c
new file mode 100644
index 000000000..d5f8a036a
--- /dev/null
+++ b/src/fluent-bit/plugins/out_file/file.c
@@ -0,0 +1,705 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_metrics.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <msgpack.h>
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#ifdef FLB_SYSTEM_WINDOWS
+#include <Shlobj.h>
+#include <Shlwapi.h>
+#endif
+
+#include "file.h"
+
+#ifdef FLB_SYSTEM_WINDOWS
+#define NEWLINE "\r\n"
+#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
+#else
+#define NEWLINE "\n"
+#endif
+
+struct flb_file_conf {
+ const char *out_path;
+ const char *out_file;
+ const char *delimiter;
+ const char *label_delimiter;
+ const char *template;
+ int format;
+ int csv_column_names;
+ int mkdir;
+ struct flb_output_instance *ins;
+};
+
+static char *check_delimiter(const char *str)
+{
+ if (str == NULL) {
+ return NULL;
+ }
+
+ if (!strcasecmp(str, "\\t") || !strcasecmp(str, "tab")) {
+ return "\t";
+ }
+ else if (!strcasecmp(str, "space")) {
+ return " ";
+ }
+ else if (!strcasecmp(str, "comma")) {
+ return ",";
+ }
+
+ return NULL;
+}
+
+
+static int cb_file_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ int ret;
+ const char *tmp;
+ char *ret_str;
+ (void) config;
+ (void) data;
+ struct flb_file_conf *ctx;
+
+ ctx = flb_calloc(1, sizeof(struct flb_file_conf));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+ ctx->format = FLB_OUT_FILE_FMT_JSON; /* default */
+ ctx->delimiter = NULL;
+ ctx->label_delimiter = NULL;
+ ctx->template = NULL;
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Optional, file format */
+ tmp = flb_output_get_property("Format", ins);
+ if (tmp) {
+ if (!strcasecmp(tmp, "csv")) {
+ ctx->format = FLB_OUT_FILE_FMT_CSV;
+ ctx->delimiter = ",";
+ }
+ else if (!strcasecmp(tmp, "ltsv")) {
+ ctx->format = FLB_OUT_FILE_FMT_LTSV;
+ ctx->delimiter = "\t";
+ ctx->label_delimiter = ":";
+ }
+ else if (!strcasecmp(tmp, "plain")) {
+ ctx->format = FLB_OUT_FILE_FMT_PLAIN;
+ ctx->delimiter = NULL;
+ ctx->label_delimiter = NULL;
+ }
+ else if (!strcasecmp(tmp, "msgpack")) {
+ ctx->format = FLB_OUT_FILE_FMT_MSGPACK;
+ ctx->delimiter = NULL;
+ ctx->label_delimiter = NULL;
+ }
+ else if (!strcasecmp(tmp, "template")) {
+ ctx->format = FLB_OUT_FILE_FMT_TEMPLATE;
+ }
+ else if (!strcasecmp(tmp, "out_file")) {
+ /* for explicit setting */
+ ctx->format = FLB_OUT_FILE_FMT_JSON;
+ }
+ else {
+ flb_plg_error(ctx->ins, "unknown format %s. abort.", tmp);
+ flb_free(ctx);
+ return -1;
+ }
+ }
+
+ tmp = flb_output_get_property("delimiter", ins);
+ ret_str = check_delimiter(tmp);
+ if (ret_str != NULL) {
+ ctx->delimiter = ret_str;
+ }
+
+ tmp = flb_output_get_property("label_delimiter", ins);
+ ret_str = check_delimiter(tmp);
+ if (ret_str != NULL) {
+ ctx->label_delimiter = ret_str;
+ }
+
+ /* Set the context */
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+}
+
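+/*
+ * Illustrative CSV output (approximate, record values are made up): for a
+ * record {"code": 200, "msg": "ok"} at time 1700000000 with the default ","
+ * delimiter and csv_column_names enabled, the lines written look roughly like:
+ *
+ *   timestamp,"code","msg"
+ *   1700000000.000000000,200,"ok"
+ */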
+static int csv_output(FILE *fp, int column_names,
+ struct flb_time *tm, msgpack_object *obj,
+ struct flb_file_conf *ctx)
+{
+ int i;
+ int map_size;
+ msgpack_object_kv *kv = NULL;
+
+ if (obj->type == MSGPACK_OBJECT_MAP && obj->via.map.size > 0) {
+ kv = obj->via.map.ptr;
+ map_size = obj->via.map.size;
+
+ if (column_names == FLB_TRUE) {
+ fprintf(fp, "timestamp%s", ctx->delimiter);
+ for (i = 0; i < map_size; i++) {
+ msgpack_object_print(fp, (kv+i)->key);
+ if (i + 1 < map_size) {
+ fprintf(fp, "%s", ctx->delimiter);
+ }
+ }
+ fprintf(fp, NEWLINE);
+ }
+
+ fprintf(fp, "%lld.%.09ld%s",
+ (long long) tm->tm.tv_sec, tm->tm.tv_nsec, ctx->delimiter);
+
+ for (i = 0; i < map_size - 1; i++) {
+ msgpack_object_print(fp, (kv+i)->val);
+ fprintf(fp, "%s", ctx->delimiter);
+ }
+
+ msgpack_object_print(fp, (kv+(map_size-1))->val);
+ fprintf(fp, NEWLINE);
+ }
+ return 0;
+}
+
+static int ltsv_output(FILE *fp, struct flb_time *tm, msgpack_object *obj,
+ struct flb_file_conf *ctx)
+{
+ msgpack_object_kv *kv = NULL;
+ int i;
+ int map_size;
+
+ if (obj->type == MSGPACK_OBJECT_MAP && obj->via.map.size > 0) {
+ kv = obj->via.map.ptr;
+ map_size = obj->via.map.size;
+ fprintf(fp, "\"time\"%s%f%s",
+ ctx->label_delimiter,
+ flb_time_to_double(tm),
+ ctx->delimiter);
+
+ for (i = 0; i < map_size - 1; i++) {
+ msgpack_object_print(fp, (kv+i)->key);
+ fprintf(fp, "%s", ctx->label_delimiter);
+ msgpack_object_print(fp, (kv+i)->val);
+ fprintf(fp, "%s", ctx->delimiter);
+ }
+
+ msgpack_object_print(fp, (kv+(map_size-1))->key);
+ fprintf(fp, "%s", ctx->label_delimiter);
+ msgpack_object_print(fp, (kv+(map_size-1))->val);
+ fprintf(fp, NEWLINE);
+ }
+ return 0;
+}
+
+static int template_output_write(struct flb_file_conf *ctx,
+ FILE *fp, struct flb_time *tm, msgpack_object *obj,
+ const char *key, int size)
+{
+ int i;
+ msgpack_object_kv *kv;
+
+ /*
+ * Right now we treat "{time}" specially and fill the placeholder
+ * with the metadata timestamp (formatted as float).
+ */
+ if (!strncmp(key, "time", size)) {
+ fprintf(fp, "%f", flb_time_to_double(tm));
+ return 0;
+ }
+
+ if (obj->type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "invalid object type (type=%i)", obj->type);
+ return -1;
+ }
+
+ for (i = 0; i < obj->via.map.size; i++) {
+ kv = obj->via.map.ptr + i;
+
+ if (size != kv->key.via.str.size) {
+ continue;
+ }
+
+ if (!memcmp(key, kv->key.via.str.ptr, size)) {
+ if (kv->val.type == MSGPACK_OBJECT_STR) {
+ fwrite(kv->val.via.str.ptr, 1, kv->val.via.str.size, fp);
+ }
+ else {
+ msgpack_object_print(fp, kv->val);
+ }
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/*
+ * Python-like string templating for out_file.
+ *
+ * This accepts a format string like "my name is {name}" and fills
+ * placeholders using corresponding values in a record.
+ *
+ * e.g. {"name":"Tom"} => "my name is Tom"
+ */
+static int template_output(FILE *fp, struct flb_time *tm, msgpack_object *obj,
+ struct flb_file_conf *ctx)
+{
+ int i;
+ int len = strlen(ctx->template);
+ int keysize;
+ const char *key;
+ const char *pos;
+ const char *inbrace = NULL; /* points to the last open brace */
+
+ for (i = 0; i < len; i++) {
+ pos = ctx->template + i;
+ if (*pos == '{') {
+ if (inbrace) {
+ /*
+ * This means that we find another open brace inside
+ * braces (e.g. "{a{b}"). Ignore the previous one.
+ */
+ fwrite(inbrace, 1, pos - inbrace, fp);
+ }
+ inbrace = pos;
+ }
+ else if (*pos == '}' && inbrace) {
+ key = inbrace + 1;
+ keysize = pos - inbrace - 1;
+
+ if (template_output_write(ctx, fp, tm, obj, key, keysize)) {
+ fwrite(inbrace, 1, pos - inbrace + 1, fp);
+ }
+ inbrace = NULL;
+ }
+ else {
+ if (!inbrace) {
+ fputc(*pos, fp);
+ }
+ }
+ }
+
+ /* Handle an unclosed brace like "{abc" */
+ if (inbrace) {
+ fputs(inbrace, fp);
+ }
+ fputs(NEWLINE, fp);
+ return 0;
+}
+
+
+static int plain_output(FILE *fp, msgpack_object *obj, size_t alloc_size)
+{
+ char *buf;
+
+ buf = flb_msgpack_to_json_str(alloc_size, obj);
+ if (buf) {
+ fprintf(fp, "%s" NEWLINE,
+ buf);
+ flb_free(buf);
+ }
+ return 0;
+}
+
+static void print_metrics_text(struct flb_output_instance *ins,
+ FILE *fp,
+ const void *data, size_t bytes)
+{
+ int ret;
+ size_t off = 0;
+ cfl_sds_t text;
+ struct cmt *cmt = NULL;
+
+ /* get cmetrics context */
+ ret = cmt_decode_msgpack_create(&cmt, (char *) data, bytes, &off);
+ if (ret != 0) {
+ flb_plg_error(ins, "could not process metrics payload");
+ return;
+ }
+
+ /* convert to text representation */
+ text = cmt_encode_text_create(cmt);
+
+ /* destroy cmt context */
+ cmt_destroy(cmt);
+
+ fprintf(fp, "%s", text);
+ cmt_encode_text_destroy(text);
+}
+
+static int mkpath(struct flb_output_instance *ins, const char *dir)
+{
+ struct stat st;
+ char *dup_dir = NULL;
+#ifdef FLB_SYSTEM_MACOS
+ char *parent_dir = NULL;
+#endif
+
+ int ret;
+
+ if (!dir) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (strlen(dir) == 0) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (stat(dir, &st) == 0) {
+ if (S_ISDIR (st.st_mode)) {
+ return 0;
+ }
+ flb_plg_error(ins, "%s is not a directory", dir);
+ errno = ENOTDIR;
+ return -1;
+ }
+
+#ifdef FLB_SYSTEM_WINDOWS
+ char path[MAX_PATH];
+
+ if (_fullpath(path, dir, MAX_PATH) == NULL) {
+ return -1;
+ }
+
+ if (SHCreateDirectoryExA(NULL, path, NULL) != ERROR_SUCCESS) {
+ return -1;
+ }
+ return 0;
+#elif FLB_SYSTEM_MACOS
+ dup_dir = strdup(dir);
+ if (!dup_dir) {
+ return -1;
+ }
+
+    /* macOS's dirname(3) should return the current directory when a slash
+     * character is not included in the passed string.
+     * Note that macOS's dirname(3) does not modify the passed string.
+ */
+ parent_dir = dirname(dup_dir);
+ if (stat(parent_dir, &st) == 0 && strncmp(parent_dir, ".", 1)) {
+ if (S_ISDIR (st.st_mode)) {
+ flb_plg_debug(ins, "creating directory %s", dup_dir);
+ ret = mkdir(dup_dir, 0755);
+ free(dup_dir);
+ return ret;
+ }
+ }
+
+ ret = mkpath(ins, dirname(dup_dir));
+ if (ret != 0) {
+ free(dup_dir);
+ return ret;
+ }
+ flb_plg_debug(ins, "creating directory %s", dup_dir);
+ ret = mkdir(dup_dir, 0755);
+ free(dup_dir);
+ return ret;
+#else
+ dup_dir = strdup(dir);
+ if (!dup_dir) {
+ return -1;
+ }
+ ret = mkpath(ins, dirname(dup_dir));
+ free(dup_dir);
+ if (ret != 0) {
+ return ret;
+ }
+ flb_plg_debug(ins, "creating directory %s", dir);
+ return mkdir(dir, 0755);
+#endif
+}
+
+static void cb_file_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ int column_names;
+ FILE * fp;
+ size_t off = 0;
+ size_t last_off = 0;
+ size_t alloc_size = 0;
+ size_t total;
+ char out_file[PATH_MAX];
+ char *buf;
+ long file_pos;
+ struct flb_file_conf *ctx = out_context;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ char* out_file_copy;
+
+ (void) config;
+
+ /* Set the right output file */
+ if (ctx->out_path) {
+ if (ctx->out_file) {
+ snprintf(out_file, PATH_MAX - 1, "%s/%s",
+ ctx->out_path, ctx->out_file);
+ }
+ else {
+ snprintf(out_file, PATH_MAX - 1, "%s/%s",
+ ctx->out_path, event_chunk->tag);
+ }
+ }
+ else {
+ if (ctx->out_file) {
+ snprintf(out_file, PATH_MAX - 1, "%s", ctx->out_file);
+ }
+ else {
+ snprintf(out_file, PATH_MAX - 1, "%s", event_chunk->tag);
+ }
+ }
+
+ /* Open output file with default name as the Tag */
+ fp = fopen(out_file, "ab+");
+ if (ctx->mkdir == FLB_TRUE && fp == NULL && errno == ENOENT) {
+ out_file_copy = strdup(out_file);
+ if (out_file_copy) {
+#ifdef FLB_SYSTEM_WINDOWS
+ PathRemoveFileSpecA(out_file_copy);
+ ret = mkpath(ctx->ins, out_file_copy);
+#else
+ ret = mkpath(ctx->ins, dirname(out_file_copy));
+#endif
+ free(out_file_copy);
+ if (ret == 0) {
+ fp = fopen(out_file, "ab+");
+ }
+ }
+ }
+ if (fp == NULL) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "error opening: %s", out_file);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ /*
+ * Get current file stream position, we gather this in case 'csv' format
+ * needs to write the column names.
+ */
+ file_pos = ftell(fp);
+
+ /* Check if the event type is metrics, handle the payload differently */
+ if (event_chunk->type == FLB_INPUT_METRICS) {
+ print_metrics_text(ctx->ins, fp,
+ event_chunk->data, event_chunk->size);
+ fclose(fp);
+ FLB_OUTPUT_RETURN(FLB_OK);
+ }
+
+ /*
+ * Msgpack output format used to create unit tests files, useful for
+ * Fluent Bit developers.
+ */
+ if (ctx->format == FLB_OUT_FILE_FMT_MSGPACK) {
+ off = 0;
+ total = 0;
+
+ do {
+ ret = fwrite((char *) event_chunk->data + off, 1,
+ event_chunk->size - off, fp);
+ if (ret < 0) {
+ flb_errno();
+ fclose(fp);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ total += ret;
+ } while (total < event_chunk->size);
+
+ fclose(fp);
+ FLB_OUTPUT_RETURN(FLB_OK);
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder,
+ (char *) event_chunk->data,
+ event_chunk->size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ fclose(fp);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ /*
+ * Upon flush, for each array, lookup the time and the first field
+ * of the map to use as a data point.
+ */
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ alloc_size = (off - last_off) + 128; /* JSON is larger than msgpack */
+ last_off = off;
+
+ switch (ctx->format){
+ case FLB_OUT_FILE_FMT_JSON:
+ buf = flb_msgpack_to_json_str(alloc_size, log_event.body);
+ if (buf) {
+ fprintf(fp, "%s: [%"PRIu64".%09lu, %s]" NEWLINE,
+ event_chunk->tag,
+ log_event.timestamp.tm.tv_sec, log_event.timestamp.tm.tv_nsec,
+ buf);
+ flb_free(buf);
+ }
+ else {
+ flb_log_event_decoder_destroy(&log_decoder);
+ fclose(fp);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ break;
+ case FLB_OUT_FILE_FMT_CSV:
+ if (ctx->csv_column_names == FLB_TRUE && file_pos == 0) {
+ column_names = FLB_TRUE;
+ file_pos = 1;
+ }
+ else {
+ column_names = FLB_FALSE;
+ }
+ csv_output(fp, column_names,
+ &log_event.timestamp,
+ log_event.body, ctx);
+ break;
+ case FLB_OUT_FILE_FMT_LTSV:
+ ltsv_output(fp,
+ &log_event.timestamp,
+ log_event.body, ctx);
+ break;
+ case FLB_OUT_FILE_FMT_PLAIN:
+ plain_output(fp, log_event.body, alloc_size);
+
+ break;
+ case FLB_OUT_FILE_FMT_TEMPLATE:
+ template_output(fp,
+ &log_event.timestamp,
+ log_event.body, ctx);
+
+ break;
+ }
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ fclose(fp);
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int cb_file_exit(void *data, struct flb_config *config)
+{
+ struct flb_file_conf *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ flb_free(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_file_conf, out_path),
+ "Absolute path to store the files. This parameter is optional"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "file", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_file_conf, out_file),
+ "Name of the target file to write the records. If 'path' is specified, "
+ "the value is prefixed"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "format", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify the output data format, the available options are: plain (json), "
+ "csv, ltsv and template. If no value is set the outgoing data is formatted "
+ "using the tag and the record in json"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "delimiter", NULL,
+ 0, FLB_FALSE, 0,
+ "Set a custom delimiter for the records"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "label_delimiter", NULL,
+ 0, FLB_FALSE, 0,
+ "Set a custom label delimiter, to be used with 'ltsv' format"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "template", "{time} {message}",
+ 0, FLB_TRUE, offsetof(struct flb_file_conf, template),
+ "Set a custom template format for the data"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "csv_column_names", "false",
+ 0, FLB_TRUE, offsetof(struct flb_file_conf, csv_column_names),
+ "Add column names (keys) in the first line of the target file"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "mkdir", "false",
+ 0, FLB_TRUE, offsetof(struct flb_file_conf, mkdir),
+ "Recursively create output directory if it does not exist. Permissions set to 0755"
+ },
+
+ /* EOF */
+ {0}
+};
+
+struct flb_output_plugin out_file_plugin = {
+ .name = "file",
+ .description = "Generate log file",
+ .cb_init = cb_file_init,
+ .cb_flush = cb_file_flush,
+ .cb_exit = cb_file_exit,
+ .flags = 0,
+ .workers = 1,
+ .event_type = FLB_OUTPUT_LOGS | FLB_OUTPUT_METRICS,
+ .config_map = config_map,
+};
diff --git a/src/fluent-bit/plugins/out_file/file.h b/src/fluent-bit/plugins/out_file/file.h
new file mode 100644
index 000000000..c04e51b5c
--- /dev/null
+++ b/src/fluent-bit/plugins/out_file/file.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_FILE
+#define FLB_OUT_FILE
+
+enum {
+ FLB_OUT_FILE_FMT_JSON,
+ FLB_OUT_FILE_FMT_CSV,
+ FLB_OUT_FILE_FMT_LTSV,
+ FLB_OUT_FILE_FMT_PLAIN,
+ FLB_OUT_FILE_FMT_MSGPACK,
+ FLB_OUT_FILE_FMT_TEMPLATE,
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_flowcounter/CMakeLists.txt b/src/fluent-bit/plugins/out_flowcounter/CMakeLists.txt
new file mode 100644
index 000000000..7699b196a
--- /dev/null
+++ b/src/fluent-bit/plugins/out_flowcounter/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ out_flowcounter.c)
+
+FLB_PLUGIN(out_flowcounter "${src}" "")
diff --git a/src/fluent-bit/plugins/out_flowcounter/out_flowcounter.c b/src/fluent-bit/plugins/out_flowcounter/out_flowcounter.c
new file mode 100644
index 000000000..1ed14636f
--- /dev/null
+++ b/src/fluent-bit/plugins/out_flowcounter/out_flowcounter.c
@@ -0,0 +1,297 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include <msgpack.h>
+
+#include "out_flowcounter.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <time.h>
+
+
+#define PLUGIN_NAME "out_flowcounter"
+
+static void count_initialized(struct flb_out_fcount_buffer* buf)
+{
+ buf->bytes = 0;
+ buf->counts = 0;
+}
+
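+/*
+ * A timestamp is valid if it is not older than the start of the current
+ * buffer window (until - tick).
+ */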
+static int time_is_valid(time_t t, struct flb_flowcounter *ctx)
+{
+ if (t < ctx->buf[ctx->index].until - ctx->tick) {
+ return FLB_FALSE;
+ }
+ return FLB_TRUE;
+}
+
+static int configure(struct flb_flowcounter *ctx,
+ struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int i;
+ time_t base = time(NULL);
+ const char* pval = NULL;
+
+ /* default */
+ ctx->unit = FLB_UNIT_MIN;
+ ctx->tick = 60;
+
+ pval = flb_output_get_property("unit", ins);
+ if (pval != NULL) {
+ /* check unit of duration */
+ if (!strcasecmp(pval, FLB_UNIT_SEC)) {
+ ctx->unit = FLB_UNIT_SEC;
+ ctx->tick = 1;
+ }
+ else if (!strcasecmp(pval, FLB_UNIT_HOUR)) {
+ ctx->unit = FLB_UNIT_HOUR;
+ ctx->tick = 3600;
+ }
+ else if(!strcasecmp(pval, FLB_UNIT_DAY)) {
+ ctx->unit = FLB_UNIT_DAY;
+ ctx->tick = 86400;
+ }
+ }
+
+ flb_plg_debug(ctx->ins, "unit is \"%s\"", ctx->unit);
+
+ /* initialize buffer */
+ ctx->size = (config->flush / ctx->tick) + 1;
+ flb_plg_debug(ctx->ins, "buffer size=%d", ctx->size);
+
+ ctx->index = 0;
+ ctx->buf = flb_malloc(sizeof(struct flb_out_fcount_buffer) * ctx->size);
+ if (!ctx->buf) {
+ flb_errno();
+ return -1;
+ }
+
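+    /* each slot covers one 'tick' long interval that ends at 'until' */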
+ for (i = 0; i < ctx->size; i++) {
+ ctx->buf[i].until = base + ctx->tick*i;
+ count_initialized(&ctx->buf[i]);
+ }
+
+ return 0;
+}
+
+static void output_fcount(FILE *f, struct flb_flowcounter *ctx,
+ struct flb_out_fcount_buffer *buf)
+{
+ fprintf(f,
+ "[%s] [%lu, {"
+ "\"counts\":%"PRIu64", "
+ "\"bytes\":%"PRIu64", "
+ "\"counts/%s\":%"PRIu64", "
+ "\"bytes/%s\":%"PRIu64" }"
+ "]\n",
+ PLUGIN_NAME, buf->until,
+ buf->counts,
+ buf->bytes,
+ ctx->unit, buf->counts/ctx->tick,
+ ctx->unit, buf->bytes/ctx->tick);
+ /* TODO filtering with tag? */
+}
+
+static void count_up(struct flb_log_event *log_event,
+ struct flb_out_fcount_buffer *ctx, uint64_t size)
+{
+ ctx->counts++;
+ ctx->bytes += size;
+
+    /* TODO: extract specific data from log_event */
+}
+
+static int out_fcount_init(struct flb_output_instance *ins, struct flb_config *config,
+ void *data)
+{
+ int ret;
+ (void) data;
+
+ struct flb_flowcounter *ctx = NULL;
+
+ ctx = flb_malloc(sizeof(struct flb_flowcounter));
+ if (ctx == NULL) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ ret = configure(ctx, ins, config);
+ if (ret < 0) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+}
+
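+/*
+ * Scan the ring buffer, starting at the current index, for the slot whose
+ * window contains 't' (0 <= until - t <= tick). Returns NULL when no slot
+ * matches.
+ */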
+static struct flb_out_fcount_buffer* seek_buffer(time_t t,
+ struct flb_flowcounter *ctx)
+{
+ int i = ctx->index;
+ int32_t diff;
+
+ while (1) {
+ diff = (int32_t) difftime(ctx->buf[i].until, t);
+ if (diff >= 0 && diff <= ctx->tick) {
+ return &ctx->buf[i];
+ }
+ i++;
+
+ if (i >= ctx->size) {
+ i = 0;
+ }
+
+ if (i == ctx->index) {
+ break;
+ }
+ }
+ return NULL;
+}
+
+
+
+static void out_fcount_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ struct flb_flowcounter *ctx = out_context;
+ struct flb_out_fcount_buffer *buf = NULL;
+ size_t off = 0;
+ time_t t;
+ uint64_t last_off = 0;
+ uint64_t byte_data = 0;
+ struct flb_time tm;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ (void) i_ins;
+ (void) config;
+
+ ret = flb_log_event_decoder_init(&log_decoder,
+ (char *) event_chunk->data,
+ event_chunk->size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ if (ctx->event_based) {
+ flb_time_copy(&tm, &log_event.timestamp);
+ }
+ else {
+ flb_time_get(&tm);
+ }
+ t = tm.tm.tv_sec;
+ if (time_is_valid(t, ctx) == FLB_FALSE) {
+ flb_plg_warn(ctx->ins, "out of range. Skip the record");
+ continue;
+ }
+
+ byte_data = (uint64_t)(off - last_off);
+ last_off = off;
+
+ buf = seek_buffer(t, ctx);
+
+ while (buf == NULL) {
+ /* flush buffer */
+ output_fcount(stdout, ctx, &ctx->buf[ctx->index]);
+ count_initialized(&ctx->buf[ctx->index]);
+ ctx->buf[ctx->index].until += ctx->tick * ctx->size;
+
+ ctx->index++;
+ if (ctx->index >= ctx->size) {
+ ctx->index = 0;
+ }
+ buf = seek_buffer(t, ctx);
+ }
+
+ if (buf != NULL) {
+ count_up(&log_event, buf, byte_data);
+ }
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int out_fcount_exit(void *data, struct flb_config* config)
+{
+ struct flb_flowcounter *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ flb_free(ctx->buf);
+ flb_free(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "unit", NULL,
+ 0, FLB_FALSE, 0,
+     "Specify the time unit of the counter: second, minute, hour or day (default: minute)"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "event_based", "false",
+ 0, FLB_TRUE, offsetof(struct flb_flowcounter, event_based),
+     "Use the record timestamp instead of the current time to select the counting window"
+ },
+
+ /* EOF */
+ {0}
+};
+
+struct flb_output_plugin out_flowcounter_plugin = {
+ .name = "flowcounter",
+ .description = "FlowCounter",
+ .cb_init = out_fcount_init,
+ .cb_flush = out_fcount_flush,
+ .cb_exit = out_fcount_exit,
+ .config_map = config_map,
+ .flags = 0,
+};
diff --git a/src/fluent-bit/plugins/out_flowcounter/out_flowcounter.h b/src/fluent-bit/plugins/out_flowcounter/out_flowcounter.h
new file mode 100644
index 000000000..de714cf9e
--- /dev/null
+++ b/src/fluent-bit/plugins/out_flowcounter/out_flowcounter.h
@@ -0,0 +1,49 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_FLOWCOUNTER
+#define FLB_OUT_FLOWCOUNTER
+
+#include <fluent-bit/flb_output.h>
+#include <stdint.h>
+
+#define FLB_UNIT_SEC "second"
+#define FLB_UNIT_MIN "minute"
+#define FLB_UNIT_HOUR "hour"
+#define FLB_UNIT_DAY "day"
+
+struct flb_out_fcount_buffer {
+ time_t until;
+ uint64_t counts;
+ uint64_t bytes;
+};
+
+struct flb_flowcounter {
+ char *unit;
+ int32_t tick;
+ int event_based;
+
+ struct flb_out_fcount_buffer *buf;
+ int index;
+ int size;
+
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_forward/CMakeLists.txt b/src/fluent-bit/plugins/out_forward/CMakeLists.txt
new file mode 100644
index 000000000..fd639c2eb
--- /dev/null
+++ b/src/fluent-bit/plugins/out_forward/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ forward.c
+ forward_format.c
+ )
+
+FLB_PLUGIN(out_forward "${src}" "")
diff --git a/src/fluent-bit/plugins/out_forward/README.md b/src/fluent-bit/plugins/out_forward/README.md
new file mode 100644
index 000000000..f08b45619
--- /dev/null
+++ b/src/fluent-bit/plugins/out_forward/README.md
@@ -0,0 +1,12 @@
+# Fluentd Forward Protocol Implementation
+
+This plugin is based on the Fluentd Forward Protocol Specification v1, available here:
+
+- https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1
+
+The following Event modes are implemented:
+
+- Message Mode
+- Forward Mode
+
+Depending on the configuration, the plugin decides whether to use Message Mode or Forward Mode; a minimal configuration example is shown below.
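+
+## Example configuration
+
+An illustrative classic-mode configuration (the host and port shown are the
+plugin defaults):
+
+```
+[OUTPUT]
+    Name   forward
+    Match  *
+    Host   127.0.0.1
+    Port   24224
+```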
diff --git a/src/fluent-bit/plugins/out_forward/forward.c b/src/fluent-bit/plugins/out_forward/forward.c
new file mode 100644
index 000000000..8cc2ca2cc
--- /dev/null
+++ b/src/fluent-bit/plugins/out_forward/forward.c
@@ -0,0 +1,1832 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_upstream_ha.h>
+#include <fluent-bit/flb_hash.h>
+#include <fluent-bit/flb_crypto.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_random.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_log_event.h>
+#include <msgpack.h>
+
+#include "forward.h"
+#include "forward_format.h"
+
+#ifdef FLB_HAVE_UNIX_SOCKET
+#include <sys/socket.h>
+#include <sys/un.h>
+#endif
+
+#define SECURED_BY "Fluent Bit"
+
+pthread_once_t uds_connection_tls_slot_init_once_control = PTHREAD_ONCE_INIT;
+FLB_TLS_DEFINE(struct flb_forward_uds_connection, uds_connection);
+
+void initialize_uds_connection_tls_slot()
+{
+ FLB_TLS_INIT(uds_connection);
+}
+
+#ifdef FLB_HAVE_UNIX_SOCKET
+static flb_sockfd_t forward_unix_connect(struct flb_forward_config *config,
+ struct flb_forward *ctx)
+{
+ flb_sockfd_t fd = -1;
+ struct sockaddr_un address;
+
+ if (sizeof(address.sun_path) <= flb_sds_len(config->unix_path)) {
+ flb_plg_error(ctx->ins, "unix_path is too long");
+ return -1;
+ }
+
+ memset(&address, 0, sizeof(struct sockaddr_un));
+
+ fd = flb_net_socket_create(AF_UNIX, FLB_FALSE);
+ if (fd < 0) {
+ flb_plg_error(ctx->ins, "flb_net_socket_create error");
+ return -1;
+ }
+
+ address.sun_family = AF_UNIX;
+ strncpy(address.sun_path, config->unix_path, flb_sds_len(config->unix_path));
+
+ if(connect(fd, (const struct sockaddr*) &address, sizeof(address)) < 0) {
+ flb_errno();
+ close(fd);
+
+ return -1;
+ }
+
+ return fd;
+}
+
+static flb_sockfd_t forward_uds_get_conn(struct flb_forward_config *config,
+ struct flb_forward *ctx)
+{
+ struct flb_forward_uds_connection *connection_entry;
+ flb_sockfd_t connection;
+
+ connection_entry = FLB_TLS_GET(uds_connection);
+
+ /* We need to allow the code to try to get the value from the TLS
+ * regardless of if it's provided with a config and context because
+ * when we establish the connection we do have both of them but those
+ * are not passed along to the functions in charge of doing IO.
+ */
+
+ if (connection_entry == NULL) {
+ if (config == NULL ||
+ ctx == NULL) {
+ return -1;
+ }
+
+ connection_entry = flb_calloc(1, sizeof(struct flb_forward_uds_connection));
+
+ if (connection_entry == NULL) {
+ flb_errno();
+
+ return -1;
+ }
+
+ connection = forward_unix_connect(config, ctx);
+
+ if (connection == -1) {
+ flb_free(connection_entry);
+
+ return -1;
+ }
+
+ connection_entry->descriptor = connection;
+
+ pthread_mutex_lock(&ctx->uds_connection_list_mutex);
+
+ cfl_list_add(&connection_entry->_head, &ctx->uds_connection_list);
+
+ pthread_mutex_unlock(&ctx->uds_connection_list_mutex);
+
+ FLB_TLS_SET(uds_connection, connection_entry);
+ }
+
+ return connection_entry->descriptor;
+}
+
+static void forward_uds_drop_conn(struct flb_forward *ctx,
+ flb_sockfd_t connection)
+{
+ struct flb_forward_uds_connection *connection_entry;
+
+ if (ctx != NULL) {
+ connection_entry = FLB_TLS_GET(uds_connection);
+
+ if (connection_entry != NULL) {
+ pthread_mutex_lock(&ctx->uds_connection_list_mutex);
+
+ if (connection == connection_entry->descriptor) {
+ close(connection);
+
+ if (!cfl_list_entry_is_orphan(&connection_entry->_head)) {
+ cfl_list_del(&connection_entry->_head);
+ }
+
+ free(connection_entry);
+
+ FLB_TLS_SET(uds_connection, NULL);
+ }
+
+ pthread_mutex_unlock(&ctx->uds_connection_list_mutex);
+ }
+ }
+}
+
+static void forward_uds_drop_all(struct flb_forward *ctx)
+{
+ struct flb_forward_uds_connection *connection_entry;
+ struct cfl_list *head;
+ struct cfl_list *tmp;
+
+ if (ctx != NULL) {
+ pthread_mutex_lock(&ctx->uds_connection_list_mutex);
+
+ cfl_list_foreach_safe(head, tmp, &ctx->uds_connection_list) {
+ connection_entry = cfl_list_entry(head,
+ struct flb_forward_uds_connection,
+ _head);
+
+ if (connection_entry->descriptor != -1) {
+ close(connection_entry->descriptor);
+
+ connection_entry->descriptor = -1;
+ }
+
+ if (!cfl_list_entry_is_orphan(&connection_entry->_head)) {
+ cfl_list_del(&connection_entry->_head);
+ }
+
+ free(connection_entry);
+ }
+
+ pthread_mutex_unlock(&ctx->uds_connection_list_mutex);
+ }
+}
+
+/* In these functions forward_uds_get_conn
+ * should not return -1 because it should have been
+ * called earlier with a proper context and it should
+ * have saved a file descriptor to the TLS.
+ */
+
+static int io_unix_write(struct flb_connection *unused, int deprecated_fd, const void* data,
+ size_t len, size_t *out_len)
+{
+ flb_sockfd_t uds_conn;
+
+ uds_conn = forward_uds_get_conn(NULL, NULL);
+
+ return flb_io_fd_write(uds_conn, data, len, out_len);
+}
+
+static int io_unix_read(struct flb_connection *unused, int deprecated_fd, void* buf, size_t len)
+{
+ flb_sockfd_t uds_conn;
+
+ uds_conn = forward_uds_get_conn(NULL, NULL);
+
+ return flb_io_fd_read(uds_conn, buf, len);
+}
+
+#else
+
+static flb_sockfd_t forward_uds_get_conn(struct flb_forward_config *config,
+ struct flb_forward *ctx)
+{
+ (void) config;
+ (void) ctx;
+
+ return -1;
+}
+
+static void forward_uds_drop_conn(struct flb_forward *ctx,
+ flb_sockfd_t connection)
+{
+ (void) ctx;
+ (void) connection;
+}
+
+static void forward_uds_drop_all(struct flb_forward *ctx)
+{
+ (void) ctx;
+}
+
+#endif
+
+#ifdef FLB_HAVE_TLS
+
+static int io_net_write(struct flb_connection *conn, int unused_fd,
+ const void* data, size_t len, size_t *out_len)
+{
+ return flb_io_net_write(conn, data, len, out_len);
+}
+
+static int io_net_read(struct flb_connection *conn, int unused_fd,
+ void* buf, size_t len)
+{
+ return flb_io_net_read(conn, buf, len);
+}
+
+static int secure_forward_init(struct flb_forward *ctx,
+ struct flb_forward_config *fc)
+{
+ return 0;
+}
+
+#endif
+
+static inline void print_msgpack_status(struct flb_forward *ctx,
+ int ret, char *context)
+{
+ switch (ret) {
+ case MSGPACK_UNPACK_EXTRA_BYTES:
+ flb_plg_error(ctx->ins, "%s MSGPACK_UNPACK_EXTRA_BYTES", context);
+ break;
+ case MSGPACK_UNPACK_CONTINUE:
+ flb_plg_trace(ctx->ins, "%s MSGPACK_UNPACK_CONTINUE", context);
+ break;
+ case MSGPACK_UNPACK_PARSE_ERROR:
+ flb_plg_error(ctx->ins, "%s MSGPACK_UNPACK_PARSE_ERROR", context);
+ break;
+ case MSGPACK_UNPACK_NOMEM_ERROR:
+ flb_plg_error(ctx->ins, "%s MSGPACK_UNPACK_NOMEM_ERROR", context);
+ break;
+ }
+}
+
+/* Read a secure forward msgpack message */
+static int secure_forward_read(struct flb_forward *ctx,
+ struct flb_connection *u_conn,
+ struct flb_forward_config *fc,
+ char *buf, size_t size, size_t *out_len)
+{
+ int ret;
+ size_t off;
+ size_t avail;
+ size_t buf_off = 0;
+ msgpack_unpacked result;
+
+ msgpack_unpacked_init(&result);
+ while (1) {
+ avail = size - buf_off;
+ if (avail < 1) {
+ goto error;
+ }
+
+ /* Read the message */
+ ret = fc->io_read(u_conn, fc->unix_fd, buf + buf_off, size - buf_off);
+ if (ret <= 0) {
+ goto error;
+ }
+ buf_off += ret;
+
+ /* Validate */
+ off = 0;
+ ret = msgpack_unpack_next(&result, buf, buf_off, &off);
+ switch (ret) {
+ case MSGPACK_UNPACK_SUCCESS:
+ msgpack_unpacked_destroy(&result);
+ *out_len = buf_off;
+ return 0;
+ default:
+ print_msgpack_status(ctx, ret, "handshake");
+ goto error;
+ };
+ }
+
+ error:
+ msgpack_unpacked_destroy(&result);
+ return -1;
+}
+
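+/*
+ * Parse the options map of the HELO message: extract 'nonce', 'auth' and the
+ * 'keepalive' flag (enabled by default, as per the specification).
+ */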
+static void secure_forward_set_ping(struct flb_forward_ping *ping,
+ msgpack_object *map)
+{
+ int i;
+ msgpack_object key;
+ msgpack_object val;
+ const char *ptr;
+ int len;
+
+ memset(ping, 0, sizeof(struct flb_forward_ping));
+ ping->keepalive = 1; /* default, as per spec */
+
+ for (i = 0; i < map->via.map.size; i++) {
+ key = map->via.map.ptr[i].key;
+ val = map->via.map.ptr[i].val;
+
+ ptr = key.via.str.ptr;
+ len = key.via.str.size;
+
+ if (len == 5 && memcmp(ptr, "nonce", len) == 0) {
+ ping->nonce = val.via.bin.ptr;
+ ping->nonce_len = val.via.bin.size;
+ }
+ else if (len == 4 && memcmp(ptr, "auth", len) == 0) {
+ ping->auth = val.via.bin.ptr;
+ ping->auth_len = val.via.bin.size;
+ }
+ else if (len == 9 && memcmp(ptr, "keepalive", len) == 0) {
+ ping->keepalive = val.via.boolean;
+ }
+ }
+}
+
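+/*
+ * Compute the hex-encoded SHA-512 digest of shared_key_salt + self_hostname +
+ * nonce + shared_key; 'buf' must provide at least 128 bytes.
+ */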
+static int secure_forward_hash_shared_key(struct flb_forward_config *fc,
+ struct flb_forward_ping *ping,
+ char *buf, int buflen)
+{
+ size_t length_entries[4];
+ unsigned char *data_entries[4];
+ uint8_t hash[64];
+ int result;
+
+ if (buflen < 128) {
+ return -1;
+ }
+
+ data_entries[0] = (unsigned char *) fc->shared_key_salt;
+ length_entries[0] = 16;
+
+ data_entries[1] = (unsigned char *) fc->self_hostname;
+ length_entries[1] = strlen(fc->self_hostname);
+
+ data_entries[2] = (unsigned char *) ping->nonce;
+ length_entries[2] = ping->nonce_len;
+
+ data_entries[3] = (unsigned char *) fc->shared_key;
+ length_entries[3] = strlen(fc->shared_key);
+
+ result = flb_hash_simple_batch(FLB_HASH_SHA512,
+ 4,
+ data_entries,
+ length_entries,
+ hash,
+ sizeof(hash));
+
+ if (result != FLB_CRYPTO_SUCCESS) {
+ return -1;
+ }
+
+ flb_forward_format_bin_to_hex(hash, 64, buf);
+
+ return 0;
+}
+
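+/*
+ * Compute the hex-encoded SHA-512 digest of the server 'auth' salt +
+ * username + password; 'buf' must provide at least 128 bytes.
+ */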
+static int secure_forward_hash_password(struct flb_forward_config *fc,
+ struct flb_forward_ping *ping,
+ char *buf, int buflen)
+{
+ size_t length_entries[3];
+ unsigned char *data_entries[3];
+ uint8_t hash[64];
+ int result;
+
+ if (buflen < 128) {
+ return -1;
+ }
+
+ data_entries[0] = (unsigned char *) ping->auth;
+ length_entries[0] = ping->auth_len;
+
+ data_entries[1] = (unsigned char *) fc->username;
+ length_entries[1] = strlen(fc->username);
+
+ data_entries[2] = (unsigned char *) fc->password;
+ length_entries[2] = strlen(fc->password);
+
+ result = flb_hash_simple_batch(FLB_HASH_SHA512,
+ 3,
+ data_entries,
+ length_entries,
+ hash,
+ sizeof(hash));
+
+ if (result != FLB_CRYPTO_SUCCESS) {
+ return -1;
+ }
+
+ flb_forward_format_bin_to_hex(hash, 64, buf);
+
+ return 0;
+}
+
+static int secure_forward_ping(struct flb_connection *u_conn,
+ msgpack_object map,
+ struct flb_forward_config *fc,
+ struct flb_forward *ctx)
+{
+ int ret;
+ size_t bytes_sent;
+ char shared_key_hexdigest[128];
+ char password_hexdigest[128];
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ struct flb_forward_ping ping;
+
+ secure_forward_set_ping(&ping, &map);
+
+ if (ping.nonce == NULL) {
+ flb_plg_error(ctx->ins, "nonce not found");
+ return -1;
+ }
+
+ if (secure_forward_hash_shared_key(fc, &ping, shared_key_hexdigest, 128)) {
+ flb_plg_error(ctx->ins, "failed to hash shared_key");
+ return -1;
+ }
+
+ if (ping.auth != NULL) {
+ if (secure_forward_hash_password(fc, &ping, password_hexdigest, 128)) {
+ flb_plg_error(ctx->ins, "failed to hash password");
+ return -1;
+ }
+ }
+
+ /* Prepare outgoing msgpack PING */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+ msgpack_pack_array(&mp_pck, 6);
+
+ /* [0] PING */
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "PING", 4);
+
+ /* [1] Hostname */
+ msgpack_pack_str(&mp_pck, flb_sds_len(fc->self_hostname));
+ msgpack_pack_str_body(&mp_pck, fc->self_hostname,
+ flb_sds_len(fc->self_hostname));
+
+ /* [2] Shared key salt */
+ msgpack_pack_str(&mp_pck, 16);
+ msgpack_pack_str_body(&mp_pck, fc->shared_key_salt, 16);
+
+ /* [3] Shared key in Hexdigest format */
+ msgpack_pack_str(&mp_pck, 128);
+ msgpack_pack_str_body(&mp_pck, shared_key_hexdigest, 128);
+
+ /* [4] Username and password (optional) */
+ if (ping.auth != NULL) {
+ msgpack_pack_str(&mp_pck, strlen(fc->username));
+ msgpack_pack_str_body(&mp_pck, fc->username, strlen(fc->username));
+ msgpack_pack_str(&mp_pck, 128);
+ msgpack_pack_str_body(&mp_pck, password_hexdigest, 128);
+ }
+ else {
+ msgpack_pack_str(&mp_pck, 0);
+ msgpack_pack_str_body(&mp_pck, "", 0);
+ msgpack_pack_str(&mp_pck, 0);
+ msgpack_pack_str_body(&mp_pck, "", 0);
+ }
+
+ ret = fc->io_write(u_conn, fc->unix_fd, mp_sbuf.data, mp_sbuf.size, &bytes_sent);
+ flb_plg_debug(ctx->ins, "PING sent: ret=%i bytes sent=%lu", ret, bytes_sent);
+
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ if (ret > -1 && bytes_sent > 0) {
+ return 0;
+ }
+
+ return -1;
+}
+
+static int secure_forward_pong(struct flb_forward *ctx, char *buf, int buf_size)
+{
+ int ret;
+ char msg[32] = {0};
+ size_t off = 0;
+ msgpack_unpacked result;
+ msgpack_object root;
+ msgpack_object o;
+
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, buf, buf_size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ return -1;
+ }
+
+ root = result.data;
+ if (root.type != MSGPACK_OBJECT_ARRAY) {
+ goto error;
+ }
+
+ if (root.via.array.size < 4) {
+ goto error;
+ }
+
+ o = root.via.array.ptr[0];
+ if (o.type != MSGPACK_OBJECT_STR) {
+ goto error;
+ }
+
+ if (strncmp(o.via.str.ptr, "PONG", 4) != 0 || o.via.str.size != 4) {
+ goto error;
+ }
+
+ o = root.via.array.ptr[1];
+ if (o.type != MSGPACK_OBJECT_BOOLEAN) {
+ goto error;
+ }
+
+ if (o.via.boolean) {
+ msgpack_unpacked_destroy(&result);
+ return 0;
+ }
+ else {
+ o = root.via.array.ptr[2];
+ memcpy(msg, o.via.str.ptr, o.via.str.size);
+ flb_plg_error(ctx->ins, "failed authorization: %s", msg);
+ }
+
+ error:
+ msgpack_unpacked_destroy(&result);
+ return -1;
+}
+
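+/*
+ * Perform the Secure Forward handshake: wait for HELO, reply with a PING that
+ * carries the hashed shared key (and credentials if requested) and validate
+ * the server PONG.
+ */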
+static int secure_forward_handshake(struct flb_connection *u_conn,
+ struct flb_forward_config *fc,
+ struct flb_forward *ctx)
+{
+ int ret;
+ char buf[1024];
+ size_t out_len;
+ size_t off;
+ msgpack_unpacked result;
+ msgpack_object root;
+ msgpack_object o;
+
+ /* Wait for server HELO */
+ ret = secure_forward_read(ctx, u_conn, fc, buf, sizeof(buf) - 1, &out_len);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "handshake error expecting HELO");
+ return -1;
+ }
+
+ /* Unpack message and validate */
+ off = 0;
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, buf, out_len, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ print_msgpack_status(ctx, ret, "HELO");
+ return -1;
+ }
+
+ /* Parse HELO message */
+ root = result.data;
+ if (root.via.array.size < 2) {
+ flb_plg_error(ctx->ins, "Invalid HELO message");
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ o = root.via.array.ptr[0];
+ if (o.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "Invalid HELO type message");
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ if (strncmp(o.via.str.ptr, "HELO", 4) != 0 || o.via.str.size != 4) {
+ flb_plg_error(ctx->ins, "Invalid HELO content message");
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "protocol: received HELO");
+
+ /* Compose and send PING message */
+ o = root.via.array.ptr[1];
+ ret = secure_forward_ping(u_conn, o, fc, ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Failed PING");
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ /* Expect a PONG */
+ ret = secure_forward_read(ctx, u_conn, fc, buf, sizeof(buf) - 1, &out_len);
+ if (ret == -1) {
+        flb_plg_error(ctx->ins, "handshake error expecting PONG");
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ /* Process PONG */
+ ret = secure_forward_pong(ctx, buf, out_len);
+ if (ret == -1) {
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ msgpack_unpacked_destroy(&result);
+ return 0;
+}
+
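+/*
+ * Wait for the server 'ack' response and verify that it matches the chunk id
+ * that was sent with the request.
+ */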
+static int forward_read_ack(struct flb_forward *ctx,
+ struct flb_forward_config *fc,
+ struct flb_connection *u_conn,
+ char *chunk, int chunk_len)
+{
+ int ret;
+ int i;
+ size_t out_len;
+ size_t off;
+ const char *ack;
+ size_t ack_len;
+ msgpack_unpacked result;
+ msgpack_object root;
+ msgpack_object_map map;
+ msgpack_object key;
+ msgpack_object val;
+ char buf[512]; /* ack should never be bigger */
+
+ flb_plg_trace(ctx->ins, "wait ACK (%.*s)", chunk_len, chunk);
+
+ /* Wait for server ACK */
+ ret = secure_forward_read(ctx, u_conn, fc, buf, sizeof(buf) - 1, &out_len);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "cannot get ack");
+ return -1;
+ }
+
+ /* Unpack message and validate */
+ off = 0;
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, buf, out_len, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ print_msgpack_status(ctx, ret, "ACK");
+ goto error;
+ }
+
+ /* Parse ACK message */
+ root = result.data;
+ if (root.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "ACK response not MAP (type:%d)", root.type);
+ goto error;
+ }
+
+ map = root.via.map;
+ ack = NULL;
+ /* Lookup ack field */
+ for (i = 0; i < map.size; i++) {
+ key = map.ptr[i].key;
+ if (key.via.str.size == 3 && strncmp(key.via.str.ptr, "ack", 3) == 0) {
+ val = map.ptr[i].val;
+ ack_len = val.via.str.size;
+ ack = val.via.str.ptr;
+ break;
+ }
+ }
+
+ if (!ack) {
+ flb_plg_error(ctx->ins, "ack: ack not found");
+ goto error;
+ }
+
+ if (ack_len != chunk_len) {
+ flb_plg_error(ctx->ins,
+ "ack: ack len does not match ack(%ld)(%.*s) chunk(%d)(%.*s)",
+ ack_len, (int) ack_len, ack,
+ chunk_len, (int) chunk_len, chunk);
+ goto error;
+ }
+
+ if (strncmp(ack, chunk, ack_len) != 0) {
+ flb_plg_error(ctx->ins, "ACK: mismatch received=%s, expected=(%.*s)",
+ ack, chunk_len, chunk);
+ goto error;
+ }
+
+ flb_plg_debug(ctx->ins, "protocol: received ACK %.*s", (int)ack_len, ack);
+ msgpack_unpacked_destroy(&result);
+ return 0;
+
+ error:
+ msgpack_unpacked_destroy(&result);
+ return -1;
+}
+
+
+static int forward_config_init(struct flb_forward_config *fc,
+ struct flb_forward *ctx)
+{
+ if (fc->io_read == NULL || fc->io_write == NULL) {
+ flb_plg_error(ctx->ins, "io_read/io_write is NULL");
+ return -1;
+ }
+
+#ifdef FLB_HAVE_TLS
+ /* Initialize Secure Forward mode */
+ if (fc->secured == FLB_TRUE) {
+ secure_forward_init(ctx, fc);
+ }
+#endif
+
+ /* Generate the shared key salt */
+ if (flb_random_bytes(fc->shared_key_salt, 16)) {
+ flb_plg_error(ctx->ins, "cannot generate shared key salt");
+ return -1;
+ }
+
+ mk_list_add(&fc->_head, &ctx->configs);
+ return 0;
+}
+
+static flb_sds_t config_get_property(char *prop,
+ struct flb_upstream_node *node,
+ struct flb_forward *ctx)
+{
+ if (node) {
+ return (flb_sds_t) flb_upstream_node_get_property(prop, node);
+ }
+ else {
+ return (flb_sds_t) flb_output_get_property(prop, ctx->ins);
+ }
+}
+
+static int config_set_properties(struct flb_upstream_node *node,
+ struct flb_forward_config *fc,
+ struct flb_forward *ctx)
+{
+ flb_sds_t tmp;
+
+ /* Shared Key */
+ tmp = config_get_property("empty_shared_key", node, ctx);
+ if (tmp && flb_utils_bool(tmp)) {
+ fc->empty_shared_key = FLB_TRUE;
+ }
+ else {
+ fc->empty_shared_key = FLB_FALSE;
+ }
+
+ tmp = config_get_property("shared_key", node, ctx);
+ if (fc->empty_shared_key) {
+ fc->shared_key = flb_sds_create("");
+ }
+ else if (tmp) {
+ fc->shared_key = flb_sds_create(tmp);
+ }
+ else {
+ fc->shared_key = NULL;
+ }
+
+ tmp = config_get_property("username", node, ctx);
+ if (tmp) {
+ fc->username = tmp;
+ }
+ else {
+ fc->username = "";
+ }
+
+ tmp = config_get_property("password", node, ctx);
+ if (tmp) {
+ fc->password = tmp;
+ }
+ else {
+ fc->password = "";
+ }
+
+ /* Self Hostname */
+ tmp = config_get_property("self_hostname", node, ctx);
+ if (tmp) {
+ fc->self_hostname = flb_sds_create(tmp);
+ }
+ else {
+ fc->self_hostname = flb_sds_create("localhost");
+ }
+
+ /* Backward compatible timing mode */
+ tmp = config_get_property("time_as_integer", node, ctx);
+ if (tmp) {
+ fc->time_as_integer = flb_utils_bool(tmp);
+ }
+ else {
+ fc->time_as_integer = FLB_FALSE;
+ }
+
+ /* send always options (with size) */
+ tmp = config_get_property("send_options", node, ctx);
+ if (tmp) {
+ fc->send_options = flb_utils_bool(tmp);
+ }
+
+ /* add_option -> extra_options: if the user has defined 'add_option'
+ * we need to enable the 'send_options' flag
+ */
+ if (fc->extra_options && mk_list_size(fc->extra_options) > 0) {
+ fc->send_options = FLB_TRUE;
+ }
+
+ /* require ack response (implies send_options) */
+ tmp = config_get_property("require_ack_response", node, ctx);
+ if (tmp) {
+ fc->require_ack_response = flb_utils_bool(tmp);
+ if (fc->require_ack_response) {
+ fc->send_options = FLB_TRUE;
+ }
+ }
+
+ /* Tag Overwrite */
+ tmp = config_get_property("tag", node, ctx);
+ if (tmp) {
+ /* Set the tag */
+ fc->tag = flb_sds_create(tmp);
+ if (!fc->tag) {
+ flb_plg_error(ctx->ins, "cannot allocate tag");
+ return -1;
+ }
+
+#ifdef FLB_HAVE_RECORD_ACCESSOR
+ /* Record Accessor */
+ fc->ra_tag = flb_ra_create(fc->tag, FLB_TRUE);
+ if (!fc->ra_tag) {
+ flb_plg_error(ctx->ins, "cannot create record accessor for tag: %s",
+ fc->tag);
+ return -1;
+ }
+
+ /* Static record accessor ? (no dynamic values from map) */
+ fc->ra_static = flb_ra_is_static(fc->ra_tag);
+#endif
+ }
+ else {
+ fc->tag = NULL;
+
+ }
+
+ /* compress (implies send_options) */
+ tmp = config_get_property("compress", node, ctx);
+ if (tmp) {
+ if (!strcasecmp(tmp, "text")) {
+ fc->compress = COMPRESS_NONE;
+ }
+ else if (!strcasecmp(tmp, "gzip")) {
+ fc->compress = COMPRESS_GZIP;
+ fc->send_options = FLB_TRUE;
+ }
+ else {
+ flb_plg_error(ctx->ins, "invalid compress mode: %s", tmp);
+ return -1;
+ }
+ }
+ else {
+ fc->compress = COMPRESS_NONE;
+ }
+
+ if (fc->compress != COMPRESS_NONE && fc->time_as_integer == FLB_TRUE) {
+ flb_plg_error(ctx->ins, "compress mode %s is incompatible with "
+ "time_as_integer", tmp);
+ return -1;
+ }
+
+#ifdef FLB_HAVE_RECORD_ACCESSOR
+ if (fc->compress != COMPRESS_NONE &&
+ (fc->ra_tag && fc->ra_static == FLB_FALSE) ) {
+ flb_plg_error(ctx->ins, "compress mode %s is incompatible with dynamic "
+ "tags", tmp);
+ return -1;
+ }
+#endif
+
+ return 0;
+}
+
+static void forward_config_destroy(struct flb_forward_config *fc)
+{
+ flb_sds_destroy(fc->shared_key);
+ flb_sds_destroy(fc->self_hostname);
+ flb_sds_destroy(fc->tag);
+
+#ifdef FLB_HAVE_RECORD_ACCESSOR
+ if (fc->ra_tag) {
+ flb_ra_destroy(fc->ra_tag);
+ }
+#endif
+
+ flb_free(fc);
+}
+
+/* Configure in HA mode */
+static int forward_config_ha(const char *upstream_file,
+ struct flb_forward *ctx,
+ struct flb_config *config)
+{
+ int ret;
+ struct mk_list *head;
+ struct flb_upstream_node *node;
+ struct flb_forward_config *fc = NULL;
+
+ ctx->ha_mode = FLB_TRUE;
+ ctx->ha = flb_upstream_ha_from_file(upstream_file, config);
+ if (!ctx->ha) {
+ flb_plg_error(ctx->ins, "cannot load Upstream file");
+ return -1;
+ }
+
+ /* Iterate nodes and create a forward_config context */
+ mk_list_foreach(head, &ctx->ha->nodes) {
+ node = mk_list_entry(head, struct flb_upstream_node, _head);
+
+ /* create forward_config context */
+ fc = flb_calloc(1, sizeof(struct flb_forward_config));
+ if (!fc) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "failed config allocation");
+ continue;
+ }
+ fc->unix_fd = -1;
+ fc->secured = FLB_FALSE;
+ fc->io_write = io_net_write;
+ fc->io_read = io_net_read;
+
+ /* Is TLS enabled ? */
+ if (node->tls_enabled == FLB_TRUE) {
+ fc->secured = FLB_TRUE;
+ }
+
+ /* Read properties into 'fc' context */
+ config_set_properties(node, fc, ctx);
+
+ /* Initialize and validate forward_config context */
+ ret = forward_config_init(fc, ctx);
+ if (ret == -1) {
+ if (fc) {
+ forward_config_destroy(fc);
+ }
+ return -1;
+ }
+
+ /* Set our forward_config context into the node */
+ flb_upstream_node_set_data(fc, node);
+ }
+
+ flb_output_upstream_ha_set(ctx->ha, ctx->ins);
+
+ return 0;
+}
+
+static int forward_config_simple(struct flb_forward *ctx,
+ struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ int io_flags;
+ struct flb_forward_config *fc = NULL;
+ struct flb_upstream *upstream;
+
+ /* Set default network configuration if not set */
+ flb_output_net_default("127.0.0.1", 24224, ins);
+
+ /* Configuration context */
+ fc = flb_calloc(1, sizeof(struct flb_forward_config));
+ if (!fc) {
+ flb_errno();
+ return -1;
+ }
+ fc->unix_fd = -1;
+ fc->secured = FLB_FALSE;
+ fc->io_write = NULL;
+ fc->io_read = NULL;
+
+ /* Set default values */
+ ret = flb_output_config_map_set(ins, fc);
+ if (ret == -1) {
+ flb_free(fc);
+ return -1;
+ }
+
+ /* Check if TLS is enabled */
+#ifdef FLB_HAVE_TLS
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ fc->secured = FLB_TRUE;
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ }
+#else
+ io_flags = FLB_IO_TCP;
+#endif
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ if (fc->unix_path) {
+#ifdef FLB_HAVE_UNIX_SOCKET
+ /* In older versions if the UDS server was not up
+ * at this point fluent-bit would fail because it
+         * would not be able to establish the connection.
+ *
+ * With the concurrency fixes we moved the connection
+ * to a later stage which will cause fluent-bit to
+ * properly launch but if the UDS server is not
+ * available at flush time then an error similar to
+ * the one we would get for a network based output
+ * plugin will be logged and FLB_RETRY will be returned.
+ */
+
+ fc->io_write = io_unix_write;
+ fc->io_read = io_unix_read;
+#else
+ flb_plg_error(ctx->ins, "unix_path is not supported");
+ flb_free(fc);
+ flb_free(ctx);
+ return -1;
+#endif /* FLB_HAVE_UNIX_SOCKET */
+ }
+ else {
+ /* Prepare an upstream handler */
+ upstream = flb_upstream_create(config,
+ ins->host.name,
+ ins->host.port,
+ io_flags, ins->tls);
+ if (!upstream) {
+ flb_free(fc);
+ flb_free(ctx);
+ return -1;
+ }
+ fc->io_write = io_net_write;
+ fc->io_read = io_net_read;
+ ctx->u = upstream;
+ flb_output_upstream_set(ctx->u, ins);
+ }
+ /* Read properties into 'fc' context */
+ config_set_properties(NULL, fc, ctx);
+
+ /* Initialize and validate forward_config context */
+ ret = forward_config_init(fc, ctx);
+ if (ret == -1) {
+ if (fc) {
+ forward_config_destroy(fc);
+ }
+ return -1;
+ }
+     * regardless of whether it's provided with a config and context, because
+     * when we establish the connection we do have both of them, but they
+     * are not passed along to the functions in charge of doing IO.
+
+static int cb_forward_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ const char *tmp;
+ struct flb_forward *ctx;
+ (void) data;
+
+ ctx = flb_calloc(1, sizeof(struct flb_forward));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ ret = pthread_once(&uds_connection_tls_slot_init_once_control,
+ initialize_uds_connection_tls_slot);
+
+ if (ret != 0) {
+ flb_errno();
+ flb_free(ctx);
+
+ return -1;
+ }
+
+ ret = pthread_mutex_init(&ctx->uds_connection_list_mutex, NULL);
+
+ if (ret != 0) {
+ flb_errno();
+ flb_free(ctx);
+
+ return -1;
+ }
+
+ cfl_list_init(&ctx->uds_connection_list);
+
+ ctx->ins = ins;
+ mk_list_init(&ctx->configs);
+ flb_output_set_context(ins, ctx);
+
+
+ /* Configure HA or simple mode ? */
+ tmp = flb_output_get_property("upstream", ins);
+ if (tmp) {
+ ret = forward_config_ha(tmp, ctx, config);
+ }
+ else {
+ ret = forward_config_simple(ctx, ins, config);
+ }
+
+ return ret;
+}
+
+struct flb_forward_config *flb_forward_target(struct flb_forward *ctx,
+ struct flb_upstream_node **node)
+{
+ struct flb_forward_config *fc = NULL;
+ struct flb_upstream_node *f_node;
+
+ if (ctx->ha_mode == FLB_TRUE) {
+ f_node = flb_upstream_ha_node_get(ctx->ha);
+ if (!f_node) {
+ return NULL;
+ }
+
+ /* Get forward_config stored in node opaque data */
+ fc = flb_upstream_node_get_data(f_node);
+ *node = f_node;
+ }
+ else {
+ fc = mk_list_entry_first(&ctx->configs,
+ struct flb_forward_config,
+ _head);
+ *node = NULL;
+ }
+ return fc;
+}
+
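+/*
+ * Message mode: every record was already packed by the formatter as an
+ * individual [tag, time, record, options] array; write them one by one and,
+ * when 'require_ack_response' is set, wait for the ack of each chunk.
+ */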
+static int flush_message_mode(struct flb_forward *ctx,
+ struct flb_forward_config *fc,
+ struct flb_connection *u_conn,
+ char *buf, size_t size)
+{
+ int ret;
+ int ok = MSGPACK_UNPACK_SUCCESS;
+ size_t sent = 0;
+ size_t rec_size;
+ size_t pre = 0;
+ size_t off = 0;
+ msgpack_object root;
+ msgpack_object options;
+ msgpack_object chunk;
+ msgpack_unpacked result;
+
+ /* If the sender requires 'ack' from the remote end-point */
+ if (fc->require_ack_response) {
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result, buf, size, &off) == ok) {
+ /* get the record size */
+ rec_size = off - pre;
+
+ /* write single message */
+            ret = fc->io_write(u_conn, fc->unix_fd,
+ buf + pre, rec_size, &sent);
+ pre = off;
+
+ if (ret == -1) {
+ /*
+ * FIXME: we might take advantage of 'flush_ctx' and store the
+                 * message that failed its delivery; we could have retries but with
+ * the flush context.
+ */
+ flb_plg_error(ctx->ins, "message_mode: error sending message");
+ msgpack_unpacked_destroy(&result);
+ return FLB_RETRY;
+ }
+
+            /* Successful delivery, now get message 'chunk' and wait for it */
+ root = result.data;
+ options = root.via.array.ptr[3];
+ chunk = options.via.map.ptr[0].val;
+
+ /* Read ACK */
+ ret = forward_read_ack(ctx, fc, u_conn,
+ (char *) chunk.via.str.ptr, chunk.via.str.size);
+ if (ret == -1) {
+ msgpack_unpacked_destroy(&result);
+ return FLB_RETRY;
+ }
+ }
+
+ /* All good */
+ msgpack_unpacked_destroy(&result);
+ return FLB_OK;
+ }
+
+ /* Normal data write */
+ ret = fc->io_write(u_conn, fc->unix_fd, buf, size, &sent);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "message_mode: error sending data");
+ return FLB_RETRY;
+ }
+
+ return FLB_OK;
+}
+
+/* pack payloads of cmetrics or ctraces with Fluentd compat format */
+static int pack_metricses_payload(msgpack_packer *mp_pck, const void *data, size_t bytes) {
+ int entries;
+ struct flb_time tm;
+
+ /* Format with event stream format of entries: [[time, [{entries map}]]] */
+ msgpack_pack_array(mp_pck, 1);
+ msgpack_pack_array(mp_pck, 2);
+ flb_time_get(&tm);
+ flb_time_append_to_msgpack(&tm, mp_pck, 0);
+ entries = flb_mp_count(data, bytes);
+ msgpack_pack_array(mp_pck, entries);
+
+ return 0;
+}
+
+#include <fluent-bit/flb_pack.h>
+/*
+ * Forward Mode: this is the generic mechanism used in Fluent Bit, it takes
+ * advantage of the internal data representation and avoids re-formatting data;
+ * it only sends a msgpack header, pre-existent 'data' records and options.
+ *
+ * note: if the user has enabled time_as_integer (compat mode for Fluentd <= 0.12),
+ * the 'flush_forward_compat_mode' is used instead.
+ */
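+
+/*
+ * For log events without compression, the resulting frame looks like:
+ *
+ *   [ tag, [[time, record], [time, record], ...], {options} ]
+ *
+ * where the trailing options map is written only when 'send_options' is
+ * enabled.
+ */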
+static int flush_forward_mode(struct flb_forward *ctx,
+ struct flb_forward_config *fc,
+ struct flb_connection *u_conn,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ char *opts_buf, size_t opts_size)
+{
+ int ret;
+ int entries;
+ int send_options;
+ size_t off = 0;
+ size_t bytes_sent;
+ msgpack_object root;
+ msgpack_object chunk;
+ msgpack_unpacked result;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ void *final_data;
+ size_t final_bytes;
+ char *transcoded_buffer;
+ size_t transcoded_length;
+
+ transcoded_buffer = NULL;
+ transcoded_length = 0;
+
+ /* Pack message header */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ send_options = fc->send_options;
+ if (event_type == FLB_EVENT_TYPE_METRICS || event_type == FLB_EVENT_TYPE_TRACES) {
+ send_options = FLB_TRUE;
+ }
+ msgpack_pack_array(&mp_pck, send_options ? 3 : 2);
+
+ /* Tag */
+ flb_forward_format_append_tag(ctx, fc, &mp_pck, NULL, tag, tag_len);
+
+ if (!fc->fwd_retain_metadata && event_type == FLB_EVENT_TYPE_LOGS) {
+ ret = flb_forward_format_transcode(ctx, FLB_LOG_EVENT_FORMAT_FORWARD,
+ (char *) data, bytes,
+ &transcoded_buffer,
+ &transcoded_length);
+
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "could not transcode entries");
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return FLB_RETRY;
+ }
+ }
+
+ if (fc->compress == COMPRESS_GZIP) {
+ /* When compress is set, we switch from using Forward mode to using
+ * CompressedPackedForward mode.
+ */
+
+ if (transcoded_buffer != NULL) {
+ ret = flb_gzip_compress((void *) transcoded_buffer,
+ transcoded_length,
+ &final_data,
+ &final_bytes);
+ }
+ else {
+ ret = flb_gzip_compress((void *) data, bytes, &final_data, &final_bytes);
+ }
+
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not compress entries");
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ if (transcoded_buffer != NULL) {
+ flb_free(transcoded_buffer);
+ }
+
+ return FLB_RETRY;
+ }
+
+ msgpack_pack_bin(&mp_pck, final_bytes);
+ }
+ else {
+ if (transcoded_buffer != NULL) {
+ final_data = (void *) transcoded_buffer;
+ final_bytes = transcoded_length;
+ }
+ else {
+ final_data = (void *) data;
+ final_bytes = bytes;
+ }
+
+ if (event_type == FLB_EVENT_TYPE_LOGS) {
+ /* for log events we create an array for the serialized messages */
+ entries = flb_mp_count(data, bytes);
+ msgpack_pack_array(&mp_pck, entries);
+ }
+ else {
+ /* FLB_EVENT_TYPE_METRICS and FLB_EVENT_TYPE_TRACES */
+ if (fc->fluentd_compat) {
+ pack_metricses_payload(&mp_pck, data, bytes);
+ }
+ else {
+ msgpack_pack_bin(&mp_pck, final_bytes);
+ }
+ }
+ }
+
+ /* Write message header */
+ ret = fc->io_write(u_conn, fc->unix_fd, mp_sbuf.data, mp_sbuf.size, &bytes_sent);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not write forward header");
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ if (fc->compress == COMPRESS_GZIP) {
+ flb_free(final_data);
+ }
+
+ if (transcoded_buffer != NULL) {
+ flb_free(transcoded_buffer);
+ }
+
+ return FLB_RETRY;
+ }
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ /* Write msgpack content / entries */
+ ret = fc->io_write(u_conn, fc->unix_fd, final_data, final_bytes, &bytes_sent);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not write forward entries");
+ if (fc->compress == COMPRESS_GZIP) {
+ flb_free(final_data);
+ }
+
+ if (transcoded_buffer != NULL) {
+ flb_free(transcoded_buffer);
+ }
+
+ return FLB_RETRY;
+ }
+
+ if (fc->compress == COMPRESS_GZIP) {
+ flb_free(final_data);
+ }
+
+ if (transcoded_buffer != NULL) {
+ flb_free(transcoded_buffer);
+ }
+
+ /* Write options */
+ if (send_options == FLB_TRUE) {
+ ret = fc->io_write(u_conn, fc->unix_fd, opts_buf, opts_size, &bytes_sent);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not write forward options");
+ return FLB_RETRY;
+ }
+ }
+
+ /* If the sender requires 'ack' from the remote end-point */
+ if (fc->require_ack_response) {
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, opts_buf, opts_size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+        /* Successful delivery, now get message 'chunk' and wait for it */
+ root = result.data;
+
+ /* 'chunk' is always in the first key of the map */
+ chunk = root.via.map.ptr[0].val;
+
+ /* Read ACK */
+ ret = forward_read_ack(ctx, fc, u_conn,
+ (char *) chunk.via.str.ptr, chunk.via.str.size);
+ if (ret == -1) {
+ msgpack_unpacked_destroy(&result);
+ return FLB_RETRY;
+ }
+
+ /* All good */
+ msgpack_unpacked_destroy(&result);
+ return FLB_OK;
+ }
+
+ return FLB_OK;
+}
+
+/*
+ * Forward Mode Compat: data is packaged in Forward mode but the timestamps are
+ * integers (compat mode for Fluentd <= 0.12).
+ */
+static int flush_forward_compat_mode(struct flb_forward *ctx,
+ struct flb_forward_config *fc,
+ struct flb_connection *u_conn,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes)
+{
+ int ret;
+ size_t off = 0;
+ size_t bytes_sent;
+ msgpack_object root;
+ msgpack_object chunk;
+ msgpack_object map; /* dummy parameter */
+ msgpack_unpacked result;
+
+ /* Write message header */
+ ret = fc->io_write(u_conn, fc->unix_fd, data, bytes, &bytes_sent);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not write forward compat mode records");
+ return FLB_RETRY;
+ }
+
+ /* If the sender requires 'ack' from the remote end-point */
+ if (fc->require_ack_response) {
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, data, bytes, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+        /* Successful delivery, now get message 'chunk' and wait for it */
+ root = result.data;
+
+ map = root.via.array.ptr[2];
+
+ /* 'chunk' is always in the first key of the map */
+ chunk = map.via.map.ptr[0].val;
+
+ /* Read ACK */
+ ret = forward_read_ack(ctx, fc, u_conn,
+ (char *) chunk.via.str.ptr, chunk.via.str.size);
+ if (ret == -1) {
+ msgpack_unpacked_destroy(&result);
+ return FLB_RETRY;
+ }
+
+ /* All good */
+ msgpack_unpacked_destroy(&result);
+ return FLB_OK;
+ }
+
+ return FLB_OK;
+}
+
+static void cb_forward_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret = -1;
+ int mode;
+ msgpack_packer mp_pck;
+ msgpack_sbuffer mp_sbuf;
+ void *out_buf = NULL;
+ size_t out_size = 0;
+ struct flb_forward *ctx = out_context;
+ struct flb_forward_config *fc = NULL;
+ struct flb_connection *u_conn = NULL;
+ struct flb_upstream_node *node = NULL;
+ struct flb_forward_flush *flush_ctx;
+ flb_sockfd_t uds_conn;
+
+ (void) i_ins;
+ (void) config;
+
+ fc = flb_forward_target(ctx, &node);
+ if (!fc) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ flb_plg_debug(ctx->ins, "request %lu bytes to flush",
+ event_chunk->size);
+
+ /* Initialize packager */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ /*
+ * Flush context: structure used to pass custom information to the
+ * formatter function.
+ */
+ flush_ctx = flb_calloc(1, sizeof(struct flb_forward_flush));
+ if (!flush_ctx) {
+ flb_errno();
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ flush_ctx->fc = fc;
+
+ /* Format the right payload and retrieve the 'forward mode' used */
+ mode = flb_forward_format(config, i_ins, ctx, flush_ctx,
+ event_chunk->type,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ event_chunk->data, event_chunk->size,
+ &out_buf, &out_size);
+
+ /* Get a TCP connection instance */
+ if (fc->unix_path == NULL) {
+ if (ctx->ha_mode == FLB_TRUE) {
+ u_conn = flb_upstream_conn_get(node->u);
+ }
+ else {
+ u_conn = flb_upstream_conn_get(ctx->u);
+ }
+
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "no upstream connections available");
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ if (fc->time_as_integer == FLB_TRUE) {
+ flb_free(out_buf);
+ }
+ flb_free(flush_ctx);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ uds_conn = -1;
+ }
+ else {
+ uds_conn = forward_uds_get_conn(fc, ctx);
+
+ if (uds_conn == -1) {
+ flb_plg_error(ctx->ins, "no unix socket connection available");
+
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ if (fc->time_as_integer == FLB_TRUE) {
+ flb_free(out_buf);
+ }
+ flb_free(flush_ctx);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+        /* This is a hack: the rest of the code is written to use the
+         * shared forward config 'unix_fd' field, so at this point we need
+         * to ensure that we either have a working connection or can
+         * establish one, even though the descriptor is not passed along.
+ *
+ * Later on we will get the file descriptor from the TLS.
+ */
+ }
+
+ /*
+     * Shared Key: if ka_count > 0, the handshake was already performed on this keepalive connection
+ */
+ if (fc->shared_key && u_conn->ka_count == 0) {
+ ret = secure_forward_handshake(u_conn, fc, ctx);
+ flb_plg_debug(ctx->ins, "handshake status = %i", ret);
+ if (ret == -1) {
+ if (u_conn) {
+ flb_upstream_conn_release(u_conn);
+ }
+
+ if (uds_conn != -1) {
+ forward_uds_drop_conn(ctx, uds_conn);
+ }
+
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ if (fc->time_as_integer == FLB_TRUE) {
+ flb_free(out_buf);
+ }
+ flb_free(flush_ctx);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ }
+
+ /*
+     * Note about the mode used for the different types of events/messages:
+     *
+     * - Logs can be sent using MODE_MESSAGE, MODE_FORWARD
+     *   or MODE_FORWARD_COMPAT.
+     *
+     * - Metrics and Traces use MODE_FORWARD only.
+ */
+
+ if (mode == MODE_MESSAGE) {
+ ret = flush_message_mode(ctx, fc, u_conn, out_buf, out_size);
+ flb_free(out_buf);
+ }
+ else if (mode == MODE_FORWARD) {
+ ret = flush_forward_mode(ctx, fc, u_conn,
+ event_chunk->type,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ event_chunk->data, event_chunk->size,
+ out_buf, out_size);
+ flb_free(out_buf);
+ }
+ else if (mode == MODE_FORWARD_COMPAT) {
+ ret = flush_forward_compat_mode(ctx, fc, u_conn,
+ event_chunk->tag,
+ flb_sds_len(event_chunk->tag),
+ out_buf, out_size);
+ flb_free(out_buf);
+ }
+
+ if (u_conn) {
+ flb_upstream_conn_release(u_conn);
+ }
+
+ if (ret != FLB_OK) {
+ /* Since UDS connections have been used as permanent
+ * connections up to this point we only release the
+ * connection in case of error.
+ *
+ * There could be a logical error in here but what
+ * I think at the moment is, if something goes wrong
+ * we can just drop the connection and let the worker
+ * establish a new one the next time a flush happens.
+ */
+
+ if (uds_conn != -1) {
+ forward_uds_drop_conn(ctx, uds_conn);
+ }
+ }
+
+ flb_free(flush_ctx);
+ FLB_OUTPUT_RETURN(ret);
+}
+
+static int cb_forward_exit(void *data, struct flb_config *config)
+{
+ struct flb_forward *ctx = data;
+ struct flb_forward_config *fc;
+ struct mk_list *head;
+ struct mk_list *tmp;
+ (void) config;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ /* Destroy forward_config contexts */
+ mk_list_foreach_safe(head, tmp, &ctx->configs) {
+ fc = mk_list_entry(head, struct flb_forward_config, _head);
+
+ mk_list_del(&fc->_head);
+ forward_config_destroy(fc);
+ }
+
+ forward_uds_drop_all(ctx);
+
+ if (ctx->ha_mode == FLB_TRUE) {
+ if (ctx->ha) {
+ flb_upstream_ha_destroy(ctx->ha);
+ }
+ }
+ else {
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+ }
+
+ pthread_mutex_destroy(&ctx->uds_connection_list_mutex);
+
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_BOOL, "time_as_integer", "false",
+ 0, FLB_TRUE, offsetof(struct flb_forward_config, time_as_integer),
+ "Set timestamp in integer format (compat mode for old Fluentd v0.12)"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "retain_metadata_in_forward_mode", "false",
+ 0, FLB_TRUE, offsetof(struct flb_forward_config, fwd_retain_metadata),
+ "Retain metadata when operating in forward mode"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "shared_key", NULL,
+ 0, FLB_FALSE, 0,
+ "Shared key for authentication"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "self_hostname", NULL,
+ 0, FLB_FALSE, 0,
+ "Hostname"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "empty_shared_key", "false",
+ 0, FLB_TRUE, offsetof(struct flb_forward_config, empty_shared_key),
+ "Set an empty shared key for authentication"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "send_options", "false",
+ 0, FLB_TRUE, offsetof(struct flb_forward_config, send_options),
+ "Send 'forward protocol options' to remote endpoint"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "require_ack_response", "false",
+ 0, FLB_TRUE, offsetof(struct flb_forward_config, require_ack_response),
+ "Require that remote endpoint confirms data reception"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "username", "",
+ 0, FLB_TRUE, offsetof(struct flb_forward_config, username),
+ "Username for authentication"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "password", "",
+ 0, FLB_TRUE, offsetof(struct flb_forward_config, password),
+ "Password for authentication"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "unix_path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_forward_config, unix_path),
+ "Path to unix socket. It is ignored when 'upstream' property is set"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "upstream", NULL,
+ 0, FLB_FALSE, 0,
+ "Path to 'upstream' configuration file (define multiple nodes)"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "tag", NULL,
+ 0, FLB_FALSE, 0,
+ "Set a custom Tag for the outgoing records"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "compress", NULL,
+ 0, FLB_FALSE, 0,
+ "Compression mode"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "fluentd_compat", "false",
+ 0, FLB_TRUE, offsetof(struct flb_forward_config, fluentd_compat),
+ "Send metrics and traces with Fluentd compatible format"
+ },
+
+ {
+ FLB_CONFIG_MAP_SLIST_2, "add_option", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_forward_config, extra_options),
+     "Set an extra Forward protocol option. This is an advanced feature; use it only for "
+     "very specific use cases."
+ },
+
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_output_plugin out_forward_plugin = {
+ .name = "forward",
+ .description = "Forward (Fluentd protocol)",
+
+ /* Callbacks */
+ .cb_init = cb_forward_init,
+ .cb_pre_run = NULL,
+ .cb_flush = cb_forward_flush,
+ .cb_exit = cb_forward_exit,
+ .workers = 2,
+
+ /* Config map validator */
+ .config_map = config_map,
+
+ /* Test */
+ .test_formatter.callback = flb_forward_format,
+
+ /* Flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+
+ /* Event types */
+ .event_type = FLB_OUTPUT_LOGS | FLB_OUTPUT_METRICS | FLB_OUTPUT_TRACES
+};
diff --git a/src/fluent-bit/plugins/out_forward/forward.h b/src/fluent-bit/plugins/out_forward/forward.h
new file mode 100644
index 000000000..8e77e6e11
--- /dev/null
+++ b/src/fluent-bit/plugins/out_forward/forward.h
@@ -0,0 +1,146 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_FORWARD
+#define FLB_OUT_FORWARD
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_upstream_ha.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_connection.h>
+#include <fluent-bit/flb_pthread.h>
+#include <cfl/cfl_list.h>
+
+/*
+ * Forward modes
+ * =============
+ */
+
+/*
+ * Message mode
+ * ------------
+ * https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1#message-modes
+ */
+#define MODE_MESSAGE 0
+
+/*
+ * Forward mode
+ * ------------
+ * https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1#forward-mode
+ */
+#define MODE_FORWARD 1
+
+/*
+ * Forward Compat: similar to MODE_FORWARD, but it sends the timestamps as unsigned
+ * integers for compatibility with very old versions of Fluentd that don't have timestamps
+ * with nanoseconds. This mode only applies to Logs.
+ */
+#define MODE_FORWARD_COMPAT 3
+
+/* Compression options */
+#define COMPRESS_NONE 0
+#define COMPRESS_GZIP 1
+
+/*
+ * Configuration: we put this separate from the main
+ * context so every Upstream Node can have its own configuration
+ * reference and pass it smoothly to the required caller.
+ *
+ * On simple mode (no HA), the structure is referenced
+ * by flb_forward->config. In HA mode the structure is referenced
+ * by the Upstream node context as an opaque data type.
+ */
+struct flb_forward_config {
+ int secured; /* Using Secure Forward mode ? */
+ int compress; /* Using compression ? */
+ int time_as_integer; /* Use backward compatible timestamp ? */
+ int fluentd_compat; /* Use Fluentd compatible payload for
+ * metrics and ctraces */
+
+ /* add extra options to the Forward payload (advanced) */
+ struct mk_list *extra_options;
+
+ int fwd_retain_metadata; /* Do not drop metadata in forward mode */
+
+ /* config */
+ flb_sds_t shared_key; /* shared key */
+ flb_sds_t self_hostname; /* hostname used in certificate */
+ flb_sds_t tag; /* Overwrite tag on forward */
+ int empty_shared_key; /* use an empty string as shared key */
+ int require_ack_response; /* Require acknowledge for "chunk" */
+ int send_options; /* send options in messages */
+ flb_sds_t unix_path; /* unix socket path */
+ int unix_fd;
+
+ const char *username;
+ const char *password;
+
+ /* mbedTLS specifics */
+ unsigned char shared_key_salt[16];
+
+#ifdef FLB_HAVE_RECORD_ACCESSOR
+ struct flb_record_accessor *ra_tag; /* Tag Record accessor */
+ int ra_static; /* Is the record accessor static ? */
+#endif
+ int (*io_write)(struct flb_connection* conn, int fd, const void* data,
+ size_t len, size_t *out_len);
+ int (*io_read)(struct flb_connection* conn, int fd, void* buf, size_t len);
+ struct mk_list _head; /* Link to list flb_forward->configs */
+};
+
+struct flb_forward_uds_connection {
+ flb_sockfd_t descriptor;
+    struct cfl_list _head;           /* Link to list flb_forward->uds_connection_list */
+};
+
+/* Plugin Context */
+struct flb_forward {
+ /* if HA mode is enabled */
+ int ha_mode; /* High Availability mode enabled ? */
+ char *ha_upstream; /* Upstream configuration file */
+ struct flb_upstream_ha *ha;
+
+ struct cfl_list uds_connection_list;
+ pthread_mutex_t uds_connection_list_mutex;
+
+ /* Upstream handler and config context for single mode (no HA) */
+ struct flb_upstream *u;
+ struct mk_list configs;
+ struct flb_output_instance *ins;
+};
+
+struct flb_forward_ping {
+ const char *nonce;
+ int nonce_len;
+ const char *auth;
+ int auth_len;
+ int keepalive;
+};
+
+/* Flush callback context */
+struct flb_forward_flush {
+ struct flb_forward_config *fc;
+ char checksum_hex[33];
+};
+
+struct flb_forward_config *flb_forward_target(struct flb_forward *ctx,
+ struct flb_upstream_node **node);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_forward/forward_format.c b/src/fluent-bit/plugins/out_forward/forward_format.c
new file mode 100644
index 000000000..48dedd862
--- /dev/null
+++ b/src/fluent-bit/plugins/out_forward/forward_format.c
@@ -0,0 +1,640 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_mp.h>
+#include <fluent-bit/flb_hash.h>
+#include <fluent-bit/flb_crypto.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include "forward.h"
+
+void flb_forward_format_bin_to_hex(uint8_t *buf, size_t len, char *out)
+{
+ int i;
+ static char map[] = "0123456789abcdef";
+
+ for (i = 0; i < len; i++) {
+ out[i * 2] = map[buf[i] >> 4];
+ out[i * 2 + 1] = map[buf[i] & 0x0f];
+ }
+}
+
+int flb_forward_format_append_tag(struct flb_forward *ctx,
+ struct flb_forward_config *fc,
+ msgpack_packer *mp_pck,
+ msgpack_object *map,
+ const char *tag, int tag_len)
+{
+#ifdef FLB_HAVE_RECORD_ACCESSOR
+ flb_sds_t tmp;
+ msgpack_object m;
+
+ memset(&m, 0, sizeof(m));
+
+ if (!fc->ra_tag) {
+ msgpack_pack_str(mp_pck, tag_len);
+ msgpack_pack_str_body(mp_pck, tag, tag_len);
+ return 0;
+ }
+
+ if (map) {
+ m = *map;
+ }
+
+ /* Tag */
+ tmp = flb_ra_translate(fc->ra_tag, (char *) tag, tag_len, m, NULL);
+ if (!tmp) {
+ flb_plg_warn(ctx->ins, "Tag translation failed, using default Tag");
+ msgpack_pack_str(mp_pck, tag_len);
+ msgpack_pack_str_body(mp_pck, tag, tag_len);
+ }
+ else {
+ msgpack_pack_str(mp_pck, flb_sds_len(tmp));
+ msgpack_pack_str_body(mp_pck, tmp, flb_sds_len(tmp));
+ flb_sds_destroy(tmp);
+ }
+#else
+ msgpack_pack_str(mp_pck, tag_len);
+ msgpack_pack_str_body(mp_pck, tag, tag_len);
+
+#endif
+
+ return 0;
+}
+
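+/*
+ * Example of the options map built below (values are illustrative; the
+ * exact keys emitted depend on the configuration and event type):
+ *
+ *   {"chunk": "f1d2...9ab0", "size": 3, "compressed": "gzip",
+ *    "fluent_signal": <event type>}
+ *
+ * 'chunk' is the 32-char hex prefix of the SHA-512 checksum of the payload
+ * and is what the remote end acknowledges when require_ack_response is on.
+ */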
+static int append_options(struct flb_forward *ctx,
+ struct flb_forward_config *fc,
+ int event_type,
+ msgpack_packer *mp_pck,
+ int entries, void *data, size_t bytes,
+ msgpack_object *metadata,
+ char *out_chunk)
+{
+ char *chunk = NULL;
+ uint8_t checksum[64];
+ int result;
+ struct mk_list *head;
+ struct flb_config_map_val *mv;
+ struct flb_mp_map_header mh;
+ struct flb_slist_entry *eopt_key;
+ struct flb_slist_entry *eopt_val;
+
+ /* options is map, use the dynamic map type */
+ flb_mp_map_header_init(&mh, mp_pck);
+
+ if (fc->require_ack_response == FLB_TRUE) {
+ /*
+ * for ack we calculate sha512 of context, take 16 bytes,
+ * make 32 byte hex string of it
+ */
+ result = flb_hash_simple(FLB_HASH_SHA512,
+ data, bytes,
+ checksum, sizeof(checksum));
+
+ if (result != FLB_CRYPTO_SUCCESS) {
+ return -1;
+ }
+
+ flb_forward_format_bin_to_hex(checksum, 16, out_chunk);
+
+ out_chunk[32] = '\0';
+ chunk = (char *) out_chunk;
+ }
+
+    /* "chunk": '<checksum-hex>' */
+ if (chunk) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(mp_pck, 5);
+ msgpack_pack_str_body(mp_pck, "chunk", 5);
+ msgpack_pack_str(mp_pck, 32);
+ msgpack_pack_str_body(mp_pck, out_chunk, 32);
+ }
+
+ /* "size": entries */
+ if (entries > 0) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(mp_pck, 4);
+ msgpack_pack_str_body(mp_pck, "size", 4);
+ msgpack_pack_int64(mp_pck, entries);
+ }
+
+ /* "compressed": "gzip" */
+ if (entries > 0 && /* not message mode */
+ fc->time_as_integer == FLB_FALSE && /* not compat mode */
+ fc->compress == COMPRESS_GZIP) {
+
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(mp_pck, 10);
+ msgpack_pack_str_body(mp_pck, "compressed", 10);
+ msgpack_pack_str(mp_pck, 4);
+ msgpack_pack_str_body(mp_pck, "gzip", 4);
+ }
+
+ /* event type (FLB_EVENT_TYPE_LOGS, FLB_EVENT_TYPE_METRICS, FLB_EVENT_TYPE_TRACES) */
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(mp_pck, 13);
+ msgpack_pack_str_body(mp_pck, "fluent_signal", 13);
+ msgpack_pack_int64(mp_pck, event_type);
+
+ /* process 'extra_option(s)' */
+ if (fc->extra_options) {
+ flb_config_map_foreach(head, mv, fc->extra_options) {
+ eopt_key = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ eopt_val = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(mp_pck, flb_sds_len(eopt_key->str));
+ msgpack_pack_str_body(mp_pck, eopt_key->str, flb_sds_len(eopt_key->str));
+ msgpack_pack_str(mp_pck, flb_sds_len(eopt_val->str));
+ msgpack_pack_str_body(mp_pck, eopt_val->str, flb_sds_len(eopt_val->str));
+ }
+ }
+
+ if (metadata != NULL &&
+ metadata->type == MSGPACK_OBJECT_MAP &&
+ metadata->via.map.size > 0) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str_with_body(mp_pck, "metadata", 8);
+ msgpack_pack_object(mp_pck, *metadata);
+ }
+
+ flb_mp_map_header_end(&mh);
+
+ flb_plg_debug(ctx->ins,
+ "send options records=%d chunk='%s'",
+ entries, out_chunk ? out_chunk : "NULL");
+ return 0;
+}
+
+#ifdef FLB_HAVE_RECORD_ACCESSOR
+/*
+ * Forward Protocol: Message Mode
+ * ------------------------------
+ * This mode is only used if the Tag is dynamically composed using some
+ * content of the records.
+ *
+ * [
+ * "TAG",
+ * TIMESTAMP,
+ * RECORD/MAP,
+ * *OPTIONS*
+ * ]
+ *
+ */
+static int flb_forward_format_message_mode(struct flb_forward *ctx,
+ struct flb_forward_config *fc,
+ struct flb_forward_flush *ff,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_buf, size_t *out_size)
+{
+ int entries = 0;
+ size_t pre = 0;
+ size_t off = 0;
+ size_t record_size;
+ char *chunk;
+ char chunk_buf[33];
+ msgpack_packer mp_pck;
+ msgpack_sbuffer mp_sbuf;
+ struct flb_time tm;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ /*
+     * Our only reason to use Message Mode is that the user wants to generate
+     * dynamic Tags based on record content.
+ */
+ if (!fc->ra_tag) {
+ return -1;
+ }
+
+ /*
+     * In this case, we need to compose a new outgoing buffer instead
+     * of using the original one.
+ */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ flb_time_copy(&tm, &log_event.timestamp);
+
+ /* Prepare main array: tag, timestamp and record/map */
+ msgpack_pack_array(&mp_pck, 4);
+
+ /* Generate dynamic Tag or use default one */
+ flb_forward_format_append_tag(ctx, fc, &mp_pck,
+ log_event.body,
+ tag, tag_len);
+
+ /* Pack timestamp */
+ if (fc->time_as_integer == FLB_TRUE) {
+ flb_time_append_to_msgpack(&log_event.timestamp,
+ &mp_pck,
+ FLB_TIME_ETFMT_INT);
+ }
+ else {
+ flb_time_append_to_msgpack(&log_event.timestamp,
+ &mp_pck,
+ FLB_TIME_ETFMT_V1_FIXEXT);
+ }
+
+ /* Pack records */
+ msgpack_pack_object(&mp_pck, *log_event.body);
+
+ record_size = off - pre;
+
+ if (ff) {
+ chunk = ff->checksum_hex;
+ }
+ else {
+ chunk = chunk_buf;
+ }
+
+ append_options(ctx, fc, FLB_EVENT_TYPE_LOGS, &mp_pck, 0,
+ (char *) data + pre, record_size,
+ log_event.metadata,
+ chunk);
+
+ pre = off;
+ entries++;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ *out_buf = mp_sbuf.data;
+ *out_size = mp_sbuf.size;
+
+ return entries;
+}
+#endif
+
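+/*
+ * Re-encode a raw msgpack buffer of log events into the requested log event
+ * format, preserving the timestamp, metadata and body of every record.
+ */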
+int flb_forward_format_transcode(
+ struct flb_forward *ctx, int format,
+ char *input_buffer, size_t input_length,
+ char **output_buffer, size_t *output_length)
+{
+ struct flb_log_event_encoder log_encoder;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int result;
+
+ result = flb_log_event_decoder_init(&log_decoder, input_buffer, input_length);
+
+ if (result != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", result);
+
+ return -1;
+ }
+
+ result = flb_log_event_encoder_init(&log_encoder, format);
+
+ if (result != FLB_EVENT_ENCODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event encoder initialization error : %d", result);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return -1;
+ }
+
+ while ((result = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+
+ result = flb_log_event_encoder_begin_record(&log_encoder);
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_timestamp(
+ &log_encoder, &log_event.timestamp);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_metadata_from_msgpack_object(
+ &log_encoder,
+ log_event.metadata);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_set_body_from_msgpack_object(
+ &log_encoder,
+ log_event.body);
+ }
+
+ if (result == FLB_EVENT_ENCODER_SUCCESS) {
+ result = flb_log_event_encoder_commit_record(&log_encoder);
+ }
+ }
+
+ if (log_encoder.output_length > 0) {
+ *output_buffer = log_encoder.output_buffer;
+ *output_length = log_encoder.output_length;
+
+ flb_log_event_encoder_claim_internal_buffer_ownership(&log_encoder);
+
+ result = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins,
+ "Log event encoder error : %d", result);
+
+ result = -1;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_log_event_encoder_destroy(&log_encoder);
+
+ return result;
+}
+
+/*
+ * Forward Protocol: Forward Mode
+ * ------------------------------
+ * In forward mode we don't format the serialized entries. We just compose
+ * the outgoing 'options'.
+ */
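+/*
+ * Note: only the trailing {options} map is packed here; the tag and the
+ * serialized [TIMESTAMP, RECORD] entries are reused from the incoming
+ * msgpack buffer by the caller.
+ */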
+static int flb_forward_format_forward_mode(struct flb_forward *ctx,
+ struct flb_forward_config *fc,
+ struct flb_forward_flush *ff,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_buf, size_t *out_size)
+{
+ int result;
+ int entries = 0;
+ char *chunk;
+ char chunk_buf[33];
+ msgpack_packer mp_pck;
+ msgpack_sbuffer mp_sbuf;
+ char *transcoded_buffer;
+ size_t transcoded_length;
+
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ if (ff) {
+ chunk = ff->checksum_hex;
+ }
+ else {
+ chunk = chunk_buf;
+ }
+
+ if (fc->send_options == FLB_TRUE || (event_type == FLB_EVENT_TYPE_METRICS || event_type == FLB_EVENT_TYPE_TRACES)) {
+ if (event_type == FLB_EVENT_TYPE_LOGS) {
+ entries = flb_mp_count(data, bytes);
+ }
+ else {
+ /* for non logs, we don't count the number of entries */
+ entries = 0;
+ }
+
+ if (!fc->fwd_retain_metadata && event_type == FLB_EVENT_TYPE_LOGS) {
+ result = flb_forward_format_transcode(ctx, FLB_LOG_EVENT_FORMAT_FORWARD,
+ (char *) data, bytes,
+ &transcoded_buffer,
+ &transcoded_length);
+
+ if (result == 0) {
+ append_options(ctx, fc, event_type, &mp_pck, entries,
+ transcoded_buffer,
+ transcoded_length,
+ NULL, chunk);
+
+ free(transcoded_buffer);
+ }
+ }
+ else {
+ append_options(ctx, fc, event_type, &mp_pck, entries, (char *) data, bytes, NULL, chunk);
+ }
+ }
+
+ *out_buf = mp_sbuf.data;
+ *out_size = mp_sbuf.size;
+
+ return 0;
+}
+
+/*
+ * Forward Protocol: Forward Mode Compat (for Fluentd <= 0.12)
+ * -----------------------------------------------------------
+ * Use Forward mode but format the timestamp as integers
+ *
+ * note: yes, the function name is a bit long...
+ */
+static int flb_forward_format_forward_compat_mode(struct flb_forward *ctx,
+ struct flb_forward_config *fc,
+ struct flb_forward_flush *ff,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_buf, size_t *out_size)
+{
+ int entries = 0;
+ char *chunk;
+ char chunk_buf[33];
+ msgpack_packer mp_pck;
+ msgpack_sbuffer mp_sbuf;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ if (ff) {
+ chunk = ff->checksum_hex;
+ }
+ else {
+ chunk = chunk_buf;
+ }
+
+ msgpack_pack_array(&mp_pck, fc->send_options ? 3 : 2);
+
+ /* Tag */
+ flb_forward_format_append_tag(ctx, fc, &mp_pck,
+ NULL, tag, tag_len);
+
+ /* Entries */
+ entries = flb_mp_count(data, bytes);
+ msgpack_pack_array(&mp_pck, entries);
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ msgpack_pack_array(&mp_pck, 2);
+
+ /* Pack timestamp */
+ if (fc->time_as_integer == FLB_TRUE) {
+ flb_time_append_to_msgpack(&log_event.timestamp,
+ &mp_pck,
+ FLB_TIME_ETFMT_INT);
+ }
+ else {
+ flb_time_append_to_msgpack(&log_event.timestamp,
+ &mp_pck,
+ FLB_TIME_ETFMT_V1_FIXEXT);
+ }
+
+ /* Pack records */
+ msgpack_pack_object(&mp_pck, *log_event.body);
+ }
+
+ if (fc->send_options == FLB_TRUE) {
+ append_options(ctx, fc, FLB_EVENT_TYPE_LOGS, &mp_pck, entries,
+ (char *) data, bytes, NULL, chunk);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ *out_buf = mp_sbuf.data;
+ *out_size = mp_sbuf.size;
+
+ return 0;
+}
+
+int flb_forward_format(struct flb_config *config,
+ struct flb_input_instance *ins,
+ void *ins_ctx,
+ void *flush_ctx,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_buf, size_t *out_size)
+{
+ int ret = 0;
+ int mode = MODE_FORWARD;
+ struct flb_upstream_node *node = NULL;
+ struct flb_forward_config *fc;
+ struct flb_forward_flush *ff = flush_ctx;
+ struct flb_forward *ctx = ins_ctx;
+
+ if (!flush_ctx) {
+ fc = flb_forward_target(ctx, &node);
+ }
+ else {
+ fc = ff->fc;
+ }
+
+ if (!fc) {
+ flb_plg_error(ctx->ins, "cannot get an Upstream single or HA node");
+ return -1;
+ }
+
+ if (event_type == FLB_EVENT_TYPE_METRICS) {
+ mode = MODE_FORWARD;
+ goto do_formatting;
+ }
+ else if (event_type == FLB_EVENT_TYPE_TRACES) {
+ mode = MODE_FORWARD;
+ goto do_formatting;
+ }
+
+#ifdef FLB_HAVE_RECORD_ACCESSOR
+ /*
+     * Based on the configuration, decide the preferred protocol mode
+ */
+ if (fc->ra_tag && fc->ra_static == FLB_FALSE) {
+ /*
+         * A dynamic tag per record needs to include the Tag in every entry;
+         * if the record accessor option has been enabled we jump into this
+         * mode.
+ */
+ mode = MODE_MESSAGE;
+ }
+ else {
+#endif
+ /* Forward Modes */
+ if (fc->time_as_integer == FLB_FALSE) {
+ /*
+         * In forward mode we optimize memory allocation by reusing the
+         * original msgpack buffer, so we don't compose the outgoing buffer
+         * and just let the caller handle it.
+ */
+ mode = MODE_FORWARD;
+ }
+ else if (fc->time_as_integer == FLB_TRUE) {
+ /*
+ * This option is similar to MODE_FORWARD but since we have to convert the
+ * timestamp to integer type, we need to format the buffer (in the previous
+         * case we avoid that step).
+ */
+ mode = MODE_FORWARD_COMPAT;
+ }
+
+#ifdef FLB_HAVE_RECORD_ACCESSOR
+ }
+#endif
+
+
+do_formatting:
+
+ /* Message Mode: the user needs custom Tags */
+ if (mode == MODE_MESSAGE) {
+#ifdef FLB_HAVE_RECORD_ACCESSOR
+ ret = flb_forward_format_message_mode(ctx, fc, ff,
+ tag, tag_len,
+ data, bytes,
+ out_buf, out_size);
+#endif
+ }
+ else if (mode == MODE_FORWARD) {
+ ret = flb_forward_format_forward_mode(ctx, fc, ff,
+ event_type,
+ tag, tag_len,
+ data, bytes,
+ out_buf, out_size);
+ }
+ else if (mode == MODE_FORWARD_COMPAT) {
+ ret = flb_forward_format_forward_compat_mode(ctx, fc, ff,
+ tag, tag_len,
+ data, bytes,
+ out_buf, out_size);
+ }
+
+ if (ret == -1) {
+ return -1;
+ }
+
+ return mode;
+}
diff --git a/src/fluent-bit/plugins/out_forward/forward_format.h b/src/fluent-bit/plugins/out_forward/forward_format.h
new file mode 100644
index 000000000..bc6c47349
--- /dev/null
+++ b/src/fluent-bit/plugins/out_forward/forward_format.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_FORWARD_FORMAT_H
+#define FLB_OUT_FORWARD_FORMAT_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include "forward.h"
+
+void flb_forward_format_bin_to_hex(uint8_t *buf, size_t len, char *out);
+
+int flb_forward_format_append_tag(struct flb_forward *ctx,
+ struct flb_forward_config *fc,
+ msgpack_packer *mp_pck,
+ msgpack_object *map,
+ const char *tag, int tag_len);
+
+int flb_forward_format(struct flb_config *config,
+ struct flb_input_instance *ins,
+ void *ins_ctx,
+ void *flush_ctx,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_buf, size_t *out_size);
+
+int flb_forward_format_transcode(
+ struct flb_forward *ctx, int format,
+ char *input_buffer, size_t input_length,
+ char **output_buffer, size_t *output_length);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_gelf/CMakeLists.txt b/src/fluent-bit/plugins/out_gelf/CMakeLists.txt
new file mode 100644
index 000000000..2de1e9987
--- /dev/null
+++ b/src/fluent-bit/plugins/out_gelf/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ gelf.c
+ )
+
+FLB_PLUGIN(out_gelf "${src}" "")
diff --git a/src/fluent-bit/plugins/out_gelf/gelf.c b/src/fluent-bit/plugins/out_gelf/gelf.c
new file mode 100644
index 000000000..6d7284641
--- /dev/null
+++ b/src/fluent-bit/plugins/out_gelf/gelf.c
@@ -0,0 +1,556 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_random.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <msgpack.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include "gelf.h"
+
+#ifndef MSG_DONTWAIT
+ #define MSG_DONTWAIT 0
+#endif
+
+#ifndef MSG_NOSIGNAL
+ #define MSG_NOSIGNAL 0
+#endif
+
+/*
+ * Version 1.1 (11/2013)
+ * A GELF message is a GZIP’d or ZLIB’d JSON string with the following fields:
+ * version string (UTF-8) GELF spec version – “1.1”; MUST be set by client
+ * library.
+ * host string (UTF-8) the name of the host, source or application that sent
+ * this message; MUST be set by client library.
+ * short_message string (UTF-8) a short descriptive message; MUST be set by
+ * client library.
+ * full_message string (UTF-8) a long message that can i.e. contain a
+ * backtrace; optional.
+ * timestamp number Seconds since UNIX epoch with optional decimal places
+ * for milliseconds; SHOULD be set by client library. Will be set to NOW
+ * by server if absent.
+ * level number the level equal to the standard syslog levels; optional,
+ * default is 1 (ALERT).
+ * facility string (UTF-8) optional, deprecated. Send as additional field
+ * instead.
+ * line number the line in a file that caused the error (decimal); optional,
+ * deprecated. Send as additional field instead.
+ * file string (UTF-8) the file (with path if you want) that caused the error
+ * (string); optional, deprecated. Send as additional field instead.
+ * _[additional field] string (UTF-8) or number every field you send and
+ * prefix with a _ (underscore) will be treated as an additional field.
+ * Allowed characters in field names are any word character (letter,
+ * number, underscore), dashes and dots. The verifying regular expression
+ * is: ^[\w\.\-]*$
+ * Libraries SHOULD not allow to send id as additional field (_id). Graylog
+ * server nodes omit this field automatically.
+ */
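+
+/*
+ * Illustrative GELF 1.1 payload (field values are hypothetical):
+ *
+ *   {
+ *     "version": "1.1",
+ *     "host": "example.org",
+ *     "short_message": "A short message",
+ *     "full_message": "Backtrace here\n\nmore stuff",
+ *     "timestamp": 1385053862.3072,
+ *     "level": 1,
+ *     "_user_id": 9001
+ *   }
+ */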
+
+/*
+ * Generate a unique message ID. The upper 48-bit is milliseconds
+ * since the Epoch, the lower 16-bit is a random nonce.
+ */
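+/*
+ * For example (hypothetical values): with now = 1700000000000 ms and
+ * nonce = 0x1234, the returned id is (1700000000000 << 16) | 0x1234.
+ */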
+static uint64_t message_id(void)
+{
+ uint64_t now;
+ uint16_t nonce;
+ struct flb_time tm;
+
+ if (flb_time_get(&tm) != -1) {
+ now = (uint64_t) tm.tm.tv_sec * 1000 + tm.tm.tv_nsec / 1000000;
+ }
+ else {
+ now = (uint64_t) time(NULL) * 1000;
+ }
+ nonce = (uint16_t) rand();
+
+ return (now << 16) | nonce;
+}
+
+/*
+ * A GELF header is 12 bytes in size. It has the following
+ * structure:
+ *
+ * +---+---+---+---+---+---+---+---+---+---+---+---+
+ * | MAGIC | MESSAGE ID |SEQ|NUM|
+ * +---+---+---+---+---+---+---+---+---+---+---+---+
+ *
+ * NUM is the total number of packets to send. SEQ is the
+ * unique sequence number for each packet (zero-indexed).
+ */
+#define GELF_MAGIC "\x1e\x0f"
+#define GELF_HEADER_SIZE 12
+
+static void init_chunk_header(uint8_t *buf, int count)
+{
+ uint64_t msgid = message_id();
+
+ memcpy(buf, GELF_MAGIC, 2);
+ memcpy(buf + 2, &msgid, 8);
+ buf[10] = 0;
+ buf[11] = count;
+}
+
+/*
+ * Chunked GELF
+ * Prepend the following structure to your GELF message to make it chunked:
+ * Chunked GELF magic bytes 2 bytes 0x1e 0x0f
+ * Message ID 8 bytes Must be the same for every chunk of this message.
+ * Identifying the whole message and is used to reassemble the chunks later.
+ * Generate from millisecond timestamp + hostname for example.
+ * Sequence number 1 byte The sequence number of this chunk. Starting at 0
+ * and always less than the sequence count.
+ * Sequence count 1 byte Total number of chunks this message has.
+ * All chunks MUST arrive within 5 seconds or the server will discard all
+ * already arrived and still arriving chunks.
+ * A message MUST NOT consist of more than 128 chunks.
+ */
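+/*
+ * For example, with the default packet_size of 1420 bytes a 3000 byte
+ * message is sent as 3 chunks (ceil(3000 / 1420)), each prefixed with the
+ * 12-byte chunk header built by init_chunk_header().
+ */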
+static int gelf_send_udp_chunked(struct flb_out_gelf_config *ctx, void *msg,
+ size_t msg_size)
+{
+ int ret;
+ uint8_t n;
+ size_t chunks;
+ size_t offset;
+ size_t len;
+ uint8_t *buf = (uint8_t *) ctx->pckt_buf;
+
+ chunks = msg_size / ctx->pckt_size;
+ if (msg_size % ctx->pckt_size != 0) {
+ chunks++;
+ }
+
+ if (chunks > 128) {
+        flb_plg_error(ctx->ins, "message too big: %zu bytes", msg_size);
+ return -1;
+ }
+
+ init_chunk_header(buf, chunks);
+
+ offset = 0;
+ for (n = 0; n < chunks; n++) {
+ buf[10] = n;
+
+ len = msg_size - offset;
+ if (ctx->pckt_size < len) {
+ len = ctx->pckt_size;
+ }
+ memcpy(buf + GELF_HEADER_SIZE, (char *) msg + offset, len);
+
+ ret = send(ctx->fd, buf, len + GELF_HEADER_SIZE,
+ MSG_DONTWAIT | MSG_NOSIGNAL);
+ if (ret == -1) {
+ flb_errno();
+ }
+ offset += ctx->pckt_size;
+ }
+ return 0;
+}
+
+static int gelf_send_udp_pckt (struct flb_out_gelf_config *ctx, char *msg,
+ size_t msg_size)
+{
+ int ret;
+
+ if (msg_size > ctx->pckt_size) {
+ gelf_send_udp_chunked(ctx, msg, msg_size);
+ }
+ else {
+ ret = send(ctx->fd, msg, msg_size, MSG_DONTWAIT | MSG_NOSIGNAL);
+ if (ret == -1) {
+ flb_errno();
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int gelf_send_udp(struct flb_out_gelf_config *ctx, char *msg,
+ size_t msg_size)
+{
+ int ret;
+ int status;
+ void *zdata;
+ size_t zdata_len;
+
+ if (ctx->compress == FLB_TRUE || (msg_size > ctx->pckt_size)) {
+ ret = flb_gzip_compress(msg, msg_size, &zdata, &zdata_len);
+ if (ret != 0) {
+ return -1;
+ }
+
+ status = gelf_send_udp_pckt (ctx, zdata, zdata_len);
+ flb_free(zdata);
+ if (status < 0) {
+ return status;
+ }
+ }
+ else {
+ status = send(ctx->fd, msg, msg_size, MSG_DONTWAIT | MSG_NOSIGNAL);
+ if (status < 0) {
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+static void cb_gelf_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ flb_sds_t s;
+ flb_sds_t tmp;
+ size_t off = 0;
+ size_t prev_off = 0;
+ size_t size = 0;
+ size_t bytes_sent;
+ msgpack_object map;
+ struct flb_connection *u_conn = NULL;
+ struct flb_out_gelf_config *ctx = out_context;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ if (ctx->mode != FLB_GELF_UDP) {
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "no upstream connections available");
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder,
+ (char *) event_chunk->data,
+ event_chunk->size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ if (ctx->mode != FLB_GELF_UDP) {
+ flb_upstream_conn_release(u_conn);
+ }
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ off = log_decoder.offset;
+ size = off - prev_off;
+ prev_off = off;
+
+ map = *log_event.body;
+
+ size = (size * 1.4);
+ s = flb_sds_create_size(size);
+ if (s == NULL) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ tmp = flb_msgpack_to_gelf(&s, &map, &log_event.timestamp,
+ &(ctx->fields));
+ if (tmp != NULL) {
+ s = tmp;
+ if (ctx->mode == FLB_GELF_UDP) {
+ ret = gelf_send_udp(ctx, s, flb_sds_len(s));
+ if (ret == -1) {
+ if (ctx->mode != FLB_GELF_UDP) {
+ flb_upstream_conn_release(u_conn);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ flb_sds_destroy(s);
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ }
+ else {
+ /* write gelf json plus \0 */
+ ret = flb_io_net_write(u_conn,
+ s, flb_sds_len(s) + 1, &bytes_sent);
+ if (ret == -1) {
+ flb_errno();
+
+ if (ctx->mode != FLB_GELF_UDP) {
+ flb_upstream_conn_release(u_conn);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ flb_sds_destroy(s);
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "error encoding to GELF");
+ }
+
+ flb_sds_destroy(s);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ if (ctx->mode != FLB_GELF_UDP) {
+ flb_upstream_conn_release(u_conn);
+ }
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int cb_gelf_init(struct flb_output_instance *ins, struct flb_config *config,
+ void *data)
+{
+ int ret;
+ const char *tmp;
+ struct flb_out_gelf_config *ctx = NULL;
+
+ /* Set default network configuration */
+ flb_output_net_default("127.0.0.1", 12201, ins);
+
+ /* Allocate plugin context */
+ ctx = flb_calloc(1, sizeof(struct flb_out_gelf_config));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "flb_output_config_map_set failed");
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Config Mode */
+ tmp = flb_output_get_property("mode", ins);
+ if (tmp) {
+ if (!strcasecmp(tmp, "tcp")) {
+ ctx->mode = FLB_GELF_TCP;
+ }
+ else if (!strcasecmp(tmp, "tls")) {
+ ctx->mode = FLB_GELF_TLS;
+ }
+ else if (!strcasecmp(tmp, "udp")) {
+ ctx->mode = FLB_GELF_UDP;
+ }
+ else {
+ flb_plg_error(ctx->ins, "Unknown gelf mode %s", tmp);
+ flb_free(ctx);
+ return -1;
+ }
+ }
+ else {
+ ctx->mode = FLB_GELF_UDP;
+ }
+
+ /* Config Gelf_Timestamp_Key */
+ tmp = flb_output_get_property("gelf_timestamp_key", ins);
+ if (tmp) {
+ ctx->fields.timestamp_key = flb_sds_create(tmp);
+ }
+
+ /* Config Gelf_Host_Key */
+ tmp = flb_output_get_property("gelf_host_key", ins);
+ if (tmp) {
+ ctx->fields.host_key = flb_sds_create(tmp);
+ }
+
+ /* Config Gelf_Short_Message_Key */
+ tmp = flb_output_get_property("gelf_short_message_key", ins);
+ if (tmp) {
+ ctx->fields.short_message_key = flb_sds_create(tmp);
+ }
+
+ /* Config Gelf_Full_Message_Key */
+ tmp = flb_output_get_property("gelf_full_message_key", ins);
+ if (tmp) {
+ ctx->fields.full_message_key = flb_sds_create(tmp);
+ }
+
+ /* Config Gelf_Level_Key */
+ tmp = flb_output_get_property("gelf_level_key", ins);
+ if (tmp) {
+ ctx->fields.level_key = flb_sds_create(tmp);
+ }
+
+ /* init random seed */
+ if (flb_random_bytes((unsigned char *) &ctx->seed, sizeof(int))) {
+ ctx->seed = time(NULL);
+ }
+ srand(ctx->seed);
+
+ ctx->fd = -1;
+ ctx->pckt_buf = NULL;
+
+ if (ctx->mode == FLB_GELF_UDP) {
+ ctx->fd = flb_net_udp_connect(ins->host.name, ins->host.port,
+ ins->net_setup.source_address);
+ if (ctx->fd < 0) {
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->pckt_buf = flb_malloc(GELF_HEADER_SIZE + ctx->pckt_size);
+ if (ctx->pckt_buf == NULL) {
+ flb_socket_close(ctx->fd);
+ flb_free(ctx);
+ return -1;
+ }
+ }
+ else {
+ int io_flags = FLB_IO_TCP;
+
+ if (ctx->mode == FLB_GELF_TLS) {
+ io_flags = FLB_IO_TLS;
+ }
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ ctx->u = flb_upstream_create(config, ins->host.name, ins->host.port,
+ io_flags, ins->tls);
+ if (!(ctx->u)) {
+ flb_free(ctx);
+ return -1;
+ }
+ flb_output_upstream_set(ctx->u, ins);
+ }
+
+ /* Set the plugin context */
+ flb_output_set_context(ins, ctx);
+ return 0;
+}
+
+static int cb_gelf_exit(void *data, struct flb_config *config)
+{
+ struct flb_out_gelf_config *ctx = data;
+
+ if (ctx == NULL) {
+ return 0;
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+ if (ctx->fd >= 0) {
+ close(ctx->fd);
+ }
+
+ flb_sds_destroy(ctx->fields.timestamp_key);
+ flb_sds_destroy(ctx->fields.host_key);
+ flb_sds_destroy(ctx->fields.short_message_key);
+ flb_sds_destroy(ctx->fields.full_message_key);
+ flb_sds_destroy(ctx->fields.level_key);
+
+ flb_free(ctx->pckt_buf);
+ flb_free(ctx);
+
+ return 0;
+}
+
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "mode", "udp",
+ 0, FLB_FALSE, 0,
+ "The protocol to use. 'tls', 'tcp' or 'udp'"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "gelf_short_message_key", NULL,
+ 0, FLB_FALSE, 0,
+ "A short descriptive message (MUST be set in GELF)"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "gelf_timestamp_key", NULL,
+ 0, FLB_FALSE, 0,
+ "Timestamp key name (SHOULD be set in GELF)"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "gelf_host_key", NULL,
+ 0, FLB_FALSE, 0,
+     "Key whose value is used as the name of the host, "
+     "source or application that sent this message. (MUST be set in GELF)"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "gelf_full_message_key", NULL,
+ 0, FLB_FALSE, 0,
+     "Key to use as the long message that can, for example, contain a backtrace. "
+ "(Optional in GELF)"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "gelf_level_key", NULL,
+ 0, FLB_FALSE, 0,
+ "Key to be used as the log level. "
+ "Its value must be in standard syslog levels (between 0 and 7). "
+ "(Optional in GELF)"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "packet_size", "1420",
+ 0, FLB_TRUE, offsetof(struct flb_out_gelf_config, pckt_size),
+ "If transport protocol is udp, you can set the size of packets to be sent."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "compress", "true",
+ 0, FLB_TRUE, offsetof(struct flb_out_gelf_config, compress),
+ "If transport protocol is udp, "
+ "you can set this if you want your UDP packets to be compressed."
+ },
+
+ /* EOF */
+ {0}
+};
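+
+/*
+ * Minimal configuration sketch using the properties above (host name is
+ * hypothetical):
+ *
+ *   [OUTPUT]
+ *       Name                    gelf
+ *       Match                   *
+ *       Host                    graylog.example.com
+ *       Port                    12201
+ *       Mode                    udp
+ *       Gelf_Short_Message_Key  log
+ */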
+
+/* Plugin reference */
+struct flb_output_plugin out_gelf_plugin = {
+ .name = "gelf",
+ .description = "GELF Output",
+ .cb_init = cb_gelf_init,
+ .cb_pre_run = NULL,
+ .cb_flush = cb_gelf_flush,
+ .cb_exit = cb_gelf_exit,
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+ .config_map = config_map
+};
diff --git a/src/fluent-bit/plugins/out_gelf/gelf.h b/src/fluent-bit/plugins/out_gelf/gelf.h
new file mode 100644
index 000000000..b834059cf
--- /dev/null
+++ b/src/fluent-bit/plugins/out_gelf/gelf.h
@@ -0,0 +1,47 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_GELF_H
+#define FLB_OUT_GELF_H
+
+#define FLB_GELF_UDP 0
+#define FLB_GELF_TCP 1
+#define FLB_GELF_TLS 2
+
+#include <fluent-bit/flb_output_plugin.h>
+
+struct flb_out_gelf_config {
+
+ struct flb_gelf_fields fields;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *u;
+ flb_sockfd_t fd;
+
+ int pckt_size;
+ char *pckt_buf;
+ int compress;
+ unsigned int seed;
+
+ int mode;
+
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_http/CMakeLists.txt b/src/fluent-bit/plugins/out_http/CMakeLists.txt
new file mode 100644
index 000000000..216561017
--- /dev/null
+++ b/src/fluent-bit/plugins/out_http/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ http.c
+ http_conf.c
+ )
+
+FLB_PLUGIN(out_http "${src}" "mk_core")
diff --git a/src/fluent-bit/plugins/out_http/http.c b/src/fluent-bit/plugins/out_http/http.c
new file mode 100644
index 000000000..f88b6346d
--- /dev/null
+++ b/src/fluent-bit/plugins/out_http/http.c
@@ -0,0 +1,774 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <msgpack.h>
+
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_signv4.h>
+#endif
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include "http.h"
+#include "http_conf.h"
+
+#include <fluent-bit/flb_callback.h>
+
+static int cb_http_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct flb_out_http *ctx = NULL;
+ (void) data;
+
+ ctx = flb_http_conf_create(ins, config);
+ if (!ctx) {
+ return -1;
+ }
+
+ /* Set the plugin context */
+ flb_output_set_context(ins, ctx);
+
+ /*
+ * This plugin instance uses the HTTP client interface, let's register
+     * its debugging callbacks.
+ */
+ flb_output_set_http_debug_callbacks(ins);
+
+ return 0;
+}
+
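+/*
+ * Add the given headers to the HTTP client. 'headers' is a NULL-terminated
+ * array of alternating key/value strings (as produced by extract_headers()
+ * below); each string is freed once the pair has been added.
+ */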
+static void append_headers(struct flb_http_client *c,
+ char **headers)
+{
+ int i;
+ char *header_key;
+ char *header_value;
+
+ i = 0;
+ header_key = NULL;
+ header_value = NULL;
+ while (*headers) {
+ if (i % 2 == 0) {
+ header_key = *headers;
+ }
+ else {
+ header_value = *headers;
+ }
+ if (header_key && header_value) {
+ flb_http_add_header(c,
+ header_key,
+ strlen(header_key),
+ header_value,
+ strlen(header_value));
+ flb_free(header_key);
+ flb_free(header_value);
+ header_key = NULL;
+ header_value = NULL;
+ }
+ headers++;
+ i++;
+ }
+}
+
+static int http_post(struct flb_out_http *ctx,
+ const void *body, size_t body_len,
+ const char *tag, int tag_len,
+ char **headers)
+{
+ int ret;
+ int out_ret = FLB_OK;
+ int compressed = FLB_FALSE;
+ size_t b_sent;
+ void *payload_buf = NULL;
+ size_t payload_size = 0;
+ struct flb_upstream *u;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ struct mk_list *head;
+ struct flb_config_map_val *mv;
+ struct flb_slist_entry *key = NULL;
+ struct flb_slist_entry *val = NULL;
+ flb_sds_t signature = NULL;
+
+ /* Get upstream context and connection */
+ u = ctx->u;
+ u_conn = flb_upstream_conn_get(u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "no upstream connections available to %s:%i",
+ u->tcp_host, u->tcp_port);
+ return FLB_RETRY;
+ }
+
+ /* Map payload */
+ payload_buf = (void *) body;
+ payload_size = body_len;
+
+ /* Should we compress the payload ? */
+ if (ctx->compress_gzip == FLB_TRUE) {
+ ret = flb_gzip_compress((void *) body, body_len,
+ &payload_buf, &payload_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "cannot gzip payload, disabling compression");
+ }
+ else {
+ compressed = FLB_TRUE;
+ }
+ }
+
+ /* Create HTTP client context */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->uri,
+ payload_buf, payload_size,
+ ctx->host, ctx->port,
+ ctx->proxy, 0);
+
+
+ if (c->proxy.host) {
+ flb_plg_debug(ctx->ins, "[http_client] proxy host: %s port: %i",
+ c->proxy.host, c->proxy.port);
+ }
+
+ /* Allow duplicated headers ? */
+ flb_http_allow_duplicated_headers(c, ctx->allow_dup_headers);
+
+ /*
+ * Direct assignment of the callback context to the HTTP client context.
+     * This needs to be improved through a cleaner API.
+ */
+ c->cb_ctx = ctx->ins->callback;
+
+ /* Append headers */
+ if (headers) {
+ append_headers(c, headers);
+ }
+ else if ((ctx->out_format == FLB_PACK_JSON_FORMAT_JSON) ||
+ (ctx->out_format == FLB_PACK_JSON_FORMAT_STREAM) ||
+ (ctx->out_format == FLB_PACK_JSON_FORMAT_LINES) ||
+ (ctx->out_format == FLB_HTTP_OUT_GELF)) {
+ flb_http_add_header(c,
+ FLB_HTTP_CONTENT_TYPE,
+ sizeof(FLB_HTTP_CONTENT_TYPE) - 1,
+ FLB_HTTP_MIME_JSON,
+ sizeof(FLB_HTTP_MIME_JSON) - 1);
+ }
+ else {
+ flb_http_add_header(c,
+ FLB_HTTP_CONTENT_TYPE,
+ sizeof(FLB_HTTP_CONTENT_TYPE) - 1,
+ FLB_HTTP_MIME_MSGPACK,
+ sizeof(FLB_HTTP_MIME_MSGPACK) - 1);
+ }
+
+ if (ctx->header_tag) {
+ flb_http_add_header(c,
+ ctx->header_tag,
+ flb_sds_len(ctx->header_tag),
+ tag, tag_len);
+ }
+
+ /* Content Encoding: gzip */
+ if (compressed == FLB_TRUE) {
+ flb_http_set_content_encoding_gzip(c);
+ }
+
+ /* Basic Auth headers */
+ if (ctx->http_user && ctx->http_passwd) {
+ flb_http_basic_auth(c, ctx->http_user, ctx->http_passwd);
+ }
+
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+
+ flb_config_map_foreach(head, mv, ctx->headers) {
+ key = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ val = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ flb_http_add_header(c,
+ key->str, flb_sds_len(key->str),
+ val->str, flb_sds_len(val->str));
+ }
+
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+ /* AWS SigV4 headers */
+ if (ctx->has_aws_auth == FLB_TRUE) {
+ flb_plg_debug(ctx->ins, "signing request with AWS Sigv4");
+ signature = flb_signv4_do(c,
+ FLB_TRUE, /* normalize URI ? */
+ FLB_TRUE, /* add x-amz-date header ? */
+ time(NULL),
+ (char *) ctx->aws_region,
+ (char *) ctx->aws_service,
+ 0, NULL,
+ ctx->aws_provider);
+
+ if (!signature) {
+ flb_plg_error(ctx->ins, "could not sign request with sigv4");
+ out_ret = FLB_RETRY;
+ goto cleanup;
+ }
+ flb_sds_destroy(signature);
+ }
+#endif
+#endif
+
+ ret = flb_http_do(c, &b_sent);
+ if (ret == 0) {
+ /*
+ * Only allow the following HTTP status:
+ *
+ * - 200: OK
+ * - 201: Created
+ * - 202: Accepted
+         * - 203: Non-Authoritative Information
+ * - 204: No Content
+ * - 205: Reset content
+ *
+ */
+ if (c->resp.status < 200 || c->resp.status > 205) {
+ if (ctx->log_response_payload &&
+ c->resp.payload && c->resp.payload_size > 0) {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i\n%s",
+ ctx->host, ctx->port,
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->host, ctx->port, c->resp.status);
+ }
+ out_ret = FLB_RETRY;
+ }
+ else {
+ if (ctx->log_response_payload &&
+ c->resp.payload && c->resp.payload_size > 0) {
+ flb_plg_info(ctx->ins, "%s:%i, HTTP status=%i\n%s",
+ ctx->host, ctx->port,
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_info(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->host, ctx->port,
+ c->resp.status);
+ }
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "could not flush records to %s:%i (http_do=%i)",
+ ctx->host, ctx->port, ret);
+ out_ret = FLB_RETRY;
+ }
+
+cleanup:
+ /*
+     * If the payload buffer is different from the incoming records body, it
+     * means we generated a new payload that must be freed.
+ */
+ if (payload_buf != body) {
+ flb_free(payload_buf);
+ }
+
+ /* Destroy HTTP client context */
+ flb_http_client_destroy(c);
+
+ /* Release the TCP connection */
+ flb_upstream_conn_release(u_conn);
+
+ return out_ret;
+}
+
+static int compose_payload_gelf(struct flb_out_http *ctx,
+ const char *data, uint64_t bytes,
+ void **out_body, size_t *out_size)
+{
+ flb_sds_t s;
+ flb_sds_t tmp = NULL;
+ size_t size = 0;
+ msgpack_object map;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ size = bytes * 1.5;
+
+ /* Allocate buffer for our new payload */
+ s = flb_sds_create_size(size);
+ if (!s) {
+ flb_plg_error(ctx->ins, "flb_sds_create_size failed");
+ return FLB_RETRY;
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ flb_sds_destroy(s);
+
+ return FLB_RETRY;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ map = *log_event.body;
+
+ tmp = flb_msgpack_to_gelf(&s, &map,
+ &log_event.timestamp,
+ &(ctx->gelf_fields));
+ if (!tmp) {
+ flb_plg_error(ctx->ins, "error encoding to GELF");
+
+ flb_sds_destroy(s);
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_ERROR;
+ }
+
+ /* Append new line */
+ tmp = flb_sds_cat(s, "\n", 1);
+ if (!tmp) {
+ flb_plg_error(ctx->ins, "error concatenating records");
+
+ flb_sds_destroy(s);
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_RETRY;
+ }
+
+ s = tmp;
+ }
+
+ *out_body = s;
+ *out_size = flb_sds_len(s);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_OK;
+}
+
+static int compose_payload(struct flb_out_http *ctx,
+ const void *in_body, size_t in_size,
+ void **out_body, size_t *out_size)
+{
+ flb_sds_t encoded;
+
+ *out_body = NULL;
+ *out_size = 0;
+
+ if ((ctx->out_format == FLB_PACK_JSON_FORMAT_JSON) ||
+ (ctx->out_format == FLB_PACK_JSON_FORMAT_STREAM) ||
+ (ctx->out_format == FLB_PACK_JSON_FORMAT_LINES)) {
+
+ encoded = flb_pack_msgpack_to_json_format(in_body,
+ in_size,
+ ctx->out_format,
+ ctx->json_date_format,
+ ctx->date_key);
+ if (encoded == NULL) {
+ flb_plg_error(ctx->ins, "failed to convert json");
+ return FLB_ERROR;
+ }
+ *out_body = (void*)encoded;
+ *out_size = flb_sds_len(encoded);
+ }
+ else if (ctx->out_format == FLB_HTTP_OUT_GELF) {
+ return compose_payload_gelf(ctx, in_body, in_size, out_body, out_size);
+ }
+ else {
+ /* Nothing to do, if the format is msgpack */
+ *out_body = (void *)in_body;
+ *out_size = in_size;
+ }
+
+ return FLB_OK;
+}
+
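+/*
+ * Build a NULL-terminated array of alternating header key/value C strings
+ * from a msgpack map; entries whose key or value is not a string are
+ * skipped. The strings are released by append_headers() and the array
+ * itself by the caller.
+ */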
+static char **extract_headers(msgpack_object *obj) {
+ size_t i;
+ char **headers = NULL;
+ size_t str_count;
+ msgpack_object_map map;
+ msgpack_object_str k;
+ msgpack_object_str v;
+
+ if (obj->type != MSGPACK_OBJECT_MAP) {
+ goto err;
+ }
+
+ map = obj->via.map;
+ str_count = map.size * 2 + 1;
+ headers = flb_calloc(str_count, sizeof *headers);
+
+ if (!headers) {
+ goto err;
+ }
+
+ for (i = 0; i < map.size; i++) {
+ if (map.ptr[i].key.type != MSGPACK_OBJECT_STR ||
+ map.ptr[i].val.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ k = map.ptr[i].key.via.str;
+ v = map.ptr[i].val.via.str;
+
+ headers[i * 2] = strndup(k.ptr, k.size);
+
+        if (!headers[i * 2]) {
+ goto err;
+ }
+
+ headers[i * 2 + 1] = strndup(v.ptr, v.size);
+
+        if (!headers[i * 2 + 1]) {
+ goto err;
+ }
+ }
+
+ return headers;
+
+err:
+ if (headers) {
+ for (i = 0; i < str_count; i++) {
+ if (headers[i]) {
+ flb_free(headers[i]);
+ }
+ }
+ flb_free(headers);
+ }
+ return NULL;
+}
+
+static int post_all_requests(struct flb_out_http *ctx,
+ const char *data, size_t size,
+ flb_sds_t body_key,
+ flb_sds_t headers_key,
+ struct flb_event_chunk *event_chunk)
+{
+ msgpack_object map;
+ msgpack_object *k;
+ msgpack_object *v;
+ msgpack_object *start_key;
+ const char *body;
+ size_t body_size;
+ bool body_found;
+ bool headers_found;
+ char **headers;
+ size_t record_count = 0;
+ int ret = 0;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ while ((flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ headers = NULL;
+ body_found = false;
+ headers_found = false;
+
+ map = *log_event.body;
+
+ if (map.type != MSGPACK_OBJECT_MAP) {
+ ret = -1;
+ break;
+ }
+
+ if (!flb_ra_get_kv_pair(ctx->body_ra, map, &start_key, &k, &v)) {
+ if (v->type == MSGPACK_OBJECT_STR || v->type == MSGPACK_OBJECT_BIN) {
+ body = v->via.str.ptr;
+ body_size = v->via.str.size;
+ body_found = true;
+ }
+ else {
+ flb_plg_warn(ctx->ins,
+ "failed to extract body using pattern \"%s\" "
+ "(must be a msgpack string or bin)", ctx->body_key);
+ }
+ }
+
+ if (!flb_ra_get_kv_pair(ctx->headers_ra, map, &start_key, &k, &v)) {
+ headers = extract_headers(v);
+ if (headers) {
+ headers_found = true;
+ }
+ else {
+ flb_plg_warn(ctx->ins,
+ "error extracting headers using pattern \"%s\"",
+ ctx->headers_key);
+ }
+ }
+
+ if (body_found && headers_found) {
+ flb_plg_trace(ctx->ins, "posting record %zu", record_count++);
+ ret = http_post(ctx, body, body_size, event_chunk->tag,
+ flb_sds_len(event_chunk->tag), headers);
+ }
+ else {
+ flb_plg_warn(ctx->ins,
+ "failed to extract body/headers using patterns "
+ "\"%s\" and \"%s\"", ctx->body_key, ctx->headers_key);
+ ret = -1;
+ continue;
+ }
+
+ flb_free(headers);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return ret;
+}
+
+static void cb_http_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret = FLB_ERROR;
+ struct flb_out_http *ctx = out_context;
+ void *out_body;
+ size_t out_size;
+ (void) i_ins;
+
+ if (ctx->body_key) {
+ ret = post_all_requests(ctx, event_chunk->data, event_chunk->size,
+ ctx->body_key, ctx->headers_key, event_chunk);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins,
+ "failed to post requests body key \"%s\"", ctx->body_key);
+ }
+ }
+ else {
+ ret = compose_payload(ctx, event_chunk->data, event_chunk->size,
+ &out_body, &out_size);
+ if (ret != FLB_OK) {
+ FLB_OUTPUT_RETURN(ret);
+ }
+
+ if ((ctx->out_format == FLB_PACK_JSON_FORMAT_JSON) ||
+ (ctx->out_format == FLB_PACK_JSON_FORMAT_STREAM) ||
+ (ctx->out_format == FLB_PACK_JSON_FORMAT_LINES) ||
+ (ctx->out_format == FLB_HTTP_OUT_GELF)) {
+ ret = http_post(ctx, out_body, out_size,
+ event_chunk->tag, flb_sds_len(event_chunk->tag), NULL);
+ flb_sds_destroy(out_body);
+ }
+ else {
+ /* msgpack */
+ ret = http_post(ctx,
+ event_chunk->data, event_chunk->size,
+ event_chunk->tag, flb_sds_len(event_chunk->tag), NULL);
+ }
+ }
+
+ FLB_OUTPUT_RETURN(ret);
+}
+
+static int cb_http_exit(void *data, struct flb_config *config)
+{
+ struct flb_out_http *ctx = data;
+
+ flb_http_conf_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "proxy", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify an HTTP Proxy. The expected format of this value is http://host:port. "
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "allow_duplicated_headers", "true",
+ 0, FLB_TRUE, offsetof(struct flb_out_http, allow_dup_headers),
+ "Specify if duplicated headers are allowed or not"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "log_response_payload", "true",
+ 0, FLB_TRUE, offsetof(struct flb_out_http, log_response_payload),
+     "Specify if the response payload should be logged or not"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "http_user", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_http, http_user),
+ "Set HTTP auth user"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "http_passwd", "",
+ 0, FLB_TRUE, offsetof(struct flb_out_http, http_passwd),
+ "Set HTTP auth password"
+ },
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+ {
+ FLB_CONFIG_MAP_BOOL, "aws_auth", "false",
+ 0, FLB_TRUE, offsetof(struct flb_out_http, has_aws_auth),
+ "Enable AWS SigV4 authentication"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_service", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_http, aws_service),
+ "AWS destination service code, used by SigV4 authentication"
+ },
+ FLB_AWS_CREDENTIAL_BASE_CONFIG_MAP(FLB_HTTP_AWS_CREDENTIAL_PREFIX),
+#endif
+#endif
+ {
+ FLB_CONFIG_MAP_STR, "header_tag", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_http, header_tag),
+     "Set an HTTP header whose value is the Tag"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "format", NULL,
+ 0, FLB_FALSE, 0,
+ "Set desired payload format: json, json_stream, json_lines, gelf or msgpack"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "json_date_format", NULL,
+ 0, FLB_FALSE, 0,
+ FBL_PACK_JSON_DATE_FORMAT_DESCRIPTION
+ },
+ {
+ FLB_CONFIG_MAP_STR, "json_date_key", "date",
+ 0, FLB_TRUE, offsetof(struct flb_out_http, json_date_key),
+ "Specify the name of the date field in output"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "compress", NULL,
+ 0, FLB_FALSE, 0,
+ "Set payload compression mechanism. Option available is 'gzip'"
+ },
+ {
+ FLB_CONFIG_MAP_SLIST_1, "header", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_out_http, headers),
+     "Add an HTTP header key/value pair. Multiple headers can be set"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "uri", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_http, uri),
+ "Specify an optional HTTP URI for the target web server, e.g: /something"
+ },
+
+ /* Gelf Properties */
+ {
+ FLB_CONFIG_MAP_STR, "gelf_timestamp_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_http, gelf_fields.timestamp_key),
+ "Specify the key to use for 'timestamp' in gelf format"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "gelf_host_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_http, gelf_fields.host_key),
+ "Specify the key to use for the 'host' in gelf format"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "gelf_short_message_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_http, gelf_fields.short_message_key),
+ "Specify the key to use as the 'short' message in gelf format"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "gelf_full_message_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_http, gelf_fields.full_message_key),
+ "Specify the key to use for the 'full' message in gelf format"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "gelf_level_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_http, gelf_fields.level_key),
+ "Specify the key to use for the 'level' in gelf format"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "body_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_http, body_key),
+ "Specify the key which contains the body"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "headers_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_http, headers_key),
+ "Specify the key which contains the headers"
+ },
+
+ /* EOF */
+ {0}
+};
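+
+/*
+ * Minimal configuration sketch using the properties above (host, port,
+ * URI and header values are hypothetical):
+ *
+ *   [OUTPUT]
+ *       Name     http
+ *       Match    *
+ *       Host     192.168.2.3
+ *       Port     80
+ *       URI      /something
+ *       Format   json
+ *       Header   X-Key-A Value_A
+ *       Compress gzip
+ */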
+
+static int cb_http_format_test(struct flb_config *config,
+ struct flb_input_instance *ins,
+ void *plugin_context,
+ void *flush_ctx,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_data, size_t *out_size)
+{
+ struct flb_out_http *ctx = plugin_context;
+ int ret;
+
+ ret = compose_payload(ctx, data, bytes, out_data, out_size);
+ if (ret != FLB_OK) {
+ flb_error("ret=%d", ret);
+ return -1;
+ }
+ return 0;
+}
+
+/* Plugin reference */
+struct flb_output_plugin out_http_plugin = {
+ .name = "http",
+ .description = "HTTP Output",
+ .cb_init = cb_http_init,
+ .cb_pre_run = NULL,
+ .cb_flush = cb_http_flush,
+ .cb_exit = cb_http_exit,
+ .config_map = config_map,
+
+ /* for testing */
+ .test_formatter.callback = cb_http_format_test,
+
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+ .workers = 2
+};
diff --git a/src/fluent-bit/plugins/out_http/http.h b/src/fluent-bit/plugins/out_http/http.h
new file mode 100644
index 000000000..151658f31
--- /dev/null
+++ b/src/fluent-bit/plugins/out_http/http.h
@@ -0,0 +1,103 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_HTTP_H
+#define FLB_OUT_HTTP_H
+
+#define FLB_HTTP_OUT_MSGPACK FLB_PACK_JSON_FORMAT_NONE
+#define FLB_HTTP_OUT_GELF 20
+
+#define FLB_HTTP_CONTENT_TYPE "Content-Type"
+#define FLB_HTTP_MIME_MSGPACK "application/msgpack"
+#define FLB_HTTP_MIME_JSON "application/json"
+
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+#define FLB_HTTP_AWS_CREDENTIAL_PREFIX "aws_"
+#endif
+#endif
+
+struct flb_out_http {
+ /* HTTP Auth */
+ char *http_user;
+ char *http_passwd;
+
+ /* AWS Auth */
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+ int has_aws_auth;
+ struct flb_aws_provider *aws_provider;
+ const char *aws_region;
+ const char *aws_service;
+#endif
+#endif
+
+ /* Proxy */
+ const char *proxy;
+ char *proxy_host;
+ int proxy_port;
+
+ /* Output format */
+ int out_format;
+
+ int json_date_format;
+ flb_sds_t json_date_key;
+ flb_sds_t date_key; /* internal use */
+
+ /* HTTP URI */
+ char *uri;
+ char *host;
+ int port;
+
+ /* GELF fields */
+ struct flb_gelf_fields gelf_fields;
+
+ /* which record key to use as body */
+ flb_sds_t body_key;
+
+ struct flb_record_accessor *body_ra;
+
+ /* override headers with contents of the map in the key specified here */
+ flb_sds_t headers_key;
+
+ struct flb_record_accessor *headers_ra;
+
+ /* Include tag in header */
+ flb_sds_t header_tag;
+
+ /* Compression mode (gzip) */
+ int compress_gzip;
+
+ /* Allow duplicated headers */
+ int allow_dup_headers;
+
+ /* Log the response payload */
+ int log_response_payload;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *u;
+
+ /* Arbitrary HTTP headers */
+ struct mk_list *headers;
+
+ /* Plugin instance */
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_http/http_conf.c b/src/fluent-bit/plugins/out_http/http_conf.c
new file mode 100644
index 000000000..cf3f2101a
--- /dev/null
+++ b/src/fluent-bit/plugins/out_http/http_conf.c
@@ -0,0 +1,298 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_record_accessor.h>
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+#include <fluent-bit/flb_aws_credentials.h>
+#endif
+#endif
+#include "http.h"
+#include "http_conf.h"
+
+struct flb_out_http *flb_http_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ int ulen;
+ int io_flags = 0;
+ char *protocol = NULL;
+ char *host = NULL;
+ char *port = NULL;
+ char *uri = NULL;
+ char *tmp_uri = NULL;
+ const char *tmp;
+ struct flb_upstream *upstream;
+ struct flb_out_http *ctx = NULL;
+
+ /* Allocate plugin context */
+ ctx = flb_calloc(1, sizeof(struct flb_out_http));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ if (ctx->headers_key && !ctx->body_key) {
+ flb_plg_error(ctx->ins, "when setting headers_key, body_key is also required");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ if (ctx->body_key && !ctx->headers_key) {
+ flb_plg_error(ctx->ins, "when setting body_key, headers_key is also required");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ if (ctx->body_key && ctx->headers_key) {
+ ctx->body_ra = flb_ra_create(ctx->body_key, FLB_FALSE);
+ if (!ctx->body_ra) {
+ flb_plg_error(ctx->ins, "failed to allocate body record accessor");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ ctx->headers_ra = flb_ra_create(ctx->headers_key, FLB_FALSE);
+ if (!ctx->headers_ra) {
+ flb_plg_error(ctx->ins, "failed to allocate headers record accessor");
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+
+ /*
+ * Check if a proxy has been set; if so, the Upstream manager will use
+ * the proxy endpoint and we let the HTTP client know about it, so
+ * it can adjust the HTTP requests.
+ */
+ tmp = flb_output_get_property("proxy", ins);
+ if (tmp) {
+ ret = flb_utils_url_split(tmp, &protocol, &host, &port, &uri);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not parse proxy parameter: '%s'", tmp);
+ flb_free(ctx);
+ return NULL;
+ }
+
+ ctx->proxy_host = host;
+ ctx->proxy_port = atoi(port);
+ ctx->proxy = tmp;
+ flb_free(protocol);
+ flb_free(port);
+ flb_free(uri);
+ uri = NULL;
+ }
+ else {
+ flb_output_net_default("127.0.0.1", 80, ins);
+ }
+
+ /* Check if AWS SigV4 authentication is enabled */
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+ if (ctx->has_aws_auth) {
+ ctx->aws_service = flb_output_get_property(FLB_HTTP_AWS_CREDENTIAL_PREFIX
+ "service", ctx->ins);
+ if (!ctx->aws_service) {
+ flb_plg_error(ins, "aws_auth option requires " FLB_HTTP_AWS_CREDENTIAL_PREFIX
+ "service to be set");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ ctx->aws_provider = flb_managed_chain_provider_create(
+ ins,
+ config,
+ FLB_HTTP_AWS_CREDENTIAL_PREFIX,
+ NULL,
+ flb_aws_client_generator()
+ );
+ if (!ctx->aws_provider) {
+ flb_plg_error(ins, "failed to create aws credential provider for sigv4 auth");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* If managed provider creation succeeds, then region key is present */
+ ctx->aws_region = flb_output_get_property(FLB_HTTP_AWS_CREDENTIAL_PREFIX
+ "region", ctx->ins);
+ }
+#endif /* FLB_HAVE_AWS */
+#endif /* FLB_HAVE_SIGNV4 */
+
+ /* Check if SSL/TLS is enabled */
+#ifdef FLB_HAVE_TLS
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ }
+#else
+ io_flags = FLB_IO_TCP;
+#endif
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ if (ctx->proxy) {
+ flb_plg_trace(ctx->ins, "Upstream Proxy=%s:%i",
+ ctx->proxy_host, ctx->proxy_port);
+ upstream = flb_upstream_create(config,
+ ctx->proxy_host,
+ ctx->proxy_port,
+ io_flags, ins->tls);
+ }
+ else {
+ upstream = flb_upstream_create(config,
+ ins->host.name,
+ ins->host.port,
+ io_flags, ins->tls);
+ }
+
+ if (!upstream) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ if (ins->host.uri) {
+ uri = flb_strdup(ins->host.uri->full);
+ }
+ else {
+ tmp = flb_output_get_property("uri", ins);
+ if (tmp) {
+ uri = flb_strdup(tmp);
+ }
+ }
+
+ if (!uri) {
+ uri = flb_strdup("/");
+ }
+ else if (uri[0] != '/') {
+ ulen = strlen(uri);
+ tmp_uri = flb_malloc(ulen + 2);
+ if (!tmp_uri) {
+ flb_errno();
+ flb_free(uri);
+ flb_upstream_destroy(upstream);
+ flb_free(ctx);
+ return NULL;
+ }
+ tmp_uri[0] = '/';
+ memcpy(tmp_uri + 1, uri, ulen);
+ tmp_uri[ulen + 1] = '\0';
+ flb_free(uri);
+ uri = tmp_uri;
+ }
+
+ /* Output format */
+ ctx->out_format = FLB_PACK_JSON_FORMAT_NONE;
+ tmp = flb_output_get_property("format", ins);
+ if (tmp) {
+ if (strcasecmp(tmp, "gelf") == 0) {
+ ctx->out_format = FLB_HTTP_OUT_GELF;
+ }
+ else {
+ ret = flb_pack_to_json_format_type(tmp);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "unrecognized 'format' option. "
+ "Using 'msgpack'");
+ }
+ else {
+ ctx->out_format = ret;
+ }
+ }
+ }
+
+ /* Date key */
+ ctx->date_key = ctx->json_date_key;
+ tmp = flb_output_get_property("json_date_key", ins);
+ if (tmp) {
+ /* Just check if we have to disable it */
+ if (flb_utils_bool(tmp) == FLB_FALSE) {
+ ctx->date_key = NULL;
+ }
+ }
+
+ /* Date format for JSON output */
+ ctx->json_date_format = FLB_PACK_JSON_DATE_DOUBLE;
+ tmp = flb_output_get_property("json_date_format", ins);
+ if (tmp) {
+ ret = flb_pack_to_json_date_type(tmp);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "unrecognized 'json_date_format' option. "
+ "Using 'double'.");
+ }
+ else {
+ ctx->json_date_format = ret;
+ }
+ }
+
+ /* Compress (gzip) */
+ tmp = flb_output_get_property("compress", ins);
+ ctx->compress_gzip = FLB_FALSE;
+ if (tmp) {
+ if (strcasecmp(tmp, "gzip") == 0) {
+ ctx->compress_gzip = FLB_TRUE;
+ }
+ }
+
+ ctx->u = upstream;
+ ctx->uri = uri;
+ ctx->host = ins->host.name;
+ ctx->port = ins->host.port;
+
+ /* Set instance flags into upstream */
+ flb_output_upstream_set(ctx->u, ins);
+
+ return ctx;
+}
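
The proxy handling in flb_http_conf_create() above relies on flb_utils_url_split() to break the configured proxy URL into protocol, host, port and URI. The following standalone sketch shows the same idea using only the standard C library; demo_proxy_split() is an illustrative helper, not the actual Fluent Bit implementation, and it skips the edge cases the real function handles.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative only: split "http://host:port/path" into host and port.
 * The plugin itself relies on flb_utils_url_split(), which also returns
 * the protocol and URI components and handles more edge cases. */
static int demo_proxy_split(const char *url, char *host, size_t host_size, int *port)
{
    const char *p;
    const char *colon;
    const char *slash;
    size_t len;

    p = strstr(url, "://");
    if (!p) {
        return -1;
    }
    p += 3;

    slash = strchr(p, '/');
    colon = strchr(p, ':');

    if (colon && (!slash || colon < slash)) {
        len = (size_t) (colon - p);
        *port = atoi(colon + 1);
    }
    else {
        len = slash ? (size_t) (slash - p) : strlen(p);
        *port = 80;   /* assume the plain HTTP default when no port is given */
    }

    if (len == 0 || len >= host_size) {
        return -1;
    }
    memcpy(host, p, len);
    host[len] = '\0';
    return 0;
}

int main(void)
{
    char host[256];
    int port;

    if (demo_proxy_split("http://proxy.example.com:8080/", host, sizeof(host), &port) == 0) {
        /* prints: host=proxy.example.com port=8080 */
        printf("host=%s port=%d\n", host, port);
    }
    return 0;
}
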
+
+void flb_http_conf_destroy(struct flb_out_http *ctx)
+{
+ if (!ctx) {
+ return;
+ }
+
+ if (ctx->body_ra && ctx->headers_ra) {
+ flb_ra_destroy(ctx->body_ra);
+ flb_ra_destroy(ctx->headers_ra);
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+ if (ctx->aws_provider) {
+ flb_aws_provider_destroy(ctx->aws_provider);
+ }
+#endif
+#endif
+
+ flb_free(ctx->proxy_host);
+ flb_free(ctx->uri);
+ flb_free(ctx);
+}
diff --git a/src/fluent-bit/plugins/out_http/http_conf.h b/src/fluent-bit/plugins/out_http/http_conf.h
new file mode 100644
index 000000000..9e87e1002
--- /dev/null
+++ b/src/fluent-bit/plugins/out_http/http_conf.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_HTTP_CONF_H
+#define FLB_OUT_HTTP_CONF_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+
+#include "http.h"
+
+struct flb_out_http *flb_http_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+void flb_http_conf_destroy(struct flb_out_http *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_influxdb/CMakeLists.txt b/src/fluent-bit/plugins/out_influxdb/CMakeLists.txt
new file mode 100644
index 000000000..75d85a6b5
--- /dev/null
+++ b/src/fluent-bit/plugins/out_influxdb/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ influxdb_bulk.c
+ influxdb.c)
+
+FLB_PLUGIN(out_influxdb "${src}" "")
diff --git a/src/fluent-bit/plugins/out_influxdb/influxdb.c b/src/fluent-bit/plugins/out_influxdb/influxdb.c
new file mode 100644
index 000000000..71e489fbc
--- /dev/null
+++ b/src/fluent-bit/plugins/out_influxdb/influxdb.c
@@ -0,0 +1,682 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_metrics.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include <msgpack.h>
+
+#include "influxdb.h"
+#include "influxdb_bulk.h"
+
+#include <stdio.h>
+
+/*
+ * Returns FLB_TRUE when the specified key is in Tag_Keys list,
+ * otherwise FLB_FALSE
+ */
+static int is_tagged_key(struct flb_influxdb *ctx,
+ const char *key, int kl, int type);
+
+/*
+ * Increments the timestamp when it is duplicated
+ */
+static void influxdb_tsmod(struct flb_time *ts, struct flb_time *dupe,
+ struct flb_time *last) {
+ if (flb_time_equal(ts, last) || flb_time_equal(ts, dupe)) {
+ ++dupe->tm.tv_nsec;
+ flb_time_copy(last, ts);
+ flb_time_copy(ts, dupe);
+ }
+ else {
+ flb_time_copy(last, ts);
+ flb_time_copy(dupe, ts);
+ }
+}
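
influxdb_tsmod() above nudges duplicated timestamps forward by one nanosecond so that consecutive records sharing a timestamp are stored as distinct points in InfluxDB. A minimal sketch of the same behaviour, using plain struct timespec instead of struct flb_time (the helper name and the sample values are illustrative):

#include <stdio.h>
#include <time.h>

/* Simplified analogue of influxdb_tsmod(): if the incoming timestamp matches
 * the last one seen, hand out last + 1ns instead so stored points stay unique. */
static void demo_dedupe_ts(struct timespec *ts, struct timespec *dupe,
                           struct timespec *last)
{
    if ((ts->tv_sec == last->tv_sec && ts->tv_nsec == last->tv_nsec) ||
        (ts->tv_sec == dupe->tv_sec && ts->tv_nsec == dupe->tv_nsec)) {
        dupe->tv_nsec++;
        *last = *ts;
        *ts = *dupe;
    }
    else {
        *last = *ts;
        *dupe = *ts;
    }
}

int main(void)
{
    int i;
    struct timespec dupe = {0, 0};
    struct timespec last = {0, 0};
    struct timespec in[3] = {
        {.tv_sec = 100, .tv_nsec = 1},
        {.tv_sec = 100, .tv_nsec = 1},
        {.tv_sec = 100, .tv_nsec = 1}
    };

    for (i = 0; i < 3; i++) {
        demo_dedupe_ts(&in[i], &dupe, &last);
        printf("%ld.%09ld\n", (long) in[i].tv_sec, (long) in[i].tv_nsec);
    }
    /* prints 100.000000001, 100.000000002, 100.000000003 */
    return 0;
}
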
+
+/*
+ * Convert the internal Fluent Bit data representation to the required one
+ * by InfluxDB.
+ */
+static char *influxdb_format(const char *tag, int tag_len,
+ const void *data, size_t bytes, size_t *out_size,
+ struct flb_influxdb *ctx)
+{
+ int i;
+ int ret;
+ int n_size;
+ uint64_t seq = 0;
+ char *buf;
+ char *str = NULL;
+ size_t str_size;
+ char tmp[128];
+ msgpack_object map;
+ struct flb_time tm;
+ struct influxdb_bulk *bulk = NULL;
+ struct influxdb_bulk *bulk_head = NULL;
+ struct influxdb_bulk *bulk_body = NULL;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return NULL;
+ }
+
+ /* Create the bulk composer */
+ bulk = influxdb_bulk_create();
+ if (!bulk) {
+ goto error;
+ }
+
+ bulk_head = influxdb_bulk_create();
+ if (!bulk_head) {
+ goto error;
+ }
+
+ bulk_body = influxdb_bulk_create();
+ if (!bulk_body) {
+ goto error;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ flb_time_copy(&tm, &log_event.timestamp);
+
+ map = *log_event.body;
+ n_size = map.via.map.size + 1;
+
+ seq = ctx->seq;
+ if (ctx->seq + 1 >= 100000) {
+ seq = 1;
+ }
+ else {
+ ctx->seq++;
+ }
+
+ ret = influxdb_bulk_append_header(bulk_head,
+ tag, tag_len,
+ seq,
+ ctx->seq_name, ctx->seq_len);
+ if (ret == -1) {
+ goto error;
+ }
+
+ for (i = 0; i < n_size - 1; i++) {
+ msgpack_object *k = &map.via.map.ptr[i].key;
+ msgpack_object *v = &map.via.map.ptr[i].val;
+
+ if (k->type != MSGPACK_OBJECT_BIN && k->type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ int quote = FLB_FALSE;
+
+ /* key */
+ const char *key = NULL;
+ int key_len;
+
+ /* val */
+ const char *val = NULL;
+ int val_len;
+
+ if (k->type == MSGPACK_OBJECT_STR) {
+ key = k->via.str.ptr;
+ key_len = k->via.str.size;
+ }
+ else {
+ key = k->via.bin.ptr;
+ key_len = k->via.bin.size;
+ }
+
+ /* Store value */
+ if (v->type == MSGPACK_OBJECT_NIL) {
+ /* Missing values are Null by default in InfluxDB */
+ continue;
+ }
+ else if (v->type == MSGPACK_OBJECT_BOOLEAN) {
+ if (v->via.boolean) {
+ val = "TRUE";
+ val_len = 4;
+ }
+ else {
+ val = "FALSE";
+ val_len = 5;
+ }
+ }
+ else if (v->type == MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ val = tmp;
+ val_len = snprintf(tmp, sizeof(tmp) - 1, "%" PRIu64, v->via.u64);
+ }
+ else if (v->type == MSGPACK_OBJECT_NEGATIVE_INTEGER) {
+ val = tmp;
+ val_len = snprintf(tmp, sizeof(tmp) - 1, "%" PRId64, v->via.i64);
+ }
+ else if (v->type == MSGPACK_OBJECT_FLOAT || v->type == MSGPACK_OBJECT_FLOAT32) {
+ val = tmp;
+ val_len = snprintf(tmp, sizeof(tmp) - 1, "%f", v->via.f64);
+ }
+ else if (v->type == MSGPACK_OBJECT_STR) {
+ /* String value */
+ quote = FLB_TRUE;
+ val = v->via.str.ptr;
+ val_len = v->via.str.size;
+ }
+ else if (v->type == MSGPACK_OBJECT_BIN) {
+ /* Bin value */
+ quote = FLB_TRUE;
+ val = v->via.bin.ptr;
+ val_len = v->via.bin.size;
+ }
+
+ if (!val || !key) {
+ continue;
+ }
+
+ /* is this a string ? */
+ if (quote == FLB_TRUE) {
+ ret = flb_utils_write_str_buf(val, val_len,
+ &str, &str_size);
+ if (ret == -1) {
+ flb_errno();
+ goto error;
+ }
+
+ val = str;
+ val_len = str_size;
+ }
+
+ if (is_tagged_key(ctx, key, key_len, v->type)) {
+ /* Append key/value data into the bulk_head */
+ ret = influxdb_bulk_append_kv(bulk_head,
+ key, key_len,
+ val, val_len,
+ false);
+ }
+ else {
+ /* Append key/value data into the bulk_body */
+ ret = influxdb_bulk_append_kv(bulk_body,
+ key, key_len,
+ val, val_len,
+ quote);
+ }
+
+ if (quote == FLB_TRUE) {
+ flb_free(str);
+ str_size = 0;
+ }
+
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "cannot append key/value");
+ goto error;
+ }
+ }
+
+ /* Check if we have data fields */
+ if (bulk_body->len > 0) {
+ /* Modify the timestamp to avoid duplication */
+ influxdb_tsmod(&tm, &ctx->ts_dupe, &ctx->ts_last);
+ /* Append the timestamp */
+ ret = influxdb_bulk_append_timestamp(bulk_body, &tm);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "cannot append timestamp");
+ goto error;
+ }
+
+ /* Append collected data to final bulk */
+ if (influxdb_bulk_append_bulk(bulk, bulk_head, '\n') != 0 ||
+ influxdb_bulk_append_bulk(bulk, bulk_body, ' ') != 0) {
+ goto error;
+ }
+ }
+ else {
+ flb_plg_warn(ctx->ins, "skip send record, "
+ "since no record available "
+ "or all fields are tagged in record");
+ /* Following records maybe ok, so continue processing */
+ }
+
+ /* Reset bulk_head and bulk_body */
+ bulk_head->len = 0;
+ bulk_body->len = 0;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ *out_size = bulk->len;
+ buf = bulk->ptr;
+
+ /*
+ * Note: we don't destroy the bulk as we need to keep the allocated
+ * buffer with the data. Instead we just release the bulk context and
+ * return the bulk->ptr buffer
+ */
+ flb_free(bulk);
+ influxdb_bulk_destroy(bulk_head);
+ influxdb_bulk_destroy(bulk_body);
+
+ return buf;
+
+error:
+ if (bulk != NULL) {
+ influxdb_bulk_destroy(bulk);
+ }
+ if (bulk_head != NULL) {
+ influxdb_bulk_destroy(bulk_head);
+ }
+ if (bulk_body != NULL) {
+ influxdb_bulk_destroy(bulk_body);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return NULL;
+}
+
+static int cb_influxdb_init(struct flb_output_instance *ins, struct flb_config *config,
+ void *data)
+{
+ int ret;
+ int io_flags = 0;
+ const char *tmp;
+ struct flb_upstream *upstream;
+ struct flb_influxdb *ctx;
+
+ /* Set default network configuration */
+ flb_output_net_default(FLB_INFLUXDB_HOST, FLB_INFLUXDB_PORT, ins);
+
+ /* Allocate plugin context */
+ ctx = flb_calloc(1, sizeof(struct flb_influxdb));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+
+ /* Register context with plugin instance */
+ flb_output_set_context(ins, ctx);
+
+ /*
+ * This plugin instance uses the HTTP client interface, let's register
+ * its debugging callbacks.
+ */
+ flb_output_set_http_debug_callbacks(ins);
+
+ /* Load config map */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ return -1;
+ }
+
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ }
+
+ /* sequence tag */
+ tmp = flb_output_get_property("sequence_tag", ins);
+ if (!tmp) {
+ ctx->seq_name = flb_strdup("_seq");
+ }
+ else if (strcmp(tmp, "off") == 0) {
+ ctx->seq_name = flb_strdup("");
+ }
+ else {
+ ctx->seq_name = flb_strdup(tmp);
+ }
+ ctx->seq_len = strlen(ctx->seq_name);
+
+ if (ctx->custom_uri) {
+ /* custom URI endpoint (e.g. Grafana) */
+ if (ctx->custom_uri[0] != '/') {
+ flb_plg_error(ctx->ins,
+ "'custom_uri' value must start wih a forward slash '/'");
+ return -1;
+ }
+ snprintf(ctx->uri, sizeof(ctx->uri) - 1, "%s", ctx->custom_uri);
+ }
+ else if (ctx->bucket) {
+ /* bucket: api v2 */
+ snprintf(ctx->uri, sizeof(ctx->uri) - 1,
+ "/api/v2/write?org=%s&bucket=%s&precision=ns",
+ ctx->organization, ctx->bucket);
+ }
+ else {
+ snprintf(ctx->uri, sizeof(ctx->uri) - 1,
+ "/write?db=%s&precision=n",
+ ctx->database);
+ }
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+
+ /* Tag_Keys */
+ tmp = flb_output_get_property("tag_keys", ins);
+ if (tmp) {
+ ctx->tag_keys = flb_utils_split(tmp, ' ', 256);
+ }
+ else {
+ ctx->tag_keys = NULL;
+ }
+
+ /* Prepare an upstream handler */
+ upstream = flb_upstream_create(config,
+ ins->host.name,
+ ins->host.port,
+ io_flags,
+ ins->tls);
+ if (!upstream) {
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->u = upstream;
+ ctx->seq = 0;
+ flb_output_upstream_set(ctx->u, ins);
+
+ flb_time_zero(&ctx->ts_dupe);
+ flb_time_zero(&ctx->ts_last);
+
+ flb_plg_debug(ctx->ins, "host=%s port=%i", ins->host.name, ins->host.port);
+
+ return 0;
+}
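
Depending on the configuration, cb_influxdb_init() above builds one of three write endpoints: a user supplied custom_uri, the API v2 bucket endpoint, or the API v1 /write endpoint. A small self-contained sketch of the resulting URIs for example values (the org, bucket and database names are made up):

#include <stdio.h>

int main(void)
{
    char uri[2048];

    /* API v2: organization + bucket (values are illustrative) */
    snprintf(uri, sizeof(uri) - 1,
             "/api/v2/write?org=%s&bucket=%s&precision=ns",
             "fluent", "mybucket");
    printf("%s\n", uri);   /* /api/v2/write?org=fluent&bucket=mybucket&precision=ns */

    /* API v1: database only */
    snprintf(uri, sizeof(uri) - 1,
             "/write?db=%s&precision=n",
             "fluentbit");
    printf("%s\n", uri);   /* /write?db=fluentbit&precision=n */

    return 0;
}
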
+
+static int format_metrics(struct flb_output_instance *ins,
+ const void *data, size_t bytes,
+ char **out_buf, size_t *out_size)
+{
+ int ret;
+ size_t off = 0;
+ cfl_sds_t text;
+ struct cmt *cmt = NULL;
+
+ /* get cmetrics context */
+ ret = cmt_decode_msgpack_create(&cmt, (char *) data, bytes, &off);
+ if (ret != 0) {
+ flb_plg_error(ins, "could not process metrics payload");
+ return -1;
+ }
+
+ /* convert to text representation */
+ text = cmt_encode_influx_create(cmt);
+ if (!text) {
+ cmt_destroy(cmt);
+ return -1;
+ }
+
+ /* destroy cmt context */
+ cmt_destroy(cmt);
+
+ *out_buf = text;
+ *out_size = flb_sds_len(text);
+
+ return 0;
+}
+
+static void cb_influxdb_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ int out_ret = FLB_OK;
+ int is_metric = FLB_FALSE;
+ size_t b_sent;
+ size_t bytes_out;
+ char *pack;
+ char tmp[128];
+ struct mk_list *head;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ struct flb_config_map_val *mv;
+ struct flb_slist_entry *key = NULL;
+ struct flb_slist_entry *val = NULL;
+ struct flb_influxdb *ctx = out_context;
+
+ /* Convert format: metrics / logs */
+ if (event_chunk->type == FLB_EVENT_TYPE_METRICS) {
+ /* format metrics */
+ ret = format_metrics(ctx->ins,
+ (char *) event_chunk->data,
+ event_chunk->size,
+ &pack, &bytes_out);
+ if (ret == -1) {
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+ is_metric = FLB_TRUE;
+ }
+ else {
+ /* format logs */
+ pack = influxdb_format(event_chunk->tag, flb_sds_len(event_chunk->tag),
+ event_chunk->data, event_chunk->size,
+ &bytes_out, ctx);
+ if (!pack) {
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+ }
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ if (is_metric) {
+ cmt_encode_influx_destroy(pack);
+ }
+ else {
+ flb_free(pack);
+ }
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->uri,
+ pack, bytes_out, NULL, 0, NULL, 0);
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+
+ if (ctx->http_token) {
+ ret = snprintf(tmp, sizeof(tmp) - 1, "Token %s", ctx->http_token);
+ flb_http_add_header(c, FLB_HTTP_HEADER_AUTH, sizeof FLB_HTTP_HEADER_AUTH - 1, tmp, ret);
+ }
+ else if (ctx->http_user && ctx->http_passwd) {
+ flb_http_basic_auth(c, ctx->http_user, ctx->http_passwd);
+ }
+
+ /* Append custom headers if any */
+ flb_config_map_foreach(head, mv, ctx->headers) {
+ key = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ val = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ flb_http_add_header(c,
+ key->str, flb_sds_len(key->str),
+ val->str, flb_sds_len(val->str));
+ }
+
+ /* Map debug callbacks */
+ flb_http_client_debug(c, ctx->ins->callback);
+
+ ret = flb_http_do(c, &b_sent);
+ if (ret == 0) {
+ if (c->resp.status != 200 && c->resp.status != 204) {
+ if (c->resp.payload_size > 0) {
+ flb_plg_error(ctx->ins, "http_status=%i\n%s",
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "http_status=%i",
+ c->resp.status);
+ }
+ }
+ flb_plg_debug(ctx->ins, "http_do=%i OK", ret);
+ }
+ else {
+ flb_plg_error(ctx->ins, "http_do=%i", ret);
+ out_ret = FLB_RETRY;
+ }
+
+ flb_http_client_destroy(c);
+
+ if (is_metric) {
+ cmt_encode_influx_destroy(pack);
+ }
+ else {
+ flb_free(pack);
+ }
+
+ /* Release the connection */
+ flb_upstream_conn_release(u_conn);
+
+ FLB_OUTPUT_RETURN(out_ret);
+}
+
+static int cb_influxdb_exit(void *data, struct flb_config *config)
+{
+ struct flb_influxdb *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->tag_keys) {
+ flb_utils_split_free(ctx->tag_keys);
+ }
+
+ flb_free(ctx->seq_name);
+ flb_upstream_destroy(ctx->u);
+ flb_free(ctx);
+
+ return 0;
+}
+
+int is_tagged_key(struct flb_influxdb *ctx, const char *key, int kl, int type)
+{
+ if (type == MSGPACK_OBJECT_STR) {
+ if (ctx->auto_tags) {
+ return FLB_TRUE;
+ }
+ }
+
+ struct mk_list *head;
+ struct flb_split_entry *entry;
+
+ if (ctx->tag_keys) {
+ mk_list_foreach(head, ctx->tag_keys) {
+ entry = mk_list_entry(head, struct flb_split_entry, _head);
+ if (kl == entry->len && strncmp(key, entry->value, kl) == 0) {
+ return FLB_TRUE;
+ }
+ }
+ }
+
+ return FLB_FALSE;
+}
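
is_tagged_key() above walks the Tag_Keys list, which cb_influxdb_init() splits on spaces, and reports whether a given record key should be written as an InfluxDB tag. A standalone sketch of the same membership test over a space separated list, using only the standard library (the helper name is illustrative):

#include <stdio.h>
#include <string.h>

/* Return 1 when 'key' appears in the space separated 'tag_keys' list */
static int demo_is_tagged(const char *tag_keys, const char *key)
{
    char buf[256];
    char *save = NULL;
    char *tok;

    snprintf(buf, sizeof(buf), "%s", tag_keys);
    for (tok = strtok_r(buf, " ", &save); tok != NULL;
         tok = strtok_r(NULL, " ", &save)) {
        if (strcmp(tok, key) == 0) {
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    const char *tag_keys = "host region";

    printf("%d\n", demo_is_tagged(tag_keys, "region"));   /* 1 */
    printf("%d\n", demo_is_tagged(tag_keys, "level"));    /* 0 */
    return 0;
}
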
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "database", "fluentbit",
+ 0, FLB_TRUE, offsetof(struct flb_influxdb, database),
+ "Set the database name."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "bucket", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_influxdb, bucket),
+ "Specify the bucket name, used on InfluxDB API v2."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "org", "fluent",
+ 0, FLB_TRUE, offsetof(struct flb_influxdb, organization),
+ "Set the Organization name."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "sequence_tag", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify the sequence tag."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "uri", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_influxdb, custom_uri),
+ "Specify a custom URI endpoint (must start with '/')."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "http_user", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_influxdb, http_user),
+ "HTTP Basic Auth username."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "http_passwd", "",
+ 0, FLB_TRUE, offsetof(struct flb_influxdb, http_passwd),
+ "HTTP Basic Auth password."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "http_token", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_influxdb, http_token),
+ "Set InfluxDB HTTP Token API v2."
+ },
+
+ {
+ FLB_CONFIG_MAP_SLIST_1, "http_header", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_influxdb, headers),
+ "Add a HTTP header key/value pair. Multiple headers can be set"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "auto_tags", "false",
+ 0, FLB_TRUE, offsetof(struct flb_influxdb, auto_tags),
+ "Automatically tag keys where value is string."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "tag_keys", NULL,
+ 0, FLB_FALSE, 0,
+ "Space separated list of keys that needs to be tagged."
+ },
+
+ /* EOF */
+ {0}
+};
+
+struct flb_output_plugin out_influxdb_plugin = {
+ .name = "influxdb",
+ .description = "InfluxDB Time Series",
+ .cb_init = cb_influxdb_init,
+ .cb_pre_run = NULL,
+ .cb_flush = cb_influxdb_flush,
+ .cb_exit = cb_influxdb_exit,
+ .config_map = config_map,
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+ .event_type = FLB_OUTPUT_LOGS | FLB_OUTPUT_METRICS
+};
diff --git a/src/fluent-bit/plugins/out_influxdb/influxdb.h b/src/fluent-bit/plugins/out_influxdb/influxdb.h
new file mode 100644
index 000000000..13a72d4b8
--- /dev/null
+++ b/src/fluent-bit/plugins/out_influxdb/influxdb.h
@@ -0,0 +1,78 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_INFLUXDB_H
+#define FLB_OUT_INFLUXDB_H
+
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_time.h>
+
+#define FLB_INFLUXDB_HOST "127.0.0.1"
+#define FLB_INFLUXDB_PORT 8086
+
+struct flb_influxdb {
+ uint64_t seq;
+
+ char uri[2048];
+
+ /* v1 */
+ /* database */
+ flb_sds_t database;
+
+ /* HTTP Auth */
+ flb_sds_t http_user;
+ flb_sds_t http_passwd;
+
+ /* v2 */
+ /* bucket */
+ flb_sds_t bucket;
+
+ /* organization */
+ flb_sds_t organization;
+
+ /* custom HTTP URI */
+ flb_sds_t custom_uri;
+
+ /* HTTP Token */
+ flb_sds_t http_token;
+
+ /* sequence tag */
+ char *seq_name;
+ int seq_len;
+
+ /* auto_tags: on/off */
+ int auto_tags;
+
+ /* tag_keys: space separated list of keys */
+ struct mk_list *tag_keys;
+
+ /* Arbitrary HTTP headers */
+ struct mk_list *headers;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *u;
+
+ /* used for incrementing identical timestamps */
+ struct flb_time ts_dupe;
+ struct flb_time ts_last;
+
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_influxdb/influxdb_bulk.c b/src/fluent-bit/plugins/out_influxdb/influxdb_bulk.c
new file mode 100644
index 000000000..2542ee3c4
--- /dev/null
+++ b/src/fluent-bit/plugins/out_influxdb/influxdb_bulk.c
@@ -0,0 +1,233 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+
+#include <fluent-bit.h>
+#include "influxdb.h"
+#include "influxdb_bulk.h"
+
+static const uint64_t ONE_BILLION = 1000000000;
+
+static int influxdb_escape(char *out, const char *str, int size, bool quote) {
+ int out_size = 0;
+ int i;
+ for (i = 0; i < size; ++i) {
+ char ch = str[i];
+ if (quote ? (ch == '"' || ch == '\\') : (isspace(ch) || ch == ',' || ch == '=')) {
+ out[out_size++] = '\\';
+ } else if (ch == '\\') {
+ out[out_size++] = '\\';
+ }
+ out[out_size++] = ch;
+ }
+ return out_size;
+}
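
influxdb_escape() above implements the line protocol escaping rules: keys and unquoted values get spaces, commas and '=' backslash-escaped, while quoted string values get double quotes and backslashes escaped. A simplified standalone copy with two worked inputs, for illustration only (the real helper also escapes stray backslashes in the unquoted case):

#include <stdio.h>
#include <ctype.h>
#include <stdbool.h>

/* Simplified copy of the escaping rules, for illustration only */
static int demo_escape(char *out, const char *str, int size, bool quote)
{
    int n = 0;
    int i;

    for (i = 0; i < size; i++) {
        char ch = str[i];
        if (quote ? (ch == '"' || ch == '\\')
                  : (isspace((unsigned char) ch) || ch == ',' || ch == '=')) {
            out[n++] = '\\';
        }
        out[n++] = ch;
    }
    out[n] = '\0';
    return n;
}

int main(void)
{
    char buf[128];

    demo_escape(buf, "host name", 9, false);
    printf("%s\n", buf);   /* host\ name   (key / tag escaping) */

    demo_escape(buf, "say \"hi\"", 8, true);
    printf("%s\n", buf);   /* say \"hi\"   (quoted string value escaping) */

    return 0;
}
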
+
+static int influxdb_bulk_buffer(struct influxdb_bulk *bulk, int required)
+{
+ int new_size;
+ int available;
+ char *ptr;
+
+ available = (bulk->size - bulk->len);
+ if (available < required) {
+ new_size = bulk->size + available + required + INFLUXDB_BULK_CHUNK;
+ ptr = flb_realloc(bulk->ptr, new_size);
+ if (!ptr) {
+ flb_errno();
+ return -1;
+ }
+ bulk->ptr = ptr;
+ bulk->size = new_size;
+ }
+
+ return 0;
+}
+
+struct influxdb_bulk *influxdb_bulk_create()
+{
+ struct influxdb_bulk *b;
+
+ b = flb_malloc(sizeof(struct influxdb_bulk));
+ if (!b) {
+ perror("calloc");
+ return NULL;
+ }
+
+ b->ptr = flb_malloc(INFLUXDB_BULK_CHUNK);
+ if (!b->ptr) {
+ perror("malloc");
+ flb_free(b);
+ return NULL;
+ }
+
+ b->size = INFLUXDB_BULK_CHUNK;
+ b->len = 0;
+
+ return b;
+}
+
+void influxdb_bulk_destroy(struct influxdb_bulk *bulk)
+{
+ if (bulk->size > 0) {
+ flb_free(bulk->ptr);
+ }
+ flb_free(bulk);
+}
+
+int influxdb_bulk_append_header(struct influxdb_bulk *bulk,
+ const char *tag, int tag_len,
+ uint64_t seq_n, const char *seq, int seq_len)
+{
+ int ret;
+ int required;
+
+ required = tag_len + 1 + seq_len + 1 + 32;
+
+ /* Make sure we have enough space */
+ ret = influxdb_bulk_buffer(bulk, required);
+ if (ret != 0) {
+ return -1;
+ }
+
+ /* Tag, sequence and final space */
+ memcpy(bulk->ptr + bulk->len, tag, tag_len);
+ bulk->len += tag_len;
+
+ if (seq_len != 0) {
+ bulk->ptr[bulk->len] = ',';
+ bulk->len++;
+
+ /* Sequence number */
+ memcpy(bulk->ptr + bulk->len, seq, seq_len);
+ bulk->len += seq_len;
+
+ bulk->ptr[bulk->len] = '=';
+ bulk->len++;
+
+ ret = snprintf(bulk->ptr + bulk->len, 32, "%" PRIu64, seq_n);
+ bulk->len += ret;
+ }
+
+ /* Add a NULL byte for debugging purposes */
+ bulk->ptr[bulk->len] = '\0';
+
+ return 0;
+}
+
+int influxdb_bulk_append_kv(struct influxdb_bulk *bulk,
+ const char *key, int k_len,
+ const char *val, int v_len,
+ int quote)
+{
+ int ret;
+ int required;
+
+ /* Reserve double space for keys and values in case of escaping */
+ required = k_len * 2 + 1 + v_len * 2 + 1 + 1;
+ if (quote) {
+ required += 2;
+ }
+
+ /* Make sure we have enough space */
+ ret = influxdb_bulk_buffer(bulk, required);
+ if (ret != 0) {
+ return -1;
+ }
+
+ if (bulk->len > 0) {
+ bulk->ptr[bulk->len] = ',';
+ bulk->len++;
+ }
+
+ /* key */
+ bulk->len += influxdb_escape(bulk->ptr + bulk->len, key, k_len, false);
+
+ /* separator */
+ bulk->ptr[bulk->len] = '=';
+ bulk->len++;
+
+ /* value */
+ if (quote) {
+ bulk->ptr[bulk->len] = '"';
+ bulk->len++;
+ }
+ bulk->len += influxdb_escape(bulk->ptr + bulk->len, val, v_len, quote);
+ if (quote) {
+ bulk->ptr[bulk->len] = '"';
+ bulk->len++;
+ }
+
+ /* Add a NULL byte for debugging purposes */
+ bulk->ptr[bulk->len] = '\0';
+
+ return 0;
+};
+
+int influxdb_bulk_append_timestamp(struct influxdb_bulk *bulk,
+ struct flb_time *t)
+{
+ int ret;
+ int len;
+ uint64_t timestamp;
+
+ /* Make sure we have enough space */
+ ret = influxdb_bulk_buffer(bulk, 128);
+ if (ret != 0) {
+ return -1;
+ }
+
+ /* Timestamp is in Nanoseconds */
+ timestamp = (t->tm.tv_sec * ONE_BILLION) + t->tm.tv_nsec;
+ len = snprintf(bulk->ptr + bulk->len, 127, " %" PRIu64, timestamp);
+ if (len == -1) {
+ return -1;
+ }
+ bulk->len += len;
+ bulk->ptr[bulk->len] = '\0';
+
+ return 0;
+};
+
+int influxdb_bulk_append_bulk(struct influxdb_bulk *bulk_to,
+ struct influxdb_bulk *bulk_from,
+ char separator)
+{
+ if (influxdb_bulk_buffer(bulk_to, bulk_from->len + 2) != 0) {
+ return -1;
+ }
+
+ if (bulk_to->len > 0) {
+ bulk_to->ptr[bulk_to->len] = separator;
+ bulk_to->len += 1;
+ }
+
+ memcpy(bulk_to->ptr + bulk_to->len,
+ bulk_from->ptr, bulk_from->len * sizeof(char));
+ bulk_to->len += bulk_from->len;
+
+ /* Always keep the buffer NULL terminated */
+ bulk_to->ptr[bulk_to->len] = '\0';
+
+ return 0;
+};
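
Taken together, the helpers above assemble one InfluxDB line protocol record: a measurement header with an optional sequence tag, comma separated key=value fields, and a trailing nanosecond timestamp. A hedged usage sketch, assuming it is compiled and linked against the plugin sources and the Fluent Bit headers (the tag, field and timestamp values are made up):

#include <stdio.h>
#include <fluent-bit/flb_time.h>
#include "influxdb_bulk.h"

int main(void)
{
    struct influxdb_bulk *bulk;
    struct influxdb_bulk *head;
    struct influxdb_bulk *body;
    struct flb_time tm = { .tm = { .tv_sec = 1465839830, .tv_nsec = 100400200 } };

    bulk = influxdb_bulk_create();
    head = influxdb_bulk_create();
    body = influxdb_bulk_create();
    if (!bulk || !head || !body) {
        return 1;
    }

    /* measurement (the record tag) plus the sequence tag _seq=1 */
    influxdb_bulk_append_header(head, "app.log", 7, 1, "_seq", 4);

    /* one quoted string field and one unquoted numeric field */
    influxdb_bulk_append_kv(body, "message", 7, "hello world", 11, 1);
    influxdb_bulk_append_kv(body, "status", 6, "200", 3, 0);

    /* trailing timestamp in nanoseconds */
    influxdb_bulk_append_timestamp(body, &tm);

    /* join: header, a space, then the fields and the timestamp */
    influxdb_bulk_append_bulk(bulk, head, '\n');
    influxdb_bulk_append_bulk(bulk, body, ' ');

    /* roughly: app.log,_seq=1 message="hello world",status=200 1465839830100400200 */
    printf("%s\n", bulk->ptr);

    influxdb_bulk_destroy(head);
    influxdb_bulk_destroy(body);
    influxdb_bulk_destroy(bulk);
    return 0;
}
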
diff --git a/src/fluent-bit/plugins/out_influxdb/influxdb_bulk.h b/src/fluent-bit/plugins/out_influxdb/influxdb_bulk.h
new file mode 100644
index 000000000..66f914528
--- /dev/null
+++ b/src/fluent-bit/plugins/out_influxdb/influxdb_bulk.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_INFLUXDB_BULK_H
+#define FLB_INFLUXDB_BULK_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_time.h>
+#include <inttypes.h>
+
+#define INFLUXDB_BULK_CHUNK 4096 /* 4KB of buffer chunks */
+
+struct influxdb_bulk {
+ char *ptr;
+ uint32_t len;
+ uint32_t size;
+};
+
+struct influxdb_bulk *influxdb_bulk_create();
+
+int influxdb_bulk_append_header(struct influxdb_bulk *bulk,
+ const char *tag, int tag_len,
+ uint64_t seq_n, const char *seq, int seq_len);
+
+int influxdb_bulk_append_kv(struct influxdb_bulk *bulk,
+ const char *key, int k_len,
+ const char *val, int v_len,
+ int quote);
+
+int influxdb_bulk_append_bulk(struct influxdb_bulk *bulk_to,
+ struct influxdb_bulk *bulk_from,
+ char separator);
+
+void influxdb_bulk_destroy(struct influxdb_bulk *bulk);
+int influxdb_bulk_append_timestamp(struct influxdb_bulk *bulk,
+ struct flb_time *t);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_kafka/CMakeLists.txt b/src/fluent-bit/plugins/out_kafka/CMakeLists.txt
new file mode 100644
index 000000000..526910d49
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kafka/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Fluent Bit Kafka Output plugin
+set(src
+ kafka_config.c
+ kafka_topic.c
+ kafka.c)
+
+FLB_PLUGIN(out_kafka "${src}" "rdkafka")
+target_link_libraries(flb-plugin-out_kafka -lpthread)
diff --git a/src/fluent-bit/plugins/out_kafka/kafka.c b/src/fluent-bit/plugins/out_kafka/kafka.c
new file mode 100644
index 000000000..ff700a687
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kafka/kafka.c
@@ -0,0 +1,658 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include "kafka_config.h"
+#include "kafka_topic.h"
+
+void cb_kafka_msg(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage,
+ void *opaque)
+{
+ struct flb_out_kafka *ctx = (struct flb_out_kafka *) opaque;
+
+ if (rkmessage->err) {
+ flb_plg_warn(ctx->ins, "message delivery failed: %s",
+ rd_kafka_err2str(rkmessage->err));
+ }
+ else {
+ flb_plg_debug(ctx->ins, "message delivered (%zd bytes, "
+ "partition %"PRId32")",
+ rkmessage->len, rkmessage->partition);
+ }
+}
+
+void cb_kafka_logger(const rd_kafka_t *rk, int level,
+ const char *fac, const char *buf)
+{
+ struct flb_out_kafka *ctx;
+
+ ctx = (struct flb_out_kafka *) rd_kafka_opaque(rk);
+
+ if (level <= FLB_KAFKA_LOG_ERR) {
+ flb_plg_error(ctx->ins, "%s: %s",
+ rk ? rd_kafka_name(rk) : NULL, buf);
+ }
+ else if (level == FLB_KAFKA_LOG_WARNING) {
+ flb_plg_warn(ctx->ins, "%s: %s",
+ rk ? rd_kafka_name(rk) : NULL, buf);
+ }
+ else if (level == FLB_KAFKA_LOG_NOTICE || level == FLB_KAFKA_LOG_INFO) {
+ flb_plg_info(ctx->ins, "%s: %s",
+ rk ? rd_kafka_name(rk) : NULL, buf);
+ }
+ else if (level == FLB_KAFKA_LOG_DEBUG) {
+ flb_plg_debug(ctx->ins, "%s: %s",
+ rk ? rd_kafka_name(rk) : NULL, buf);
+ }
+}
+
+static int cb_kafka_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ struct flb_out_kafka *ctx;
+
+ /* Configuration */
+ ctx = flb_out_kafka_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "failed to initialize");
+ return -1;
+ }
+
+ /* Set global context */
+ flb_output_set_context(ins, ctx);
+ return 0;
+}
+
+int produce_message(struct flb_time *tm, msgpack_object *map,
+ struct flb_out_kafka *ctx, struct flb_config *config)
+{
+ int i;
+ int ret;
+ int size;
+ int queue_full_retries = 0;
+ char *out_buf;
+ size_t out_size;
+ struct mk_list *head;
+ struct mk_list *topics;
+ struct flb_split_entry *entry;
+ char *dynamic_topic;
+ char *message_key = NULL;
+ size_t message_key_len = 0;
+ struct flb_kafka_topic *topic = NULL;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ msgpack_object key;
+ msgpack_object val;
+ flb_sds_t s;
+
+#ifdef FLB_HAVE_AVRO_ENCODER
+ // used to flag when a buffer needs to be freed for avro
+ bool avro_fast_buffer = true;
+
+ // avro encoding uses a buffer
+ // the majority of lines are fairly small
+ // so using static buffer for these is much more efficient
+ // larger sizes will allocate
+#ifndef AVRO_DEFAULT_BUFFER_SIZE
+#define AVRO_DEFAULT_BUFFER_SIZE 2048
+#endif
+ static char avro_buff[AVRO_DEFAULT_BUFFER_SIZE];
+
+ // don't take lines that are too large
+ // these lines will log a warning
+ // this is roughly a log line of 250000 chars
+#ifndef AVRO_LINE_MAX_LEN
+#define AVRO_LINE_MAX_LEN 1000000
+
+ // this is a convenience
+#define AVRO_FREE(X, Y) if (!X) { flb_free(Y); }
+#endif
+
+ // this is just to keep the code cleaner
+ // the avro encoding includes
+ // an embedded schemaid which is used
+ // the embedding is a null byte
+ // followed by a 16 byte schemaid
+#define AVRO_SCHEMA_OVERHEAD (16 + 1)
+#endif
+
+ flb_debug("in produce_message\n");
+ if (flb_log_check(FLB_LOG_DEBUG)) {
+ msgpack_object_print(stderr, *map);
+ }
+
+ /* Init temporary buffers */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ if (ctx->format == FLB_KAFKA_FMT_JSON || ctx->format == FLB_KAFKA_FMT_MSGP) {
+ /* Make room for the timestamp */
+ size = map->via.map.size + 1;
+ msgpack_pack_map(&mp_pck, size);
+
+ /* Pack timestamp */
+ msgpack_pack_str(&mp_pck, ctx->timestamp_key_len);
+ msgpack_pack_str_body(&mp_pck,
+ ctx->timestamp_key, ctx->timestamp_key_len);
+ switch (ctx->timestamp_format) {
+ case FLB_JSON_DATE_DOUBLE:
+ msgpack_pack_double(&mp_pck, flb_time_to_double(tm));
+ break;
+
+ case FLB_JSON_DATE_ISO8601:
+ case FLB_JSON_DATE_ISO8601_NS:
+ {
+ size_t date_len;
+ int len;
+ struct tm _tm;
+ char time_formatted[36];
+
+ /* Format the time; use microsecond precision (not nanoseconds). */
+ gmtime_r(&tm->tm.tv_sec, &_tm);
+ date_len = strftime(time_formatted, sizeof(time_formatted) - 1,
+ FLB_JSON_DATE_ISO8601_FMT, &_tm);
+
+ if (ctx->timestamp_format == FLB_JSON_DATE_ISO8601) {
+ len = snprintf(time_formatted + date_len, sizeof(time_formatted) - 1 - date_len,
+ ".%06" PRIu64 "Z", (uint64_t) tm->tm.tv_nsec / 1000);
+ }
+ else {
+ /* FLB_JSON_DATE_ISO8601_NS */
+ len = snprintf(time_formatted + date_len, sizeof(time_formatted) - 1 - date_len,
+ ".%09" PRIu64 "Z", (uint64_t) tm->tm.tv_nsec);
+ }
+ date_len += len;
+
+ msgpack_pack_str(&mp_pck, date_len);
+ msgpack_pack_str_body(&mp_pck, time_formatted, date_len);
+ }
+ break;
+ }
+ }
+ else {
+ size = map->via.map.size;
+ msgpack_pack_map(&mp_pck, size);
+ }
+
+ for (i = 0; i < map->via.map.size; i++) {
+ key = map->via.map.ptr[i].key;
+ val = map->via.map.ptr[i].val;
+
+ msgpack_pack_object(&mp_pck, key);
+ msgpack_pack_object(&mp_pck, val);
+
+ /* Lookup message key */
+ if (ctx->message_key_field && !message_key && val.type == MSGPACK_OBJECT_STR) {
+ if (key.via.str.size == ctx->message_key_field_len &&
+ strncmp(key.via.str.ptr, ctx->message_key_field, ctx->message_key_field_len) == 0) {
+ message_key = (char *) val.via.str.ptr;
+ message_key_len = val.via.str.size;
+ }
+ }
+
+ /* Lookup key/topic */
+ if (ctx->topic_key && !topic && val.type == MSGPACK_OBJECT_STR) {
+ if (key.via.str.size == ctx->topic_key_len &&
+ strncmp(key.via.str.ptr, ctx->topic_key, ctx->topic_key_len) == 0) {
+ topic = flb_kafka_topic_lookup((char *) val.via.str.ptr,
+ val.via.str.size, ctx);
+ /* Add extracted topic on the fly to topiclist */
+ if (ctx->dynamic_topic) {
+ /* Only add it when the lookup fell back to the default topic and this topic name is not registered yet */
+ if (strncmp(topic->name, flb_kafka_topic_default(ctx)->name, val.via.str.size) == 0 &&
+ (strncmp(topic->name, val.via.str.ptr, val.via.str.size) != 0) ) {
+ if (memchr(val.via.str.ptr, ',', val.via.str.size)) {
+ /* Don't allow commas in kafkatopic name */
+ flb_warn("',' not allowed in dynamic_kafka topic names");
+ continue;
+ }
+ if (val.via.str.size > 64) {
+ /* Don't allow length of dynamic kafka topics > 64 */
+ flb_warn(" dynamic kafka topic length > 64 not allowed");
+ continue;
+ }
+ dynamic_topic = flb_malloc(val.via.str.size + 1);
+ if (!dynamic_topic) {
+ /* Use default topic */
+ flb_errno();
+ continue;
+ }
+ strncpy(dynamic_topic, val.via.str.ptr, val.via.str.size);
+ dynamic_topic[val.via.str.size] = '\0';
+ topics = flb_utils_split(dynamic_topic, ',', 0);
+ if (!topics) {
+ /* Use the default topic */
+ flb_errno();
+ flb_free(dynamic_topic);
+ continue;
+ }
+ mk_list_foreach(head, topics) {
+ /* Add the (one) found topicname to the topic configuration */
+ entry = mk_list_entry(head, struct flb_split_entry, _head);
+ topic = flb_kafka_topic_create(entry->value, ctx);
+ if (!topic) {
+ /* Use default topic */
+ flb_error("[out_kafka] cannot register topic '%s'",
+ entry->value);
+ topic = flb_kafka_topic_lookup((char *) val.via.str.ptr,
+ val.via.str.size, ctx);
+ }
+ else {
+ flb_info("[out_kafka] new topic added: %s", dynamic_topic);
+ }
+ }
+ flb_free(dynamic_topic);
+ }
+ }
+ }
+ }
+ }
+
+ if (ctx->format == FLB_KAFKA_FMT_JSON) {
+ s = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+ if (!s) {
+ flb_plg_error(ctx->ins, "error encoding to JSON");
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return FLB_ERROR;
+ }
+ out_buf = s;
+ out_size = flb_sds_len(out_buf);
+ }
+ else if (ctx->format == FLB_KAFKA_FMT_MSGP) {
+ out_buf = mp_sbuf.data;
+ out_size = mp_sbuf.size;
+ }
+ else if (ctx->format == FLB_KAFKA_FMT_GELF) {
+ s = flb_msgpack_raw_to_gelf(mp_sbuf.data, mp_sbuf.size,
+ tm, &(ctx->gelf_fields));
+ if (s == NULL) {
+ flb_plg_error(ctx->ins, "error encoding to GELF");
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return FLB_ERROR;
+ }
+ out_buf = s;
+ out_size = flb_sds_len(s);
+ }
+#ifdef FLB_HAVE_AVRO_ENCODER
+ else if (ctx->format == FLB_KAFKA_FMT_AVRO) {
+
+ flb_plg_debug(ctx->ins, "avro schema ID:%s:\n", ctx->avro_fields.schema_id);
+ flb_plg_debug(ctx->ins, "avro schema string:%s:\n", ctx->avro_fields.schema_str);
+
+ // if there's no data then log it and return
+ if (mp_sbuf.size == 0) {
+ flb_plg_error(ctx->ins, "got zero bytes decoding to avro AVRO:schemaID:%s:\n", ctx->avro_fields.schema_id);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return FLB_OK;
+ }
+
+ // if the line is too long, log it and return
+ if (mp_sbuf.size > AVRO_LINE_MAX_LEN) {
+ flb_plg_warn(ctx->ins, "skipping long line AVRO:len:%zu:limit:%zu:schemaID:%s:\n", (size_t)mp_sbuf.size, (size_t)AVRO_LINE_MAX_LEN, ctx->avro_fields.schema_id);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return FLB_OK;
+ }
+
+ flb_plg_debug(ctx->ins, "using default buffer AVRO:len:%zu:limit:%zu:schemaID:%s:\n", (size_t)mp_sbuf.size, (size_t)AVRO_DEFAULT_BUFFER_SIZE, ctx->avro_fields.schema_id);
+ out_buf = avro_buff;
+ out_size = AVRO_DEFAULT_BUFFER_SIZE;
+
+ if (mp_sbuf.size + AVRO_SCHEMA_OVERHEAD >= AVRO_DEFAULT_BUFFER_SIZE) {
+ flb_plg_info(ctx->ins, "upsizing to dynamic buffer AVRO:len:%zu:schemaID:%s:\n", (size_t)mp_sbuf.size, ctx->avro_fields.schema_id);
+ avro_fast_buffer = false;
+ // avro will always be smaller than msgpack
+ // it contains no meta-info aside from the schemaid
+ // all the metadata is in the schema which is not part of the msg
+ // add schemaid + magic byte for safety buffer and allocate
+ // that's 16 byte schemaid and one byte magic byte
+ out_size = mp_sbuf.size + AVRO_SCHEMA_OVERHEAD;
+ out_buf = flb_malloc(out_size);
+ if (!out_buf) {
+ flb_plg_error(ctx->ins, "error allocating memory for decoding to AVRO:schema:%s:schemaID:%s:\n", ctx->avro_fields.schema_str, ctx->avro_fields.schema_id);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return FLB_ERROR;
+ }
+ }
+
+ if(!flb_msgpack_raw_to_avro_sds(mp_sbuf.data, mp_sbuf.size, &ctx->avro_fields, out_buf, &out_size)) {
+ flb_plg_error(ctx->ins, "error encoding to AVRO:schema:%s:schemaID:%s:\n", ctx->avro_fields.schema_str, ctx->avro_fields.schema_id);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ if (!avro_fast_buffer) {
+ flb_free(out_buf);
+ }
+ return FLB_ERROR;
+ }
+
+ }
+#endif
+
+ if (!message_key) {
+ message_key = ctx->message_key;
+ message_key_len = ctx->message_key_len;
+ }
+
+ if (!topic) {
+ topic = flb_kafka_topic_default(ctx);
+ }
+ if (!topic) {
+ flb_plg_error(ctx->ins, "no default topic found");
+ msgpack_sbuffer_destroy(&mp_sbuf);
+#ifdef FLB_HAVE_AVRO_ENCODER
+ if (ctx->format == FLB_KAFKA_FMT_AVRO) {
+ AVRO_FREE(avro_fast_buffer, out_buf)
+ }
+#endif
+ return FLB_ERROR;
+ }
+
+ retry:
+ /*
+ * If the local rdkafka queue is full, we retry up to 'queue_full_retries'
+ * times set by the configuration (default: 10). If the configuration
+ * property was set to '0' or 'false', we don't impose a limit. Use that
+ * value at your own risk.
+ */
+ if (ctx->queue_full_retries > 0 &&
+ queue_full_retries >= ctx->queue_full_retries) {
+ /* 's' is only assigned for the JSON and GELF encodings */
+ if (ctx->format == FLB_KAFKA_FMT_JSON ||
+ ctx->format == FLB_KAFKA_FMT_GELF) {
+ flb_sds_destroy(s);
+ }
+ msgpack_sbuffer_destroy(&mp_sbuf);
+#ifdef FLB_HAVE_AVRO_ENCODER
+ if (ctx->format == FLB_KAFKA_FMT_AVRO) {
+ AVRO_FREE(avro_fast_buffer, out_buf)
+ }
+#endif
+ /*
+ * Unblock the flush requests so that the
+ * engine could try sending data again.
+ */
+ ctx->blocked = FLB_FALSE;
+ return FLB_RETRY;
+ }
+
+ ret = rd_kafka_produce(topic->tp,
+ RD_KAFKA_PARTITION_UA,
+ RD_KAFKA_MSG_F_COPY,
+ out_buf, out_size,
+ message_key, message_key_len,
+ ctx);
+
+ if (ret == -1) {
+ flb_error(
+ "%% Failed to produce to topic %s: %s\n",
+ rd_kafka_topic_name(topic->tp),
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+ /*
+ * rdkafka queue is full, keep trying 'locally' for a few seconds,
+ * otherwise let the caller issue a main retry against the engine.
+ */
+ if (rd_kafka_last_error() == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+ flb_plg_warn(ctx->ins,
+ "internal queue is full, retrying in one second");
+
+ /*
+ * If the queue is full, first make sure to discard any further
+ * flush request from the engine. This means 'the caller will
+ * issue a retry at a later time'.
+ */
+ ctx->blocked = FLB_TRUE;
+
+ /*
+ * The next step is to give some time to the background rdkafka
+ * library to do its own work. By default rdkafka waits 1 second
+ * or up to 10000 messages to be enqueued before delivery.
+ *
+ * If the kafka broker is down we should try a couple of times
+ * to enqueue this message, if we exceed 10 times, we just
+ * issue a full retry of the data chunk.
+ */
+ flb_time_sleep(1000);
+ rd_kafka_poll(ctx->kafka.rk, 0);
+
+ /* Issue a re-try */
+ queue_full_retries++;
+ goto retry;
+ }
+ }
+ else {
+ flb_plg_debug(ctx->ins, "enqueued message (%zd bytes) for topic '%s'",
+ out_size, rd_kafka_topic_name(topic->tp));
+ }
+ ctx->blocked = FLB_FALSE;
+
+ rd_kafka_poll(ctx->kafka.rk, 0);
+ if (ctx->format == FLB_KAFKA_FMT_JSON) {
+ flb_sds_destroy(s);
+ }
+ if (ctx->format == FLB_KAFKA_FMT_GELF) {
+ flb_sds_destroy(s);
+ }
+#ifdef FLB_HAVE_AVRO_ENCODER
+ if (ctx->format == FLB_KAFKA_FMT_AVRO) {
+ AVRO_FREE(avro_fast_buffer, out_buf)
+ }
+#endif
+
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return FLB_OK;
+}
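
For the iso8601 and iso8601_ns timestamp formats, produce_message() above renders the seconds with strftime() and then appends the fractional part with snprintf(). A standalone sketch of the same two step formatting with a hard-coded example timestamp; the strftime pattern here is an assumption standing in for FLB_JSON_DATE_ISO8601_FMT:

#include <stdio.h>
#include <time.h>
#include <inttypes.h>

int main(void)
{
    char out[64];
    size_t len;
    time_t sec = 1465839830;        /* example timestamp */
    uint64_t nsec = 100400200;
    struct tm tm_utc;

    gmtime_r(&sec, &tm_utc);

    /* seconds portion, e.g. "2016-06-13T17:43:50" */
    len = strftime(out, sizeof(out) - 1, "%Y-%m-%dT%H:%M:%S", &tm_utc);

    /* append microsecond precision, as the iso8601 branch above does */
    snprintf(out + len, sizeof(out) - 1 - len, ".%06" PRIu64 "Z", nsec / 1000);

    printf("%s\n", out);   /* 2016-06-13T17:43:50.100400Z */
    return 0;
}
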
+
+static void cb_kafka_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+
+ int ret;
+ struct flb_out_kafka *ctx = out_context;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ /*
+ * If the context is blocked, it means the rdkafka queue is full and no
+ * more messages can be appended. For our caller (the Fluent Bit engine)
+ * this means it is not possible to work on this now and it needs to 'retry'.
+ */
+ if (ctx->blocked == FLB_TRUE) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder,
+ (char *) event_chunk->data,
+ event_chunk->size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Iterate the original buffer and perform adjustments */
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ ret = produce_message(&log_event.timestamp,
+ log_event.body,
+ ctx, config);
+
+ if (ret != FLB_OK) {
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ FLB_OUTPUT_RETURN(ret);
+ }
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static void kafka_flush_force(struct flb_out_kafka *ctx,
+ struct flb_config *config)
+{
+ int ret;
+
+ if (!ctx) {
+ return;
+ }
+
+ if (ctx->kafka.rk) {
+ ret = rd_kafka_flush(ctx->kafka.rk, config->grace * 1000);
+ if (ret != RD_KAFKA_RESP_ERR_NO_ERROR) {
+ flb_plg_warn(ctx->ins, "Failed to force flush: %s",
+ rd_kafka_err2str(ret));
+ }
+ }
+}
+
+static int cb_kafka_exit(void *data, struct flb_config *config)
+{
+ struct flb_out_kafka *ctx = data;
+
+ kafka_flush_force(ctx, config);
+ flb_out_kafka_destroy(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "topic_key", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_kafka, topic_key),
+ "Which record to use as the kafka topic."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "dynamic_topic", "false",
+ 0, FLB_TRUE, offsetof(struct flb_out_kafka, dynamic_topic),
+ "Activate dynamic topics."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "format", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_kafka, format_str),
+ "Set the record output format."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "message_key", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_kafka, message_key),
+ "Which record key to use as the message data."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "message_key_field", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_kafka, message_key_field),
+ "Which record key field to use as the message data."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "timestamp_key", FLB_KAFKA_TS_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_out_kafka, timestamp_key),
+ "Set the key for the the timestamp."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "timestamp_format", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_kafka, timestamp_format_str),
+ "Set the format the timestamp is in."
+ },
+ {
+ FLB_CONFIG_MAP_INT, "queue_full_retries", FLB_KAFKA_QUEUE_FULL_RETRIES,
+ 0, FLB_TRUE, offsetof(struct flb_out_kafka, queue_full_retries),
+ "Set the number of local retries to enqueue the data."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "gelf_timestamp_key", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the timestamp key for gelf output."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "gelf_host_key", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the host key for gelf output."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "gelf_short_message_key", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the short message key for gelf output."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "gelf_full_message_key", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the full message key for gelf output."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "gelf_level_key", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the level key for gelf output."
+ },
+#ifdef FLB_HAVE_AVRO_ENCODER
+ {
+ FLB_CONFIG_MAP_STR, "schema_str", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set AVRO schema."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "schema_id", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set AVRO schema ID."
+ },
+#endif
+ {
+ FLB_CONFIG_MAP_STR, "topics", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the kafka topics, delimited by commas."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "brokers", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the kafka brokers, delimited by commas."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "client_id", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the kafka client_id."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "group_id", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the kafka group_id."
+ },
+ {
+ FLB_CONFIG_MAP_STR_PREFIX, "rdkafka.", NULL,
+ //FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_out_kafka, rdkafka_opts),
+ 0, FLB_FALSE, 0,
+ "Set the kafka group_id."
+ },
+ /* EOF */
+ {0}
+};
+
+struct flb_output_plugin out_kafka_plugin = {
+ .name = "kafka",
+ .description = "Kafka",
+ .cb_init = cb_kafka_init,
+ .cb_flush = cb_kafka_flush,
+ .cb_exit = cb_kafka_exit,
+ .config_map = config_map,
+ .flags = 0
+};
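
When rd_kafka_produce() reports RD_KAFKA_RESP_ERR__QUEUE_FULL, produce_message() above retries locally up to queue_full_retries times, polling rdkafka between attempts, before handing the chunk back to the engine as a retry. A minimal standalone sketch of that bounded local retry pattern with a stubbed enqueue function (all names and the one second pause are illustrative):

#include <stdio.h>
#include <unistd.h>

#define DEMO_QUEUE_FULL  -1
#define DEMO_OK           0
#define DEMO_RETRY_LATER  1

/* Stub: pretend the local queue is full for the first two attempts */
static int demo_enqueue(int attempt)
{
    return (attempt < 2) ? DEMO_QUEUE_FULL : DEMO_OK;
}

static int demo_produce(int max_local_retries)
{
    int attempt = 0;
    int ret;

    for (;;) {
        ret = demo_enqueue(attempt);
        if (ret != DEMO_QUEUE_FULL) {
            return DEMO_OK;
        }

        attempt++;
        if (max_local_retries > 0 && attempt >= max_local_retries) {
            /* give up locally; the engine will retry the whole chunk later */
            return DEMO_RETRY_LATER;
        }

        /* give the background library some time before retrying */
        sleep(1);
    }
}

int main(void)
{
    printf("%d\n", demo_produce(10));   /* 0: succeeded on the third attempt */
    return 0;
}
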
diff --git a/src/fluent-bit/plugins/out_kafka/kafka_callbacks.h b/src/fluent-bit/plugins/out_kafka/kafka_callbacks.h
new file mode 100644
index 000000000..f496cba8c
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kafka/kafka_callbacks.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_KAFKA_CALLBACKS_H
+#define FLB_OUT_KAFKA_CALLBACKS_H
+
+#include "rdkafka.h"
+
+void cb_kafka_msg(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage,
+ void *opaque);
+
+void cb_kafka_logger(const rd_kafka_t *rk, int level,
+ const char *fac, const char *buf);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_kafka/kafka_config.c b/src/fluent-bit/plugins/out_kafka/kafka_config.c
new file mode 100644
index 000000000..3c00f3682
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kafka/kafka_config.c
@@ -0,0 +1,253 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_utils.h>
+
+#include "kafka_config.h"
+#include "kafka_topic.h"
+#include "kafka_callbacks.h"
+
+struct flb_out_kafka *flb_out_kafka_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ const char *tmp;
+ char errstr[512];
+ struct mk_list *head;
+ struct mk_list *topics;
+ struct flb_split_entry *entry;
+ struct flb_out_kafka *ctx;
+
+ /* Configuration context */
+ ctx = flb_calloc(1, sizeof(struct flb_out_kafka));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ ctx->blocked = FLB_FALSE;
+
+ ret = flb_output_config_map_set(ins, (void*) ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to load configuration.");
+ flb_free(ctx);
+
+ return NULL;
+ }
+
+ /* rdkafka config context */
+ ctx->conf = flb_kafka_conf_create(&ctx->kafka, &ins->properties, 0);
+ if (!ctx->conf) {
+ flb_plg_error(ctx->ins, "error creating context");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Set our global opaque data (plugin context) */
+ rd_kafka_conf_set_opaque(ctx->conf, ctx);
+
+ /* Callback: message delivery */
+ rd_kafka_conf_set_dr_msg_cb(ctx->conf, cb_kafka_msg);
+
+ /* Callback: log */
+ rd_kafka_conf_set_log_cb(ctx->conf, cb_kafka_logger);
+
+ /* Config: Topic_Key */
+ if (ctx->topic_key) {
+ ctx->topic_key_len = strlen(ctx->topic_key);
+ }
+
+ /* Config: Format */
+ if (ctx->format_str) {
+ if (strcasecmp(ctx->format_str, "json") == 0) {
+ ctx->format = FLB_KAFKA_FMT_JSON;
+ }
+ else if (strcasecmp(ctx->format_str, "msgpack") == 0) {
+ ctx->format = FLB_KAFKA_FMT_MSGP;
+ }
+ else if (strcasecmp(ctx->format_str, "gelf") == 0) {
+ ctx->format = FLB_KAFKA_FMT_GELF;
+ }
+#ifdef FLB_HAVE_AVRO_ENCODER
+ else if (strcasecmp(ctx->format_str, "avro") == 0) {
+ ctx->format = FLB_KAFKA_FMT_AVRO;
+ }
+#endif
+ }
+ else {
+ ctx->format = FLB_KAFKA_FMT_JSON;
+ }
+
+ /* Config: Message_Key */
+ if (ctx->message_key) {
+ ctx->message_key_len = strlen(ctx->message_key);
+ }
+ else {
+ ctx->message_key_len = 0;
+ }
+
+ /* Config: Message_Key_Field */
+ if (ctx->message_key_field) {
+ ctx->message_key_field_len = strlen(ctx->message_key_field);
+ }
+ else {
+ ctx->message_key_field_len = 0;
+ }
+
+ /* Config: Timestamp_Key */
+ if (ctx->timestamp_key) {
+ ctx->timestamp_key_len = strlen(ctx->timestamp_key);
+ }
+
+ /* Config: Timestamp_Format */
+ ctx->timestamp_format = FLB_JSON_DATE_DOUBLE;
+ if (ctx->timestamp_format_str) {
+ if (strcasecmp(ctx->timestamp_format_str, "iso8601") == 0) {
+ ctx->timestamp_format = FLB_JSON_DATE_ISO8601;
+ }
+ else if (strcasecmp(ctx->timestamp_format_str, "iso8601_ns") == 0) {
+ ctx->timestamp_format = FLB_JSON_DATE_ISO8601_NS;
+ }
+ }
+
+ /* set the number of retries: note that a value of zero means retry forever */
+ if (ctx->queue_full_retries < 0) {
+ ctx->queue_full_retries = 0;
+ }
+
+ /* Config Gelf_Short_Message_Key */
+ tmp = flb_output_get_property("gelf_short_message_key", ins);
+ if (tmp) {
+ ctx->gelf_fields.short_message_key = flb_sds_create(tmp);
+ }
+
+ /* Config Gelf_Full_Message_Key */
+ tmp = flb_output_get_property("gelf_full_message_key", ins);
+ if (tmp) {
+ ctx->gelf_fields.full_message_key = flb_sds_create(tmp);
+ }
+
+ /* Config Gelf_Level_Key */
+ tmp = flb_output_get_property("gelf_level_key", ins);
+ if (tmp) {
+ ctx->gelf_fields.level_key = flb_sds_create(tmp);
+ }
+
+ /* Kafka Producer */
+ ctx->kafka.rk = rd_kafka_new(RD_KAFKA_PRODUCER, ctx->conf,
+ errstr, sizeof(errstr));
+ if (!ctx->kafka.rk) {
+ flb_plg_error(ctx->ins, "failed to create producer: %s",
+ errstr);
+ flb_out_kafka_destroy(ctx);
+ return NULL;
+ }
+
+#ifdef FLB_HAVE_AVRO_ENCODER
+ /* Config AVRO */
+ tmp = flb_output_get_property("schema_str", ins);
+ if (tmp) {
+ ctx->avro_fields.schema_str = flb_sds_create(tmp);
+ }
+ tmp = flb_output_get_property("schema_id", ins);
+ if (tmp) {
+ ctx->avro_fields.schema_id = flb_sds_create(tmp);
+ }
+#endif
+
+ /* Config: Topic */
+ mk_list_init(&ctx->topics);
+ tmp = flb_output_get_property("topics", ins);
+ if (!tmp) {
+ flb_kafka_topic_create(FLB_KAFKA_TOPIC, ctx);
+ }
+ else {
+ topics = flb_utils_split(tmp, ',', -1);
+ if (!topics) {
+ flb_plg_warn(ctx->ins, "invalid topics defined, setting default");
+ flb_kafka_topic_create(FLB_KAFKA_TOPIC, ctx);
+ }
+ else {
+ /* Register each topic */
+ mk_list_foreach(head, topics) {
+ entry = mk_list_entry(head, struct flb_split_entry, _head);
+ if (!flb_kafka_topic_create(entry->value, ctx)) {
+ flb_plg_error(ctx->ins, "cannot register topic '%s'",
+ entry->value);
+ }
+ }
+ flb_utils_split_free(topics);
+ }
+ }
+
+ flb_plg_info(ctx->ins, "brokers='%s' topics='%s'", ctx->kafka.brokers, tmp);
+#ifdef FLB_HAVE_AVRO_ENCODER
+ flb_plg_info(ctx->ins, "schemaID='%s' schema='%s'", ctx->avro_fields.schema_id, ctx->avro_fields.schema_str);
+#endif
+
+ return ctx;
+}
+
+int flb_out_kafka_destroy(struct flb_out_kafka *ctx)
+{
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->kafka.brokers) {
+ flb_free(ctx->kafka.brokers);
+ }
+
+ flb_kafka_topic_destroy_all(ctx);
+
+ if (ctx->kafka.rk) {
+ rd_kafka_destroy(ctx->kafka.rk);
+ }
+
+ if (ctx->topic_key) {
+ flb_free(ctx->topic_key);
+ }
+
+ if (ctx->message_key) {
+ flb_free(ctx->message_key);
+ }
+
+ if (ctx->message_key_field) {
+ flb_free(ctx->message_key_field);
+ }
+
+ flb_sds_destroy(ctx->gelf_fields.timestamp_key);
+ flb_sds_destroy(ctx->gelf_fields.host_key);
+ flb_sds_destroy(ctx->gelf_fields.short_message_key);
+ flb_sds_destroy(ctx->gelf_fields.full_message_key);
+ flb_sds_destroy(ctx->gelf_fields.level_key);
+
+#ifdef FLB_HAVE_AVRO_ENCODER
+ // avro
+ flb_sds_destroy(ctx->avro_fields.schema_id);
+ flb_sds_destroy(ctx->avro_fields.schema_str);
+#endif
+
+ flb_free(ctx);
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/out_kafka/kafka_config.h b/src/fluent-bit/plugins/out_kafka/kafka_config.h
new file mode 100644
index 000000000..1ef2cce16
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kafka/kafka_config.h
@@ -0,0 +1,129 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_KAFKA_CONFIG_H
+#define FLB_OUT_KAFKA_CONFIG_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_pack.h>
+#ifdef FLB_HAVE_AVRO_ENCODER
+#include <fluent-bit/flb_avro.h>
+#endif
+
+#include <fluent-bit/flb_kafka.h>
+
+#define FLB_KAFKA_FMT_JSON 0
+#define FLB_KAFKA_FMT_MSGP 1
+#define FLB_KAFKA_FMT_GELF 2
+#ifdef FLB_HAVE_AVRO_ENCODER
+#define FLB_KAFKA_FMT_AVRO 3
+#endif
+#define FLB_KAFKA_TS_KEY "@timestamp"
+#define FLB_KAFKA_QUEUE_FULL_RETRIES "10"
+
+/* rdkafka log levels based on syslog(3) */
+#define FLB_KAFKA_LOG_EMERG 0
+#define FLB_KAFKA_LOG_ALERT 1
+#define FLB_KAFKA_LOG_CRIT 2
+#define FLB_KAFKA_LOG_ERR 3
+#define FLB_KAFKA_LOG_WARNING 4
+#define FLB_KAFKA_LOG_NOTICE 5
+#define FLB_KAFKA_LOG_INFO 6
+#define FLB_KAFKA_LOG_DEBUG 7
+
+#define FLB_JSON_DATE_DOUBLE 0
+#define FLB_JSON_DATE_ISO8601 1
+#define FLB_JSON_DATE_ISO8601_NS 2
+#define FLB_JSON_DATE_ISO8601_FMT "%Y-%m-%dT%H:%M:%S"
+
+struct flb_kafka_topic {
+ int name_len;
+ char *name;
+ rd_kafka_topic_t *tp;
+ struct mk_list _head;
+};
+
+struct flb_out_kafka {
+ struct flb_kafka kafka;
+ /* Config Parameters */
+ int format;
+ flb_sds_t format_str;
+
+ /* Optional topic key for routing */
+ int topic_key_len;
+ char *topic_key;
+
+ int timestamp_key_len;
+ char *timestamp_key;
+ int timestamp_format;
+ flb_sds_t timestamp_format_str;
+
+ int message_key_len;
+ char *message_key;
+
+ int message_key_field_len;
+ char *message_key_field;
+
+ /* Gelf Keys */
+ struct flb_gelf_fields gelf_fields;
+
+ /* Head of defined topics by configuration */
+ struct mk_list topics;
+
+ /*
+ * Blocked Status: since rdkafka has its own buffering queue, there is a
+ * chance that the queue becomes full. When that happens, the default
+ * behavior is the following:
+ *
+ * - out_kafka yields and tries to continue every second until it succeeds;
+ * meanwhile the blocked flag is set to FLB_TRUE.
+ * - when flushing more records and blocked == FLB_TRUE, issue
+ * a retry.
+ */
+ int blocked;
+
+ int dynamic_topic;
+
+ int queue_full_retries;
+
+ /* Internal */
+ rd_kafka_conf_t *conf;
+
+ /* Plugin instance */
+ struct flb_output_instance *ins;
+
+#ifdef FLB_HAVE_AVRO_ENCODER
+ // avro serialization requires a schema
+ // the schema is stored in json in avro_schema_str
+ //
+ // optionally the schema ID can be stashed in the avro data stream
+ // the schema ID is stored in avro_schema_id
+ // this is common at this time with large kafka installations and schema registries
+ // flb_sds_t avro_schema_str;
+ // flb_sds_t avro_schema_id;
+ struct flb_avro_fields avro_fields;
+#endif
+
+};
+
+struct flb_out_kafka *flb_out_kafka_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+int flb_out_kafka_destroy(struct flb_out_kafka *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_kafka/kafka_topic.c b/src/fluent-bit/plugins/out_kafka/kafka_topic.c
new file mode 100644
index 000000000..2db8698b1
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kafka/kafka_topic.c
@@ -0,0 +1,120 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_mem.h>
+
+#include "kafka_config.h"
+#include "rdkafka.h"
+
+struct flb_kafka_topic *flb_kafka_topic_create(char *name,
+ struct flb_out_kafka *ctx)
+{
+ rd_kafka_topic_t *tp;
+ struct flb_kafka_topic *topic;
+
+ tp = rd_kafka_topic_new(ctx->kafka.rk, name, NULL);
+ if (!tp) {
+ flb_plg_error(ctx->ins, "failed to create topic: %s",
+ rd_kafka_err2str(rd_kafka_last_error()));
+ return NULL;
+ }
+
+ topic = flb_malloc(sizeof(struct flb_kafka_topic));
+ if (!topic) {
+ flb_errno();
+ return NULL;
+ }
+
+ topic->name = flb_strdup(name);
+ topic->name_len = strlen(name);
+ topic->tp = tp;
+ mk_list_add(&topic->_head, &ctx->topics);
+
+ return topic;
+}
+
+int flb_kafka_topic_destroy(struct flb_kafka_topic *topic,
+ struct flb_out_kafka *ctx)
+{
+ mk_list_del(&topic->_head);
+ rd_kafka_topic_destroy(topic->tp);
+ flb_free(topic->name);
+ flb_free(topic);
+
+ return 0;
+}
+
+int flb_kafka_topic_destroy_all(struct flb_out_kafka *ctx)
+{
+ int c = 0;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_kafka_topic *topic;
+
+ mk_list_foreach_safe(head, tmp, &ctx->topics) {
+ topic = mk_list_entry(head, struct flb_kafka_topic, _head);
+ flb_kafka_topic_destroy(topic, ctx);
+ c++;
+ }
+
+ return c;
+}
+
+/* Get first topic of the list (default topic) */
+struct flb_kafka_topic *flb_kafka_topic_default(struct flb_out_kafka *ctx)
+{
+ struct flb_kafka_topic *topic;
+
+ if (mk_list_is_empty(&ctx->topics) == 0) {
+ return NULL;
+ }
+
+ topic = mk_list_entry_first(&ctx->topics, struct flb_kafka_topic,
+ _head);
+ return topic;
+}
+
+struct flb_kafka_topic *flb_kafka_topic_lookup(char *name,
+ int name_len,
+ struct flb_out_kafka *ctx)
+{
+ struct mk_list *head;
+ struct flb_kafka_topic *topic;
+
+ if (!ctx->topic_key) {
+ return flb_kafka_topic_default(ctx);
+ }
+
+ mk_list_foreach(head, &ctx->topics) {
+ topic = mk_list_entry(head, struct flb_kafka_topic, _head);
+ if (topic->name_len != name_len) {
+ continue;
+ }
+
+ if (strncmp(name, topic->name, topic->name_len) == 0) {
+ return topic;
+ }
+ }
+
+ /* No matches, return the default topic */
+ return flb_kafka_topic_default(ctx);
+
+}
diff --git a/src/fluent-bit/plugins/out_kafka/kafka_topic.h b/src/fluent-bit/plugins/out_kafka/kafka_topic.h
new file mode 100644
index 000000000..9b1203b96
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kafka/kafka_topic.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_KAFKA_TOPIC_H
+#define FLB_KAFKA_TOPIC_H
+
+struct flb_kafka_topic *flb_kafka_topic_create(char *name,
+ struct flb_out_kafka *ctx);
+int flb_kafka_topic_destroy(struct flb_kafka_topic *topic,
+ struct flb_out_kafka *ctx);
+int flb_kafka_topic_destroy_all(struct flb_out_kafka *ctx);
+struct flb_kafka_topic *flb_kafka_topic_default(struct flb_out_kafka *ctx);
+
+struct flb_kafka_topic *flb_kafka_topic_lookup(char *name,
+ int name_len,
+ struct flb_out_kafka *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_kafka_rest/CMakeLists.txt b/src/fluent-bit/plugins/out_kafka_rest/CMakeLists.txt
new file mode 100644
index 000000000..39df92f77
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kafka_rest/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ kafka_conf.c
+ kafka.c)
+
+FLB_PLUGIN(out_kafka_rest "${src}" "")
diff --git a/src/fluent-bit/plugins/out_kafka_rest/kafka.c b/src/fluent-bit/plugins/out_kafka_rest/kafka.c
new file mode 100644
index 000000000..f3b6153a6
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kafka_rest/kafka.c
@@ -0,0 +1,351 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <msgpack.h>
+
+#include "kafka.h"
+#include "kafka_conf.h"
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "message_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kafka_rest, message_key),
+ "Specify a message key. "
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "time_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kafka_rest, time_key),
+ "Specify the name of the field that holds the record timestamp. "
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "topic", "fluent-bit",
+ 0, FLB_TRUE, offsetof(struct flb_kafka_rest, topic),
+ "Specify the kafka topic. "
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "url_path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kafka_rest, url_path),
+ "Specify an optional HTTP URL path for the target web server, e.g: /something"
+ },
+
+ {
+ FLB_CONFIG_MAP_DOUBLE, "partition", "-1",
+ 0, FLB_TRUE, offsetof(struct flb_kafka_rest, partition),
+ "Specify kafka partition number. "
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "time_key_format", FLB_KAFKA_TIME_KEYF,
+ 0, FLB_TRUE, offsetof(struct flb_kafka_rest, time_key_format),
+ "Specify the format of the timestamp. "
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "include_tag_key", "false",
+ 0, FLB_TRUE, offsetof(struct flb_kafka_rest, include_tag_key),
+ "Specify whether to append tag name to final record. "
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "tag_key", "_flb-key",
+ 0, FLB_TRUE, offsetof(struct flb_kafka_rest, tag_key),
+ "Specify the key name of the record if include_tag_key is enabled. "
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "avro_http_header", "false",
+ 0, FLB_TRUE, offsetof(struct flb_kafka_rest, avro_http_header),
+ "Specify if the format has avro header in http request"
+ },
+
+ /* EOF */
+ {0}
+};
+/*
+ * Convert the internal Fluent Bit data representation to the required
+ * one by Kafka REST Proxy.
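+ *
+ * The generated payload has the shape (illustrative):
+ *   {"records":[{"partition":N,"key":"...","value":{"<time_key>":"...", ...record fields...}}]}
+ * where "partition" and "key" are only added when configured and the
+ * original record map is copied into "value".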
+ */
+static flb_sds_t kafka_rest_format(const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ size_t *out_size,
+ struct flb_kafka_rest *ctx)
+{
+ int i;
+ int len;
+ int arr_size = 0;
+ int map_size;
+ size_t s;
+ flb_sds_t out_buf;
+ char time_formatted[256];
+ msgpack_object map;
+ msgpack_object key;
+ msgpack_object val;
+ struct tm tm;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return NULL;
+ }
+
+ /* Init temporary buffers */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ /* Count number of entries */
+ arr_size = flb_mp_count(data, bytes);
+
+ /* Root map */
+ msgpack_pack_map(&mp_pck, 1);
+ msgpack_pack_str(&mp_pck, 7);
+ msgpack_pack_str_body(&mp_pck, "records", 7);
+
+ msgpack_pack_array(&mp_pck, arr_size);
+
+ /* Iterate and compose array content */
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ map = *log_event.body;
+ map_size = 1;
+
+ if (ctx->partition >= 0) {
+ map_size++;
+ }
+
+ if (ctx->message_key != NULL) {
+ map_size++;
+ }
+
+ msgpack_pack_map(&mp_pck, map_size);
+ if (ctx->partition >= 0) {
+ msgpack_pack_str(&mp_pck, 9);
+ msgpack_pack_str_body(&mp_pck, "partition", 9);
+ msgpack_pack_int64(&mp_pck, ctx->partition);
+ }
+
+
+ if (ctx->message_key != NULL) {
+ msgpack_pack_str(&mp_pck, 3);
+ msgpack_pack_str_body(&mp_pck, "key", 3);
+ msgpack_pack_str(&mp_pck, ctx->message_key_len);
+ msgpack_pack_str_body(&mp_pck, ctx->message_key, ctx->message_key_len);
+ }
+
+ /* Value Map Size */
+ map_size = map.via.map.size;
+ map_size++;
+ if (ctx->include_tag_key == FLB_TRUE) {
+ map_size++;
+ }
+
+ msgpack_pack_str(&mp_pck, 5);
+ msgpack_pack_str_body(&mp_pck, "value", 5);
+
+ msgpack_pack_map(&mp_pck, map_size);
+
+ /* Time key and time formatted */
+ msgpack_pack_str(&mp_pck, ctx->time_key_len);
+ msgpack_pack_str_body(&mp_pck, ctx->time_key, ctx->time_key_len);
+
+ /* Format the time */
+ gmtime_r(&log_event.timestamp.tm.tv_sec, &tm);
+ s = strftime(time_formatted, sizeof(time_formatted) - 1,
+ ctx->time_key_format, &tm);
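+ /* append nanoseconds and a literal 'Z' to the strftime output */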
+ len = snprintf(time_formatted + s, sizeof(time_formatted) - 1 - s,
+ ".%09" PRIu64 "Z", (uint64_t) log_event.timestamp.tm.tv_nsec);
+ s += len;
+ msgpack_pack_str(&mp_pck, s);
+ msgpack_pack_str_body(&mp_pck, time_formatted, s);
+
+ /* Tag Key */
+ if (ctx->include_tag_key == FLB_TRUE) {
+ msgpack_pack_str(&mp_pck, ctx->tag_key_len);
+ msgpack_pack_str_body(&mp_pck, ctx->tag_key, ctx->tag_key_len);
+ msgpack_pack_str(&mp_pck, tag_len);
+ msgpack_pack_str_body(&mp_pck, tag, tag_len);
+ }
+
+ for (i = 0; i < map.via.map.size; i++) {
+ key = map.via.map.ptr[i].key;
+ val = map.via.map.ptr[i].val;
+
+ msgpack_pack_object(&mp_pck, key);
+ msgpack_pack_object(&mp_pck, val);
+ }
+ }
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ /* Convert to JSON */
+ out_buf = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ if (!out_buf) {
+ return NULL;
+ }
+
+ *out_size = flb_sds_len(out_buf);
+
+ return out_buf;
+}
+
+static int cb_kafka_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ (void) ins;
+ (void) config;
+ (void) data;
+ struct flb_kafka_rest *ctx;
+
+ ctx = flb_kr_conf_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "cannot initialize plugin");
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "host=%s port=%i",
+ ins->host.name, ins->host.port);
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+}
+
+static void cb_kafka_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ flb_sds_t js;
+ size_t js_size;
+ size_t b_sent;
+ struct flb_http_client *c;
+ struct flb_connection *u_conn;
+ struct flb_kafka_rest *ctx = out_context;
+ (void) i_ins;
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Convert format */
+ js = kafka_rest_format(event_chunk->data, event_chunk->size,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ &js_size, ctx);
+ if (!js) {
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->uri,
+ js, js_size, NULL, 0, NULL, 0);
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ if (ctx->avro_http_header == FLB_TRUE) {
+ flb_http_add_header(c,
+ "Content-Type", 12,
+ "application/vnd.kafka.avro.v2+json", 34);
+ }
+ else {
+ flb_http_add_header(c,
+ "Content-Type", 12,
+ "application/vnd.kafka.json.v2+json", 34);
+ }
+
+ if (ctx->http_user && ctx->http_passwd) {
+ flb_http_basic_auth(c, ctx->http_user, ctx->http_passwd);
+ }
+
+ ret = flb_http_do(c, &b_sent);
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "http_do=%i", ret);
+ goto retry;
+ }
+ else {
+ /* The request was issued successfully, validate the 'error' field */
+ flb_plg_debug(ctx->ins, "HTTP Status=%i", c->resp.status);
+ if (c->resp.status != 200) {
+ if (c->resp.payload_size > 0) {
+ flb_plg_debug(ctx->ins, "Kafka REST response\n%s",
+ c->resp.payload);
+ }
+ goto retry;
+ }
+
+ if (c->resp.payload_size > 0) {
+ flb_plg_debug(ctx->ins, "Kafka REST response\n%s",
+ c->resp.payload);
+ }
+ else {
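+ /* a 200 response with an empty body is treated as a failure and retried */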
+ goto retry;
+ }
+ }
+
+ /* Cleanup */
+ flb_http_client_destroy(c);
+ flb_sds_destroy(js);
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_OK);
+
+ /* Issue a retry */
+ retry:
+ flb_http_client_destroy(c);
+ flb_sds_destroy(js);
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+}
+
+static int cb_kafka_exit(void *data, struct flb_config *config)
+{
+ struct flb_kafka_rest *ctx = data;
+
+ flb_kr_conf_destroy(ctx);
+ return 0;
+}
+
+struct flb_output_plugin out_kafka_rest_plugin = {
+ .name = "kafka-rest",
+ .description = "Kafka REST Proxy",
+ .cb_init = cb_kafka_init,
+ .cb_flush = cb_kafka_flush,
+ .cb_exit = cb_kafka_exit,
+ .config_map = config_map,
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_kafka_rest/kafka.h b/src/fluent-bit/plugins/out_kafka_rest/kafka.h
new file mode 100644
index 000000000..c2d220e7d
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kafka_rest/kafka.h
@@ -0,0 +1,66 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_KAFKA_REST_H
+#define FLB_OUT_KAFKA_REST_H
+
+#define FLB_KAFKA_TIME_KEY "@timestamp"
+#define FLB_KAFKA_TIME_KEYF "%Y-%m-%dT%H:%M:%S"
+#define FLB_KAFKA_TAG_KEY "_flb-key"
+
+struct flb_kafka_rest {
+ /* Kafka specifics */
+ long partition;
+ char *topic;
+ int message_key_len;
+ char *message_key;
+
+ /* HTTP Auth */
+ char *http_user;
+ char *http_passwd;
+
+ /* time key */
+ int time_key_len;
+ char *time_key;
+
+ /* time key format */
+ int time_key_format_len;
+ char *time_key_format;
+
+ /* include_tag_key */
+ int include_tag_key;
+ int tag_key_len;
+ char *tag_key;
+
+ /* HTTP URI */
+ char uri[256];
+ char *url_path;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *u;
+
+ /* Plugin instance */
+ struct flb_output_instance *ins;
+
+ /* Avro http header*/
+ int avro_http_header;
+};
+
+
+#endif
diff --git a/src/fluent-bit/plugins/out_kafka_rest/kafka_conf.c b/src/fluent-bit/plugins/out_kafka_rest/kafka_conf.c
new file mode 100644
index 000000000..3df50eb8b
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kafka_rest/kafka_conf.c
@@ -0,0 +1,223 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_utils.h>
+
+#include "kafka.h"
+#include "kafka_conf.h"
+
+struct flb_kafka_rest *flb_kr_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ long part;
+ int io_flags = 0;
+ const char *tmp;
+ char *endptr;
+ struct flb_upstream *upstream;
+ struct flb_kafka_rest *ctx;
+ int ret;
+
+ /* Allocate context */
+ ctx = flb_calloc(1, sizeof(struct flb_kafka_rest));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Get network configuration */
+ flb_output_net_default("127.0.0.1", 8082, ins);
+
+ /* use TLS ? */
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ }
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ /* Prepare an upstream handler */
+ upstream = flb_upstream_create(config,
+ ins->host.name,
+ ins->host.port,
+ io_flags,
+ ins->tls);
+ if (!upstream) {
+ flb_plg_error(ctx->ins, "cannot create Upstream context");
+ flb_kr_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->u = upstream;
+ flb_output_upstream_set(ctx->u, ins);
+
+ /* HTTP Auth */
+ tmp = flb_output_get_property("http_user", ins);
+ if (tmp) {
+ ctx->http_user = flb_strdup(tmp);
+
+ tmp = flb_output_get_property("http_passwd", ins);
+ if (tmp) {
+ ctx->http_passwd = flb_strdup(tmp);
+ }
+ else {
+ ctx->http_passwd = flb_strdup("");
+ }
+ }
+
+ /* Time Key */
+ tmp = flb_output_get_property("time_key", ins);
+ if (tmp) {
+ ctx->time_key = flb_strdup(tmp);
+ ctx->time_key_len = strlen(tmp);
+ }
+ else {
+ ctx->time_key = flb_strdup(FLB_KAFKA_TIME_KEY);
+ ctx->time_key_len = sizeof(FLB_KAFKA_TIME_KEY) - 1;
+ }
+
+ /* Time Key Format */
+ tmp = flb_output_get_property("time_key_format", ins);
+ if (tmp) {
+ ctx->time_key_format = flb_strdup(tmp);
+ ctx->time_key_format_len = strlen(tmp);
+ }
+ else {
+ ctx->time_key_format = flb_strdup(FLB_KAFKA_TIME_KEYF);
+ ctx->time_key_format_len = sizeof(FLB_KAFKA_TIME_KEYF) - 1;
+ }
+
+ /* Include Tag key */
+ tmp = flb_output_get_property("include_tag_key", ins);
+ if (tmp) {
+ ctx->include_tag_key = flb_utils_bool(tmp);
+ }
+ else {
+ ctx->include_tag_key = FLB_FALSE;
+ }
+
+ /* Tag Key */
+ if (ctx->include_tag_key == FLB_TRUE) {
+ tmp = flb_output_get_property("tag_key", ins);
+ if (tmp) {
+ ctx->tag_key = flb_strdup(tmp);
+ ctx->tag_key_len = strlen(tmp);
+ if (tmp[0] != '_') {
+ flb_plg_warn(ctx->ins, "consider use a tag_key "
+ "that starts with '_'");
+ }
+ }
+ else {
+ ctx->tag_key = flb_strdup(FLB_KAFKA_TAG_KEY);
+ ctx->tag_key_len = sizeof(FLB_KAFKA_TAG_KEY) - 1;
+ }
+ }
+
+ /* Kafka: partition */
+ tmp = flb_output_get_property("partition", ins);
+ if (tmp) {
+ errno = 0;
+ part = strtol(tmp, &endptr, 10);
+ if ((errno == ERANGE && (part == LONG_MAX || part == LONG_MIN))
+ || (errno != 0 && part == 0)) {
+ flb_plg_error(ctx->ins, "invalid partition number");
+ }
+
+ if (endptr == tmp) {
+ flb_plg_error(ctx->ins, "invalid partition number");
+ }
+ ctx->partition = part;
+ }
+ else {
+ ctx->partition = -1;
+ }
+
+ /* Kafka: topic */
+ tmp = flb_output_get_property("topic", ins);
+ if (tmp) {
+ ctx->topic = flb_strdup(tmp);
+ }
+ else {
+ ctx->topic = flb_strdup("fluent-bit");
+ }
+
+ /* Compose the request URI from the optional url_path and the topic */
+ tmp = flb_output_get_property("url_path", ins);
+ if (tmp) {
+ ctx->url_path = flb_strdup(tmp);
+ snprintf(ctx->uri, sizeof(ctx->uri) - 1, "%s/topics/%s", ctx->url_path, ctx->topic);
+ } else {
+ ctx->url_path = NULL;
+ snprintf(ctx->uri, sizeof(ctx->uri) - 1, "/topics/%s", ctx->topic);
+ }
+
+ /* Kafka: message key */
+ tmp = flb_output_get_property("message_key", ins);
+ if (tmp) {
+ ctx->message_key = flb_strdup(tmp);
+ ctx->message_key_len = strlen(tmp);
+ }
+ else {
+ ctx->message_key = NULL;
+ ctx->message_key_len = 0;
+ }
+
+ return ctx;
+}
+
+int flb_kr_conf_destroy(struct flb_kafka_rest *ctx)
+{
+ flb_free(ctx->topic);
+ flb_free(ctx->http_user);
+ flb_free(ctx->http_passwd);
+
+ flb_free(ctx->time_key);
+ flb_free(ctx->time_key_format);
+
+ if (ctx->url_path) {
+ flb_free(ctx->url_path);
+ }
+
+ if (ctx->include_tag_key) {
+ flb_free(ctx->tag_key);
+ }
+
+ if (ctx->message_key) {
+ flb_free(ctx->message_key);
+ }
+
+ flb_upstream_destroy(ctx->u);
+ flb_free(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/out_kafka_rest/kafka_conf.h b/src/fluent-bit/plugins/out_kafka_rest/kafka_conf.h
new file mode 100644
index 000000000..1d80445b3
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kafka_rest/kafka_conf.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_KAFKA_REST_CONF_H
+#define FLB_OUT_KAFKA_REST_CONF_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_output.h>
+
+#include "kafka.h"
+
+struct flb_kafka_rest *flb_kr_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+int flb_kr_conf_destroy(struct flb_kafka_rest *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_kinesis_firehose/CMakeLists.txt b/src/fluent-bit/plugins/out_kinesis_firehose/CMakeLists.txt
new file mode 100644
index 000000000..9cbf05d36
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kinesis_firehose/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ firehose.c
+ firehose_api.c)
+
+FLB_PLUGIN(out_kinesis_firehose "${src}" "")
diff --git a/src/fluent-bit/plugins/out_kinesis_firehose/firehose.c b/src/fluent-bit/plugins/out_kinesis_firehose/firehose.c
new file mode 100644
index 000000000..a66d3f37c
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kinesis_firehose/firehose.c
@@ -0,0 +1,503 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_slist.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_output_plugin.h>
+
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_aws_util.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_utils.h>
+
+#include <fluent-bit/aws/flb_aws_compress.h>
+
+#include <monkey/mk_core.h>
+#include <msgpack.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "firehose.h"
+#include "firehose_api.h"
+
+static struct flb_aws_header content_type_header = {
+ .key = "Content-Type",
+ .key_len = 12,
+ .val = "application/x-amz-json-1.1",
+ .val_len = 26,
+};
+
+static int cb_firehose_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ const char *tmp;
+ char *session_name = NULL;
+ struct flb_firehose *ctx = NULL;
+ int ret;
+ (void) config;
+ (void) data;
+
+ ctx = flb_calloc(1, sizeof(struct flb_firehose));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->ins = ins;
+
+ /* Populate context with config map defaults and incoming properties */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "configuration error");
+ goto error;
+ }
+
+ tmp = flb_output_get_property("delivery_stream", ins);
+ if (tmp) {
+ ctx->delivery_stream = tmp;
+ } else {
+ flb_plg_error(ctx->ins, "'delivery_stream' is a required field");
+ goto error;
+ }
+
+ tmp = flb_output_get_property("time_key", ins);
+ if (tmp) {
+ ctx->time_key = tmp;
+ }
+
+ tmp = flb_output_get_property("time_key_format", ins);
+ if (tmp) {
+ ctx->time_key_format = tmp;
+ } else {
+ ctx->time_key_format = DEFAULT_TIME_KEY_FORMAT;
+ }
+
+ tmp = flb_output_get_property("log_key", ins);
+ if (tmp) {
+ ctx->log_key = tmp;
+ }
+
+ if (ctx->log_key && ctx->time_key) {
+ flb_plg_error(ctx->ins, "'time_key' and 'log_key' can not be used together");
+ goto error;
+ }
+
+ tmp = flb_output_get_property("endpoint", ins);
+ if (tmp) {
+ ctx->custom_endpoint = FLB_TRUE;
+ ctx->endpoint = removeProtocol((char *) tmp, "https://");
+ }
+ else {
+ ctx->custom_endpoint = FLB_FALSE;
+ }
+
+ tmp = flb_output_get_property("sts_endpoint", ins);
+ if (tmp) {
+ ctx->sts_endpoint = (char *) tmp;
+ }
+
+ tmp = flb_output_get_property("compression", ins);
+ if (tmp) {
+ ret = flb_aws_compression_get_type(tmp);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "unknown compression: %s", tmp);
+ goto error;
+ }
+ ctx->compression = ret;
+ }
+
+ tmp = flb_output_get_property("log_key", ins);
+ if (tmp) {
+ ctx->log_key = tmp;
+ }
+
+ tmp = flb_output_get_property("region", ins);
+ if (tmp) {
+ ctx->region = tmp;
+ } else {
+ flb_plg_error(ctx->ins, "'region' is a required field");
+ goto error;
+ }
+
+ tmp = flb_output_get_property("role_arn", ins);
+ if (tmp) {
+ ctx->role_arn = tmp;
+ }
+
+ /* one tls instance for provider, one for the firehose client */
+ ctx->cred_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+
+ if (!ctx->cred_tls) {
+ flb_plg_error(ctx->ins, "Failed to create tls context");
+ goto error;
+ }
+
+ ctx->client_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+ if (!ctx->client_tls) {
+ flb_plg_error(ctx->ins, "Failed to create tls context");
+ goto error;
+ }
+
+ ctx->aws_provider = flb_standard_chain_provider_create(config,
+ ctx->cred_tls,
+ (char *) ctx->region,
+ ctx->sts_endpoint,
+ NULL,
+ flb_aws_client_generator(),
+ ctx->profile);
+ if (!ctx->aws_provider) {
+ flb_plg_error(ctx->ins, "Failed to create AWS Credential Provider");
+ goto error;
+ }
+
+ if(ctx->role_arn) {
+ /* set up sts assume role provider */
+ session_name = flb_sts_session_name();
+ if (!session_name) {
+ flb_plg_error(ctx->ins,
+ "Failed to generate random STS session name");
+ goto error;
+ }
+
+ /* STS provider needs yet another separate TLS instance */
+ ctx->sts_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+ if (!ctx->sts_tls) {
+ flb_errno();
+ goto error;
+ }
+
+ ctx->base_aws_provider = ctx->aws_provider;
+
+ ctx->aws_provider = flb_sts_provider_create(config,
+ ctx->sts_tls,
+ ctx->base_aws_provider,
+ (char *) ctx->external_id,
+ (char *) ctx->role_arn,
+ session_name,
+ (char *) ctx->region,
+ ctx->sts_endpoint,
+ NULL,
+ flb_aws_client_generator());
+ if (!ctx->aws_provider) {
+ flb_plg_error(ctx->ins,
+ "Failed to create AWS STS Credential Provider");
+ goto error;
+ }
+ /* session name can be freed after the provider is created */
+ flb_free(session_name);
+ session_name = NULL;
+ }
+
+ /* initialize credentials and set to sync mode */
+ ctx->aws_provider->provider_vtable->sync(ctx->aws_provider);
+ ctx->aws_provider->provider_vtable->init(ctx->aws_provider);
+ ctx->aws_provider->provider_vtable->upstream_set(ctx->aws_provider, ctx->ins);
+
+ if (ctx->endpoint == NULL) {
+ ctx->endpoint = flb_aws_endpoint("firehose", (char *) ctx->region);
+ if (!ctx->endpoint) {
+ goto error;
+ }
+ }
+
+ struct flb_aws_client_generator *generator = flb_aws_client_generator();
+ ctx->firehose_client = generator->create();
+ if (!ctx->firehose_client) {
+ goto error;
+ }
+ ctx->firehose_client->name = "firehose_client";
+ ctx->firehose_client->has_auth = FLB_TRUE;
+ ctx->firehose_client->provider = ctx->aws_provider;
+ ctx->firehose_client->region = (char *) ctx->region;
+ ctx->firehose_client->retry_requests = ctx->retry_requests;
+ ctx->firehose_client->service = "firehose";
+ ctx->firehose_client->port = 443;
+ ctx->firehose_client->flags = 0;
+ ctx->firehose_client->proxy = NULL;
+ ctx->firehose_client->static_headers = &content_type_header;
+ ctx->firehose_client->static_headers_len = 1;
+
+ struct flb_upstream *upstream = flb_upstream_create(config, ctx->endpoint,
+ 443, FLB_IO_TLS,
+ ctx->client_tls);
+ if (!upstream) {
+ flb_plg_error(ctx->ins, "Connection initialization error");
+ goto error;
+ }
+
+ ctx->firehose_client->upstream = upstream;
+ flb_output_upstream_set(upstream, ctx->ins);
+
+ ctx->firehose_client->host = ctx->endpoint;
+
+ /* Export context */
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+
+error:
+ flb_free(session_name);
+ flb_plg_error(ctx->ins, "Initialization failed");
+ flb_firehose_ctx_destroy(ctx);
+ return -1;
+}
+
+struct flush *new_flush_buffer()
+{
+ struct flush *buf;
+
+
+ buf = flb_calloc(1, sizeof(struct flush));
+ if (!buf) {
+ flb_errno();
+ return NULL;
+ }
+
+ buf->tmp_buf = flb_malloc(sizeof(char) * PUT_RECORD_BATCH_PAYLOAD_SIZE);
+ if (!buf->tmp_buf) {
+ flb_errno();
+ flush_destroy(buf);
+ return NULL;
+ }
+ buf->tmp_buf_size = PUT_RECORD_BATCH_PAYLOAD_SIZE;
+
+ buf->events = flb_malloc(sizeof(struct firehose_event) * MAX_EVENTS_PER_PUT);
+ if (!buf->events) {
+ flb_errno();
+ flush_destroy(buf);
+ return NULL;
+ }
+ buf->events_capacity = MAX_EVENTS_PER_PUT;
+
+ return buf;
+}
+
+static void cb_firehose_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ struct flb_firehose *ctx = out_context;
+ int ret;
+ struct flush *buf;
+ (void) i_ins;
+ (void) config;
+
+ buf = new_flush_buffer();
+ if (!buf) {
+ flb_plg_error(ctx->ins, "Failed to construct flush buffer");
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ ret = process_and_send_records(ctx, buf,
+ event_chunk->data, event_chunk->size);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to send records");
+ flush_destroy(buf);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ flb_plg_debug(ctx->ins, "Processed %d records, sent %d to %s",
+ buf->records_processed, buf->records_sent, ctx->delivery_stream);
+ flush_destroy(buf);
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+void flb_firehose_ctx_destroy(struct flb_firehose *ctx)
+{
+ if (ctx != NULL) {
+ if (ctx->base_aws_provider) {
+ flb_aws_provider_destroy(ctx->base_aws_provider);
+ }
+
+ if (ctx->aws_provider) {
+ flb_aws_provider_destroy(ctx->aws_provider);
+ }
+
+ if (ctx->cred_tls) {
+ flb_tls_destroy(ctx->cred_tls);
+ }
+
+ if (ctx->sts_tls) {
+ flb_tls_destroy(ctx->sts_tls);
+ }
+
+ if (ctx->client_tls) {
+ flb_tls_destroy(ctx->client_tls);
+ }
+
+ if (ctx->firehose_client) {
+ flb_aws_client_destroy(ctx->firehose_client);
+ }
+
+ if (ctx->custom_endpoint == FLB_FALSE) {
+ flb_free(ctx->endpoint);
+ }
+
+ flb_free(ctx);
+ }
+}
+
+static int cb_firehose_exit(void *data, struct flb_config *config)
+{
+ struct flb_firehose *ctx = data;
+
+ flb_firehose_ctx_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "region", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_firehose, region),
+ "The AWS region of your delivery stream"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "delivery_stream", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_firehose, delivery_stream),
+ "Firehose delivery stream name"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "time_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_firehose, time_key),
+ "Add the timestamp to the record under this key. By default the timestamp "
+ "from Fluent Bit will not be added to records sent to Kinesis."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "time_key_format", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_firehose, time_key_format),
+ "strftime compliant format string for the timestamp; for example, "
+ "the default is '%Y-%m-%dT%H:%M:%S'. This option is used with time_key. "
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "role_arn", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_firehose, role_arn),
+ "ARN of an IAM role to assume (ex. for cross account access)."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "endpoint", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify a custom endpoint for the Firehose API"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "sts_endpoint", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_firehose, sts_endpoint),
+ "Custom endpoint for the STS API."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "external_id", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_firehose, external_id),
+ "Specify an external ID for the STS API, can be used with the role_arn parameter if your role "
+ "requires an external ID."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "compression", NULL,
+ 0, FLB_FALSE, 0,
+ "Compression type for Firehose records. Each log record is individually compressed "
+ "and sent to Firehose. 'gzip' and 'arrow' are the supported values. "
+ "'arrow' is only an available if Apache Arrow was enabled at compile time. "
+ "Defaults to no compression."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "log_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_firehose, log_key),
+ "By default, the whole log record will be sent to Firehose. "
+ "If you specify a key name with this option, then only the value of "
+ "that key will be sent to Firehose. For example, if you are using "
+ "the Fluentd Docker log driver, you can specify `log_key log` and only "
+ "the log message will be sent to Firehose."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "auto_retry_requests", "true",
+ 0, FLB_TRUE, offsetof(struct flb_firehose, retry_requests),
+ "Immediately retry failed requests to AWS services once. This option "
+ "does not affect the normal Fluent Bit retry mechanism with backoff. "
+ "Instead, it enables an immediate retry with no delay for networking "
+ "errors, which may help improve throughput when there are transient/random "
+ "networking issues."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "profile", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_firehose, profile),
+ "AWS Profile name. AWS Profiles can be configured with AWS CLI and are usually stored in "
+ "$HOME/.aws/ directory."
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin registration */
+struct flb_output_plugin out_kinesis_firehose_plugin = {
+ .name = "kinesis_firehose",
+ .description = "Send logs to Amazon Kinesis Firehose",
+ .cb_init = cb_firehose_init,
+ .cb_flush = cb_firehose_flush,
+ .cb_exit = cb_firehose_exit,
+ .workers = 1,
+ .flags = 0,
+
+ /* Configuration */
+ .config_map = config_map,
+};
diff --git a/src/fluent-bit/plugins/out_kinesis_firehose/firehose.h b/src/fluent-bit/plugins/out_kinesis_firehose/firehose.h
new file mode 100644
index 000000000..98e2659d8
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kinesis_firehose/firehose.h
@@ -0,0 +1,104 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_FIREHOSE_H
+#define FLB_OUT_FIREHOSE_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_aws_util.h>
+#include <fluent-bit/flb_signv4.h>
+
+#define DEFAULT_TIME_KEY_FORMAT "%Y-%m-%dT%H:%M:%S"
+
+/* buffers used for each flush */
+struct flush {
+ /* temporary buffer for storing the serialized event messages */
+ char *tmp_buf;
+ size_t tmp_buf_size;
+ /* current index of tmp_buf */
+ size_t tmp_buf_offset;
+
+ /* projected final size of the payload for this flush */
+ size_t data_size;
+
+ /* log records - each of these has a pointer to its message in tmp_buf */
+ struct firehose_event *events;
+ int events_capacity;
+ /* current event */
+ int event_index;
+
+ /* the payload of the API request */
+ char *out_buf;
+ size_t out_buf_size;
+
+ /* buffer used to temporarily hold an event during processing */
+ char *event_buf;
+ size_t event_buf_size;
+
+ int records_sent;
+ int records_processed;
+};
+
+struct firehose_event {
+ char *json;
+ size_t len;
+ struct timespec timestamp;
+};
+
+struct flb_firehose {
+ /*
+ * TLS instances can not be re-used. So we have one for:
+ * - Base cred provider (needed for EKS provider)
+ * - STS Assume role provider
+ * - The Firehose client for this plugin
+ */
+ struct flb_tls *cred_tls;
+ struct flb_tls *sts_tls;
+ struct flb_tls *client_tls;
+ struct flb_aws_provider *aws_provider;
+ struct flb_aws_provider *base_aws_provider;
+ struct flb_aws_client *firehose_client;
+
+ /* configuration options */
+ const char *delivery_stream;
+ const char *time_key;
+ const char *time_key_format;
+ const char *region;
+ const char *role_arn;
+ const char *log_key;
+ const char *external_id;
+ char *sts_endpoint;
+ char *profile;
+ int custom_endpoint;
+ int retry_requests;
+ int compression;
+
+ /* must be freed on shutdown if custom_endpoint is not set */
+ char *endpoint;
+
+ /* Plugin output instance reference */
+ struct flb_output_instance *ins;
+};
+
+void flb_firehose_ctx_destroy(struct flb_firehose *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_kinesis_firehose/firehose_api.c b/src/fluent-bit/plugins/out_kinesis_firehose/firehose_api.c
new file mode 100644
index 000000000..5c2f0c2f9
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kinesis_firehose/firehose_api.c
@@ -0,0 +1,959 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_slist.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_macros.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_aws_util.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_utils.h>
+
+#include <fluent-bit/flb_base64.h>
+#include <fluent-bit/aws/flb_aws_compress.h>
+
+#include <monkey/mk_core.h>
+#include <msgpack.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#ifndef FLB_SYSTEM_WINDOWS
+#include <unistd.h>
+#endif
+
+#include "firehose_api.h"
+
+#define ERR_CODE_SERVICE_UNAVAILABLE "ServiceUnavailableException"
+
+static struct flb_aws_header put_record_batch_header = {
+ .key = "X-Amz-Target",
+ .key_len = 12,
+ .val = "Firehose_20150804.PutRecordBatch",
+ .val_len = 32,
+};
+
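+/*
+ * Append 'str' to 'buf' at offset '*off' without exceeding 'left' bytes;
+ * when 'str_len' is 0 the length is taken from strlen(str). Returns FLB_TRUE
+ * on success or FLB_FALSE (writing nothing) if the data does not fit.
+ */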
+static inline int try_to_write(char *buf, int *off, size_t left,
+ const char *str, size_t str_len)
+{
+ if (str_len <= 0){
+ str_len = strlen(str);
+ }
+ if (left <= *off+str_len) {
+ return FLB_FALSE;
+ }
+ memcpy(buf+*off, str, str_len);
+ *off += str_len;
+ return FLB_TRUE;
+}
+
+/*
+ * Writes the "header" for a put_record_batch payload
+ */
+static int init_put_payload(struct flb_firehose *ctx, struct flush *buf,
+ int *offset)
+{
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "{\"DeliveryStreamName\":\"", 23)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ ctx->delivery_stream, 0)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "\",\"Records\":[", 13)) {
+ goto error;
+ }
+ return 0;
+
+error:
+ return -1;
+}
+
+/*
+ * Writes a log event to the output buffer
+ */
+static int write_event(struct flb_firehose *ctx, struct flush *buf,
+ struct firehose_event *event, int *offset)
+{
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "{\"Data\":\"", 9)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ event->json, event->len)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "\"}", 2)) {
+ goto error;
+ }
+
+ return 0;
+
+error:
+ return -1;
+}
+
+/* Terminates a PutRecordBatch payload */
+static int end_put_payload(struct flb_firehose *ctx, struct flush *buf,
+ int *offset)
+{
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "]}", 2)) {
+ return -1;
+ }
+ buf->out_buf[*offset] = '\0';
+
+ return 0;
+}
+
+
+/*
+ * Processes a single msgpack record. Return values:
+ * -1 = failure, record not added (general error)
+ *  0 = success, record added
+ *  1 = ran out of buffer space; the caller must send the current batch and retry
+ *  2 = record could not be processed and is discarded
+ */
+static int process_event(struct flb_firehose *ctx, struct flush *buf,
+ const msgpack_object *obj, struct flb_time *tms)
+{
+ size_t written = 0;
+ int ret;
+ size_t size;
+ size_t b64_len;
+ struct firehose_event *event;
+ char *tmp_buf_ptr;
+ char *time_key_ptr;
+ struct tm time_stamp;
+ struct tm *tmp;
+ size_t len;
+ size_t tmp_size;
+ void *compressed_tmp_buf;
+ char *out_buf;
+
+ tmp_buf_ptr = buf->tmp_buf + buf->tmp_buf_offset;
+ ret = flb_msgpack_to_json(tmp_buf_ptr,
+ buf->tmp_buf_size - buf->tmp_buf_offset,
+ obj);
+ if (ret <= 0) {
+ /*
+ * negative value means failure to write to buffer,
+ * which means we ran out of space, and must send the logs
+ *
+ * TODO: This could also incorrectly be triggered if the record
+ * is larger than MAX_EVENT_SIZE
+ */
+ return 1;
+ }
+ written = (size_t) ret;
+
+ /* Discard empty messages (written == 2 means '""') */
+ if (written <= 2) {
+ flb_plg_debug(ctx->ins, "Found empty log message, %s", ctx->delivery_stream);
+ return 2;
+ }
+
+ if (ctx->log_key) {
+ /*
+ * flb_msgpack_to_json will encase the value in quotes
+ * We don't want that for log_key, so we ignore the first
+ * and last character
+ */
+ written -= 2;
+ tmp_buf_ptr++; /* pass over the opening quote */
+ buf->tmp_buf_offset++;
+ }
+
+ /* is (written + 1) because we still have to append newline */
+ if ((written + 1) >= MAX_EVENT_SIZE) {
+ flb_plg_warn(ctx->ins, "[size=%zu] Discarding record which is larger than "
+ "max size allowed by Firehose, %s", written + 1,
+ ctx->delivery_stream);
+ return 2;
+ }
+
+ if (ctx->time_key) {
+ /* append time_key to end of json string */
+ tmp = gmtime_r(&tms->tm.tv_sec, &time_stamp);
+ if (!tmp) {
+ flb_plg_error(ctx->ins, "Could not create time stamp for %lu unix "
+ "seconds, discarding record, %s", tms->tm.tv_sec,
+ ctx->delivery_stream);
+ return 2;
+ }
+
+ /* format time output and return the length */
+ len = flb_aws_strftime_precision(&out_buf, ctx->time_key_format, tms);
+
+ /* how much space do we have left */
+ tmp_size = (buf->tmp_buf_size - buf->tmp_buf_offset) - written;
+ if (len > tmp_size) {
+ /* not enough space - tell caller to retry */
+ flb_free(out_buf);
+ return 1;
+ }
+
+ if (len == 0) {
+ /*
+ * when the length of out_buf is not enough for time_key_format,
+ * time_key will not be added to record.
+ */
+ flb_plg_error(ctx->ins, "Failed to add time_key %s to record, %s",
+ ctx->time_key, ctx->delivery_stream);
+ flb_free(out_buf);
+ }
+ else {
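+ /*
+ * Splice the timestamp in place: overwrite the record's trailing '}'
+ * with ',"<time_key>":"<formatted value>"}' so the time key becomes
+ * the last field of the JSON object.
+ */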
+ time_key_ptr = tmp_buf_ptr + written - 1;
+ memcpy(time_key_ptr, ",", 1);
+ time_key_ptr++;
+ memcpy(time_key_ptr, "\"", 1);
+ time_key_ptr++;
+ memcpy(time_key_ptr, ctx->time_key, strlen(ctx->time_key));
+ time_key_ptr += strlen(ctx->time_key);
+ memcpy(time_key_ptr, "\":\"", 3);
+ time_key_ptr += 3;
+ tmp_size = buf->tmp_buf_size - buf->tmp_buf_offset;
+ tmp_size -= (time_key_ptr - tmp_buf_ptr);
+
+ /* merge out_buf to time_key_ptr */
+ memcpy(time_key_ptr, out_buf, len);
+ flb_free(out_buf);
+ time_key_ptr += len;
+ memcpy(time_key_ptr, "\"}", 2);
+ time_key_ptr += 2;
+ written = (time_key_ptr - tmp_buf_ptr);
+ }
+ }
+
+ /* is (written + 1) because we still have to append newline */
+ if ((written + 1) >= MAX_EVENT_SIZE) {
+ flb_plg_warn(ctx->ins, "[size=%zu] Discarding record which is larger than "
+ "max size allowed by Firehose, %s", written + 1,
+ ctx->delivery_stream);
+ return 2;
+ }
+
+ /* append newline to record */
+
+ tmp_size = (buf->tmp_buf_size - buf->tmp_buf_offset) - written;
+ if (tmp_size <= 1) {
+ /* no space left- tell caller to retry */
+ return 1;
+ }
+
+ memcpy(tmp_buf_ptr + written, "\n", 1);
+ written++;
+
+ if (ctx->compression == FLB_AWS_COMPRESS_NONE) {
+ /*
+ * check if event_buf is initialized and big enough
+ * Base64 encoding will increase size by ~4/3
+ */
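+        /* the 1.5 factor plus 4 bytes gives headroom over the exact 4/3 base64 expansion and padding */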
+ size = (written * 1.5) + 4;
+ if (buf->event_buf == NULL || buf->event_buf_size < size) {
+ flb_free(buf->event_buf);
+ buf->event_buf = flb_malloc(size);
+ buf->event_buf_size = size;
+ if (buf->event_buf == NULL) {
+ flb_errno();
+ return -1;
+ }
+ }
+
+ tmp_buf_ptr = buf->tmp_buf + buf->tmp_buf_offset;
+
+ ret = flb_base64_encode((unsigned char *) buf->event_buf, size, &b64_len,
+ (unsigned char *) tmp_buf_ptr, written);
+ if (ret != 0) {
+ flb_errno();
+ return -1;
+ }
+ written = b64_len;
+ }
+ else {
+ /*
+ * compress event, truncating input if needed
+ * replace event buffer with compressed buffer
+ */
+ ret = flb_aws_compression_b64_truncate_compress(ctx->compression,
+ MAX_B64_EVENT_SIZE,
+ tmp_buf_ptr,
+ written, &compressed_tmp_buf,
+ &size); /* evaluate size */
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Unable to compress record, discarding, "
+ "%s", ctx->delivery_stream);
+ return 2;
+ }
+ flb_free(buf->event_buf);
+ buf->event_buf = compressed_tmp_buf;
+ compressed_tmp_buf = NULL;
+ written = size;
+ }
+
+ tmp_buf_ptr = buf->tmp_buf + buf->tmp_buf_offset;
+ if ((buf->tmp_buf_size - buf->tmp_buf_offset) < written) {
+ /* not enough space, send logs */
+ return 1;
+ }
+
+ /* copy serialized json to tmp_buf */
+ memcpy(tmp_buf_ptr, buf->event_buf, written);
+
+ buf->tmp_buf_offset += written;
+ event = &buf->events[buf->event_index];
+ event->json = tmp_buf_ptr;
+ event->len = written;
+ event->timestamp.tv_sec = tms->tm.tv_sec;
+ event->timestamp.tv_nsec = tms->tm.tv_nsec;
+
+ return 0;
+}
+
+/* Resets or inits a flush struct */
+static void reset_flush_buf(struct flb_firehose *ctx, struct flush *buf) {
+ buf->event_index = 0;
+ buf->tmp_buf_offset = 0;
+ buf->data_size = PUT_RECORD_BATCH_HEADER_LEN + PUT_RECORD_BATCH_FOOTER_LEN;
+ buf->data_size += strlen(ctx->delivery_stream);
+}
+
+/* constructs a put payload, and then sends */
+static int send_log_events(struct flb_firehose *ctx, struct flush *buf) {
+ int ret;
+ int offset;
+ int i;
+ struct firehose_event *event;
+
+ if (buf->event_index <= 0) {
+ /*
+ * event_index should always be 1 more than the actual last event index
+ * when this function is called.
+ * Except in the case where send_log_events() is called at the end of
+ * process_and_send. If all records were already sent, event_index
+         * process_and_send_records(). If all records were already sent, event_index
+ */
+ return 0;
+ }
+
+ /* alloc out_buf if needed */
+ if (buf->out_buf == NULL || buf->out_buf_size < buf->data_size) {
+ if (buf->out_buf != NULL) {
+ flb_free(buf->out_buf);
+ }
+ buf->out_buf = flb_malloc(buf->data_size + 1);
+ if (!buf->out_buf) {
+ flb_errno();
+ return -1;
+ }
+ buf->out_buf_size = buf->data_size;
+ }
+
+ offset = 0;
+ ret = init_put_payload(ctx, buf, &offset);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to initialize PutRecordBatch payload, %s",
+ ctx->delivery_stream);
+ return -1;
+ }
+
+ for (i = 0; i < buf->event_index; i++) {
+ event = &buf->events[i];
+ ret = write_event(ctx, buf, event, &offset);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to write log record %d to "
+ "payload buffer, %s", i, ctx->delivery_stream);
+ return -1;
+ }
+ if (i != (buf->event_index -1)) {
+ if (!try_to_write(buf->out_buf, &offset, buf->out_buf_size,
+ ",", 1)) {
+ flb_plg_error(ctx->ins, "Could not terminate record with ','");
+ return -1;
+ }
+ }
+ }
+
+ ret = end_put_payload(ctx, buf, &offset);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Could not complete PutRecordBatch payload");
+ return -1;
+ }
+ flb_plg_debug(ctx->ins, "firehose:PutRecordBatch: events=%d, payload=%d bytes", i, offset);
+ ret = put_record_batch(ctx, buf, (size_t) offset, i);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to send log records");
+ return -1;
+ }
+ buf->records_sent += i;
+
+ return 0;
+}
+
+/*
+ * Processes the msgpack object, sends the current batch if needed
+ */
+static int add_event(struct flb_firehose *ctx, struct flush *buf,
+ const msgpack_object *obj, struct flb_time *tms)
+{
+ int ret;
+ struct firehose_event *event;
+ int retry_add = FLB_FALSE;
+ size_t event_bytes = 0;
+
+ if (buf->event_index == 0) {
+ /* init */
+ reset_flush_buf(ctx, buf);
+ }
+
+retry_add_event:
+ retry_add = FLB_FALSE;
+ ret = process_event(ctx, buf, obj, tms);
+ if (ret < 0) {
+ return -1;
+ }
+ else if (ret == 1) {
+ if (buf->event_index <= 0) {
+ /* somehow the record was larger than our entire request buffer */
+ flb_plg_warn(ctx->ins, "Discarding massive log record, %s",
+ ctx->delivery_stream);
+ return 0; /* discard this record and return to caller */
+ }
+ /* send logs and then retry the add */
+ retry_add = FLB_TRUE;
+ goto send;
+ } else if (ret == 2) {
+ /* discard this record and return to caller */
+ flb_plg_warn(ctx->ins, "Discarding large or unprocessable record, %s",
+ ctx->delivery_stream);
+ return 0;
+ }
+
+ event = &buf->events[buf->event_index];
+ event_bytes = event->len + PUT_RECORD_BATCH_PER_RECORD_LEN;
+
+ if ((buf->data_size + event_bytes) > PUT_RECORD_BATCH_PAYLOAD_SIZE) {
+ if (buf->event_index <= 0) {
+ /* somehow the record was larger than our entire request buffer */
+ flb_plg_warn(ctx->ins, "[size=%zu] Discarding massive log record, %s",
+ event_bytes, ctx->delivery_stream);
+ return 0; /* discard this record and return to caller */
+ }
+ /* do not send this event */
+ retry_add = FLB_TRUE;
+ goto send;
+ }
+
+ /* send is not needed yet, return to caller */
+ buf->data_size += event_bytes;
+ buf->event_index++;
+
+ if (buf->event_index == MAX_EVENTS_PER_PUT) {
+ goto send;
+ }
+
+ return 0;
+
+send:
+ ret = send_log_events(ctx, buf);
+ reset_flush_buf(ctx, buf);
+ if (ret < 0) {
+ return -1;
+ }
+
+ if (retry_add == FLB_TRUE) {
+ goto retry_add_event;
+ }
+
+ return 0;
+}
+
+/*
+ * Main routine: processes msgpack records and sends them in batches.
+ * The return value is the number of events processed (the number sent is stored in buf).
+ */
+int process_and_send_records(struct flb_firehose *ctx, struct flush *buf,
+ const char *data, size_t bytes)
+{
+    int i = 0;
+    size_t map_size;
+    msgpack_object map;
+ msgpack_object_kv *kv;
+ msgpack_object key;
+ msgpack_object val;
+ char *key_str = NULL;
+ size_t key_str_size = 0;
+ int j;
+ int ret;
+ int check = FLB_FALSE;
+ int found = FLB_FALSE;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ map = *log_event.body;
+ map_size = map.via.map.size;
+
+ if (ctx->log_key) {
+ key_str = NULL;
+ key_str_size = 0;
+ check = FLB_FALSE;
+ found = FLB_FALSE;
+
+ kv = map.via.map.ptr;
+
+ for(j=0; j < map_size; j++) {
+ key = (kv+j)->key;
+ if (key.type == MSGPACK_OBJECT_BIN) {
+ key_str = (char *) key.via.bin.ptr;
+ key_str_size = key.via.bin.size;
+ check = FLB_TRUE;
+ }
+ if (key.type == MSGPACK_OBJECT_STR) {
+ key_str = (char *) key.via.str.ptr;
+ key_str_size = key.via.str.size;
+ check = FLB_TRUE;
+ }
+
+ if (check == FLB_TRUE) {
+ if (strncmp(ctx->log_key, key_str, key_str_size) == 0) {
+ found = FLB_TRUE;
+ val = (kv+j)->val;
+ ret = add_event(ctx, buf, &val, &log_event.timestamp);
+ if (ret < 0 ) {
+ goto error;
+ }
+ }
+ }
+
+ }
+ if (found == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Could not find log_key '%s' in record, %s",
+ ctx->log_key, ctx->delivery_stream);
+ }
+ else {
+ i++;
+ }
+ continue;
+ }
+
+ ret = add_event(ctx, buf, &map, &log_event.timestamp);
+ if (ret < 0 ) {
+ goto error;
+ }
+ i++;
+ }
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ /* send any remaining events */
+ ret = send_log_events(ctx, buf);
+ reset_flush_buf(ctx, buf);
+
+ if (ret < 0) {
+ return -1;
+ }
+
+ /* return number of events processed */
+ buf->records_processed = i;
+
+ return i;
+
+error:
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return -1;
+}
+
+/*
+ * Returns number of failed records on success, -1 on failure
+ */
+static int process_api_response(struct flb_firehose *ctx,
+ struct flb_http_client *c)
+{
+ int i;
+ int k;
+ int w;
+ int ret;
+ int failed_records = -1;
+ int root_type;
+ char *out_buf;
+ int throughput_exceeded = FLB_FALSE;
+ size_t off = 0;
+ size_t out_size;
+ msgpack_unpacked result;
+ msgpack_object root;
+ msgpack_object key;
+ msgpack_object val;
+ msgpack_object response;
+ msgpack_object response_key;
+ msgpack_object response_val;
+
+ if (strstr(c->resp.payload, "\"FailedPutCount\":0")) {
+ return 0;
+ }
+
+ /* Convert JSON payload to msgpack */
+ ret = flb_pack_json(c->resp.payload, c->resp.payload_size,
+ &out_buf, &out_size, &root_type, NULL);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not pack/validate JSON API response\n%s",
+ c->resp.payload);
+ return -1;
+ }
+
+ /* Lookup error field */
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, out_buf, out_size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ flb_plg_error(ctx->ins, "Cannot unpack response to find error\n%s",
+ c->resp.payload);
+ failed_records = -1;
+ goto done;
+ }
+
+ root = result.data;
+ if (root.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected payload type=%i",
+ root.type);
+ failed_records = -1;
+ goto done;
+ }
+
+ for (i = 0; i < root.via.map.size; i++) {
+ key = root.via.map.ptr[i].key;
+ if (key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "unexpected key type=%i",
+ key.type);
+ failed_records = -1;
+ goto done;
+ }
+
+ if (key.via.str.size >= 14 &&
+ strncmp(key.via.str.ptr, "FailedPutCount", 14) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ flb_plg_error(ctx->ins, "unexpected 'FailedPutCount' value type=%i",
+ val.type);
+ failed_records = -1;
+ goto done;
+ }
+
+ failed_records = val.via.u64;
+ if (failed_records == 0) {
+ /* no need to check RequestResponses field */
+ goto done;
+ }
+ }
+
+        if (key.via.str.size >= 16 &&
+            strncmp(key.via.str.ptr, "RequestResponses", 16) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_ARRAY) {
+ flb_plg_error(ctx->ins, "unexpected 'RequestResponses' value type=%i",
+ val.type);
+ failed_records = -1;
+ goto done;
+ }
+
+ if (val.via.array.size == 0) {
+ flb_plg_error(ctx->ins, "'RequestResponses' field in response is empty");
+ failed_records = -1;
+ goto done;
+ }
+
+ for (k = 0; k < val.via.array.size; k++) {
+ /* iterate through the responses */
+ response = val.via.array.ptr[k];
+ if (response.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected 'RequestResponses[%d]' value type=%i",
+ k, response.type);
+ failed_records = -1;
+ goto done;
+ }
+ for (w = 0; w < response.via.map.size; w++) {
+ /* iterate through the response's keys */
+ response_key = response.via.map.ptr[w].key;
+ if (response_key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "unexpected key type=%i",
+ response_key.type);
+ failed_records = -1;
+ goto done;
+ }
+ if (response_key.via.str.size >= 9 &&
+ strncmp(response_key.via.str.ptr, "ErrorCode", 9) == 0) {
+ response_val = response.via.map.ptr[w].val;
+ if (!throughput_exceeded &&
+ response_val.via.str.size >= 27 &&
+ (strncmp(response_val.via.str.ptr,
+ ERR_CODE_SERVICE_UNAVAILABLE, 27) == 0)) {
+ throughput_exceeded = FLB_TRUE;
+ flb_plg_error(ctx->ins, "Throughput limits may have been exceeded, %s",
+ ctx->delivery_stream);
+ }
+ flb_plg_debug(ctx->ins, "Record %i failed with err_code=%.*s",
+ k, response_val.via.str.size,
+ response_val.via.str.ptr);
+ }
+ if (response_key.via.str.size >= 12 &&
+ strncmp(response_key.via.str.ptr, "ErrorMessage", 12) == 0) {
+ response_val = response.via.map.ptr[w].val;
+ flb_plg_debug(ctx->ins, "Record %i failed with err_msg=%.*s",
+ k, response_val.via.str.size,
+ response_val.via.str.ptr);
+ }
+ }
+ }
+ }
+ }
+
+ done:
+ flb_free(out_buf);
+ msgpack_unpacked_destroy(&result);
+ return failed_records;
+}
+
+static int plugin_under_test()
+{
+ if (getenv("FLB_FIREHOSE_PLUGIN_UNDER_TEST") != NULL) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+static char *mock_error_response(char *error_env_var)
+{
+ char *err_val = NULL;
+ char *error = NULL;
+ int len = 0;
+
+ err_val = getenv(error_env_var);
+ if (err_val != NULL && strlen(err_val) > 0) {
+ error = flb_malloc(strlen(err_val) + sizeof(char));
+ if (error == NULL) {
+ flb_errno();
+ return NULL;
+ }
+
+ len = strlen(err_val);
+ memcpy(error, err_val, len);
+ error[len] = '\0';
+ return error;
+ }
+
+ return NULL;
+}
+
+int partial_success()
+{
+ char *err_val = NULL;
+
+ err_val = getenv("PARTIAL_SUCCESS_CASE");
+ if (err_val != NULL && strlen(err_val) > 0) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+static struct flb_http_client *mock_http_call(char *error_env_var)
+{
+ /* create an http client so that we can set the response */
+ struct flb_http_client *c = NULL;
+ char *error = mock_error_response(error_env_var);
+
+ c = flb_calloc(1, sizeof(struct flb_http_client));
+ if (!c) {
+ flb_errno();
+ flb_free(error);
+ return NULL;
+ }
+ mk_list_init(&c->headers);
+
+ if (error != NULL) {
+ c->resp.status = 400;
+ /* resp.data is freed on destroy, payload is supposed to reference it */
+ c->resp.data = error;
+ c->resp.payload = c->resp.data;
+ c->resp.payload_size = strlen(error);
+ }
+ else {
+ c->resp.status = 200;
+ c->resp.payload = "";
+ c->resp.payload_size = 0;
+ if (partial_success() == FLB_TRUE) {
+ /* mocked partial failure response */
+ c->resp.payload = "{\"Encrypted\": false,\"FailedPutCount\": 1,\"RequestResponses\":[{\"RecordId\": \"Me0CqhxK3BK3MiBWgy/AydQrVUg7vbc40Z4zNds3jiiJDscqGtWFz9bJugbrAoN70YCaxpXgmyR9R+LFxS2rleDepqFljYArBtXnRmVzSMOAzTJZlwsO84+757kBvA5RUycF3wC3XZjFtUFP0Q4QTdhuD8HMJBvKGiBY9Yy5jBUmZuKhXxCLQ/YTwKQaQKn4fnc5iISxaErPXsWMI7OApHZ1eFGvcHVZ\"},{\"RecordId\": \"NRAZVkblYgWWDSvTAF/9jBR4MlciEUFV+QIjb1D8uar7YbC3wqeLQuSZ0GEopGlE/8JAK9h9aAyTub5lH5V+bZuR3SeKKABWoJ788/tI455Kup9oRzmXTKWiXeklxmAe9MtsSz0y4t3oIrSLq8e3QVH9DJKWdhDkIXd8lXK1wuJi8tKmnNgxFob/Cz398kQFXPc4JwKj3Dv3Ou0qibZiusko6f7yBUve\",\"ErrorCode\":\"ServiceUnavailableException\",\"ErrorMessage\": \"Catsssss\"},{\"RecordId\": \"InFGTFvML/MGCLtnC3moI/zCISrKSScu/D8oCGmeIIeVaYUfywHpr2NmsQiZsxUL9+4ThOm2ypxqFGudZvgXQ45gUWMG+R4Y5xzS03N+vQ71+UaL392jY6HUs2SxYkZQe6vpdK+xHaJJ1b8uE++Laxg9rmsXtNt193WjmH3FhU1veu9pnSiGZgqC7czpyVgvZBNeWc+hTjEVicj3VAHBg/9yRN0sC30C\",\"ErrorCode\":\"ServiceUnavailableException\",\"ErrorMessage\": \"Catsssss 2\"},{\"RecordId\":\"KufmrRJ2z8zAgYAYGz6rm4BQC8SA7g87lQJQl2DQ+Be5EiEpr5bG33ilnQVvo1Q05BJuQBnjbw2cm919Ya72awapxfOBdZcPPKJN7KDZV/n1DFCDDrJ2vgyNK4qhKdo3Mr7nyrBpkLIs93PdxOdrTh11Y9HHEaFtim0cHJYpKCSZBjNObfWjfjHx5TuB7L3PHQqMKMu0MT5L9gPgVXHElGalqKZGTcfB\"}]}";
+ c->resp.payload_size = strlen(c->resp.payload);
+ }
+ else {
+ /* mocked success response */
+ c->resp.payload = "{\"Encrypted\": false,\"FailedPutCount\": 0,\"RequestResponses\":[{\"RecordId\": \"Me0CqhxK3BK3MiBWgy/AydQrVUg7vbc40Z4zNds3jiiJDscqGtWFz9bJugbrAoN70YCaxpXgmyR9R+LFxS2rleDepqFljYArBtXnRmVzSMOAzTJZlwsO84+757kBvA5RUycF3wC3XZjFtUFP0Q4QTdhuD8HMJBvKGiBY9Yy5jBUmZuKhXxCLQ/YTwKQaQKn4fnc5iISxaErPXsWMI7OApHZ1eFGvcHVZ\"},{\"RecordId\": \"NRAZVkblYgWWDSvTAF/9jBR4MlciEUFV+QIjb1D8uar7YbC3wqeLQuSZ0GEopGlE/8JAK9h9aAyTub5lH5V+bZuR3SeKKABWoJ788/tI455Kup9oRzmXTKWiXeklxmAe9MtsSz0y4t3oIrSLq8e3QVH9DJKWdhDkIXd8lXK1wuJi8tKmnNgxFob/Cz398kQFXPc4JwKj3Dv3Ou0qibZiusko6f7yBUve\"},{\"RecordId\": \"InFGTFvML/MGCLtnC3moI/zCISrKSScu/D8oCGmeIIeVaYUfywHpr2NmsQiZsxUL9+4ThOm2ypxqFGudZvgXQ45gUWMG+R4Y5xzS03N+vQ71+UaL392jY6HUs2SxYkZQe6vpdK+xHaJJ1b8uE++Laxg9rmsXtNt193WjmH3FhU1veu9pnSiGZgqC7czpyVgvZBNeWc+hTjEVicj3VAHBg/9yRN0sC30C\"},{\"RecordId\": \"KufmrRJ2z8zAgYAYGz6rm4BQC8SA7g87lQJQl2DQ+Be5EiEpr5bG33ilnQVvo1Q05BJuQBnjbw2cm919Ya72awapxfOBdZcPPKJN7KDZV/n1DFCDDrJ2vgyNK4qhKdo3Mr7nyrBpkLIs93PdxOdrTh11Y9HHEaFtim0cHJYpKCSZBjNObfWjfjHx5TuB7L3PHQqMKMu0MT5L9gPgVXHElGalqKZGTcfB\"}]}";
+ c->resp.payload_size = strlen(c->resp.payload);
+ }
+ }
+
+ return c;
+}
+
+
+/*
+ * Returns -1 on failure, 0 on success
+ */
+int put_record_batch(struct flb_firehose *ctx, struct flush *buf,
+ size_t payload_size, int num_records)
+{
+
+ struct flb_http_client *c = NULL;
+ struct flb_aws_client *firehose_client;
+ flb_sds_t error;
+ int failed_records = 0;
+
+ flb_plg_debug(ctx->ins, "Sending log records to delivery stream %s",
+ ctx->delivery_stream);
+
+ if (plugin_under_test() == FLB_TRUE) {
+ c = mock_http_call("TEST_PUT_RECORD_BATCH_ERROR");
+ }
+ else {
+ firehose_client = ctx->firehose_client;
+ c = firehose_client->client_vtable->request(firehose_client, FLB_HTTP_POST,
+ "/", buf->out_buf, payload_size,
+ &put_record_batch_header, 1);
+ }
+
+ if (c) {
+ flb_plg_debug(ctx->ins, "PutRecordBatch http status=%d", c->resp.status);
+
+ if (c->resp.status == 200) {
+ /* Firehose API can return partial success- check response */
+ if (c->resp.payload_size > 0) {
+ failed_records = process_api_response(ctx, c);
+ if (failed_records < 0) {
+ flb_plg_error(ctx->ins, "PutRecordBatch response "
+ "could not be parsed, %s",
+ c->resp.payload);
+ flb_http_client_destroy(c);
+ return -1;
+ }
+ if (failed_records == num_records) {
+ flb_plg_error(ctx->ins, "PutRecordBatch request returned "
+                                  "with no records successfully received, %s",
+ ctx->delivery_stream);
+ flb_http_client_destroy(c);
+ return -1;
+ }
+ if (failed_records > 0) {
+ flb_plg_error(ctx->ins, "%d out of %d records failed to be "
+ "delivered, will retry this batch, %s",
+ failed_records, num_records,
+ ctx->delivery_stream);
+ flb_http_client_destroy(c);
+ return -1;
+ }
+ }
+ flb_plg_debug(ctx->ins, "Sent events to %s", ctx->delivery_stream);
+ flb_http_client_destroy(c);
+ return 0;
+ }
+
+ /* Check error */
+ if (c->resp.payload_size > 0) {
+ error = flb_aws_error(c->resp.payload, c->resp.payload_size);
+ if (error != NULL) {
+ if (strcmp(error, ERR_CODE_SERVICE_UNAVAILABLE) == 0) {
+ flb_plg_error(ctx->ins, "Throughput limits for %s "
+ "may have been exceeded.",
+ ctx->delivery_stream);
+ }
+ if (strncmp(error, "SerializationException", 22) == 0) {
+ /*
+                     * If this happens, we have a bug in the code
+ * User should send us the output to debug
+ */
+ flb_plg_error(ctx->ins, "<<------Bug in Code------>>");
+ printf("Malformed request: %s", buf->out_buf);
+ }
+ flb_aws_print_error(c->resp.payload, c->resp.payload_size,
+ "PutRecordBatch", ctx->ins);
+ flb_sds_destroy(error);
+ }
+ else {
+ /* error could not be parsed, print raw response to debug */
+ flb_plg_debug(ctx->ins, "Raw response: %s", c->resp.payload);
+ }
+ }
+ }
+
+ flb_plg_error(ctx->ins, "Failed to send log records to %s", ctx->delivery_stream);
+ if (c) {
+ flb_http_client_destroy(c);
+ }
+ return -1;
+}
+
+
+void flush_destroy(struct flush *buf)
+{
+ if (buf) {
+ flb_free(buf->tmp_buf);
+ flb_free(buf->out_buf);
+ flb_free(buf->events);
+ flb_free(buf->event_buf);
+ flb_free(buf);
+ }
+}
diff --git a/src/fluent-bit/plugins/out_kinesis_firehose/firehose_api.h b/src/fluent-bit/plugins/out_kinesis_firehose/firehose_api.h
new file mode 100644
index 000000000..55e107a55
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kinesis_firehose/firehose_api.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_FIREHOSE_API
+#define FLB_OUT_FIREHOSE_API
+
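+/*
+ * These values mirror the documented Firehose PutRecordBatch limits: up to
+ * 4 MiB per request, 500 records per request, and 1,000 KiB per record
+ * before base64 encoding. MAX_B64_EVENT_SIZE is the encoded form of the
+ * per-record limit: every 3 input bytes become 4 output characters, so
+ * ceil(1024000 / 3) * 4 = 341334 * 4 = 1365336.
+ */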
+#define PUT_RECORD_BATCH_PAYLOAD_SIZE 4194304
+#define MAX_EVENTS_PER_PUT 500
+#define MAX_EVENT_SIZE 1024000
+#define MAX_B64_EVENT_SIZE 1365336 /* ceil(1024000 / 3) * 4 */
+
+/* number of characters needed to 'start' a PutRecordBatch payload */
+#define PUT_RECORD_BATCH_HEADER_LEN 42
+/* number of characters needed per record in a PutRecordBatch payload */
+#define PUT_RECORD_BATCH_PER_RECORD_LEN 12
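+/* i.e. the per-record JSON wrapper '{"Data":""},' accounts for 12 characters */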
+/* number of characters needed to 'end' a PutRecordBatch payload */
+#define PUT_RECORD_BATCH_FOOTER_LEN 4
+
+#include "firehose.h"
+
+void flush_destroy(struct flush *buf);
+
+int process_and_send_records(struct flb_firehose *ctx, struct flush *buf,
+ const char *data, size_t bytes);
+
+int put_record_batch(struct flb_firehose *ctx, struct flush *buf,
+ size_t payload_size, int num_records);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_kinesis_streams/CMakeLists.txt b/src/fluent-bit/plugins/out_kinesis_streams/CMakeLists.txt
new file mode 100644
index 000000000..d95110ee2
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kinesis_streams/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ kinesis.c
+ kinesis_api.c)
+
+FLB_PLUGIN(out_kinesis_streams "${src}" "")
diff --git a/src/fluent-bit/plugins/out_kinesis_streams/kinesis.c b/src/fluent-bit/plugins/out_kinesis_streams/kinesis.c
new file mode 100644
index 000000000..85dd37948
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kinesis_streams/kinesis.c
@@ -0,0 +1,499 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_slist.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_output_plugin.h>
+
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_aws_util.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_utils.h>
+
+#include <monkey/mk_core.h>
+#include <msgpack.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "kinesis.h"
+#include "kinesis_api.h"
+
+static struct flb_aws_header content_type_header = {
+ .key = "Content-Type",
+ .key_len = 12,
+ .val = "application/x-amz-json-1.1",
+ .val_len = 26,
+};
+
+static int cb_kinesis_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ const char *tmp;
+ char *session_name = NULL;
+ struct flb_kinesis *ctx = NULL;
+ int ret;
+ (void) config;
+ (void) data;
+
+ ctx = flb_calloc(1, sizeof(struct flb_kinesis));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->ins = ins;
+
+ /* Populate context with config map defaults and incoming properties */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "configuration error");
+ goto error;
+ }
+
+ tmp = flb_output_get_property("stream", ins);
+ if (tmp) {
+ ctx->stream_name = tmp;
+ } else {
+ flb_plg_error(ctx->ins, "'stream' is a required field");
+ goto error;
+ }
+
+ tmp = flb_output_get_property("time_key", ins);
+ if (tmp) {
+ ctx->time_key = tmp;
+ }
+
+ tmp = flb_output_get_property("time_key_format", ins);
+ if (tmp) {
+ ctx->time_key_format = tmp;
+ } else {
+ ctx->time_key_format = DEFAULT_TIME_KEY_FORMAT;
+ }
+
+ tmp = flb_output_get_property("log_key", ins);
+ if (tmp) {
+ ctx->log_key = tmp;
+ }
+
+ if (ctx->log_key && ctx->time_key) {
+ flb_plg_error(ctx->ins, "'time_key' and 'log_key' can not be used together");
+ goto error;
+ }
+
+ tmp = flb_output_get_property("endpoint", ins);
+ if (tmp) {
+ ctx->custom_endpoint = FLB_TRUE;
+ ctx->endpoint = removeProtocol((char *) tmp, "https://");
+ }
+ else {
+ ctx->custom_endpoint = FLB_FALSE;
+ }
+
+ tmp = flb_output_get_property("sts_endpoint", ins);
+ if (tmp) {
+ ctx->sts_endpoint = (char *) tmp;
+ }
+
+ tmp = flb_output_get_property("region", ins);
+ if (tmp) {
+ ctx->region = tmp;
+ } else {
+ flb_plg_error(ctx->ins, "'region' is a required field");
+ goto error;
+ }
+
+ tmp = flb_output_get_property("role_arn", ins);
+ if (tmp) {
+ ctx->role_arn = tmp;
+ }
+
+    /* one TLS instance for the credentials provider, one for the kinesis client */
+ ctx->cred_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+
+ if (!ctx->cred_tls) {
+ flb_plg_error(ctx->ins, "Failed to create tls context");
+ goto error;
+ }
+
+ ctx->client_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+ if (!ctx->client_tls) {
+ flb_plg_error(ctx->ins, "Failed to create tls context");
+ goto error;
+ }
+
+ ctx->aws_provider = flb_standard_chain_provider_create(config,
+ ctx->cred_tls,
+ (char *) ctx->region,
+ ctx->sts_endpoint,
+ NULL,
+ flb_aws_client_generator(),
+ ctx->profile);
+ if (!ctx->aws_provider) {
+ flb_plg_error(ctx->ins, "Failed to create AWS Credential Provider");
+ goto error;
+ }
+
+ ctx->uuid = flb_sts_session_name();
+ if (!ctx->uuid) {
+ flb_plg_error(ctx->ins,
+ "Failed to generate plugin instance UUID");
+ goto error;
+ }
+
+ if(ctx->role_arn) {
+ /* set up sts assume role provider */
+ session_name = flb_sts_session_name();
+ if (!session_name) {
+ flb_plg_error(ctx->ins,
+ "Failed to generate random STS session name");
+ goto error;
+ }
+
+ /* STS provider needs yet another separate TLS instance */
+ ctx->sts_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+ if (!ctx->sts_tls) {
+ flb_errno();
+ goto error;
+ }
+
+ ctx->base_aws_provider = ctx->aws_provider;
+
+ ctx->aws_provider = flb_sts_provider_create(config,
+ ctx->sts_tls,
+ ctx->base_aws_provider,
+ (char *) ctx->external_id,
+ (char *) ctx->role_arn,
+ session_name,
+ (char *) ctx->region,
+ ctx->sts_endpoint,
+ NULL,
+ flb_aws_client_generator());
+ if (!ctx->aws_provider) {
+ flb_plg_error(ctx->ins,
+ "Failed to create AWS STS Credential Provider");
+ goto error;
+ }
+        /* session name can be freed after the provider is created */
+ flb_free(session_name);
+ session_name = NULL;
+ }
+
+ /* initialize credentials and set to sync mode */
+ ctx->aws_provider->provider_vtable->sync(ctx->aws_provider);
+ ctx->aws_provider->provider_vtable->init(ctx->aws_provider);
+ ctx->aws_provider->provider_vtable->upstream_set(ctx->aws_provider, ctx->ins);
+
+ if (ctx->endpoint == NULL) {
+ ctx->endpoint = flb_aws_endpoint("kinesis", (char *) ctx->region);
+ if (!ctx->endpoint) {
+ goto error;
+ }
+ }
+
+ struct flb_aws_client_generator *generator = flb_aws_client_generator();
+ ctx->kinesis_client = generator->create();
+ if (!ctx->kinesis_client) {
+ goto error;
+ }
+ ctx->kinesis_client->name = "kinesis_client";
+ ctx->kinesis_client->has_auth = FLB_TRUE;
+ ctx->kinesis_client->provider = ctx->aws_provider;
+ ctx->kinesis_client->region = (char *) ctx->region;
+ ctx->kinesis_client->retry_requests = ctx->retry_requests;
+ ctx->kinesis_client->service = "kinesis";
+ ctx->kinesis_client->port = 443;
+ ctx->kinesis_client->flags = 0;
+ ctx->kinesis_client->proxy = NULL;
+ ctx->kinesis_client->static_headers = &content_type_header;
+ ctx->kinesis_client->static_headers_len = 1;
+
+ struct flb_upstream *upstream = flb_upstream_create(config, ctx->endpoint,
+ 443, FLB_IO_TLS,
+ ctx->client_tls);
+ if (!upstream) {
+ flb_plg_error(ctx->ins, "Connection initialization error");
+ goto error;
+ }
+
+ ctx->kinesis_client->upstream = upstream;
+ flb_output_upstream_set(upstream, ctx->ins);
+
+ ctx->kinesis_client->host = ctx->endpoint;
+
+ /* Export context */
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+
+error:
+ flb_free(session_name);
+ flb_plg_error(ctx->ins, "Initialization failed");
+ flb_kinesis_ctx_destroy(ctx);
+ return -1;
+}
+
+static struct flush *new_flush_buffer(const char *tag, int tag_len)
+{
+ struct flush *buf;
+
+
+ buf = flb_calloc(1, sizeof(struct flush));
+ if (!buf) {
+ flb_errno();
+ return NULL;
+ }
+
+ buf->tmp_buf = flb_malloc(sizeof(char) * PUT_RECORDS_PAYLOAD_SIZE);
+ if (!buf->tmp_buf) {
+ flb_errno();
+ kinesis_flush_destroy(buf);
+ return NULL;
+ }
+ buf->tmp_buf_size = PUT_RECORDS_PAYLOAD_SIZE;
+
+ buf->events = flb_malloc(sizeof(struct kinesis_event) * MAX_EVENTS_PER_PUT);
+ if (!buf->events) {
+ flb_errno();
+ kinesis_flush_destroy(buf);
+ return NULL;
+ }
+ buf->events_capacity = MAX_EVENTS_PER_PUT;
+
+ buf->tag = tag;
+ buf->tag_len = tag_len;
+
+ return buf;
+}
+
+static void cb_kinesis_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ struct flb_kinesis *ctx = out_context;
+ int ret;
+ struct flush *buf;
+ (void) i_ins;
+ (void) config;
+
+ buf = new_flush_buffer(event_chunk->tag, flb_sds_len(event_chunk->tag));
+ if (!buf) {
+ flb_plg_error(ctx->ins, "Failed to construct flush buffer");
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ ret = process_and_send_to_kinesis(ctx, buf,
+ event_chunk->data,
+ event_chunk->size);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to send records to kinesis");
+ kinesis_flush_destroy(buf);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ flb_plg_debug(ctx->ins, "Processed %d records, sent %d to %s",
+ buf->records_processed, buf->records_sent, ctx->stream_name);
+ kinesis_flush_destroy(buf);
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+void flb_kinesis_ctx_destroy(struct flb_kinesis *ctx)
+{
+ if (ctx != NULL) {
+ if (ctx->base_aws_provider) {
+ flb_aws_provider_destroy(ctx->base_aws_provider);
+ }
+
+ if (ctx->aws_provider) {
+ flb_aws_provider_destroy(ctx->aws_provider);
+ }
+
+ if (ctx->cred_tls) {
+ flb_tls_destroy(ctx->cred_tls);
+ }
+
+ if (ctx->sts_tls) {
+ flb_tls_destroy(ctx->sts_tls);
+ }
+
+ if (ctx->client_tls) {
+ flb_tls_destroy(ctx->client_tls);
+ }
+
+ if (ctx->kinesis_client) {
+ flb_aws_client_destroy(ctx->kinesis_client);
+ }
+
+ if (ctx->custom_endpoint == FLB_FALSE) {
+ flb_free(ctx->endpoint);
+ }
+
+ if (ctx->uuid) {
+ flb_free(ctx->uuid);
+ }
+
+ flb_free(ctx);
+ }
+}
+
+static int cb_kinesis_exit(void *data, struct flb_config *config)
+{
+ struct flb_kinesis *ctx = data;
+
+ flb_kinesis_ctx_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "region", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kinesis, region),
+ "The AWS region of your kinesis stream"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "stream", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kinesis, stream_name),
+ "Kinesis stream name"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "time_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kinesis, time_key),
+ "Add the timestamp to the record under this key. By default the timestamp "
+ "from Fluent Bit will not be added to records sent to Kinesis."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "time_key_format", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kinesis, time_key_format),
+ "strftime compliant format string for the timestamp; for example, "
+ "the default is '%Y-%m-%dT%H:%M:%S'. This option is used with time_key. "
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "role_arn", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kinesis, role_arn),
+ "ARN of an IAM role to assume (ex. for cross account access)."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "endpoint", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify a custom endpoint for the Kinesis API"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "sts_endpoint", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kinesis, sts_endpoint),
+ "Custom endpoint for the STS API."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "external_id", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kinesis, external_id),
+ "Specify an external ID for the STS API, can be used with the role_arn parameter if your role "
+ "requires an external ID."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "log_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kinesis, log_key),
+ "By default, the whole log record will be sent to Kinesis. "
+ "If you specify a key name with this option, then only the value of "
+ "that key will be sent to Kinesis. For example, if you are using "
+ "the Fluentd Docker log driver, you can specify `log_key log` and only "
+ "the log message will be sent to Kinesis."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "auto_retry_requests", "true",
+ 0, FLB_TRUE, offsetof(struct flb_kinesis, retry_requests),
+ "Immediately retry failed requests to AWS services once. This option "
+ "does not affect the normal Fluent Bit retry mechanism with backoff. "
+ "Instead, it enables an immediate retry with no delay for networking "
+ "errors, which may help improve throughput when there are transient/random "
+ "networking issues."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "profile", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_kinesis, profile),
+ "AWS Profile name. AWS Profiles can be configured with AWS CLI and are usually stored in "
+ "$HOME/.aws/ directory."
+ },
+
+ /* EOF */
+ {0}
+};
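+
+/*
+ * Illustrative configuration sketch using the properties defined above
+ * (the region and stream names below are placeholders):
+ *
+ *   [OUTPUT]
+ *       Name    kinesis_streams
+ *       Match   *
+ *       region  us-east-1
+ *       stream  my-kinesis-stream
+ */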
+
+/* Plugin registration */
+struct flb_output_plugin out_kinesis_streams_plugin = {
+ .name = "kinesis_streams",
+ .description = "Send logs to Amazon Kinesis Streams",
+ .cb_init = cb_kinesis_init,
+ .cb_flush = cb_kinesis_flush,
+ .cb_exit = cb_kinesis_exit,
+ .workers = 1,
+ .flags = 0,
+
+ /* Configuration */
+ .config_map = config_map,
+};
diff --git a/src/fluent-bit/plugins/out_kinesis_streams/kinesis.h b/src/fluent-bit/plugins/out_kinesis_streams/kinesis.h
new file mode 100644
index 000000000..75d41e107
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kinesis_streams/kinesis.h
@@ -0,0 +1,109 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_KINESIS_H
+#define FLB_OUT_KINESIS_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_aws_util.h>
+#include <fluent-bit/flb_signv4.h>
+
+#define DEFAULT_TIME_KEY_FORMAT "%Y-%m-%dT%H:%M:%S"
+
+/* buffers used for each flush */
+struct flush {
+ /* temporary buffer for storing the serialized event messages */
+ char *tmp_buf;
+ size_t tmp_buf_size;
+ /* current index of tmp_buf */
+ size_t tmp_buf_offset;
+
+ /* projected final size of the payload for this flush */
+ size_t data_size;
+
+ /* log records- each of these has a pointer to their message in tmp_buf */
+ struct kinesis_event *events;
+ int events_capacity;
+ /* current event */
+ int event_index;
+
+ /* the payload of the API request */
+ char *out_buf;
+ size_t out_buf_size;
+
+ /* buffer used to temporarily hold an event during processing */
+ char *event_buf;
+ size_t event_buf_size;
+
+ int records_sent;
+ int records_processed;
+
+ const char *tag;
+ int tag_len;
+};
+
+struct kinesis_event {
+ char *json;
+ size_t len;
+ struct timespec timestamp;
+};
+
+struct flb_kinesis {
+ /*
+ * TLS instances can not be re-used. So we have one for:
+ * - Base cred provider (needed for EKS provider)
+ * - STS Assume role provider
+     * - The Kinesis client for this plugin
+ */
+ struct flb_tls *cred_tls;
+ struct flb_tls *sts_tls;
+ struct flb_tls *client_tls;
+ struct flb_aws_provider *aws_provider;
+ struct flb_aws_provider *base_aws_provider;
+ struct flb_aws_client *kinesis_client;
+
+ /* configuration options */
+ const char *stream_name;
+ const char *time_key;
+ const char *time_key_format;
+ const char *region;
+ const char *role_arn;
+ const char *log_key;
+ const char *external_id;
+ int retry_requests;
+ char *sts_endpoint;
+ int custom_endpoint;
+ char *profile;
+
+ /* in this plugin the 'random' partition key is a uuid + fluent tag + timestamp */
+ char *uuid;
+
+ /* must be freed on shutdown if custom_endpoint is not set */
+ char *endpoint;
+
+ /* Plugin output instance reference */
+ struct flb_output_instance *ins;
+};
+
+void flb_kinesis_ctx_destroy(struct flb_kinesis *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_kinesis_streams/kinesis_api.c b/src/fluent-bit/plugins/out_kinesis_streams/kinesis_api.c
new file mode 100644
index 000000000..9124657bc
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kinesis_streams/kinesis_api.c
@@ -0,0 +1,987 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_slist.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_macros.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_aws_util.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_base64.h>
+
+#include <monkey/mk_core.h>
+#include <msgpack.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#ifndef FLB_SYSTEM_WINDOWS
+#include <unistd.h>
+#endif
+
+#include "kinesis_api.h"
+
+#define ERR_CODE_EXCEEDED_THROUGHPUT "ProvisionedThroughputExceededException"
+
+static struct flb_aws_header put_records_target_header = {
+ .key = "X-Amz-Target",
+ .key_len = 12,
+ .val = "Kinesis_20131202.PutRecords",
+ .val_len = 27,
+};
+
+static inline int try_to_write(char *buf, int *off, size_t left,
+ const char *str, size_t str_len)
+{
+ if (str_len <= 0){
+ str_len = strlen(str);
+ }
+ if (left <= *off+str_len) {
+ return FLB_FALSE;
+ }
+ memcpy(buf+*off, str, str_len);
+ *off += str_len;
+ return FLB_TRUE;
+}
+
+/*
+ * Writes the "header" for a put_records payload
+ */
+static int init_put_payload(struct flb_kinesis *ctx, struct flush *buf,
+ int *offset)
+{
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "{\"StreamName\":\"", 15)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ ctx->stream_name, 0)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "\",\"Records\":[", 13)) {
+ goto error;
+ }
+ return 0;
+
+error:
+ return -1;
+}
+
+/*
+ * Simple and fast djb2-style hash of the tag, mixed with the current
+ * timestamp, used to create pseudo-random partition keys
+ */
+static flb_sds_t random_partition_key(const char *tag)
+{
+ int c;
+ unsigned long hash = 5381;
+ unsigned long hash2 = 5381;
+ flb_sds_t hash_str;
+ flb_sds_t tmp;
+ struct flb_time tm;
+
+ /* get current time */
+ flb_time_get(&tm);
+
+ /* compose hash */
+ while ((c = *tag++)) {
+ hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
+ }
+ hash2 = (unsigned long) hash2 * tm.tm.tv_sec * tm.tm.tv_nsec;
+
+ /* flb_sds_printf allocs if the incoming sds is not at least 64 bytes */
+ hash_str = flb_sds_create_size(64);
+ if (!hash_str) {
+ flb_errno();
+ return NULL;
+ }
+ tmp = flb_sds_printf(&hash_str, "%lu%lu", hash % 7919, hash2 % 7919);
+ if (!tmp) {
+ flb_errno();
+ flb_sds_destroy(hash_str);
+ return NULL;
+ }
+ hash_str = tmp;
+
+ return hash_str;
+}
+
+/*
+ * Writes one record to the output buffer as
+ * {"Data":"<base64 data>","PartitionKey":"<uuid prefix + tag/time hash>"}
+ */
+static int write_event(struct flb_kinesis *ctx, struct flush *buf,
+ struct kinesis_event *event, int *offset)
+{
+ flb_sds_t tag_timestamp = NULL;
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "{\"Data\":\"", 9)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ event->json, event->len)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "\",\"PartitionKey\":\"", 18)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ ctx->uuid, 10)) {
+ goto error;
+ }
+
+ tag_timestamp = random_partition_key(buf->tag);
+ if (!tag_timestamp) {
+ flb_plg_error(ctx->ins, "failed to generate partition key for %s", buf->tag);
+ goto error;
+ }
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ tag_timestamp, 0)) {
+ flb_sds_destroy(tag_timestamp);
+ goto error;
+ }
+ flb_sds_destroy(tag_timestamp);
+
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "\"}", 2)) {
+ goto error;
+ }
+
+ return 0;
+
+error:
+ return -1;
+}
+
+/* Terminates a PutRecords payload */
+static int end_put_payload(struct flb_kinesis *ctx, struct flush *buf,
+ int *offset)
+{
+ if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,
+ "]}", 2)) {
+ return -1;
+ }
+ buf->out_buf[*offset] = '\0';
+
+ return 0;
+}
+
+
+/*
+ * Processes the msgpack object and writes it into the flush buffer.
+ * Return values:
+ *  -1 = failure, record not added
+ *   0 = success, record added
+ *   1 = ran out of buffer space; the caller must send the batch and retry
+ *   2 = record could not be processed, discard it
+ */
+static int process_event(struct flb_kinesis *ctx, struct flush *buf,
+ const msgpack_object *obj, struct flb_time *tms)
+{
+ size_t written = 0;
+ int ret;
+ size_t size;
+ size_t b64_len;
+ struct kinesis_event *event;
+ char *tmp_buf_ptr;
+ char *time_key_ptr;
+ struct tm time_stamp;
+ struct tm *tmp;
+ size_t len;
+ size_t tmp_size;
+ char *out_buf;
+
+ tmp_buf_ptr = buf->tmp_buf + buf->tmp_buf_offset;
+ ret = flb_msgpack_to_json(tmp_buf_ptr,
+ buf->tmp_buf_size - buf->tmp_buf_offset,
+ obj);
+ if (ret <= 0) {
+ /*
+ * negative value means failure to write to buffer,
+ * which means we ran out of space, and must send the logs
+ *
+ * TODO: This could also incorrectly be triggered if the record
+ * is larger than MAX_EVENT_SIZE
+ */
+ return 1;
+ }
+ written = (size_t) ret;
+
+ /* Discard empty messages (written == 2 means '""') */
+ if (written <= 2) {
+ flb_plg_debug(ctx->ins, "Found empty log message, %s", ctx->stream_name);
+ return 2;
+ }
+
+ if (ctx->log_key) {
+ /*
+ * flb_msgpack_to_json will encase the value in quotes
+ * We don't want that for log_key, so we ignore the first
+ * and last character
+ */
+ written -= 2;
+ tmp_buf_ptr++; /* pass over the opening quote */
+ buf->tmp_buf_offset++;
+ }
+
+ /* is (written + 1) because we still have to append newline */
+ if ((written + 1) >= MAX_EVENT_SIZE) {
+ flb_plg_warn(ctx->ins, "[size=%zu] Discarding record which is larger than "
+ "max size allowed by Kinesis, %s", written + 1,
+ ctx->stream_name);
+ return 2;
+ }
+
+ if (ctx->time_key) {
+ /* append time_key to end of json string */
+ tmp = gmtime_r(&tms->tm.tv_sec, &time_stamp);
+ if (!tmp) {
+ flb_plg_error(ctx->ins, "Could not create time stamp for %lu unix "
+ "seconds, discarding record, %s", tms->tm.tv_sec,
+ ctx->stream_name);
+ return 2;
+ }
+
+ /* format time output and return the length */
+ len = flb_aws_strftime_precision(&out_buf, ctx->time_key_format, tms);
+
+ /* how much space do we have left */
+ tmp_size = (buf->tmp_buf_size - buf->tmp_buf_offset) - written;
+ if (len > tmp_size) {
+ /* not enough space - tell caller to retry */
+ flb_free(out_buf);
+ return 1;
+ }
+
+ if (len == 0) {
+ /*
+ * when the length of out_buf is not enough for time_key_format,
+ * time_key will not be added to record.
+ */
+ flb_plg_error(ctx->ins, "Failed to add time_key %s to record, %s",
+ ctx->time_key, ctx->stream_name);
+ flb_free(out_buf);
+ }
+ else {
+ time_key_ptr = tmp_buf_ptr + written - 1;
+ memcpy(time_key_ptr, ",", 1);
+ time_key_ptr++;
+ memcpy(time_key_ptr, "\"", 1);
+ time_key_ptr++;
+ memcpy(time_key_ptr, ctx->time_key, strlen(ctx->time_key));
+ time_key_ptr += strlen(ctx->time_key);
+ memcpy(time_key_ptr, "\":\"", 3);
+ time_key_ptr += 3;
+
+ /* merge out_buf to time_key_ptr */
+ memcpy(time_key_ptr, out_buf, len);
+ flb_free(out_buf);
+ time_key_ptr += len;
+ memcpy(time_key_ptr, "\"}", 2);
+ time_key_ptr += 2;
+ written = (time_key_ptr - tmp_buf_ptr);
+ }
+ }
+
+ /* is (written + 1) because we still have to append newline */
+ if ((written + 1) >= MAX_EVENT_SIZE) {
+ flb_plg_warn(ctx->ins, "[size=%zu] Discarding record which is larger than "
+ "max size allowed by Kinesis, %s", written + 1,
+ ctx->stream_name);
+ return 2;
+ }
+
+ /* append newline to record */
+
+ tmp_size = (buf->tmp_buf_size - buf->tmp_buf_offset) - written;
+ if (tmp_size <= 1) {
+ /* no space left- tell caller to retry */
+ return 1;
+ }
+
+ memcpy(tmp_buf_ptr + written, "\n", 1);
+ written++;
+
+ /*
+ * check if event_buf is initialized and big enough
+ * Base64 encoding will increase size by ~4/3
+ */
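+    /* the 1.5 factor plus 4 bytes gives headroom over the exact 4/3 base64 expansion and padding */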
+ size = (written * 1.5) + 4;
+ if (buf->event_buf == NULL || buf->event_buf_size < size) {
+ flb_free(buf->event_buf);
+ buf->event_buf = flb_malloc(size);
+ buf->event_buf_size = size;
+ if (buf->event_buf == NULL) {
+ flb_errno();
+ return -1;
+ }
+ }
+
+ tmp_buf_ptr = buf->tmp_buf + buf->tmp_buf_offset;
+ ret = flb_base64_encode((unsigned char *) buf->event_buf, size, &b64_len,
+ (unsigned char *) tmp_buf_ptr, written);
+ if (ret != 0) {
+ flb_errno();
+ return -1;
+ }
+ written = b64_len;
+
+ tmp_buf_ptr = buf->tmp_buf + buf->tmp_buf_offset;
+ if ((buf->tmp_buf_size - buf->tmp_buf_offset) < written) {
+ /* not enough space, send logs */
+ return 1;
+ }
+
+ /* copy serialized json to tmp_buf */
+ memcpy(tmp_buf_ptr, buf->event_buf, written);
+
+ buf->tmp_buf_offset += written;
+ event = &buf->events[buf->event_index];
+ event->json = tmp_buf_ptr;
+ event->len = written;
+ event->timestamp.tv_sec = tms->tm.tv_sec;
+ event->timestamp.tv_nsec = tms->tm.tv_nsec;
+
+ return 0;
+}
+
+/* Resets or inits a flush struct */
+static void reset_flush_buf(struct flb_kinesis *ctx, struct flush *buf) {
+ buf->event_index = 0;
+ buf->tmp_buf_offset = 0;
+ buf->data_size = PUT_RECORDS_HEADER_LEN + PUT_RECORDS_FOOTER_LEN;
+ buf->data_size += strlen(ctx->stream_name);
+}
+
+/* constructs a put payload, and then sends */
+static int send_log_events(struct flb_kinesis *ctx, struct flush *buf) {
+ int ret;
+ int offset;
+ int i;
+ struct kinesis_event *event;
+
+ if (buf->event_index <= 0) {
+ /*
+ * event_index should always be 1 more than the actual last event index
+ * when this function is called.
+ * Except in the case where send_log_events() is called at the end of
+ * process_and_send_to_kinesis. If all records were already sent, event_index
+ * will be 0. Hence this check.
+ */
+ return 0;
+ }
+
+ /* alloc out_buf if needed */
+ if (buf->out_buf == NULL || buf->out_buf_size < buf->data_size) {
+ if (buf->out_buf != NULL) {
+ flb_free(buf->out_buf);
+ }
+ buf->out_buf = flb_malloc(buf->data_size + 1);
+ if (!buf->out_buf) {
+ flb_errno();
+ return -1;
+ }
+ buf->out_buf_size = buf->data_size;
+ }
+
+ offset = 0;
+ ret = init_put_payload(ctx, buf, &offset);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to initialize PutRecords payload, %s",
+ ctx->stream_name);
+ return -1;
+ }
+
+ for (i = 0; i < buf->event_index; i++) {
+ event = &buf->events[i];
+ ret = write_event(ctx, buf, event, &offset);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to write log record %d to "
+ "payload buffer, %s", i, ctx->stream_name);
+ return -1;
+ }
+ if (i != (buf->event_index -1)) {
+ if (!try_to_write(buf->out_buf, &offset, buf->out_buf_size,
+ ",", 1)) {
+ flb_plg_error(ctx->ins, "Could not terminate record with ','");
+ return -1;
+ }
+ }
+ }
+
+ ret = end_put_payload(ctx, buf, &offset);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Could not complete PutRecords payload");
+ return -1;
+ }
+ flb_plg_debug(ctx->ins, "kinesis:PutRecords: events=%d, payload=%d bytes", i, offset);
+ ret = put_records(ctx, buf, (size_t) offset, i);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to send log records");
+ return -1;
+ }
+ buf->records_sent += i;
+
+ return 0;
+}
+
+/*
+ * Processes the msgpack object, sends the current batch if needed
+ */
+static int add_event(struct flb_kinesis *ctx, struct flush *buf,
+ const msgpack_object *obj, struct flb_time *tms)
+{
+ int ret;
+ struct kinesis_event *event;
+ int retry_add = FLB_FALSE;
+ size_t event_bytes = 0;
+
+ if (buf->event_index == 0) {
+ /* init */
+ reset_flush_buf(ctx, buf);
+ }
+
+retry_add_event:
+ retry_add = FLB_FALSE;
+ ret = process_event(ctx, buf, obj, tms);
+ if (ret < 0) {
+ return -1;
+ }
+ else if (ret == 1) {
+ if (buf->event_index <= 0) {
+ /* somehow the record was larger than our entire request buffer */
+ flb_plg_warn(ctx->ins, "Discarding massive log record, %s",
+ ctx->stream_name);
+ return 0; /* discard this record and return to caller */
+ }
+ /* send logs and then retry the add */
+ retry_add = FLB_TRUE;
+ goto send;
+ } else if (ret == 2) {
+ /* discard this record and return to caller */
+ flb_plg_warn(ctx->ins, "Discarding large or unprocessable record, %s",
+ ctx->stream_name);
+ return 0;
+ }
+
+ event = &buf->events[buf->event_index];
+ event_bytes = event->len + PUT_RECORDS_PER_RECORD_LEN;
+
+ if ((buf->data_size + event_bytes) > PUT_RECORDS_PAYLOAD_SIZE) {
+ if (buf->event_index <= 0) {
+ /* somehow the record was larger than our entire request buffer */
+ flb_plg_warn(ctx->ins, "[size=%zu] Discarding massive log record, %s",
+ event_bytes, ctx->stream_name);
+ return 0; /* discard this record and return to caller */
+ }
+ /* do not send this event */
+ retry_add = FLB_TRUE;
+ goto send;
+ }
+
+ /* send is not needed yet, return to caller */
+ buf->data_size += event_bytes;
+ buf->event_index++;
+
+ if (buf->event_index == MAX_EVENTS_PER_PUT) {
+ goto send;
+ }
+
+ return 0;
+
+send:
+ ret = send_log_events(ctx, buf);
+ reset_flush_buf(ctx, buf);
+ if (ret < 0) {
+ return -1;
+ }
+
+ if (retry_add == FLB_TRUE) {
+ goto retry_add_event;
+ }
+
+ return 0;
+}
+
+/*
+ * Main routine: processes msgpack records and sends them in batches.
+ * The return value is the number of events processed (the number sent is stored in buf).
+ */
+int process_and_send_to_kinesis(struct flb_kinesis *ctx, struct flush *buf,
+ const char *data, size_t bytes)
+{
+ int i = 0;
+ size_t map_size;
+ msgpack_object map;
+ msgpack_object_kv *kv;
+ msgpack_object key;
+ msgpack_object val;
+ char *key_str = NULL;
+ size_t key_str_size = 0;
+ int j;
+ int ret;
+ int check = FLB_FALSE;
+ int found = FLB_FALSE;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ map = *log_event.body;
+ map_size = map.via.map.size;
+
+ if (ctx->log_key) {
+ key_str = NULL;
+ key_str_size = 0;
+ check = FLB_FALSE;
+ found = FLB_FALSE;
+
+ kv = map.via.map.ptr;
+
+ for(j=0; j < map_size; j++) {
+ key = (kv+j)->key;
+ if (key.type == MSGPACK_OBJECT_BIN) {
+ key_str = (char *) key.via.bin.ptr;
+ key_str_size = key.via.bin.size;
+ check = FLB_TRUE;
+ }
+ if (key.type == MSGPACK_OBJECT_STR) {
+ key_str = (char *) key.via.str.ptr;
+ key_str_size = key.via.str.size;
+ check = FLB_TRUE;
+ }
+
+ if (check == FLB_TRUE) {
+ if (strncmp(ctx->log_key, key_str, key_str_size) == 0) {
+ found = FLB_TRUE;
+ val = (kv+j)->val;
+ ret = add_event(ctx, buf, &val, &log_event.timestamp);
+ if (ret < 0 ) {
+ goto error;
+ }
+ }
+ }
+
+ }
+ if (found == FLB_FALSE) {
+ flb_plg_error(ctx->ins, "Could not find log_key '%s' in record, %s",
+ ctx->log_key, ctx->stream_name);
+ }
+ else {
+ i++;
+ }
+ continue;
+ }
+
+ ret = add_event(ctx, buf, &map, &log_event.timestamp);
+ if (ret < 0 ) {
+ goto error;
+ }
+ i++;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ /* send any remaining events */
+ ret = send_log_events(ctx, buf);
+ reset_flush_buf(ctx, buf);
+ if (ret < 0) {
+ return -1;
+ }
+
+ /* return number of events processed */
+ buf->records_processed = i;
+ return i;
+
+error:
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return -1;
+}
+
+/*
+ * Returns number of failed records on success, -1 on failure
+ */
+static int process_api_response(struct flb_kinesis *ctx,
+ struct flb_http_client *c)
+{
+ int i;
+ int k;
+ int w;
+ int ret;
+ int failed_records = -1;
+ int root_type;
+ char *out_buf;
+ int throughput_exceeded = FLB_FALSE;
+ size_t off = 0;
+ size_t out_size;
+ msgpack_unpacked result;
+ msgpack_object root;
+ msgpack_object key;
+ msgpack_object val;
+ msgpack_object response;
+ msgpack_object response_key;
+ msgpack_object response_val;
+
+ if (strstr(c->resp.payload, "\"FailedRecordCount\":0")) {
+ return 0;
+ }
+
+ /* Convert JSON payload to msgpack */
+ ret = flb_pack_json(c->resp.payload, c->resp.payload_size,
+ &out_buf, &out_size, &root_type, NULL);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not pack/validate JSON API response\n%s",
+ c->resp.payload);
+ return -1;
+ }
+
+ /* Lookup error field */
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, out_buf, out_size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ flb_plg_error(ctx->ins, "Cannot unpack response to find error\n%s",
+ c->resp.payload);
+ failed_records = -1;
+ goto done;
+ }
+
+ root = result.data;
+ if (root.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected payload type=%i",
+ root.type);
+ failed_records = -1;
+ goto done;
+ }
+
+ for (i = 0; i < root.via.map.size; i++) {
+ key = root.via.map.ptr[i].key;
+ if (key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "unexpected key type=%i",
+ key.type);
+ failed_records = -1;
+ goto done;
+ }
+
+        if (key.via.str.size >= 17 &&
+            strncmp(key.via.str.ptr, "FailedRecordCount", 17) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ flb_plg_error(ctx->ins, "unexpected 'FailedRecordCount' value type=%i",
+ val.type);
+ failed_records = -1;
+ goto done;
+ }
+
+ failed_records = val.via.u64;
+ if (failed_records == 0) {
+ /* no need to check RequestResponses field */
+ goto done;
+ }
+ }
+
+        if (key.via.str.size >= 7 &&
+            strncmp(key.via.str.ptr, "Records", 7) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_ARRAY) {
+ flb_plg_error(ctx->ins, "unexpected 'Records' value type=%i",
+ val.type);
+ failed_records = -1;
+ goto done;
+ }
+
+ if (val.via.array.size == 0) {
+ flb_plg_error(ctx->ins, "'Records' field in response is empty");
+ failed_records = -1;
+ goto done;
+ }
+
+ for (k = 0; k < val.via.array.size; k++) {
+ /* iterate through the responses */
+ response = val.via.array.ptr[k];
+ if (response.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected 'Records[%d]' value type=%i",
+ k, response.type);
+ failed_records = -1;
+ goto done;
+ }
+ for (w = 0; w < response.via.map.size; w++) {
+ /* iterate through the response's keys */
+ response_key = response.via.map.ptr[w].key;
+ if (response_key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "unexpected key type=%i",
+ response_key.type);
+ failed_records = -1;
+ goto done;
+ }
+ if (response_key.via.str.size >= 9 &&
+ strncmp(response_key.via.str.ptr, "ErrorCode", 9) == 0) {
+ response_val = response.via.map.ptr[w].val;
+ if (!throughput_exceeded &&
+ response_val.via.str.size >= 38 &&
+ (strncmp(response_val.via.str.ptr,
+ ERR_CODE_EXCEEDED_THROUGHPUT, 38) == 0)) {
+ throughput_exceeded = FLB_TRUE;
+ flb_plg_error(ctx->ins, "Throughput limits may have been exceeded, %s",
+ ctx->stream_name);
+ }
+ flb_plg_debug(ctx->ins, "Record %i failed with err_code=%.*s",
+ k, response_val.via.str.size,
+ response_val.via.str.ptr);
+ }
+ if (response_key.via.str.size >= 12 &&
+ strncmp(response_key.via.str.ptr, "ErrorMessage", 12) == 0) {
+ response_val = response.via.map.ptr[w].val;
+ flb_plg_debug(ctx->ins, "Record %i failed with err_msg=%.*s",
+ k, response_val.via.str.size,
+ response_val.via.str.ptr);
+ }
+ }
+ }
+ }
+ }
+
+ done:
+ flb_free(out_buf);
+ msgpack_unpacked_destroy(&result);
+ return failed_records;
+}
+
+static int plugin_under_test()
+{
+ if (getenv("FLB_KINESIS_PLUGIN_UNDER_TEST") != NULL) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+static char *mock_error_response(char *error_env_var)
+{
+ char *err_val = NULL;
+ char *error = NULL;
+ int len = 0;
+
+ err_val = getenv(error_env_var);
+ if (err_val != NULL && strlen(err_val) > 0) {
+ error = flb_malloc(strlen(err_val) + sizeof(char));
+ if (error == NULL) {
+ flb_errno();
+ return NULL;
+ }
+
+ len = strlen(err_val);
+ memcpy(error, err_val, len);
+ error[len] = '\0';
+ return error;
+ }
+
+ return NULL;
+}
+
+static int partial_success()
+{
+ char *err_val = NULL;
+
+ err_val = getenv("PARTIAL_SUCCESS_CASE");
+ if (err_val != NULL && strlen(err_val) > 0) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+static struct flb_http_client *mock_http_call(char *error_env_var)
+{
+ /* create an http client so that we can set the response */
+ struct flb_http_client *c = NULL;
+ char *error = mock_error_response(error_env_var);
+
+ c = flb_calloc(1, sizeof(struct flb_http_client));
+ if (!c) {
+ flb_errno();
+ flb_free(error);
+ return NULL;
+ }
+ mk_list_init(&c->headers);
+
+ if (error != NULL) {
+ c->resp.status = 400;
+ /* resp.data is freed on destroy, payload is supposed to reference it */
+ c->resp.data = error;
+ c->resp.payload = c->resp.data;
+ c->resp.payload_size = strlen(error);
+ }
+ else {
+ c->resp.status = 200;
+ c->resp.payload = "";
+ c->resp.payload_size = 0;
+ if (partial_success() == FLB_TRUE) {
+ /* mocked partial failure response */
+ c->resp.payload = "{\"FailedRecordCount\":2,\"Records\":[{\"SequenceNumber\":\"49543463076548007577105092703039560359975228518395012686\",\"ShardId\":\"shardId-000000000000\"},{\"ErrorCode\":\"ProvisionedThroughputExceededException\",\"ErrorMessage\":\"Rate exceeded for shard shardId-000000000001 in stream exampleStreamName under account 111111111111.\"},{\"ErrorCode\":\"InternalFailure\",\"ErrorMessage\":\"Internal service failure.\"}]}";
+ c->resp.payload_size = strlen(c->resp.payload);
+ }
+ else {
+ /* mocked success response */
+ c->resp.payload = "{\"FailedRecordCount\":0,\"Records\":[{\"SequenceNumber\":\"49543463076548007577105092703039560359975228518395019266\",\"ShardId\":\"shardId-000000000000\"},{\"SequenceNumber\":\"49543463076570308322303623326179887152428262250726293522\",\"ShardId\":\"shardId-000000000001\"},{\"SequenceNumber\":\"49543463076570308322303623326179887152428262250726293588\",\"ShardId\":\"shardId-000000000003\"}]}";
+ c->resp.payload_size = strlen(c->resp.payload);
+ }
+ }
+
+ return c;
+}
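+
+/*
+ * Sketch of how the mock path above can be driven from the environment; the
+ * values shown are placeholders for testing, not defaults:
+ *
+ *   FLB_KINESIS_PLUGIN_UNDER_TEST=true   routes put_records() to mock_http_call()
+ *   PARTIAL_SUCCESS_CASE=true            returns the mocked partial-failure payload
+ *   TEST_PUT_RECORDS_ERROR=<error body>  forces a mocked 400 response with that body
+ */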
+
+
+/*
+ * Returns -1 on failure, 0 on success
+ */
+int put_records(struct flb_kinesis *ctx, struct flush *buf,
+ size_t payload_size, int num_records)
+{
+
+ struct flb_http_client *c = NULL;
+ struct flb_aws_client *kinesis_client;
+ flb_sds_t error;
+ int failed_records = 0;
+
+ flb_plg_debug(ctx->ins, "Sending log records to stream %s",
+ ctx->stream_name);
+
+ if (plugin_under_test() == FLB_TRUE) {
+ c = mock_http_call("TEST_PUT_RECORDS_ERROR");
+ }
+ else {
+ kinesis_client = ctx->kinesis_client;
+ c = kinesis_client->client_vtable->request(kinesis_client, FLB_HTTP_POST,
+ "/", buf->out_buf, payload_size,
+ &put_records_target_header, 1);
+ }
+
+ if (c) {
+ flb_plg_debug(ctx->ins, "PutRecords http status=%d", c->resp.status);
+
+ if (c->resp.status == 200) {
+ /* Kinesis API can return partial success- check response */
+ if (c->resp.payload_size > 0) {
+ failed_records = process_api_response(ctx, c);
+ if (failed_records < 0) {
+ flb_plg_error(ctx->ins, "PutRecords response "
+ "could not be parsed, %s",
+ c->resp.payload);
+ flb_http_client_destroy(c);
+ return -1;
+ }
+ if (failed_records == num_records) {
+ flb_plg_error(ctx->ins, "PutRecords request returned "
+ "with no records successfully recieved, %s",
+ ctx->stream_name);
+ flb_http_client_destroy(c);
+ return -1;
+ }
+ if (failed_records > 0) {
+ flb_plg_error(ctx->ins, "%d out of %d records failed to be "
+ "delivered, will retry this batch, %s",
+ failed_records, num_records,
+ ctx->stream_name);
+ flb_http_client_destroy(c);
+ return -1;
+ }
+ }
+ flb_plg_debug(ctx->ins, "Sent events to %s", ctx->stream_name);
+ flb_http_client_destroy(c);
+ return 0;
+ }
+
+ /* Check error */
+ if (c->resp.payload_size > 0) {
+ error = flb_aws_error(c->resp.payload, c->resp.payload_size);
+ if (error != NULL) {
+ if (strcmp(error, ERR_CODE_EXCEEDED_THROUGHPUT) == 0) {
+ flb_plg_error(ctx->ins, "Throughput limits for %s "
+ "may have been exceeded.",
+ ctx->stream_name);
+ }
+ if (strncmp(error, "SerializationException", 22) == 0) {
+ /*
+                     * If this happens, we have a bug in the code.
+                     * The user should send us the output to debug.
+ */
+ flb_plg_error(ctx->ins, "<<------Bug in Code------>>");
+ printf("Malformed request: %s", buf->out_buf);
+ }
+ flb_aws_print_error(c->resp.payload, c->resp.payload_size,
+ "PutRecords", ctx->ins);
+ flb_sds_destroy(error);
+ }
+ else {
+ /* error could not be parsed, print raw response to debug */
+ flb_plg_debug(ctx->ins, "Raw response: %s", c->resp.payload);
+ }
+ }
+ }
+
+ flb_plg_error(ctx->ins, "Failed to send log records to %s", ctx->stream_name);
+ if (c) {
+ flb_http_client_destroy(c);
+ }
+ return -1;
+}
+
+
+void kinesis_flush_destroy(struct flush *buf)
+{
+ if (buf) {
+ flb_free(buf->tmp_buf);
+ flb_free(buf->out_buf);
+ flb_free(buf->events);
+ flb_free(buf->event_buf);
+ flb_free(buf);
+ }
+}
diff --git a/src/fluent-bit/plugins/out_kinesis_streams/kinesis_api.h b/src/fluent-bit/plugins/out_kinesis_streams/kinesis_api.h
new file mode 100644
index 000000000..e44de6d4d
--- /dev/null
+++ b/src/fluent-bit/plugins/out_kinesis_streams/kinesis_api.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_KINESIS_API
+#define FLB_OUT_KINESIS_API
+
+#define PUT_RECORDS_PAYLOAD_SIZE 5242880
+#define MAX_EVENTS_PER_PUT 500
+#define MAX_EVENT_SIZE 1048556 /* 1048576 - 20 bytes for partition key */
+
+/* number of characters needed to 'start' a PutRecords payload */
+#define PUT_RECORDS_HEADER_LEN 30
+/* number of characters needed per record in a PutRecords payload */
+#define PUT_RECORDS_PER_RECORD_LEN 48
+/* number of characters needed to 'end' a PutRecords payload */
+#define PUT_RECORDS_FOOTER_LEN 4
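+
+/*
+ * Illustrative arithmetic (not used directly by the code): with the limits
+ * above, a single PutRecords request carries at most MAX_EVENTS_PER_PUT (500)
+ * events and stays under PUT_RECORDS_PAYLOAD_SIZE (5242880 bytes, i.e. 5 MiB),
+ * where the JSON framing costs roughly PUT_RECORDS_HEADER_LEN +
+ * (records * PUT_RECORDS_PER_RECORD_LEN) + PUT_RECORDS_FOOTER_LEN characters
+ * on top of the encoded event data.
+ */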
+
+#include "kinesis.h"
+
+void kinesis_flush_destroy(struct flush *buf);
+
+int process_and_send_to_kinesis(struct flb_kinesis *ctx, struct flush *buf,
+ const char *data, size_t bytes);
+
+int put_records(struct flb_kinesis *ctx, struct flush *buf,
+ size_t payload_size, int num_records);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_lib/CMakeLists.txt b/src/fluent-bit/plugins/out_lib/CMakeLists.txt
new file mode 100644
index 000000000..b50ecc2ed
--- /dev/null
+++ b/src/fluent-bit/plugins/out_lib/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ out_lib.c)
+
+FLB_PLUGIN(out_lib "${src}" "")
+#target_link_libraries(flb-plugin-out_lib msgpack)
diff --git a/src/fluent-bit/plugins/out_lib/out_lib.c b/src/fluent-bit/plugins/out_lib/out_lib.c
new file mode 100644
index 000000000..da0972243
--- /dev/null
+++ b/src/fluent-bit/plugins/out_lib/out_lib.c
@@ -0,0 +1,222 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_lib.h>
+#include <msgpack.h>
+
+#include "out_lib.h"
+
+#define PLUGIN_NAME "out_lib"
+
+static int configure(struct flb_out_lib_config *ctx,
+ struct flb_output_instance *ins)
+{
+ const char *tmp;
+
+ tmp = flb_output_get_property("format", ins);
+ if (!tmp) {
+ ctx->format = FLB_OUT_LIB_FMT_MSGPACK;
+ }
+ else {
+ if (strcasecmp(tmp, FLB_FMT_STR_MSGPACK) == 0) {
+ ctx->format = FLB_OUT_LIB_FMT_MSGPACK;
+ }
+ else if (strcasecmp(tmp, FLB_FMT_STR_JSON) == 0) {
+ ctx->format = FLB_OUT_LIB_FMT_JSON;
+ }
+ }
+
+ tmp = flb_output_get_property("max_records", ins);
+ if (tmp) {
+ ctx->max_records = atoi(tmp);
+ }
+ else {
+ ctx->max_records = 0;
+ }
+
+ return 0;
+}
+
+
+/**
+ * User callback is passed from flb_output(ctx, output, callback)
+ *
+ * The prototype of the callback should be
+ *    int (*callback)(void *record, size_t size, void *data);
+ * @param record The record data that comes from the input plugin.
+ * @param size   The size of the record data.
+ * @param data   The user data set when registering the callback.
+ * @return success ? 0 : negative value
+ *
+ */
+static int out_lib_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ struct flb_out_lib_config *ctx = NULL;
+ struct flb_lib_out_cb *cb_data = data;
+ (void) config;
+
+ ctx = flb_calloc(1, sizeof(struct flb_out_lib_config));
+ if (ctx == NULL) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+
+ if (cb_data) {
+ /* Set user callback and data */
+ ctx->cb_func = cb_data->cb;
+ ctx->cb_data = cb_data->data;
+ }
+ else {
+ flb_plg_error(ctx->ins, "Callback is not set");
+ flb_free(ctx);
+ return -1;
+ }
+
+ configure(ctx, ins);
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+}
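+
+/*
+ * Minimal callback sketch (illustrative only, not shipped with the plugin),
+ * assuming the embedding application owns the delivered buffer and releases
+ * it once consumed:
+ *
+ *   static int my_record_cb(void *record, size_t size, void *data)
+ *   {
+ *       fwrite(record, 1, size, stdout);
+ *       flb_lib_free(record);
+ *       return 0;
+ *   }
+ *
+ * The callback and its opaque user data reach this init function through the
+ * 'cb' and 'data' members of struct flb_lib_out_cb.
+ */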
+
+static void out_lib_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int len;
+ int count = 0;
+ size_t off = 0;
+ size_t last_off = 0;
+ size_t data_size = 0;
+ size_t alloc_size = 0;
+ size_t out_size = 0;
+ char *buf = NULL;
+ char *out_buf = NULL;
+ char *data_for_user = NULL;
+ msgpack_object *obj;
+ msgpack_unpacked result;
+ struct flb_time tm;
+ struct flb_out_lib_config *ctx = out_context;
+ (void) i_ins;
+ (void) config;
+
+ msgpack_unpacked_init(&result);
+ while (msgpack_unpack_next(&result,
+ event_chunk->data,
+ event_chunk->size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ if (ctx->max_records > 0 && count >= ctx->max_records) {
+ break;
+ }
+ switch(ctx->format) {
+ case FLB_OUT_LIB_FMT_MSGPACK:
+ alloc_size = (off - last_off);
+
+ /* copy raw bytes */
+ data_for_user = flb_malloc(alloc_size);
+ if (!data_for_user) {
+ flb_errno();
+ msgpack_unpacked_destroy(&result);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ memcpy(data_for_user,
+ (char *) event_chunk->data + last_off, alloc_size);
+ data_size = alloc_size;
+ break;
+ case FLB_OUT_LIB_FMT_JSON:
+#ifdef FLB_HAVE_METRICS
+ if (event_chunk->type == FLB_EVENT_TYPE_METRICS) {
+ alloc_size = (off - last_off) + 4096;
+ buf = flb_msgpack_to_json_str(alloc_size, &result.data);
+ if (buf == NULL) {
+ msgpack_unpacked_destroy(&result);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+ data_size = strlen(buf);
+ data_for_user = buf;
+ }
+ else {
+#endif
+ /* JSON is larger than msgpack */
+ alloc_size = (off - last_off) + 128;
+
+ flb_time_pop_from_msgpack(&tm, &result, &obj);
+ buf = flb_msgpack_to_json_str(alloc_size, obj);
+ if (!buf) {
+ msgpack_unpacked_destroy(&result);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ len = strlen(buf);
+ out_size = len + 32;
+ out_buf = flb_malloc(out_size);
+ if (!out_buf) {
+ flb_errno();
+ msgpack_unpacked_destroy(&result);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ len = snprintf(out_buf, out_size, "[%f,%s]",
+ flb_time_to_double(&tm),
+ buf);
+ flb_free(buf);
+ data_for_user = out_buf;
+ data_size = len;
+#ifdef FLB_HAVE_METRICS
+ }
+#endif
+ break;
+ }
+
+ /* Invoke user callback */
+ ctx->cb_func(data_for_user, data_size, ctx->cb_data);
+ last_off = off;
+ count++;
+ }
+
+ msgpack_unpacked_destroy(&result);
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int out_lib_exit(void *data, struct flb_config *config)
+{
+ struct flb_out_lib_config *ctx = data;
+
+ flb_free(ctx);
+ return 0;
+}
+
+struct flb_output_plugin out_lib_plugin = {
+ .name = "lib",
+ .description = "Library mode Output",
+ .cb_init = out_lib_init,
+ .cb_flush = out_lib_flush,
+ .cb_exit = out_lib_exit,
+ .event_type = FLB_OUTPUT_LOGS | FLB_OUTPUT_METRICS,
+ .flags = 0,
+};
diff --git a/src/fluent-bit/plugins/out_lib/out_lib.h b/src/fluent-bit/plugins/out_lib/out_lib.h
new file mode 100644
index 000000000..5e0925ede
--- /dev/null
+++ b/src/fluent-bit/plugins/out_lib/out_lib.h
@@ -0,0 +1,42 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_LIB
+#define FLB_OUT_LIB
+
+#include <fluent-bit/flb_output_plugin.h>
+
+enum {
+ FLB_OUT_LIB_FMT_MSGPACK = 0,
+ FLB_OUT_LIB_FMT_JSON,
+ FLB_OUT_LIB_FMT_ERROR,
+};
+
+#define FLB_FMT_STR_MSGPACK "msgpack"
+#define FLB_FMT_STR_JSON "json"
+
+struct flb_out_lib_config {
+ int format;
+ int max_records;
+ int (*cb_func)(void *record, size_t size, void *data);
+ void *cb_data;
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_logdna/CMakeLists.txt b/src/fluent-bit/plugins/out_logdna/CMakeLists.txt
new file mode 100644
index 000000000..2bfa1f9d8
--- /dev/null
+++ b/src/fluent-bit/plugins/out_logdna/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ logdna.c
+ )
+
+FLB_PLUGIN(out_logdna "${src}" "")
diff --git a/src/fluent-bit/plugins/out_logdna/logdna.c b/src/fluent-bit/plugins/out_logdna/logdna.c
new file mode 100644
index 000000000..e3ab9f56f
--- /dev/null
+++ b/src/fluent-bit/plugins/out_logdna/logdna.c
@@ -0,0 +1,591 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_mp.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_env.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include "logdna.h"
+
+static inline int primary_key_check(msgpack_object k, char *name, int len)
+{
+ if (k.type != MSGPACK_OBJECT_STR) {
+ return FLB_FALSE;
+ }
+
+ if (k.via.str.size != len) {
+ return FLB_FALSE;
+ }
+
+ if (memcmp(k.via.str.ptr, name, len) == 0) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+/*
+ * This function looks for the following keys and adds them to the buffer:
+ *
+ * - level or severity
+ * - file
+ * - app
+ * - meta
+ */
+static int record_append_primary_keys(struct flb_logdna *ctx,
+ msgpack_object *map,
+ msgpack_packer *mp_sbuf)
+{
+ int i;
+ int c = 0;
+ msgpack_object *level = NULL;
+ msgpack_object *file = NULL;
+ msgpack_object *app = NULL;
+ msgpack_object *meta = NULL;
+ msgpack_object k;
+ msgpack_object v;
+
+    for (i = 0; i < map->via.map.size; i++) {
+ k = map->via.map.ptr[i].key;
+ v = map->via.map.ptr[i].val;
+
+ /* Level - optional */
+ if (!level &&
+ (primary_key_check(k, "level", 5) == FLB_TRUE ||
+ primary_key_check(k, "severity", 8) == FLB_TRUE)) {
+ level = &k;
+ msgpack_pack_str(mp_sbuf, 5);
+ msgpack_pack_str_body(mp_sbuf, "level", 5);
+ msgpack_pack_object(mp_sbuf, v);
+ c++;
+ }
+
+ /* Meta - optional */
+ if (!meta && primary_key_check(k, "meta", 4) == FLB_TRUE) {
+ meta = &k;
+ msgpack_pack_str(mp_sbuf, 4);
+ msgpack_pack_str_body(mp_sbuf, "meta", 4);
+ msgpack_pack_object(mp_sbuf, v);
+ c++;
+ }
+
+ /* File */
+ if (!file && primary_key_check(k, "file", 4) == FLB_TRUE) {
+ file = &k;
+ msgpack_pack_str(mp_sbuf, 4);
+ msgpack_pack_str_body(mp_sbuf, "file", 4);
+ msgpack_pack_object(mp_sbuf, v);
+ c++;
+ }
+
+ /* App */
+ if (primary_key_check(k, "app", 3) == FLB_TRUE) {
+ app = &k;
+ msgpack_pack_str(mp_sbuf, 3);
+ msgpack_pack_str_body(mp_sbuf, "app", 3);
+ msgpack_pack_object(mp_sbuf, v);
+ c++;
+ }
+ }
+
+    /* Set the global file name if the record did not provide one */
+ if (!file && ctx->file) {
+ msgpack_pack_str(mp_sbuf, 4);
+ msgpack_pack_str_body(mp_sbuf, "file", 4);
+ msgpack_pack_str(mp_sbuf, flb_sds_len(ctx->file));
+ msgpack_pack_str_body(mp_sbuf, ctx->file, flb_sds_len(ctx->file));
+ c++;
+ }
+
+
+ /* If no application name is set, set the default */
+ if (!app) {
+ msgpack_pack_str(mp_sbuf, 3);
+ msgpack_pack_str_body(mp_sbuf, "app", 3);
+ msgpack_pack_str(mp_sbuf, flb_sds_len(ctx->app));
+ msgpack_pack_str_body(mp_sbuf, ctx->app, flb_sds_len(ctx->app));
+ c++;
+ }
+
+ return c;
+}
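+
+/*
+ * Example (illustrative): for an incoming record such as
+ *   {"severity": "info", "app": "payments", "msg": "ok"}
+ * this function emits level=info and app=payments; since no 'file' key is
+ * present, the configured 'file' property (if any) is used instead, and the
+ * full record is still serialized into the 'line' field by the caller.
+ */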
+
+static flb_sds_t logdna_compose_payload(struct flb_logdna *ctx,
+ const void *data, size_t bytes,
+ const char *tag, int tag_len)
+{
+ int ret;
+ int len;
+ int total_lines;
+ int array_size = 0;
+ off_t map_off;
+ char *line_json;
+ flb_sds_t json;
+ msgpack_packer mp_pck;
+ msgpack_sbuffer mp_sbuf;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return NULL;
+ }
+
+ /* Count number of records */
+ total_lines = flb_mp_count(data, bytes);
+
+ /* Initialize msgpack buffers */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ msgpack_pack_map(&mp_pck, 1);
+
+ msgpack_pack_str(&mp_pck, 5);
+ msgpack_pack_str_body(&mp_pck, "lines", 5);
+
+ msgpack_pack_array(&mp_pck, total_lines);
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ map_off = mp_sbuf.size;
+
+ array_size = 2;
+ msgpack_pack_map(&mp_pck, array_size);
+
+ /*
+         * Append any primary keys found; the return value is the number of keys
+         * appended to the record, which we use to adjust the map header size.
+ */
+ ret = record_append_primary_keys(ctx, log_event.body, &mp_pck);
+ array_size += ret;
+
+ /* Timestamp */
+ msgpack_pack_str(&mp_pck, 9);
+ msgpack_pack_str_body(&mp_pck, "timestamp", 9);
+ msgpack_pack_int(&mp_pck, (int) flb_time_to_double(&log_event.timestamp));
+
+ /* Line */
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "line", 4);
+
+ line_json = flb_msgpack_to_json_str(1024, log_event.body);
+ len = strlen(line_json);
+ msgpack_pack_str(&mp_pck, len);
+ msgpack_pack_str_body(&mp_pck, line_json, len);
+ flb_free(line_json);
+
+ /* Adjust map header size */
+ flb_mp_set_map_header_size(mp_sbuf.data + map_off, array_size);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ json = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ return json;
+}
+
+static void logdna_config_destroy(struct flb_logdna *ctx)
+{
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ if (ctx->tags_formatted) {
+ flb_sds_destroy(ctx->tags_formatted);
+ }
+
+ flb_free(ctx);
+}
+
+static struct flb_logdna *logdna_config_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ int len = 0;
+ char *hostname;
+ flb_sds_t tmp;
+ flb_sds_t encoded;
+ struct mk_list *head;
+ struct flb_slist_entry *tag_entry;
+ struct flb_logdna *ctx;
+ struct flb_upstream *upstream;
+
+ /* Create context */
+ ctx = flb_calloc(1, sizeof(struct flb_logdna));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ /* Load config map */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ logdna_config_destroy(ctx);
+ return NULL;
+ }
+
+ /* validate API key */
+ if (!ctx->api_key) {
+ flb_plg_error(ins, "no `api_key` was set, this is a mandatory property");
+ logdna_config_destroy(ctx);
+ return NULL;
+ }
+
+ /*
+ * Tags: this value is a linked list of values created by the config map
+ * reader.
+ */
+ if (ctx->tags) {
+        /* For every tag, make sure no empty spaces exist */
+ mk_list_foreach(head, ctx->tags) {
+ tag_entry = mk_list_entry(head, struct flb_slist_entry, _head);
+ len += flb_sds_len(tag_entry->str) + 1;
+ }
+
+ /* Compose a full tag for URI request */
+ ctx->tags_formatted = flb_sds_create_size(len);
+ if (!ctx->tags_formatted) {
+ logdna_config_destroy(ctx);
+ return NULL;
+ }
+
+ mk_list_foreach(head, ctx->tags) {
+ tag_entry = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ encoded = flb_uri_encode(tag_entry->str,
+ flb_sds_len(tag_entry->str));
+ tmp = flb_sds_cat(ctx->tags_formatted,
+ encoded, flb_sds_len(encoded));
+ ctx->tags_formatted = tmp;
+ flb_sds_destroy(encoded);
+
+ if (tag_entry->_head.next != ctx->tags) {
+ tmp = flb_sds_cat(ctx->tags_formatted, ",", 1);
+ ctx->tags_formatted = tmp;
+ }
+ }
+ }
+
+ /*
+ * Hostname: if the hostname was not set manually, try to get it from the
+ * environment variable.
+ *
+ * Note that hostname is populated by a config map, and config maps are
+ * immutable so we use an internal variable to do a final composition
+ * if required.
+ */
+ if (!ctx->hostname) {
+ tmp = NULL;
+ hostname = (char *) flb_env_get(config->env, "HOSTNAME");
+ if (hostname) {
+ ctx->_hostname = flb_sds_create(hostname);
+ }
+ else {
+ ctx->_hostname = flb_sds_create("unknown");
+ }
+ }
+ else {
+ ctx->_hostname = flb_sds_create(ctx->hostname);
+ }
+
+ /* Bail if unsuccessful hostname creation */
+ if (!ctx->_hostname) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Create Upstream connection context */
+ upstream = flb_upstream_create(config,
+ ctx->logdna_host,
+ ctx->logdna_port,
+ FLB_IO_TLS, ins->tls);
+ if (!upstream) {
+ flb_free(ctx);
+ return NULL;
+ }
+ ctx->u = upstream;
+ flb_output_upstream_set(ctx->u, ins);
+
+ /* Set networking defaults */
+ flb_output_net_default(FLB_LOGDNA_HOST, atoi(FLB_LOGDNA_PORT), ins);
+ return ctx;
+}
+
+static int cb_logdna_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct flb_logdna *ctx;
+
+ ctx = logdna_config_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "cannot initialize configuration");
+ return -1;
+ }
+
+ flb_output_set_context(ins, ctx);
+
+ /*
+     * This plugin instance uses the HTTP client interface; let's register
+     * its debugging callbacks.
+ */
+ flb_output_set_http_debug_callbacks(ins);
+
+ flb_plg_info(ins, "configured, hostname=%s", ctx->hostname);
+ return 0;
+}
+
+static void cb_logdna_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ int out_ret = FLB_OK;
+ size_t b_sent;
+ flb_sds_t uri;
+ flb_sds_t tmp;
+ flb_sds_t payload;
+ struct flb_logdna *ctx = out_context;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+
+ /* Format the data to the expected LogDNA Payload */
+ payload = logdna_compose_payload(ctx,
+ event_chunk->data,
+ event_chunk->size,
+ event_chunk->tag,
+ flb_sds_len(event_chunk->tag));
+ if (!payload) {
+ flb_plg_error(ctx->ins, "cannot compose request payload");
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Lookup an available connection context */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "no upstream connections available");
+ flb_sds_destroy(payload);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Compose the HTTP URI */
+ uri = flb_sds_create_size(256);
+ if (!uri) {
+ flb_plg_error(ctx->ins, "cannot allocate buffer for URI");
+ flb_sds_destroy(payload);
+        flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ tmp = flb_sds_printf(&uri,
+ "/logs/ingest?hostname=%s&mac=%s&ip=%s&now=%lu&tags=%s",
+ ctx->_hostname,
+ ctx->mac_addr,
+ ctx->ip_addr,
+ time(NULL),
+ ctx->tags_formatted);
+ if (!tmp) {
+ flb_plg_error(ctx->ins, "error formatting URI");
+ flb_sds_destroy(payload);
+        flb_sds_destroy(uri);
+        flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Create HTTP client context */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, uri,
+ payload, flb_sds_len(payload),
+ ctx->logdna_host, ctx->logdna_port,
+ NULL, 0);
+ if (!c) {
+ flb_plg_error(ctx->ins, "cannot create HTTP client context");
+ flb_sds_destroy(uri);
+ flb_sds_destroy(payload);
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Set callback context to the HTTP client context */
+ flb_http_set_callback_context(c, ctx->ins->callback);
+
+ /* User Agent */
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+
+ /* Add Content-Type header */
+ flb_http_add_header(c,
+ FLB_LOGDNA_CT, sizeof(FLB_LOGDNA_CT) - 1,
+ FLB_LOGDNA_CT_JSON, sizeof(FLB_LOGDNA_CT_JSON) - 1);
+
+ /* Add auth */
+ flb_http_basic_auth(c, ctx->api_key, "");
+
+ flb_http_strip_port_from_host(c);
+
+ /* Send HTTP request */
+ ret = flb_http_do(c, &b_sent);
+
+ /* Destroy buffers */
+ flb_sds_destroy(uri);
+ flb_sds_destroy(payload);
+
+ /* Validate HTTP client return status */
+ if (ret == 0) {
+ /*
+ * Only allow the following HTTP status:
+ *
+ * - 200: OK
+ * - 201: Created
+ * - 202: Accepted
+         * - 203: Non-Authoritative Information
+ * - 204: No Content
+ * - 205: Reset content
+ *
+ */
+ if (c->resp.status < 200 || c->resp.status > 205) {
+ if (c->resp.payload) {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i\n%s",
+ ctx->logdna_host, ctx->logdna_port, c->resp.status,
+ c->resp.payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->logdna_host, ctx->logdna_port, c->resp.status);
+ }
+ out_ret = FLB_RETRY;
+ }
+ else {
+ if (c->resp.payload) {
+ flb_plg_info(ctx->ins, "%s:%i, HTTP status=%i\n%s",
+ ctx->logdna_host, ctx->logdna_port,
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_info(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->logdna_host, ctx->logdna_port,
+ c->resp.status);
+ }
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "could not flush records to %s:%s (http_do=%i)",
+ FLB_LOGDNA_HOST, FLB_LOGDNA_PORT, ret);
+ out_ret = FLB_RETRY;
+ }
+
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(out_ret);
+}
+
+static int cb_logdna_exit(void *data, struct flb_config *config)
+{
+ struct flb_logdna *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->_hostname) {
+ flb_sds_destroy(ctx->_hostname);
+ }
+ logdna_config_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "logdna_host", FLB_LOGDNA_HOST,
+ 0, FLB_TRUE, offsetof(struct flb_logdna, logdna_host),
+ "LogDNA Host address"
+ },
+
+ {
+ FLB_CONFIG_MAP_INT, "logdna_port", FLB_LOGDNA_PORT,
+ 0, FLB_TRUE, offsetof(struct flb_logdna, logdna_port),
+ "LogDNA TCP port"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "api_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_logdna, api_key),
+ "Logdna API key"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "hostname", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_logdna, hostname),
+ "Local Server or device host name"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "mac", "",
+ 0, FLB_TRUE, offsetof(struct flb_logdna, mac_addr),
+ "MAC address (optional)"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "ip", "",
+ 0, FLB_TRUE, offsetof(struct flb_logdna, ip_addr),
+ "IP address (optional)"
+ },
+
+ {
+ FLB_CONFIG_MAP_CLIST, "tags", "",
+ 0, FLB_TRUE, offsetof(struct flb_logdna, tags),
+ "Tags (optional)"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "file", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_logdna, file),
+ "Name of the monitored file (optional)"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "app", "Fluent Bit",
+ 0, FLB_TRUE, offsetof(struct flb_logdna, app),
+ "Name of the application generating the data (optional)"
+ },
+
+ /* EOF */
+ {0}
+
+};
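+
+/*
+ * Illustrative configuration sketch using the properties above (all values
+ * are placeholders, not defaults):
+ *
+ *   [OUTPUT]
+ *       name      logdna
+ *       match     *
+ *       api_key   YOUR-INGESTION-KEY
+ *       hostname  my-host
+ *       app       my-service
+ *       tags      env:staging, team:observability
+ */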
+
+/* Plugin reference */
+struct flb_output_plugin out_logdna_plugin = {
+ .name = "logdna",
+ .description = "LogDNA",
+ .cb_init = cb_logdna_init,
+ .cb_flush = cb_logdna_flush,
+ .cb_exit = cb_logdna_exit,
+ .config_map = config_map,
+ .flags = FLB_OUTPUT_NET | FLB_IO_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_logdna/logdna.h b/src/fluent-bit/plugins/out_logdna/logdna.h
new file mode 100644
index 000000000..f7ca19a43
--- /dev/null
+++ b/src/fluent-bit/plugins/out_logdna/logdna.h
@@ -0,0 +1,51 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_LOGDNA_H
+#define FLB_OUT_LOGDNA_H
+
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_upstream.h>
+
+#define FLB_LOGDNA_HOST "logs.logdna.com"
+#define FLB_LOGDNA_PORT "443"
+#define FLB_LOGDNA_CT "Content-Type"
+#define FLB_LOGDNA_CT_JSON "application/json; charset=UTF-8"
+
+struct flb_logdna {
+ /* Incoming Configuration Properties */
+ flb_sds_t logdna_host;
+ int logdna_port;
+ flb_sds_t api_key;
+ flb_sds_t hostname;
+ flb_sds_t mac_addr;
+ flb_sds_t ip_addr;
+ flb_sds_t file;
+ flb_sds_t app;
+ struct mk_list *tags;
+
+ /* Internal */
+ flb_sds_t _hostname;
+ flb_sds_t tags_formatted;
+ struct flb_upstream *u;
+ struct flb_output_instance *ins;
+};
+
+
+#endif
diff --git a/src/fluent-bit/plugins/out_loki/CMakeLists.txt b/src/fluent-bit/plugins/out_loki/CMakeLists.txt
new file mode 100644
index 000000000..d91f0aa73
--- /dev/null
+++ b/src/fluent-bit/plugins/out_loki/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ loki.c
+ )
+
+FLB_PLUGIN(out_loki "${src}" "")
diff --git a/src/fluent-bit/plugins/out_loki/loki.c b/src/fluent-bit/plugins/out_loki/loki.c
new file mode 100644
index 000000000..d93a3f9aa
--- /dev/null
+++ b/src/fluent-bit/plugins/out_loki/loki.c
@@ -0,0 +1,1868 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_version.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_ra_key.h>
+#include <fluent-bit/flb_thread_storage.h>
+#include <fluent-bit/record_accessor/flb_ra_parser.h>
+#include <fluent-bit/flb_mp.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_gzip.h>
+
+#include <ctype.h>
+#include <sys/stat.h>
+
+#include "loki.h"
+
+struct flb_loki_dynamic_tenant_id_entry {
+ flb_sds_t value;
+ struct cfl_list _head;
+};
+
+pthread_once_t initialization_guard = PTHREAD_ONCE_INIT;
+
+FLB_TLS_DEFINE(struct flb_loki_dynamic_tenant_id_entry,
+ thread_local_tenant_id);
+
+void initialize_thread_local_storage()
+{
+ FLB_TLS_INIT(thread_local_tenant_id);
+}
+
+static struct flb_loki_dynamic_tenant_id_entry *dynamic_tenant_id_create() {
+ struct flb_loki_dynamic_tenant_id_entry *entry;
+
+ entry = (struct flb_loki_dynamic_tenant_id_entry *) \
+ flb_calloc(1, sizeof(struct flb_loki_dynamic_tenant_id_entry));
+
+ if (entry != NULL) {
+ entry->value = NULL;
+
+ cfl_list_entry_init(&entry->_head);
+ }
+
+ return entry;
+}
+
+static void dynamic_tenant_id_destroy(struct flb_loki_dynamic_tenant_id_entry *entry) {
+ if (entry != NULL) {
+ if (entry->value != NULL) {
+ flb_sds_destroy(entry->value);
+
+ entry->value = NULL;
+ }
+
+ if (!cfl_list_entry_is_orphan(&entry->_head)) {
+ cfl_list_del(&entry->_head);
+ }
+
+ flb_free(entry);
+ }
+}
+
+static void flb_loki_kv_init(struct mk_list *list)
+{
+ mk_list_init(list);
+}
+
+static inline void safe_sds_cat(flb_sds_t *buf, const char *str, int len)
+{
+ flb_sds_t tmp;
+
+ tmp = flb_sds_cat(*buf, str, len);
+ if (tmp) {
+ *buf = tmp;
+ }
+}
+
+static inline void normalize_cat(struct flb_ra_parser *rp, flb_sds_t *name)
+{
+ int sub;
+ int len;
+ char tmp[64];
+ struct mk_list *s_head;
+ struct flb_ra_key *key;
+ struct flb_ra_subentry *entry;
+
+ /* Iterate record accessor keys */
+ key = rp->key;
+ if (rp->type == FLB_RA_PARSER_STRING) {
+ safe_sds_cat(name, key->name, flb_sds_len(key->name));
+ }
+ else if (rp->type == FLB_RA_PARSER_KEYMAP) {
+ safe_sds_cat(name, key->name, flb_sds_len(key->name));
+ if (mk_list_size(key->subkeys) > 0) {
+ safe_sds_cat(name, "_", 1);
+ }
+
+ sub = 0;
+ mk_list_foreach(s_head, key->subkeys) {
+ entry = mk_list_entry(s_head, struct flb_ra_subentry, _head);
+
+ if (sub > 0) {
+ safe_sds_cat(name, "_", 1);
+ }
+ if (entry->type == FLB_RA_PARSER_STRING) {
+ safe_sds_cat(name, entry->str, flb_sds_len(entry->str));
+ }
+ else if (entry->type == FLB_RA_PARSER_ARRAY_ID) {
+ len = snprintf(tmp, sizeof(tmp) -1, "%d",
+ entry->array_id);
+ safe_sds_cat(name, tmp, len);
+ }
+ sub++;
+ }
+ }
+}
+
+static flb_sds_t normalize_ra_key_name(struct flb_loki *ctx,
+ struct flb_record_accessor *ra)
+{
+ int c = 0;
+ flb_sds_t name;
+ struct mk_list *head;
+ struct flb_ra_parser *rp;
+
+ name = flb_sds_create_size(128);
+ if (!name) {
+ return NULL;
+ }
+
+ mk_list_foreach(head, &ra->list) {
+ rp = mk_list_entry(head, struct flb_ra_parser, _head);
+ if (c > 0) {
+ flb_sds_cat(name, "_", 1);
+ }
+ normalize_cat(rp, &name);
+ c++;
+ }
+
+ return name;
+}
+
+void flb_loki_kv_destroy(struct flb_loki_kv *kv)
+{
+ /* destroy key and value */
+ flb_sds_destroy(kv->key);
+ if (kv->val_type == FLB_LOKI_KV_STR) {
+ flb_sds_destroy(kv->str_val);
+ }
+ else if (kv->val_type == FLB_LOKI_KV_RA) {
+ flb_ra_destroy(kv->ra_val);
+ }
+
+ if (kv->ra_key) {
+ flb_ra_destroy(kv->ra_key);
+ }
+
+ if (kv->key_normalized) {
+ flb_sds_destroy(kv->key_normalized);
+ }
+
+ flb_free(kv);
+}
+
+int flb_loki_kv_append(struct flb_loki *ctx, char *key, char *val)
+{
+ int ra_count = 0;
+ int k_len;
+ int ret;
+ struct flb_loki_kv *kv;
+
+ if (!key) {
+ return -1;
+ }
+
+ if (!val && key[0] != '$') {
+ return -1;
+ }
+
+ kv = flb_calloc(1, sizeof(struct flb_loki_kv));
+ if (!kv) {
+ flb_errno();
+ return -1;
+ }
+
+ k_len = strlen(key);
+ if (key[0] == '$' && k_len >= 2 && isdigit(key[1])) {
+ flb_plg_error(ctx->ins,
+ "key name for record accessor cannot start with a number: %s",
+ key);
+ flb_free(kv);
+ return -1;
+ }
+
+ kv->key = flb_sds_create(key);
+ if (!kv->key) {
+ flb_free(kv);
+ return -1;
+ }
+
+ /*
+     * If the key starts with a '$', it means it's a record accessor pattern and
+     * the key/value pair will be formed using the key name and its resolved value.
+ */
+ if (key[0] == '$' && val == NULL) {
+ kv->ra_key = flb_ra_create(key, FLB_TRUE);
+ if (!kv->ra_key) {
+ flb_plg_error(ctx->ins,
+ "invalid key record accessor pattern for key '%s'",
+ key);
+ flb_loki_kv_destroy(kv);
+ return -1;
+ }
+
+ /* Normalize 'key name' using record accessor pattern */
+ kv->key_normalized = normalize_ra_key_name(ctx, kv->ra_key);
+ if (!kv->key_normalized) {
+ flb_plg_error(ctx->ins,
+ "could not normalize key pattern name '%s'\n",
+ kv->ra_key->pattern);
+ flb_loki_kv_destroy(kv);
+ return -1;
+ }
+ /* remove record keys placed as stream labels via 'labels' and 'label_keys' */
+ ret = flb_slist_add(&ctx->remove_keys_derived, key);
+ if (ret < 0) {
+ flb_loki_kv_destroy(kv);
+ return -1;
+ }
+ ra_count++;
+ }
+ else if (val[0] == '$') {
+ /* create a record accessor context */
+ kv->val_type = FLB_LOKI_KV_RA;
+ kv->ra_val = flb_ra_create(val, FLB_TRUE);
+ if (!kv->ra_val) {
+ flb_plg_error(ctx->ins,
+ "invalid record accessor pattern for key '%s': %s",
+ key, val);
+ flb_loki_kv_destroy(kv);
+ return -1;
+ }
+ ret = flb_slist_add(&ctx->remove_keys_derived, val);
+ if (ret < 0) {
+ flb_loki_kv_destroy(kv);
+ return -1;
+ }
+ ra_count++;
+ }
+ else {
+ kv->val_type = FLB_LOKI_KV_STR;
+ kv->str_val = flb_sds_create(val);
+ if (!kv->str_val) {
+ flb_loki_kv_destroy(kv);
+ return -1;
+ }
+ }
+ mk_list_add(&kv->_head, &ctx->labels_list);
+
+ /* return the number of record accessor values */
+ return ra_count;
+}
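+
+/*
+ * Illustrative key/value pairs this function accepts (the record accessor
+ * patterns shown are placeholders):
+ *
+ *   key="job",  val="fluent-bit"                     -> static label
+ *   key="ns",   val="$kubernetes['namespace_name']"  -> value read from the record
+ *   key="$kubernetes['pod_name']", val=NULL          -> key and value derived from the record
+ */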
+
+static void flb_loki_kv_exit(struct flb_loki *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_loki_kv *kv;
+
+ mk_list_foreach_safe(head, tmp, &ctx->labels_list) {
+ kv = mk_list_entry(head, struct flb_loki_kv, _head);
+
+ /* unlink and destroy */
+ mk_list_del(&kv->_head);
+ flb_loki_kv_destroy(kv);
+ }
+}
+
+/* Pack a label key; it also performs sanitization of the characters */
+static int pack_label_key(msgpack_packer *mp_pck, char *key, int key_len)
+{
+ int i;
+ int k_len = key_len;
+ int is_digit = FLB_FALSE;
+ char *p;
+ size_t prev_size;
+
+ /* Normalize key name using the packed value */
+ if (isdigit(*key)) {
+ is_digit = FLB_TRUE;
+ k_len++;
+ }
+
+ /* key: pack the length */
+ msgpack_pack_str(mp_pck, k_len);
+ if (is_digit) {
+ msgpack_pack_str_body(mp_pck, "_", 1);
+ }
+
+ /* save the current offset */
+ prev_size = ((msgpack_sbuffer *) mp_pck->data)->size;
+
+ /* Pack the key name */
+ msgpack_pack_str_body(mp_pck, key, key_len);
+
+ /* 'p' will point to where the key was written */
+ p = (char *) (((msgpack_sbuffer*) mp_pck->data)->data + prev_size);
+
+ /* and sanitize the key characters */
+ for (i = 0; i < key_len; i++) {
+ if (!isalnum(p[i]) && p[i] != '_') {
+ p[i] = '_';
+ }
+ }
+
+ return 0;
+}
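+
+/*
+ * Example (illustrative): a key such as "app.kubernetes.io/name" is packed as
+ * "app_kubernetes_io_name", and a key starting with a digit such as "0zone"
+ * is prefixed with an underscore and becomes "_0zone".
+ */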
+
+static flb_sds_t pack_labels(struct flb_loki *ctx,
+ msgpack_packer *mp_pck,
+ char *tag, int tag_len,
+ msgpack_object *map)
+{
+ int i;
+ flb_sds_t ra_val;
+ struct mk_list *head;
+ struct flb_ra_value *rval = NULL;
+ struct flb_loki_kv *kv;
+ msgpack_object k;
+ msgpack_object v;
+ struct flb_mp_map_header mh;
+
+
+ /* Initialize dynamic map header */
+ flb_mp_map_header_init(&mh, mp_pck);
+
+ mk_list_foreach(head, &ctx->labels_list) {
+ kv = mk_list_entry(head, struct flb_loki_kv, _head);
+
+ /* record accessor key/value pair */
+ if (kv->ra_key != NULL && kv->ra_val == NULL) {
+ ra_val = flb_ra_translate(kv->ra_key, tag, tag_len, *(map), NULL);
+ if (!ra_val || flb_sds_len(ra_val) == 0) {
+                /* if no value is returned or if it's empty, just skip it */
+ flb_plg_warn(ctx->ins,
+ "empty record accessor key translation for pattern: %s",
+ kv->ra_key->pattern);
+ }
+ else {
+ /* Pack the key and value */
+ flb_mp_map_header_append(&mh);
+
+ /* We skip the first '$' character since it won't be valid in Loki */
+ pack_label_key(mp_pck, kv->key_normalized,
+ flb_sds_len(kv->key_normalized));
+
+ msgpack_pack_str(mp_pck, flb_sds_len(ra_val));
+ msgpack_pack_str_body(mp_pck, ra_val, flb_sds_len(ra_val));
+ }
+
+ if (ra_val) {
+ flb_sds_destroy(ra_val);
+ }
+ continue;
+ }
+
+ /*
+         * The code is a bit duplicated to be able to handle the exception of an
+         * invalid or empty value; in that case the key/value pair is skipped.
+ */
+ if (kv->val_type == FLB_LOKI_KV_STR) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(mp_pck, flb_sds_len(kv->key));
+ msgpack_pack_str_body(mp_pck, kv->key, flb_sds_len(kv->key));
+ msgpack_pack_str(mp_pck, flb_sds_len(kv->str_val));
+ msgpack_pack_str_body(mp_pck, kv->str_val, flb_sds_len(kv->str_val));
+ }
+ else if (kv->val_type == FLB_LOKI_KV_RA) {
+ /* record accessor type */
+ ra_val = flb_ra_translate(kv->ra_val, tag, tag_len, *(map), NULL);
+ if (!ra_val || flb_sds_len(ra_val) == 0) {
+ flb_plg_debug(ctx->ins, "could not translate record accessor");
+ }
+ else {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(mp_pck, flb_sds_len(kv->key));
+ msgpack_pack_str_body(mp_pck, kv->key, flb_sds_len(kv->key));
+ msgpack_pack_str(mp_pck, flb_sds_len(ra_val));
+ msgpack_pack_str_body(mp_pck, ra_val, flb_sds_len(ra_val));
+ }
+
+ if (ra_val) {
+ flb_sds_destroy(ra_val);
+ }
+ }
+ }
+
+ if (ctx->auto_kubernetes_labels == FLB_TRUE) {
+ rval = flb_ra_get_value_object(ctx->ra_k8s, *map);
+ if (rval && rval->o.type == MSGPACK_OBJECT_MAP) {
+ for (i = 0; i < rval->o.via.map.size; i++) {
+ k = rval->o.via.map.ptr[i].key;
+ v = rval->o.via.map.ptr[i].val;
+
+ if (k.type != MSGPACK_OBJECT_STR || v.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ /* append the key/value pair */
+ flb_mp_map_header_append(&mh);
+
+ /* Pack key */
+ pack_label_key(mp_pck, (char *) k.via.str.ptr, k.via.str.size);
+
+ /* Pack the value */
+ msgpack_pack_str(mp_pck, v.via.str.size);
+ msgpack_pack_str_body(mp_pck, v.via.str.ptr, v.via.str.size);
+ }
+ }
+
+ if (rval) {
+ flb_ra_key_value_destroy(rval);
+ }
+ }
+
+    /* Check if we added any label; if none has been set, set the default 'job' */
+ if (mh.entries == 0) {
+ /* pack the default entry */
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(mp_pck, 3);
+ msgpack_pack_str_body(mp_pck, "job", 3);
+ msgpack_pack_str(mp_pck, 10);
+ msgpack_pack_str_body(mp_pck, "fluent-bit", 10);
+ }
+ flb_mp_map_header_end(&mh);
+ return 0;
+}
+
+static int create_label_map_entry(struct flb_loki *ctx,
+ struct flb_sds_list *list, msgpack_object *val, int *ra_used)
+{
+ msgpack_object key;
+ flb_sds_t label_key;
+ flb_sds_t val_str;
+ int i;
+ int len;
+ int ret;
+
+ if (ctx == NULL || list == NULL || val == NULL || ra_used == NULL) {
+ return -1;
+ }
+
+ switch (val->type) {
+ case MSGPACK_OBJECT_STR:
+ label_key = flb_sds_create_len(val->via.str.ptr, val->via.str.size);
+ if (label_key == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ val_str = flb_ra_create_str_from_list(list);
+ if (val_str == NULL) {
+ flb_plg_error(ctx->ins, "[%s] flb_ra_create_from_list failed", __FUNCTION__);
+ flb_sds_destroy(label_key);
+ return -1;
+ }
+
+ /* for debugging
+ printf("label_key=%s val_str=%s\n", label_key, val_str);
+ */
+
+ ret = flb_loki_kv_append(ctx, label_key, val_str);
+ flb_sds_destroy(label_key);
+ flb_sds_destroy(val_str);
+ if (ret == -1) {
+ return -1;
+ }
+ *ra_used = *ra_used + 1;
+
+ break;
+ case MSGPACK_OBJECT_MAP:
+ len = val->via.map.size;
+ for (i=0; i<len; i++) {
+ key = val->via.map.ptr[i].key;
+ if (key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "[%s] key is not string", __FUNCTION__);
+ return -1;
+ }
+ ret = flb_sds_list_add(list, (char*)key.via.str.ptr, key.via.str.size);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "[%s] flb_sds_list_add failed", __FUNCTION__);
+ return -1;
+ }
+
+ ret = create_label_map_entry(ctx, list, &val->via.map.ptr[i].val, ra_used);
+ if (ret < 0) {
+ return -1;
+ }
+
+ ret = flb_sds_list_del_last_entry(list);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "[%s] flb_sds_list_del_last_entry failed", __FUNCTION__);
+ return -1;
+ }
+ }
+
+ break;
+ default:
+ flb_plg_error(ctx->ins, "[%s] value type is not str or map. type=%d", __FUNCTION__, val->type);
+ return -1;
+ }
+ return 0;
+}
+
+static int create_label_map_entries(struct flb_loki *ctx,
+ char *msgpack_buf, size_t msgpack_size, int *ra_used)
+{
+ struct flb_sds_list *list = NULL;
+ msgpack_unpacked result;
+ size_t off = 0;
+ int i;
+ int len;
+ int ret;
+ msgpack_object key;
+
+ if (ctx == NULL || msgpack_buf == NULL || ra_used == NULL) {
+ return -1;
+ }
+
+ msgpack_unpacked_init(&result);
+ while(msgpack_unpack_next(&result, msgpack_buf, msgpack_size, &off) == MSGPACK_UNPACK_SUCCESS) {
+ if (result.data.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "[%s] data type is not map", __FUNCTION__);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ len = result.data.via.map.size;
+ for (i=0; i<len; i++) {
+ list = flb_sds_list_create();
+ if (list == NULL) {
+ flb_plg_error(ctx->ins, "[%s] flb_sds_list_create failed", __FUNCTION__);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+ key = result.data.via.map.ptr[i].key;
+ if (key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "[%s] key is not string", __FUNCTION__);
+ flb_sds_list_destroy(list);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ ret = flb_sds_list_add(list, (char*)key.via.str.ptr, key.via.str.size);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "[%s] flb_sds_list_add failed", __FUNCTION__);
+ flb_sds_list_destroy(list);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ ret = create_label_map_entry(ctx, list, &result.data.via.map.ptr[i].val, ra_used);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "[%s] create_label_map_entry failed", __FUNCTION__);
+ flb_sds_list_destroy(list);
+ msgpack_unpacked_destroy(&result);
+ return -1;
+ }
+
+ flb_sds_list_destroy(list);
+ list = NULL;
+ }
+ }
+
+ msgpack_unpacked_destroy(&result);
+
+ return 0;
+}
+
+static int read_label_map_path_file(struct flb_output_instance *ins, flb_sds_t path,
+ char **out_buf, size_t *out_size)
+{
+ int ret;
+ int root_type;
+ char *buf = NULL;
+ char *msgp_buf = NULL;
+ FILE *fp = NULL;
+ struct stat st;
+ size_t file_size;
+ size_t ret_size;
+
+ ret = access(path, R_OK);
+ if (ret < 0) {
+ flb_errno();
+ flb_plg_error(ins, "can't access %s", path);
+ return -1;
+ }
+
+ ret = stat(path, &st);
+ if (ret < 0) {
+ flb_errno();
+ flb_plg_error(ins, "stat failed %s", path);
+ return -1;
+ }
+ file_size = st.st_size;
+
+ fp = fopen(path, "r");
+ if (fp == NULL) {
+ flb_plg_error(ins, "can't open %s", path);
+ return -1;
+ }
+
+ buf = flb_malloc(file_size);
+ if (buf == NULL) {
+ flb_plg_error(ins, "malloc failed");
+ fclose(fp);
+ return -1;
+ }
+
+ ret_size = fread(buf, 1, file_size, fp);
+ if (ret_size < file_size && feof(fp) != 0) {
+ flb_plg_error(ins, "fread failed");
+ fclose(fp);
+ flb_free(buf);
+ return -1;
+ }
+
+ ret = flb_pack_json(buf, file_size, &msgp_buf, &ret_size, &root_type, NULL);
+ if (ret < 0) {
+ flb_plg_error(ins, "flb_pack_json failed");
+ fclose(fp);
+ flb_free(buf);
+ return -1;
+ }
+
+ *out_buf = msgp_buf;
+ *out_size = ret_size;
+
+ fclose(fp);
+ flb_free(buf);
+ return 0;
+}
+
+static int load_label_map_path(struct flb_loki *ctx, flb_sds_t path, int *ra_used)
+{
+ int ret;
+ char *msgpack_buf = NULL;
+ size_t msgpack_size;
+
+ ret = read_label_map_path_file(ctx->ins, path, &msgpack_buf, &msgpack_size);
+ if (ret < 0) {
+ return -1;
+ }
+
+ ret = create_label_map_entries(ctx, msgpack_buf, msgpack_size, ra_used);
+ if (ret < 0) {
+ flb_free(msgpack_buf);
+ return -1;
+ }
+
+ if (msgpack_buf != NULL) {
+ flb_free(msgpack_buf);
+ }
+
+ return 0;
+}
+
+static int parse_labels(struct flb_loki *ctx)
+{
+ int ret;
+ int ra_used = 0;
+ char *p;
+ flb_sds_t key;
+ flb_sds_t val;
+ struct mk_list *head;
+ struct flb_slist_entry *entry;
+
+ flb_loki_kv_init(&ctx->labels_list);
+
+ if (ctx->labels) {
+ mk_list_foreach(head, ctx->labels) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ /* record accessor label key ? */
+ if (entry->str[0] == '$') {
+ ret = flb_loki_kv_append(ctx, entry->str, NULL);
+ if (ret == -1) {
+ return -1;
+ }
+ else if (ret > 0) {
+ ra_used++;
+ }
+ continue;
+ }
+
+ p = strchr(entry->str, '=');
+ if (!p) {
+ flb_plg_error(ctx->ins, "invalid key value pair on '%s'",
+ entry->str);
+ return -1;
+ }
+
+ key = flb_sds_create_size((p - entry->str) + 1);
+ flb_sds_cat(key, entry->str, p - entry->str);
+ val = flb_sds_create(p + 1);
+ if (!key) {
+ flb_plg_error(ctx->ins,
+ "invalid key value pair on '%s'",
+ entry->str);
+ return -1;
+ }
+ if (!val || flb_sds_len(val) == 0) {
+ flb_plg_error(ctx->ins,
+ "invalid key value pair on '%s'",
+ entry->str);
+ flb_sds_destroy(key);
+ return -1;
+ }
+
+ ret = flb_loki_kv_append(ctx, key, val);
+ flb_sds_destroy(key);
+ flb_sds_destroy(val);
+
+ if (ret == -1) {
+ return -1;
+ }
+ else if (ret > 0) {
+ ra_used++;
+ }
+ }
+ }
+
+ /* Append label keys set in the configuration */
+ if (ctx->label_keys) {
+ mk_list_foreach(head, ctx->label_keys) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+ if (entry->str[0] != '$') {
+ flb_plg_error(ctx->ins,
+ "invalid label key, the name must start with '$'");
+ return -1;
+ }
+
+ ret = flb_loki_kv_append(ctx, entry->str, NULL);
+ if (ret == -1) {
+ return -1;
+ }
+ else if (ret > 0) {
+ ra_used++;
+ }
+ }
+ }
+
+ /* label_map_path */
+ if (ctx->label_map_path) {
+ ret = load_label_map_path(ctx, ctx->label_map_path, &ra_used);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "failed to load label_map_path");
+ }
+ }
+
+ if (ctx->auto_kubernetes_labels == FLB_TRUE) {
+ ctx->ra_k8s = flb_ra_create("$kubernetes['labels']", FLB_TRUE);
+ if (!ctx->ra_k8s) {
+ flb_plg_error(ctx->ins,
+ "could not create record accessor for Kubernetes labels");
+ return -1;
+ }
+ }
+
+ /*
+     * If the variable 'ra_used' is greater than zero, it means that record
+     * accessors are being used to compose the stream labels.
+ */
+ ctx->ra_used = ra_used;
+ return 0;
+}
+
+static int key_is_duplicated(struct mk_list *list, char *str, int len)
+{
+ struct mk_list *head;
+ struct flb_slist_entry *entry;
+
+ mk_list_foreach(head, list) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+ if (flb_sds_len(entry->str) == len &&
+ strncmp(entry->str, str, len) == 0) {
+ return FLB_TRUE;
+ }
+ }
+
+ return FLB_FALSE;
+}
+
+static int prepare_remove_keys(struct flb_loki *ctx)
+{
+ int ret;
+ int len;
+ int size;
+ char *tmp;
+ struct mk_list *head;
+ struct flb_slist_entry *entry;
+ struct mk_list *patterns;
+
+ patterns = &ctx->remove_keys_derived;
+
+ /* Add remove keys set in the configuration */
+ if (ctx->remove_keys) {
+ mk_list_foreach(head, ctx->remove_keys) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ if (entry->str[0] != '$') {
+ tmp = flb_malloc(flb_sds_len(entry->str) + 2);
+ if (!tmp) {
+ flb_errno();
+ continue;
+ }
+ else {
+ tmp[0] = '$';
+ len = flb_sds_len(entry->str);
+ memcpy(tmp + 1, entry->str, len);
+ tmp[len + 1] = '\0';
+ len++;
+ }
+ }
+ else {
+ tmp = entry->str;
+ len = flb_sds_len(entry->str);
+ }
+
+ ret = key_is_duplicated(patterns, tmp, len);
+ if (ret == FLB_TRUE) {
+ if (entry->str != tmp) {
+ flb_free(tmp);
+ }
+ continue;
+ }
+
+ ret = flb_slist_add_n(patterns, tmp, len);
+ if (entry->str != tmp) {
+ flb_free(tmp);
+ }
+ if (ret < 0) {
+ return -1;
+ }
+ }
+ size = mk_list_size(patterns);
+ flb_plg_debug(ctx->ins, "remove_mpa size: %d", size);
+ if (size > 0) {
+ ctx->remove_mpa = flb_mp_accessor_create(patterns);
+ if (ctx->remove_mpa == NULL) {
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void loki_config_destroy(struct flb_loki *ctx)
+{
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ if (ctx->ra_k8s) {
+ flb_ra_destroy(ctx->ra_k8s);
+ }
+ if (ctx->ra_tenant_id_key) {
+ flb_ra_destroy(ctx->ra_tenant_id_key);
+ }
+
+ if (ctx->remove_mpa) {
+ flb_mp_accessor_destroy(ctx->remove_mpa);
+ }
+ flb_slist_destroy(&ctx->remove_keys_derived);
+
+ flb_loki_kv_exit(ctx);
+ flb_free(ctx);
+}
+
+static struct flb_loki *loki_config_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ int io_flags = 0;
+ struct flb_loki *ctx;
+ struct flb_upstream *upstream;
+ char *compress;
+
+ /* Create context */
+ ctx = flb_calloc(1, sizeof(struct flb_loki));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ flb_loki_kv_init(&ctx->labels_list);
+
+ /* Register context with plugin instance */
+ flb_output_set_context(ins, ctx);
+
+ /* Set networking defaults */
+ flb_output_net_default(FLB_LOKI_HOST, FLB_LOKI_PORT, ins);
+
+ /* Load config map */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ return NULL;
+ }
+
+ /* Initialize final remove_keys list */
+ flb_slist_create(&ctx->remove_keys_derived);
+
+ /* Parse labels */
+ ret = parse_labels(ctx);
+ if (ret == -1) {
+ return NULL;
+ }
+
+ /* Load remove keys */
+ ret = prepare_remove_keys(ctx);
+ if (ret == -1) {
+ return NULL;
+ }
+
+ /* tenant_id_key */
+ if (ctx->tenant_id_key_config) {
+ ctx->ra_tenant_id_key = flb_ra_create(ctx->tenant_id_key_config, FLB_FALSE);
+ if (!ctx->ra_tenant_id_key) {
+ flb_plg_error(ctx->ins,
+ "could not create record accessor for Tenant ID");
+ }
+ }
+
+ /* Compress (gzip) */
+ compress = (char *) flb_output_get_property("compress", ins);
+ ctx->compress_gzip = FLB_FALSE;
+ if (compress) {
+ if (strcasecmp(compress, "gzip") == 0) {
+ ctx->compress_gzip = FLB_TRUE;
+ }
+ }
+
+ /* Line Format */
+ if (strcasecmp(ctx->line_format, "json") == 0) {
+ ctx->out_line_format = FLB_LOKI_FMT_JSON;
+ }
+ else if (strcasecmp(ctx->line_format, "key_value") == 0) {
+ ctx->out_line_format = FLB_LOKI_FMT_KV;
+ }
+ else {
+ flb_plg_error(ctx->ins, "invalid 'line_format' value: %s",
+ ctx->line_format);
+ return NULL;
+ }
+
+ /* use TLS ? */
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ }
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ /* Create Upstream connection context */
+ upstream = flb_upstream_create(config,
+ ins->host.name,
+ ins->host.port,
+ io_flags,
+ ins->tls);
+ if (!upstream) {
+ return NULL;
+ }
+ ctx->u = upstream;
+ flb_output_upstream_set(ctx->u, ins);
+ ctx->tcp_port = ins->host.port;
+ ctx->tcp_host = ins->host.name;
+
+ return ctx;
+}
+
+/*
+ * Convert a struct flb_time timestamp value to nanoseconds and pack it as
+ * a string.
+ */
+static void pack_timestamp(msgpack_packer *mp_pck, struct flb_time *tms)
+{
+ int len;
+ char buf[64];
+ uint64_t nanosecs;
+
+ /* convert to nanoseconds */
+ nanosecs = flb_time_to_nanosec(tms);
+
+ /* format as a string */
+ len = snprintf(buf, sizeof(buf) - 1, "%" PRIu64, nanosecs);
+
+ /* pack the value */
+ msgpack_pack_str(mp_pck, len);
+ msgpack_pack_str_body(mp_pck, buf, len);
+}
+
+
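+/*
+ * Append a string representation of 'val' to 'buf' for the 'key_value'
+ * line format: strings are quoted, arrays and maps are rendered inline.
+ */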
+static void pack_format_line_value(flb_sds_t *buf, msgpack_object *val)
+{
+ int i;
+ int len;
+ char temp[512];
+ msgpack_object k;
+ msgpack_object v;
+
+ if (val->type == MSGPACK_OBJECT_STR) {
+ safe_sds_cat(buf, "\"", 1);
+ safe_sds_cat(buf, val->via.str.ptr, val->via.str.size);
+ safe_sds_cat(buf, "\"", 1);
+ }
+ else if (val->type == MSGPACK_OBJECT_NIL) {
+ safe_sds_cat(buf, "null", 4);
+ }
+ else if (val->type == MSGPACK_OBJECT_BOOLEAN) {
+ if (val->via.boolean) {
+ safe_sds_cat(buf, "true", 4);
+ }
+ else {
+ safe_sds_cat(buf, "false", 5);
+ }
+ }
+ else if (val->type == MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ len = snprintf(temp, sizeof(temp)-1, "%"PRIu64, val->via.u64);
+ safe_sds_cat(buf, temp, len);
+ }
+ else if (val->type == MSGPACK_OBJECT_NEGATIVE_INTEGER) {
+ len = snprintf(temp, sizeof(temp)-1, "%"PRId64, val->via.i64);
+ safe_sds_cat(buf, temp, len);
+ }
+ else if (val->type == MSGPACK_OBJECT_FLOAT32 ||
+ val->type == MSGPACK_OBJECT_FLOAT64) {
+ if (val->via.f64 == (double)(long long int) val->via.f64) {
+ len = snprintf(temp, sizeof(temp)-1, "%.1f", val->via.f64);
+ }
+ else {
+ len = snprintf(temp, sizeof(temp)-1, "%.16g", val->via.f64);
+ }
+ safe_sds_cat(buf, temp, len);
+ }
+ else if (val->type == MSGPACK_OBJECT_ARRAY) {
+ safe_sds_cat(buf, "\"[", 2);
+ for (i = 0; i < val->via.array.size; i++) {
+ v = val->via.array.ptr[i];
+ if (i > 0) {
+ safe_sds_cat(buf, " ", 1);
+ }
+ pack_format_line_value(buf, &v);
+ }
+ safe_sds_cat(buf, "]\"", 2);
+ }
+ else if (val->type == MSGPACK_OBJECT_MAP) {
+ safe_sds_cat(buf, "\"map[", 5);
+
+ for (i = 0; i < val->via.map.size; i++) {
+ k = val->via.map.ptr[i].key;
+ v = val->via.map.ptr[i].val;
+
+ if (k.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ if (i > 0) {
+ safe_sds_cat(buf, " ", 1);
+ }
+
+ safe_sds_cat(buf, k.via.str.ptr, k.via.str.size);
+ safe_sds_cat(buf, ":", 1);
+ pack_format_line_value(buf, &v);
+ }
+ safe_sds_cat(buf, "]\"", 2);
+ }
+ else {
+
+ return;
+ }
+}
+
+// look up the tenant ID in the record map and store it in dynamic_tenant_id
+static int get_tenant_id_from_record(struct flb_loki *ctx, msgpack_object *map,
+ flb_sds_t *dynamic_tenant_id)
+{
+ struct flb_ra_value *rval = NULL;
+ flb_sds_t tmp_str;
+ int cmp_len;
+
+ rval = flb_ra_get_value_object(ctx->ra_tenant_id_key, *map);
+
+ if (rval == NULL) {
+ flb_plg_warn(ctx->ins, "the value of %s is missing",
+ ctx->tenant_id_key_config);
+ return -1;
+ }
+ else if (rval->o.type != MSGPACK_OBJECT_STR) {
+ flb_plg_warn(ctx->ins, "the value of %s is not string",
+ ctx->tenant_id_key_config);
+ flb_ra_key_value_destroy(rval);
+ return -1;
+ }
+
+ tmp_str = flb_sds_create_len(rval->o.via.str.ptr,
+ rval->o.via.str.size);
+ if (tmp_str == NULL) {
+ flb_plg_warn(ctx->ins, "cannot create tenant ID string from record");
+ flb_ra_key_value_destroy(rval);
+ return -1;
+ }
+
+    // check if dynamic_tenant_id is already set.
+ if (*dynamic_tenant_id != NULL) {
+ cmp_len = flb_sds_len(*dynamic_tenant_id);
+
+ if ((rval->o.via.str.size == cmp_len) &&
+ flb_sds_cmp(tmp_str, *dynamic_tenant_id, cmp_len) == 0) {
+            // the tenant ID is the same, nothing to do.
+ flb_ra_key_value_destroy(rval);
+ flb_sds_destroy(tmp_str);
+
+ return 0;
+ }
+
+ flb_plg_warn(ctx->ins, "Tenant ID is overwritten %s -> %s",
+ *dynamic_tenant_id, tmp_str);
+
+ flb_sds_destroy(*dynamic_tenant_id);
+ }
+
+    // this sds will be released after the HTTP header is set.
+ *dynamic_tenant_id = tmp_str;
+ flb_plg_debug(ctx->ins, "Tenant ID is %s", *dynamic_tenant_id);
+
+ flb_ra_key_value_destroy(rval);
+ return 0;
+}
+
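+/*
+ * Pack a single record as the Loki log line: resolve the dynamic tenant ID,
+ * apply 'remove_keys', honor 'drop_single_key' and emit the line in JSON
+ * or key/value format.
+ */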
+static int pack_record(struct flb_loki *ctx,
+ msgpack_packer *mp_pck, msgpack_object *rec,
+ flb_sds_t *dynamic_tenant_id)
+{
+ int i;
+ int skip = 0;
+ int len;
+ int ret;
+ int size_hint = 1024;
+ char *line;
+ flb_sds_t buf;
+ msgpack_object key;
+ msgpack_object val;
+ char *tmp_sbuf_data = NULL;
+ size_t tmp_sbuf_size;
+ msgpack_unpacked mp_buffer;
+ size_t off = 0;
+
+ /*
+ * Get tenant id from record before removing keys.
+ * https://github.com/fluent/fluent-bit/issues/6207
+ */
+ if (ctx->ra_tenant_id_key && rec->type == MSGPACK_OBJECT_MAP) {
+ get_tenant_id_from_record(ctx, rec, dynamic_tenant_id);
+ }
+
+ /* Remove keys in remove_keys */
+ msgpack_unpacked_init(&mp_buffer);
+ if (ctx->remove_mpa) {
+ ret = flb_mp_accessor_keys_remove(ctx->remove_mpa, rec,
+ (void *) &tmp_sbuf_data, &tmp_sbuf_size);
+ if (ret == FLB_TRUE) {
+ ret = msgpack_unpack_next(&mp_buffer, tmp_sbuf_data, tmp_sbuf_size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ flb_free(tmp_sbuf_data);
+ msgpack_unpacked_destroy(&mp_buffer);
+ return -1;
+ }
+ rec = &mp_buffer.data;
+ }
+ }
+
+ /* Drop single key */
+ if (ctx->drop_single_key == FLB_TRUE && rec->type == MSGPACK_OBJECT_MAP && rec->via.map.size == 1) {
+ if (ctx->out_line_format == FLB_LOKI_FMT_JSON) {
+ rec = &rec->via.map.ptr[0].val;
+ } else if (ctx->out_line_format == FLB_LOKI_FMT_KV) {
+ val = rec->via.map.ptr[0].val;
+
+ if (val.type == MSGPACK_OBJECT_STR) {
+ msgpack_pack_str(mp_pck, val.via.str.size);
+ msgpack_pack_str_body(mp_pck, val.via.str.ptr, val.via.str.size);
+ } else {
+ buf = flb_sds_create_size(size_hint);
+ if (!buf) {
+ msgpack_unpacked_destroy(&mp_buffer);
+ if (tmp_sbuf_data) {
+ flb_free(tmp_sbuf_data);
+ }
+ return -1;
+ }
+ pack_format_line_value(&buf, &val);
+ msgpack_pack_str(mp_pck, flb_sds_len(buf));
+ msgpack_pack_str_body(mp_pck, buf, flb_sds_len(buf));
+ flb_sds_destroy(buf);
+ }
+
+ msgpack_unpacked_destroy(&mp_buffer);
+ if (tmp_sbuf_data) {
+ flb_free(tmp_sbuf_data);
+ }
+
+ return 0;
+ }
+ }
+
+ if (ctx->out_line_format == FLB_LOKI_FMT_JSON) {
+ line = flb_msgpack_to_json_str(size_hint, rec);
+ if (!line) {
+ if (tmp_sbuf_data) {
+ flb_free(tmp_sbuf_data);
+ }
+ msgpack_unpacked_destroy(&mp_buffer);
+ return -1;
+ }
+ len = strlen(line);
+ msgpack_pack_str(mp_pck, len);
+ msgpack_pack_str_body(mp_pck, line, len);
+ flb_free(line);
+ }
+ else if (ctx->out_line_format == FLB_LOKI_FMT_KV) {
+ if (rec->type != MSGPACK_OBJECT_MAP) {
+ msgpack_unpacked_destroy(&mp_buffer);
+ if (tmp_sbuf_data) {
+ flb_free(tmp_sbuf_data);
+ }
+ return -1;
+ }
+
+ buf = flb_sds_create_size(size_hint);
+ if (!buf) {
+ msgpack_unpacked_destroy(&mp_buffer);
+ if (tmp_sbuf_data) {
+ flb_free(tmp_sbuf_data);
+ }
+ return -1;
+ }
+
+ for (i = 0; i < rec->via.map.size; i++) {
+ key = rec->via.map.ptr[i].key;
+ val = rec->via.map.ptr[i].val;
+
+ if (key.type != MSGPACK_OBJECT_STR) {
+ skip++;
+ continue;
+ }
+
+ if (i > skip) {
+ safe_sds_cat(&buf, " ", 1);
+ }
+
+ safe_sds_cat(&buf, key.via.str.ptr, key.via.str.size);
+ safe_sds_cat(&buf, "=", 1);
+ pack_format_line_value(&buf, &val);
+ }
+
+ msgpack_pack_str(mp_pck, flb_sds_len(buf));
+ msgpack_pack_str_body(mp_pck, buf, flb_sds_len(buf));
+ flb_sds_destroy(buf);
+ }
+
+ msgpack_unpacked_destroy(&mp_buffer);
+ if (tmp_sbuf_data) {
+ flb_free(tmp_sbuf_data);
+ }
+
+ return 0;
+}
+
+/* Initialization callback */
+static int cb_loki_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int result;
+ struct flb_loki *ctx;
+
+ /* Create plugin context */
+ ctx = loki_config_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "cannot initialize configuration");
+ return -1;
+ }
+
+ result = pthread_mutex_init(&ctx->dynamic_tenant_list_lock, NULL);
+
+ if (result != 0) {
+ flb_errno();
+
+ flb_plg_error(ins, "cannot initialize dynamic tenant id list lock");
+
+ loki_config_destroy(ctx);
+
+ return -1;
+ }
+
+ result = pthread_once(&initialization_guard,
+ initialize_thread_local_storage);
+
+ if (result != 0) {
+ flb_errno();
+
+ flb_plg_error(ins, "cannot initialize thread local storage");
+
+ loki_config_destroy(ctx);
+
+ return -1;
+ }
+
+ cfl_list_init(&ctx->dynamic_tenant_list);
+
+    /*
+     * This plugin instance uses the HTTP client interface, let's register
+     * its debugging callbacks.
+     */
+ flb_output_set_http_debug_callbacks(ins);
+
+ flb_plg_info(ins,
+ "configured, hostname=%s:%i",
+ ctx->tcp_host, ctx->tcp_port);
+ return 0;
+}
+
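+/* Compose the Loki push API (v1) JSON payload from an encoded event chunk */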
+static flb_sds_t loki_compose_payload(struct flb_loki *ctx,
+ int total_records,
+ char *tag, int tag_len,
+ const void *data, size_t bytes,
+ flb_sds_t *dynamic_tenant_id)
+{
+ // int mp_ok = MSGPACK_UNPACK_SUCCESS;
+ // size_t off = 0;
+ flb_sds_t json;
+ // struct flb_time tms;
+ // msgpack_unpacked result;
+ msgpack_packer mp_pck;
+ msgpack_sbuffer mp_sbuf;
+ // msgpack_object *obj;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ /*
+ * Fluent Bit uses Loki API v1 to push records in JSON format, this
+ * is the expected structure:
+ *
+ * {
+ * "streams": [
+ * {
+ * "stream": {
+ * "label": "value"
+ * },
+ * "values": [
+ * [ "<unix epoch in nanoseconds>", "<log line>" ],
+ * [ "<unix epoch in nanoseconds>", "<log line>" ]
+ * ]
+ * }
+ * ]
+ * }
+ */
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return NULL;
+ }
+
+ /* Initialize msgpack buffers */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ /* Main map */
+ msgpack_pack_map(&mp_pck, 1);
+
+ /* streams */
+ msgpack_pack_str(&mp_pck, 7);
+ msgpack_pack_str_body(&mp_pck, "streams", 7);
+
+ if (ctx->ra_used == 0 && ctx->auto_kubernetes_labels == FLB_FALSE) {
+        /*
+         * No record accessor or custom keys are in use, so the labels are
+         * static: it's safe to pack one main stream and attach all the
+         * values to it.
+         */
+ msgpack_pack_array(&mp_pck, 1);
+
+ /* map content: streams['stream'] & streams['values'] */
+ msgpack_pack_map(&mp_pck, 2);
+
+ /* streams['stream'] */
+ msgpack_pack_str(&mp_pck, 6);
+ msgpack_pack_str_body(&mp_pck, "stream", 6);
+
+ /* Pack stream labels */
+ pack_labels(ctx, &mp_pck, tag, tag_len, NULL);
+
+ /* streams['values'] */
+ msgpack_pack_str(&mp_pck, 6);
+ msgpack_pack_str_body(&mp_pck, "values", 6);
+ msgpack_pack_array(&mp_pck, total_records);
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ msgpack_pack_array(&mp_pck, 2);
+
+ /* Append the timestamp */
+ pack_timestamp(&mp_pck, &log_event.timestamp);
+ pack_record(ctx, &mp_pck, log_event.body, dynamic_tenant_id);
+ }
+ }
+ else {
+        /*
+         * Here the labels are composed from each record's content, so to
+         * simplify the operation just create one stream per record.
+         */
+ msgpack_pack_array(&mp_pck, total_records);
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ /* map content: streams['stream'] & streams['values'] */
+ msgpack_pack_map(&mp_pck, 2);
+
+ /* streams['stream'] */
+ msgpack_pack_str(&mp_pck, 6);
+ msgpack_pack_str_body(&mp_pck, "stream", 6);
+
+ /* Pack stream labels */
+ pack_labels(ctx, &mp_pck, tag, tag_len, log_event.body);
+
+ /* streams['values'] */
+ msgpack_pack_str(&mp_pck, 6);
+ msgpack_pack_str_body(&mp_pck, "values", 6);
+ msgpack_pack_array(&mp_pck, 1);
+
+ msgpack_pack_array(&mp_pck, 2);
+
+ /* Append the timestamp */
+ pack_timestamp(&mp_pck, &log_event.timestamp);
+ pack_record(ctx, &mp_pck, log_event.body, dynamic_tenant_id);
+ }
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ json = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ return json;
+}
+
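+/*
+ * Release the request payload: gzip output is a plain heap buffer, while an
+ * uncompressed payload is an sds string.
+ */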
+static void payload_release(void *payload, int compressed)
+{
+ if (compressed) {
+ flb_free(payload);
+ }
+ else {
+ flb_sds_destroy(payload);
+ }
+}
+
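+/*
+ * Flush callback: compose the JSON payload, optionally compress it with gzip
+ * and POST it to the Loki push endpoint.
+ */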
+static void cb_loki_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ int out_ret = FLB_OK;
+ size_t b_sent;
+ flb_sds_t payload = NULL;
+ flb_sds_t out_buf = NULL;
+ size_t out_size;
+ int compressed = FLB_FALSE;
+ struct flb_loki *ctx = out_context;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ struct flb_loki_dynamic_tenant_id_entry *dynamic_tenant_id;
+
+ dynamic_tenant_id = FLB_TLS_GET(thread_local_tenant_id);
+
+ if (dynamic_tenant_id == NULL) {
+ dynamic_tenant_id = dynamic_tenant_id_create();
+
+ if (dynamic_tenant_id == NULL) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "cannot allocate dynamic tenant id");
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ FLB_TLS_SET(thread_local_tenant_id, dynamic_tenant_id);
+
+ pthread_mutex_lock(&ctx->dynamic_tenant_list_lock);
+
+ cfl_list_add(&dynamic_tenant_id->_head, &ctx->dynamic_tenant_list);
+
+ pthread_mutex_unlock(&ctx->dynamic_tenant_list_lock);
+ }
+
+    /* Format the data to the expected Loki payload */
+ payload = loki_compose_payload(ctx,
+ event_chunk->total_events,
+ (char *) event_chunk->tag,
+ flb_sds_len(event_chunk->tag),
+ event_chunk->data, event_chunk->size,
+ &dynamic_tenant_id->value);
+
+ if (!payload) {
+ flb_plg_error(ctx->ins, "cannot compose request payload");
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Map buffer */
+ out_buf = payload;
+ out_size = flb_sds_len(payload);
+
+ if (ctx->compress_gzip == FLB_TRUE) {
+ ret = flb_gzip_compress((void *) payload, flb_sds_len(payload), (void **) &out_buf, &out_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "cannot gzip payload, disabling compression");
+ } else {
+ compressed = FLB_TRUE;
+            /* payload is no longer needed */
+ flb_sds_destroy(payload);
+ }
+ }
+
+ /* Lookup an available connection context */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "no upstream connections available");
+
+ payload_release(out_buf, compressed);
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Create HTTP client context */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, FLB_LOKI_URI,
+ out_buf, out_size,
+ ctx->tcp_host, ctx->tcp_port,
+ NULL, 0);
+ if (!c) {
+ flb_plg_error(ctx->ins, "cannot create HTTP client context");
+
+ payload_release(out_buf, compressed);
+ flb_upstream_conn_release(u_conn);
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Set callback context to the HTTP client context */
+ flb_http_set_callback_context(c, ctx->ins->callback);
+
+ /* User Agent */
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+
+ /* Auth headers */
+ if (ctx->http_user && ctx->http_passwd) { /* Basic */
+ flb_http_basic_auth(c, ctx->http_user, ctx->http_passwd);
+ } else if (ctx->bearer_token) { /* Bearer token */
+ flb_http_bearer_auth(c, ctx->bearer_token);
+ }
+
+ /* Add Content-Type header */
+ flb_http_add_header(c,
+ FLB_LOKI_CT, sizeof(FLB_LOKI_CT) - 1,
+ FLB_LOKI_CT_JSON, sizeof(FLB_LOKI_CT_JSON) - 1);
+
+ if (compressed == FLB_TRUE) {
+ flb_http_set_content_encoding_gzip(c);
+ }
+
+ /* Add X-Scope-OrgID header */
+ if (dynamic_tenant_id->value != NULL) {
+ flb_http_add_header(c,
+ FLB_LOKI_HEADER_SCOPE, sizeof(FLB_LOKI_HEADER_SCOPE) - 1,
+ dynamic_tenant_id->value,
+ flb_sds_len(dynamic_tenant_id->value));
+ }
+ else if (ctx->tenant_id) {
+ flb_http_add_header(c,
+ FLB_LOKI_HEADER_SCOPE, sizeof(FLB_LOKI_HEADER_SCOPE) - 1,
+ ctx->tenant_id, flb_sds_len(ctx->tenant_id));
+ }
+
+ /* Send HTTP request */
+ ret = flb_http_do(c, &b_sent);
+ payload_release(out_buf, compressed);
+
+ /* Validate HTTP client return status */
+ if (ret == 0) {
+        /*
+         * Only allow the following HTTP status codes:
+         *
+         * - 200: OK
+         * - 201: Created
+         * - 202: Accepted
+         * - 203: Non-Authoritative Information
+         * - 204: No Content
+         * - 205: Reset Content
+         */
+ if (c->resp.status == 400) {
+ /*
+ * Loki will return 400 if incoming data is out of order.
+ * We should not retry such data.
+ */
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i Not retrying.\n%s",
+ ctx->tcp_host, ctx->tcp_port, c->resp.status,
+ c->resp.payload);
+ out_ret = FLB_ERROR;
+ }
+ else if (c->resp.status < 200 || c->resp.status > 205) {
+ if (c->resp.payload) {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i\n%s",
+ ctx->tcp_host, ctx->tcp_port, c->resp.status,
+ c->resp.payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->tcp_host, ctx->tcp_port, c->resp.status);
+ }
+ out_ret = FLB_RETRY;
+ }
+ else {
+ if (c->resp.payload) {
+ flb_plg_debug(ctx->ins, "%s:%i, HTTP status=%i\n%s",
+ ctx->tcp_host, ctx->tcp_port,
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->tcp_host, ctx->tcp_port,
+ c->resp.status);
+ }
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "could not flush records to %s:%i (http_do=%i)",
+ ctx->tcp_host, ctx->tcp_port, ret);
+ out_ret = FLB_RETRY;
+ }
+
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+
+ FLB_OUTPUT_RETURN(out_ret);
+}
+
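+/* Destroy every dynamic tenant ID entry tracked in the given list */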
+static void release_dynamic_tenant_ids(struct cfl_list *dynamic_tenant_list)
+{
+ struct cfl_list *iterator;
+ struct cfl_list *backup;
+ struct flb_loki_dynamic_tenant_id_entry *entry;
+
+ cfl_list_foreach_safe(iterator, backup, dynamic_tenant_list) {
+ entry = cfl_list_entry(iterator,
+ struct flb_loki_dynamic_tenant_id_entry,
+ _head);
+
+ dynamic_tenant_id_destroy(entry);
+ }
+}
+
+static int cb_loki_exit(void *data, struct flb_config *config)
+{
+ struct flb_loki *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ pthread_mutex_lock(&ctx->dynamic_tenant_list_lock);
+
+ release_dynamic_tenant_ids(&ctx->dynamic_tenant_list);
+
+ pthread_mutex_unlock(&ctx->dynamic_tenant_list_lock);
+
+ loki_config_destroy(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "tenant_id", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_loki, tenant_id),
+ "Tenant ID used by default to push logs to Loki. If omitted or empty "
+ "it assumes Loki is running in single-tenant mode and no X-Scope-OrgID "
+ "header is sent."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "tenant_id_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_loki, tenant_id_key_config),
+        "If set, X-Scope-OrgID will be the value of the specified key from the incoming record. "
+        "It is useful to set X-Scope-OrgID dynamically."
+ },
+
+ {
+ FLB_CONFIG_MAP_CLIST, "labels", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_loki, labels),
+        "Stream labels for API requests. If no value is set, the default label is 'job=fluent-bit'"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "auto_kubernetes_labels", "false",
+ 0, FLB_TRUE, offsetof(struct flb_loki, auto_kubernetes_labels),
+ "If set to true, it will add all Kubernetes labels to Loki labels.",
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "drop_single_key", "false",
+ 0, FLB_TRUE, offsetof(struct flb_loki, drop_single_key),
+ "If set to true and only a single key remains, the log line sent to Loki "
+ "will be the value of that key.",
+ },
+
+ {
+ FLB_CONFIG_MAP_CLIST, "label_keys", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_loki, label_keys),
+ "Comma separated list of keys to use as stream labels."
+ },
+
+ {
+ FLB_CONFIG_MAP_CLIST, "remove_keys", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_loki, remove_keys),
+ "Comma separated list of keys to remove."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "line_format", "json",
+ 0, FLB_TRUE, offsetof(struct flb_loki, line_format),
+ "Format to use when flattening the record to a log line. Valid values are "
+ "'json' or 'key_value'. If set to 'json' the log line sent to Loki will be "
+ "the Fluent Bit record dumped as json. If set to 'key_value', the log line "
+ "will be each item in the record concatenated together (separated by a "
+        "single space) in the format 'key=value'."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "label_map_path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_loki, label_map_path),
+        "Path to a label map file"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "http_user", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_loki, http_user),
+ "Set HTTP auth user"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "http_passwd", "",
+ 0, FLB_TRUE, offsetof(struct flb_loki, http_passwd),
+ "Set HTTP auth password"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "bearer_token", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_loki, bearer_token),
+ "Set bearer token auth"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "compress", NULL,
+ 0, FLB_FALSE, 0,
+        "Set payload compression for network transfer. The only available option is 'gzip'"
+ },
+
+ /* EOF */
+ {0}
+};
+
+/* for testing */
+static int cb_loki_format_test(struct flb_config *config,
+ struct flb_input_instance *ins,
+ void *plugin_context,
+ void *flush_ctx,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_data, size_t *out_size)
+{
+ int total_records;
+ flb_sds_t payload = NULL;
+ flb_sds_t dynamic_tenant_id;
+ struct flb_loki *ctx = plugin_context;
+
+ dynamic_tenant_id = NULL;
+
+ /* Count number of records */
+ total_records = flb_mp_count(data, bytes);
+
+ payload = loki_compose_payload(ctx, total_records,
+ (char *) tag, tag_len, data, bytes,
+ &dynamic_tenant_id);
+ if (payload == NULL) {
+ if (dynamic_tenant_id != NULL) {
+ flb_sds_destroy(dynamic_tenant_id);
+ }
+
+ return -1;
+ }
+
+ *out_data = payload;
+ *out_size = flb_sds_len(payload);
+
+ return 0;
+}
+
+/* Plugin reference */
+struct flb_output_plugin out_loki_plugin = {
+ .name = "loki",
+ .description = "Loki",
+ .cb_init = cb_loki_init,
+ .cb_flush = cb_loki_flush,
+ .cb_exit = cb_loki_exit,
+ .config_map = config_map,
+
+ /* for testing */
+ .test_formatter.callback = cb_loki_format_test,
+
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_loki/loki.h b/src/fluent-bit/plugins/out_loki/loki.h
new file mode 100644
index 000000000..2011cee3d
--- /dev/null
+++ b/src/fluent-bit/plugins/out_loki/loki.h
@@ -0,0 +1,98 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_LOKI_H
+#define FLB_OUT_LOKI_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_hash_table.h>
+#include <cfl/cfl_list.h>
+
+#define FLB_LOKI_CT "Content-Type"
+#define FLB_LOKI_CT_JSON "application/json"
+#define FLB_LOKI_URI "/loki/api/v1/push"
+#define FLB_LOKI_HOST "127.0.0.1"
+#define FLB_LOKI_PORT 3100
+#define FLB_LOKI_HEADER_SCOPE "X-Scope-OrgID"
+
+#define FLB_LOKI_KV_STR 0 /* sds string */
+#define FLB_LOKI_KV_RA 1 /* record accessor */
+#define FLB_LOKI_KV_K8S 2 /* kubernetes label */
+
+/* Output line format */
+#define FLB_LOKI_FMT_JSON 0
+#define FLB_LOKI_FMT_KV 1
+
+struct flb_loki_kv {
+ int val_type; /* FLB_LOKI_KV_STR or FLB_LOKI_KV_RA */
+ flb_sds_t key; /* string key */
+ flb_sds_t str_val; /* string value */
+ flb_sds_t key_normalized; /* normalized key name when using ra */
+ struct flb_record_accessor *ra_key; /* record accessor key context */
+ struct flb_record_accessor *ra_val; /* record accessor value context */
+ struct mk_list _head; /* link to flb_loki->labels_list */
+};
+
+struct flb_loki {
+ /* Public configuration properties */
+ int auto_kubernetes_labels;
+ int drop_single_key;
+ flb_sds_t line_format;
+ flb_sds_t tenant_id;
+ flb_sds_t tenant_id_key_config;
+ int compress_gzip;
+
+ /* HTTP Auth */
+ flb_sds_t http_user;
+ flb_sds_t http_passwd;
+
+ /* Bearer Token Auth */
+ flb_sds_t bearer_token;
+
+ /* Labels */
+ struct mk_list *labels;
+ struct mk_list *label_keys;
+ struct mk_list *remove_keys;
+
+ flb_sds_t label_map_path;
+
+ /* Private */
+ int tcp_port;
+ char *tcp_host;
+ int out_line_format;
+ int ra_used; /* number of record accessor label keys */
+ struct flb_record_accessor *ra_k8s; /* kubernetes record accessor */
+ struct mk_list labels_list; /* list of flb_loki_kv nodes */
+ struct mk_list remove_keys_derived; /* remove_keys with label RAs */
+ struct flb_mp_accessor *remove_mpa; /* remove_keys multi-pattern accessor */
+ struct flb_record_accessor *ra_tenant_id_key; /* dynamic tenant id key */
+
+ struct cfl_list dynamic_tenant_list;
+ pthread_mutex_t dynamic_tenant_list_lock;
+
+ /* Upstream Context */
+ struct flb_upstream *u;
+
+ /* Plugin instance */
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_nats/CMakeLists.txt b/src/fluent-bit/plugins/out_nats/CMakeLists.txt
new file mode 100644
index 000000000..9dabb2f2c
--- /dev/null
+++ b/src/fluent-bit/plugins/out_nats/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ nats.c)
+
+FLB_PLUGIN(out_nats "${src}" "")
diff --git a/src/fluent-bit/plugins/out_nats/nats.c b/src/fluent-bit/plugins/out_nats/nats.c
new file mode 100644
index 000000000..aac615c13
--- /dev/null
+++ b/src/fluent-bit/plugins/out_nats/nats.c
@@ -0,0 +1,252 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include <stdio.h>
+#include <msgpack.h>
+
+#include "nats.h"
+
+static int cb_nats_init(struct flb_output_instance *ins, struct flb_config *config,
+ void *data)
+{
+ int io_flags;
+ int ret;
+ struct flb_upstream *upstream;
+ struct flb_out_nats_config *ctx;
+
+ /* Set default network configuration */
+ flb_output_net_default("127.0.0.1", 4222, ins);
+
+ /* Allocate plugin context */
+ ctx = flb_malloc(sizeof(struct flb_out_nats_config));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ /* Set default values */
+ ret = flb_output_config_map_set(ins, ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "flb_output_config_map_set failed");
+ flb_free(ctx);
+ return -1;
+ }
+
+ io_flags = FLB_IO_TCP;
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ /* Prepare an upstream handler */
+ upstream = flb_upstream_create(config,
+ ins->host.name,
+ ins->host.port,
+ io_flags,
+ NULL);
+ if (!upstream) {
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->u = upstream;
+ ctx->ins = ins;
+ flb_output_upstream_set(ctx->u, ins);
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+}
+
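+/*
+ * Convert the msgpack records into a JSON array of [timestamp, map] entries,
+ * injecting the tag as a 'tag' key in every record map.
+ */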
+static int msgpack_to_json(struct flb_out_nats_config *ctx,
+ const void *data, size_t bytes,
+ const char *tag, int tag_len,
+ char **out_json, size_t *out_size)
+{
+ int i;
+ int map_size;
+ size_t array_size = 0;
+ flb_sds_t out_buf;
+ msgpack_object map;
+ msgpack_object m_key;
+ msgpack_object m_val;
+ msgpack_packer mp_pck;
+ msgpack_sbuffer mp_sbuf;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ array_size = flb_mp_count(data, bytes);
+
+ /* Convert MsgPack to JSON */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+ msgpack_pack_array(&mp_pck, array_size);
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ map = *log_event.body;
+ map_size = map.via.map.size;
+
+ msgpack_pack_array(&mp_pck, 2);
+ msgpack_pack_double(&mp_pck, flb_time_to_double(&log_event.timestamp));
+
+ msgpack_pack_map(&mp_pck, map_size + 1);
+ msgpack_pack_str(&mp_pck, 3);
+ msgpack_pack_str_body(&mp_pck, "tag", 3);
+ msgpack_pack_str(&mp_pck, tag_len);
+ msgpack_pack_str_body(&mp_pck, tag, tag_len);
+
+ for (i = 0; i < map_size; i++) {
+ m_key = map.via.map.ptr[i].key;
+ m_val = map.via.map.ptr[i].val;
+
+ msgpack_pack_object(&mp_pck, m_key);
+ msgpack_pack_object(&mp_pck, m_val);
+ }
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ out_buf = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ if (!out_buf) {
+ return -1;
+ }
+
+ *out_json = out_buf;
+ *out_size = flb_sds_len(out_buf);
+
+ return 0;
+}
+
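+/*
+ * Flush callback: send the NATS CONNECT handshake, then publish the
+ * JSON-encoded chunk using a PUB command.
+ */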
+static void cb_nats_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ size_t bytes_sent;
+ size_t json_len;
+ flb_sds_t json_msg;
+ char *request;
+ int req_len;
+ struct flb_out_nats_config *ctx = out_context;
+ struct flb_connection *u_conn;
+
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "no upstream connections available");
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+    /* Before flushing the content, check if we need to start the handshake */
+ ret = flb_io_net_write(u_conn,
+ NATS_CONNECT,
+ sizeof(NATS_CONNECT) - 1,
+ &bytes_sent);
+ if (ret == -1) {
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Convert original Fluent Bit MsgPack format to JSON */
+ ret = msgpack_to_json(ctx,
+ event_chunk->data, event_chunk->size,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ &json_msg, &json_len);
+ if (ret == -1) {
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ /* Compose the NATS Publish request */
+ request = flb_malloc(json_len + flb_sds_len(event_chunk->tag) + 32);
+ if (!request) {
+ flb_errno();
+ flb_sds_destroy(json_msg);
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ req_len = snprintf(request, flb_sds_len(event_chunk->tag)+ 32,
+ "PUB %s %zu\r\n",
+ event_chunk->tag, json_len);
+
+ /* Append JSON message and ending CRLF */
+ memcpy(request + req_len, json_msg, json_len);
+ req_len += json_len;
+ request[req_len++] = '\r';
+ request[req_len++] = '\n';
+ flb_sds_destroy(json_msg);
+
+ ret = flb_io_net_write(u_conn, request, req_len, &bytes_sent);
+ if (ret == -1) {
+ flb_errno();
+ flb_free(request);
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ flb_free(request);
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+int cb_nats_exit(void *data, struct flb_config *config)
+{
+ (void) config;
+ struct flb_out_nats_config *ctx = data;
+
+ flb_upstream_destroy(ctx->u);
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ /* EOF */
+ {0}
+};
+
+struct flb_output_plugin out_nats_plugin = {
+ .name = "nats",
+ .description = "NATS Server",
+ .cb_init = cb_nats_init,
+ .cb_flush = cb_nats_flush,
+ .cb_exit = cb_nats_exit,
+ .flags = FLB_OUTPUT_NET,
+ .config_map = config_map
+};
diff --git a/src/fluent-bit/plugins/out_nats/nats.h b/src/fluent-bit/plugins/out_nats/nats.h
new file mode 100644
index 000000000..ae0586d82
--- /dev/null
+++ b/src/fluent-bit/plugins/out_nats/nats.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_NATS_H
+#define FLB_OUT_NATS_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_version.h>
+
+#define NATS_CONNECT "CONNECT {\"verbose\":false,\"pedantic\":false,\"ssl_required\":false,\"name\":\"fluent-bit\",\"lang\":\"c\",\"version\":\"" FLB_VERSION_STR "\"}\r\n"
+
+struct flb_out_nats_config {
+ struct flb_upstream *u;
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_nrlogs/CMakeLists.txt b/src/fluent-bit/plugins/out_nrlogs/CMakeLists.txt
new file mode 100644
index 000000000..c4ae7a224
--- /dev/null
+++ b/src/fluent-bit/plugins/out_nrlogs/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ newrelic.c
+ )
+
+FLB_PLUGIN(out_nrlogs "${src}" "")
diff --git a/src/fluent-bit/plugins/out_nrlogs/newrelic.c b/src/fluent-bit/plugins/out_nrlogs/newrelic.c
new file mode 100644
index 000000000..54163a66a
--- /dev/null
+++ b/src/fluent-bit/plugins/out_nrlogs/newrelic.c
@@ -0,0 +1,566 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_version.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include "newrelic.h"
+
+static inline uint64_t time_to_milliseconds(struct flb_time *tms)
+{
+ return ((tms->tm.tv_sec * 1000) + (tms->tm.tv_nsec / 1000000));
+}
+
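+/* Return FLB_TRUE if the msgpack key is a string equal to 'name' */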
+static inline int key_matches(msgpack_object k, char *name, int len)
+{
+ if (k.type != MSGPACK_OBJECT_STR) {
+ return FLB_FALSE;
+ }
+
+ if (k.via.str.size != len) {
+ return FLB_FALSE;
+ }
+
+ if (memcmp(k.via.str.ptr, name, len) == 0) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
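+/*
+ * Package one record for the New Relic payload: 'timestamp' in milliseconds,
+ * an optional 'message' (taken from the 'log' or 'message' key) and the
+ * remaining keys under 'attributes'.
+ */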
+static int package_record(struct flb_time *ts, msgpack_object *map,
+ msgpack_packer *mp_pck)
+{
+ int i;
+ int map_size = 0;
+ uint64_t timestamp_ms;
+ int log = -1;
+ int message = -1;
+ msgpack_object k;
+ msgpack_object v;
+
+ /* Check if 'message' or 'log' key exists in the record */
+ for (i = 0; i < map->via.map.size; i++) {
+ k = map->via.map.ptr[i].key;
+
+ if (message == -1 && key_matches(k, "message", 7) == FLB_TRUE) {
+ message = i;
+ continue;
+ }
+
+ /* If we find 'log', just stop iterating */
+ if (log == -1 && key_matches(k, "log", 3) == FLB_TRUE) {
+ log = i;
+ break;
+ }
+ }
+
+ /* The log map contains at least 2 entries: 'timestamp' and 'attributes' */
+ map_size = 2;
+
+ /* If 'log' or 'message' are set, we add the 'message' key */
+ if (log >= 0 || message >= 0) {
+ map_size++;
+ }
+
+ /* Package the final record */
+ msgpack_pack_map(mp_pck, map_size);
+
+ /* Convert timestamp to milliseconds */
+ timestamp_ms = time_to_milliseconds(ts);
+
+ /* Pack timestamp */
+ msgpack_pack_str(mp_pck, 9);
+ msgpack_pack_str_body(mp_pck, "timestamp", 9);
+ msgpack_pack_uint64(mp_pck, timestamp_ms);
+
+ /* Keep 'log' over 'message' */
+ if (log >= 0) {
+ message = -1;
+ msgpack_pack_str(mp_pck, 7);
+ msgpack_pack_str_body(mp_pck, "message", 7);
+ v = map->via.map.ptr[log].val;
+ msgpack_pack_object(mp_pck, v);
+ }
+ else if (message >= 0) {
+ msgpack_pack_str(mp_pck, 7);
+ msgpack_pack_str_body(mp_pck, "message", 7);
+ v = map->via.map.ptr[message].val;
+ msgpack_pack_object(mp_pck, v);
+ }
+
+ /* Adjust attributes map size */
+ map_size = map->via.map.size;
+ if (log >= 0 || message >= 0) {
+ map_size--;
+ }
+
+ msgpack_pack_str(mp_pck, 10);
+ msgpack_pack_str_body(mp_pck, "attributes", 10);
+ msgpack_pack_map(mp_pck, map_size);
+
+ /* Pack remaining attributes */
+ for (i = 0; i < map->via.map.size; i++) {
+ k = map->via.map.ptr[i].key;
+ v = map->via.map.ptr[i].val;
+
+ if (log >= 0 && key_matches(k, "log", 3) == FLB_TRUE) {
+ continue;
+ }
+
+ if (message >= 0 && key_matches(k, "message", 7) == FLB_TRUE) {
+ continue;
+ }
+
+ msgpack_pack_object(mp_pck, k);
+ msgpack_pack_object(mp_pck, v);
+ }
+
+ return 0;
+}
+
+static flb_sds_t newrelic_compose_payload(struct flb_newrelic *ctx,
+ const void *data, size_t bytes)
+{
+ int total_records;
+ flb_sds_t json;
+ msgpack_packer mp_pck;
+ msgpack_sbuffer mp_sbuf;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ /*
+ * Following the New Relic Fluentd implementation, this is the
+ * suggested structure for our payload:
+ *
+ * payload = {[
+ * 'common' => {
+ * 'attributes' => {
+ * 'plugin' => {
+ * 'type' => 'fluentd',
+ * 'version' => NewrelicFluentdOutput::VERSION,
+ * }
+ * }
+ * },
+ * 'logs' => []
+ * ]}
+ */
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return NULL;
+ }
+
+ /* Count number of records */
+ total_records = flb_mp_count(data, bytes);
+
+ /* Initialize msgpack buffers */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ /* The New Relic MELT API format is wrapped in an array */
+ msgpack_pack_array(&mp_pck, 1);
+
+ /* Map for 'common' and 'logs' */
+ msgpack_pack_map(&mp_pck, 2);
+
+ /* 'common' map */
+ msgpack_pack_str(&mp_pck, 6);
+ msgpack_pack_str_body(&mp_pck, "common", 6);
+ msgpack_pack_map(&mp_pck, 1);
+
+ /* common['attributes'] */
+ msgpack_pack_str(&mp_pck, 10);
+ msgpack_pack_str_body(&mp_pck, "attributes", 10);
+ msgpack_pack_map(&mp_pck, 1);
+
+ /* common['attributes']['plugin'] */
+ msgpack_pack_str(&mp_pck, 6);
+ msgpack_pack_str_body(&mp_pck, "plugin", 6);
+ msgpack_pack_map(&mp_pck, 2);
+
+ /* common['attributes']['plugin']['type'] = 'Fluent Bit' */
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "type", 4);
+ msgpack_pack_str(&mp_pck, 10);
+ msgpack_pack_str_body(&mp_pck, "Fluent Bit", 10);
+
+ /* common['attributes']['plugin']['version'] = 'FLB_VERSION_STR' */
+ msgpack_pack_str(&mp_pck, 7);
+ msgpack_pack_str_body(&mp_pck, "version", 7);
+ msgpack_pack_str(&mp_pck, sizeof(FLB_VERSION_STR) - 1);
+ msgpack_pack_str_body(&mp_pck, FLB_VERSION_STR, sizeof(FLB_VERSION_STR) - 1);
+
+ /* 'logs' array */
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "logs", 4);
+ msgpack_pack_array(&mp_pck, total_records);
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ /* Package the record */
+ package_record(&log_event.timestamp, log_event.body, &mp_pck);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ json = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ return json;
+}
+
+static void newrelic_config_destroy(struct flb_newrelic *ctx)
+{
+ flb_free(ctx->nr_protocol);
+ flb_free(ctx->nr_host);
+ flb_free(ctx->nr_uri);
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+ flb_free(ctx);
+}
+
+static struct flb_newrelic *newrelic_config_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ char *port = NULL;
+ struct flb_newrelic *ctx;
+ struct flb_upstream *upstream;
+
+ /* Create context */
+ ctx = flb_calloc(1, sizeof(struct flb_newrelic));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ /* Load config map */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ newrelic_config_destroy(ctx);
+ return NULL;
+ }
+
+ /* At least we need one of api_key or license_key */
+ if (!ctx->api_key && !ctx->license_key) {
+ flb_plg_error(ctx->ins, "no 'api_key' or 'license_key' was configured");
+ newrelic_config_destroy(ctx);
+ return NULL;
+ }
+
+ /* Parse Base URL */
+ ret = flb_utils_url_split(ctx->base_uri,
+ &ctx->nr_protocol,
+ &ctx->nr_host,
+ &port,
+ &ctx->nr_uri);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error parsing base_uri '%s'", ctx->base_uri);
+ newrelic_config_destroy(ctx);
+ return NULL;
+ }
+ ctx->nr_port = atoi(port);
+ flb_free(port);
+
+ if (strcasecmp(ctx->compress, "gzip") == 0) {
+ ctx->compress_gzip = FLB_TRUE;
+ }
+ else if (flb_utils_bool(ctx->compress) == FLB_FALSE) {
+ ctx->compress_gzip = FLB_FALSE;
+ }
+ else {
+ flb_plg_warn(ctx->ins,
+ "unknown compress encoding value '%s', "
+ "payload compression has been disabled",
+ ctx->compress);
+ ctx->compress_gzip = FLB_FALSE;
+ }
+
+ /* Create Upstream connection context */
+ upstream = flb_upstream_create(config,
+ ctx->nr_host,
+ ctx->nr_port,
+ FLB_IO_TLS, ins->tls);
+ if (!upstream) {
+ flb_free(ctx);
+ return NULL;
+ }
+ ctx->u = upstream;
+ flb_output_upstream_set(ctx->u, ins);
+
+ return ctx;
+}
+
+static int cb_newrelic_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct flb_newrelic *ctx;
+
+ /* Create plugin context */
+ ctx = newrelic_config_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "cannot initialize configuration");
+ return -1;
+ }
+
+ /* Register context with plugin instance */
+ flb_output_set_context(ins, ctx);
+
+    /*
+     * This plugin instance uses the HTTP client interface, let's register
+     * its debugging callbacks.
+     */
+ flb_output_set_http_debug_callbacks(ins);
+
+ flb_plg_info(ins, "configured, hostname=%s:%i", ctx->nr_host, ctx->nr_port);
+ return 0;
+}
+
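+/*
+ * Flush callback: compose the payload, optionally compress it with gzip and
+ * POST it to the configured New Relic endpoint.
+ */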
+static void cb_newrelic_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ int out_ret = FLB_OK;
+ int compressed = FLB_FALSE;
+ size_t b_sent;
+ flb_sds_t payload;
+ void *payload_buf = NULL;
+ size_t payload_size = 0;
+ struct flb_newrelic *ctx = out_context;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+
+ /* Format the data to the expected Newrelic Payload */
+ payload = newrelic_compose_payload(ctx,
+ event_chunk->data, event_chunk->size);
+ if (!payload) {
+ flb_plg_error(ctx->ins, "cannot compose request payload");
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Map payload */
+ payload_buf = (void *) payload;
+ payload_size = flb_sds_len(payload);
+
+ /* Should we compress the payload ? */
+ if (ctx->compress_gzip == FLB_TRUE) {
+ ret = flb_gzip_compress(payload, flb_sds_len(payload),
+ &payload_buf, &payload_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "cannot gzip payload, disabling compression");
+ }
+ else {
+ compressed = FLB_TRUE;
+ flb_sds_destroy(payload);
+ }
+ }
+
+ /* Lookup an available connection context */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "no upstream connections available");
+ if (compressed == FLB_TRUE) {
+ flb_free(payload_buf);
+ }
+ else {
+ flb_sds_destroy(payload);
+ }
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Create HTTP client context */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->nr_uri,
+ payload_buf, payload_size,
+ ctx->nr_host, ctx->nr_port,
+ NULL, 0);
+ if (!c) {
+ flb_plg_error(ctx->ins, "cannot create HTTP client context");
+ if (compressed == FLB_TRUE) {
+ flb_free(payload_buf);
+ }
+ else {
+ flb_sds_destroy(payload);
+ }
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Set callback context to the HTTP client context */
+ flb_http_set_callback_context(c, ctx->ins->callback);
+
+ /* User Agent */
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+
+ /* API / License Key */
+ if (ctx->license_key) {
+ flb_http_add_header(c,
+ "X-License-Key", 13,
+ ctx->license_key, flb_sds_len(ctx->license_key));
+ }
+ else if (ctx->api_key) {
+ flb_http_add_header(c,
+ "X-Insert-Key", 12,
+ ctx->api_key, flb_sds_len(ctx->api_key));
+ }
+
+ /* Add Content-Type header */
+ flb_http_add_header(c,
+ FLB_NEWRELIC_CT, sizeof(FLB_NEWRELIC_CT) - 1,
+ FLB_NEWRELIC_CT_JSON, sizeof(FLB_NEWRELIC_CT_JSON) - 1);
+
+ /* Encoding */
+ if (compressed == FLB_TRUE) {
+ flb_http_set_content_encoding_gzip(c);
+ }
+
+ /* Send HTTP request */
+ ret = flb_http_do(c, &b_sent);
+
+ /* Destroy buffers */
+ if (compressed == FLB_FALSE) {
+ flb_sds_destroy(payload);
+ }
+ else {
+ flb_free(payload_buf);
+ }
+
+ /* Validate HTTP client return status */
+ if (ret == 0) {
+        /*
+         * Only allow the following HTTP status codes:
+         *
+         * - 200: OK
+         * - 201: Created
+         * - 202: Accepted
+         * - 203: Non-Authoritative Information
+         * - 204: No Content
+         * - 205: Reset Content
+         */
+ if (c->resp.status < 200 || c->resp.status > 205) {
+ if (c->resp.payload) {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i\n%s",
+ ctx->nr_host, ctx->nr_port, c->resp.status,
+ c->resp.payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->nr_host, ctx->nr_port, c->resp.status);
+ }
+ out_ret = FLB_RETRY;
+ }
+ else {
+ if (c->resp.payload) {
+ flb_plg_info(ctx->ins, "%s:%i, HTTP status=%i\n%s",
+ ctx->nr_host, ctx->nr_port,
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_info(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->nr_host, ctx->nr_port,
+ c->resp.status);
+ }
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "could not flush records to %s:%i (http_do=%i)",
+ ctx->nr_host, ctx->nr_port, ret);
+ out_ret = FLB_RETRY;
+ }
+
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(out_ret);
+}
+
+static int cb_newrelic_exit(void *data, struct flb_config *config)
+{
+ struct flb_newrelic *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ newrelic_config_destroy(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "base_uri", FLB_NEWRELIC_BASE_URI,
+ 0, FLB_TRUE, offsetof(struct flb_newrelic, base_uri),
+        "Full base URI of the New Relic log ingestion endpoint"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "api_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_newrelic, api_key),
+ "New Relic API Key"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "license_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_newrelic, license_key),
+ "New Relic License Key"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "compress", "gzip",
+ 0, FLB_TRUE, offsetof(struct flb_newrelic, compress),
+ "Set payload compression mechanism",
+ },
+
+ /* EOF */
+ {0}
+
+};
+
+/* Plugin reference */
+struct flb_output_plugin out_nrlogs_plugin = {
+ .name = "nrlogs",
+ .description = "New Relic",
+ .cb_init = cb_newrelic_init,
+ .cb_flush = cb_newrelic_flush,
+ .cb_exit = cb_newrelic_exit,
+ .config_map = config_map,
+ .flags = FLB_OUTPUT_NET | FLB_IO_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_nrlogs/newrelic.h b/src/fluent-bit/plugins/out_nrlogs/newrelic.h
new file mode 100644
index 000000000..60a8b92e6
--- /dev/null
+++ b/src/fluent-bit/plugins/out_nrlogs/newrelic.h
@@ -0,0 +1,52 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_NEWRELIC_H
+#define FLB_OUT_NEWRELIC_H
+
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_upstream.h>
+
+#define FLB_NEWRELIC_BASE_URI "https://log-api.newrelic.com/log/v1"
+
+#define FLB_NEWRELIC_CT "Content-Type"
+#define FLB_NEWRELIC_CT_JSON "application/json"
+
+struct flb_newrelic {
+ /* Incoming Configuration Properties */
+ flb_sds_t base_uri;
+ flb_sds_t api_key;
+ flb_sds_t license_key;
+ flb_sds_t compress;
+
+ /* Internal parsed URL */
+ char *nr_protocol;
+ char *nr_host;
+ int nr_port;
+ char *nr_uri;
+ int compress_gzip;
+
+ /* Upstream Context */
+ struct flb_upstream *u;
+
+ /* Plugin instance */
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_null/CMakeLists.txt b/src/fluent-bit/plugins/out_null/CMakeLists.txt
new file mode 100644
index 000000000..77a80fe90
--- /dev/null
+++ b/src/fluent-bit/plugins/out_null/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ null.c)
+
+FLB_PLUGIN(out_null "${src}" "")
diff --git a/src/fluent-bit/plugins/out_null/null.c b/src/fluent-bit/plugins/out_null/null.c
new file mode 100644
index 000000000..a1e1689a0
--- /dev/null
+++ b/src/fluent-bit/plugins/out_null/null.c
@@ -0,0 +1,178 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+
+struct flb_null {
+ struct flb_output_instance *ins;
+
+ /* config map properties */
+ int out_format;
+ int json_date_format;
+ flb_sds_t json_date_key;
+ flb_sds_t date_key;
+};
+
+int cb_null_init(struct flb_output_instance *ins, struct flb_config *config,
+ void *data)
+{
+ int ret;
+ (void) config;
+ (void) data;
+ const char *tmp;
+ struct flb_null *ctx;
+
+ ctx = flb_malloc(sizeof(struct flb_null));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ ctx->out_format = FLB_PACK_JSON_FORMAT_NONE;
+ tmp = flb_output_get_property("format", ins);
+ if (tmp) {
+ ret = flb_pack_to_json_format_type(tmp);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "unrecognized 'format' option. "
+ "Using 'msgpack'");
+ }
+ else {
+ ctx->out_format = ret;
+ }
+ }
+
+ /* Date key */
+ ctx->date_key = ctx->json_date_key;
+ tmp = flb_output_get_property("json_date_key", ins);
+ if (tmp) {
+ /* Just check if we have to disable it */
+ if (flb_utils_bool(tmp) == FLB_FALSE) {
+ ctx->date_key = NULL;
+ }
+ }
+
+ /* Date format for JSON output */
+ ctx->json_date_format = FLB_PACK_JSON_DATE_DOUBLE;
+ tmp = flb_output_get_property("json_date_format", ins);
+ if (tmp) {
+ ret = flb_pack_to_json_date_type(tmp);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "invalid json_date_format '%s'. "
+ "Using 'double' type", tmp);
+ }
+ else {
+ ctx->json_date_format = ret;
+ }
+ }
+
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+}
+
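+/*
+ * Flush callback: optionally encode the chunk as JSON (useful to benchmark
+ * the conversion) and then discard the data.
+ */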
+static void cb_null_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ (void) out_context;
+ (void) config;
+ flb_sds_t json;
+ struct flb_null *ctx = out_context;
+
+#ifdef FLB_HAVE_METRICS
+ /* Check if the event type is metrics, just return */
+ if (event_chunk->type == FLB_EVENT_TYPE_METRICS) {
+ FLB_OUTPUT_RETURN(FLB_OK);
+ }
+#endif
+
+ /*
+ * There are cases where the user might want to test the performance
+ * of msgpack payload conversion to JSON. Nothing will be printed,
+     * just encoded and destroyed.
+ */
+ if (ctx->out_format != FLB_PACK_JSON_FORMAT_NONE) {
+ json = flb_pack_msgpack_to_json_format(event_chunk->data,
+ event_chunk->size,
+ ctx->out_format,
+ ctx->json_date_format,
+ ctx->date_key);
+ flb_sds_destroy(json);
+ }
+
+ flb_plg_debug(ctx->ins, "discarding %lu bytes", event_chunk->size);
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "format", NULL,
+ 0, FLB_FALSE, 0,
+        "Specifies the data format to be printed. Supported formats are msgpack, json, json_lines and json_stream."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "json_date_format", NULL,
+ 0, FLB_FALSE, 0,
+        FBL_PACK_JSON_DATE_FORMAT_DESCRIPTION
+ },
+ {
+ FLB_CONFIG_MAP_STR, "json_date_key", "date",
+ 0, FLB_TRUE, offsetof(struct flb_null, json_date_key),
+        "Specifies the name of the date field in output."
+ },
+
+ /* EOF */
+ {0}
+};
+
+static int cb_null_exit(void *data, struct flb_config *config)
+{
+ struct flb_null *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ flb_free(ctx);
+ return 0;
+}
+
+struct flb_output_plugin out_null_plugin = {
+ .name = "null",
+ .description = "Throws away events",
+ .cb_init = cb_null_init,
+ .cb_flush = cb_null_flush,
+ .cb_exit = cb_null_exit,
+ .event_type = FLB_OUTPUT_LOGS | FLB_OUTPUT_METRICS,
+ .config_map = config_map,
+ .flags = 0,
+ .workers = 1,
+};
diff --git a/src/fluent-bit/plugins/out_opensearch/CMakeLists.txt b/src/fluent-bit/plugins/out_opensearch/CMakeLists.txt
new file mode 100644
index 000000000..0e2bf59fe
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opensearch/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ os_conf.c
+ opensearch.c
+ )
+
+FLB_PLUGIN(out_opensearch "${src}" "")
diff --git a/src/fluent-bit/plugins/out_opensearch/opensearch.c b/src/fluent-bit/plugins/out_opensearch/opensearch.c
new file mode 100644
index 000000000..dbd0fa8d0
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opensearch/opensearch.c
@@ -0,0 +1,1291 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_signv4.h>
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_ra_key.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <msgpack.h>
+
+#include <cfl/cfl.h>
+
+#include "opensearch.h"
+#include "os_conf.h"
+
+static int os_pack_array_content(msgpack_packer *tmp_pck,
+ msgpack_object array,
+ struct flb_opensearch *ctx);
+
+#ifdef FLB_HAVE_AWS
+static flb_sds_t add_aws_auth(struct flb_http_client *c,
+ struct flb_opensearch *ctx)
+{
+ flb_sds_t signature = NULL;
+ int ret;
+
+ flb_plg_debug(ctx->ins, "Signing request with AWS Sigv4");
+
+ /* Amazon OpenSearch Sigv4 does not allow the host header to include the port */
+ ret = flb_http_strip_port_from_host(c);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "could not strip port from host for sigv4");
+ return NULL;
+ }
+
+ /* AWS Fluent Bit user agent */
+ flb_http_add_header(c, "User-Agent", 10, "aws-fluent-bit-plugin", 21);
+
+ signature = flb_signv4_do(c, FLB_TRUE, FLB_TRUE, time(NULL),
+ ctx->aws_region, ctx->aws_service_name,
+ S3_MODE_SIGNED_PAYLOAD, ctx->aws_unsigned_headers,
+ ctx->aws_provider);
+ if (!signature) {
+ flb_plg_error(ctx->ins, "could not sign request with sigv4");
+ return NULL;
+ }
+ return signature;
+}
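+
+/*
+ * Editor's note: the returned signature is an flb_sds_t owned by the caller;
+ * cb_opensearch_flush() releases it after the request completes. The port is
+ * stripped from the Host header first because Amazon OpenSearch SigV4
+ * signing does not accept host:port values.
+ */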
+#endif /* FLB_HAVE_AWS */
+
+static int os_pack_map_content(msgpack_packer *tmp_pck,
+ msgpack_object map,
+ struct flb_opensearch *ctx)
+{
+ int i;
+ char *ptr_key = NULL;
+ char buf_key[256];
+ msgpack_object *k;
+ msgpack_object *v;
+
+ for (i = 0; i < map.via.map.size; i++) {
+ k = &map.via.map.ptr[i].key;
+ v = &map.via.map.ptr[i].val;
+ ptr_key = NULL;
+
+ /* Store key */
+ const char *key_ptr = NULL;
+ size_t key_size = 0;
+
+ if (k->type == MSGPACK_OBJECT_BIN) {
+ key_ptr = k->via.bin.ptr;
+ key_size = k->via.bin.size;
+ }
+ else if (k->type == MSGPACK_OBJECT_STR) {
+ key_ptr = k->via.str.ptr;
+ key_size = k->via.str.size;
+ }
+
+ if (key_size < (sizeof(buf_key) - 1)) {
+ memcpy(buf_key, key_ptr, key_size);
+ buf_key[key_size] = '\0';
+ ptr_key = buf_key;
+ }
+ else {
+ /* Long map keys have a performance penalty */
+ ptr_key = flb_malloc(key_size + 1);
+ if (!ptr_key) {
+ flb_errno();
+ return -1;
+ }
+
+ memcpy(ptr_key, key_ptr, key_size);
+ ptr_key[key_size] = '\0';
+ }
+
+ /*
+ * Sanitize the key name; dots are not allowed in field names:
+ *
+ * https://goo.gl/R5NMTr
+ */
+ if (ctx->replace_dots == FLB_TRUE) {
+ char *p = ptr_key;
+ char *end = ptr_key + key_size;
+ while (p != end) {
+ if (*p == '.') *p = '_';
+ p++;
+ }
+ }
+
+ /* Append the key */
+ msgpack_pack_str(tmp_pck, key_size);
+ msgpack_pack_str_body(tmp_pck, ptr_key, key_size);
+
+ /* Release temporary key if was allocated */
+ if (ptr_key && ptr_key != buf_key) {
+ flb_free(ptr_key);
+ }
+ ptr_key = NULL;
+
+ /*
+ * The value can be any data type, if it's a map we need to
+ * sanitize to avoid dots.
+ */
+ if (v->type == MSGPACK_OBJECT_MAP) {
+ msgpack_pack_map(tmp_pck, v->via.map.size);
+ os_pack_map_content(tmp_pck, *v, ctx);
+ }
+ /*
+ * The value can be any data type, if it's an array we need to
+ * pass it to os_pack_array_content.
+ */
+ else if (v->type == MSGPACK_OBJECT_ARRAY) {
+ msgpack_pack_array(tmp_pck, v->via.array.size);
+ os_pack_array_content(tmp_pck, *v, ctx);
+ }
+ else {
+ msgpack_pack_object(tmp_pck, *v);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Iterate through the array and sanitize elements.
+ * Mutual recursion with os_pack_map_content.
+ */
+static int os_pack_array_content(msgpack_packer *tmp_pck,
+ msgpack_object array,
+ struct flb_opensearch *ctx)
+{
+ int i;
+ msgpack_object *e;
+
+ for (i = 0; i < array.via.array.size; i++) {
+ e = &array.via.array.ptr[i];
+ if (e->type == MSGPACK_OBJECT_MAP) {
+ msgpack_pack_map(tmp_pck, e->via.map.size);
+ os_pack_map_content(tmp_pck, *e, ctx);
+ }
+ else if (e->type == MSGPACK_OBJECT_ARRAY) {
+ msgpack_pack_array(tmp_pck, e->via.array.size);
+ os_pack_array_content(tmp_pck, *e, ctx);
+ }
+ else {
+ msgpack_pack_object(tmp_pck, *e);
+ }
+ }
+ return 0;
+}
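+
+/*
+ * Editor's note, a worked example (not in the original source): with
+ * 'replace_dots' enabled, os_pack_map_content() rewrites dotted key names
+ * and recurses into nested maps and arrays, leaving values untouched:
+ *
+ *   input : {"log.level": "info", "k8s": {"pod.name": "api-0"}}
+ *   output: {"log_level": "info", "k8s": {"pod_name": "api-0"}}
+ */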
+
+/*
+ * Get _id value from incoming record.
+ * If it succeeds, return the value as flb_sds_t.
+ * If it fails, return NULL.
+*/
+static flb_sds_t os_get_id_value(struct flb_opensearch *ctx,
+ msgpack_object *map)
+{
+ struct flb_ra_value *rval = NULL;
+ flb_sds_t tmp_str;
+ rval = flb_ra_get_value_object(ctx->ra_id_key, *map);
+ if (rval == NULL) {
+ flb_plg_warn(ctx->ins, "the value of %s is missing",
+ ctx->id_key);
+ return NULL;
+ }
+ else if(rval->o.type != MSGPACK_OBJECT_STR) {
+ flb_plg_warn(ctx->ins, "the value of %s is not string",
+ ctx->id_key);
+ flb_ra_key_value_destroy(rval);
+ return NULL;
+ }
+
+ tmp_str = flb_sds_create_len(rval->o.via.str.ptr,
+ rval->o.via.str.size);
+ if (tmp_str == NULL) {
+ flb_plg_warn(ctx->ins, "cannot create ID string from record");
+ flb_ra_key_value_destroy(rval);
+ return NULL;
+ }
+ flb_ra_key_value_destroy(rval);
+ return tmp_str;
+}
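+
+/*
+ * Editor's note, usage sketch (record and key names are hypothetical): with
+ * 'id_key log_id' configured, a record such as {"log_id": "abc-123"} yields
+ * the string "abc-123", which opensearch_format() then embeds in the bulk
+ * action line, e.g.
+ *   {"create":{"_index":"fluent-bit","_type":"_doc","_id":"abc-123"}}
+ * A missing key or a non-string value returns NULL and the record is indexed
+ * without an explicit _id.
+ */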
+
+static int compose_index_header(struct flb_opensearch *ctx,
+ int index_custom_len,
+ char *logstash_index, size_t logstash_index_size,
+ char *separator_str,
+ struct tm *tm)
+{
+ int ret;
+ int len;
+ char *p;
+ size_t s;
+
+ /* Compose Index header */
+ if (index_custom_len > 0) {
+ p = logstash_index + index_custom_len;
+ } else {
+ p = logstash_index + flb_sds_len(ctx->logstash_prefix);
+ }
+ len = p - logstash_index;
+ ret = snprintf(p, logstash_index_size - len, "%s",
+ separator_str);
+ if (ret > logstash_index_size - len) {
+ /* exceed limit */
+ return -1;
+ }
+ p += strlen(separator_str);
+ len += strlen(separator_str);
+
+ s = strftime(p, logstash_index_size - len,
+ ctx->logstash_dateformat, tm);
+ if (s == 0) {
+ /* exceed limit */
+ return -1;
+ }
+ p += s;
+ *p++ = '\0';
+
+ return 0;
+}
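+
+/*
+ * Editor's note, worked example (dates are illustrative): with the defaults
+ * logstash_prefix="logstash", logstash_prefix_separator="-" and
+ * logstash_dateformat="%Y.%m.%d", a record timestamped on 2024-05-05 is
+ * routed to the index "logstash-2024.05.05"; when logstash_prefix_key
+ * matches a value in the record, that value replaces the prefix before the
+ * separator and date are appended.
+ */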
+
+/*
+ * Convert the internal Fluent Bit data representation to the required
+ * one by OpenSearch.
+ */
+static int opensearch_format(struct flb_config *config,
+ struct flb_input_instance *ins,
+ void *plugin_context,
+ void *flush_ctx,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_data, size_t *out_size)
+{
+ int ret;
+ int len;
+ int map_size;
+ int index_len = 0;
+ int write_op_update = FLB_FALSE;
+ int write_op_upsert = FLB_FALSE;
+ flb_sds_t ra_index = NULL;
+ size_t s = 0;
+ char *index = NULL;
+ char logstash_index[256];
+ char time_formatted[256];
+ char index_formatted[256];
+ char uuid[37];
+ flb_sds_t out_buf;
+ flb_sds_t id_key_str = NULL;
+ msgpack_object map;
+ flb_sds_t bulk;
+ struct tm tm;
+ struct flb_time tms;
+ msgpack_sbuffer tmp_sbuf;
+ msgpack_packer tmp_pck;
+ cfl_hash_128bits_t hash;
+ unsigned char h[sizeof(cfl_hash_128bits_t)];
+ int index_custom_len;
+ struct flb_opensearch *ctx = plugin_context;
+ flb_sds_t j_index;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ j_index = flb_sds_create_size(FLB_OS_HEADER_SIZE);
+ if (j_index == NULL) {
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return -1;
+ }
+
+ bulk = flb_sds_create_size(bytes * 2);
+ if (!bulk) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_sds_destroy(j_index);
+
+ return -1;
+ }
+
+ /* Copy logstash prefix if logstash format is enabled */
+ if (ctx->logstash_format == FLB_TRUE) {
+ strncpy(logstash_index, ctx->logstash_prefix, sizeof(logstash_index));
+ logstash_index[sizeof(logstash_index) - 1] = '\0';
+ }
+
+ /*
+ * If logstash format and id generation are disabled, pre-generate
+ * the index line for all records.
+ *
+ * The header stored in 'j_index' will be used for all records in
+ * this payload.
+ */
+ if (ctx->logstash_format == FLB_FALSE && ctx->generate_id == FLB_FALSE && ctx->ra_index == NULL) {
+ flb_time_get(&tms);
+ gmtime_r(&tms.tm.tv_sec, &tm);
+ strftime(index_formatted, sizeof(index_formatted) - 1,
+ ctx->index, &tm);
+ index = index_formatted;
+ if (ctx->suppress_type_name) {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT_NO_TYPE,
+ ctx->action,
+ index);
+ }
+ else {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT,
+ ctx->action,
+ index, ctx->type);
+ }
+
+ if (index_len == -1) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_sds_destroy(bulk);
+ flb_sds_destroy(j_index);
+ return -1;
+ }
+ }
+
+ /*
+ * Some broken clients may report timestamps drifting back to the year 1970,
+ * which would generate a corresponding index in OpenSearch for each of them.
+ * To prevent generating millions of indexes, the plugin can be configured
+ * to always use the current time for index generation.
+ */
+ if (ctx->current_time_index == FLB_TRUE) {
+ flb_time_get(&tms);
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ /* Only pop time from record if current_time_index is disabled */
+ if (!ctx->current_time_index) {
+ flb_time_copy(&tms, &log_event.timestamp);
+ }
+
+ map = *log_event.body;
+ map_size = map.via.map.size;
+
+ index_custom_len = 0;
+ if (ctx->logstash_prefix_key) {
+ flb_sds_t v = flb_ra_translate(ctx->ra_prefix_key,
+ (char *) tag, tag_len,
+ map, NULL);
+ if (v) {
+ len = flb_sds_len(v);
+ if (len > 128) {
+ len = 128;
+ memcpy(logstash_index, v, 128);
+ }
+ else {
+ memcpy(logstash_index, v, len);
+ }
+
+ index_custom_len = len;
+ flb_sds_destroy(v);
+ }
+ }
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&tmp_sbuf);
+ msgpack_packer_init(&tmp_pck, &tmp_sbuf, msgpack_sbuffer_write);
+
+ if (ctx->include_tag_key) {
+ map_size++;
+ }
+
+ /* Set the new map size */
+ msgpack_pack_map(&tmp_pck, map_size + 1);
+
+ /* Append the time key */
+ msgpack_pack_str(&tmp_pck, flb_sds_len(ctx->time_key));
+ msgpack_pack_str_body(&tmp_pck, ctx->time_key, flb_sds_len(ctx->time_key));
+
+ /* Format the time */
+ gmtime_r(&tms.tm.tv_sec, &tm);
+ s = strftime(time_formatted, sizeof(time_formatted) - 1,
+ ctx->time_key_format, &tm);
+ if (ctx->time_key_nanos) {
+ len = snprintf(time_formatted + s, sizeof(time_formatted) - 1 - s,
+ ".%09" PRIu64 "Z", (uint64_t) tms.tm.tv_nsec);
+ } else {
+ len = snprintf(time_formatted + s, sizeof(time_formatted) - 1 - s,
+ ".%03" PRIu64 "Z",
+ (uint64_t) tms.tm.tv_nsec / 1000000);
+ }
+
+ s += len;
+ msgpack_pack_str(&tmp_pck, s);
+ msgpack_pack_str_body(&tmp_pck, time_formatted, s);
+
+ index = ctx->index;
+ if (ctx->logstash_format == FLB_TRUE) {
+ ret = compose_index_header(ctx, index_custom_len,
+ &logstash_index[0], sizeof(logstash_index),
+ ctx->logstash_prefix_separator, &tm);
+ if (ret < 0) {
+ /* retry with default separator */
+ compose_index_header(ctx, index_custom_len,
+ &logstash_index[0], sizeof(logstash_index),
+ "-", &tm);
+ }
+ index = logstash_index;
+ if (ctx->generate_id == FLB_FALSE) {
+ if (ctx->suppress_type_name) {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT_NO_TYPE,
+ ctx->action,
+ index);
+ }
+ else {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT,
+ ctx->action,
+ index, ctx->type);
+ }
+ }
+ }
+ else if (ctx->current_time_index == FLB_TRUE) {
+ /* Make sure we handle index time format for index */
+ strftime(index_formatted, sizeof(index_formatted) - 1,
+ ctx->index, &tm);
+ index = index_formatted;
+ }
+ else if (ctx->ra_index) {
+ /* free any previous ra_index to avoid memory leaks */
+ if (ra_index != NULL) {
+ flb_sds_destroy(ra_index);
+ }
+ /* a record accessor pattern exists for the index */
+ ra_index = flb_ra_translate(ctx->ra_index,
+ (char *) tag, tag_len,
+ map, NULL);
+ if (!ra_index) {
+ flb_plg_warn(ctx->ins, "invalid index translation from record accessor pattern, default to static index");
+ }
+ else {
+ index = ra_index;
+ }
+
+ if (ctx->suppress_type_name) {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT_NO_TYPE,
+ ctx->action,
+ index);
+ }
+ else {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT,
+ ctx->action,
+ index, ctx->type);
+ }
+ }
+
+ /* Tag Key */
+ if (ctx->include_tag_key == FLB_TRUE) {
+ msgpack_pack_str(&tmp_pck, flb_sds_len(ctx->tag_key));
+ msgpack_pack_str_body(&tmp_pck, ctx->tag_key, flb_sds_len(ctx->tag_key));
+ msgpack_pack_str(&tmp_pck, tag_len);
+ msgpack_pack_str_body(&tmp_pck, tag, tag_len);
+ }
+
+ /*
+ * The map_content routine iterates over each key/value pair found in
+ * the map and does some sanitization of the key names.
+ *
+ * There is a restriction that key names cannot contain a dot; if some
+ * dot is found, it's replaced with an underscore.
+ */
+ ret = os_pack_map_content(&tmp_pck, map, ctx);
+ if (ret == -1) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ flb_sds_destroy(bulk);
+ flb_sds_destroy(j_index);
+ if (ra_index != NULL) {
+ flb_sds_destroy(ra_index);
+ }
+ return -1;
+ }
+
+ if (ctx->generate_id == FLB_TRUE) {
+ /* use a 128 bit hash and copy it to a buffer */
+ hash = cfl_hash_128bits(tmp_sbuf.data, tmp_sbuf.size);
+ memcpy(h, &hash, sizeof(hash));
+ snprintf(uuid, sizeof(uuid),
+ "%02X%02X%02X%02X-%02X%02X-%02X%02X-"
+ "%02X%02X-%02X%02X%02X%02X%02X%02X",
+ h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7],
+ h[8], h[9], h[10], h[11], h[12], h[13], h[14], h[15]);
+
+ if (ctx->suppress_type_name) {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT_ID_NO_TYPE,
+ ctx->action,
+ index, uuid);
+ }
+ else {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT_ID,
+ ctx->action,
+ index, ctx->type, uuid);
+ }
+ }
+ if (ctx->ra_id_key) {
+ id_key_str = os_get_id_value(ctx, &map);
+ if (id_key_str) {
+ if (ctx->suppress_type_name) {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT_ID_NO_TYPE,
+ ctx->action,
+ index, id_key_str);
+ }
+ else {
+ index_len = flb_sds_snprintf(&j_index,
+ flb_sds_alloc(j_index),
+ OS_BULK_INDEX_FMT_ID,
+ ctx->action,
+ index, ctx->type, id_key_str);
+ }
+ flb_sds_destroy(id_key_str);
+ id_key_str = NULL;
+ }
+ }
+
+ /* Convert msgpack to JSON */
+ out_buf = flb_msgpack_raw_to_json_sds(tmp_sbuf.data, tmp_sbuf.size);
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ if (!out_buf) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_sds_destroy(bulk);
+ flb_sds_destroy(j_index);
+ if (ra_index != NULL) {
+ flb_sds_destroy(ra_index);
+ }
+ return -1;
+ }
+
+ ret = flb_sds_cat_safe(&bulk, j_index, flb_sds_len(j_index));
+ if (ret == -1) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ *out_size = 0;
+ flb_sds_destroy(bulk);
+ flb_sds_destroy(j_index);
+ flb_sds_destroy(out_buf);
+ if (ra_index != NULL) {
+ flb_sds_destroy(ra_index);
+ }
+ return -1;
+ }
+
+ if (strcasecmp(ctx->write_operation, FLB_OS_WRITE_OP_UPDATE) == 0) {
+ write_op_update = FLB_TRUE;
+ }
+ else if (strcasecmp(ctx->write_operation, FLB_OS_WRITE_OP_UPSERT) == 0) {
+ write_op_upsert = FLB_TRUE;
+ }
+
+ /* UPDATE | UPSERT */
+ if (write_op_update) {
+ flb_sds_cat_safe(&bulk,
+ OS_BULK_UPDATE_OP_BODY,
+ sizeof(OS_BULK_UPDATE_OP_BODY) - 1);
+ }
+ else if (write_op_upsert) {
+ flb_sds_cat_safe(&bulk,
+ OS_BULK_UPSERT_OP_BODY,
+ sizeof(OS_BULK_UPSERT_OP_BODY) - 1);
+ }
+
+ ret = flb_sds_cat_safe(&bulk, out_buf, flb_sds_len(out_buf));
+ if (ret == -1) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ *out_size = 0;
+ flb_sds_destroy(bulk);
+ flb_sds_destroy(j_index);
+ flb_sds_destroy(out_buf);
+ if (ra_index != NULL) {
+ flb_sds_destroy(ra_index);
+ }
+ return -1;
+ }
+
+ /* finish UPDATE | UPSERT */
+ if (write_op_update || write_op_upsert) {
+ flb_sds_cat_safe(&bulk, "}", 1);
+ }
+
+ flb_sds_cat_safe(&bulk, "\n", 1);
+ flb_sds_destroy(out_buf);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ /* Set outgoing data */
+ *out_data = bulk;
+ *out_size = flb_sds_len(bulk);
+
+ if (ra_index != NULL) {
+ flb_sds_destroy(ra_index);
+ }
+ /*
+ * Note: we don't destroy 'bulk' here because its buffer is handed back to
+ * the caller through *out_data; the caller owns and releases it.
+ */
+ if (ctx->trace_output) {
+ fwrite(*out_data, 1, *out_size, stdout);
+ fflush(stdout);
+ }
+ flb_sds_destroy(j_index);
+ return 0;
+}
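+
+/*
+ * Editor's note, payload sketch (field values are illustrative): the
+ * formatter above emits one NDJSON pair per record, an action line followed
+ * by the document. For the default "create" operation:
+ *
+ *   {"create":{"_index":"fluent-bit","_type":"_doc"}}
+ *   {"@timestamp":"2024-05-05T12:08:03.000Z","log":"hello"}
+ *
+ * For write_operation 'update' or 'upsert' the document line is wrapped with
+ * OS_BULK_UPDATE_OP_BODY or OS_BULK_UPSERT_OP_BODY and closed with '}':
+ *
+ *   {"update":{"_index":"fluent-bit","_type":"_doc","_id":"..."}}
+ *   {"doc":{"@timestamp":"...","log":"hello"}}
+ */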
+
+static int cb_opensearch_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ struct flb_opensearch *ctx;
+
+ ctx = flb_os_conf_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "cannot initialize plugin");
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "host=%s port=%i uri=%s index=%s type=%s",
+ ins->host.name, ins->host.port, ctx->uri,
+ ctx->index, ctx->type);
+
+ flb_output_set_context(ins, ctx);
+
+ /*
+ * This plugin instance uses the HTTP client interface, let's register
+ * it debugging callbacks.
+ */
+ flb_output_set_http_debug_callbacks(ins);
+
+ return 0;
+}
+
+static int opensearch_error_check(struct flb_opensearch *ctx,
+ struct flb_http_client *c)
+{
+ int i, j, k;
+ int ret;
+ int check = FLB_FALSE;
+ int root_type;
+ char *out_buf;
+ size_t off = 0;
+ size_t out_size;
+ msgpack_unpacked result;
+ msgpack_object root;
+ msgpack_object key;
+ msgpack_object val;
+ msgpack_object item;
+ msgpack_object item_key;
+ msgpack_object item_val;
+
+ /*
+ * Check if our payload is complete: there are situations where the
+ * OpenSearch HTTP response body is bigger than the HTTP client buffer,
+ * so the payload can be incomplete.
+ */
+ /* Convert JSON payload to msgpack */
+ ret = flb_pack_json(c->resp.payload, c->resp.payload_size,
+ &out_buf, &out_size, &root_type, NULL);
+ if (ret == -1) {
+ /* Is this an incomplete HTTP response? */
+ if (c->resp.payload_size <= 0) {
+ return FLB_TRUE;
+ }
+
+ /* Lookup error field */
+ if (strstr(c->resp.payload, "\"errors\":false,\"items\":[")) {
+ return FLB_FALSE;
+ }
+
+ flb_plg_error(ctx->ins, "could not pack/validate JSON response\n%s",
+ c->resp.payload);
+ return FLB_TRUE;
+ }
+
+ /* Lookup error field */
+ msgpack_unpacked_init(&result);
+ ret = msgpack_unpack_next(&result, out_buf, out_size, &off);
+ if (ret != MSGPACK_UNPACK_SUCCESS) {
+ flb_plg_error(ctx->ins, "Cannot unpack response to find error\n%s",
+ c->resp.payload);
+ return FLB_TRUE;
+ }
+
+ root = result.data;
+ if (root.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected payload type=%i",
+ root.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ for (i = 0; i < root.via.map.size; i++) {
+ key = root.via.map.ptr[i].key;
+ if (key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "unexpected key type=%i",
+ key.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ if (key.via.str.size == 6 && strncmp(key.via.str.ptr, "errors", 6) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_BOOLEAN) {
+ flb_plg_error(ctx->ins, "unexpected 'error' value type=%i",
+ val.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ /* If error == false, we are OK (no errors = FLB_FALSE) */
+ if (!val.via.boolean) {
+ /* no errors */
+ check = FLB_FALSE;
+ goto done;
+ }
+ }
+ else if (key.via.str.size == 5 && strncmp(key.via.str.ptr, "items", 5) == 0) {
+ val = root.via.map.ptr[i].val;
+ if (val.type != MSGPACK_OBJECT_ARRAY) {
+ flb_plg_error(ctx->ins, "unexpected 'items' value type=%i",
+ val.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ for (j = 0; j < val.via.array.size; j++) {
+ item = val.via.array.ptr[j];
+ if (item.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected 'item' outer value type=%i",
+ item.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ if (item.via.map.size != 1) {
+ flb_plg_error(ctx->ins, "unexpected 'item' size=%i",
+ item.via.map.size);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ item = item.via.map.ptr[0].val;
+ if (item.type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "unexpected 'item' inner value type=%i",
+ item.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ for (k = 0; k < item.via.map.size; k++) {
+ item_key = item.via.map.ptr[k].key;
+ if (item_key.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "unexpected key type=%i",
+ item_key.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+
+ if (item_key.via.str.size == 6 && strncmp(item_key.via.str.ptr, "status", 6) == 0) {
+ item_val = item.via.map.ptr[k].val;
+
+ if (item_val.type != MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ flb_plg_error(ctx->ins, "unexpected 'status' value type=%i",
+ item_val.type);
+ check = FLB_TRUE;
+ goto done;
+ }
+ /* Check for errors other than version conflict (document already exists) */
+ if (item_val.via.i64 != 409) {
+ check = FLB_TRUE;
+ goto done;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ done:
+ flb_free(out_buf);
+ msgpack_unpacked_destroy(&result);
+ return check;
+}
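+
+/*
+ * Editor's note, response examples (bodies are illustrative): the check
+ * above returns FLB_FALSE (no retry) for
+ *   {"errors":false,"items":[{"create":{"status":201}}]}
+ * and also when every reported item status is 409 (version conflict, i.e.
+ * the document already exists); with "errors":true, any per-item status
+ * other than 409 makes it return FLB_TRUE so the chunk is retried.
+ */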
+
+static void cb_opensearch_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *ins, void *out_context,
+ struct flb_config *config)
+{
+ int ret = -1;
+ size_t pack_size;
+ flb_sds_t pack;
+ void *out_buf;
+ size_t out_size;
+ size_t b_sent;
+ struct flb_opensearch *ctx = out_context;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ flb_sds_t signature = NULL;
+ int compressed = FLB_FALSE;
+ void *final_payload_buf = NULL;
+ size_t final_payload_size = 0;
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Convert format */
+ if (event_chunk->type == FLB_EVENT_TYPE_TRACES) {
+ pack = flb_msgpack_raw_to_json_sds(event_chunk->data, event_chunk->size);
+ if (pack) {
+ ret = 0;
+ }
+ else {
+ ret = -1;
+ }
+ }
+ else if (event_chunk->type == FLB_EVENT_TYPE_LOGS) {
+ ret = opensearch_format(config, ins,
+ ctx, NULL,
+ event_chunk->type,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ event_chunk->data, event_chunk->size,
+ &out_buf, &out_size);
+ }
+
+ if (ret != 0) {
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ pack = (char *) out_buf;
+ pack_size = out_size;
+
+ final_payload_buf = pack;
+ final_payload_size = pack_size;
+ /* Should we compress the payload ? */
+ if (ctx->compression == FLB_OS_COMPRESSION_GZIP) {
+ ret = flb_gzip_compress((void *) pack, pack_size,
+ &out_buf, &out_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "cannot gzip payload, disabling compression");
+ }
+ else {
+ compressed = FLB_TRUE;
+ final_payload_buf = out_buf;
+ final_payload_size = out_size;
+ }
+ }
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->uri,
+ final_payload_buf, final_payload_size, NULL, 0, NULL, 0);
+
+ flb_http_buffer_size(c, ctx->buffer_size);
+
+#ifndef FLB_HAVE_AWS
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+#endif
+
+ flb_http_add_header(c, "Content-Type", 12, "application/x-ndjson", 20);
+
+ if (ctx->http_user && ctx->http_passwd) {
+ flb_http_basic_auth(c, ctx->http_user, ctx->http_passwd);
+ }
+
+#ifdef FLB_HAVE_AWS
+ if (ctx->has_aws_auth == FLB_TRUE) {
+ signature = add_aws_auth(c, ctx);
+ if (!signature) {
+ goto retry;
+ }
+ }
+ else {
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ }
+#endif
+
+ /* Set Content-Encoding of compressed payload */
+ if (compressed == FLB_TRUE) {
+ if (ctx->compression == FLB_OS_COMPRESSION_GZIP) {
+ flb_http_set_content_encoding_gzip(c);
+ }
+ }
+
+ /* Map debug callbacks */
+ flb_http_client_debug(c, ctx->ins->callback);
+
+ ret = flb_http_do(c, &b_sent);
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "http_do=%i URI=%s", ret, ctx->uri);
+ goto retry;
+ }
+ else {
+ /* The request was issued successfully, validate the 'error' field */
+ flb_plg_debug(ctx->ins, "HTTP Status=%i URI=%s", c->resp.status, ctx->uri);
+ if (c->resp.status != 200 && c->resp.status != 201) {
+ if (c->resp.payload_size > 0) {
+ flb_plg_error(ctx->ins, "HTTP status=%i URI=%s, response:\n%s\n",
+ c->resp.status, ctx->uri, c->resp.payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "HTTP status=%i URI=%s",
+ c->resp.status, ctx->uri);
+ }
+ goto retry;
+ }
+
+ if (c->resp.payload_size > 0) {
+ /*
+ * OpenSearch payload should be JSON, we convert it to msgpack
+ * and lookup the 'error' field.
+ */
+ ret = opensearch_error_check(ctx, c);
+ if (ret == FLB_TRUE) {
+ /* we got an error */
+ if (ctx->trace_error) {
+ /*
+ * If trace_error is set, trace the actual
+ * response from OpenSearch explaining the problem.
+ * Trace_Output can be used to see the request.
+ */
+ if (pack_size < 4000) {
+ flb_plg_debug(ctx->ins, "error caused by: Input\n%.*s\n",
+ (int) pack_size, pack);
+ }
+ if (c->resp.payload_size < 4000) {
+ flb_plg_error(ctx->ins, "error: Output\n%s",
+ c->resp.payload);
+ } else {
+ /*
+ * We must use fwrite since the flb_log functions
+ * will truncate data at 4KB
+ */
+ fwrite(c->resp.payload, 1, c->resp.payload_size, stderr);
+ fflush(stderr);
+ }
+ }
+ goto retry;
+ }
+ else {
+ flb_plg_debug(ctx->ins, "OpenSearch response\n%s",
+ c->resp.payload);
+ }
+ }
+ else {
+ goto retry;
+ }
+ }
+
+ /* Cleanup */
+ flb_http_client_destroy(c);
+ flb_sds_destroy(pack);
+
+ if (final_payload_buf != pack) {
+ flb_free(final_payload_buf);
+ }
+
+ flb_upstream_conn_release(u_conn);
+ if (signature) {
+ flb_sds_destroy(signature);
+ }
+ FLB_OUTPUT_RETURN(FLB_OK);
+
+ /* Issue a retry */
+ retry:
+ flb_http_client_destroy(c);
+ flb_sds_destroy(pack);
+
+ if (final_payload_buf != pack) {
+ flb_free(final_payload_buf);
+ }
+
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+}
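+
+/*
+ * Editor's note, flow sketch: cb_opensearch_flush() formats the chunk into a
+ * bulk payload, optionally gzips it, POSTs it to ctx->uri (the '/_bulk'
+ * endpoint, with an optional path prefix and pipeline), then inspects both
+ * the HTTP status and the bulk response body via opensearch_error_check();
+ * any failure path jumps to 'retry' so the engine reschedules the chunk.
+ */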
+
+static int cb_opensearch_exit(void *data, struct flb_config *config)
+{
+ struct flb_opensearch *ctx = data;
+
+ flb_os_conf_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "index", FLB_OS_DEFAULT_INDEX,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, index),
+ "Set an index name"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "type", FLB_OS_DEFAULT_TYPE,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, type),
+ "Set the document type property"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "suppress_type_name", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, suppress_type_name),
+ "If true, mapping types is removed. (for v7.0.0 or later)"
+ },
+
+ /* HTTP Authentication */
+ {
+ FLB_CONFIG_MAP_STR, "http_user", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, http_user),
+ "Optional username credential for access"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "http_passwd", "",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, http_passwd),
+ "Password for user defined in 'http_user'"
+ },
+
+ /* AWS Authentication */
+#ifdef FLB_HAVE_AWS
+ {
+ FLB_CONFIG_MAP_BOOL, "aws_auth", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, has_aws_auth),
+ "Enable AWS Sigv4 Authentication"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_region", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, aws_region),
+ "AWS Region of your Amazon OpenSearch Service cluster"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_profile", "default",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, aws_profile),
+ "AWS Profile name. AWS Profiles can be configured with AWS CLI and are usually stored in "
+ "$HOME/.aws/ directory."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_sts_endpoint", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, aws_sts_endpoint),
+ "Custom endpoint for the AWS STS API, used with the AWS_Role_ARN option"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_role_arn", NULL,
+ 0, FLB_FALSE, 0,
+ "AWS IAM Role to assume to put records to your Amazon OpenSearch cluster"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_external_id", NULL,
+ 0, FLB_FALSE, 0,
+ "External ID for the AWS IAM Role specified with `aws_role_arn`"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_service_name", "es",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, aws_service_name),
+ "AWS Service Name"
+ },
+#endif
+
+ /* Logstash compatibility */
+ {
+ FLB_CONFIG_MAP_BOOL, "logstash_format", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, logstash_format),
+ "Enable Logstash format compatibility"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "logstash_prefix", FLB_OS_DEFAULT_PREFIX,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, logstash_prefix),
+ "When Logstash_Format is enabled, the Index name is composed using a prefix "
+ "and the date, e.g: If Logstash_Prefix is equals to 'mydata' your index will "
+ "become 'mydata-YYYY.MM.DD'. The last string appended belongs to the date "
+ "when the data is being generated"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "logstash_prefix_separator", "-",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, logstash_prefix_separator),
+ "Set a separator between logstash_prefix and date."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "logstash_prefix_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, logstash_prefix_key),
+ "When included: the value in the record that belongs to the key will be looked "
+ "up and over-write the Logstash_Prefix for index generation. If the key/value "
+ "is not found in the record then the Logstash_Prefix option will act as a "
+ "fallback. Nested keys are supported through record accessor pattern"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "logstash_dateformat", FLB_OS_DEFAULT_TIME_FMT,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, logstash_dateformat),
+ "Time format (based on strftime) to generate the second part of the Index name"
+ },
+
+ /* Custom Time and Tag keys */
+ {
+ FLB_CONFIG_MAP_STR, "time_key", FLB_OS_DEFAULT_TIME_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, time_key),
+ "When Logstash_Format is enabled, each record will get a new timestamp field. "
+ "The Time_Key property defines the name of that field"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "time_key_format", FLB_OS_DEFAULT_TIME_KEYF,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, time_key_format),
+ "When Logstash_Format is enabled, this property defines the format of the "
+ "timestamp"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "time_key_nanos", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, time_key_nanos),
+ "When Logstash_Format is enabled, enabling this property sends nanosecond "
+ "precision timestamps"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "include_tag_key", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, include_tag_key),
+ "When enabled, it append the Tag name to the record"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "tag_key", FLB_OS_DEFAULT_TAG_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, tag_key),
+ "When Include_Tag_Key is enabled, this property defines the key name for the tag"
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "buffer_size", FLB_OS_DEFAULT_HTTP_MAX,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, buffer_size),
+ "Specify the buffer size used to read the response from the OpenSearch HTTP "
+ "service. This option is useful for debugging purposes where is required to read "
+ "full responses, note that response size grows depending of the number of records "
+ "inserted. To set an unlimited amount of memory set this value to 'false', "
+ "otherwise the value must be according to the Unit Size specification"
+ },
+
+ /* OpenSearch specifics */
+ {
+ FLB_CONFIG_MAP_STR, "path", NULL,
+ 0, FLB_FALSE, 0,
+ "OpenSearch accepts new data on HTTP query path '/_bulk'. But it is also "
+ "possible to serve OpenSearch behind a reverse proxy on a subpath. This "
+ "option defines such path on the fluent-bit side. It simply adds a path "
+ "prefix in the indexing HTTP POST URI"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "pipeline", NULL,
+ 0, FLB_FALSE, 0,
+ "OpenSearch allows to setup filters called pipelines. "
+ "This option allows to define which pipeline the database should use. For "
+ "performance reasons is strongly suggested to do parsing and filtering on "
+ "Fluent Bit side, avoid pipelines"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "generate_id", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, generate_id),
+ "When enabled, generate _id for outgoing records. This prevents duplicate "
+ "records when retrying"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "write_operation", "create",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, write_operation),
+ "Operation to use to write in bulk requests"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "id_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, id_key),
+ "If set, _id will be the value of the key from incoming record."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "replace_dots", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, replace_dots),
+ "When enabled, replace field name dots with underscore."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "current_time_index", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, current_time_index),
+ "Use current time for index generation instead of message record"
+ },
+
+ /* Trace */
+ {
+ FLB_CONFIG_MAP_BOOL, "trace_output", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, trace_output),
+ "When enabled print the OpenSearch API calls to stdout (for diag only)"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "trace_error", "false",
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, trace_error),
+ "When enabled print the OpenSearch exception to stderr (for diag only)"
+ },
+
+ /* HTTP Compression */
+ {
+ FLB_CONFIG_MAP_STR, "compress", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_opensearch, compression_str),
+ "Set payload compression mechanism. Option available is 'gzip'"
+ },
+
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_output_plugin out_opensearch_plugin = {
+ .name = "opensearch",
+ .description = "OpenSearch",
+ .cb_init = cb_opensearch_init,
+ .cb_pre_run = NULL,
+ .cb_flush = cb_opensearch_flush,
+ .cb_exit = cb_opensearch_exit,
+
+ /* Configuration */
+ .config_map = config_map,
+
+ /* Events supported */
+ .event_type = FLB_OUTPUT_LOGS | FLB_OUTPUT_TRACES,
+
+ /* Test */
+ .test_formatter.callback = opensearch_format,
+
+ /* Plugin flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_opensearch/opensearch.h b/src/fluent-bit/plugins/out_opensearch/opensearch.h
new file mode 100644
index 000000000..a1087c1da
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opensearch/opensearch.h
@@ -0,0 +1,155 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_OPENSEARCH_H
+#define FLB_OUT_OPENSEARCH_H
+
+/* config defaults */
+#define FLB_OS_DEFAULT_HOST "127.0.0.1"
+#define FLB_OS_DEFAULT_PORT 9200
+#define FLB_OS_DEFAULT_INDEX "fluent-bit"
+#define FLB_OS_DEFAULT_TYPE "_doc"
+#define FLB_OS_DEFAULT_PREFIX "logstash"
+#define FLB_OS_DEFAULT_TIME_FMT "%Y.%m.%d"
+#define FLB_OS_DEFAULT_TIME_KEY "@timestamp"
+#define FLB_OS_DEFAULT_TIME_KEYF "%Y-%m-%dT%H:%M:%S"
+#define FLB_OS_DEFAULT_TAG_KEY "flb-key"
+#define FLB_OS_DEFAULT_HTTP_MAX "512k"
+#define FLB_OS_WRITE_OP_INDEX "index"
+#define FLB_OS_WRITE_OP_CREATE "create"
+#define FLB_OS_WRITE_OP_UPDATE "update"
+#define FLB_OS_WRITE_OP_UPSERT "upsert"
+
+/* macros */
+#define FLB_OS_HEADER_SIZE 1024
+#define OS_BULK_CHUNK 4096 /* Size of buffer chunks */
+#define OS_BULK_HEADER 165 /* Bulk API prefix line */
+
+/* Bulk formats */
+#define OS_BULK_INDEX_FMT "{\"%s\":{\"_index\":\"%s\",\"_type\":\"%s\"}}\n"
+#define OS_BULK_INDEX_FMT_ID "{\"%s\":{\"_index\":\"%s\",\"_type\":\"%s\",\"_id\":\"%s\"}}\n"
+#define OS_BULK_INDEX_FMT_NO_TYPE "{\"%s\":{\"_index\":\"%s\"}}\n"
+#define OS_BULK_INDEX_FMT_ID_NO_TYPE "{\"%s\":{\"_index\":\"%s\",\"_id\":\"%s\"}}\n"
+
+/* Bulk write-type operations */
+#define OS_BULK_UPDATE_OP_BODY "{\"doc\":"
+#define OS_BULK_UPSERT_OP_BODY "{\"doc_as_upsert\":true,\"doc\":"
+
+/* Supported compression algorithms */
+#define FLB_OS_COMPRESSION_NONE 0
+#define FLB_OS_COMPRESSION_GZIP 1
+
+struct flb_opensearch {
+ /* OpenSearch index (database) and type (table) */
+ flb_sds_t index;
+ struct flb_record_accessor *ra_index;
+
+ char *type;
+ int suppress_type_name;
+
+ /* HTTP Auth */
+ char *http_user;
+ char *http_passwd;
+
+ /* AWS Auth */
+#ifdef FLB_HAVE_AWS
+ int has_aws_auth;
+ char *aws_region;
+ char *aws_sts_endpoint;
+ char *aws_profile;
+ struct flb_aws_provider *aws_provider;
+ struct flb_aws_provider *base_aws_provider;
+ /* tls instances can't be re-used; aws provider requires a separate one */
+ struct flb_tls *aws_tls;
+ /* one for the standard chain provider, one for sts assume role */
+ struct flb_tls *aws_sts_tls;
+ char *aws_session_name;
+ char *aws_service_name;
+ struct mk_list *aws_unsigned_headers;
+#endif
+
+ /* HTTP Client Setup */
+ size_t buffer_size;
+
+ /* If enabled, replace field name dots with underscore */
+ int replace_dots;
+
+ int trace_output;
+ int trace_error;
+
+ /*
+ * Logstash compatibility options
+ * ==============================
+ */
+
+ /* enabled/disabled */
+ int logstash_format;
+ int generate_id;
+ int current_time_index;
+
+ /* prefix */
+ flb_sds_t logstash_prefix;
+ flb_sds_t logstash_prefix_separator;
+
+ /* prefix key */
+ flb_sds_t logstash_prefix_key;
+
+ /* date format */
+ flb_sds_t logstash_dateformat;
+
+ /* time key */
+ flb_sds_t time_key;
+
+ /* time key format */
+ flb_sds_t time_key_format;
+
+ /* time key nanoseconds */
+ int time_key_nanos;
+
+ /* write operation config value */
+ flb_sds_t write_operation;
+
+ /* write operation / action */
+ char *action;
+
+ /* id_key */
+ flb_sds_t id_key;
+ struct flb_record_accessor *ra_id_key;
+
+ /* include_tag_key */
+ int include_tag_key;
+ flb_sds_t tag_key;
+
+ /* HTTP API */
+ char uri[1024];
+
+ struct flb_record_accessor *ra_prefix_key;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *u;
+
+ /* Plugin output instance reference */
+ struct flb_output_instance *ins;
+
+ /* Compression algorithm */
+ int compression;
+ flb_sds_t compression_str;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_opensearch/os_conf.c b/src/fluent-bit/plugins/out_opensearch/os_conf.c
new file mode 100644
index 000000000..b814bd35f
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opensearch/os_conf.c
@@ -0,0 +1,411 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_signv4.h>
+#include <fluent-bit/flb_aws_credentials.h>
+
+#include "opensearch.h"
+#include "os_conf.h"
+
+struct flb_opensearch *flb_os_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int len;
+ int io_flags = 0;
+ ssize_t ret;
+ char *buf;
+ const char *tmp;
+ const char *path;
+#ifdef FLB_HAVE_AWS
+ char *aws_role_arn = NULL;
+ char *aws_external_id = NULL;
+ char *aws_session_name = NULL;
+#endif
+ struct flb_uri *uri = ins->host.uri;
+ struct flb_uri_field *f_index = NULL;
+ struct flb_uri_field *f_type = NULL;
+ struct flb_upstream *upstream;
+ struct flb_opensearch *ctx;
+
+ /* Allocate context */
+ ctx = flb_calloc(1, sizeof(struct flb_opensearch));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ /* only used if the config has been set from the command line */
+ if (uri) {
+ if (uri->count >= 2) {
+ f_index = flb_uri_get(uri, 0);
+ f_type = flb_uri_get(uri, 1);
+ }
+ }
+
+ /* Set default network configuration */
+ flb_output_net_default("127.0.0.1", 9200, ins);
+
+ /* Populate context with config map defaults and incoming properties */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "configuration error");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* use TLS ? */
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ }
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ /* Prepare an upstream handler */
+ upstream = flb_upstream_create(config,
+ ins->host.name,
+ ins->host.port,
+ io_flags,
+ ins->tls);
+ if (!upstream) {
+ flb_plg_error(ctx->ins, "cannot create Upstream context");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->u = upstream;
+
+ /* Set instance flags into upstream */
+ flb_output_upstream_set(ctx->u, ins);
+
+ /* Set manual Index and Type */
+ if (f_index) {
+ ctx->index = flb_strdup(f_index->value);
+ }
+ else {
+ /* Check if the index has been set in the configuration */
+ if (ctx->index) {
+ /* do we have a record accessor pattern ? */
+ if (strchr(ctx->index, '$')) {
+ ctx->ra_index = flb_ra_create(ctx->index, FLB_TRUE);
+ if (!ctx->ra_index) {
+ flb_plg_error(ctx->ins, "invalid record accessor pattern set for 'index' property");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+ }
+ }
+
+ if (f_type) {
+ ctx->type = flb_strdup(f_type->value); /* FIXME */
+ }
+
+ /* HTTP Payload (response) maximum buffer size (0 == unlimited) */
+ if (ctx->buffer_size == -1) {
+ ctx->buffer_size = 0;
+ }
+
+ /* Path */
+ path = flb_output_get_property("path", ins);
+ if (!path) {
+ path = "";
+ }
+
+ /* Pipeline */
+ tmp = flb_output_get_property("pipeline", ins);
+ if (tmp) {
+ snprintf(ctx->uri, sizeof(ctx->uri) - 1, "%s/_bulk/?pipeline=%s", path, tmp);
+ }
+ else {
+ snprintf(ctx->uri, sizeof(ctx->uri) - 1, "%s/_bulk", path);
+ }
+
+
+ if (ctx->id_key) {
+ ctx->ra_id_key = flb_ra_create(ctx->id_key, FLB_FALSE);
+ if (ctx->ra_id_key == NULL) {
+ flb_plg_error(ins, "could not create record accessor for Id Key");
+ }
+ if (ctx->generate_id == FLB_TRUE) {
+ flb_plg_warn(ins, "Generate_ID is ignored when ID_key is set");
+ ctx->generate_id = FLB_FALSE;
+ }
+ }
+
+ if (ctx->write_operation) {
+ if (strcasecmp(ctx->write_operation, FLB_OS_WRITE_OP_INDEX) == 0) {
+ ctx->action = FLB_OS_WRITE_OP_INDEX;
+ }
+ else if (strcasecmp(ctx->write_operation, FLB_OS_WRITE_OP_CREATE) == 0) {
+ ctx->action = FLB_OS_WRITE_OP_CREATE;
+ }
+ else if (strcasecmp(ctx->write_operation, FLB_OS_WRITE_OP_UPDATE) == 0
+ || strcasecmp(ctx->write_operation, FLB_OS_WRITE_OP_UPSERT) == 0) {
+ ctx->action = FLB_OS_WRITE_OP_UPDATE;
+ }
+ else {
+ flb_plg_error(ins,
+ "wrong Write_Operation (should be one of index, "
+ "create, update, upsert)");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ if (strcasecmp(ctx->action, FLB_OS_WRITE_OP_UPDATE) == 0
+ && !ctx->ra_id_key && ctx->generate_id == FLB_FALSE) {
+ flb_plg_error(ins,
+ "id_key or generate_id must be set when Write_Operation "
+ "update or upsert");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ if (ctx->logstash_prefix_key) {
+ if (ctx->logstash_prefix_key[0] != '$') {
+ len = flb_sds_len(ctx->logstash_prefix_key);
+ buf = flb_malloc(len + 2);
+ if (!buf) {
+ flb_errno();
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+ buf[0] = '$';
+ memcpy(buf + 1, ctx->logstash_prefix_key, len);
+ buf[len + 1] = '\0';
+
+ ctx->ra_prefix_key = flb_ra_create(buf, FLB_TRUE);
+ flb_free(buf);
+ }
+ else {
+ ctx->ra_prefix_key = flb_ra_create(ctx->logstash_prefix_key, FLB_TRUE);
+ }
+
+ if (!ctx->ra_prefix_key) {
+ flb_plg_error(ins, "invalid logstash_prefix_key pattern '%s'", tmp);
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+ }
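+
+ /*
+ * Editor's note (key names are hypothetical): the block above accepts both
+ * plain key names and record accessor patterns. 'logstash_prefix_key tenant'
+ * is turned into the pattern '$tenant' before flb_ra_create(), while an
+ * explicit pattern such as $kubernetes['namespace_name'] is used as-is.
+ */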
+
+ if (ctx->compression_str) {
+ if (strcasecmp(ctx->compression_str, "gzip") == 0) {
+ ctx->compression = FLB_OS_COMPRESSION_GZIP;
+ }
+ else {
+ ctx->compression = FLB_OS_COMPRESSION_NONE;
+ }
+ }
+ else {
+ ctx->compression = FLB_OS_COMPRESSION_NONE;
+ }
+
+#ifdef FLB_HAVE_AWS
+ /* AWS Auth Unsigned Headers */
+ ctx->aws_unsigned_headers = flb_malloc(sizeof(struct mk_list));
+ if (!ctx->aws_unsigned_headers) {
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+ flb_slist_create(ctx->aws_unsigned_headers);
+ ret = flb_slist_add(ctx->aws_unsigned_headers, "Content-Length");
+ if (ret != 0) {
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* AWS Auth */
+ ctx->has_aws_auth = FLB_FALSE;
+ tmp = flb_output_get_property("aws_auth", ins);
+ if (tmp) {
+ if (strncasecmp(tmp, "On", 2) == 0) {
+ ctx->has_aws_auth = FLB_TRUE;
+ flb_debug("[out_es] Enabled AWS Auth");
+
+ /* AWS provider needs a separate TLS instance */
+ ctx->aws_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+ if (!ctx->aws_tls) {
+ flb_errno();
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ tmp = flb_output_get_property("aws_region", ins);
+ if (!tmp) {
+ flb_error("[out_es] aws_auth enabled but aws_region not set");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->aws_region = (char *) tmp;
+
+ tmp = flb_output_get_property("aws_sts_endpoint", ins);
+ if (tmp) {
+ ctx->aws_sts_endpoint = (char *) tmp;
+ }
+
+ ctx->aws_provider = flb_standard_chain_provider_create(config,
+ ctx->aws_tls,
+ ctx->aws_region,
+ ctx->aws_sts_endpoint,
+ NULL,
+ flb_aws_client_generator(),
+ ctx->aws_profile);
+ if (!ctx->aws_provider) {
+ flb_error("[out_es] Failed to create AWS Credential Provider");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ tmp = flb_output_get_property("aws_role_arn", ins);
+ if (tmp) {
+ /* Use the STS Provider */
+ ctx->base_aws_provider = ctx->aws_provider;
+ aws_role_arn = (char *) tmp;
+ aws_external_id = NULL;
+ tmp = flb_output_get_property("aws_external_id", ins);
+ if (tmp) {
+ aws_external_id = (char *) tmp;
+ }
+
+ aws_session_name = flb_sts_session_name();
+ if (!aws_session_name) {
+ flb_error("[out_es] Failed to create aws iam role "
+ "session name");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* STS provider needs yet another separate TLS instance */
+ ctx->aws_sts_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+ if (!ctx->aws_sts_tls) {
+ flb_errno();
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ ctx->aws_provider = flb_sts_provider_create(config,
+ ctx->aws_sts_tls,
+ ctx->base_aws_provider,
+ aws_external_id,
+ aws_role_arn,
+ aws_session_name,
+ ctx->aws_region,
+ ctx->aws_sts_endpoint,
+ NULL,
+ flb_aws_client_generator());
+ /* Session name can be freed once provider is created */
+ flb_free(aws_session_name);
+ if (!ctx->aws_provider) {
+ flb_error("[out_es] Failed to create AWS STS Credential "
+ "Provider");
+ flb_os_conf_destroy(ctx);
+ return NULL;
+ }
+
+ }
+
+ /* initialize credentials in sync mode */
+ ctx->aws_provider->provider_vtable->sync(ctx->aws_provider);
+ ctx->aws_provider->provider_vtable->init(ctx->aws_provider);
+ /* set back to async */
+ ctx->aws_provider->provider_vtable->async(ctx->aws_provider);
+ ctx->aws_provider->provider_vtable->upstream_set(ctx->aws_provider, ctx->ins);
+ }
+ }
+#endif
+
+ return ctx;
+}
+
+int flb_os_conf_destroy(struct flb_opensearch *ctx)
+{
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+ if (ctx->ra_id_key) {
+ flb_ra_destroy(ctx->ra_id_key);
+ ctx->ra_id_key = NULL;
+ }
+
+#ifdef FLB_HAVE_AWS
+ if (ctx->base_aws_provider) {
+ flb_aws_provider_destroy(ctx->base_aws_provider);
+ }
+
+ if (ctx->aws_provider) {
+ flb_aws_provider_destroy(ctx->aws_provider);
+ }
+
+ if (ctx->aws_tls) {
+ flb_tls_destroy(ctx->aws_tls);
+ }
+
+ if (ctx->aws_sts_tls) {
+ flb_tls_destroy(ctx->aws_sts_tls);
+ }
+
+ if (ctx->aws_unsigned_headers) {
+ flb_slist_destroy(ctx->aws_unsigned_headers);
+ flb_free(ctx->aws_unsigned_headers);
+ }
+#endif
+
+ if (ctx->ra_prefix_key) {
+ flb_ra_destroy(ctx->ra_prefix_key);
+ }
+
+ if (ctx->ra_index) {
+ flb_ra_destroy(ctx->ra_index);
+ }
+
+ flb_free(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/out_opensearch/os_conf.h b/src/fluent-bit/plugins/out_opensearch/os_conf.h
new file mode 100644
index 000000000..a48376a07
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opensearch/os_conf.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_OPENSEARCH_CONF_H
+#define FLB_OUT_OPENSEARCH_CONF_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_config.h>
+
+#include "opensearch.h"
+
+struct flb_opensearch *flb_os_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+int flb_os_conf_destroy(struct flb_opensearch *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_opentelemetry/CMakeLists.txt b/src/fluent-bit/plugins/out_opentelemetry/CMakeLists.txt
new file mode 100644
index 000000000..03c697ab2
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opentelemetry/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ opentelemetry.c
+ opentelemetry_conf.c
+ )
+
+FLB_PLUGIN(out_opentelemetry "${src}" "")
diff --git a/src/fluent-bit/plugins/out_opentelemetry/opentelemetry.c b/src/fluent-bit/plugins/out_opentelemetry/opentelemetry.c
new file mode 100644
index 000000000..c981cc27c
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opentelemetry/opentelemetry.c
@@ -0,0 +1,1207 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_snappy.h>
+#include <fluent-bit/flb_metrics.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include <cfl/cfl.h>
+#include <fluent-otel-proto/fluent-otel.h>
+
+#include <cmetrics/cmetrics.h>
+#include <fluent-bit/flb_gzip.h>
+#include <cmetrics/cmt_encode_opentelemetry.h>
+
+#include <ctraces/ctraces.h>
+#include <ctraces/ctr_decode_msgpack.h>
+
+extern cfl_sds_t cmt_encode_opentelemetry_create(struct cmt *cmt);
+extern void cmt_encode_opentelemetry_destroy(cfl_sds_t text);
+
+#include "opentelemetry.h"
+#include "opentelemetry_conf.h"
+
+static inline Opentelemetry__Proto__Common__V1__AnyValue *msgpack_object_to_otlp_any_value(struct msgpack_object *o);
+
+static inline void otlp_any_value_destroy(Opentelemetry__Proto__Common__V1__AnyValue *value);
+static inline void otlp_kvarray_destroy(Opentelemetry__Proto__Common__V1__KeyValue **kvarray, size_t entry_count);
+static inline void otlp_kvpair_destroy(Opentelemetry__Proto__Common__V1__KeyValue *kvpair);
+static inline void otlp_kvlist_destroy(Opentelemetry__Proto__Common__V1__KeyValueList *kvlist);
+static inline void otlp_array_destroy(Opentelemetry__Proto__Common__V1__ArrayValue *array);
+
+static inline void otlp_kvarray_destroy(Opentelemetry__Proto__Common__V1__KeyValue **kvarray, size_t entry_count)
+{
+ size_t index;
+
+ if (kvarray != NULL) {
+ for (index = 0 ; index < entry_count ; index++) {
+ if (kvarray[index] != NULL) {
+ otlp_kvpair_destroy(kvarray[index]);
+ kvarray[index] = NULL;
+ }
+ }
+
+ flb_free(kvarray);
+ }
+}
+
+static inline void otlp_kvpair_destroy(Opentelemetry__Proto__Common__V1__KeyValue *kvpair)
+{
+ if (kvpair != NULL) {
+ if (kvpair->key != NULL) {
+ flb_free(kvpair->key);
+ }
+
+ if (kvpair->value != NULL) {
+ otlp_any_value_destroy(kvpair->value);
+ }
+
+ flb_free(kvpair);
+ }
+}
+
+static inline void otlp_kvlist_destroy(Opentelemetry__Proto__Common__V1__KeyValueList *kvlist)
+{
+ size_t index;
+
+ if (kvlist != NULL) {
+ if (kvlist->values != NULL) {
+ for (index = 0 ; index < kvlist->n_values ; index++) {
+ otlp_kvpair_destroy(kvlist->values[index]);
+ }
+
+ flb_free(kvlist->values);
+ }
+
+ flb_free(kvlist);
+ }
+}
+
+static inline void otlp_array_destroy(Opentelemetry__Proto__Common__V1__ArrayValue *array)
+{
+ size_t index;
+
+ if (array != NULL) {
+ if (array->values != NULL) {
+ for (index = 0 ; index < array->n_values ; index++) {
+ otlp_any_value_destroy(array->values[index]);
+ }
+
+ flb_free(array->values);
+ }
+
+ flb_free(array);
+ }
+}
+
+static inline void otlp_any_value_destroy(Opentelemetry__Proto__Common__V1__AnyValue *value)
+{
+ if (value != NULL) {
+ if (value->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_STRING_VALUE) {
+ if (value->string_value != NULL) {
+ flb_free(value->string_value);
+ }
+ }
+ else if (value->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_ARRAY_VALUE) {
+ if (value->array_value != NULL) {
+ otlp_array_destroy(value->array_value);
+ }
+ }
+ else if (value->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_KVLIST_VALUE) {
+ if (value->kvlist_value != NULL) {
+ otlp_kvlist_destroy(value->kvlist_value);
+ }
+ }
+ else if (value->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_BYTES_VALUE) {
+ if (value->bytes_value.data != NULL) {
+ flb_free(value->bytes_value.data);
+ }
+ }
+
+ value->string_value = NULL;
+
+ flb_free(value);
+ }
+}
+
+static int http_post(struct opentelemetry_context *ctx,
+ const void *body, size_t body_len,
+ const char *tag, int tag_len,
+ const char *uri)
+{
+ size_t final_body_len;
+ void *final_body;
+ int compressed;
+ int out_ret;
+ size_t b_sent;
+ struct flb_connection *u_conn;
+ struct mk_list *head;
+ int ret;
+ struct flb_slist_entry *key;
+ struct flb_slist_entry *val;
+ struct flb_config_map_val *mv;
+ struct flb_http_client *c;
+
+ compressed = FLB_FALSE;
+
+ u_conn = flb_upstream_conn_get(ctx->u);
+
+ if (u_conn == NULL) {
+ flb_plg_error(ctx->ins,
+ "no upstream connections available to %s:%i",
+ ctx->u->tcp_host,
+ ctx->u->tcp_port);
+
+ return FLB_RETRY;
+ }
+
+ if (ctx->compress_gzip) {
+ ret = flb_gzip_compress((void *) body, body_len,
+ &final_body, &final_body_len);
+
+ if (ret == 0) {
+ compressed = FLB_TRUE;
+ } else {
+ flb_plg_error(ctx->ins, "cannot gzip payload, disabling compression");
+ }
+ } else {
+ final_body = (void *) body;
+ final_body_len = body_len;
+ }
+
+ /* Create HTTP client context */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, uri,
+ final_body, final_body_len,
+ ctx->host, ctx->port,
+ ctx->proxy, 0);
+
+ if (c == NULL) {
+ flb_plg_error(ctx->ins, "error initializing http client");
+
+ if (compressed) {
+ flb_free(final_body);
+ }
+
+ flb_upstream_conn_release(u_conn);
+
+ return FLB_RETRY;
+ }
+
+ if (c->proxy.host != NULL) {
+ flb_plg_debug(ctx->ins, "[http_client] proxy host: %s port: %i",
+ c->proxy.host, c->proxy.port);
+ }
+
+ /* Allow duplicated headers ? */
+ flb_http_allow_duplicated_headers(c, FLB_FALSE);
+
+ /*
+ * Direct assignment of the callback context to the HTTP client context.
+ * This needs to be improved through a cleaner API.
+ */
+ c->cb_ctx = ctx->ins->callback;
+
+ flb_http_add_header(c,
+ FLB_OPENTELEMETRY_CONTENT_TYPE_HEADER_NAME,
+ sizeof(FLB_OPENTELEMETRY_CONTENT_TYPE_HEADER_NAME) - 1,
+ FLB_OPENTELEMETRY_MIME_PROTOBUF_LITERAL,
+ sizeof(FLB_OPENTELEMETRY_MIME_PROTOBUF_LITERAL) - 1);
+
+ /* Basic Auth headers */
+ if (ctx->http_user != NULL &&
+ ctx->http_passwd != NULL) {
+ flb_http_basic_auth(c, ctx->http_user, ctx->http_passwd);
+ }
+
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+
+ flb_config_map_foreach(head, mv, ctx->headers) {
+ key = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ val = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ flb_http_add_header(c,
+ key->str, flb_sds_len(key->str),
+ val->str, flb_sds_len(val->str));
+ }
+
+ if (compressed) {
+ flb_http_set_content_encoding_gzip(c);
+ }
+
+ ret = flb_http_do(c, &b_sent);
+
+ if (ret == 0) {
+ /*
+ * Only allow the following HTTP status:
+ *
+ * - 200: OK
+ * - 201: Created
+ * - 202: Accepted
+ * - 203: Non-Authoritative Information
+ * - 204: No Content
+ * - 205: Reset content
+ *
+ */
+ if (c->resp.status < 200 || c->resp.status > 205) {
+ if (ctx->log_response_payload &&
+ c->resp.payload != NULL &&
+ c->resp.payload_size > 0) {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i\n%.*s",
+ ctx->host, ctx->port,
+ c->resp.status,
+ (int) c->resp.payload_size,
+ c->resp.payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->host, ctx->port, c->resp.status);
+ }
+
+ out_ret = FLB_RETRY;
+ }
+ else {
+ if (ctx->log_response_payload &&
+ c->resp.payload != NULL &&
+ c->resp.payload_size > 0) {
+ flb_plg_info(ctx->ins, "%s:%i, HTTP status=%i\n%.*s",
+ ctx->host, ctx->port,
+ c->resp.status,
+ (int) c->resp.payload_size,
+ c->resp.payload);
+ }
+ else {
+ flb_plg_info(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->host, ctx->port,
+ c->resp.status);
+ }
+
+ out_ret = FLB_OK;
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "could not flush records to %s:%i (http_do=%i)",
+ ctx->host, ctx->port, ret);
+
+ out_ret = FLB_RETRY;
+ }
+
+ if (compressed) {
+ flb_free(final_body);
+ }
+
+ /* Destroy HTTP client context */
+ flb_http_client_destroy(c);
+
+ /* Release the TCP connection */
+ flb_upstream_conn_release(u_conn);
+
+ return out_ret;
+}
+
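+/* Append the labels configured through 'add_label' to the metrics context */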
+static void append_labels(struct opentelemetry_context *ctx,
+ struct cmt *cmt)
+{
+ struct flb_kv *kv;
+ struct mk_list *head;
+
+ mk_list_foreach(head, &ctx->kv_labels) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+ cmt_label_add(cmt, kv->key, kv->val);
+ }
+}
+
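+/*
+ * Release the body and attributes of every converted log record so the
+ * pre-allocated array can be reused for the next batch.
+ */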
+static void clear_array(Opentelemetry__Proto__Logs__V1__LogRecord **logs,
+ size_t log_count)
+{
+ size_t index;
+
+ if (logs == NULL){
+ return;
+ }
+
+ for (index = 0 ; index < log_count ; index++) {
+ if (logs[index]->body != NULL) {
+ otlp_any_value_destroy(logs[index]->body);
+
+ logs[index]->body = NULL;
+ }
+
+ if (logs[index]->attributes != NULL) {
+ otlp_kvarray_destroy(logs[index]->attributes,
+ logs[index]->n_attributes);
+
+ logs[index]->attributes = NULL;
+ }
+ }
+}
+
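+/*
+ * Allocation helpers for the OTLP protobuf value types (array, key/value
+ * pair, key/value list and AnyValue). All of them return NULL on
+ * allocation failure.
+ */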
+static Opentelemetry__Proto__Common__V1__ArrayValue *otlp_array_value_initialize(size_t entry_count)
+{
+ Opentelemetry__Proto__Common__V1__ArrayValue *value;
+
+ value = flb_calloc(1, sizeof(Opentelemetry__Proto__Common__V1__ArrayValue));
+
+ if (value != NULL) {
+ opentelemetry__proto__common__v1__array_value__init(value);
+
+ if (entry_count > 0) {
+ value->values = \
+ flb_calloc(entry_count,
+ sizeof(Opentelemetry__Proto__Common__V1__AnyValue *));
+
+ if (value->values == NULL) {
+ flb_free(value);
+
+ value = NULL;
+ }
+ else {
+ value->n_values = entry_count;
+ }
+ }
+ }
+
+ return value;
+}
+
+static Opentelemetry__Proto__Common__V1__KeyValue *otlp_kvpair_value_initialize()
+{
+ Opentelemetry__Proto__Common__V1__KeyValue *value;
+
+ value = flb_calloc(1, sizeof(Opentelemetry__Proto__Common__V1__KeyValue));
+
+ if (value != NULL) {
+ opentelemetry__proto__common__v1__key_value__init(value);
+ }
+
+ return value;
+}
+
+static Opentelemetry__Proto__Common__V1__KeyValueList *otlp_kvlist_value_initialize(size_t entry_count)
+{
+ Opentelemetry__Proto__Common__V1__KeyValueList *value;
+
+ value = flb_calloc(1, sizeof(Opentelemetry__Proto__Common__V1__KeyValueList));
+
+ if (value != NULL) {
+ opentelemetry__proto__common__v1__key_value_list__init(value);
+
+ if (entry_count > 0) {
+ value->values = \
+ flb_calloc(entry_count,
+ sizeof(Opentelemetry__Proto__Common__V1__KeyValue *));
+
+ if (value->values == NULL) {
+ flb_free(value);
+
+ value = NULL;
+ }
+ else {
+ value->n_values = entry_count;
+ }
+ }
+ }
+
+ return value;
+}
+
+static Opentelemetry__Proto__Common__V1__AnyValue *otlp_any_value_initialize(int data_type, size_t entry_count)
+{
+ Opentelemetry__Proto__Common__V1__AnyValue *value;
+
+ value = flb_calloc(1, sizeof(Opentelemetry__Proto__Common__V1__AnyValue));
+
+ if (value == NULL) {
+ return NULL;
+ }
+
+ opentelemetry__proto__common__v1__any_value__init(value);
+
+ if (data_type == MSGPACK_OBJECT_STR) {
+ value->value_case = OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_STRING_VALUE;
+ }
+ else if (data_type == MSGPACK_OBJECT_NIL) {
+ value->value_case = OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE__NOT_SET;
+ }
+ else if (data_type == MSGPACK_OBJECT_BOOLEAN) {
+ value->value_case = OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_BOOL_VALUE;
+ }
+ else if (data_type == MSGPACK_OBJECT_POSITIVE_INTEGER || data_type == MSGPACK_OBJECT_NEGATIVE_INTEGER) {
+ value->value_case = OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_INT_VALUE;
+ }
+ else if (data_type == MSGPACK_OBJECT_FLOAT32 || data_type == MSGPACK_OBJECT_FLOAT64) {
+ value->value_case = OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_DOUBLE_VALUE;
+ }
+ else if (data_type == MSGPACK_OBJECT_ARRAY) {
+ value->value_case = OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_ARRAY_VALUE;
+ value->array_value = otlp_array_value_initialize(entry_count);
+
+ if (value->array_value == NULL) {
+ flb_free(value);
+
+ value = NULL;
+ }
+ }
+ else if (data_type == MSGPACK_OBJECT_MAP) {
+ value->value_case = OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_KVLIST_VALUE;
+
+ value->kvlist_value = otlp_kvlist_value_initialize(entry_count);
+
+ if (value->kvlist_value == NULL) {
+ flb_free(value);
+
+ value = NULL;
+ }
+ }
+ else if (data_type == MSGPACK_OBJECT_BIN) {
+ value->value_case = OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_BYTES_VALUE;
+ }
+ else {
+ flb_free(value);
+
+ value = NULL;
+ }
+
+ return value;
+}
+
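+/*
+ * msgpack to OTLP converters: each one maps a msgpack object type to its
+ * AnyValue counterpart and returns NULL if an allocation or a nested
+ * conversion fails.
+ */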
+static inline Opentelemetry__Proto__Common__V1__AnyValue *msgpack_boolean_to_otlp_any_value(struct msgpack_object *o)
+{
+ Opentelemetry__Proto__Common__V1__AnyValue *result;
+
+ result = otlp_any_value_initialize(MSGPACK_OBJECT_BOOLEAN, 0);
+
+ if (result != NULL) {
+ result->bool_value = o->via.boolean;
+ }
+
+ return result;
+}
+
+static inline Opentelemetry__Proto__Common__V1__AnyValue *msgpack_integer_to_otlp_any_value(struct msgpack_object *o)
+{
+ Opentelemetry__Proto__Common__V1__AnyValue *result;
+
+ result = otlp_any_value_initialize(o->type, 0);
+
+ if (result != NULL) {
+ if (o->type == MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ result->int_value = (int64_t) o->via.u64;
+ }
+ else {
+ result->int_value = o->via.i64;
+ }
+ }
+
+ return result;
+}
+
+static inline Opentelemetry__Proto__Common__V1__AnyValue *msgpack_float_to_otlp_any_value(struct msgpack_object *o)
+{
+ Opentelemetry__Proto__Common__V1__AnyValue *result;
+
+ result = otlp_any_value_initialize(o->type, 0);
+
+ if (result != NULL) {
+ result->double_value = o->via.f64;
+ }
+
+ return result;
+}
+
+static inline Opentelemetry__Proto__Common__V1__AnyValue *msgpack_string_to_otlp_any_value(struct msgpack_object *o)
+{
+ Opentelemetry__Proto__Common__V1__AnyValue *result;
+
+ result = otlp_any_value_initialize(MSGPACK_OBJECT_STR, 0);
+
+ if (result != NULL) {
+ result->string_value = flb_strndup(o->via.str.ptr, o->via.str.size);
+
+ if (result->string_value == NULL) {
+ otlp_any_value_destroy(result);
+
+ result = NULL;
+ }
+ }
+
+ return result;
+}
+
+static inline Opentelemetry__Proto__Common__V1__AnyValue *msgpack_nil_to_otlp_any_value(struct msgpack_object *o)
+{
+ Opentelemetry__Proto__Common__V1__AnyValue *result;
+
+ result = otlp_any_value_initialize(MSGPACK_OBJECT_NIL, 0);
+
+ if (result != NULL) {
+ result->string_value = NULL;
+ }
+
+ return result;
+}
+
+static inline Opentelemetry__Proto__Common__V1__AnyValue *msgpack_bin_to_otlp_any_value(struct msgpack_object *o)
+{
+ Opentelemetry__Proto__Common__V1__AnyValue *result;
+
+ result = otlp_any_value_initialize(MSGPACK_OBJECT_BIN, 0);
+
+    if (result != NULL) {
+        result->bytes_value.len = o->via.bin.size;
+        result->bytes_value.data = flb_malloc(o->via.bin.size);
+
+        if (result->bytes_value.data == NULL) {
+            otlp_any_value_destroy(result);
+
+            result = NULL;
+        }
+        else {
+            memcpy(result->bytes_value.data, o->via.bin.ptr, o->via.bin.size);
+        }
+    }
+
+ return result;
+}
+
+static inline Opentelemetry__Proto__Common__V1__AnyValue *msgpack_array_to_otlp_any_value(struct msgpack_object *o)
+{
+ size_t entry_count;
+ Opentelemetry__Proto__Common__V1__AnyValue *entry_value;
+ Opentelemetry__Proto__Common__V1__AnyValue *result;
+ size_t index;
+ msgpack_object *p;
+
+ entry_count = o->via.array.size;
+ result = otlp_any_value_initialize(MSGPACK_OBJECT_ARRAY, entry_count);
+
+ p = o->via.array.ptr;
+
+ if (result != NULL) {
+ index = 0;
+
+ for (index = 0 ; index < entry_count ; index++) {
+ entry_value = msgpack_object_to_otlp_any_value(&p[index]);
+
+ if (entry_value == NULL) {
+ otlp_any_value_destroy(result);
+
+ result = NULL;
+
+ break;
+ }
+
+ result->array_value->values[index] = entry_value;
+ }
+ }
+
+ return result;
+}
+
+static inline Opentelemetry__Proto__Common__V1__KeyValue *msgpack_kv_to_otlp_any_value(struct msgpack_object_kv *input_pair)
+{
+ Opentelemetry__Proto__Common__V1__KeyValue *kv;
+
+ kv = otlp_kvpair_value_initialize();
+ if (kv == NULL) {
+ flb_errno();
+
+ return NULL;
+ }
+
+ kv->key = flb_strndup(input_pair->key.via.str.ptr, input_pair->key.via.str.size);
+ if (kv->key == NULL) {
+ flb_errno();
+ flb_free(kv);
+
+ return NULL;
+ }
+
+ kv->value = msgpack_object_to_otlp_any_value(&input_pair->val);
+ if (kv->value == NULL) {
+ flb_free(kv->key);
+ flb_free(kv);
+
+ return NULL;
+ }
+
+ return kv;
+}
+
+static inline Opentelemetry__Proto__Common__V1__KeyValue **msgpack_map_to_otlp_kvarray(struct msgpack_object *o, size_t *entry_count)
+{
+ Opentelemetry__Proto__Common__V1__KeyValue **result;
+ size_t index;
+ msgpack_object_kv *kv;
+
+ *entry_count = o->via.map.size;
+ result = flb_calloc(*entry_count, sizeof(Opentelemetry__Proto__Common__V1__KeyValue *));
+
+ if (result != NULL) {
+ for (index = 0; index < *entry_count; index++) {
+ kv = &o->via.map.ptr[index];
+ result[index] = msgpack_kv_to_otlp_any_value(kv);
+ }
+ }
+ else {
+ *entry_count = 0;
+ }
+
+ return result;
+}
+
+static inline Opentelemetry__Proto__Common__V1__AnyValue *msgpack_map_to_otlp_any_value(struct msgpack_object *o)
+{
+ size_t entry_count;
+ Opentelemetry__Proto__Common__V1__AnyValue *result;
+ Opentelemetry__Proto__Common__V1__KeyValue *keyvalue;
+ size_t index;
+ msgpack_object_kv *kv;
+
+ entry_count = o->via.map.size;
+ result = otlp_any_value_initialize(MSGPACK_OBJECT_MAP, entry_count);
+
+ if (result != NULL) {
+
+ for (index = 0; index < entry_count; index++) {
+ kv = &o->via.map.ptr[index];
+ keyvalue = msgpack_kv_to_otlp_any_value(kv);
+ result->kvlist_value->values[index] = keyvalue;
+ }
+ }
+
+ return result;
+}
+
+static inline Opentelemetry__Proto__Common__V1__AnyValue *msgpack_object_to_otlp_any_value(struct msgpack_object *o)
+{
+ Opentelemetry__Proto__Common__V1__AnyValue *result;
+
+ result = NULL;
+
+ switch (o->type) {
+ case MSGPACK_OBJECT_NIL:
+ result = msgpack_nil_to_otlp_any_value(o);
+ break;
+
+ case MSGPACK_OBJECT_BOOLEAN:
+ result = msgpack_boolean_to_otlp_any_value(o);
+ break;
+
+ case MSGPACK_OBJECT_POSITIVE_INTEGER:
+ case MSGPACK_OBJECT_NEGATIVE_INTEGER:
+ result = msgpack_integer_to_otlp_any_value(o);
+ break;
+
+ case MSGPACK_OBJECT_FLOAT32:
+ case MSGPACK_OBJECT_FLOAT64:
+ result = msgpack_float_to_otlp_any_value(o);
+ break;
+
+ case MSGPACK_OBJECT_STR:
+ result = msgpack_string_to_otlp_any_value(o);
+ break;
+
+ case MSGPACK_OBJECT_MAP:
+ result = msgpack_map_to_otlp_any_value(o);
+ break;
+
+ case MSGPACK_OBJECT_BIN:
+ result = msgpack_bin_to_otlp_any_value(o);
+ break;
+
+ case MSGPACK_OBJECT_ARRAY:
+ result = msgpack_array_to_otlp_any_value(o);
+ break;
+
+ default:
+ break;
+ }
+
+    /*
+     * Unsupported types such as MSGPACK_OBJECT_EXT are not converted
+     * and make this function return NULL.
+     */
+
+ return result;
+}
+
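+/*
+ * Wrap the converted log records in a single ResourceLogs/ScopeLogs pair,
+ * pack the ExportLogsServiceRequest and POST it to the logs endpoint.
+ */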
+static int flush_to_otel(struct opentelemetry_context *ctx,
+ struct flb_event_chunk *event_chunk,
+ Opentelemetry__Proto__Logs__V1__LogRecord **logs,
+ size_t log_count)
+{
+ Opentelemetry__Proto__Collector__Logs__V1__ExportLogsServiceRequest export_logs;
+ Opentelemetry__Proto__Logs__V1__ScopeLogs scope_log;
+ Opentelemetry__Proto__Logs__V1__ResourceLogs resource_log;
+ Opentelemetry__Proto__Logs__V1__ResourceLogs *resource_logs[1];
+ Opentelemetry__Proto__Logs__V1__ScopeLogs *scope_logs[1];
+ void *body;
+ unsigned len;
+ int res;
+
+ opentelemetry__proto__collector__logs__v1__export_logs_service_request__init(&export_logs);
+ opentelemetry__proto__logs__v1__resource_logs__init(&resource_log);
+ opentelemetry__proto__logs__v1__scope_logs__init(&scope_log);
+
+ scope_log.log_records = logs;
+ scope_log.n_log_records = log_count;
+ scope_logs[0] = &scope_log;
+
+ resource_log.scope_logs = scope_logs;
+ resource_log.n_scope_logs = 1;
+ resource_logs[0] = &resource_log;
+
+ export_logs.resource_logs = resource_logs;
+ export_logs.n_resource_logs = 1;
+
+ len = opentelemetry__proto__collector__logs__v1__export_logs_service_request__get_packed_size(&export_logs);
+ body = flb_calloc(len, sizeof(char));
+ if (!body) {
+ flb_errno();
+ return FLB_ERROR;
+ }
+
+ opentelemetry__proto__collector__logs__v1__export_logs_service_request__pack(&export_logs, body);
+
+    /*
+     * send a POST request to the OpenTelemetry endpoint with
+     * content type application/x-protobuf
+     */
+ res = http_post(ctx, body, len,
+ event_chunk->tag,
+ flb_sds_len(event_chunk->tag),
+ ctx->logs_uri);
+
+ flb_free(body);
+
+ return res;
+}
+
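+/*
+ * Decode the msgpack events of the chunk, convert record metadata and body
+ * into OTLP log records and flush them in batches of up to 'batch_size'
+ * entries.
+ */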
+static int process_logs(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *ins, void *out_context,
+ struct flb_config *config)
+{
+ size_t log_record_count;
+ Opentelemetry__Proto__Logs__V1__LogRecord **log_record_list;
+ Opentelemetry__Proto__Logs__V1__LogRecord *log_records;
+ Opentelemetry__Proto__Common__V1__AnyValue *log_object;
+ struct flb_log_event_decoder *decoder;
+ struct flb_log_event event;
+ size_t index;
+ struct opentelemetry_context *ctx;
+ int res;
+
+ ctx = (struct opentelemetry_context *) out_context;
+
+ log_record_list = (Opentelemetry__Proto__Logs__V1__LogRecord **) \
+ flb_calloc(ctx->batch_size,
+ sizeof(Opentelemetry__Proto__Logs__V1__LogRecord *));
+
+    if (log_record_list == NULL) {
+        flb_errno();
+
+        return FLB_RETRY;
+    }
+
+ log_records = (Opentelemetry__Proto__Logs__V1__LogRecord *)
+ flb_calloc(ctx->batch_size,
+ sizeof(Opentelemetry__Proto__Logs__V1__LogRecord));
+
+    if (log_records == NULL) {
+        flb_errno();
+
+        flb_free(log_record_list);
+
+        return FLB_RETRY;
+    }
+
+ for(index = 0 ; index < ctx->batch_size ; index++) {
+ log_record_list[index] = &log_records[index];
+ }
+
+ decoder = flb_log_event_decoder_create((char *) event_chunk->data,
+ event_chunk->size);
+
+    if (decoder == NULL) {
+        flb_plg_error(ctx->ins, "could not initialize record decoder");
+
+        flb_free(log_record_list);
+        flb_free(log_records);
+
+        return FLB_RETRY;
+    }
+
+ log_record_count = 0;
+
+ res = FLB_OK;
+
+ while (flb_log_event_decoder_next(decoder, &event) == 0 &&
+ res == FLB_OK) {
+ opentelemetry__proto__logs__v1__log_record__init(&log_records[log_record_count]);
+ log_records[log_record_count].attributes = \
+ msgpack_map_to_otlp_kvarray(event.metadata,
+ &log_records[log_record_count].n_attributes);
+
+ log_object = msgpack_object_to_otlp_any_value(event.body);
+
+ if (log_object == NULL) {
+ flb_plg_error(ctx->ins, "log event conversion failure");
+ res = FLB_ERROR;
+ continue;
+ }
+
+
+ log_records[log_record_count].body = log_object;
+ log_records[log_record_count].time_unix_nano = flb_time_to_nanosec(&event.timestamp);
+
+ log_record_count++;
+
+ if (log_record_count >= ctx->batch_size) {
+ res = flush_to_otel(ctx,
+ event_chunk,
+ log_record_list,
+ log_record_count);
+
+ clear_array(log_record_list, log_record_count);
+
+ log_record_count = 0;
+ }
+ }
+
+ flb_log_event_decoder_destroy(decoder);
+
+ if (log_record_count > 0 &&
+ res == FLB_OK) {
+ res = flush_to_otel(ctx,
+ event_chunk,
+ log_record_list,
+ log_record_count);
+
+ clear_array(log_record_list, log_record_count);
+ }
+
+ flb_free(log_record_list);
+ flb_free(log_records);
+
+ return res;
+}
+
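+/*
+ * Decode every CMetrics context in the chunk, append the configured labels,
+ * encode it as OpenTelemetry and POST the concatenated payload to the
+ * metrics endpoint.
+ */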
+static int process_metrics(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *ins, void *out_context,
+ struct flb_config *config)
+{
+ int c = 0;
+ int ok;
+ int ret;
+ int result;
+ cfl_sds_t encoded_chunk;
+ flb_sds_t buf = NULL;
+ size_t diff = 0;
+ size_t off = 0;
+ struct cmt *cmt;
+ struct opentelemetry_context *ctx = out_context;
+
+ /* Initialize vars */
+ ctx = out_context;
+ ok = CMT_DECODE_MSGPACK_SUCCESS;
+ result = FLB_OK;
+
+ /* Buffer to concatenate multiple metrics contexts */
+ buf = flb_sds_create_size(event_chunk->size);
+ if (!buf) {
+ flb_plg_error(ctx->ins, "could not allocate outgoing buffer");
+ return FLB_RETRY;
+ }
+
+ flb_plg_debug(ctx->ins, "cmetrics msgpack size: %lu",
+ event_chunk->size);
+
+ /* Decode and encode every CMetric context */
+ diff = 0;
+ while ((ret = cmt_decode_msgpack_create(&cmt,
+ (char *) event_chunk->data,
+ event_chunk->size, &off)) == ok) {
+ /* append labels set by config */
+ append_labels(ctx, cmt);
+
+ /* Create a OpenTelemetry payload */
+ encoded_chunk = cmt_encode_opentelemetry_create(cmt);
+ if (encoded_chunk == NULL) {
+ flb_plg_error(ctx->ins,
+ "Error encoding context as opentelemetry");
+ result = FLB_ERROR;
+ cmt_destroy(cmt);
+ goto exit;
+ }
+
+ flb_plg_debug(ctx->ins, "cmetric_id=%i decoded %lu-%lu payload_size=%lu",
+ c, diff, off, flb_sds_len(encoded_chunk));
+ c++;
+ diff = off;
+
+ /* concat buffer */
+ flb_sds_cat_safe(&buf, encoded_chunk, flb_sds_len(encoded_chunk));
+
+ /* release */
+ cmt_encode_opentelemetry_destroy(encoded_chunk);
+ cmt_destroy(cmt);
+ }
+
+ if (ret == CMT_DECODE_MSGPACK_INSUFFICIENT_DATA && c > 0) {
+ flb_plg_debug(ctx->ins, "final payload size: %lu", flb_sds_len(buf));
+ if (buf && flb_sds_len(buf) > 0) {
+ /* Send HTTP request */
+ result = http_post(ctx, buf, flb_sds_len(buf),
+ event_chunk->tag,
+ flb_sds_len(event_chunk->tag),
+ ctx->metrics_uri);
+
+ /* Debug http_post() result statuses */
+ if (result == FLB_OK) {
+ flb_plg_debug(ctx->ins, "http_post result FLB_OK");
+ }
+ else if (result == FLB_ERROR) {
+ flb_plg_debug(ctx->ins, "http_post result FLB_ERROR");
+ }
+ else if (result == FLB_RETRY) {
+ flb_plg_debug(ctx->ins, "http_post result FLB_RETRY");
+ }
+ }
+ flb_sds_destroy(buf);
+ buf = NULL;
+ return result;
+ }
+    else {
+        flb_plg_error(ctx->ins, "Error decoding msgpack encoded context");
+        flb_sds_destroy(buf);
+        return FLB_ERROR;
+    }
+
+exit:
+ if (buf) {
+ flb_sds_destroy(buf);
+ }
+ return result;
+}
+
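+/*
+ * Decode every CTraces context in the chunk, encode it as OpenTelemetry and
+ * POST the concatenated payload to the traces endpoint.
+ */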
+static int process_traces(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *ins, void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ int result;
+ cfl_sds_t encoded_chunk;
+ flb_sds_t buf = NULL;
+ size_t off = 0;
+ struct ctrace *ctr;
+ struct opentelemetry_context *ctx = out_context;
+
+ /* Initialize vars */
+ ctx = out_context;
+ result = FLB_OK;
+
+ buf = flb_sds_create_size(event_chunk->size);
+ if (!buf) {
+ flb_plg_error(ctx->ins, "could not allocate outgoing buffer");
+ return FLB_RETRY;
+ }
+
+ flb_plg_debug(ctx->ins, "ctraces msgpack size: %lu",
+ event_chunk->size);
+
+ while (ctr_decode_msgpack_create(&ctr,
+ (char *) event_chunk->data,
+ event_chunk->size, &off) == 0) {
+ /* Create a OpenTelemetry payload */
+ encoded_chunk = ctr_encode_opentelemetry_create(ctr);
+ if (encoded_chunk == NULL) {
+ flb_plg_error(ctx->ins,
+ "Error encoding context as opentelemetry");
+ result = FLB_ERROR;
+ ctr_destroy(ctr);
+ goto exit;
+ }
+
+ /* concat buffer */
+ ret = flb_sds_cat_safe(&buf, encoded_chunk, flb_sds_len(encoded_chunk));
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "Error appending encoded trace to buffer");
+ result = FLB_ERROR;
+ ctr_encode_opentelemetry_destroy(encoded_chunk);
+ ctr_destroy(ctr);
+ goto exit;
+ }
+
+ /* release */
+ ctr_encode_opentelemetry_destroy(encoded_chunk);
+ ctr_destroy(ctr);
+ }
+
+ flb_plg_debug(ctx->ins, "final payload size: %lu", flb_sds_len(buf));
+ if (buf && flb_sds_len(buf) > 0) {
+ /* Send HTTP request */
+ result = http_post(ctx, buf, flb_sds_len(buf),
+ event_chunk->tag,
+ flb_sds_len(event_chunk->tag),
+ ctx->traces_uri);
+
+ /* Debug http_post() result statuses */
+ if (result == FLB_OK) {
+ flb_plg_debug(ctx->ins, "http_post result FLB_OK");
+ }
+ else if (result == FLB_ERROR) {
+ flb_plg_debug(ctx->ins, "http_post result FLB_ERROR");
+ }
+ else if (result == FLB_RETRY) {
+ flb_plg_debug(ctx->ins, "http_post result FLB_RETRY");
+ }
+ }
+
+exit:
+ if (buf) {
+ flb_sds_destroy(buf);
+ }
+ return result;
+}
+
+static int cb_opentelemetry_exit(void *data, struct flb_config *config)
+{
+ struct opentelemetry_context *ctx;
+
+ ctx = (struct opentelemetry_context *) data;
+
+ flb_opentelemetry_context_destroy(ctx);
+
+ return 0;
+}
+
+static int cb_opentelemetry_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ struct opentelemetry_context *ctx;
+
+ ctx = flb_opentelemetry_context_create(ins, config);
+ if (!ctx) {
+ return -1;
+ }
+
+ if (ctx->batch_size <= 0){
+ ctx->batch_size = atoi(DEFAULT_LOG_RECORD_BATCH_SIZE);
+ }
+
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+}
+
+static void cb_opentelemetry_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *ins, void *out_context,
+ struct flb_config *config)
+{
+ int result = FLB_RETRY;
+
+ if (event_chunk->type == FLB_INPUT_METRICS){
+ result = process_metrics(event_chunk, out_flush, ins, out_context, config);
+ }
+ else if (event_chunk->type == FLB_INPUT_LOGS){
+ result = process_logs(event_chunk, out_flush, ins, out_context, config);
+ }
+ else if (event_chunk->type == FLB_INPUT_TRACES){
+ result = process_traces(event_chunk, out_flush, ins, out_context, config);
+ }
+ FLB_OUTPUT_RETURN(result);
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_SLIST_1, "add_label", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct opentelemetry_context,
+ add_labels),
+ "Adds a custom label to the metrics use format: 'add_label name value'"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "proxy", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify an HTTP Proxy. The expected format of this value is http://host:port. "
+ },
+ {
+ FLB_CONFIG_MAP_STR, "http_user", NULL,
+ 0, FLB_TRUE, offsetof(struct opentelemetry_context, http_user),
+ "Set HTTP auth user"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "http_passwd", "",
+ 0, FLB_TRUE, offsetof(struct opentelemetry_context, http_passwd),
+ "Set HTTP auth password"
+ },
+ {
+ FLB_CONFIG_MAP_SLIST_1, "header", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct opentelemetry_context, headers),
+ "Add a HTTP header key/value pair. Multiple headers can be set"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "metrics_uri", "/v1/metrics",
+ 0, FLB_TRUE, offsetof(struct opentelemetry_context, metrics_uri),
+ "Specify an optional HTTP URI for the target OTel endpoint."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "logs_uri", "/v1/logs",
+ 0, FLB_TRUE, offsetof(struct opentelemetry_context, logs_uri),
+ "Specify an optional HTTP URI for the target OTel endpoint."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "traces_uri", "/v1/traces",
+ 0, FLB_TRUE, offsetof(struct opentelemetry_context, traces_uri),
+ "Specify an optional HTTP URI for the target OTel endpoint."
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "log_response_payload", "true",
+ 0, FLB_TRUE, offsetof(struct opentelemetry_context, log_response_payload),
+ "Specify if the response paylod should be logged or not"
+ },
+ {
+ FLB_CONFIG_MAP_INT, "batch_size", DEFAULT_LOG_RECORD_BATCH_SIZE,
+ 0, FLB_TRUE, offsetof(struct opentelemetry_context, batch_size),
+ "Set the maximum number of log records to be flushed at a time"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "compress", NULL,
+ 0, FLB_FALSE, 0,
+ "Set payload compression mechanism. Option available is 'gzip'"
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_output_plugin out_opentelemetry_plugin = {
+ .name = "opentelemetry",
+ .description = "OpenTelemetry",
+ .cb_init = cb_opentelemetry_init,
+ .cb_flush = cb_opentelemetry_flush,
+ .cb_exit = cb_opentelemetry_exit,
+ .config_map = config_map,
+ .event_type = FLB_OUTPUT_LOGS | FLB_OUTPUT_METRICS | FLB_OUTPUT_TRACES,
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_opentelemetry/opentelemetry.h b/src/fluent-bit/plugins/out_opentelemetry/opentelemetry.h
new file mode 100644
index 000000000..94e424ac7
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opentelemetry/opentelemetry.h
@@ -0,0 +1,80 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_OPENTELEMETRY_H
+#define FLB_OUT_OPENTELEMETRY_H
+
+#include <fluent-bit/flb_output_plugin.h>
+
+#define FLB_OPENTELEMETRY_CONTENT_TYPE_HEADER_NAME "Content-Type"
+#define FLB_OPENTELEMETRY_MIME_PROTOBUF_LITERAL "application/x-protobuf"
+
+/*
+ * This lets you send log records in batches instead of a request per log record.
+ * It might be removed in further versions: if we have a large number of
+ * log records and a later batch fails, Fluent Bit will retry ALL the batches,
+ * including the ones that already succeeded, which is not ideal.
+ */
+#define DEFAULT_LOG_RECORD_BATCH_SIZE "1000"
+
+/* Plugin context */
+struct opentelemetry_context {
+ /* HTTP Auth */
+ char *http_user;
+ char *http_passwd;
+
+ /* Proxy */
+ const char *proxy;
+ char *proxy_host;
+ int proxy_port;
+
+ /* HTTP URI */
+ char *traces_uri;
+ char *metrics_uri;
+ char *logs_uri;
+ char *host;
+ int port;
+
+ /* Number of logs to flush at a time */
+ int batch_size;
+
+    /* Log the response payload */
+ int log_response_payload;
+
+ /* config reader for 'add_label' */
+ struct mk_list *add_labels;
+
+ /* internal labels ready to append */
+ struct mk_list kv_labels;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *u;
+
+ /* Arbitrary HTTP headers */
+ struct mk_list *headers;
+
+
+ /* instance context */
+ struct flb_output_instance *ins;
+
+ /* Compression mode (gzip) */
+ int compress_gzip;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_opentelemetry/opentelemetry_conf.c b/src/fluent-bit/plugins/out_opentelemetry/opentelemetry_conf.c
new file mode 100644
index 000000000..5c9c8f82c
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opentelemetry/opentelemetry_conf.c
@@ -0,0 +1,262 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_kv.h>
+
+#include "opentelemetry.h"
+#include "opentelemetry_conf.h"
+
+static int config_add_labels(struct flb_output_instance *ins,
+ struct opentelemetry_context *ctx)
+{
+ struct mk_list *head;
+ struct flb_config_map_val *mv;
+ struct flb_slist_entry *k = NULL;
+ struct flb_slist_entry *v = NULL;
+ struct flb_kv *kv;
+
+ if (!ctx->add_labels || mk_list_size(ctx->add_labels) == 0) {
+ return 0;
+ }
+
+ /* iterate all 'add_label' definitions */
+ flb_config_map_foreach(head, mv, ctx->add_labels) {
+ if (mk_list_size(mv->val.list) != 2) {
+ flb_plg_error(ins, "'add_label' expects a key and a value, "
+ "e.g: 'add_label version 1.8.0'");
+ return -1;
+ }
+
+ k = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ v = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ kv = flb_kv_item_create(&ctx->kv_labels, k->str, v->str);
+ if (!kv) {
+ flb_plg_error(ins, "could not append label %s=%s\n", k->str, v->str);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Check if a Proxy has been set; if so, the Upstream manager will use
+ * the Proxy end-point and we let the HTTP client know about it, so
+ * it can adjust the HTTP requests accordingly.
+ */
+
+static void check_proxy(struct flb_output_instance *ins,
+ struct opentelemetry_context *ctx,
+ char *host, char *port,
+ char *protocol, char *uri){
+
+ const char *tmp = NULL;
+ int ret;
+ tmp = flb_output_get_property("proxy", ins);
+ if (tmp) {
+ ret = flb_utils_url_split(tmp, &protocol, &host, &port, &uri);
+        if (ret == -1) {
+            flb_plg_error(ctx->ins, "could not parse proxy parameter: '%s'", tmp);
+
+            /* keep the context untouched and skip the proxy setup */
+            return;
+        }
+
+ ctx->proxy_host = host;
+ ctx->proxy_port = atoi(port);
+ ctx->proxy = tmp;
+ flb_free(protocol);
+ flb_free(port);
+ flb_free(uri);
+ uri = NULL;
+ }
+ else {
+ flb_output_net_default("127.0.0.1", 80, ins);
+ }
+}
+
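+/* Make sure the configured URI starts with a leading '/' */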
+static char *sanitize_uri(char *uri){
+ char *new_uri;
+ int uri_len;
+
+ if (uri == NULL) {
+ uri = flb_strdup("/");
+ }
+ else if (uri[0] != '/') {
+ uri_len = strlen(uri);
+ new_uri = flb_calloc(uri_len + 2, sizeof(char));
+
+ if (new_uri != NULL) {
+ new_uri[0] = '/';
+
+ strncat(new_uri, uri, uri_len + 1);
+ }
+
+ uri = new_uri;
+ }
+
+ /* This function could return NULL if flb_calloc fails */
+
+ return uri;
+}
+
+struct opentelemetry_context *flb_opentelemetry_context_create(
+ struct flb_output_instance *ins, struct flb_config *config)
+{
+ int ret;
+ int io_flags = 0;
+ char *protocol = NULL;
+ char *host = NULL;
+ char *port = NULL;
+ char *metrics_uri = NULL;
+ char *traces_uri = NULL;
+ char *logs_uri = NULL;
+ struct flb_upstream *upstream;
+ struct opentelemetry_context *ctx = NULL;
+ const char *tmp = NULL;
+
+ /* Allocate plugin context */
+ ctx = flb_calloc(1, sizeof(struct opentelemetry_context));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ mk_list_init(&ctx->kv_labels);
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Parse 'add_label' */
+ ret = config_add_labels(ins, ctx);
+ if (ret == -1) {
+ return NULL;
+ }
+
+ check_proxy(ins, ctx, host, port, protocol, metrics_uri);
+ check_proxy(ins, ctx, host, port, protocol, logs_uri);
+
+ /* Check if SSL/TLS is enabled */
+#ifdef FLB_HAVE_TLS
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ }
+#else
+ io_flags = FLB_IO_TCP;
+#endif
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ if (ctx->proxy) {
+ flb_plg_trace(ctx->ins, "Upstream Proxy=%s:%i",
+ ctx->proxy_host, ctx->proxy_port);
+ upstream = flb_upstream_create(config,
+ ctx->proxy_host,
+ ctx->proxy_port,
+ io_flags, ins->tls);
+ }
+ else {
+ upstream = flb_upstream_create(config,
+ ins->host.name,
+ ins->host.port,
+ io_flags, ins->tls);
+ }
+
+ if (!upstream) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ logs_uri = sanitize_uri(ctx->logs_uri);
+ traces_uri = sanitize_uri(ctx->traces_uri);
+ metrics_uri = sanitize_uri(ctx->metrics_uri);
+
+ ctx->u = upstream;
+ ctx->host = ins->host.name;
+ ctx->port = ins->host.port;
+
+ if (logs_uri == NULL) {
+ flb_plg_trace(ctx->ins,
+ "Could not allocate memory for sanitized "
+ "log endpoint uri");
+ }
+ else {
+ ctx->logs_uri = logs_uri;
+ }
+
+ if (traces_uri == NULL) {
+ flb_plg_trace(ctx->ins,
+ "Could not allocate memory for sanitized "
+ "trace endpoint uri");
+ }
+ else {
+ ctx->traces_uri = traces_uri;
+ }
+
+ if (metrics_uri == NULL) {
+ flb_plg_trace(ctx->ins,
+ "Could not allocate memory for sanitized "
+ "metric endpoint uri");
+ }
+ else {
+ ctx->metrics_uri = metrics_uri;
+ }
+
+
+ /* Set instance flags into upstream */
+ flb_output_upstream_set(ctx->u, ins);
+
+ tmp = flb_output_get_property("compress", ins);
+ ctx->compress_gzip = FLB_FALSE;
+ if (tmp) {
+ if (strcasecmp(tmp, "gzip") == 0) {
+ ctx->compress_gzip = FLB_TRUE;
+ }
+ }
+
+ return ctx;
+}
+
+void flb_opentelemetry_context_destroy(
+ struct opentelemetry_context *ctx)
+{
+ if (!ctx) {
+ return;
+ }
+
+ flb_kv_release(&ctx->kv_labels);
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ flb_free(ctx->proxy_host);
+ flb_free(ctx);
+}
diff --git a/src/fluent-bit/plugins/out_opentelemetry/opentelemetry_conf.h b/src/fluent-bit/plugins/out_opentelemetry/opentelemetry_conf.h
new file mode 100644
index 000000000..974f7fea5
--- /dev/null
+++ b/src/fluent-bit/plugins/out_opentelemetry/opentelemetry_conf.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_OPENTELEMETRY_CONF_H
+#define FLB_OUT_OPENTELEMETRY_CONF_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+
+#include "opentelemetry.h"
+
+struct opentelemetry_context *flb_opentelemetry_context_create(
+ struct flb_output_instance *ins, struct flb_config *config);
+void flb_opentelemetry_context_destroy(
+ struct opentelemetry_context *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_oracle_log_analytics/CMakeLists.txt b/src/fluent-bit/plugins/out_oracle_log_analytics/CMakeLists.txt
new file mode 100644
index 000000000..81f971a95
--- /dev/null
+++ b/src/fluent-bit/plugins/out_oracle_log_analytics/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ oci_logan.c
+ oci_logan_conf.c
+ )
+
+FLB_PLUGIN(out_oracle_log_analytics "${src}" "")
diff --git a/src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan.c b/src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan.c
new file mode 100644
index 000000000..630812e2d
--- /dev/null
+++ b/src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan.c
@@ -0,0 +1,1313 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_upstream_conn.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_hash_table.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_crypto.h>
+#include <fluent-bit/flb_base64.h>
+#include <fluent-bit/flb_hash.h>
+#include <fluent-bit/flb_sds.h>
+
+#include <monkey/mk_core/mk_list.h>
+#include <msgpack.h>
+
+#include <string.h>
+
+#include "oci_logan_conf.h"
+#include "oci_logan.h"
+
+
+static int check_config_from_record(msgpack_object key,
+ char *name, int len)
+{
+ if (key.type != MSGPACK_OBJECT_STR) {
+ return FLB_FALSE;
+ }
+
+ if (key.via.str.size != len) {
+ return FLB_FALSE;
+ }
+
+
+ return memcmp(key.via.str.ptr, name, len) == 0;
+}
+
+/*
+ * Authorization: Signature version="1",keyId="<tenancy_ocid>/<user_ocid>/<key_fingerprint>",
+ * algorithm="rsa-sha256",headers="(request-target) date x-content-sha256 content-type content-length",
+ * signature="signature"
+ */
+static flb_sds_t create_authorization_header_content(struct flb_oci_logan *ctx,
+ flb_sds_t signature)
+{
+ flb_sds_t content;
+
+ content = flb_sds_create_size(512);
+ flb_sds_cat_safe(&content, FLB_OCI_SIGN_SIGNATURE_VERSION,
+ sizeof(FLB_OCI_SIGN_SIGNATURE_VERSION) - 1);
+ flb_sds_cat_safe(&content, ",", 1);
+ flb_sds_cat_safe(&content, FLB_OCI_SIGN_KEYID,
+ sizeof(FLB_OCI_SIGN_KEYID) - 1);
+ flb_sds_cat_safe(&content, "=\"", 2);
+ flb_sds_cat_safe(&content, ctx->key_id, flb_sds_len(ctx->key_id));
+ flb_sds_cat_safe(&content, "\",", 2);
+ flb_sds_cat_safe(&content, FLB_OCI_SIGN_ALGORITHM,
+ sizeof(FLB_OCI_SIGN_ALGORITHM) - 1);
+ flb_sds_cat_safe(&content, ",", 1);
+ flb_sds_cat_safe(&content, FLB_OCI_SIGN_HEADERS,
+ sizeof(FLB_OCI_SIGN_HEADERS) - 1);
+ flb_sds_cat_safe(&content, ",", 1);
+ flb_sds_cat_safe(&content, FLB_OCI_SIGN_SIGNATURE,
+ sizeof(FLB_OCI_SIGN_SIGNATURE) - 1);
+ flb_sds_cat_safe(&content, "=\"", 2);
+ flb_sds_cat_safe(&content, signature, flb_sds_len(signature));
+ flb_sds_cat_safe(&content, "\"", 1);
+
+ return content;
+}
+
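+/*
+ * Hash the signing string with SHA-256, sign it with the configured RSA
+ * private key and return the base64 encoded signature.
+ */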
+static flb_sds_t create_base64_sha256_signature(struct flb_oci_logan *ctx,
+ flb_sds_t signing_string)
+{
+ int len = 0, ret;
+ size_t outlen;
+ flb_sds_t signature;
+ unsigned char sha256_buf[32] = { 0 };
+ unsigned char sig[256] = { 0 };
+ size_t sig_len = sizeof(sig);
+
+ ret = flb_hash_simple(FLB_HASH_SHA256,
+ (unsigned char*) signing_string,
+ flb_sds_len(signing_string),
+ sha256_buf, sizeof(sha256_buf));
+
+ if(ret != FLB_CRYPTO_SUCCESS) {
+ flb_plg_error(ctx->ins, "error generating hash buffer");
+ return NULL;
+ }
+
+ ret = flb_crypto_sign_simple(FLB_CRYPTO_PRIVATE_KEY,
+ FLB_CRYPTO_PADDING_PKCS1,
+ FLB_HASH_SHA256,
+ (unsigned char *) ctx->private_key,
+ flb_sds_len(ctx->private_key),
+ sha256_buf, sizeof(sha256_buf),
+ sig, &sig_len);
+
+
+ if (ret != FLB_CRYPTO_SUCCESS) {
+ flb_plg_error(ctx->ins, "error signing SHA256");
+ return NULL;
+ }
+
+ signature = flb_sds_create_size(512);
+ if (!signature) {
+ flb_errno();
+ return NULL;
+ }
+
+ /* base 64 encode */
+ len = flb_sds_alloc(signature) - 1;
+ flb_base64_encode((unsigned char*) signature, len, &outlen, sig,
+ sizeof(sig));
+ signature[outlen] = '\0';
+ flb_sds_len_set(signature, outlen);
+
+ return signature;
+}
+
+static flb_sds_t get_date(void)
+{
+
+ flb_sds_t rfc1123date;
+ time_t t;
+ size_t size;
+ struct tm tm = { 0 };
+
+ /* Format Date */
+ rfc1123date = flb_sds_create_size(32);
+ if (!rfc1123date) {
+ flb_errno();
+ return NULL;
+ }
+
+ t = time(NULL);
+ if (!gmtime_r(&t, &tm)) {
+ flb_errno();
+ flb_sds_destroy(rfc1123date);
+ return NULL;
+ }
+ size = strftime(rfc1123date, flb_sds_alloc(rfc1123date) - 1,
+ "%a, %d %b %Y %H:%M:%S GMT", &tm);
+ if (size <= 0) {
+ flb_errno();
+ flb_sds_destroy(rfc1123date);
+ return NULL;
+ }
+ flb_sds_len_set(rfc1123date, size);
+ return rfc1123date;
+}
+
+static flb_sds_t add_header_and_signing(struct flb_http_client *c,
+ flb_sds_t signing_str, const char *header, int headersize,
+ const char *val, int val_size)
+{
+ if (!signing_str) {
+ return NULL;
+ }
+
+ flb_http_add_header(c, header, headersize, val, val_size);
+
+ flb_sds_cat_safe(&signing_str, "\n", 1);
+ flb_sds_cat_safe(&signing_str, header, headersize);
+ flb_sds_cat_safe(&signing_str, ": ", 2);
+ flb_sds_cat_safe(&signing_str, val, val_size);
+
+ return signing_str;
+}
+
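+/*
+ * Build the signed request: add the host, date, x-content-sha256,
+ * content-type and content-length headers while composing the signing
+ * string, then attach the Authorization header derived from it.
+ */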
+static int build_headers(struct flb_http_client *c, struct flb_oci_logan *ctx,
+ flb_sds_t json, flb_sds_t hostname, int port, flb_sds_t uri)
+{
+ int ret = -1;
+ flb_sds_t tmp_sds = NULL;
+ flb_sds_t signing_str = NULL;
+ flb_sds_t rfc1123date = NULL;
+ flb_sds_t encoded_uri = NULL;
+ flb_sds_t signature = NULL;
+ flb_sds_t auth_header_str = NULL;
+
+ flb_sds_t tmp_ref = NULL;
+
+ size_t tmp_len = 0;
+
+ unsigned char sha256_buf[32] = { 0 };
+
+ tmp_sds = flb_sds_create_size(512);
+ if (!tmp_sds) {
+ flb_errno();
+ goto error_label;
+ }
+
+ signing_str = flb_sds_create_size(1024);
+ if (!signing_str) {
+ flb_errno();
+ goto error_label;
+ }
+
+    /* Add (request-target) to signing string */
+ encoded_uri = flb_uri_encode(uri, flb_sds_len(uri));
+ if (!encoded_uri) {
+ flb_errno();
+ goto error_label;
+ }
+ flb_sds_cat_safe(&signing_str, FLB_OCI_HEADER_REQUEST_TARGET,
+ sizeof(FLB_OCI_HEADER_REQUEST_TARGET) - 1);
+ flb_sds_cat_safe(&signing_str, ": post ", sizeof(": post ") - 1);
+ flb_sds_cat_safe(&signing_str, encoded_uri,
+ flb_sds_len(encoded_uri));
+
+ /* Add Host to Header */
+ if (((c->flags & FLB_IO_TLS) && c->port == 443)
+ || (!(c->flags & FLB_IO_TLS) && c->port == 80)) {
+ /* default port */
+ tmp_ref = flb_sds_copy(tmp_sds, c->host, strlen(c->host));
+ }
+ else {
+ tmp_ref = flb_sds_printf(&tmp_sds, "%s:%i", c->host, c->port);
+ }
+ if (!tmp_ref) {
+ flb_plg_error(ctx->ins, "cannot compose temporary host header");
+ goto error_label;
+ }
+ tmp_sds = tmp_ref;
+ tmp_ref = NULL;
+
+ signing_str = add_header_and_signing(c, signing_str, FLB_OCI_HEADER_HOST,
+ sizeof(FLB_OCI_HEADER_HOST) - 1,
+ tmp_sds, flb_sds_len(tmp_sds));
+ if (!signing_str) {
+ flb_plg_error(ctx->ins, "cannot compose signing string");
+ goto error_label;
+ }
+
+ /* Add Date header */
+ rfc1123date = get_date();
+ if (!rfc1123date) {
+ flb_plg_error(ctx->ins, "cannot compose temporary date header");
+ goto error_label;
+ }
+ signing_str = add_header_and_signing(c, signing_str, FLB_OCI_HEADER_DATE,
+ sizeof(FLB_OCI_HEADER_DATE) - 1, rfc1123date,
+ flb_sds_len(rfc1123date));
+ if (!signing_str) {
+ flb_plg_error(ctx->ins, "cannot compose signing string");
+ goto error_label;
+ }
+
+ /* Add x-content-sha256 Header */
+ ret = flb_hash_simple(FLB_HASH_SHA256,
+ (unsigned char*) json,
+ flb_sds_len(json),
+ sha256_buf, sizeof(sha256_buf));
+
+ if (ret != FLB_CRYPTO_SUCCESS) {
+ flb_plg_error(ctx->ins, "error forming hash buffer for x-content-sha256 Header");
+ goto error_label;
+ }
+
+ flb_base64_encode((unsigned char*) tmp_sds, flb_sds_len(tmp_sds) - 1,
+ &tmp_len, sha256_buf, sizeof(sha256_buf));
+
+ tmp_sds[tmp_len] = '\0';
+ flb_sds_len_set(tmp_sds, tmp_len);
+
+ signing_str = add_header_and_signing(c, signing_str,
+ FLB_OCI_HEADER_X_CONTENT_SHA256,
+ sizeof(FLB_OCI_HEADER_X_CONTENT_SHA256) - 1, tmp_sds,
+ flb_sds_len(tmp_sds));
+ if (!signing_str) {
+ flb_plg_error(ctx->ins, "cannot compose signing string");
+ goto error_label;
+ }
+
+ /* Add content-Type */
+ signing_str = add_header_and_signing(c, signing_str,
+ FLB_OCI_HEADER_CONTENT_TYPE, sizeof(FLB_OCI_HEADER_CONTENT_TYPE) - 1,
+ FLB_OCI_HEADER_CONTENT_TYPE_VAL,
+ sizeof(FLB_OCI_HEADER_CONTENT_TYPE_VAL) - 1);
+ if (!signing_str) {
+ flb_plg_error(ctx->ins, "cannot compose signing string");
+ goto error_label;
+ }
+
+ /* Add content-Length */
+ tmp_len = snprintf(tmp_sds, flb_sds_alloc(tmp_sds) - 1, "%i",
+ (int) flb_sds_len(json));
+ flb_sds_len_set(tmp_sds, tmp_len);
+ signing_str = add_header_and_signing(c, signing_str,
+ FLB_OCI_HEADER_CONTENT_LENGTH, sizeof(FLB_OCI_HEADER_CONTENT_LENGTH) - 1,
+ tmp_sds, flb_sds_len(tmp_sds));
+ if (!signing_str) {
+ flb_plg_error(ctx->ins, "cannot compose signing string");
+ goto error_label;
+ }
+
+ /* Add Authorization header */
+ signature = create_base64_sha256_signature(ctx, signing_str);
+ if (!signature) {
+ flb_plg_error(ctx->ins, "cannot compose signing signature");
+ goto error_label;
+ }
+
+ auth_header_str = create_authorization_header_content(ctx, signature);
+ if (!auth_header_str) {
+ flb_plg_error(ctx->ins, "cannot compose authorization header");
+ goto error_label;
+ }
+
+ flb_http_add_header(c, FLB_OCI_HEADER_AUTH, sizeof(FLB_OCI_HEADER_AUTH) - 1,
+ auth_header_str, flb_sds_len(auth_header_str));
+
+ /* User-Agent */
+ flb_http_add_header(c, FLB_OCI_HEADER_USER_AGENT,
+ sizeof(FLB_OCI_HEADER_USER_AGENT) - 1,
+ FLB_OCI_HEADER_USER_AGENT_VAL,
+ sizeof(FLB_OCI_HEADER_USER_AGENT_VAL) - 1);
+
+ /* Accept */
+ flb_http_add_header(c, "Accept", 6, "*/*", 3);
+
+ ret = 0;
+
+ error_label:
+ if (tmp_sds) {
+ flb_sds_destroy(tmp_sds);
+ }
+ if (signing_str) {
+ flb_sds_destroy(signing_str);
+ }
+ if (rfc1123date) {
+ flb_sds_destroy(rfc1123date);
+ }
+ if (encoded_uri) {
+ flb_sds_destroy(encoded_uri);
+ }
+ if (signature) {
+ flb_sds_destroy(signature);
+ }
+ if (auth_header_str) {
+ flb_sds_destroy(auth_header_str);
+ }
+ return ret;
+}
+
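+/*
+ * Parse the JSON error payload returned by the endpoint and extract the
+ * 'code' and 'message' fields.
+ */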
+static struct flb_oci_error_response* parse_response_error(struct flb_oci_logan *ctx,
+ char *response, size_t response_len)
+{
+ int tok_size = 32, ret, i;
+ jsmn_parser parser;
+ jsmntok_t *t;
+ jsmntok_t *tokens;
+ char *key;
+ char *val;
+ int key_len;
+ int val_len;
+ struct flb_oci_error_response *error_response;
+
+ jsmn_init(&parser);
+
+ tokens = flb_calloc(1, sizeof(jsmntok_t) * tok_size);
+ if (!tokens) {
+ flb_errno();
+ return NULL;
+ }
+
+ ret = jsmn_parse(&parser, response, response_len, tokens, tok_size);
+
+ if (ret == JSMN_ERROR_INVAL || ret == JSMN_ERROR_PART) {
+ flb_free(tokens);
+        flb_plg_info(ctx->ins,
+                     "Unable to parse error response: response is not valid JSON");
+ return NULL;
+ }
+ tok_size = ret;
+
+ error_response = flb_calloc(1, sizeof(struct flb_oci_error_response));
+ if (!error_response) {
+ flb_errno();
+ flb_free(tokens);
+ return NULL;
+ }
+
+ /* Parse JSON tokens */
+ for (i = 0; i < tok_size; i++) {
+ t = &tokens[i];
+
+ if (t->start == -1 || t->end == -1 || (t->start == 0 && t->end == 0)) {
+ break;
+ }
+
+ if (t->type != JSMN_STRING) {
+ continue;
+ }
+
+ key = response + t->start;
+ key_len = (t->end - t->start);
+
+ i++;
+ t = &tokens[i];
+ val = response + t->start;
+ val_len = (t->end - t->start);
+
+ if (val_len < 1) {
+ continue;
+ }
+
+ if ((key_len == sizeof(FLB_OCI_ERROR_RESPONSE_CODE) - 1)
+ && strncasecmp(key, FLB_OCI_ERROR_RESPONSE_CODE,
+ sizeof(FLB_OCI_ERROR_RESPONSE_CODE) - 1) == 0) {
+ /* code */
+ error_response->code = flb_sds_create_len(val, val_len);
+ if (!error_response->code) {
+ flb_free(error_response);
+ flb_free(tokens);
+ return NULL;
+ }
+ }
+ else if ((key_len == sizeof(FLB_OCI_ERROR_RESPONSE_MESSAGE) - 1)
+ && strncasecmp(key, FLB_OCI_ERROR_RESPONSE_MESSAGE,
+ sizeof(FLB_OCI_ERROR_RESPONSE_MESSAGE) - 1) == 0) {
+
+ /* message */
+ error_response->message = flb_sds_create_len(val, val_len);
+ if (!error_response->message) {
+ flb_free(error_response);
+ flb_free(tokens);
+ return NULL;
+ }
+ }
+ }
+
+ flb_free(tokens);
+ return error_response;
+}
+
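+/*
+ * Decide whether a failed request should be retried, based on the HTTP
+ * status and the error code found in the response payload.
+ */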
+static int retry_error(struct flb_http_client *c, struct flb_oci_logan *ctx)
+{
+ struct flb_oci_error_response *error_response = NULL;
+ int tmp_len;
+ int ret = FLB_FALSE;
+
+ /* possible retry error message */
+ if ( !(c->resp.status == 400 || c->resp.status == 401
+ || c->resp.status == 404 || c->resp.status == 409
+ || c->resp.status == 429 || c->resp.status == 500)) {
+ return FLB_FALSE;
+ }
+
+ /* parse error message */
+ error_response = parse_response_error(ctx, c->resp.payload,
+ c->resp.payload_size);
+ if (!error_response) {
+ return FLB_FALSE;
+ }
+
+ if (error_response->code) {
+ tmp_len = flb_sds_len(error_response->code);
+ if (c->resp.status == 400
+ && (tmp_len == sizeof(FLB_OCI_ERROR_CODE_RELATED_RESOURCE_NOT_FOUND) - 1)
+ && strncasecmp(error_response->code, FLB_OCI_ERROR_CODE_RELATED_RESOURCE_NOT_FOUND, tmp_len) == 0) {
+ ret = FLB_TRUE;
+ }
+ else if( c->resp.status == 401
+ &&( tmp_len == sizeof(FLB_OCI_ERROR_CODE_NOT_AUTHENTICATED)-1 )
+ && strncasecmp(error_response->code, FLB_OCI_ERROR_CODE_NOT_AUTHENTICATED, tmp_len) == 0) {
+ ret = FLB_TRUE;
+ }
+ else if (c->resp.status == 404
+ && (tmp_len == sizeof(FLB_OCI_ERROR_CODE_NOT_AUTHENTICATEDORNOTFOUND) - 1)
+ && strncasecmp(error_response->code, FLB_OCI_ERROR_CODE_NOT_AUTHENTICATEDORNOTFOUND, tmp_len) == 0) {
+ ret = FLB_TRUE;
+ }
+ else if (c->resp.status == 409
+ && (tmp_len == sizeof(FLB_OCI_ERROR_CODE_INCORRECTSTATE) - 1)
+ && strncasecmp(error_response->code, FLB_OCI_ERROR_CODE_INCORRECTSTATE, tmp_len) == 0) {
+ ret = FLB_TRUE;
+ }
+ else if (c->resp.status == 409
+ && (tmp_len == sizeof(FLB_OCI_ERROR_CODE_NOT_AUTH_OR_RESOURCE_EXIST) - 1)
+ && strncasecmp(error_response->code, FLB_OCI_ERROR_CODE_NOT_AUTH_OR_RESOURCE_EXIST, tmp_len) == 0) {
+ ret = FLB_TRUE;
+ }
+ else if (c->resp.status == 429
+ && (tmp_len == sizeof(FLB_OCI_ERROR_CODE_TOO_MANY_REQUESTS) - 1)
+ && strncasecmp(error_response->code, FLB_OCI_ERROR_CODE_TOO_MANY_REQUESTS, tmp_len) == 0) {
+ ret = FLB_TRUE;
+ }
+ else if (c->resp.status == 500
+ && (tmp_len == sizeof(FLB_OCI_ERROR_CODE_INTERNAL_SERVER_ERROR) - 1)
+ && strncasecmp(error_response->code, FLB_OCI_ERROR_CODE_INTERNAL_SERVER_ERROR, tmp_len) == 0) {
+ ret = FLB_TRUE;
+ }
+ }
+
+ if (error_response->code) {
+ flb_sds_destroy(error_response->code);
+ }
+ if (error_response->message) {
+ flb_sds_destroy(error_response->message);
+ }
+ flb_free(error_response);
+
+ return ret;
+}
+
+static int cb_oci_logan_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ struct flb_oci_logan *ctx;
+ ctx = flb_oci_logan_conf_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "cannot initialize plugin");
+ return -1;
+ }
+ flb_plg_info(ins, "initialized logan plugin");
+ flb_output_set_context(ins, ctx);
+ flb_output_set_http_debug_callbacks(ins);
+
+ return 0;
+}
+
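+/*
+ * Append the logGroupId, logSet and payloadType=JSON query parameters to
+ * the configured URI.
+ */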
+static flb_sds_t compose_uri(struct flb_oci_logan *ctx,
+ flb_sds_t log_set, flb_sds_t log_group_id)
+{
+ flb_sds_t uri_param;
+ flb_sds_t full_uri;
+
+ uri_param = flb_sds_create_size(512);
+ if (!uri_param) {
+ flb_errno();
+ return NULL;
+ }
+
+ /* LogGroupId */
+ if (log_group_id) {
+ if (flb_sds_len(uri_param) > 0) {
+ flb_sds_cat_safe(&uri_param, "&", 1);
+ }
+ flb_sds_cat_safe(&uri_param, FLB_OCI_LOG_GROUP_ID,
+ FLB_OCI_LOG_GROUP_ID_SIZE);
+ flb_sds_cat_safe(&uri_param, "=", 1);
+ flb_sds_cat_safe(&uri_param, log_group_id,
+ flb_sds_len(log_group_id));
+ }
+
+ if (!uri_param) {
+ return NULL;
+ }
+
+ /* logSet */
+ if (log_set) {
+ if (flb_sds_len(uri_param) > 0) {
+ flb_sds_cat_safe(&uri_param, "&", 1);
+ }
+ flb_sds_cat_safe(&uri_param, FLB_OCI_LOG_SET,
+ FLB_OCI_LOG_SET_SIZE);
+ flb_sds_cat_safe(&uri_param, "=", 1);
+ flb_sds_cat_safe(&uri_param, log_set,
+ flb_sds_len(log_set));
+ }
+
+ if (!uri_param) {
+ return NULL;
+ }
+
+ flb_sds_cat_safe(&uri_param, "&", 1);
+ flb_sds_cat_safe(&uri_param, FLB_OCI_PAYLOAD_TYPE,
+ sizeof(FLB_OCI_PAYLOAD_TYPE) - 1);
+ flb_sds_cat_safe(&uri_param, "=", 1);
+ flb_sds_cat_safe(&uri_param, "JSON", 4);
+
+
+ if (!uri_param) {
+ return NULL;
+ }
+
+
+ if (flb_sds_len(uri_param) == 0) {
+ flb_sds_destroy(uri_param);
+ return flb_sds_create(ctx->uri);
+ }
+
+ full_uri = flb_sds_create_size(
+ flb_sds_len(ctx->uri) + 1 + flb_sds_len(uri_param));
+ if (!full_uri) {
+ flb_errno();
+ flb_sds_destroy(uri_param);
+ return NULL;
+ }
+
+ flb_sds_cat_safe(&full_uri, ctx->uri, flb_sds_len(ctx->uri));
+ flb_sds_cat_safe(&full_uri, "?", 1);
+ flb_sds_cat_safe(&full_uri, uri_param, flb_sds_len(uri_param));
+
+ flb_sds_destroy(uri_param);
+
+ return full_uri;
+}
+
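+/*
+ * POST the JSON payload to the Log Analytics endpoint using the signed
+ * headers and map the result to FLB_OK, FLB_ERROR or FLB_RETRY.
+ */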
+static int flush_to_endpoint(struct flb_oci_logan *ctx,
+ flb_sds_t payload,
+ flb_sds_t log_group_id,
+ flb_sds_t log_set_id)
+{
+ int out_ret = FLB_RETRY;
+ int http_ret;
+ size_t b_sent;
+ flb_sds_t full_uri;
+ struct flb_http_client *c = NULL;
+    struct flb_connection *u_conn = NULL;
+
+    full_uri = compose_uri(ctx, log_set_id, log_group_id);
+    if (!full_uri) {
+        flb_plg_error(ctx->ins, "unable to compose uri for logGroup: %s logSet: %s",
+                      ctx->oci_la_log_group_id, ctx->oci_la_log_set_id);
+        goto error_label;
+    }
+
+ flb_plg_debug(ctx->ins, "full_uri=%s", full_uri);
+
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if(!u_conn) {
+ goto error_label;
+ }
+ /* Create HTTP client context */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, full_uri, (void*) payload,
+ flb_sds_len(payload), ctx->ins->host.name, ctx->ins->host.port, ctx->proxy, 0);
+ if (!c) {
+ goto error_label;
+ }
+ flb_http_allow_duplicated_headers(c, FLB_FALSE);
+
+ flb_plg_debug(ctx->ins, "built client");
+ flb_http_buffer_size(c, FLB_HTTP_DATA_SIZE_MAX);
+ if (build_headers(c, ctx, payload, ctx->ins->host.name, ctx->ins->host.port, full_uri) < 0) {
+ flb_plg_error(ctx->ins, "failed to build headers");
+ goto error_label;
+ }
+ flb_plg_debug(ctx->ins, "built request");
+
+ out_ret = FLB_OK;
+
+ http_ret = flb_http_do(c, &b_sent);
+ flb_plg_debug(ctx->ins, "placed request");
+
+ if (http_ret == 0) {
+
+ if (c->resp.status != 200) {
+ flb_plg_debug(ctx->ins, "request header %s", c->header_buf);
+
+ out_ret = FLB_ERROR;
+
+ if (c->resp.payload && c->resp.payload_size > 0) {
+ if (retry_error(c, ctx) == FLB_TRUE) {
+ out_ret = FLB_RETRY;
+ }
+
+ flb_plg_error(ctx->ins, "%s:%i, retry=%s, HTTP status=%i\n%s",
+ ctx->ins->host.name, ctx->ins->host.port,
+ (out_ret == FLB_RETRY ? "true" : "false"),
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "%s:%i, retry=%s, HTTP status=%i",
+ ctx->ins->host.name, ctx->ins->host.port,
+ (out_ret == FLB_RETRY ? "true" : "false"),
+ c->resp.status);
+ }
+ }
+ }
+ else {
+ out_ret = FLB_RETRY;
+ flb_plg_error(ctx->ins, "could not flush records to %s:%i (http_do=%i), retry=%s",
+ ctx->ins->host.name, ctx->ins->host.port,
+ http_ret, (out_ret == FLB_RETRY ? "true" : "false"));
+ goto error_label;
+ }
+
+
+
+ error_label:
+ if (full_uri) {
+ flb_sds_destroy(full_uri);
+ }
+
+ /* Destroy HTTP client context */
+ if (c) {
+ flb_http_client_destroy(c);
+ }
+
+ /* Release the TCP connection */
+ if (u_conn) {
+ flb_upstream_conn_release(u_conn);
+ }
+
+ return out_ret;
+
+}
+
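+/*
+ * Pack the request envelope from the plugin configuration: optional global
+ * metadata plus a logEvents entry carrying entity, source, path and
+ * per-event metadata.
+ */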
+static void pack_oci_fields(msgpack_packer *packer,
+ struct flb_oci_logan *ctx)
+{
+ int num_global_meta = 0;
+ int num_event_meta = 0;
+ int pck_sz = 2;
+ struct mk_list *head = NULL;
+ struct metadata_obj *f;
+
+
+ /* number of meta properties */
+ if(ctx->oci_la_global_metadata != NULL) {
+ num_global_meta = mk_list_size(&ctx->global_metadata_fields);
+ }
+ if(ctx->oci_la_metadata != NULL) {
+ num_event_meta = mk_list_size(&ctx->log_event_metadata_fields);
+ }
+
+
+ if (num_global_meta > 0) {
+ msgpack_pack_map(packer, 2);
+ msgpack_pack_str(packer, FLB_OCI_LOG_METADATA_SIZE);
+ msgpack_pack_str_body(packer, FLB_OCI_LOG_METADATA,
+ FLB_OCI_LOG_METADATA_SIZE);
+
+ msgpack_pack_map(packer, num_global_meta);
+ /* pack kv list */
+ mk_list_foreach(head, &ctx->global_metadata_fields) {
+ f = mk_list_entry(head, struct metadata_obj, _head);
+
+ msgpack_pack_str(packer, flb_sds_len(f->key));
+ msgpack_pack_str_body(packer, f->key, flb_sds_len(f->key));
+
+ msgpack_pack_str(packer, flb_sds_len(f->val));
+ msgpack_pack_str_body(packer, f->val, flb_sds_len(f->val));
+
+ }
+
+ }
+ else {
+ msgpack_pack_map(packer, 1);
+ }
+
+ /*
+    "logEvents":[
+ {
+ "entityId":"",
+ "logSourceName":"LinuxSyslogSource",
+ "logPath":"/var/log/messages",
+ "metadata":{
+ "Error ID":"1",
+ "Environment":"prod",
+ "Client Host Region":"PST"
+ },
+ "logRecords":[
+ "May 8 2017 04:02:36 blr00akm syslogd 1.4.1: shutdown.",
+ "May 8 2017 04:02:37 blr00akm syslogd 1.4.1: restart."
+ ]
+ },
+ {
+
+ }
+ ]
+ */
+ msgpack_pack_str(packer, FLB_OCI_LOG_EVENTS_SIZE);
+ msgpack_pack_str_body(packer, FLB_OCI_LOG_EVENTS, FLB_OCI_LOG_EVENTS_SIZE);
+
+ msgpack_pack_array(packer, 1);
+
+ if (ctx->oci_la_entity_id) {
+ pck_sz++;
+ }
+ if (ctx->oci_la_log_path) {
+ pck_sz++;
+ }
+ if (ctx->oci_la_entity_type) {
+ pck_sz++;
+ }
+
+ if (num_event_meta > 0) {
+ pck_sz++;
+ }
+
+ msgpack_pack_map(packer, pck_sz); /* entityId, logSourceName, logPath, logRecords */
+
+
+ /* "entityType:"" */
+ if (ctx->oci_la_entity_type) {
+ msgpack_pack_str(packer, FLB_OCI_ENTITY_TYPE_SIZE);
+ msgpack_pack_str_body(packer, FLB_OCI_ENTITY_TYPE, FLB_OCI_ENTITY_TYPE_SIZE);
+ msgpack_pack_str(packer, flb_sds_len(ctx->oci_la_entity_type));
+ msgpack_pack_str_body(packer, ctx->oci_la_entity_type,
+ flb_sds_len(ctx->oci_la_entity_type));
+ }
+
+ /* "entityId":"", */
+ if (ctx->oci_la_entity_id) {
+ msgpack_pack_str(packer, FLB_OCI_ENTITY_ID_SIZE);
+ msgpack_pack_str_body(packer, FLB_OCI_ENTITY_ID, FLB_OCI_ENTITY_ID_SIZE);
+ msgpack_pack_str(packer, flb_sds_len(ctx->oci_la_entity_id));
+ msgpack_pack_str_body(packer, ctx->oci_la_entity_id,
+ flb_sds_len(ctx->oci_la_entity_id));
+ }
+
+
+ /* "logSourceName":"", */
+ msgpack_pack_str(packer, FLB_OCI_LOG_SOURCE_NAME_SIZE);
+ msgpack_pack_str_body(packer, FLB_OCI_LOG_SOURCE_NAME,
+ FLB_OCI_LOG_SOURCE_NAME_SIZE);
+ msgpack_pack_str(packer, flb_sds_len(ctx->oci_la_log_source_name));
+ msgpack_pack_str_body(packer, ctx->oci_la_log_source_name,
+ flb_sds_len(ctx->oci_la_log_source_name));
+
+
+ /* "logPath":"" */
+ if (ctx->oci_la_log_path) {
+ msgpack_pack_str(packer, FLB_OCI_LOG_PATH_SIZE);
+ msgpack_pack_str_body(packer, FLB_OCI_LOG_PATH, FLB_OCI_LOG_PATH_SIZE);
+ msgpack_pack_str(packer, flb_sds_len(ctx->oci_la_log_path));
+ msgpack_pack_str_body(packer, ctx->oci_la_log_path,
+ flb_sds_len(ctx->oci_la_log_path));
+ }
+
+
+ /* Add metadata */
+ if (num_event_meta > 0) {
+ /*
+ "metadata":{
+ "Error ID":"0",
+ "Environment":"dev",
+ "Client Host Region":"IST"
+ },
+ */
+ msgpack_pack_str(packer, FLB_OCI_LOG_METADATA_SIZE);
+ msgpack_pack_str_body(packer, FLB_OCI_LOG_METADATA,
+ FLB_OCI_LOG_METADATA_SIZE);
+
+ msgpack_pack_map(packer, num_event_meta);
+ /* pack kv list */
+ mk_list_foreach(head, &ctx->log_event_metadata_fields) {
+ f = mk_list_entry(head, struct metadata_obj, _head);
+
+ msgpack_pack_str(packer, flb_sds_len(f->key));
+ msgpack_pack_str_body(packer, f->key, flb_sds_len(f->key));
+
+ msgpack_pack_str(packer, flb_sds_len(f->val));
+ msgpack_pack_str_body(packer, f->val, flb_sds_len(f->val));
+
+ }
+
+ }
+}
+
+static int get_and_pack_oci_fields_from_record(msgpack_packer *packer,
+ msgpack_object map,
+ flb_sds_t *lg_id,
+ flb_sds_t *ls_id,
+ struct flb_oci_logan *ctx)
+{
+ int map_size = map.via.map.size;
+ int pck_size = 1, i;
+ msgpack_object *log_group_id= NULL;
+ msgpack_object *log_set_id = NULL;
+ msgpack_object *entity_id = NULL;
+ msgpack_object *entity_type = NULL;
+ msgpack_object *log_path = NULL;
+ msgpack_object *log_source = NULL;
+ msgpack_object *global_metadata = NULL;
+ msgpack_object *metadata = NULL;
+
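+    /*
+     * Scan the record once and remember pointers to the oci_la_* keys;
+     * their values are packed into the payload afterwards.
+     */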
+ for(i = 0; i < map_size; i++) {
+ if (check_config_from_record(map.via.map.ptr[i].key,
+ FLB_OCI_LOG_GROUP_ID_KEY,
+ FLB_OCI_LOG_GROUP_ID_KEY_SIZE) == FLB_TRUE) {
+ if (map.via.map.ptr[i].val.type == MSGPACK_OBJECT_STR) {
+ log_group_id = &map.via.map.ptr[i].val;
+ }
+ continue;
+ }
+ else if (check_config_from_record(map.via.map.ptr[i].key,
+ FLB_OCI_LOG_SET_ID_KEY,
+ FLB_OCI_LOG_SET_ID_KEY_SIZE) == FLB_TRUE) {
+ if (map.via.map.ptr[i].val.type == MSGPACK_OBJECT_STR) {
+ log_set_id = &map.via.map.ptr[i].val;
+ }
+ continue;
+ }
+ else if (check_config_from_record(map.via.map.ptr[i].key,
+ FLB_OCI_LOG_ENTITY_ID_KEY,
+ FLB_OCI_LOG_ENTITY_ID_KEY_SIZE) == FLB_TRUE) {
+ if (map.via.map.ptr[i].val.type == MSGPACK_OBJECT_STR) {
+ entity_id = &map.via.map.ptr[i].val;
+ pck_size++;
+ }
+ continue;
+ }
+ else if (check_config_from_record(map.via.map.ptr[i].key,
+ FLB_OCI_LOG_ENTITY_TYPE_KEY,
+ FLB_OCI_LOG_ENTITY_TYPE_KEY_SIZE) == FLB_TRUE) {
+ if (map.via.map.ptr[i].val.type == MSGPACK_OBJECT_STR) {
+ entity_type = &map.via.map.ptr[i].val;
+ pck_size++;
+ }
+ continue;
+ }
+ else if (check_config_from_record(map.via.map.ptr[i].key,
+ FLB_OCI_LOG_SOURCE_NAME_KEY,
+ FLB_OCI_LOG_SOURCE_NAME_KEY_SIZE) == FLB_TRUE) {
+ if (map.via.map.ptr[i].val.type == MSGPACK_OBJECT_STR) {
+ log_source = &map.via.map.ptr[i].val;
+ pck_size++;
+ }
+ continue;
+ }
+ else if (check_config_from_record(map.via.map.ptr[i].key,
+ FLB_OCI_LOG_PATH_KEY,
+ FLB_OCI_LOG_PATH_KEY_SIZE) == FLB_TRUE) {
+ if (map.via.map.ptr[i].val.type == MSGPACK_OBJECT_STR) {
+ log_path = &map.via.map.ptr[i].val;
+ pck_size++;
+ }
+ continue;
+ }
+ else if (check_config_from_record(map.via.map.ptr[i].key,
+ FLB_OCI_METADATA_KEY,
+ FLB_OCI_METADATA_KEY_SIZE) == FLB_TRUE) {
+ if (map.via.map.ptr[i].val.type == MSGPACK_OBJECT_STR) {
+ metadata = &map.via.map.ptr[i].val;
+ pck_size++;
+ }
+ continue;
+ }
+ else if (check_config_from_record(map.via.map.ptr[i].key,
+ FLB_OCI_GLOBAL_METADATA_KEY,
+ FLB_OCI_GLOBAL_METADATA_KEY_SIZE) == FLB_TRUE) {
+ if (map.via.map.ptr[i].val.type == MSGPACK_OBJECT_STR) {
+ global_metadata = &map.via.map.ptr[i].val;
+ }
+ continue;
+ }
+ }
+
+ if (log_group_id == NULL || log_source == NULL) {
+ flb_plg_error(ctx->ins,
+ "log source name and log group id are required");
+ return -1;
+ }
+ if (global_metadata != NULL) {
+ msgpack_pack_map(packer, 2);
+ msgpack_pack_str(packer, FLB_OCI_LOG_METADATA_SIZE);
+ msgpack_pack_str_body(packer, FLB_OCI_LOG_METADATA,
+ FLB_OCI_LOG_METADATA_SIZE);
+
+ msgpack_pack_object(packer, *global_metadata);
+ }
+ else {
+ msgpack_pack_map(packer, 1);
+ }
+
+ /*
+     * "logEvents":[
+ {
+ "entityId":"",
+ "logSourceName":"LinuxSyslogSource",
+ "logPath":"/var/log/messages",
+ "metadata":{
+ "Error ID":"1",
+ "Environment":"prod",
+ "Client Host Region":"PST"
+ },
+ "logRecords":[
+ "May 8 2017 04:02:36 blr00akm syslogd 1.4.1: shutdown.",
+ "May 8 2017 04:02:37 blr00akm syslogd 1.4.1: restart."
+ ]
+ },
+ {
+
+ }
+ ]
+ */
+ msgpack_pack_str(packer, FLB_OCI_LOG_EVENTS_SIZE);
+ msgpack_pack_str_body(packer, FLB_OCI_LOG_EVENTS, FLB_OCI_LOG_EVENTS_SIZE);
+
+ msgpack_pack_array(packer, 1);
+
+ if (metadata != NULL) {
+ pck_size++;
+ msgpack_pack_map(packer, pck_size); /* entityType, entityId, logSourceName, logPath, metadata, logRecords */
+ msgpack_pack_str(packer, FLB_OCI_LOG_METADATA_SIZE);
+ msgpack_pack_str_body(packer, FLB_OCI_LOG_METADATA,
+ FLB_OCI_LOG_METADATA_SIZE);
+        msgpack_pack_object(packer, *metadata);
+
+ }
+ else {
+ msgpack_pack_map(packer, pck_size); /* entityType, entityId, logSourceName, logPath, logRecords */
+ }
+
+ /* "entityType:"" */
+ if (entity_type) {
+ msgpack_pack_str(packer, FLB_OCI_ENTITY_TYPE_SIZE);
+ msgpack_pack_str_body(packer, FLB_OCI_ENTITY_TYPE, FLB_OCI_ENTITY_TYPE_SIZE);
+ msgpack_pack_object(packer, *entity_type);
+ }
+
+ /* "entityId":"", */
+    if (entity_id) {
+ msgpack_pack_str(packer, FLB_OCI_ENTITY_ID_SIZE);
+ msgpack_pack_str_body(packer, FLB_OCI_ENTITY_ID, FLB_OCI_ENTITY_ID_SIZE);
+ msgpack_pack_object(packer, *entity_id);
+ }
+
+
+
+ /* "logSourceName":"", */
+ msgpack_pack_str(packer, FLB_OCI_LOG_SOURCE_NAME_SIZE);
+ msgpack_pack_str_body(packer, FLB_OCI_LOG_SOURCE_NAME,
+ FLB_OCI_LOG_SOURCE_NAME_SIZE);
+ msgpack_pack_object(packer, *log_source);
+
+
+ /* "logPath":"" */
+ if (log_path) {
+ msgpack_pack_str(packer, FLB_OCI_LOG_PATH_SIZE);
+ msgpack_pack_str_body(packer, FLB_OCI_LOG_PATH, FLB_OCI_LOG_PATH_SIZE);
+ msgpack_pack_object(packer, *log_path);
+ }
+
+ *lg_id = flb_sds_create_len(log_group_id->via.str.ptr, log_group_id->via.str.size);
+ if(!*lg_id) {
+ return -1;
+ }
+ if (log_set_id != NULL) {
+ *ls_id = flb_sds_create_len(log_set_id->via.str.ptr, log_set_id->via.str.size);
+ if(!*ls_id) {
+ return -1;
+ }
+ }
+ return 0;
+
+}
+
+static int total_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *ins, void *out_context,
+ struct flb_config *config)
+{
+ struct flb_oci_logan *ctx = out_context;
+ flb_sds_t out_buf = NULL;
+ int ret = 0, res = FLB_OK, ret1 = 0, i;
+ msgpack_object map;
+ int map_size;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ int msg = -1, log = -1;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int num_records;
+ flb_sds_t log_group_id = NULL;
+ flb_sds_t log_set_id = NULL;
+ int count = 0;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) event_chunk->data, event_chunk->size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+ res = FLB_ERROR;
+ goto clean_up;
+ }
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ /* pack oci fields */
+ /* pack_oci_fields(&mp_pck, ctx); */
+
+ num_records = flb_mp_count(event_chunk->data, event_chunk->size);
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ map = *log_event.body;
+ map_size = map.via.map.size;
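+        /*
+         * The payload envelope (metadata and the logEvents entry) is packed
+         * only once per chunk; every record after the first one only appends
+         * to the logRecords array.
+         */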
+ if (count < 1) {
+ if (ctx->oci_config_in_record == FLB_FALSE) {
+ pack_oci_fields(&mp_pck, ctx);
+ log_group_id = ctx->oci_la_log_group_id;
+ log_set_id = ctx->oci_la_log_set_id;
+ } else {
+ ret1 = get_and_pack_oci_fields_from_record(&mp_pck, map, &log_group_id, &log_set_id, ctx);
+ if (ret1 != 0) {
+ break;
+ }
+ }
+ msgpack_pack_str(&mp_pck, FLB_OCI_LOG_RECORDS_SIZE);
+ msgpack_pack_str_body(&mp_pck, FLB_OCI_LOG_RECORDS,
+ FLB_OCI_LOG_RECORDS_SIZE);
+ msgpack_pack_array(&mp_pck, num_records);
+ count++;
+ }
+
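+        /*
+         * Each record is expected to carry a "log" or "message" key; its
+         * string value becomes one entry in logRecords ("log" wins when
+         * both are present).
+         */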
+ for(i = 0; i < map_size; i++) {
+ if (check_config_from_record(map.via.map.ptr[i].key,
+ "message",
+ 7) == FLB_TRUE) {
+ msg = i;
+ }
+ if (check_config_from_record(map.via.map.ptr[i].key,
+ "log",
+ 3) == FLB_TRUE) {
+ log = i;
+ }
+ }
+ if (log >= 0) {
+ msgpack_pack_str(&mp_pck, map.via.map.ptr[log].val.via.str.size);
+ msgpack_pack_str_body(&mp_pck, map.via.map.ptr[log].val.via.str.ptr,
+ map.via.map.ptr[log].val.via.str.size);
+ }
+ else if (msg >= 0) {
+ msgpack_pack_str(&mp_pck, map.via.map.ptr[msg].val.via.str.size);
+ msgpack_pack_str_body(&mp_pck, map.via.map.ptr[msg].val.via.str.ptr,
+ map.via.map.ptr[msg].val.via.str.size);
+ }
+ log = -1;
+ msg = -1;
+ }
+
+ if (ret1 != 0) {
+ res = FLB_ERROR;
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ flb_log_event_decoder_destroy(&log_decoder);
+ goto clean_up;
+ }
+
+ out_buf = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ flb_plg_debug(ctx->ins, "payload=%s", out_buf);
+ flb_plg_debug(ctx->ins, "lg_id=%s", log_group_id);
+
+ ret = flush_to_endpoint(ctx, out_buf, log_group_id, log_set_id);
+ if(ret != FLB_OK) {
+ res = FLB_RETRY;
+ goto clean_up;
+ }
+
+ clean_up:
+ if (out_buf != NULL) {
+ flb_sds_destroy(out_buf);
+ }
+ if (log_group_id != NULL && ctx->oci_config_in_record) {
+ flb_sds_destroy(log_group_id);
+ }
+ if (log_set_id != NULL && ctx->oci_config_in_record) {
+ flb_sds_destroy(log_set_id);
+ }
+ return res;
+}
+
+static void cb_oci_logan_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *ins, void *out_context,
+ struct flb_config *config)
+{
+ struct flb_oci_logan *ctx = out_context;
+ int ret = -1;
+
+ ret = total_flush(event_chunk, out_flush,
+ ins, out_context,
+ config);
+    if (ret != FLB_OK) {
+        /*
+         * Do not destroy the plugin context here: on FLB_RETRY the same
+         * context is reused on the next attempt and it is released later
+         * in cb_oci_logan_exit().
+         */
+        FLB_OUTPUT_RETURN(ret);
+    }
+ flb_plg_debug(ctx->ins, "success");
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+
+}
+
+static int cb_oci_logan_exit(void *data, struct flb_config *config)
+{
+ struct flb_oci_logan *ctx = data;
+
+ flb_oci_logan_conf_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "config_file_location", "",
+ 0, FLB_TRUE, offsetof(struct flb_oci_logan, config_file_location),
+ "Location of the oci config file for user api key signing"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "profile_name", "DEFAULT",
+ 0, FLB_TRUE, offsetof(struct flb_oci_logan, profile_name),
+ "name of the profile in the config file from which the user configs should be loaded"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "oci_config_in_record", "false",
+ 0, FLB_TRUE, offsetof(struct flb_oci_logan, oci_config_in_record),
+ "If true, oci_la_* configs will be read from the record"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "uri", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_oci_logan, uri),
+ "Set the uri for rest api request"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "oci_la_log_group_id", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_oci_logan, oci_la_log_group_id),
+ "log group id"
+ },
+    {
+     FLB_CONFIG_MAP_STR, "oci_la_log_set_id", NULL,
+     0, FLB_TRUE, offsetof(struct flb_oci_logan, oci_la_log_set_id),
+     "log set id"
+    },
+    {
+     FLB_CONFIG_MAP_STR, "oci_la_entity_id", NULL,
+     0, FLB_TRUE, offsetof(struct flb_oci_logan, oci_la_entity_id),
+     "log analytics entity id"
+    },
+    {
+     FLB_CONFIG_MAP_STR, "oci_la_entity_type", NULL,
+     0, FLB_TRUE, offsetof(struct flb_oci_logan, oci_la_entity_type),
+     "log analytics entity type"
+    },
+    {
+     FLB_CONFIG_MAP_STR, "oci_la_log_source_name", NULL,
+     0, FLB_TRUE, offsetof(struct flb_oci_logan, oci_la_log_source_name),
+     "log source name"
+    },
+    {
+     FLB_CONFIG_MAP_STR, "oci_la_log_path", NULL,
+     0, FLB_TRUE, offsetof(struct flb_oci_logan, oci_la_log_path),
+     "log path"
+    },
+    {
+     FLB_CONFIG_MAP_SLIST_2, "oci_la_global_metadata", NULL,
+     FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_oci_logan, oci_la_global_metadata),
+     "key/value pairs of global metadata added to the payload"
+    },
+    {
+     FLB_CONFIG_MAP_SLIST_2, "oci_la_metadata", NULL,
+     FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_oci_logan, oci_la_metadata),
+     "key/value pairs of metadata added to each log event"
+    },
+ {
+ FLB_CONFIG_MAP_STR, "namespace", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_oci_logan, namespace),
+ "namespace in your tenancy where the log objects reside"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "proxy", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_oci_logan, proxy),
+ "define proxy if required, in http://host:port format, supports only http protocol"
+ },
+
+ {0}
+};
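+
+/*
+ * Illustrative configuration (values below are placeholders, not defaults):
+ *
+ *   [OUTPUT]
+ *       name                    oracle_log_analytics
+ *       match                   *
+ *       namespace               example_namespace
+ *       config_file_location    /path/to/.oci/config
+ *       profile_name            DEFAULT
+ *       oci_la_log_group_id     <log-group-ocid>
+ *       oci_la_log_source_name  ExampleLogSource
+ *       tls                     on
+ */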
+
+/* Plugin reference */
+struct flb_output_plugin out_oracle_log_analytics_plugin = {
+ .name = "oracle_log_analytics",
+ .description = "Oracle log analytics",
+ .cb_init = cb_oci_logan_init,
+ .cb_pre_run = NULL,
+ .cb_flush = cb_oci_logan_flush,
+ .cb_exit = cb_oci_logan_exit,
+
+ /* Configuration */
+ .config_map = config_map,
+
+ /* Events supported */
+ .event_type = FLB_OUTPUT_LOGS,
+
+
+ /* Plugin flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+ .workers = 1,
+};
diff --git a/src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan.h b/src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan.h
new file mode 100644
index 000000000..7cc9e75f4
--- /dev/null
+++ b/src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan.h
@@ -0,0 +1,215 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef FLB_OUT_OCI_LOGAN_H
+#define FLB_OUT_OCI_LOGAN_H
+
+#define FLB_OCI_LOG_ENTITY_ID_KEY "oci_la_entity_id"
+#define FLB_OCI_LOG_ENTITY_ID_KEY_SIZE sizeof(FLB_OCI_LOG_ENTITY_ID_KEY) - 1
+
+#define FLB_OCI_LOG_ENTITY_TYPE_KEY "oci_la_entity_type"
+#define FLB_OCI_LOG_ENTITY_TYPE_KEY_SIZE sizeof(FLB_OCI_LOG_ENTITY_TYPE_KEY) - 1
+
+#define FLB_OCI_LOG_GROUP_ID_KEY "oci_la_log_group_id"
+#define FLB_OCI_LOG_GROUP_ID_KEY_SIZE sizeof(FLB_OCI_LOG_GROUP_ID_KEY) - 1
+
+#define FLB_OCI_LOG_SET_ID_KEY "oci_la_log_set_id"
+#define FLB_OCI_LOG_SET_ID_KEY_SIZE sizeof(FLB_OCI_LOG_SET_ID_KEY) - 1
+
+#define FLB_OCI_LOG_SOURCE_NAME_KEY "oci_la_log_source_name"
+#define FLB_OCI_LOG_SOURCE_NAME_KEY_SIZE sizeof(FLB_OCI_LOG_SOURCE_NAME_KEY) - 1
+
+#define FLB_OCI_LOG_PATH_KEY "oci_la_log_path"
+#define FLB_OCI_LOG_PATH_KEY_SIZE sizeof(FLB_OCI_LOG_PATH_KEY) - 1
+
+#define FLB_OCI_METADATA_KEY "oci_la_metadata"
+#define FLB_OCI_METADATA_KEY_SIZE sizeof(FLB_OCI_METADATA_KEY) - 1
+
+#define FLB_OCI_GLOBAL_METADATA_KEY "oci_la_global_metadata"
+#define FLB_OCI_GLOBAL_METADATA_KEY_SIZE sizeof(FLB_OCI_GLOBAL_METADATA_KEY) - 1
+
+#define FLB_OCI_LOG_EVENTS "logEvents"
+#define FLB_OCI_LOG_EVENTS_SIZE sizeof(FLB_OCI_LOG_EVENTS)-1
+
+#define FLB_OCI_LOG_RECORDS "logRecords"
+#define FLB_OCI_LOG_RECORDS_SIZE sizeof(FLB_OCI_LOG_RECORDS)-1
+
+#define FLB_OCI_LOG_GROUP_ID "logGroupId"
+#define FLB_OCI_LOG_GROUP_ID_SIZE sizeof(FLB_OCI_LOG_GROUP_ID)-1
+
+#define FLB_OCI_ENTITY_TYPE "entityType"
+#define FLB_OCI_ENTITY_TYPE_SIZE sizeof(FLB_OCI_ENTITY_TYPE) - 1
+
+#define FLB_OCI_LOG_SET "logSet"
+#define FLB_OCI_LOG_SET_SIZE sizeof(FLB_OCI_LOG_SET)-1
+
+#define FLB_OCI_LOG_METADATA "metadata"
+#define FLB_OCI_LOG_METADATA_SIZE sizeof(FLB_OCI_LOG_METADATA)-1
+
+#define FLB_OCI_ENTITY_ID "entityId"
+#define FLB_OCI_ENTITY_ID_SIZE sizeof(FLB_OCI_ENTITY_ID)-1
+
+#define FLB_OCI_LOG_SOURCE_NAME "logSourceName"
+#define FLB_OCI_LOG_SOURCE_NAME_SIZE sizeof(FLB_OCI_LOG_SOURCE_NAME)-1
+
+#define FLB_OCI_LOG_PATH "logPath"
+#define FLB_OCI_LOG_PATH_SIZE sizeof(FLB_OCI_LOG_PATH)-1
+
+#define FLB_OCI_META_PREFIX "metadata_"
+#define FLB_OCI_META_PREFIX_SIZE sizeof(FLB_OCI_META_PREFIX)-1
+
+#define FLB_OCI_MATCH_PREFIX "oci_match_"
+#define FLB_OCI_MATCH_PREFIX_SIZE sizeof(FLB_OCI_MATCH_PREFIX)-1
+
+#ifdef FLB_HAVE_REGEX
+#define FLB_OCI_MATCH_REGEX_PREFIX "oci_match_regex_"
+#define FLB_OCI_MATCH_REGEX_PREFIX_SIZE sizeof(FLB_OCI_MATCH_REGEX_PREFIX)-1
+#endif
+
+/* Params */
+#define FLB_OCI_PARAM_SKIP_HTTP_POST "skip_http_post"
+#define FLB_OCI_PARAM_URI "uri"
+#define FLB_OCI_PARAM_ENABLE_TRACE_OUTPUT "enable_trace"
+#define FLB_OCI_PARAM_TRACE_OUTPUT_PATH "trace_file_path"
+#define FLB_OCI_PARAM_TRACE_OUTPUT_FILE "trace_file_name"
+#define FLB_OCI_PARAM_COLLECT_TIME_FIELD "collect_time_field_name"
+
+#define FLB_OCI_PARAM_USE_RAW_RECORD "use_raw_record"
+#define FLB_OCI_PARAM_USE_RAW_RECORD_SIZE sizeof(FLB_OCI_PARAM_USE_RAW_RECORD)-1
+
+#define FLB_OCI_PARAM_INCLUDE_COLLECT_TIME "include_collect_time"
+#define FLB_OCI_PARAM_INCLUDE_COLLECT_TIME_SIZE sizeof(FLB_OCI_PARAM_INCLUDE_COLLECT_TIME)-1
+
+#define FLB_OCI_MATCH_ID_MAX 1000 // to avoid an overly large memory allocation
+
+#define FLB_OCI_DEFAULT_COLLECT_TIME "oci_collect_time"
+#define FLB_OCI_DEFAULT_COLLECT_TIME_SIZE sizeof(FLB_OCI_DEFAULT_COLLECT_TIME)-1
+
+/* Http Header */
+#define FLB_OCI_HEADER_REQUEST_TARGET "(request-target)"
+#define FLB_OCI_HEADER_USER_AGENT "User-Agent"
+#define FLB_OCI_HEADER_USER_AGENT_VAL "Fluent-Bit"
+#define FLB_OCI_HEADER_CONTENT_TYPE "content-type"
+#define FLB_OCI_HEADER_CONTENT_TYPE_VAL "application/octet-stream"
+#define FLB_OCI_HEADER_X_CONTENT_SHA256 "x-content-sha256"
+#define FLB_OCI_HEADER_CONTENT_LENGTH "content-length"
+#define FLB_OCI_HEADER_HOST "host"
+#define FLB_OCI_HEADER_DATE "date"
+#define FLB_OCI_HEADER_AUTH "Authorization"
+#define FLB_OCI_PAYLOAD_TYPE "payloadType"
+
+
+/* For OCI signing */
+#define FLB_OCI_PARAM_TENANCY "tenancy"
+#define FLB_OCI_PARAM_USER "user"
+#define FLB_OCI_PARAM_KEY_FINGERPRINT "fingerprint"
+#define FLB_OCI_PARAM_KEY_FILE "key_file"
+#define FLB_OCI_PARAM_REGION "region"
+#define FLB_OCI_PARAM_KEY_FILE_PASSPHRASE "key_file_passphrase"
+
+#define FLB_OCI_SIGN_SIGNATURE_VERSION "Signature version=\"1\""
+#define FLB_OCI_SIGN_KEYID "keyId"
+#define FLB_OCI_SIGN_ALGORITHM "algorithm=\"rsa-sha256\""
+
+#define FLB_OCI_SIGN_HEADERS "headers=\"" \
+ FLB_OCI_HEADER_REQUEST_TARGET " " \
+ FLB_OCI_HEADER_HOST " " \
+ FLB_OCI_HEADER_DATE " " \
+ FLB_OCI_HEADER_X_CONTENT_SHA256 " " \
+ FLB_OCI_HEADER_CONTENT_TYPE " " \
+ FLB_OCI_HEADER_CONTENT_LENGTH "\""
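+
+/*
+ * Expands to:
+ *   headers="(request-target) host date x-content-sha256 content-type content-length"
+ */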
+
+#define FLB_OCI_SIGN_SIGNATURE "signature"
+
+/* For error response */
+#define FLB_OCI_ERROR_RESPONSE_CODE "code"
+#define FLB_OCI_ERROR_RESPONSE_MESSAGE "message"
+
+#define FLB_OCI_ERROR_CODE_RELATED_RESOURCE_NOT_FOUND "RelatedResourceNotAuthorizedOrNotFound"
+#define FLB_OCI_ERROR_CODE_NOT_AUTHENTICATED "NotAuthenticated"
+#define FLB_OCI_ERROR_CODE_NOT_AUTHENTICATEDORNOTFOUND "NotAuthorizedOrNotFound"
+#define FLB_OCI_ERROR_CODE_INCORRECTSTATE "IncorrectState"
+#define FLB_OCI_ERROR_CODE_NOT_AUTH_OR_RESOURCE_EXIST "NotAuthorizedOrResourceAlreadyExists"
+#define FLB_OCI_ERROR_CODE_TOO_MANY_REQUESTS "TooManyRequests"
+#define FLB_OCI_ERROR_CODE_INTERNAL_SERVER_ERROR "InternalServerError"
+
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_hash_table.h>
+#include <monkey/mk_core/mk_list.h>
+
+struct metadata_obj {
+ flb_sds_t key;
+ flb_sds_t val;
+ struct mk_list _head;
+
+};
+
+struct flb_oci_error_response
+{
+ flb_sds_t code;
+ flb_sds_t message;
+};
+
+struct flb_oci_logan {
+ flb_sds_t namespace;
+ flb_sds_t config_file_location;
+ flb_sds_t profile_name;
+ int oci_config_in_record;
+ flb_sds_t uri;
+
+ struct flb_upstream *u;
+ flb_sds_t proxy;
+ char *proxy_host;
+ int proxy_port;
+
+ // oci_la_* configs
+ flb_sds_t oci_la_entity_id;
+
+ flb_sds_t oci_la_entity_type;
+
+ flb_sds_t oci_la_log_source_name;
+
+ flb_sds_t oci_la_log_path;
+
+ flb_sds_t oci_la_log_group_id;
+
+ flb_sds_t oci_la_log_set_id;
+
+ struct mk_list *oci_la_global_metadata;
+ struct mk_list global_metadata_fields;
+ struct mk_list *oci_la_metadata;
+ struct mk_list log_event_metadata_fields;
+
+ // config_file
+ flb_sds_t user;
+ flb_sds_t region;
+ flb_sds_t tenancy;
+ flb_sds_t key_fingerprint;
+ flb_sds_t key_file;
+ /* For OCI signing */
+ flb_sds_t key_id; // tenancy/user/key_fingerprint
+ flb_sds_t private_key;
+
+ struct flb_output_instance *ins;
+
+};
+#endif
diff --git a/src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan_conf.c b/src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan_conf.c
new file mode 100644
index 000000000..a39803184
--- /dev/null
+++ b/src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan_conf.c
@@ -0,0 +1,493 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <sys/stat.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_error.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_slist.h>
+#include <fluent-bit/flb_file.h>
+
+#include <monkey/mk_core/mk_list.h>
+#include <monkey/mk_core/mk_string.h>
+#include <fluent-bit/flb_utils.h>
+
+#include "oci_logan.h"
+#include "oci_logan_conf.h"
+
+static int create_pk_context(flb_sds_t filepath, const char *key_passphrase,
+ struct flb_oci_logan *ctx)
+{
+ int ret;
+ struct stat st;
+ struct file_info finfo;
+ FILE *fp;
+ flb_sds_t kbuffer;
+
+
+ ret = stat(filepath, &st);
+ if (ret == -1) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "cannot open key file %s", filepath);
+ return -1;
+ }
+
+ if (!S_ISREG(st.st_mode) && !S_ISLNK(st.st_mode)) {
+ flb_plg_error(ctx->ins, "key file is not a valid file: %s", filepath);
+ return -1;
+ }
+
+ /* Read file content */
+ if (mk_file_get_info(filepath, &finfo, MK_FILE_READ) != 0) {
+        flb_plg_error(ctx->ins, "failed to read key file: %s", filepath);
+ return -1;
+ }
+
+ if (!(fp = fopen(filepath, "rb"))) {
+        flb_plg_error(ctx->ins, "failed to open key file: %s", filepath);
+ return -1;
+ }
+
+ kbuffer = flb_sds_create_size(finfo.size + 1);
+ if (!kbuffer) {
+ flb_errno();
+ fclose(fp);
+ return -1;
+ }
+
+ ret = fread(kbuffer, finfo.size, 1, fp);
+ if (ret < 1) {
+ flb_sds_destroy(kbuffer);
+ fclose(fp);
+        flb_plg_error(ctx->ins, "failed to read key file: %s", filepath);
+ return -1;
+ }
+ fclose(fp);
+
+    /* In mbedtls, for PEM, the buffer must contain a null-terminated string */
+ kbuffer[finfo.size] = '\0';
+ flb_sds_len_set(kbuffer, finfo.size + 1);
+
+ ctx->private_key = kbuffer;
+
+ return 0;
+}
+
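+/*
+ * The credentials file is expected to follow the standard OCI CLI layout;
+ * an illustrative example (all values are placeholders):
+ *
+ *   [DEFAULT]
+ *   user=ocid1.user.oc1..example
+ *   fingerprint=11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00
+ *   key_file=/path/to/private_key.pem
+ *   tenancy=ocid1.tenancy.oc1..example
+ *   region=us-ashburn-1
+ */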
+static int load_oci_credentials(struct flb_oci_logan *ctx)
+{
+ flb_sds_t content;
+ int found_profile = 0, res = 0;
+ char *line, *profile = NULL;
+ int eq_pos = 0;
+ char* key = NULL;
+ char* val;
+
+ content = flb_file_read(ctx->config_file_location);
+    if (content == NULL || flb_sds_len(content) == 0) {
+        return -1;
+    }
+ flb_plg_debug(ctx->ins, "content = %s", content);
+ line = strtok(content, "\n");
+ while(line != NULL) {
+ /* process line */
+ flb_plg_debug(ctx->ins, "line = %s", line);
+ if(!found_profile && line[0] == '[') {
+ profile = mk_string_copy_substr(line, 1, strlen(line) - 1);
+ if(!strcmp(profile, ctx->profile_name)) {
+ flb_plg_info(ctx->ins, "found profile");
+ found_profile = 1;
+ goto iterate;
+ }
+ mk_mem_free(profile);
+ }
+ if(found_profile) {
+ if(line[0] == '[') {
+ break;
+ }
+ eq_pos = mk_string_char_search(line, '=', strlen(line));
+ flb_plg_debug(ctx->ins, "eq_pos %d", eq_pos);
+ key = mk_string_copy_substr(line, 0, eq_pos);
+ flb_plg_debug(ctx->ins, "key = %s", key);
+ val = line + eq_pos + 1;
+ if (!key || !val) {
+ res = -1;
+ break;
+ }
+ if (strcmp(key, FLB_OCI_PARAM_USER) == 0) {
+ ctx->user = flb_sds_create(val);
+ }
+ else if (strcmp(key, FLB_OCI_PARAM_TENANCY) == 0) {
+ ctx->tenancy = flb_sds_create(val);
+ }
+ else if (strcmp(key, FLB_OCI_PARAM_KEY_FILE) == 0) {
+ ctx->key_file = flb_sds_create(val);
+ }
+ else if (strcmp(key, FLB_OCI_PARAM_KEY_FINGERPRINT) == 0) {
+ ctx->key_fingerprint = flb_sds_create(val);
+ }
+ else if (strcmp(key, FLB_OCI_PARAM_REGION) == 0) {
+ ctx->region = flb_sds_create(val);
+ }
+ else {
+ goto iterate;
+ }
+ }
+ iterate:
+ if (profile) {
+ mk_mem_free(profile);
+ profile = NULL;
+ }
+ if (key) {
+ mk_mem_free(key);
+ key = NULL;
+ }
+ line = strtok(NULL, "\n");
+ }
+ if (!found_profile) {
+ flb_errno();
+ res = -1;
+ }
+
+ flb_sds_destroy(content);
+ if (profile) {
+ mk_mem_free(profile);
+ }
+ if (key) {
+ mk_mem_free(key);
+ }
+ return res;
+}
+
+static int global_metadata_fields_create(struct flb_oci_logan *ctx)
+{
+ struct mk_list *head;
+ struct flb_slist_entry *kname;
+ struct flb_slist_entry *val;
+ struct flb_config_map_val *mv;
+ struct metadata_obj *f;
+
+ if (!ctx->oci_la_global_metadata) {
+ return 0;
+ }
+
+ flb_config_map_foreach(head, mv, ctx->oci_la_global_metadata) {
+ kname = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ val = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ f = flb_malloc(sizeof(struct metadata_obj));
+ if (!f) {
+ flb_errno();
+ return -1;
+ }
+
+ f->key = flb_sds_create(kname->str);
+ if (!f->key) {
+ flb_free(f);
+ return -1;
+ }
+        f->val = flb_sds_create(val->str);
+        if (!f->val) {
+            flb_sds_destroy(f->key);
+            flb_free(f);
+            return -1;
+        }
+
+
+ mk_list_add(&f->_head, &ctx->global_metadata_fields);
+ }
+
+ return 0;
+}
+
+static int log_event_metadata_create(struct flb_oci_logan *ctx)
+{
+ struct mk_list *head;
+ struct flb_slist_entry *kname;
+ struct flb_slist_entry *val;
+ struct flb_config_map_val *mv;
+ struct metadata_obj *f;
+
+ if (!ctx->oci_la_metadata) {
+ return 0;
+ }
+
+ flb_config_map_foreach(head, mv, ctx->oci_la_metadata) {
+ kname = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ val = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ f = flb_malloc(sizeof(struct metadata_obj));
+ if (!f) {
+ flb_errno();
+ return -1;
+ }
+
+ f->key = flb_sds_create(kname->str);
+ if (!f->key) {
+ flb_free(f);
+ return -1;
+ }
+        f->val = flb_sds_create(val->str);
+        if (!f->val) {
+            flb_sds_destroy(f->key);
+            flb_free(f);
+            return -1;
+        }
+
+
+ mk_list_add(&f->_head, &ctx->log_event_metadata_fields);
+ }
+
+ return 0;
+}
+struct flb_oci_logan *flb_oci_logan_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config) {
+ struct flb_oci_logan *ctx;
+ struct flb_upstream *upstream;
+ flb_sds_t host = NULL;
+ int io_flags = 0, default_port;
+ int ret = 0;
+ char *protocol = NULL;
+ char *p_host = NULL;
+ char *p_port = NULL;
+ char *p_uri = NULL;
+
+ ctx = flb_calloc(1, sizeof(struct flb_oci_logan));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+
+    ctx->ins = ins;
+
+    /* Initialize the metadata lists unconditionally so that
+     * flb_oci_logan_conf_destroy() can iterate them safely even when no
+     * metadata was configured.
+     */
+    mk_list_init(&ctx->global_metadata_fields);
+    mk_list_init(&ctx->log_event_metadata_fields);
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "configuration error");
+ flb_oci_logan_conf_destroy(ctx);
+ return NULL;
+ }
+
+ if (ctx->oci_config_in_record == FLB_FALSE) {
+ if (ctx->oci_la_log_source_name == NULL ||
+ ctx->oci_la_log_group_id == NULL) {
+ flb_errno();
+ flb_plg_error(ctx->ins,
+ "log source name and log group id are required");
+ flb_oci_logan_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+    if (ctx->oci_la_global_metadata != NULL) {
+ ret = global_metadata_fields_create(ctx);
+ if (ret != 0) {
+ flb_errno();
+ flb_oci_logan_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+    if (ctx->oci_la_metadata != NULL) {
+ ret = log_event_metadata_create(ctx);
+ if (ret != 0) {
+ flb_errno();
+ flb_oci_logan_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ if (!ctx->config_file_location) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "config file location is required");
+ flb_oci_logan_conf_destroy(ctx);
+ return NULL;
+ }
+
+ ret = load_oci_credentials(ctx);
+ if(ret != 0) {
+ flb_errno();
+ flb_oci_logan_conf_destroy(ctx);
+ return NULL;
+ }
+
+ if (ins->host.name) {
+ host = ins->host.name;
+ }
+ else {
+ if (!ctx->region) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "Region is required");
+ flb_oci_logan_conf_destroy(ctx);
+ return NULL;
+ }
+ host = flb_sds_create_size(512);
+ flb_sds_snprintf(&host, flb_sds_alloc(host), "loganalytics.%s.oci.oraclecloud.com", ctx->region);
+ }
+
+ if (!ctx->uri) {
+ if (!ctx->namespace) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "Namespace is required");
+ flb_oci_logan_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->uri = flb_sds_create_size(512);
+ flb_sds_snprintf(&ctx->uri, flb_sds_alloc(ctx->uri),
+ "/20200601/namespaces/%s/actions/uploadLogEventsFile",
+ ctx->namespace);
+ }
+
+ if (create_pk_context(ctx->key_file, NULL, ctx) < 0) {
+ flb_plg_error(ctx->ins, "failed to create pk context");
+ flb_oci_logan_conf_destroy(ctx);
+ return NULL;
+ }
+
+
+ ctx->key_id = flb_sds_create_size(512);
+ flb_sds_snprintf(&ctx->key_id, flb_sds_alloc(ctx->key_id),
+ "%s/%s/%s", ctx->tenancy, ctx->user, ctx->key_fingerprint);
+
+
+ /* Check if SSL/TLS is enabled */
+ io_flags = FLB_IO_TCP;
+ default_port = 80;
+
+#ifdef FLB_HAVE_TLS
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ default_port = 443;
+ }
+#endif
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ flb_output_net_default(host, default_port, ins);
+ flb_sds_destroy(host);
+
+ if (ctx->proxy) {
+        ret = flb_utils_url_split(ctx->proxy, &protocol, &p_host, &p_port, &p_uri);
+        if (ret == -1) {
+            flb_plg_error(ctx->ins, "could not parse proxy parameter: '%s'", ctx->proxy);
+ flb_oci_logan_conf_destroy(ctx);
+ return NULL;
+ }
+
+ ctx->proxy_host = p_host;
+ ctx->proxy_port = atoi(p_port);
+ flb_free(protocol);
+ flb_free(p_port);
+ flb_free(p_uri);
+ flb_free(p_host);
+ }
+
+ if (ctx->proxy) {
+ upstream = flb_upstream_create(config, ctx->proxy_host, ctx->proxy_port,
+ io_flags, ins->tls);
+ }
+ else {
+ /* Prepare an upstream handler */
+ upstream = flb_upstream_create(config, ins->host.name, ins->host.port,
+ io_flags, ins->tls);
+ }
+
+ if (!upstream) {
+ flb_plg_error(ctx->ins, "cannot create Upstream context");
+ flb_oci_logan_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->u = upstream;
+
+ /* Set instance flags into upstream */
+ flb_output_upstream_set(ctx->u, ins);
+
+ return ctx;
+}
+
+static void metadata_fields_destroy(struct flb_oci_logan *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct metadata_obj *f;
+
+ mk_list_foreach_safe(head, tmp, &ctx->global_metadata_fields) {
+ f = mk_list_entry(head, struct metadata_obj, _head);
+ flb_sds_destroy(f->key);
+ flb_sds_destroy(f->val);
+ mk_list_del(&f->_head);
+ flb_free(f);
+ }
+
+ mk_list_foreach_safe(head, tmp, &ctx->log_event_metadata_fields) {
+ f = mk_list_entry(head, struct metadata_obj, _head);
+ flb_sds_destroy(f->key);
+ flb_sds_destroy(f->val);
+ mk_list_del(&f->_head);
+ flb_free(f);
+ }
+
+}
+
+int flb_oci_logan_conf_destroy(struct flb_oci_logan *ctx) {
+ if(ctx == NULL) {
+ return 0;
+ }
+
+ if (ctx->private_key) {
+ flb_sds_destroy(ctx->private_key);
+ }
+ if (ctx->uri) {
+ flb_sds_destroy(ctx->uri);
+ }
+ if (ctx->key_id) {
+ flb_sds_destroy(ctx->key_id);
+ }
+ if (ctx->key_file) {
+ flb_sds_destroy(ctx->key_file);
+ }
+ if(ctx->user) {
+ flb_sds_destroy(ctx->user);
+ }
+ if(ctx->key_fingerprint) {
+ flb_sds_destroy(ctx->key_fingerprint);
+ }
+ if(ctx->tenancy) {
+ flb_sds_destroy(ctx->tenancy);
+ }
+ if(ctx->region) {
+ flb_sds_destroy(ctx->region);
+ }
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ metadata_fields_destroy(ctx);
+
+ flb_free(ctx);
+ return 0;
+} \ No newline at end of file
diff --git a/src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan_conf.h b/src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan_conf.h
new file mode 100644
index 000000000..a11832b0a
--- /dev/null
+++ b/src/fluent-bit/plugins/out_oracle_log_analytics/oci_logan_conf.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef FLB_OUT_OCI_LOGAN_CONF_H
+#define FLB_OUT_OCI_LOGAN_CONF_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_config.h>
+
+#include "oci_logan.h"
+
+struct flb_oci_logan *flb_oci_logan_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+int flb_oci_logan_conf_destroy(struct flb_oci_logan *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_pgsql/CMakeLists.txt b/src/fluent-bit/plugins/out_pgsql/CMakeLists.txt
new file mode 100644
index 000000000..6206c02f9
--- /dev/null
+++ b/src/fluent-bit/plugins/out_pgsql/CMakeLists.txt
@@ -0,0 +1,8 @@
+set(src
+ pgsql.c
+ pgsql_connections.c
+ )
+
+FLB_PLUGIN(out_pgsql "${src}" "")
+target_include_directories(flb-plugin-out_pgsql PRIVATE ${PostgreSQL_INCLUDE_DIRS})
+target_link_libraries(flb-plugin-out_pgsql -lpq)
diff --git a/src/fluent-bit/plugins/out_pgsql/pgsql.c b/src/fluent-bit/plugins/out_pgsql/pgsql.c
new file mode 100644
index 000000000..a01090c1a
--- /dev/null
+++ b/src/fluent-bit/plugins/out_pgsql/pgsql.c
@@ -0,0 +1,389 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "pgsql.h"
+#include "pgsql_connections.h"
+
+void pgsql_conf_destroy(struct flb_pgsql_config *ctx)
+{
+ pgsql_destroy_connections(ctx);
+
+ flb_free(ctx->db_hostname);
+
+ if (ctx->db_table != NULL) {
+ flb_sds_destroy(ctx->db_table);
+ }
+
+ if (ctx->timestamp_key != NULL) {
+ flb_sds_destroy(ctx->timestamp_key);
+ }
+
+ flb_free(ctx);
+ ctx = NULL;
+}
+
+static int cb_pgsql_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+
+ struct flb_pgsql_config *ctx;
+ size_t str_len;
+ PGresult *res;
+ char *query = NULL;
+ char *temp = NULL;
+ const char *tmp = NULL;
+ int ret;
+
+ /* set default network configuration */
+ flb_output_net_default(FLB_PGSQL_HOST, FLB_PGSQL_PORT, ins);
+
+ ctx = flb_calloc(1, sizeof(struct flb_pgsql_config));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->ins = ins;
+
+ /* Database host */
+ ctx->db_hostname = flb_strdup(ins->host.name);
+ if (!ctx->db_hostname) {
+ flb_errno();
+ pgsql_conf_destroy(ctx);
+ return -1;
+ }
+
+ /* Database port */
+ snprintf(ctx->db_port, sizeof(ctx->db_port), "%d", ins->host.port);
+
+ /* Database name */
+ ctx->db_name = flb_output_get_property("database", ins);
+ if (!ctx->db_name) {
+ ctx->db_name = FLB_PGSQL_DBNAME;
+ }
+
+ /* db table */
+ tmp = flb_output_get_property("table", ins);
+ if (tmp) {
+ ctx->db_table = flb_sds_create(tmp);
+ }
+ else {
+ ctx->db_table = flb_sds_create(FLB_PGSQL_TABLE);
+ }
+
+ /* connection options */
+ ctx->conn_options = flb_output_get_property("connection_options", ins);
+
+ if (!ctx->db_table) {
+ flb_errno();
+ pgsql_conf_destroy(ctx);
+ return -1;
+ }
+
+ /* db user */
+ ctx->db_user = flb_output_get_property("user", ins);
+ if (!ctx->db_user) {
+        flb_plg_warn(ctx->ins,
+                     "You didn't supply a valid user to connect, "
+                     "your current unix user will be used");
+ }
+
+ /* db user password */
+ ctx->db_passwd = flb_output_get_property("password", ins);
+
+ /* timestamp key */
+ tmp = flb_output_get_property("timestamp_key", ins);
+ if (tmp) {
+ ctx->timestamp_key = flb_sds_create(tmp);
+ }
+ else {
+ ctx->timestamp_key = flb_sds_create(FLB_PGSQL_TIMESTAMP_KEY);
+ }
+
+ if (!ctx->timestamp_key) {
+ flb_errno();
+ pgsql_conf_destroy(ctx);
+ return -1;
+ }
+
+ /* Pool size */
+ tmp = flb_output_get_property("max_pool_size", ins);
+ if (tmp) {
+ ctx->max_pool_size = strtol(tmp, NULL, 0);
+ if (ctx->max_pool_size < 1)
+ ctx->max_pool_size = 1;
+ }
+ else {
+ ctx->max_pool_size = FLB_PGSQL_POOL_SIZE;
+ }
+
+ tmp = flb_output_get_property("min_pool_size", ins);
+ if (tmp) {
+ ctx->min_pool_size = strtol(tmp, NULL, 0);
+ if (ctx->min_pool_size < 1 || ctx->min_pool_size > ctx->max_pool_size)
+ ctx->min_pool_size = ctx->max_pool_size;
+ }
+ else {
+ ctx->min_pool_size = FLB_PGSQL_MIN_POOL_SIZE;
+ }
+
+ /* Sync Mode */
+ tmp = flb_output_get_property("async", ins);
+ if (tmp && flb_utils_bool(tmp)) {
+ ctx->async = FLB_TRUE;
+ }
+ else {
+ ctx->async = FLB_FALSE;
+ }
+
+ if (!ctx->async) {
+ ctx->min_pool_size = 1;
+ ctx->max_pool_size = 1;
+ }
+
+ /* CockroachDB Support */
+ tmp = flb_output_get_property("cockroachdb", ins);
+ if (tmp && flb_utils_bool(tmp)) {
+ ctx->cockroachdb = FLB_TRUE;
+ }
+ else {
+ ctx->cockroachdb = FLB_FALSE;
+ }
+
+ ret = pgsql_start_connections(ctx);
+ if (ret) {
+ return -1;
+ }
+
+ flb_plg_info(ctx->ins, "host=%s port=%s dbname=%s OK",
+ ctx->db_hostname, ctx->db_port, ctx->db_name);
+ flb_output_set_context(ins, ctx);
+
+ temp = PQescapeIdentifier(ctx->conn_current->conn, ctx->db_table,
+ flb_sds_len(ctx->db_table));
+
+ if (temp == NULL) {
+ flb_plg_error(ctx->ins, "failed to parse table name: %s",
+ PQerrorMessage(ctx->conn_current->conn));
+ pgsql_conf_destroy(ctx);
+ return -1;
+ }
+
+ flb_sds_destroy(ctx->db_table);
+ ctx->db_table = flb_sds_create(temp);
+ PQfreemem(temp);
+
+ if (!ctx->db_table) {
+ flb_errno();
+ pgsql_conf_destroy(ctx);
+ return -1;
+ }
+
+    flb_plg_info(ctx->ins, "checking that table %s exists; "
+                 "it will be created if missing", ctx->db_table);
+
+ str_len = 72 + flb_sds_len(ctx->db_table);
+
+ query = flb_malloc(str_len);
+ if (query == NULL) {
+ flb_errno();
+ pgsql_conf_destroy(ctx);
+ return -1;
+ }
+
+ /* Maybe use the timestamp with the TZ specified */
+ /* in the postgresql connection? */
+ snprintf(query, str_len,
+ "CREATE TABLE IF NOT EXISTS %s "
+ "(tag varchar, time timestamp, data jsonb);",
+ ctx->db_table);
+ flb_plg_trace(ctx->ins, "%s", query);
+ res = PQexec(ctx->conn_current->conn, query);
+
+ flb_free(query);
+
+    if (PQresultStatus(res) != PGRES_COMMAND_OK) {
+        flb_plg_error(ctx->ins, "%s",
+                      PQerrorMessage(ctx->conn_current->conn));
+        PQclear(res);
+        pgsql_conf_destroy(ctx);
+        return -1;
+    }
+
+ PQclear(res);
+
+ return 0;
+}
+
+static void cb_pgsql_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ struct flb_pgsql_config *ctx = out_context;
+ flb_sds_t json;
+ char *tmp = NULL;
+ char *query = NULL;
+ PGresult *res = NULL;
+ int send_res;
+ flb_sds_t tag_escaped = NULL;
+ size_t str_len;
+
+
+ if (pgsql_next_connection(ctx) == 1) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /*
+ * PQreset()
+ * This function will close the connection to the server and attempt to
+ * reestablish a new connection to the same server, using all the same
+ * parameters previously used. This might be useful for error recovery
+ * if a working connection is lost.
+ */
+ if (PQstatus(ctx->conn_current->conn) != CONNECTION_OK) {
+ PQreset(ctx->conn_current->conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ json = flb_pack_msgpack_to_json_format(event_chunk->data,
+ event_chunk->size,
+ FLB_PACK_JSON_FORMAT_JSON,
+ FLB_PACK_JSON_DATE_DOUBLE,
+ ctx->timestamp_key);
+ if (json == NULL) {
+ flb_errno();
+ flb_plg_error(ctx->ins,
+ "Can't parse the msgpack into json");
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
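+    /*
+     * The whole chunk is serialized as a single JSON array, escaped as an
+     * SQL literal, and expanded server side by json_array_elements() in the
+     * INSERT statement below.
+     */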
+ tmp = PQescapeLiteral(ctx->conn_current->conn, json, flb_sds_len(json));
+ flb_sds_destroy(json);
+ if (!tmp) {
+ flb_errno();
+ PQfreemem(tmp);
+ flb_plg_error(ctx->ins, "Can't escape json string");
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ json = flb_sds_create(tmp);
+ PQfreemem(tmp);
+ if (!json) {
+ flb_errno();
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ tmp = PQescapeLiteral(ctx->conn_current->conn,
+ event_chunk->tag,
+ flb_sds_len(event_chunk->tag));
+ if (!tmp) {
+ flb_errno();
+ flb_sds_destroy(json);
+ PQfreemem(tmp);
+ flb_plg_error(ctx->ins, "Can't escape tag string: %s",
+ event_chunk->tag);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ tag_escaped = flb_sds_create(tmp);
+ PQfreemem(tmp);
+ if (!tag_escaped) {
+ flb_errno();
+ flb_sds_destroy(json);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ str_len = 100 + flb_sds_len(json)
+ + flb_sds_len(tag_escaped)
+ + flb_sds_len(ctx->db_table)
+ + flb_sds_len(ctx->timestamp_key);
+ query = flb_malloc(str_len);
+
+ if (query == NULL) {
+ flb_errno();
+ flb_sds_destroy(json);
+ flb_sds_destroy(tag_escaped);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+
+ snprintf(query, str_len,
+ ctx->cockroachdb ? FLB_PGSQL_INSERT_COCKROACH : FLB_PGSQL_INSERT,
+ ctx->db_table, tag_escaped, ctx->timestamp_key, json);
+ flb_plg_trace(ctx->ins, "query: %s", query);
+
+ if (ctx->async) {
+ send_res = PQsendQuery(ctx->conn_current->conn, query);
+ flb_free(query);
+ flb_sds_destroy(json);
+ flb_sds_destroy(tag_escaped);
+
+ if (send_res == 0) {
+ flb_plg_error(ctx->ins, "%s",
+ PQerrorMessage(ctx->conn_current->conn));
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ PQflush(ctx->conn_current->conn);
+ }
+ else {
+ res = PQexec(ctx->conn_current->conn, query);
+ flb_free(query);
+ flb_sds_destroy(json);
+ flb_sds_destroy(tag_escaped);
+
+ if (PQresultStatus(res) != PGRES_COMMAND_OK) {
+ flb_plg_error(ctx->ins, "%s",
+ PQerrorMessage(ctx->conn_current->conn));
+ PQclear(res);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ PQclear(res);
+ }
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int cb_pgsql_exit(void *data, struct flb_config *config)
+{
+ struct flb_pgsql_config *ctx = data;
+
+ if (!ctx){
+ return 0;
+ }
+
+ pgsql_conf_destroy(ctx);
+
+ return 0;
+}
+
+struct flb_output_plugin out_pgsql_plugin = {
+ .name = "pgsql",
+ .description = "PostgreSQL",
+ .cb_init = cb_pgsql_init,
+ .cb_flush = cb_pgsql_flush,
+ .cb_exit = cb_pgsql_exit,
+ .flags = 0,
+};
diff --git a/src/fluent-bit/plugins/out_pgsql/pgsql.h b/src/fluent-bit/plugins/out_pgsql/pgsql.h
new file mode 100644
index 000000000..5190a5a54
--- /dev/null
+++ b/src/fluent-bit/plugins/out_pgsql/pgsql.h
@@ -0,0 +1,91 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_PGSQL_H
+#define FLB_OUT_PGSQL_H
+
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_output_plugin.h>
+
+#include <libpq-fe.h>
+
+#define FLB_PGSQL_HOST "127.0.0.1"
+#define FLB_PGSQL_PORT 5432
+#define FLB_PGSQL_DBNAME "fluentbit"
+#define FLB_PGSQL_TABLE "fluentbit"
+#define FLB_PGSQL_TIMESTAMP_KEY "date"
+#define FLB_PGSQL_POOL_SIZE 4
+#define FLB_PGSQL_MIN_POOL_SIZE 1
+#define FLB_PGSQL_SYNC FLB_FALSE
+#define FLB_PGSQL_COCKROACH FLB_FALSE
+
+#define FLB_PGSQL_INSERT "INSERT INTO %s SELECT %s, " \
+ "to_timestamp(CAST(value->>'%s' as FLOAT))," \
+ " * FROM json_array_elements(%s);"
+#define FLB_PGSQL_INSERT_COCKROACH "INSERT INTO %s SELECT %s," \
+ "CAST(value->>'%s' AS INTERVAL) + DATE'1970-01-01'," \
+ " * FROM json_array_elements(%s);"
+
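+/*
+ * Illustrative expansion of FLB_PGSQL_INSERT (placeholder values):
+ *
+ *   INSERT INTO fluentbit SELECT 'my.tag',
+ *     to_timestamp(CAST(value->>'date' as FLOAT)),
+ *     * FROM json_array_elements('[{"date":1700000000.0,"log":"hello"}]');
+ */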
+struct flb_pgsql_conn {
+ struct mk_list _head;
+ PGconn *conn;
+ int number;
+};
+
+struct flb_pgsql_config {
+
+ /* database */
+ char *db_hostname;
+ char db_port[8];
+ const char *db_name;
+ flb_sds_t db_table;
+
+ /* auth */
+ const char *db_user;
+ const char *db_passwd;
+
+ /* time key */
+ flb_sds_t timestamp_key;
+
+ /* instance reference */
+ struct flb_output_instance *ins;
+
+ /* connections options */
+ const char *conn_options;
+
+ /* connections pool */
+ struct mk_list conn_queue;
+ struct mk_list _head;
+
+ struct flb_pgsql_conn *conn_current;
+ int max_pool_size;
+ int min_pool_size;
+ int active_conn;
+
+ /* async mode or sync mode */
+ int async;
+
+ /* cockroachdb */
+ int cockroachdb;
+};
+
+void pgsql_conf_destroy(struct flb_pgsql_config *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_pgsql/pgsql_connections.c b/src/fluent-bit/plugins/out_pgsql/pgsql_connections.c
new file mode 100644
index 000000000..9c4ccfba2
--- /dev/null
+++ b/src/fluent-bit/plugins/out_pgsql/pgsql_connections.c
@@ -0,0 +1,193 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+
+#include "pgsql.h"
+
+void pgsql_destroy_connections(struct flb_pgsql_config *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_pgsql_conn *conn;
+ PGresult *res = NULL;
+
+ mk_list_foreach_safe(head, tmp, &ctx->conn_queue) {
+ conn = mk_list_entry(head, struct flb_pgsql_conn, _head);
+ if (PQstatus(conn->conn) == CONNECTION_OK) {
+ while(PQconsumeInput(conn->conn) == 0) {
+ res = PQgetResult(conn->conn);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK) {
+ flb_plg_warn(ctx->ins, "%s",
+ PQerrorMessage(conn->conn));
+ }
+ PQclear(res);
+ }
+ }
+ PQfinish(conn->conn);
+ flb_free(conn);
+ }
+}
+
+void *pgsql_create_connection(struct flb_pgsql_config *ctx)
+{
+ struct flb_pgsql_conn *conn;
+
+ conn = flb_calloc(1, sizeof(struct flb_pgsql_conn));
+ if (!conn) {
+ flb_errno();
+ return NULL;
+ }
+
+ conn->conn = PQsetdbLogin(ctx->db_hostname,
+ ctx->db_port,
+ ctx->conn_options,
+ NULL,
+ ctx->db_name,
+ ctx->db_user,
+ ctx->db_passwd);
+
+ if (PQstatus(conn->conn) != CONNECTION_OK) {
+ flb_plg_error(ctx->ins,
+ "failed connecting to host=%s with error: %s",
+ ctx->db_hostname, PQerrorMessage(conn->conn));
+ PQfinish(conn->conn);
+ flb_free(conn);
+ return NULL;
+ }
+
+ flb_plg_info(ctx->ins, "switching postgresql connection "
+ "to non-blocking mode");
+
+ if (PQsetnonblocking(conn->conn, 1) != 0) {
+ flb_plg_error(ctx->ins, "non-blocking mode not set");
+ PQfinish(conn->conn);
+ flb_free(conn);
+ return NULL;
+ }
+
+ return conn;
+}
+
+int pgsql_start_connections(struct flb_pgsql_config *ctx)
+{
+ int i;
+ struct flb_pgsql_conn *conn = NULL;
+
+ mk_list_init(&ctx->conn_queue);
+ ctx->active_conn = 0;
+
+ for(i = 0; i < ctx->min_pool_size; i++) {
+ flb_plg_info(ctx->ins, "Opening connection: #%d", i);
+
+ conn = (struct flb_pgsql_conn *)pgsql_create_connection(ctx);
+ if (conn == NULL) {
+ pgsql_conf_destroy(ctx);
+ return -1;
+ }
+
+ conn->number = i;
+ ctx->active_conn++;
+ mk_list_add(&conn->_head, &ctx->conn_queue);
+ }
+
+ ctx->conn_current = mk_list_entry_last(&ctx->conn_queue,
+ struct flb_pgsql_conn,
+ _head);
+
+ return 0;
+}
+
+int pgsql_new_connection(struct flb_pgsql_config *ctx)
+{
+ struct flb_pgsql_conn *conn = NULL;
+
+ if (ctx->active_conn >= ctx->max_pool_size) {
+ return -1;
+ }
+
+ conn = (struct flb_pgsql_conn *)pgsql_create_connection(ctx);
+ if (conn == NULL) {
+ pgsql_conf_destroy(ctx);
+ return -1;
+ }
+
+ conn->number = ctx->active_conn + 1;
+ ctx->active_conn++;
+
+ mk_list_add(&conn->_head, &ctx->conn_queue);
+
+ return 0;
+}
+
+int pgsql_next_connection(struct flb_pgsql_config *ctx)
+{
+ struct flb_pgsql_conn *tmp = NULL;
+ PGresult *res = NULL;
+ struct mk_list *head;
+ int ret_conn = 1;
+
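+    /*
+     * Strategy: drain any pending result from the current connection, then
+     * hand out the first idle connection found in the pool; if every
+     * connection is busy, try to grow the pool and retry.
+     */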
+ if (PQconsumeInput(ctx->conn_current->conn) == 1) {
+ if (PQisBusy(ctx->conn_current->conn) == 0) {
+ res = PQgetResult(ctx->conn_current->conn);
+ PQclear(res);
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "%s",
+ PQerrorMessage(ctx->conn_current->conn));
+ }
+
+ mk_list_foreach(head, &ctx->conn_queue) {
+ tmp = mk_list_entry(head, struct flb_pgsql_conn, _head);
+ if (ctx->conn_current == NULL) {
+ ctx->conn_current = tmp;
+ break;
+ }
+
+ res = PQgetResult(tmp->conn);
+
+ if (res == NULL) {
+ flb_plg_debug(ctx->ins, "Connection number %d",
+ tmp->number);
+ ctx->conn_current = tmp;
+ PQclear(res);
+ return 0;
+ }
+
+ if (PQresultStatus(res) == PGRES_FATAL_ERROR) {
+ flb_plg_info(ctx->ins, "%s",
+ PQerrorMessage(tmp->conn));
+ }
+
+ PQclear(res);
+ }
+
+ if (pgsql_new_connection(ctx) == -1) {
+ flb_plg_warn(ctx->ins,
+ "No more free connections."
+ " Increase max connections");
+ }
+ else {
+ flb_plg_warn(ctx->ins, "Added new connection");
+ ret_conn = pgsql_next_connection(ctx);
+ }
+
+ return ret_conn;
+}
diff --git a/src/fluent-bit/plugins/out_pgsql/pgsql_connections.h b/src/fluent-bit/plugins/out_pgsql/pgsql_connections.h
new file mode 100644
index 000000000..cd8730618
--- /dev/null
+++ b/src/fluent-bit/plugins/out_pgsql/pgsql_connections.h
@@ -0,0 +1,27 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_PGSQL_CONNECTIONS_H
+#define FLB_OUT_PGSQL_CONNECTIONS_H
+
+void pgsql_destroy_connections(struct flb_pgsql_config *ctx);
+int pgsql_start_connections(struct flb_pgsql_config *ctx);
+int pgsql_next_connection(struct flb_pgsql_config *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_plot/CMakeLists.txt b/src/fluent-bit/plugins/out_plot/CMakeLists.txt
new file mode 100644
index 000000000..e3cd96db5
--- /dev/null
+++ b/src/fluent-bit/plugins/out_plot/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ plot.c)
+
+FLB_PLUGIN(out_plot "${src}" "")
diff --git a/src/fluent-bit/plugins/out_plot/plot.c b/src/fluent-bit/plugins/out_plot/plot.c
new file mode 100644
index 000000000..e5a217f45
--- /dev/null
+++ b/src/fluent-bit/plugins/out_plot/plot.c
@@ -0,0 +1,242 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <msgpack.h>
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+struct flb_plot {
+ const char *out_file;
+ flb_sds_t key;
+ struct flb_output_instance *ins;
+};
+
+static int cb_plot_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ int ret;
+ (void) config;
+ (void) data;
+ struct flb_plot *ctx;
+
+ ctx = flb_calloc(1, sizeof(struct flb_plot));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+
+ ret = flb_output_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Set the context */
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+}
+
+static void cb_plot_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int i;
+ int written;
+ int fd;
+ const char *out_file;
+ msgpack_object *map;
+ msgpack_object *key = NULL;
+ msgpack_object *val = NULL;
+ struct flb_plot *ctx = out_context;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ (void) i_ins;
+ (void) config;
+
+ /* Set the right output */
+ if (!ctx->out_file) {
+ out_file = event_chunk->tag;
+ }
+ else {
+ out_file = ctx->out_file;
+ }
+
+ /* Open output file with default name as the Tag */
+ fd = open(out_file, O_WRONLY | O_CREAT | O_APPEND, 0666);
+ if (fd == -1) {
+ flb_errno();
+ flb_plg_warn(ctx->ins, "could not open %s, switching to STDOUT",
+ out_file);
+ fd = STDOUT_FILENO;
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder,
+ (char *) event_chunk->data,
+ event_chunk->size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ close(fd);
+
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ /*
+ * Upon flush, for each array, lookup the time and the first field
+ * of the map to use as a data point.
+ */
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ map = log_event.body;
+
+ /*
+ * Lookup key, we need to iterate the whole map as sometimes the
+ * data that gets in can set the keys in different order (e.g: forward,
+ * tcp, etc).
+ */
+ if (ctx->key) {
+ for (i = 0; i < map->via.map.size; i++) {
+ /* Get each key and compare */
+ key = &(map->via.map.ptr[i].key);
+ if (key->type == MSGPACK_OBJECT_BIN) {
+ if (flb_sds_len(ctx->key) == key->via.bin.size &&
+ memcmp(key->via.bin.ptr, ctx->key,
+ flb_sds_len(ctx->key)) == 0) {
+ val = &(map->via.map.ptr[i].val);
+ break;
+ }
+ key = NULL;
+ val = NULL;
+ }
+ else if (key->type == MSGPACK_OBJECT_STR) {
+ if (flb_sds_len(ctx->key) == key->via.str.size &&
+ memcmp(key->via.str.ptr, ctx->key,
+ flb_sds_len(ctx->key)) == 0) {
+ val = &(map->via.map.ptr[i].val);
+ break;
+ }
+ key = NULL;
+ val = NULL;
+ }
+ else {
+ if (fd != STDOUT_FILENO) {
+ close(fd);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+ }
+ }
+ else {
+ val = &(map->via.map.ptr[0].val);
+ }
+
+ if (!val) {
+ flb_plg_error(ctx->ins, "unmatched key '%s'", ctx->key);
+
+ if (fd != STDOUT_FILENO) {
+ close(fd);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ if (val->type == MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ written = dprintf(fd, "%f %" PRIu64 "\n",
+ flb_time_to_double(&log_event.timestamp), val->via.u64);
+ }
+ else if (val->type == MSGPACK_OBJECT_NEGATIVE_INTEGER) {
+ written = dprintf(fd, "%f %" PRId64 "\n",
+ flb_time_to_double(&log_event.timestamp), val->via.i64);
+ }
+ else if (val->type == MSGPACK_OBJECT_FLOAT) {
+ written = dprintf(fd, "%f %lf\n",
+ flb_time_to_double(&log_event.timestamp), val->via.f64);
+ }
+ else {
+ flb_plg_error(ctx->ins, "value must be integer, negative integer "
+ "or float");
+ written = 0;
+ }
+ flb_plg_debug(ctx->ins, "%i bytes written to file '%s'",
+ written, out_file);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ if (fd != STDOUT_FILENO) {
+ close(fd);
+ }
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int cb_plot_exit(void *data, struct flb_config *config)
+{
+ struct flb_plot *ctx = data;
+
+ flb_free(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "key", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_plot, key),
+ "set a number of times to generate event."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "file", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_plot, out_file),
+ "set a number of times to generate event."
+ },
+ /* EOF */
+ {0}
+};
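+
+/*
+ * Illustrative usage sketch (values are examples only): a record carrying a
+ * numeric field named 'temperature' plotted into /tmp/temperature.dat:
+ *
+ *   [OUTPUT]
+ *       name   plot
+ *       match  *
+ *       key    temperature
+ *       file   /tmp/temperature.dat
+ *
+ * When 'file' is not set, the tag name is used as the output file.
+ */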
+
+struct flb_output_plugin out_plot_plugin = {
+ .name = "plot",
+ .description = "Generate data file for GNU Plot",
+ .cb_init = cb_plot_init,
+ .cb_flush = cb_plot_flush,
+ .cb_exit = cb_plot_exit,
+ .config_map = config_map,
+ .flags = 0,
+};
diff --git a/src/fluent-bit/plugins/out_prometheus_exporter/CMakeLists.txt b/src/fluent-bit/plugins/out_prometheus_exporter/CMakeLists.txt
new file mode 100644
index 000000000..f03809e57
--- /dev/null
+++ b/src/fluent-bit/plugins/out_prometheus_exporter/CMakeLists.txt
@@ -0,0 +1,14 @@
+if(NOT FLB_HTTP_SERVER)
+ message(
+ FATAL_ERROR
+ "Prometheus Exporter output plugin requires built-in HTTP Server be enabled:
+ Use -DFLB_HTTP_SERVER=On option to enable it"
+ )
+endif()
+
+set(src
+ prom_http.c
+ prom.c
+ )
+
+FLB_PLUGIN(out_prometheus_exporter "${src}" "")
diff --git a/src/fluent-bit/plugins/out_prometheus_exporter/prom.c b/src/fluent-bit/plugins/out_prometheus_exporter/prom.c
new file mode 100644
index 000000000..d471d2bab
--- /dev/null
+++ b/src/fluent-bit/plugins/out_prometheus_exporter/prom.c
@@ -0,0 +1,298 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_metrics.h>
+
+#include "prom.h"
+#include "prom_http.h"
+
+static int config_add_labels(struct flb_output_instance *ins,
+ struct prom_exporter *ctx)
+{
+ struct mk_list *head;
+ struct flb_config_map_val *mv;
+ struct flb_slist_entry *k = NULL;
+ struct flb_slist_entry *v = NULL;
+ struct flb_kv *kv;
+
+ if (!ctx->add_labels || mk_list_size(ctx->add_labels) == 0) {
+ return 0;
+ }
+
+ /* iterate all 'add_label' definitions */
+ flb_config_map_foreach(head, mv, ctx->add_labels) {
+ if (mk_list_size(mv->val.list) != 2) {
+ flb_plg_error(ins, "'add_label' expects a key and a value, "
+ "e.g: 'add_label version 1.8.0'");
+ return -1;
+ }
+
+ k = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ v = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ kv = flb_kv_item_create(&ctx->kv_labels, k->str, v->str);
+ if (!kv) {
+ flb_plg_error(ins, "could not append label %s=%s\n", k->str, v->str);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int cb_prom_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ int ret;
+ struct prom_exporter *ctx;
+
+    flb_output_net_default("0.0.0.0", 2021, ins);
+
+ ctx = flb_calloc(1, sizeof(struct prom_exporter));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+ flb_kv_init(&ctx->kv_labels);
+ flb_output_set_context(ins, ctx);
+
+ /* Load config map */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ return -1;
+ }
+
+ /* Parse 'add_label' */
+ ret = config_add_labels(ins, ctx);
+ if (ret == -1) {
+ return -1;
+ }
+
+ /* HTTP Server context */
+ ctx->http = prom_http_server_create(ctx,
+ ins->host.name, ins->host.port, config);
+ if (!ctx->http) {
+ flb_plg_error(ctx->ins, "could not initialize HTTP server, aborting");
+ return -1;
+ }
+
+ /* Hash table for metrics */
+ ctx->ht_metrics = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE, 32, 0);
+ if (!ctx->ht_metrics) {
+ flb_plg_error(ctx->ins, "could not initialize hash table for metrics");
+ return -1;
+ }
+
+ /* Start HTTP Server */
+ ret = prom_http_server_start(ctx->http);
+ if (ret == -1) {
+ return -1;
+ }
+
+ flb_plg_info(ctx->ins, "listening iface=%s tcp_port=%d",
+ ins->host.name, ins->host.port);
+ return 0;
+}
+
+static void append_labels(struct prom_exporter *ctx, struct cmt *cmt)
+{
+ struct flb_kv *kv;
+ struct mk_list *head;
+
+ mk_list_foreach(head, &ctx->kv_labels) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+ cmt_label_add(cmt, kv->key, kv->val);
+ }
+}
+
+static int hash_store(struct prom_exporter *ctx, struct flb_input_instance *ins,
+ cfl_sds_t buf)
+{
+ int ret;
+ int len;
+
+ len = strlen(ins->name);
+
+ /* store/override the content into the hash table */
+ ret = flb_hash_table_add(ctx->ht_metrics, ins->name, len,
+ buf, cfl_sds_len(buf));
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static flb_sds_t hash_format_metrics(struct prom_exporter *ctx)
+{
+ int size = 2048;
+ flb_sds_t buf;
+
+ struct mk_list *head;
+ struct flb_hash_table_entry *entry;
+
+
+ buf = flb_sds_create_size(size);
+ if (!buf) {
+ return NULL;
+ }
+
+ /* Take every hash entry and compose one buffer with the whole content */
+ mk_list_foreach(head, &ctx->ht_metrics->entries) {
+ entry = mk_list_entry(head, struct flb_hash_table_entry, _head_parent);
+ flb_sds_cat_safe(&buf, entry->val, entry->val_size);
+ }
+
+ return buf;
+}
+
+static void cb_prom_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *ins, void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ int add_ts;
+ size_t off = 0;
+ flb_sds_t metrics;
+ cfl_sds_t text;
+ struct cmt *cmt;
+ struct prom_exporter *ctx = out_context;
+
+ /*
+ * A new set of metrics has arrived, perform decoding, apply labels,
+ * convert to Prometheus text format and store the output in the
+ * hash table for metrics.
+ */
+ ret = cmt_decode_msgpack_create(&cmt,
+ (char *) event_chunk->data,
+ event_chunk->size, &off);
+ if (ret != 0) {
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ /* append labels set by config */
+ append_labels(ctx, cmt);
+
+ /* add timestamp in the output format ? */
+ if (ctx->add_timestamp) {
+ add_ts = CMT_TRUE;
+ }
+ else {
+ add_ts = CMT_FALSE;
+ }
+
+ /* convert to text representation */
+ text = cmt_encode_prometheus_create(cmt, add_ts);
+ if (!text) {
+ cmt_destroy(cmt);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+ cmt_destroy(cmt);
+
+ if (cfl_sds_len(text) == 0) {
+ flb_plg_debug(ctx->ins, "context without metrics (empty)");
+ cmt_encode_text_destroy(text);
+ FLB_OUTPUT_RETURN(FLB_OK);
+ }
+
+ /* register payload of metrics / override previous one */
+ ret = hash_store(ctx, ins, text);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not store metrics coming from: %s",
+ flb_input_name(ins));
+ cmt_encode_prometheus_destroy(text);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+ cmt_encode_prometheus_destroy(text);
+
+ /* retrieve a full copy of all metrics */
+ metrics = hash_format_metrics(ctx);
+ if (!metrics) {
+ flb_plg_error(ctx->ins, "could not retrieve metrics");
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ /* push new (full) metrics payload */
+ ret = prom_http_server_mq_push_metrics(ctx->http,
+ (char *) metrics,
+ flb_sds_len(metrics));
+ flb_sds_destroy(metrics);
+
+ if (ret != 0) {
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int cb_prom_exit(void *data, struct flb_config *config)
+{
+ struct prom_exporter *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->ht_metrics) {
+ flb_hash_table_destroy(ctx->ht_metrics);
+ }
+
+ flb_kv_release(&ctx->kv_labels);
+ prom_http_server_stop(ctx->http);
+ prom_http_server_destroy(ctx->http);
+ flb_free(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_BOOL, "add_timestamp", "false",
+ 0, FLB_TRUE, offsetof(struct prom_exporter, add_timestamp),
+ "Add timestamp to every metric honoring collection time."
+ },
+
+ {
+ FLB_CONFIG_MAP_SLIST_1, "add_label", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct prom_exporter, add_labels),
+ "TCP port for listening for HTTP connections."
+ },
+
+ /* EOF */
+ {0}
+};
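+
+/*
+ * Illustrative usage sketch (values are examples only), relying on the
+ * default listener 0.0.0.0:2021 set in cb_prom_init():
+ *
+ *   [OUTPUT]
+ *       name           prometheus_exporter
+ *       match          *
+ *       host           0.0.0.0
+ *       port           2021
+ *       add_timestamp  true
+ *       add_label      app fluent-bit
+ */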
+
+/* Plugin reference */
+struct flb_output_plugin out_prometheus_exporter_plugin = {
+ .name = "prometheus_exporter",
+ .description = "Prometheus Exporter",
+ .cb_init = cb_prom_init,
+ .cb_flush = cb_prom_flush,
+ .cb_exit = cb_prom_exit,
+ .flags = FLB_OUTPUT_NET,
+ .event_type = FLB_OUTPUT_METRICS,
+ .config_map = config_map,
+};
diff --git a/src/fluent-bit/plugins/out_prometheus_exporter/prom.h b/src/fluent-bit/plugins/out_prometheus_exporter/prom.h
new file mode 100644
index 000000000..1cbab6a59
--- /dev/null
+++ b/src/fluent-bit/plugins/out_prometheus_exporter/prom.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_PROMETHEUS_EXPORTER_H
+#define FLB_PROMETHEUS_EXPORTER_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_hash_table.h>
+
+/* Plugin context */
+struct prom_exporter {
+ void *http;
+
+ /* hash table for metrics reported */
+ struct flb_hash_table *ht_metrics;
+
+ /* add timestamp to every metric */
+ int add_timestamp;
+
+ /* config reader for 'add_label' */
+ struct mk_list *add_labels;
+
+ /* internal labels ready to append */
+ struct mk_list kv_labels;
+
+ /* instance context */
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_prometheus_exporter/prom_http.c b/src/fluent-bit/plugins/out_prometheus_exporter/prom_http.c
new file mode 100644
index 000000000..7ff3f8200
--- /dev/null
+++ b/src/fluent-bit/plugins/out_prometheus_exporter/prom_http.c
@@ -0,0 +1,268 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_http_server.h>
+#include "prom.h"
+#include "prom_http.h"
+
+pthread_key_t ph_metrics_key;
+
+/* Return the newest storage metrics buffer */
+static struct prom_http_buf *metrics_get_latest()
+{
+ struct prom_http_buf *buf;
+ struct mk_list *metrics_list;
+
+ metrics_list = pthread_getspecific(ph_metrics_key);
+ if (!metrics_list) {
+ return NULL;
+ }
+
+ if (mk_list_size(metrics_list) == 0) {
+ return NULL;
+ }
+
+ buf = mk_list_entry_last(metrics_list, struct prom_http_buf, _head);
+ return buf;
+}
+
+/* Delete unused metrics, note that we only care about the latest node */
+static int cleanup_metrics()
+{
+ int c = 0;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct mk_list *metrics_list;
+ struct prom_http_buf *last;
+ struct prom_http_buf *entry;
+
+ metrics_list = pthread_getspecific(ph_metrics_key);
+ if (!metrics_list) {
+ return -1;
+ }
+
+ last = metrics_get_latest();
+ if (!last) {
+ return -1;
+ }
+
+ mk_list_foreach_safe(head, tmp, metrics_list) {
+ entry = mk_list_entry(head, struct prom_http_buf, _head);
+ if (entry != last && entry->users == 0) {
+ mk_list_del(&entry->_head);
+ flb_free(entry->buf_data);
+ flb_free(entry);
+ c++;
+ }
+ }
+
+ return c;
+}
+
+/* destructor callback */
+static void destruct_metrics(void *data)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct mk_list *metrics_list = (struct mk_list*)data;
+ struct prom_http_buf *entry;
+
+ if (!metrics_list) {
+ return;
+ }
+
+ mk_list_foreach_safe(head, tmp, metrics_list) {
+ entry = mk_list_entry(head, struct prom_http_buf, _head);
+ mk_list_del(&entry->_head);
+ flb_free(entry->buf_data);
+ flb_free(entry);
+ }
+
+ flb_free(metrics_list);
+}
+
+/*
+ * Callback invoked every time a new payload of Metrics is received from
+ * Fluent Bit engine through Message Queue channel.
+ *
+ * This function runs in a Monkey HTTP worker thread and its purpose is
+ * to take the metrics data and store it locally for every thread, so that
+ * it is later available to 'cb_metrics()' to serve as a response.
+ */
+static void cb_mq_metrics(mk_mq_t *queue, void *data, size_t size)
+{
+ struct prom_http_buf *buf;
+ struct mk_list *metrics_list = NULL;
+
+ metrics_list = pthread_getspecific(ph_metrics_key);
+ if (!metrics_list) {
+ metrics_list = flb_malloc(sizeof(struct mk_list));
+ if (!metrics_list) {
+ flb_errno();
+ return;
+ }
+ mk_list_init(metrics_list);
+ pthread_setspecific(ph_metrics_key, metrics_list);
+ }
+
+ /* FIXME: convert data ? */
+ buf = flb_malloc(sizeof(struct prom_http_buf));
+ if (!buf) {
+ flb_errno();
+ return;
+ }
+ buf->users = 0;
+ buf->buf_data = flb_malloc(size);
+ if (!buf->buf_data) {
+ flb_errno();
+ flb_free(buf);
+ return;
+ }
+ memcpy(buf->buf_data, data, size);
+ buf->buf_size = size;
+
+ mk_list_add(&buf->_head, metrics_list);
+ cleanup_metrics();
+}
+
+/* Create message queue to receive Metrics payload from the engine */
+static int http_server_mq_create(struct prom_http *ph)
+{
+ int ret;
+
+ pthread_key_create(&ph_metrics_key, destruct_metrics);
+
+ ret = mk_mq_create(ph->ctx, "/metrics", cb_mq_metrics, NULL);
+ if (ret == -1) {
+ return -1;
+ }
+ ph->qid_metrics = ret;
+ return 0;
+}
+
+/* HTTP endpoint: /metrics */
+static void cb_metrics(mk_request_t *request, void *data)
+{
+ struct prom_http_buf *buf;
+ (void) data;
+
+ buf = metrics_get_latest();
+ if (!buf) {
+ mk_http_status(request, 404);
+ mk_http_done(request);
+ return;
+ }
+
+ buf->users++;
+
+ mk_http_status(request, 200);
+ flb_hs_add_content_type_to_req(request, FLB_HS_CONTENT_TYPE_PROMETHEUS);
+ mk_http_send(request, buf->buf_data, buf->buf_size, NULL);
+ mk_http_done(request);
+
+ buf->users--;
+}
+
+/* HTTP endpoint: / (root) */
+static void cb_root(mk_request_t *request, void *data)
+{
+ (void) data;
+
+ mk_http_status(request, 200);
+ mk_http_send(request, "Fluent Bit Prometheus Exporter\n", 31, NULL);
+ mk_http_done(request);
+}
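+
+/*
+ * Illustrative check (assuming the default listener 0.0.0.0:2021 set in
+ * cb_prom_init() and the agent reachable on 127.0.0.1):
+ *
+ *   $ curl http://127.0.0.1:2021/          # banner served by cb_root()
+ *   $ curl http://127.0.0.1:2021/metrics   # latest Prometheus text payload
+ */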
+
+struct prom_http *prom_http_server_create(struct prom_exporter *ctx,
+ const char *listen,
+ int tcp_port,
+ struct flb_config *config)
+{
+ int ret;
+ int vid;
+ char tmp[32];
+ struct prom_http *ph;
+
+ ph = flb_malloc(sizeof(struct prom_http));
+ if (!ph) {
+ flb_errno();
+ return NULL;
+ }
+ ph->config = config;
+
+ /* HTTP Server context */
+ ph->ctx = mk_create();
+ if (!ph->ctx) {
+ flb_free(ph);
+ return NULL;
+ }
+
+ /* Compose listen address */
+    snprintf(tmp, sizeof(tmp) - 1, "%s:%d", listen, tcp_port);
+ mk_config_set(ph->ctx,
+ "Listen", tmp,
+ "Workers", "1",
+ NULL);
+
+ /* Virtual host */
+ vid = mk_vhost_create(ph->ctx, NULL);
+ ph->vid = vid;
+
+ /* Set HTTP URI callbacks */
+ mk_vhost_handler(ph->ctx, vid, "/metrics", cb_metrics, NULL);
+ mk_vhost_handler(ph->ctx, vid, "/", cb_root, NULL);
+
+ /* Create a Message Queue to push 'metrics' to HTTP workers */
+ ret = http_server_mq_create(ph);
+ if (ret == -1) {
+ mk_destroy(ph->ctx);
+ flb_free(ph);
+ return NULL;
+ }
+
+ return ph;
+}
+
+void prom_http_server_destroy(struct prom_http *ph)
+{
+ if (ph) {
+ /* TODO: release mk_vhost */
+ if (ph->ctx) {
+ mk_destroy(ph->ctx);
+ }
+ flb_free(ph);
+ }
+}
+
+int prom_http_server_start(struct prom_http *ph)
+{
+ return mk_start(ph->ctx);
+}
+
+int prom_http_server_stop(struct prom_http *ph)
+{
+ return mk_stop(ph->ctx);
+}
+
+int prom_http_server_mq_push_metrics(struct prom_http *ph,
+ void *data, size_t size)
+{
+ return mk_mq_send(ph->ctx, ph->qid_metrics, data, size);
+}
diff --git a/src/fluent-bit/plugins/out_prometheus_exporter/prom_http.h b/src/fluent-bit/plugins/out_prometheus_exporter/prom_http.h
new file mode 100644
index 000000000..79b4c87bb
--- /dev/null
+++ b/src/fluent-bit/plugins/out_prometheus_exporter/prom_http.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_PROMETHEUS_EXPORTER_HTTP_H
+#define FLB_PROMETHEUS_EXPORTER_HTTP_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <monkey/mk_lib.h>
+
+#include "prom.h"
+
+/* HTTP response payload received through a Message Queue */
+struct prom_http_buf {
+ int users;
+ char *buf_data;
+ size_t buf_size;
+ struct mk_list _head;
+};
+
+/* Prom HTTP Server context */
+struct prom_http {
+ mk_ctx_t *ctx; /* Monkey HTTP Context */
+ int vid; /* Virtual host ID */
+ int qid_metrics; /* Queue ID for Metrics buffer */
+ struct flb_config *config; /* Fluent Bit context */
+};
+
+struct prom_http *prom_http_server_create(struct prom_exporter *ctx,
+ const char *listen,
+ int tcp_port,
+ struct flb_config *config);
+void prom_http_server_destroy(struct prom_http *ph);
+
+int prom_http_server_start(struct prom_http *ph);
+int prom_http_server_stop(struct prom_http *ph);
+
+int prom_http_server_mq_push_metrics(struct prom_http *ph,
+ void *data, size_t size);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_prometheus_remote_write/CMakeLists.txt b/src/fluent-bit/plugins/out_prometheus_remote_write/CMakeLists.txt
new file mode 100644
index 000000000..71779cb9d
--- /dev/null
+++ b/src/fluent-bit/plugins/out_prometheus_remote_write/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ remote_write.c
+ remote_write_conf.c
+ )
+
+FLB_PLUGIN(out_prometheus_remote_write "${src}" "")
diff --git a/src/fluent-bit/plugins/out_prometheus_remote_write/remote_write.c b/src/fluent-bit/plugins/out_prometheus_remote_write/remote_write.c
new file mode 100644
index 000000000..2349afd61
--- /dev/null
+++ b/src/fluent-bit/plugins/out_prometheus_remote_write/remote_write.c
@@ -0,0 +1,466 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_snappy.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_metrics.h>
+#include <fluent-bit/flb_kv.h>
+
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_signv4.h>
+#endif
+#endif
+
+#include "remote_write.h"
+#include "remote_write_conf.h"
+
+static int http_post(struct prometheus_remote_write_context *ctx,
+ const void *body, size_t body_len,
+ const char *tag, int tag_len)
+{
+ int ret;
+ int out_ret = FLB_OK;
+ size_t b_sent;
+ void *payload_buf = NULL;
+ size_t payload_size = 0;
+ struct flb_upstream *u;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ struct mk_list *head;
+ struct flb_config_map_val *mv;
+ struct flb_slist_entry *key = NULL;
+ struct flb_slist_entry *val = NULL;
+ flb_sds_t signature = NULL;
+
+ /* Get upstream context and connection */
+ u = ctx->u;
+ u_conn = flb_upstream_conn_get(u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "no upstream connections available to %s:%i",
+ u->tcp_host, u->tcp_port);
+ return FLB_RETRY;
+ }
+
+    /* Prepare the payload: compress it if a compression mechanism was set */
+
+ if (strcasecmp(ctx->compression, "snappy") == 0) {
+ ret = flb_snappy_compress((void *) body, body_len,
+ (char **) &payload_buf,
+ &payload_size);
+ }
+ else if (strcasecmp(ctx->compression, "gzip") == 0) {
+ ret = flb_gzip_compress((void *) body, body_len,
+ &payload_buf, &payload_size);
+ }
+ else {
+ payload_buf = (void *) body;
+ payload_size = body_len;
+
+ ret = 0;
+ }
+
+ if (ret != 0) {
+ flb_upstream_conn_release(u_conn);
+
+ flb_plg_error(ctx->ins,
+ "cannot compress payload, aborting");
+
+ return FLB_ERROR;
+ }
+
+ /* Create HTTP client context */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->uri,
+ payload_buf, payload_size,
+ ctx->host, ctx->port,
+ ctx->proxy, 0);
+
+    if (!c) {
+        flb_upstream_conn_release(u_conn);
+        if (payload_buf != body) {
+            flb_free(payload_buf);
+        }
+        return FLB_RETRY;
+    }
+
+ if (c->proxy.host) {
+ flb_plg_debug(ctx->ins, "[http_client] proxy host: %s port: %i",
+ c->proxy.host, c->proxy.port);
+ }
+
+ /* Allow duplicated headers ? */
+ flb_http_allow_duplicated_headers(c, FLB_FALSE);
+
+ /*
+ * Direct assignment of the callback context to the HTTP client context.
+     * This needs to be improved through a cleaner API.
+ */
+ c->cb_ctx = ctx->ins->callback;
+
+ flb_http_add_header(c,
+ FLB_PROMETHEUS_REMOTE_WRITE_CONTENT_TYPE_HEADER_NAME,
+ sizeof(FLB_PROMETHEUS_REMOTE_WRITE_CONTENT_TYPE_HEADER_NAME) - 1,
+ FLB_PROMETHEUS_REMOTE_WRITE_MIME_PROTOBUF_LITERAL,
+ sizeof(FLB_PROMETHEUS_REMOTE_WRITE_MIME_PROTOBUF_LITERAL) - 1);
+
+ flb_http_add_header(c,
+ FLB_PROMETHEUS_REMOTE_WRITE_VERSION_HEADER_NAME,
+ sizeof(FLB_PROMETHEUS_REMOTE_WRITE_VERSION_HEADER_NAME) - 1,
+ FLB_PROMETHEUS_REMOTE_WRITE_VERSION_LITERAL,
+ sizeof(FLB_PROMETHEUS_REMOTE_WRITE_VERSION_LITERAL) - 1);
+
+ if (strcasecmp(ctx->compression, "snappy") == 0) {
+ flb_http_add_header(c,
+ "Content-Encoding",
+ strlen("Content-Encoding"),
+ "snappy",
+ strlen("snappy"));
+ }
+ else if (strcasecmp(ctx->compression, "gzip") == 0) {
+ flb_http_add_header(c,
+ "Content-Encoding",
+ strlen("Content-Encoding"),
+ "gzip",
+ strlen("gzip"));
+ }
+
+ /* Basic Auth headers */
+ if (ctx->http_user && ctx->http_passwd) {
+ flb_http_basic_auth(c, ctx->http_user, ctx->http_passwd);
+ }
+
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+
+ flb_config_map_foreach(head, mv, ctx->headers) {
+ key = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ val = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ flb_http_add_header(c,
+ key->str, flb_sds_len(key->str),
+ val->str, flb_sds_len(val->str));
+ }
+
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+ /* AWS SigV4 headers */
+ if (ctx->has_aws_auth == FLB_TRUE) {
+ flb_plg_debug(ctx->ins, "signing request with AWS Sigv4");
+ signature = flb_signv4_do(c,
+ FLB_TRUE, /* normalize URI ? */
+ FLB_TRUE, /* add x-amz-date header ? */
+ time(NULL),
+ (char *) ctx->aws_region,
+ (char *) ctx->aws_service,
+ 0, NULL,
+ ctx->aws_provider);
+
+ if (!signature) {
+ flb_plg_error(ctx->ins, "could not sign request with sigv4");
+ out_ret = FLB_RETRY;
+ goto cleanup;
+ }
+ flb_sds_destroy(signature);
+ }
+#endif
+#endif
+
+ ret = flb_http_do(c, &b_sent);
+ if (ret == 0) {
+ /*
+ * Only allow the following HTTP status:
+ *
+ * - 200: OK
+ * - 201: Created
+ * - 202: Accepted
+         * - 203: Non-Authoritative Information
+ * - 204: No Content
+ * - 205: Reset content
+ *
+ */
+ if ((c->resp.status < 200 || c->resp.status > 205) &&
+ c->resp.status != 400) {
+ if (ctx->log_response_payload &&
+ c->resp.payload && c->resp.payload_size > 0) {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i\n%s",
+ ctx->host, ctx->port,
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->host, ctx->port, c->resp.status);
+ }
+ out_ret = FLB_RETRY;
+ }
+ else if (c->resp.status == 400) {
+            /* A returned 400 status is unrecoverable, so return an error
+             * immediately instead of retrying. */
+ if (ctx->log_response_payload &&
+ c->resp.payload && c->resp.payload_size > 0) {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i\n%s",
+ ctx->host, ctx->port,
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->host, ctx->port, c->resp.status);
+ }
+ out_ret = FLB_ERROR;
+ }
+ else {
+ if (ctx->log_response_payload &&
+ c->resp.payload && c->resp.payload_size > 0) {
+ flb_plg_debug(ctx->ins, "%s:%i, HTTP status=%i\n%s",
+ ctx->host, ctx->port,
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->host, ctx->port,
+ c->resp.status);
+ }
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "could not flush records to %s:%i (http_do=%i)",
+ ctx->host, ctx->port, ret);
+ out_ret = FLB_RETRY;
+ }
+
+cleanup:
+ /*
+     * If the payload buffer is different from the incoming 'body', it means
+     * a new (compressed) payload was generated and must be freed here.
+ */
+ if (payload_buf != body) {
+ flb_free(payload_buf);
+ }
+
+ /* Destroy HTTP client context */
+ flb_http_client_destroy(c);
+
+ /* Release the TCP connection */
+ flb_upstream_conn_release(u_conn);
+
+ return out_ret;
+}
+
+static int cb_prom_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ struct prometheus_remote_write_context *ctx;
+
+ ctx = flb_prometheus_remote_write_context_create(ins, config);
+ if (!ctx) {
+ return -1;
+ }
+
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+}
+
+static void append_labels(struct prometheus_remote_write_context *ctx,
+ struct cmt *cmt)
+{
+ struct flb_kv *kv;
+ struct mk_list *head;
+
+ mk_list_foreach(head, &ctx->kv_labels) {
+ kv = mk_list_entry(head, struct flb_kv, _head);
+ cmt_label_add(cmt, kv->key, kv->val);
+ }
+}
+
+static void cb_prom_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *ins, void *out_context,
+ struct flb_config *config)
+{
+ int c = 0;
+ int ok;
+ int ret;
+ int result;
+ cfl_sds_t encoded_chunk;
+ flb_sds_t buf = NULL;
+ size_t diff = 0;
+ size_t off = 0;
+ struct cmt *cmt;
+ struct prometheus_remote_write_context *ctx = out_context;
+
+ /* Initialize vars */
+ ctx = out_context;
+ ok = CMT_DECODE_MSGPACK_SUCCESS;
+ result = FLB_OK;
+
+ /* Buffer to concatenate multiple metrics contexts */
+ buf = flb_sds_create_size(event_chunk->size);
+ if (!buf) {
+ flb_plg_error(ctx->ins, "could not allocate outgoing buffer");
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ flb_plg_debug(ctx->ins, "cmetrics msgpack size: %lu",
+ event_chunk->size);
+
+ /* Decode and encode every CMetric context */
+ diff = 0;
+ while ((ret = cmt_decode_msgpack_create(&cmt,
+ (char *) event_chunk->data,
+ event_chunk->size, &off)) == ok) {
+ /* append labels set by config */
+ append_labels(ctx, cmt);
+
+ /* Create a Prometheus Remote Write payload */
+ encoded_chunk = cmt_encode_prometheus_remote_write_create(cmt);
+ if (encoded_chunk == NULL) {
+ flb_plg_error(ctx->ins,
+ "Error encoding context as prometheus remote write");
+ result = FLB_ERROR;
+ goto exit;
+ }
+
+ flb_plg_debug(ctx->ins, "cmetric_id=%i decoded %lu-%lu payload_size=%lu",
+ c, diff, off, flb_sds_len(encoded_chunk));
+ c++;
+ diff = off;
+
+ /* concat buffer */
+ flb_sds_cat_safe(&buf, encoded_chunk, flb_sds_len(encoded_chunk));
+
+ /* release */
+ cmt_encode_prometheus_remote_write_destroy(encoded_chunk);
+ cmt_destroy(cmt);
+ }
+
+ if (ret == CMT_DECODE_MSGPACK_INSUFFICIENT_DATA && c > 0) {
+ flb_plg_debug(ctx->ins, "final payload size: %lu", flb_sds_len(buf));
+ if (buf && flb_sds_len(buf) > 0) {
+ /* Send HTTP request */
+ result = http_post(ctx, buf, flb_sds_len(buf),
+ event_chunk->tag,
+ flb_sds_len(event_chunk->tag));
+
+ /* Debug http_post() result statuses */
+ if (result == FLB_OK) {
+ flb_plg_debug(ctx->ins, "http_post result FLB_OK");
+ }
+ else if (result == FLB_ERROR) {
+ flb_plg_debug(ctx->ins, "http_post result FLB_ERROR");
+ }
+ else if (result == FLB_RETRY) {
+ flb_plg_debug(ctx->ins, "http_post result FLB_RETRY");
+ }
+ }
+ flb_sds_destroy(buf);
+ buf = NULL;
+ }
+ else {
+ flb_plg_error(ctx->ins, "Error decoding msgpack encoded context");
+ }
+
+exit:
+ if (buf) {
+ flb_sds_destroy(buf);
+ }
+ FLB_OUTPUT_RETURN(result);
+}
+
+static int cb_prom_exit(void *data, struct flb_config *config)
+{
+ struct prometheus_remote_write_context *ctx;
+
+ ctx = (struct prometheus_remote_write_context *) data;
+
+ flb_prometheus_remote_write_context_destroy(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_SLIST_1, "add_label", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct prometheus_remote_write_context,
+ add_labels),
+ "Adds a custom label to the metrics use format: 'add_label name value'"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "proxy", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify an HTTP Proxy. The expected format of this value is http://host:port. "
+ },
+ {
+ FLB_CONFIG_MAP_STR, "http_user", NULL,
+ 0, FLB_TRUE, offsetof(struct prometheus_remote_write_context, http_user),
+ "Set HTTP auth user"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "http_passwd", "",
+ 0, FLB_TRUE, offsetof(struct prometheus_remote_write_context, http_passwd),
+ "Set HTTP auth password"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "compression", "snappy",
+ 0, FLB_TRUE, offsetof(struct prometheus_remote_write_context, compression),
+ "Compress the payload with either snappy, gzip if set"
+ },
+
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+ {
+ FLB_CONFIG_MAP_BOOL, "aws_auth", "false",
+ 0, FLB_TRUE, offsetof(struct prometheus_remote_write_context, has_aws_auth),
+ "Enable AWS SigV4 authentication"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "aws_service", "aps",
+ 0, FLB_TRUE, offsetof(struct prometheus_remote_write_context, aws_service),
+ "AWS destination service code, used by SigV4 authentication"
+ },
+ FLB_AWS_CREDENTIAL_BASE_CONFIG_MAP(FLB_PROMETHEUS_REMOTE_WRITE_CREDENTIAL_PREFIX),
+#endif
+#endif
+ {
+ FLB_CONFIG_MAP_SLIST_1, "header", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct prometheus_remote_write_context, headers),
+ "Add a HTTP header key/value pair. Multiple headers can be set"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "uri", NULL,
+ 0, FLB_TRUE, offsetof(struct prometheus_remote_write_context, uri),
+ "Specify an optional HTTP URI for the target web server, e.g: /something"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "log_response_payload", "true",
+ 0, FLB_TRUE, offsetof(struct prometheus_remote_write_context, log_response_payload),
+ "Specify if the response paylod should be logged or not"
+ },
+ /* EOF */
+ {0}
+};
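+
+/*
+ * Illustrative usage sketch (host, uri and credentials are examples only):
+ *
+ *   [OUTPUT]
+ *       name                  prometheus_remote_write
+ *       match                 *
+ *       host                  prometheus.example.com
+ *       port                  443
+ *       uri                   /api/v1/write
+ *       tls                   on
+ *       http_user             example-user
+ *       http_passwd           example-password
+ *       compression           snappy
+ *       add_label             env production
+ *       header                X-Scope-OrgID example
+ *       log_response_payload  true
+ */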
+
+/* Plugin reference */
+struct flb_output_plugin out_prometheus_remote_write_plugin = {
+ .name = "prometheus_remote_write",
+ .description = "Prometheus remote write",
+ .cb_init = cb_prom_init,
+ .cb_flush = cb_prom_flush,
+ .cb_exit = cb_prom_exit,
+ .config_map = config_map,
+ .event_type = FLB_OUTPUT_METRICS,
+ .workers = 2,
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_prometheus_remote_write/remote_write.h b/src/fluent-bit/plugins/out_prometheus_remote_write/remote_write.h
new file mode 100644
index 000000000..349c7a7b0
--- /dev/null
+++ b/src/fluent-bit/plugins/out_prometheus_remote_write/remote_write.h
@@ -0,0 +1,83 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_PROMETHEUS_REMOTE_WRITE_H
+#define FLB_PROMETHEUS_REMOTE_WRITE_H
+
+#include <fluent-bit/flb_output_plugin.h>
+
+#define FLB_PROMETHEUS_REMOTE_WRITE_CONTENT_TYPE_HEADER_NAME "Content-Type"
+#define FLB_PROMETHEUS_REMOTE_WRITE_MIME_PROTOBUF_LITERAL "application/x-protobuf"
+#define FLB_PROMETHEUS_REMOTE_WRITE_VERSION_HEADER_NAME "X-Prometheus-Remote-Write-Version"
+#define FLB_PROMETHEUS_REMOTE_WRITE_VERSION_LITERAL "0.1.0"
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+#define FLB_PROMETHEUS_REMOTE_WRITE_CREDENTIAL_PREFIX "aws_"
+#endif
+#endif
+
+/* Plugin context */
+struct prometheus_remote_write_context {
+ /* HTTP Auth */
+ char *http_user;
+ char *http_passwd;
+
+ /* AWS Auth */
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+ int has_aws_auth;
+ struct flb_aws_provider *aws_provider;
+ const char *aws_region;
+ const char *aws_service;
+#endif
+#endif
+
+ /* Proxy */
+ const char *proxy;
+ char *proxy_host;
+ int proxy_port;
+
+ /* HTTP URI */
+ char *uri;
+ char *host;
+ int port;
+
+ const char *compression;
+
+    /* Log the response payload */
+ int log_response_payload;
+
+ /* config reader for 'add_label' */
+ struct mk_list *add_labels;
+
+ /* internal labels ready to append */
+ struct mk_list kv_labels;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *u;
+
+ /* Arbitrary HTTP headers */
+ struct mk_list *headers;
+
+
+ /* instance context */
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_prometheus_remote_write/remote_write_conf.c b/src/fluent-bit/plugins/out_prometheus_remote_write/remote_write_conf.c
new file mode 100644
index 000000000..09c7e0d52
--- /dev/null
+++ b/src/fluent-bit/plugins/out_prometheus_remote_write/remote_write_conf.c
@@ -0,0 +1,254 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_kv.h>
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+#include <fluent-bit/flb_aws_credentials.h>
+#endif
+#endif
+#include "remote_write.h"
+#include "remote_write_conf.h"
+
+static int config_add_labels(struct flb_output_instance *ins,
+ struct prometheus_remote_write_context *ctx)
+{
+ struct mk_list *head;
+ struct flb_config_map_val *mv;
+ struct flb_slist_entry *k = NULL;
+ struct flb_slist_entry *v = NULL;
+ struct flb_kv *kv;
+
+ if (!ctx->add_labels || mk_list_size(ctx->add_labels) == 0) {
+ return 0;
+ }
+
+ /* iterate all 'add_label' definitions */
+ flb_config_map_foreach(head, mv, ctx->add_labels) {
+ if (mk_list_size(mv->val.list) != 2) {
+ flb_plg_error(ins, "'add_label' expects a key and a value, "
+ "e.g: 'add_label version 1.8.0'");
+ return -1;
+ }
+
+ k = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ v = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ kv = flb_kv_item_create(&ctx->kv_labels, k->str, v->str);
+ if (!kv) {
+ flb_plg_error(ins, "could not append label %s=%s\n", k->str, v->str);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+struct prometheus_remote_write_context *flb_prometheus_remote_write_context_create(
+ struct flb_output_instance *ins, struct flb_config *config)
+{
+ int ret;
+ int ulen;
+ int io_flags = 0;
+ char *protocol = NULL;
+ char *host = NULL;
+ char *port = NULL;
+ char *uri = NULL;
+ char *tmp_uri = NULL;
+ const char *tmp;
+ struct flb_upstream *upstream;
+ struct prometheus_remote_write_context *ctx = NULL;
+
+ /* Allocate plugin context */
+ ctx = flb_calloc(1, sizeof(struct prometheus_remote_write_context));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ mk_list_init(&ctx->kv_labels);
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Parse 'add_label' */
+ ret = config_add_labels(ins, ctx);
+ if (ret == -1) {
+ return NULL;
+ }
+
+ /*
+     * Check if a proxy has been set; if so, the upstream manager will use
+     * the proxy endpoint and we let the HTTP client know about it, so it
+     * can adjust the HTTP requests accordingly.
+ */
+ tmp = flb_output_get_property("proxy", ins);
+ if (tmp) {
+ ret = flb_utils_url_split(tmp, &protocol, &host, &port, &uri);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not parse proxy parameter: '%s'", tmp);
+ flb_free(ctx);
+ return NULL;
+ }
+
+ ctx->proxy_host = host;
+ ctx->proxy_port = atoi(port);
+ ctx->proxy = tmp;
+ flb_free(protocol);
+ flb_free(port);
+ flb_free(uri);
+ uri = NULL;
+ }
+ else {
+ flb_output_net_default("127.0.0.1", 80, ins);
+ }
+
+ /* Check if AWS SigV4 authentication is enabled */
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+ if (ctx->has_aws_auth) {
+ if (!ctx->aws_service) {
+ flb_plg_error(ins, "aws_auth option requires " FLB_PROMETHEUS_REMOTE_WRITE_CREDENTIAL_PREFIX
+ "service to be set");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ ctx->aws_provider = flb_managed_chain_provider_create(
+ ins,
+ config,
+ FLB_PROMETHEUS_REMOTE_WRITE_CREDENTIAL_PREFIX,
+ NULL,
+ flb_aws_client_generator()
+ );
+ if (!ctx->aws_provider) {
+ flb_plg_error(ins, "failed to create aws credential provider for sigv4 auth");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* If managed provider creation succeeds, then region key is present */
+ ctx->aws_region = flb_output_get_property(FLB_PROMETHEUS_REMOTE_WRITE_CREDENTIAL_PREFIX
+ "region", ctx->ins);
+ }
+#endif /* FLB_HAVE_AWS */
+#endif /* FLB_HAVE_SIGNV4 */
+
+ /* Check if SSL/TLS is enabled */
+#ifdef FLB_HAVE_TLS
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ }
+#else
+ io_flags = FLB_IO_TCP;
+#endif
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ if (ctx->proxy) {
+ flb_plg_trace(ctx->ins, "Upstream Proxy=%s:%i",
+ ctx->proxy_host, ctx->proxy_port);
+ upstream = flb_upstream_create(config,
+ ctx->proxy_host,
+ ctx->proxy_port,
+ io_flags, ins->tls);
+ }
+ else {
+ upstream = flb_upstream_create(config,
+ ins->host.name,
+ ins->host.port,
+ io_flags, ins->tls);
+ }
+
+ if (!upstream) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ if (ins->host.uri) {
+ uri = flb_strdup(ins->host.uri->full);
+ }
+ else {
+ tmp = flb_output_get_property("uri", ins);
+ if (tmp) {
+ uri = flb_strdup(tmp);
+ }
+ }
+
+ if (!uri) {
+ uri = flb_strdup("/");
+ }
+ else if (uri[0] != '/') {
+ ulen = strlen(uri);
+        tmp_uri = flb_malloc(ulen + 2);
+        if (!tmp_uri) {
+            flb_errno();
+            flb_free(uri);
+            flb_upstream_destroy(upstream);
+            flb_free(ctx);
+            return NULL;
+        }
+        tmp_uri[0] = '/';
+        memcpy(tmp_uri + 1, uri, ulen);
+        tmp_uri[ulen + 1] = '\0';
+ flb_free(uri);
+ uri = tmp_uri;
+ }
+
+ ctx->u = upstream;
+ ctx->uri = uri;
+ ctx->host = ins->host.name;
+ ctx->port = ins->host.port;
+
+ /* Set instance flags into upstream */
+ flb_output_upstream_set(ctx->u, ins);
+
+ return ctx;
+}
+
+void flb_prometheus_remote_write_context_destroy(
+ struct prometheus_remote_write_context *ctx)
+{
+ if (!ctx) {
+ return;
+ }
+
+ flb_kv_release(&ctx->kv_labels);
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+#ifdef FLB_HAVE_SIGNV4
+#ifdef FLB_HAVE_AWS
+ if (ctx->aws_provider) {
+ flb_aws_provider_destroy(ctx->aws_provider);
+ }
+#endif
+#endif
+
+ flb_free(ctx->proxy_host);
+ flb_free(ctx->uri);
+ flb_free(ctx);
+}
diff --git a/src/fluent-bit/plugins/out_prometheus_remote_write/remote_write_conf.h b/src/fluent-bit/plugins/out_prometheus_remote_write/remote_write_conf.h
new file mode 100644
index 000000000..e6268fc54
--- /dev/null
+++ b/src/fluent-bit/plugins/out_prometheus_remote_write/remote_write_conf.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_PROMETHEUS_REMOTE_WRITE_CONF_H
+#define FLB_OUT_PROMETHEUS_REMOTE_WRITE_CONF_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+
+#include "remote_write.h"
+
+struct prometheus_remote_write_context *flb_prometheus_remote_write_context_create(
+ struct flb_output_instance *ins, struct flb_config *config);
+void flb_prometheus_remote_write_context_destroy(
+ struct prometheus_remote_write_context *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_retry/CMakeLists.txt b/src/fluent-bit/plugins/out_retry/CMakeLists.txt
new file mode 100644
index 000000000..e206ff719
--- /dev/null
+++ b/src/fluent-bit/plugins/out_retry/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ retry.c)
+
+FLB_PLUGIN(out_retry "${src}" "")
diff --git a/src/fluent-bit/plugins/out_retry/retry.c b/src/fluent-bit/plugins/out_retry/retry.c
new file mode 100644
index 000000000..cb8f4da8c
--- /dev/null
+++ b/src/fluent-bit/plugins/out_retry/retry.c
@@ -0,0 +1,116 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_pack.h>
+
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+
+/* Retry context, only works with one instance */
+struct retry_ctx {
+ int n_retry; /* max retries before real flush (OK) */
+ int count; /* number of retries done */
+ struct flb_output_instance *ins; /* plugin instance */
+};
+
+
+static int cb_retry_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ (void) config;
+ (void) data;
+ struct retry_ctx *ctx;
+ int ret;
+
+ ctx = flb_calloc(1, sizeof(struct retry_ctx));
+ if (!ctx) {
+ return -1;
+ }
+ ctx->ins = ins;
+ ctx->count = 0;
+
+ ret = flb_output_config_map_set(ins, ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to load configuration");
+ return -1;
+ }
+
+ flb_output_set_context(ins, ctx);
+ return 0;
+}
+
+static void cb_retry_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ (void) i_ins;
+ (void) out_context;
+ (void) config;
+ struct retry_ctx *ctx;
+
+ ctx = out_context;
+ ctx->count++;
+
+ if (ctx->count <= ctx->n_retry) {
+ flb_plg_debug(ctx->ins, "retry %i/%i", ctx->count, ctx->n_retry);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "flush", ctx->count, ctx->n_retry);
+ ctx->count = 0;
+ }
+
+ flb_pack_print(event_chunk->data, event_chunk->size);
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int cb_retry_exit(void *data, struct flb_config *config)
+{
+ struct retry_ctx *ctx = data;
+ (void) config;
+
+ flb_free(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_INT, "retry", "3",
+ 0, FLB_TRUE, offsetof(struct retry_ctx, n_retry),
+ "Number of retries."
+ },
+ {0}
+};
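+
+/*
+ * Illustrative usage sketch (for exercising engine retries; values are
+ * examples only): return FLB_RETRY five times before printing and
+ * accepting a chunk:
+ *
+ *   [OUTPUT]
+ *       name   retry
+ *       match  *
+ *       retry  5
+ */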
+
+struct flb_output_plugin out_retry_plugin = {
+ .name = "retry",
+ .description = "Issue a retry upon flush request",
+ .cb_init = cb_retry_init,
+ .cb_flush = cb_retry_flush,
+ .cb_exit = cb_retry_exit,
+ .config_map = config_map,
+ .flags = 0,
+};
diff --git a/src/fluent-bit/plugins/out_s3/CMakeLists.txt b/src/fluent-bit/plugins/out_s3/CMakeLists.txt
new file mode 100644
index 000000000..94e048617
--- /dev/null
+++ b/src/fluent-bit/plugins/out_s3/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ s3.c
+ s3_store.c
+ s3_multipart.c)
+
+FLB_PLUGIN(out_s3 "${src}" "")
diff --git a/src/fluent-bit/plugins/out_s3/s3.c b/src/fluent-bit/plugins/out_s3/s3.c
new file mode 100644
index 000000000..57e68e6ef
--- /dev/null
+++ b/src/fluent-bit/plugins/out_s3/s3.c
@@ -0,0 +1,2500 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_slist.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_aws_util.h>
+#include <fluent-bit/aws/flb_aws_compress.h>
+#include <fluent-bit/flb_hash.h>
+#include <fluent-bit/flb_crypto.h>
+#include <fluent-bit/flb_signv4.h>
+#include <fluent-bit/flb_scheduler.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_base64.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+
+#include <msgpack.h>
+
+#include "s3.h"
+#include "s3_store.h"
+
+#define DEFAULT_S3_PORT 443
+#define DEFAULT_S3_INSECURE_PORT 80
+
+static int construct_request_buffer(struct flb_s3 *ctx, flb_sds_t new_data,
+ struct s3_file *chunk,
+ char **out_buf, size_t *out_size);
+
+static int s3_put_object(struct flb_s3 *ctx, const char *tag, time_t file_first_log_time,
+ char *body, size_t body_size);
+
+static int put_all_chunks(struct flb_s3 *ctx);
+
+static void cb_s3_upload(struct flb_config *ctx, void *data);
+
+static struct multipart_upload *get_upload(struct flb_s3 *ctx,
+ const char *tag, int tag_len);
+
+static struct multipart_upload *create_upload(struct flb_s3 *ctx,
+ const char *tag, int tag_len,
+ time_t file_first_log_time);
+
+static void remove_from_queue(struct upload_queue *entry);
+
+static struct flb_aws_header content_encoding_header = {
+ .key = "Content-Encoding",
+ .key_len = 16,
+ .val = "gzip",
+ .val_len = 4,
+};
+
+static struct flb_aws_header content_type_header = {
+ .key = "Content-Type",
+ .key_len = 12,
+ .val = "",
+ .val_len = 0,
+};
+
+static struct flb_aws_header canned_acl_header = {
+ .key = "x-amz-acl",
+ .key_len = 9,
+ .val = "",
+ .val_len = 0,
+};
+
+static struct flb_aws_header content_md5_header = {
+ .key = "Content-MD5",
+ .key_len = 11,
+ .val = "",
+ .val_len = 0,
+};
+
+static struct flb_aws_header storage_class_header = {
+ .key = "x-amz-storage-class",
+ .key_len = 19,
+ .val = "",
+ .val_len = 0,
+};
+
+static char *mock_error_response(char *error_env_var)
+{
+ char *err_val = NULL;
+ char *error = NULL;
+ int len = 0;
+
+ err_val = getenv(error_env_var);
+ if (err_val != NULL && strlen(err_val) > 0) {
+ error = flb_calloc(strlen(err_val) + 1, sizeof(char));
+ if (error == NULL) {
+ flb_errno();
+ return NULL;
+ }
+
+ len = strlen(err_val);
+ memcpy(error, err_val, len);
+ error[len] = '\0';
+ return error;
+ }
+
+ return NULL;
+}
+
+int s3_plugin_under_test()
+{
+ if (getenv("FLB_S3_PLUGIN_UNDER_TEST") != NULL) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+int create_headers(struct flb_s3 *ctx, char *body_md5,
+ struct flb_aws_header **headers, int *num_headers,
+ int multipart_upload)
+{
+ int n = 0;
+ int headers_len = 0;
+ struct flb_aws_header *s3_headers = NULL;
+
+ if (ctx->content_type != NULL) {
+ headers_len++;
+ }
+ if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+ headers_len++;
+ }
+ if (ctx->canned_acl != NULL) {
+ headers_len++;
+ }
+ if (body_md5 != NULL && strlen(body_md5) && multipart_upload == FLB_FALSE) {
+ headers_len++;
+ }
+ if (ctx->storage_class != NULL) {
+ headers_len++;
+ }
+ if (headers_len == 0) {
+ *num_headers = headers_len;
+ *headers = s3_headers;
+ return 0;
+ }
+
+ s3_headers = flb_calloc(headers_len, sizeof(struct flb_aws_header));
+ if (s3_headers == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ if (ctx->content_type != NULL) {
+ s3_headers[n] = content_type_header;
+ s3_headers[n].val = ctx->content_type;
+ s3_headers[n].val_len = strlen(ctx->content_type);
+ n++;
+ }
+ if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+ s3_headers[n] = content_encoding_header;
+ n++;
+ }
+ if (ctx->canned_acl != NULL) {
+ s3_headers[n] = canned_acl_header;
+ s3_headers[n].val = ctx->canned_acl;
+ s3_headers[n].val_len = strlen(ctx->canned_acl);
+ n++;
+ }
+ if (body_md5 != NULL && strlen(body_md5) && multipart_upload == FLB_FALSE) {
+ s3_headers[n] = content_md5_header;
+ s3_headers[n].val = body_md5;
+ s3_headers[n].val_len = strlen(body_md5);
+ n++;
+ }
+ if (ctx->storage_class != NULL) {
+ s3_headers[n] = storage_class_header;
+ s3_headers[n].val = ctx->storage_class;
+ s3_headers[n].val_len = strlen(ctx->storage_class);
+ }
+
+ *num_headers = headers_len;
+ *headers = s3_headers;
+ return 0;
+}
+
+struct flb_http_client *mock_s3_call(char *error_env_var, char *api)
+{
+ /* create an http client so that we can set the response */
+ struct flb_http_client *c = NULL;
+ char *error = mock_error_response(error_env_var);
+ char *resp;
+ int len;
+
+ c = flb_calloc(1, sizeof(struct flb_http_client));
+ if (!c) {
+ flb_errno();
+ flb_free(error);
+ return NULL;
+ }
+ mk_list_init(&c->headers);
+
+ if (error != NULL) {
+ c->resp.status = 400;
+ /* resp.data is freed on destroy, payload is supposed to reference it */
+ c->resp.data = error;
+ c->resp.payload = c->resp.data;
+ c->resp.payload_size = strlen(error);
+ }
+ else {
+ c->resp.status = 200;
+ c->resp.payload = "";
+ c->resp.payload_size = 0;
+ if (strcmp(api, "CreateMultipartUpload") == 0) {
+ /* mocked success response */
+ c->resp.payload = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+ "<InitiateMultipartUploadResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n"
+ "<Bucket>example-bucket</Bucket>\n"
+ "<Key>example-object</Key>\n"
+ "<UploadId>VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId>\n"
+ "</InitiateMultipartUploadResult>";
+ c->resp.payload_size = strlen(c->resp.payload);
+ }
+ else if (strcmp(api, "UploadPart") == 0) {
+ /* mocked success response */
+ resp = "Date: Mon, 1 Nov 2010 20:34:56 GMT\n"
+ "ETag: \"b54357faf0632cce46e942fa68356b38\"\n"
+ "Content-Length: 0\n"
+ "Connection: keep-alive\n"
+ "Server: AmazonS3";
+ /* since etag is in the headers, this code uses resp.data */
+ len = strlen(resp);
+ c->resp.data = flb_calloc(len + 1, sizeof(char));
+            if (!c->resp.data) {
+                flb_errno();
+                flb_free(c);
+                return NULL;
+            }
+ memcpy(c->resp.data, resp, len);
+ c->resp.data[len] = '\0';
+ c->resp.data_size = len;
+ }
+ else {
+ c->resp.payload = "";
+ c->resp.payload_size = 0;
+ }
+ }
+
+ return c;
+}
+
+static flb_sds_t concat_path(char *p1, char *p2)
+{
+ flb_sds_t dir;
+ flb_sds_t tmp;
+
+ dir = flb_sds_create_size(64);
+
+ tmp = flb_sds_printf(&dir, "%s/%s", p1, p2);
+ if (!tmp) {
+ flb_errno();
+ flb_sds_destroy(dir);
+ return NULL;
+ }
+ dir = tmp;
+
+ return dir;
+}
+
+/* Reads in index value from metadata file and sets seq_index to value */
+static int read_seq_index(char *seq_index_file, uint64_t *seq_index)
+{
+ FILE *fp;
+ int ret;
+
+ fp = fopen(seq_index_file, "r");
+ if (fp == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ ret = fscanf(fp, "%"PRIu64, seq_index);
+ if (ret != 1) {
+ fclose(fp);
+ flb_errno();
+ return -1;
+ }
+
+ fclose(fp);
+ return 0;
+}
+
+/* Writes index value to metadata file */
+static int write_seq_index(char *seq_index_file, uint64_t seq_index)
+{
+ FILE *fp;
+ int ret;
+
+ fp = fopen(seq_index_file, "w+");
+ if (fp == NULL) {
+ flb_errno();
+ return -1;
+ }
+
+ ret = fprintf(fp, "%"PRIu64, seq_index);
+ if (ret < 0) {
+ fclose(fp);
+ flb_errno();
+ return -1;
+ }
+
+ fclose(fp);
+ return 0;
+}
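+
+/*
+ * Note: the sequential index metadata file holds a single decimal integer
+ * (for example, a file containing just '42'); read_seq_index() and
+ * write_seq_index() above are its only readers and writers.
+ */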
+
+static int init_seq_index(void *context)
+{
+ int ret;
+ const char *tmp;
+ char tmp_buf[1024];
+ struct flb_s3 *ctx = context;
+
+ ctx->key_fmt_has_seq_index = FLB_TRUE;
+
+ ctx->stream_metadata = flb_fstore_stream_create(ctx->fs, "sequence");
+ if (!ctx->stream_metadata) {
+ flb_plg_error(ctx->ins, "could not initialize metadata stream");
+ flb_fstore_destroy(ctx->fs);
+ ctx->fs = NULL;
+ return -1;
+ }
+
+ /* Construct directories and file path names */
+ ctx->metadata_dir = flb_sds_create(ctx->stream_metadata->path);
+ if (ctx->metadata_dir == NULL) {
+ flb_plg_error(ctx->ins, "Failed to create metadata path");
+ flb_errno();
+ return -1;
+ }
+ tmp = "/index_metadata";
+ ret = flb_sds_cat_safe(&ctx->metadata_dir, tmp, strlen(tmp));
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to create metadata path");
+ flb_errno();
+ return -1;
+ }
+
+ ctx->seq_index_file = flb_sds_create(ctx->metadata_dir);
+ if (ctx->seq_index_file == NULL) {
+ flb_plg_error(ctx->ins, "Failed to create sequential index file path");
+ flb_errno();
+ return -1;
+ }
+ tmp = "/seq_index_";
+ ret = flb_sds_cat_safe(&ctx->seq_index_file, tmp, strlen(tmp));
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to create sequential index file path");
+ flb_errno();
+ return -1;
+ }
+
+ sprintf(tmp_buf, "%d", ctx->ins->id);
+ ret = flb_sds_cat_safe(&ctx->seq_index_file, tmp_buf, strlen(tmp_buf));
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to create sequential index file path");
+ flb_errno();
+ return -1;
+ }
+
+ /* Create directory path if it doesn't exist */
+ ret = mkdir(ctx->metadata_dir, 0700);
+ if (ret < 0 && errno != EEXIST) {
+ flb_plg_error(ctx->ins, "Failed to create metadata directory");
+ return -1;
+ }
+
+ /* Check if index file doesn't exist and set index value */
+ if (access(ctx->seq_index_file, F_OK) != 0) {
+ ctx->seq_index = 0;
+ ret = write_seq_index(ctx->seq_index_file, ctx->seq_index);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to write to sequential index metadata file");
+ return -1;
+ }
+ }
+ else {
+ ret = read_seq_index(ctx->seq_index_file, &ctx->seq_index);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to read from sequential index "
+ "metadata file");
+ return -1;
+ }
+ flb_plg_info(ctx->ins, "Successfully recovered index. "
+ "Continuing at index=%"PRIu64, ctx->seq_index);
+ }
+ return 0;
+}
+
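+/* Free a multipart_upload entry: S3 key, tag, upload ID and any stored part ETags */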
+void multipart_upload_destroy(struct multipart_upload *m_upload)
+{
+ int i;
+ flb_sds_t etag;
+
+ if (!m_upload) {
+ return;
+ }
+
+ if (m_upload->s3_key) {
+ flb_sds_destroy(m_upload->s3_key);
+ }
+ if (m_upload->tag) {
+ flb_sds_destroy(m_upload->tag);
+ }
+ if (m_upload->upload_id) {
+ flb_sds_destroy(m_upload->upload_id);
+ }
+
+ for (i = 0; i < m_upload->part_number; i++) {
+ etag = m_upload->etags[i];
+ if (etag) {
+ flb_sds_destroy(etag);
+ }
+ }
+
+ flb_free(m_upload);
+}
+
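+/*
+ * Destroy the plugin context: credential providers, TLS contexts, the S3
+ * client, buffer paths, and any pending uploads or queued entries.
+ */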
+static void s3_context_destroy(struct flb_s3 *ctx)
+{
+ struct mk_list *head;
+ struct mk_list *tmp;
+ struct multipart_upload *m_upload;
+ struct upload_queue *upload_contents;
+
+ if (!ctx) {
+ return;
+ }
+
+ if (ctx->base_provider) {
+ flb_aws_provider_destroy(ctx->base_provider);
+ }
+
+ if (ctx->provider) {
+ flb_aws_provider_destroy(ctx->provider);
+ }
+
+ if (ctx->provider_tls) {
+ flb_tls_destroy(ctx->provider_tls);
+ }
+
+ if (ctx->sts_provider_tls) {
+ flb_tls_destroy(ctx->sts_provider_tls);
+ }
+
+ if (ctx->s3_client) {
+ flb_aws_client_destroy(ctx->s3_client);
+ }
+
+ if (ctx->client_tls) {
+ flb_tls_destroy(ctx->client_tls);
+ }
+
+ if (ctx->free_endpoint == FLB_TRUE) {
+ flb_free(ctx->endpoint);
+ }
+
+ if (ctx->buffer_dir) {
+ flb_sds_destroy(ctx->buffer_dir);
+ }
+
+ if (ctx->metadata_dir) {
+ flb_sds_destroy(ctx->metadata_dir);
+ }
+
+ if (ctx->seq_index_file) {
+ flb_sds_destroy(ctx->seq_index_file);
+ }
+
+ /* Remove uploads */
+ mk_list_foreach_safe(head, tmp, &ctx->uploads) {
+ m_upload = mk_list_entry(head, struct multipart_upload, _head);
+ mk_list_del(&m_upload->_head);
+ multipart_upload_destroy(m_upload);
+ }
+
+ mk_list_foreach_safe(head, tmp, &ctx->upload_queue) {
+ upload_contents = mk_list_entry(head, struct upload_queue, _head);
+ s3_store_file_delete(ctx, upload_contents->upload_file);
+ multipart_upload_destroy(upload_contents->m_upload_file);
+ remove_from_queue(upload_contents);
+ }
+
+ flb_free(ctx);
+}
+
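+/*
+ * Plugin initialization: load and validate configuration, set up local
+ * buffering, credential providers and the S3 client, then resend any data
+ * left over from previous executions.
+ */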
+static int cb_s3_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ flb_sds_t tmp_sds;
+ char *role_arn = NULL;
+ char *session_name;
+ const char *tmp;
+ struct flb_s3 *ctx = NULL;
+ struct flb_aws_client_generator *generator;
+ (void) config;
+ (void) data;
+ char *ep;
+ struct flb_split_entry *tok;
+ struct mk_list *split;
+ int list_size;
+
+ ctx = flb_calloc(1, sizeof(struct flb_s3));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+ mk_list_init(&ctx->uploads);
+ mk_list_init(&ctx->upload_queue);
+
+ ctx->retry_time = 0;
+ ctx->upload_queue_success = FLB_FALSE;
+
+ /* Export context */
+ flb_output_set_context(ins, ctx);
+
+ /* initialize config map */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ return -1;
+ }
+
+    /* the check against -1 works here because size_t is unsigned
+     * and -1 converts to the maximum unsigned value;
+     * Fluent Bit uses -1 (which becomes the max value) to indicate undefined
+     */
+ if (ctx->ins->total_limit_size != -1) {
+ flb_plg_warn(ctx->ins, "Please use 'store_dir_limit_size' with s3 output instead of 'storage.total_limit_size'. "
+ "S3 has its own buffer files located in the store_dir.");
+ }
+
+ /* Date key */
+ ctx->date_key = ctx->json_date_key;
+ tmp = flb_output_get_property("json_date_key", ins);
+ if (tmp) {
+ /* Just check if we have to disable it */
+ if (flb_utils_bool(tmp) == FLB_FALSE) {
+ ctx->date_key = NULL;
+ }
+ }
+
+ /* Date format for JSON output */
+ ctx->json_date_format = FLB_PACK_JSON_DATE_ISO8601;
+ tmp = flb_output_get_property("json_date_format", ins);
+ if (tmp) {
+ ret = flb_pack_to_json_date_type(tmp);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "invalid json_date_format '%s'. ", tmp);
+ return -1;
+ }
+ else {
+ ctx->json_date_format = ret;
+ }
+ }
+
+ tmp = flb_output_get_property("bucket", ins);
+ if (!tmp) {
+ flb_plg_error(ctx->ins, "'bucket' is a required parameter");
+ return -1;
+ }
+
+    /*
+     * store_dir is the user input, buffer_dir is what the code uses.
+     * We append the bucket name to the dir to support multiple instances
+     * of this plugin using the same buffer dir.
+     */
+ tmp_sds = concat_path(ctx->store_dir, ctx->bucket);
+ if (!tmp_sds) {
+ flb_plg_error(ctx->ins, "Could not construct buffer path");
+ return -1;
+ }
+ ctx->buffer_dir = tmp_sds;
+
+ /* Initialize local storage */
+ ret = s3_store_init(ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Failed to initialize S3 storage: %s",
+ ctx->store_dir);
+ return -1;
+ }
+
+ tmp = flb_output_get_property("s3_key_format", ins);
+ if (tmp) {
+ if (tmp[0] != '/') {
+ flb_plg_error(ctx->ins, "'s3_key_format' must start with a '/'");
+ return -1;
+ }
+ if (strstr((char *) tmp, "$INDEX")) {
+ ret = init_seq_index(ctx);
+ if (ret < 0) {
+ return -1;
+ }
+ }
+ if (strstr((char *) tmp, "$UUID")) {
+ ctx->key_fmt_has_uuid = FLB_TRUE;
+ }
+ }
+
+ /* validate 'total_file_size' */
+ if (ctx->file_size <= 0) {
+ flb_plg_error(ctx->ins, "Failed to parse total_file_size %s", tmp);
+ return -1;
+ }
+ if (ctx->file_size < 1000000) {
+ flb_plg_error(ctx->ins, "total_file_size must be at least 1MB");
+ return -1;
+ }
+ if (ctx->file_size > MAX_FILE_SIZE) {
+ flb_plg_error(ctx->ins, "Max total_file_size is %s bytes", MAX_FILE_SIZE_STR);
+ return -1;
+ }
+ flb_plg_info(ctx->ins, "Using upload size %lu bytes", ctx->file_size);
+
+ if (ctx->use_put_object == FLB_FALSE && ctx->file_size < 2 * MIN_CHUNKED_UPLOAD_SIZE) {
+ flb_plg_info(ctx->ins,
+ "total_file_size is less than 10 MB, will use PutObject API");
+ ctx->use_put_object = FLB_TRUE;
+ }
+
+ tmp = flb_output_get_property("compression", ins);
+ if (tmp) {
+ ret = flb_aws_compression_get_type(tmp);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "unknown compression: %s", tmp);
+ return -1;
+ }
+        /* store the parsed type before validating it against use_put_object */
+        ctx->compression = ret;
+        if (ctx->use_put_object == FLB_FALSE && ctx->compression == FLB_AWS_COMPRESS_ARROW) {
+            flb_plg_error(ctx->ins,
+                          "use_put_object must be enabled when Apache Arrow is enabled");
+            return -1;
+        }
+ }
+
+ tmp = flb_output_get_property("content_type", ins);
+ if (tmp) {
+ ctx->content_type = (char *) tmp;
+ }
+ if (ctx->use_put_object == FLB_FALSE) {
+ /* upload_chunk_size */
+ if (ctx->upload_chunk_size <= 0) {
+ flb_plg_error(ctx->ins, "Failed to parse upload_chunk_size %s", tmp);
+ return -1;
+ }
+ if (ctx->upload_chunk_size > ctx->file_size) {
+ flb_plg_error(ctx->ins,
+ "upload_chunk_size can not be larger than total_file_size");
+ return -1;
+ }
+ if (ctx->upload_chunk_size < MIN_CHUNKED_UPLOAD_SIZE) {
+ flb_plg_error(ctx->ins, "upload_chunk_size must be at least 5,242,880 bytes");
+ return -1;
+ }
+ if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+            if (ctx->upload_chunk_size > MAX_CHUNKED_UPLOAD_COMPRESS_SIZE) {
+ flb_plg_error(ctx->ins, "upload_chunk_size in compressed multipart upload cannot exceed 5GB");
+ return -1;
+ }
+ } else {
+ if (ctx->upload_chunk_size > MAX_CHUNKED_UPLOAD_SIZE) {
+ flb_plg_error(ctx->ins, "Max upload_chunk_size is 50MB");
+ return -1;
+ }
+ }
+ }
+
+ if (ctx->upload_chunk_size != MIN_CHUNKED_UPLOAD_SIZE &&
+ (ctx->upload_chunk_size * 2) > ctx->file_size) {
+ flb_plg_error(ctx->ins, "total_file_size is less than 2x upload_chunk_size");
+ return -1;
+ }
+
+ if (ctx->use_put_object == FLB_TRUE) {
+ /*
+ * code internally uses 'upload_chunk_size' as the unit for each Put,
+ * regardless of which API is used to send data
+ */
+ ctx->upload_chunk_size = ctx->file_size;
+ if (ctx->file_size > MAX_FILE_SIZE_PUT_OBJECT) {
+ flb_plg_error(ctx->ins, "Max total_file_size is 50M when use_put_object is enabled");
+ return -1;
+ }
+ }
+
+ tmp = flb_output_get_property("endpoint", ins);
+ if (tmp) {
+ ctx->insecure = strncmp(tmp, "http://", 7) == 0 ? FLB_TRUE : FLB_FALSE;
+ if (ctx->insecure == FLB_TRUE) {
+ ep = removeProtocol((char *) tmp, "http://");
+ }
+ else {
+ ep = removeProtocol((char *) tmp, "https://");
+ }
+
+ split = flb_utils_split((const char *)ep, ':', 1);
+ if (!split) {
+ flb_errno();
+ return -1;
+ }
+ list_size = mk_list_size(split);
+ if (list_size > 2) {
+ flb_plg_error(ctx->ins, "Failed to split endpoint");
+ flb_utils_split_free(split);
+ return -1;
+ }
+
+ tok = mk_list_entry_first(split, struct flb_split_entry, _head);
+ ctx->endpoint = flb_strndup(tok->value, tok->len);
+ if (!ctx->endpoint) {
+ flb_errno();
+ flb_utils_split_free(split);
+ return -1;
+ }
+ ctx->free_endpoint = FLB_TRUE;
+ if (list_size == 2) {
+ tok = mk_list_entry_next(&tok->_head, struct flb_split_entry, _head, split);
+ ctx->port = atoi(tok->value);
+ }
+ else {
+ ctx->port = ctx->insecure == FLB_TRUE ? DEFAULT_S3_INSECURE_PORT : DEFAULT_S3_PORT;
+ }
+ flb_utils_split_free(split);
+ }
+ else {
+ /* default endpoint for the given region */
+ ctx->endpoint = flb_aws_endpoint("s3", ctx->region);
+ ctx->insecure = FLB_FALSE;
+ ctx->port = DEFAULT_S3_PORT;
+ ctx->free_endpoint = FLB_TRUE;
+ if (!ctx->endpoint) {
+ flb_plg_error(ctx->ins, "Could not construct S3 endpoint");
+ return -1;
+ }
+ }
+
+ tmp = flb_output_get_property("sts_endpoint", ins);
+ if (tmp) {
+ ctx->sts_endpoint = (char *) tmp;
+ }
+
+ tmp = flb_output_get_property("canned_acl", ins);
+ if (tmp) {
+ ctx->canned_acl = (char *) tmp;
+ }
+
+ tmp = flb_output_get_property("storage_class", ins);
+ if (tmp) {
+ ctx->storage_class = (char *) tmp;
+ }
+
+ if (ctx->insecure == FLB_FALSE) {
+ ctx->client_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ ins->tls_verify,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+ if (!ctx->client_tls) {
+ flb_plg_error(ctx->ins, "Failed to create tls context");
+ return -1;
+ }
+ }
+
+ /* AWS provider needs a separate TLS instance */
+ ctx->provider_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+ if (!ctx->provider_tls) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->provider = flb_standard_chain_provider_create(config,
+ ctx->provider_tls,
+ ctx->region,
+ ctx->sts_endpoint,
+ NULL,
+ flb_aws_client_generator(),
+ ctx->profile);
+
+ if (!ctx->provider) {
+ flb_plg_error(ctx->ins, "Failed to create AWS Credential Provider");
+ return -1;
+ }
+
+ tmp = flb_output_get_property("role_arn", ins);
+ if (tmp) {
+ /* Use the STS Provider */
+ ctx->base_provider = ctx->provider;
+ role_arn = (char *) tmp;
+
+ /* STS provider needs yet another separate TLS instance */
+ ctx->sts_provider_tls = flb_tls_create(FLB_TLS_CLIENT_MODE,
+ FLB_TRUE,
+ ins->tls_debug,
+ ins->tls_vhost,
+ ins->tls_ca_path,
+ ins->tls_ca_file,
+ ins->tls_crt_file,
+ ins->tls_key_file,
+ ins->tls_key_passwd);
+
+ if (!ctx->sts_provider_tls) {
+ flb_errno();
+ return -1;
+ }
+
+ session_name = flb_sts_session_name();
+ if (!session_name) {
+ flb_plg_error(ctx->ins, "Failed to create aws iam role "
+ "session name");
+ flb_errno();
+ return -1;
+ }
+
+ ctx->provider = flb_sts_provider_create(config,
+ ctx->sts_provider_tls,
+ ctx->base_provider,
+ ctx->external_id,
+ role_arn,
+ session_name,
+ ctx->region,
+ ctx->sts_endpoint,
+ NULL,
+ flb_aws_client_generator());
+ flb_free(session_name);
+ if (!ctx->provider) {
+ flb_plg_error(ctx->ins, "Failed to create AWS STS Credential "
+ "Provider");
+ return -1;
+ }
+ }
+
+ /* read any remaining buffers from previous (failed) executions */
+ ctx->has_old_buffers = s3_store_has_data(ctx);
+ ctx->has_old_uploads = s3_store_has_uploads(ctx);
+
+ /* Multipart */
+ multipart_read_uploads_from_fs(ctx);
+
+ if (mk_list_size(&ctx->uploads) > 0) {
+ /* note that these should be sent */
+ ctx->has_old_uploads = FLB_TRUE;
+ }
+
+ /* create S3 client */
+ generator = flb_aws_client_generator();
+ ctx->s3_client = generator->create();
+ if (!ctx->s3_client) {
+ return -1;
+ }
+ ctx->s3_client->name = "s3_client";
+ ctx->s3_client->has_auth = FLB_TRUE;
+ ctx->s3_client->provider = ctx->provider;
+ ctx->s3_client->region = ctx->region;
+ ctx->s3_client->service = "s3";
+ ctx->s3_client->port = ctx->port;
+ ctx->s3_client->flags = 0;
+ ctx->s3_client->proxy = NULL;
+ ctx->s3_client->s3_mode = S3_MODE_SIGNED_PAYLOAD;
+ ctx->s3_client->retry_requests = ctx->retry_requests;
+
+ if (ctx->insecure == FLB_TRUE) {
+ ctx->s3_client->upstream = flb_upstream_create(config, ctx->endpoint, ctx->port,
+ FLB_IO_TCP, NULL);
+ } else {
+ ctx->s3_client->upstream = flb_upstream_create(config, ctx->endpoint, ctx->port,
+ FLB_IO_TLS, ctx->client_tls);
+ }
+ if (!ctx->s3_client->upstream) {
+ flb_plg_error(ctx->ins, "Connection initialization error");
+ return -1;
+ }
+
+ flb_output_upstream_set(ctx->s3_client->upstream, ctx->ins);
+
+ ctx->s3_client->host = ctx->endpoint;
+
+ /* set to sync mode and initialize credentials */
+ ctx->provider->provider_vtable->sync(ctx->provider);
+ ctx->provider->provider_vtable->init(ctx->provider);
+
+ ctx->timer_created = FLB_FALSE;
+ ctx->timer_ms = (int) (ctx->upload_timeout / 6) * 1000;
+ if (ctx->timer_ms > UPLOAD_TIMER_MAX_WAIT) {
+ ctx->timer_ms = UPLOAD_TIMER_MAX_WAIT;
+ }
+ else if (ctx->timer_ms < UPLOAD_TIMER_MIN_WAIT) {
+ ctx->timer_ms = UPLOAD_TIMER_MIN_WAIT;
+ }
+
+    /*
+     * S3 must ALWAYS use sync mode.
+     * In the timer thread we do a mk_list_foreach_safe on the queue of uploads and chunks.
+     * Iterating over those lists is not concurrency safe; if a flush call ran at the same time
+     * and deleted an item from the list, this could cause a crash/corruption.
+     */
+ flb_stream_disable_async_mode(&ctx->s3_client->upstream->base);
+
+ /* clean up any old buffers found on startup */
+ if (ctx->has_old_buffers == FLB_TRUE) {
+ flb_plg_info(ctx->ins,
+ "Sending locally buffered data from previous "
+ "executions to S3; buffer=%s",
+ ctx->fs->root_path);
+ ctx->has_old_buffers = FLB_FALSE;
+ ret = put_all_chunks(ctx);
+ if (ret < 0) {
+ ctx->has_old_buffers = FLB_TRUE;
+ flb_plg_error(ctx->ins,
+ "Failed to send locally buffered data left over "
+ "from previous executions; will retry. Buffer=%s",
+ ctx->fs->root_path);
+ }
+ }
+
+ /* clean up any old uploads found on start up */
+ if (ctx->has_old_uploads == FLB_TRUE) {
+ flb_plg_info(ctx->ins,
+ "Completing multipart uploads from previous "
+ "executions to S3; buffer=%s",
+ ctx->stream_upload->path);
+ ctx->has_old_uploads = FLB_FALSE;
+
+ /*
+ * we don't need to worry if this fails; it will retry each
+ * time the upload callback is called
+ */
+ cb_s3_upload(config, ctx);
+ }
+
+ /* this is done last since in the previous block we make calls to AWS */
+ ctx->provider->provider_vtable->upstream_set(ctx->provider, ctx->ins);
+
+ return 0;
+}
+
+/*
+ * return value is one of FLB_OK, FLB_RETRY, FLB_ERROR
+ *
+ * Chunk is allowed to be NULL
+ */
+static int upload_data(struct flb_s3 *ctx, struct s3_file *chunk,
+ struct multipart_upload *m_upload,
+ char *body, size_t body_size,
+ const char *tag, int tag_len)
+{
+ int init_upload = FLB_FALSE;
+ int complete_upload = FLB_FALSE;
+ int size_check = FLB_FALSE;
+ int part_num_check = FLB_FALSE;
+ int timeout_check = FLB_FALSE;
+ int ret;
+ void *payload_buf = NULL;
+ size_t payload_size = 0;
+ size_t preCompress_size = 0;
+ time_t file_first_log_time = time(NULL);
+
+    /*
+     * When chunk does not exist, file_first_log_time will be the current time.
+     * This is only for unit tests and prevents them from segfaulting when chunk is
+     * NULL, because in that case chunk->first_log_time would be invalid and would
+     * cause a segfault during the PutObject or multipart upload.
+     */
+ if (chunk != NULL) {
+ file_first_log_time = chunk->first_log_time;
+ }
+
+ if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+ /* Map payload */
+ ret = flb_aws_compression_compress(ctx->compression, body, body_size, &payload_buf, &payload_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Failed to compress data");
+ return FLB_RETRY;
+ } else {
+ preCompress_size = body_size;
+ body = (void *) payload_buf;
+ body_size = payload_size;
+ }
+ }
+
+ if (ctx->use_put_object == FLB_TRUE) {
+ goto put_object;
+ }
+
+ if (s3_plugin_under_test() == FLB_TRUE) {
+ init_upload = FLB_TRUE;
+ complete_upload = FLB_TRUE;
+ if (ctx->use_put_object == FLB_TRUE) {
+ goto put_object;
+ }
+ else {
+ goto multipart;
+ }
+ }
+
+ if (m_upload == NULL) {
+ if (chunk != NULL && time(NULL) >
+ (chunk->create_time + ctx->upload_timeout + ctx->retry_time)) {
+ /* timeout already reached, just PutObject */
+ goto put_object;
+ }
+ else if (body_size >= ctx->file_size) {
+ /* already big enough, just use PutObject API */
+ goto put_object;
+ }
+        else if (body_size > MIN_CHUNKED_UPLOAD_SIZE) {
+ init_upload = FLB_TRUE;
+ goto multipart;
+ }
+ else {
+ if (ctx->use_put_object == FLB_FALSE && ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+ flb_plg_info(ctx->ins, "Pre-compression upload_chunk_size= %zu, After compression, chunk is only %zu bytes, "
+ "the chunk was too small, using PutObject to upload", preCompress_size, body_size);
+ }
+ goto put_object;
+ }
+ }
+ else {
+ /* existing upload */
+ if (body_size < MIN_CHUNKED_UPLOAD_SIZE) {
+ complete_upload = FLB_TRUE;
+ }
+
+ goto multipart;
+ }
+
+put_object:
+
+    /*
+     * send the data with the PutObject API; on success the local chunk
+     * buffer is deleted below
+     */
+ ret = s3_put_object(ctx, tag, file_first_log_time, body, body_size);
+ if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+ flb_free(payload_buf);
+ }
+ if (ret < 0) {
+ /* re-add chunk to list */
+ if (chunk) {
+ s3_store_file_unlock(chunk);
+ chunk->failures += 1;
+ }
+ return FLB_RETRY;
+ }
+
+    /* data was sent successfully; delete the local buffer */
+ if (chunk) {
+ s3_store_file_delete(ctx, chunk);
+ }
+ return FLB_OK;
+
+multipart:
+
+ if (init_upload == FLB_TRUE) {
+ m_upload = create_upload(ctx, tag, tag_len, file_first_log_time);
+ if (!m_upload) {
+ flb_plg_error(ctx->ins, "Could not find or create upload for tag %s", tag);
+ if (chunk) {
+ s3_store_file_unlock(chunk);
+ }
+ if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+ flb_free(payload_buf);
+ }
+ return FLB_RETRY;
+ }
+ }
+
+ if (m_upload->upload_state == MULTIPART_UPLOAD_STATE_NOT_CREATED) {
+ ret = create_multipart_upload(ctx, m_upload);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Could not initiate multipart upload");
+ if (chunk) {
+ s3_store_file_unlock(chunk);
+ }
+ if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+ flb_free(payload_buf);
+ }
+ return FLB_RETRY;
+ }
+ m_upload->upload_state = MULTIPART_UPLOAD_STATE_CREATED;
+ }
+
+ ret = upload_part(ctx, m_upload, body, body_size);
+ if (ret < 0) {
+ if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+ flb_free(payload_buf);
+ }
+ m_upload->upload_errors += 1;
+ /* re-add chunk to list */
+ if (chunk) {
+ s3_store_file_unlock(chunk);
+ chunk->failures += 1;
+ }
+ return FLB_RETRY;
+ }
+ m_upload->part_number += 1;
+    /* data was sent successfully; delete the local buffer */
+ if (chunk) {
+ s3_store_file_delete(ctx, chunk);
+ chunk = NULL;
+ }
+ if (ctx->compression == FLB_AWS_COMPRESS_GZIP) {
+ flb_free(payload_buf);
+ }
+ if (m_upload->bytes >= ctx->file_size) {
+ size_check = FLB_TRUE;
+ flb_plg_info(ctx->ins, "Will complete upload for %s because uploaded data is greater"
+ " than size set by total_file_size", m_upload->s3_key);
+ }
+ if (m_upload->part_number >= 10000) {
+ part_num_check = FLB_TRUE;
+ flb_plg_info(ctx->ins, "Will complete upload for %s because 10,000 chunks "
+ "(the API limit) have been uploaded", m_upload->s3_key);
+ }
+ if (time(NULL) >
+ (m_upload->init_time + ctx->upload_timeout + ctx->retry_time)) {
+ timeout_check = FLB_TRUE;
+ flb_plg_info(ctx->ins, "Will complete upload for %s because upload_timeout"
+ " has elapsed", m_upload->s3_key);
+ }
+ if (size_check || part_num_check || timeout_check) {
+ complete_upload = FLB_TRUE;
+ }
+
+ if (complete_upload == FLB_TRUE) {
+        /* mark for completion; the upload timer will handle actual completion */
+ m_upload->upload_state = MULTIPART_UPLOAD_STATE_COMPLETE_IN_PROGRESS;
+ }
+
+ return FLB_OK;
+}
+
+
+/*
+ * Attempts to send all chunks to S3 using PutObject
+ * Used on shut down to try to send all buffered data
+ * Used on start up to try to send any leftover buffers from previous executions
+ */
+static int put_all_chunks(struct flb_s3 *ctx)
+{
+ struct s3_file *chunk;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct mk_list *f_head;
+ struct flb_fstore_file *fsf;
+ struct flb_fstore_stream *fs_stream;
+ void *payload_buf = NULL;
+ size_t payload_size = 0;
+ char *buffer = NULL;
+ size_t buffer_size;
+ int ret;
+
+ mk_list_foreach(head, &ctx->fs->streams) {
+ /* skip multi upload stream */
+ fs_stream = mk_list_entry(head, struct flb_fstore_stream, _head);
+ if (fs_stream == ctx->stream_upload) {
+ continue;
+ }
+ /* skip metadata stream */
+ if (fs_stream == ctx->stream_metadata) {
+ continue;
+ }
+
+ mk_list_foreach_safe(f_head, tmp, &fs_stream->files) {
+ fsf = mk_list_entry(f_head, struct flb_fstore_file, _head);
+ chunk = fsf->data;
+
+ /* Locked chunks are being processed, skip */
+ if (chunk->locked == FLB_TRUE) {
+ continue;
+ }
+
+ if (chunk->failures >= MAX_UPLOAD_ERRORS) {
+ flb_plg_warn(ctx->ins,
+ "Chunk for tag %s failed to send %i times, "
+ "will not retry",
+ (char *) fsf->meta_buf, MAX_UPLOAD_ERRORS);
+ flb_fstore_file_inactive(ctx->fs, fsf);
+ continue;
+ }
+
+ ret = construct_request_buffer(ctx, NULL, chunk,
+ &buffer, &buffer_size);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins,
+ "Could not construct request buffer for %s",
+ chunk->file_path);
+ return -1;
+ }
+
+ if (ctx->compression != FLB_AWS_COMPRESS_NONE) {
+ /* Map payload */
+ ret = flb_aws_compression_compress(ctx->compression, buffer, buffer_size, &payload_buf, &payload_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Failed to compress data, uploading uncompressed data instead to prevent data loss");
+ } else {
+ flb_plg_info(ctx->ins, "Pre-compression chunk size is %zu, After compression, chunk is %zu bytes", buffer_size, payload_size);
+ buffer = (void *) payload_buf;
+ buffer_size = payload_size;
+ }
+ }
+
+ ret = s3_put_object(ctx, (const char *)
+ fsf->meta_buf,
+ chunk->create_time, buffer, buffer_size);
+ flb_free(buffer);
+ if (ret < 0) {
+ s3_store_file_unlock(chunk);
+ chunk->failures += 1;
+ return -1;
+ }
+
+            /* data was sent successfully; delete the local buffer */
+ s3_store_file_delete(ctx, chunk);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Either new_data or chunk can be NULL, but not both
+ */
+static int construct_request_buffer(struct flb_s3 *ctx, flb_sds_t new_data,
+ struct s3_file *chunk,
+ char **out_buf, size_t *out_size)
+{
+ char *body;
+ char *tmp;
+ size_t body_size = 0;
+ char *buffered_data = NULL;
+ size_t buffer_size = 0;
+ int ret;
+
+ if (new_data == NULL && chunk == NULL) {
+ flb_plg_error(ctx->ins, "[construct_request_buffer] Something went wrong"
+ " both chunk and new_data are NULL");
+ return -1;
+ }
+
+ if (chunk) {
+ ret = s3_store_file_read(ctx, chunk, &buffered_data, &buffer_size);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Could not read locally buffered data %s",
+ chunk->file_path);
+ return -1;
+ }
+
+ /*
+ * lock the chunk from buffer list
+ */
+ s3_store_file_lock(chunk);
+ body = buffered_data;
+ body_size = buffer_size;
+ }
+
+ /*
+ * If new data is arriving, increase the original 'buffered_data' size
+ * to append the new one.
+ */
+ if (new_data) {
+ body_size += flb_sds_len(new_data);
+
+ tmp = flb_realloc(buffered_data, body_size + 1);
+ if (!tmp) {
+ flb_errno();
+ flb_free(buffered_data);
+ if (chunk) {
+ s3_store_file_unlock(chunk);
+ }
+ return -1;
+ }
+ body = buffered_data = tmp;
+ memcpy(body + buffer_size, new_data, flb_sds_len(new_data));
+ body[body_size] = '\0';
+ }
+
+ *out_buf = body;
+ *out_size = body_size;
+
+ return 0;
+}
+
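+/*
+ * Send a single PutObject request: the S3 key is built from the tag and the
+ * first-log timestamp; the Content-MD5 header and the sequential index are
+ * added when enabled.
+ */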
+static int s3_put_object(struct flb_s3 *ctx, const char *tag, time_t file_first_log_time,
+ char *body, size_t body_size)
+{
+ flb_sds_t s3_key = NULL;
+ struct flb_http_client *c = NULL;
+ struct flb_aws_client *s3_client;
+ struct flb_aws_header *headers = NULL;
+ char *random_alphanumeric;
+ int append_random = FLB_FALSE;
+ int len;
+ int ret;
+ int num_headers = 0;
+ char *final_key;
+ flb_sds_t uri;
+ flb_sds_t tmp;
+ char final_body_md5[25];
+
+ s3_key = flb_get_s3_key(ctx->s3_key_format, file_first_log_time, tag,
+ ctx->tag_delimiters, ctx->seq_index);
+ if (!s3_key) {
+ flb_plg_error(ctx->ins, "Failed to construct S3 Object Key for %s", tag);
+ return -1;
+ }
+
+ len = strlen(s3_key);
+ if ((len + 16) <= 1024 && !ctx->key_fmt_has_uuid && !ctx->static_file_path &&
+ !ctx->key_fmt_has_seq_index) {
+ append_random = FLB_TRUE;
+ len += 16;
+ }
+    len += strlen(ctx->bucket) + 1; /* +1 for the '/' separator in the URI */
+
+ uri = flb_sds_create_size(len);
+
+ if (append_random == FLB_TRUE) {
+ random_alphanumeric = flb_sts_session_name();
+ if (!random_alphanumeric) {
+ flb_sds_destroy(s3_key);
+ flb_sds_destroy(uri);
+ flb_plg_error(ctx->ins, "Failed to create randomness for S3 key %s", tag);
+ return -1;
+ }
+ /* only use 8 chars of the random string */
+ random_alphanumeric[8] = '\0';
+
+ tmp = flb_sds_printf(&uri, "/%s%s-object%s", ctx->bucket, s3_key,
+ random_alphanumeric);
+ flb_free(random_alphanumeric);
+ }
+ else {
+ tmp = flb_sds_printf(&uri, "/%s%s", ctx->bucket, s3_key);
+ }
+
+ if (!tmp) {
+ flb_sds_destroy(s3_key);
+ flb_plg_error(ctx->ins, "Failed to create PutObject URI");
+ return -1;
+ }
+ flb_sds_destroy(s3_key);
+ uri = tmp;
+
+ memset(final_body_md5, 0, sizeof(final_body_md5));
+ if (ctx->send_content_md5 == FLB_TRUE) {
+ ret = get_md5_base64(body, body_size,
+ final_body_md5, sizeof(final_body_md5));
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "Failed to create Content-MD5 header");
+ flb_sds_destroy(uri);
+ return -1;
+ }
+ }
+
+ /* Update file and increment index value right before request */
+ if (ctx->key_fmt_has_seq_index) {
+ ctx->seq_index++;
+
+ ret = write_seq_index(ctx->seq_index_file, ctx->seq_index);
+ if (ret < 0 && access(ctx->seq_index_file, F_OK) == 0) {
+ ctx->seq_index--;
+            flb_sds_destroy(uri); /* s3_key was already destroyed above */
+ flb_plg_error(ctx->ins, "Failed to update sequential index metadata file");
+ return -1;
+ }
+ }
+
+ s3_client = ctx->s3_client;
+ if (s3_plugin_under_test() == FLB_TRUE) {
+ c = mock_s3_call("TEST_PUT_OBJECT_ERROR", "PutObject");
+ }
+ else {
+ ret = create_headers(ctx, final_body_md5, &headers, &num_headers, FLB_FALSE);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Failed to create headers");
+ flb_sds_destroy(uri);
+ goto decrement_index;
+ }
+ c = s3_client->client_vtable->request(s3_client, FLB_HTTP_PUT,
+ uri, body, body_size,
+ headers, num_headers);
+ flb_free(headers);
+ }
+ if (c) {
+ flb_plg_debug(ctx->ins, "PutObject http status=%d", c->resp.status);
+ if (c->resp.status == 200) {
+ /*
+ * URI contains bucket name, so we must advance over it
+ * to print the object key
+ */
+ final_key = uri + strlen(ctx->bucket) + 1;
+ flb_plg_info(ctx->ins, "Successfully uploaded object %s", final_key);
+ flb_sds_destroy(uri);
+ flb_http_client_destroy(c);
+
+ return 0;
+ }
+ flb_aws_print_xml_error(c->resp.payload, c->resp.payload_size,
+ "PutObject", ctx->ins);
+ if (c->resp.data != NULL) {
+ flb_plg_error(ctx->ins, "Raw PutObject response: %s", c->resp.data);
+ }
+ flb_http_client_destroy(c);
+ }
+
+ flb_plg_error(ctx->ins, "PutObject request failed");
+ flb_sds_destroy(uri);
+ goto decrement_index;
+
+decrement_index:
+ if (ctx->key_fmt_has_seq_index) {
+ ctx->seq_index--;
+
+ ret = write_seq_index(ctx->seq_index_file, ctx->seq_index);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Failed to decrement index after request error");
+ return -1;
+ }
+ }
+ return -1;
+}
+
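+/* Compute the MD5 digest of buf and base64-encode it (used for the Content-MD5 header) */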
+int get_md5_base64(char *buf, size_t buf_size, char *md5_str, size_t md5_str_size)
+{
+ unsigned char md5_bin[16];
+ size_t olen;
+ int ret;
+
+ ret = flb_hash_simple(FLB_HASH_MD5,
+ (unsigned char *) buf, buf_size,
+ md5_bin, sizeof(md5_bin));
+
+ if (ret != FLB_CRYPTO_SUCCESS) {
+ return -1;
+ }
+
+ ret = flb_base64_encode((unsigned char*) md5_str, md5_str_size,
+ &olen, md5_bin, sizeof(md5_bin));
+ if (ret != 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
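+/*
+ * Return the in-progress multipart upload matching the given tag, skipping
+ * uploads that are completing or have exceeded MAX_UPLOAD_ERRORS.
+ */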
+static struct multipart_upload *get_upload(struct flb_s3 *ctx,
+ const char *tag, int tag_len)
+{
+ struct multipart_upload *m_upload = NULL;
+ struct multipart_upload *tmp_upload = NULL;
+ struct mk_list *tmp;
+ struct mk_list *head;
+
+ mk_list_foreach_safe(head, tmp, &ctx->uploads) {
+ tmp_upload = mk_list_entry(head, struct multipart_upload, _head);
+
+ if (tmp_upload->upload_state == MULTIPART_UPLOAD_STATE_COMPLETE_IN_PROGRESS) {
+ continue;
+ }
+ if (tmp_upload->upload_errors >= MAX_UPLOAD_ERRORS) {
+ tmp_upload->upload_state = MULTIPART_UPLOAD_STATE_COMPLETE_IN_PROGRESS;
+ flb_plg_error(ctx->ins, "Upload for %s has reached max upload errors",
+ tmp_upload->s3_key);
+ continue;
+ }
+ if (strcmp(tmp_upload->tag, tag) == 0) {
+ m_upload = tmp_upload;
+ break;
+ }
+ }
+
+ return m_upload;
+}
+
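+/*
+ * Allocate a new multipart_upload for the tag, build its S3 key, register it
+ * in the uploads list and advance the sequential index if one is in use.
+ */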
+static struct multipart_upload *create_upload(struct flb_s3 *ctx, const char *tag,
+ int tag_len, time_t file_first_log_time)
+{
+ int ret;
+ struct multipart_upload *m_upload = NULL;
+ flb_sds_t s3_key = NULL;
+ flb_sds_t tmp_sds = NULL;
+
+ /* create new upload for this key */
+ m_upload = flb_calloc(1, sizeof(struct multipart_upload));
+ if (!m_upload) {
+ flb_errno();
+ return NULL;
+ }
+ s3_key = flb_get_s3_key(ctx->s3_key_format, file_first_log_time, tag,
+ ctx->tag_delimiters, ctx->seq_index);
+ if (!s3_key) {
+ flb_plg_error(ctx->ins, "Failed to construct S3 Object Key for %s", tag);
+ flb_free(m_upload);
+ return NULL;
+ }
+ m_upload->s3_key = s3_key;
+ tmp_sds = flb_sds_create_len(tag, tag_len);
+ if (!tmp_sds) {
+ flb_errno();
+ flb_free(m_upload);
+ return NULL;
+ }
+ m_upload->tag = tmp_sds;
+ m_upload->upload_state = MULTIPART_UPLOAD_STATE_NOT_CREATED;
+ m_upload->part_number = 1;
+ m_upload->init_time = time(NULL);
+ mk_list_add(&m_upload->_head, &ctx->uploads);
+
+ /* Update file and increment index value right before request */
+ if (ctx->key_fmt_has_seq_index) {
+ ctx->seq_index++;
+
+ ret = write_seq_index(ctx->seq_index_file, ctx->seq_index);
+ if (ret < 0) {
+ ctx->seq_index--;
+ flb_sds_destroy(s3_key);
+ flb_plg_error(ctx->ins, "Failed to write to sequential index metadata file");
+ return NULL;
+ }
+ }
+
+ return m_upload;
+}
+
+/* Adds an entry to upload queue */
+static int add_to_queue(struct flb_s3 *ctx, struct s3_file *upload_file,
+ struct multipart_upload *m_upload_file, const char *tag, int tag_len)
+{
+ struct upload_queue *upload_contents;
+ flb_sds_t tag_cpy;
+
+ /* Create upload contents object and add to upload queue */
+ upload_contents = flb_calloc(1, sizeof(struct upload_queue));
+ if (upload_contents == NULL) {
+ flb_plg_error(ctx->ins, "Error allocating memory for upload_queue entry");
+ flb_errno();
+ return -1;
+ }
+ upload_contents->upload_file = upload_file;
+ upload_contents->m_upload_file = m_upload_file;
+ upload_contents->tag_len = tag_len;
+ upload_contents->retry_counter = 0;
+ upload_contents->upload_time = -1;
+
+ /* Necessary to create separate string for tag to prevent corruption */
+ tag_cpy = flb_sds_create_len(tag, tag_len);
+ if (!tag_cpy) {
+ flb_errno();
+ flb_free(upload_contents);
+ return -1;
+ }
+ upload_contents->tag = tag_cpy;
+
+
+ /* Add entry to upload queue */
+ mk_list_add(&upload_contents->_head, &ctx->upload_queue);
+ return 0;
+}
+
+/* Removes an entry from upload_queue */
+void remove_from_queue(struct upload_queue *entry)
+{
+ mk_list_del(&entry->_head);
+ flb_sds_destroy(entry->tag);
+ flb_free(entry);
+ return;
+}
+
+/* Validity check for upload queue object */
+static int upload_queue_valid(struct upload_queue *upload_contents, time_t now,
+ void *out_context)
+{
+ struct flb_s3 *ctx = out_context;
+
+ if (upload_contents == NULL) {
+ flb_plg_error(ctx->ins, "Error getting entry from upload_queue");
+ return -1;
+ }
+ if (upload_contents->_head.next == NULL || upload_contents->_head.prev == NULL) {
+ flb_plg_debug(ctx->ins, "Encountered previously deleted entry in "
+ "upload_queue. Deleting invalid entry");
+ mk_list_del(&upload_contents->_head);
+ return -1;
+ }
+ if (upload_contents->upload_file->locked == FLB_FALSE) {
+ flb_plg_debug(ctx->ins, "Encountered unlocked file in upload_queue. "
+ "Exiting");
+ return -1;
+ }
+ if (upload_contents->upload_file->size <= 0) {
+ flb_plg_debug(ctx->ins, "Encountered empty chunk file in upload_queue. "
+ "Deleting empty chunk file");
+ remove_from_queue(upload_contents);
+ return -1;
+ }
+ if (now < upload_contents->upload_time) {
+ flb_plg_debug(ctx->ins, "Found valid chunk file but not ready to upload");
+ return -1;
+ }
+ return 0;
+}
+
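+/*
+ * Build the request body from the buffered file plus any new data and hand it
+ * to upload_data(); returns an FLB_* status or -1 on buffer errors.
+ */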
+static int send_upload_request(void *out_context, flb_sds_t chunk,
+ struct s3_file *upload_file,
+ struct multipart_upload *m_upload_file,
+ const char *tag, int tag_len)
+{
+ int ret;
+ char *buffer;
+ size_t buffer_size;
+ struct flb_s3 *ctx = out_context;
+
+ /* Create buffer to upload to S3 */
+ ret = construct_request_buffer(ctx, chunk, upload_file, &buffer, &buffer_size);
+ flb_sds_destroy(chunk);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Could not construct request buffer for %s",
+ upload_file->file_path);
+ return -1;
+ }
+
+ /* Upload to S3 */
+ ret = upload_data(ctx, upload_file, m_upload_file, buffer, buffer_size, tag, tag_len);
+ flb_free(buffer);
+
+ return ret;
+}
+
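+/* Persist a chunk to the local file store so it can be uploaded later */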
+static int buffer_chunk(void *out_context, struct s3_file *upload_file,
+ flb_sds_t chunk, int chunk_size,
+ const char *tag, int tag_len,
+ time_t file_first_log_time)
+{
+ int ret;
+ struct flb_s3 *ctx = out_context;
+
+ ret = s3_store_buffer_put(ctx, upload_file, tag,
+ tag_len, chunk, (size_t) chunk_size, file_first_log_time);
+ flb_sds_destroy(chunk);
+ if (ret < 0) {
+ flb_plg_warn(ctx->ins, "Could not buffer chunk. Data order preservation "
+ "will be compromised");
+ return -1;
+ }
+ return 0;
+}
+
+/* Uploads all chunk files in queue synchronously */
+static void s3_upload_queue(struct flb_config *config, void *out_context)
+{
+ int ret;
+ time_t now;
+ struct upload_queue *upload_contents;
+ struct flb_s3 *ctx = out_context;
+ struct mk_list *tmp;
+ struct mk_list *head;
+
+ flb_plg_debug(ctx->ins, "Running upload timer callback (upload_queue)..");
+
+ /* No chunks in upload queue. Scan for timed out chunks. */
+ if (mk_list_size(&ctx->upload_queue) == 0) {
+ flb_plg_debug(ctx->ins, "No files found in upload_queue. Scanning for timed "
+ "out chunks");
+ cb_s3_upload(config, out_context);
+ }
+
+ /* Iterate through each file in upload queue */
+ mk_list_foreach_safe(head, tmp, &ctx->upload_queue) {
+ upload_contents = mk_list_entry(head, struct upload_queue, _head);
+
+ now = time(NULL);
+
+ /* Checks if upload_contents is valid */
+ ret = upload_queue_valid(upload_contents, now, ctx);
+ if (ret < 0) {
+ goto exit;
+ }
+
+ /* Try to upload file. Return value can be -1, FLB_OK, FLB_ERROR, FLB_RETRY. */
+ ret = send_upload_request(ctx, NULL, upload_contents->upload_file,
+ upload_contents->m_upload_file,
+ upload_contents->tag, upload_contents->tag_len);
+ if (ret < 0) {
+ goto exit;
+ }
+ else if (ret == FLB_OK) {
+ remove_from_queue(upload_contents);
+ ctx->retry_time = 0;
+ ctx->upload_queue_success = FLB_TRUE;
+ }
+ else {
+ s3_store_file_lock(upload_contents->upload_file);
+ ctx->upload_queue_success = FLB_FALSE;
+
+ /* If retry limit was reached, discard file and remove file from queue */
+ upload_contents->retry_counter++;
+ if (upload_contents->retry_counter >= MAX_UPLOAD_ERRORS) {
+ flb_plg_warn(ctx->ins, "Chunk file failed to send %d times, will not "
+ "retry", upload_contents->retry_counter);
+ s3_store_file_inactive(ctx, upload_contents->upload_file);
+ multipart_upload_destroy(upload_contents->m_upload_file);
+ remove_from_queue(upload_contents);
+ continue;
+ }
+
+ /* Retry in N seconds */
+ upload_contents->upload_time = now + 2 * upload_contents->retry_counter;
+ ctx->retry_time += 2 * upload_contents->retry_counter;
+ flb_plg_debug(ctx->ins, "Failed to upload file in upload_queue. Will not "
+ "retry for %d seconds", 2 * upload_contents->retry_counter);
+ break;
+ }
+ }
+
+exit:
+ return;
+}
+
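+/*
+ * Timer callback: upload chunks whose upload_timeout has elapsed and complete
+ * multipart uploads that are marked ready or have timed out.
+ */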
+static void cb_s3_upload(struct flb_config *config, void *data)
+{
+ struct flb_s3 *ctx = data;
+ struct s3_file *chunk = NULL;
+ struct multipart_upload *m_upload = NULL;
+ struct flb_fstore_file *fsf;
+ char *buffer = NULL;
+ size_t buffer_size = 0;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ int complete;
+ int ret;
+ time_t now;
+
+ flb_plg_debug(ctx->ins, "Running upload timer callback (cb_s3_upload)..");
+
+ now = time(NULL);
+
+ /* Check all chunks and see if any have timed out */
+ mk_list_foreach_safe(head, tmp, &ctx->stream_active->files) {
+ fsf = mk_list_entry(head, struct flb_fstore_file, _head);
+ chunk = fsf->data;
+
+ if (now < (chunk->create_time + ctx->upload_timeout + ctx->retry_time)) {
+ continue; /* Only send chunks which have timed out */
+ }
+
+ /* Locked chunks are being processed, skip */
+ if (chunk->locked == FLB_TRUE) {
+ continue;
+ }
+
+ m_upload = get_upload(ctx, (const char *) fsf->meta_buf, fsf->meta_size);
+
+ ret = construct_request_buffer(ctx, NULL, chunk, &buffer, &buffer_size);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Could not construct request buffer for %s",
+ chunk->file_path);
+ continue;
+ }
+
+        /* FYI: if construct_request_buffer() succeeded, the s3_file is locked */
+ ret = upload_data(ctx, chunk, m_upload, buffer, buffer_size,
+ (const char *) fsf->meta_buf, fsf->meta_size);
+ flb_free(buffer);
+ if (ret != FLB_OK) {
+ flb_plg_error(ctx->ins, "Could not send chunk with tag %s",
+ (char *) fsf->meta_buf);
+ }
+ }
+
+ /* Check all uploads and see if any need completion */
+ mk_list_foreach_safe(head, tmp, &ctx->uploads) {
+ m_upload = mk_list_entry(head, struct multipart_upload, _head);
+ complete = FLB_FALSE;
+
+ if (m_upload->complete_errors >= MAX_UPLOAD_ERRORS) {
+ flb_plg_error(ctx->ins,
+ "Upload for %s has reached max completion errors, "
+ "plugin will give up", m_upload->s3_key);
+ mk_list_del(&m_upload->_head);
+ continue;
+ }
+
+ if (m_upload->upload_state == MULTIPART_UPLOAD_STATE_NOT_CREATED) {
+ continue;
+ }
+
+ if (m_upload->upload_state == MULTIPART_UPLOAD_STATE_COMPLETE_IN_PROGRESS) {
+ complete = FLB_TRUE;
+ }
+ if (time(NULL) > (m_upload->init_time + ctx->upload_timeout + ctx->retry_time)) {
+ flb_plg_info(ctx->ins, "Completing upload for %s because upload_timeout"
+ " has passed", m_upload->s3_key);
+ complete = FLB_TRUE;
+ }
+ if (complete == FLB_TRUE) {
+ m_upload->upload_state = MULTIPART_UPLOAD_STATE_COMPLETE_IN_PROGRESS;
+ mk_list_del(&m_upload->_head);
+ ret = complete_multipart_upload(ctx, m_upload);
+ if (ret == 0) {
+ multipart_upload_destroy(m_upload);
+ }
+ else {
+ mk_list_add(&m_upload->_head, &ctx->uploads);
+ /* data was persisted, this can be retried */
+ m_upload->complete_errors += 1;
+ flb_plg_error(ctx->ins, "Could not complete upload %s, will retry..",
+ m_upload->s3_key);
+ }
+ }
+ }
+
+}
+
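+/*
+ * Walk the msgpack records and return only the values of the configured
+ * 'log_key', newline delimited; records missing the key are counted and
+ * reported once per chunk.
+ */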
+static flb_sds_t flb_pack_msgpack_extract_log_key(void *out_context, const char *data,
+ uint64_t bytes)
+{
+ int i;
+ int records = 0;
+ int map_size;
+ int check = FLB_FALSE;
+ int found = FLB_FALSE;
+ int log_key_missing = 0;
+ int ret;
+ int alloc_error = 0;
+ struct flb_s3 *ctx = out_context;
+ char *val_buf;
+ char *key_str = NULL;
+ size_t key_str_size = 0;
+ size_t msgpack_size = bytes + bytes / 4;
+ size_t val_offset = 0;
+ flb_sds_t out_buf;
+ msgpack_object map;
+ msgpack_object key;
+ msgpack_object val;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ /* Iterate the original buffer and perform adjustments */
+ records = flb_mp_count(data, bytes);
+ if (records <= 0) {
+ return NULL;
+ }
+
+ /* Allocate buffer to store log_key contents */
+ val_buf = flb_calloc(1, msgpack_size);
+ if (val_buf == NULL) {
+ flb_plg_error(ctx->ins, "Could not allocate enough "
+ "memory to read record");
+ flb_errno();
+ return NULL;
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ flb_free(val_buf);
+
+ return NULL;
+ }
+
+
+ while (!alloc_error &&
+ (ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+
+ /* Get the record/map */
+ map = *log_event.body;
+
+ if (map.type != MSGPACK_OBJECT_MAP) {
+ continue;
+ }
+
+ map_size = map.via.map.size;
+
+ /* Reset variables for found log_key and correct type */
+ found = FLB_FALSE;
+ check = FLB_FALSE;
+
+ /* Extract log_key from record and append to output buffer */
+ for (i = 0; i < map_size; i++) {
+ key = map.via.map.ptr[i].key;
+ val = map.via.map.ptr[i].val;
+
+ if (key.type == MSGPACK_OBJECT_BIN) {
+ key_str = (char *) key.via.bin.ptr;
+ key_str_size = key.via.bin.size;
+ check = FLB_TRUE;
+ }
+ if (key.type == MSGPACK_OBJECT_STR) {
+ key_str = (char *) key.via.str.ptr;
+ key_str_size = key.via.str.size;
+ check = FLB_TRUE;
+ }
+
+ if (check == FLB_TRUE) {
+ if (strncmp(ctx->log_key, key_str, key_str_size) == 0) {
+ found = FLB_TRUE;
+
+ /*
+ * Copy contents of value into buffer. Necessary to copy
+ * strings because flb_msgpack_to_json does not handle nested
+ * JSON gracefully and double escapes them.
+ */
+ if (val.type == MSGPACK_OBJECT_BIN) {
+ memcpy(val_buf + val_offset, val.via.bin.ptr, val.via.bin.size);
+ val_offset += val.via.bin.size;
+ val_buf[val_offset] = '\n';
+ val_offset++;
+ }
+ else if (val.type == MSGPACK_OBJECT_STR) {
+ memcpy(val_buf + val_offset, val.via.str.ptr, val.via.str.size);
+ val_offset += val.via.str.size;
+ val_buf[val_offset] = '\n';
+ val_offset++;
+ }
+ else {
+ ret = flb_msgpack_to_json(val_buf + val_offset,
+ msgpack_size - val_offset, &val);
+ if (ret < 0) {
+ break;
+ }
+ val_offset += ret;
+ val_buf[val_offset] = '\n';
+ val_offset++;
+ }
+ /* Exit early once log_key has been found for current record */
+ break;
+ }
+ }
+ }
+
+ /* If log_key was not found in the current record, mark log key as missing */
+ if (found == FLB_FALSE) {
+ log_key_missing++;
+ }
+ }
+
+ /* Throw error once per chunk if at least one log key was not found */
+ if (log_key_missing > 0) {
+ flb_plg_error(ctx->ins, "Could not find log_key '%s' in %d records",
+ ctx->log_key, log_key_missing);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ /* If nothing was read, destroy buffer */
+ if (val_offset == 0) {
+ flb_free(val_buf);
+ return NULL;
+ }
+ val_buf[val_offset] = '\0';
+
+ /* Create output buffer to store contents */
+ out_buf = flb_sds_create(val_buf);
+ if (out_buf == NULL) {
+ flb_plg_error(ctx->ins, "Error creating buffer to store log_key contents.");
+ flb_errno();
+ }
+ flb_free(val_buf);
+
+ return out_buf;
+}
+
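+/*
+ * Test-only flush path (s3_plugin_under_test): buffer the chunk, build the
+ * request and upload synchronously, returning via FLB_OUTPUT_RETURN.
+ */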
+static void unit_test_flush(void *out_context, struct s3_file *upload_file,
+ const char *tag, int tag_len, flb_sds_t chunk,
+ int chunk_size, struct multipart_upload *m_upload_file,
+ time_t file_first_log_time)
+{
+ int ret;
+ char *buffer;
+ size_t buffer_size;
+ struct flb_s3 *ctx = out_context;
+
+ s3_store_buffer_put(ctx, upload_file, tag, tag_len,
+ chunk, (size_t) chunk_size, file_first_log_time);
+ ret = construct_request_buffer(ctx, chunk, upload_file, &buffer, &buffer_size);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Could not construct request buffer for %s",
+ upload_file->file_path);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ ret = upload_data(ctx, upload_file, m_upload_file, buffer, buffer_size, tag, tag_len);
+ flb_free(buffer);
+
+ FLB_OUTPUT_RETURN(ret);
+}
+
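+/*
+ * Per-flush setup: on the first flush, resend leftover buffers and create the
+ * periodic upload timer (upload queue or plain upload callback, depending on
+ * preserve_data_ordering).
+ */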
+static void flush_init(void *out_context)
+{
+ int ret;
+ struct flb_s3 *ctx = out_context;
+ struct flb_sched *sched;
+
+ /* clean up any old buffers found on startup */
+ if (ctx->has_old_buffers == FLB_TRUE) {
+ flb_plg_info(ctx->ins,
+ "Sending locally buffered data from previous "
+ "executions to S3; buffer=%s",
+ ctx->fs->root_path);
+ ctx->has_old_buffers = FLB_FALSE;
+ ret = put_all_chunks(ctx);
+ if (ret < 0) {
+ ctx->has_old_buffers = FLB_TRUE;
+ flb_plg_error(ctx->ins,
+ "Failed to send locally buffered data left over "
+ "from previous executions; will retry. Buffer=%s",
+ ctx->fs->root_path);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ }
+
+ /*
+ * create a timer that will run periodically and check if uploads
+ * are ready for completion
+ * this is created once on the first flush
+ */
+ if (ctx->timer_created == FLB_FALSE) {
+ flb_plg_debug(ctx->ins,
+ "Creating upload timer with frequency %ds",
+ ctx->timer_ms / 1000);
+
+ sched = flb_sched_ctx_get();
+
+ if (ctx->preserve_data_ordering) {
+ ret = flb_sched_timer_cb_create(sched, FLB_SCHED_TIMER_CB_PERM,
+ ctx->timer_ms, s3_upload_queue, ctx, NULL);
+ }
+ else {
+ ret = flb_sched_timer_cb_create(sched, FLB_SCHED_TIMER_CB_PERM,
+ ctx->timer_ms, cb_s3_upload, ctx, NULL);
+ }
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Failed to create upload timer");
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ ctx->timer_created = FLB_TRUE;
+ }
+}
+
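+/*
+ * Main flush callback: format the incoming event chunk (JSON lines or the
+ * extracted log_key values), then buffer it locally or trigger an upload when
+ * upload_timeout or the size limits are reached.
+ */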
+static void cb_s3_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ int chunk_size;
+ int upload_timeout_check = FLB_FALSE;
+ int total_file_size_check = FLB_FALSE;
+ flb_sds_t chunk = NULL;
+ struct s3_file *upload_file = NULL;
+ struct flb_s3 *ctx = out_context;
+ struct multipart_upload *m_upload_file = NULL;
+ time_t file_first_log_time = 0;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ /* Cleanup old buffers and initialize upload timer */
+ flush_init(ctx);
+
+ /* Process chunk */
+ if (ctx->log_key) {
+ chunk = flb_pack_msgpack_extract_log_key(ctx,
+ event_chunk->data,
+ event_chunk->size);
+ }
+ else {
+ chunk = flb_pack_msgpack_to_json_format(event_chunk->data,
+ event_chunk->size,
+ FLB_PACK_JSON_FORMAT_LINES,
+ ctx->json_date_format,
+ ctx->date_key);
+ }
+ if (chunk == NULL) {
+ flb_plg_error(ctx->ins, "Could not marshal msgpack to output string");
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+ chunk_size = flb_sds_len(chunk);
+
+ /* Get a file candidate matching the given 'tag' */
+ upload_file = s3_store_file_get(ctx,
+ event_chunk->tag,
+ flb_sds_len(event_chunk->tag));
+
+ if (upload_file == NULL) {
+ ret = flb_log_event_decoder_init(&log_decoder,
+ (char *) event_chunk->data,
+ event_chunk->size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ flb_sds_destroy(chunk);
+
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ if (log_event.timestamp.tm.tv_sec != 0) {
+ file_first_log_time = log_event.timestamp.tm.tv_sec;
+ break;
+ }
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ }
+ else {
+ /* Get file_first_log_time from upload_file */
+ file_first_log_time = upload_file->first_log_time;
+ }
+
+ if (file_first_log_time == 0) {
+ file_first_log_time = time(NULL);
+ }
+
+ /* Specific to unit tests, will not get called normally */
+ if (s3_plugin_under_test() == FLB_TRUE) {
+ unit_test_flush(ctx, upload_file,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ chunk, chunk_size,
+ m_upload_file, file_first_log_time);
+ }
+
+ /* Discard upload_file if it has failed to upload MAX_UPLOAD_ERRORS times */
+ if (upload_file != NULL && upload_file->failures >= MAX_UPLOAD_ERRORS) {
+ flb_plg_warn(ctx->ins, "File with tag %s failed to send %d times, will not "
+ "retry", event_chunk->tag, MAX_UPLOAD_ERRORS);
+ s3_store_file_inactive(ctx, upload_file);
+ upload_file = NULL;
+ }
+
+ /* If upload_timeout has elapsed, upload file */
+ if (upload_file != NULL && time(NULL) >
+ (upload_file->create_time + ctx->upload_timeout)) {
+ upload_timeout_check = FLB_TRUE;
+ flb_plg_info(ctx->ins, "upload_timeout reached for %s",
+ event_chunk->tag);
+ }
+
+ m_upload_file = get_upload(ctx,
+ event_chunk->tag, flb_sds_len(event_chunk->tag));
+
+ if (m_upload_file != NULL && time(NULL) >
+ (m_upload_file->init_time + ctx->upload_timeout)) {
+ upload_timeout_check = FLB_TRUE;
+ flb_plg_info(ctx->ins, "upload_timeout reached for %s", event_chunk->tag);
+ }
+
+ /* If total_file_size has been reached, upload file */
+ if ((upload_file && upload_file->size + chunk_size > ctx->upload_chunk_size) ||
+ (m_upload_file && m_upload_file->bytes + chunk_size > ctx->file_size)) {
+ total_file_size_check = FLB_TRUE;
+ }
+
+ /* File is ready for upload, upload_file != NULL prevents from segfaulting. */
+ if ((upload_file != NULL) && (upload_timeout_check == FLB_TRUE || total_file_size_check == FLB_TRUE)) {
+ if (ctx->preserve_data_ordering == FLB_TRUE) {
+ /* Buffer last chunk in file and lock file to prevent further changes */
+ ret = buffer_chunk(ctx, upload_file, chunk, chunk_size,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ file_first_log_time);
+
+ if (ret < 0) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ s3_store_file_lock(upload_file);
+
+ /* Add chunk file to upload queue */
+ ret = add_to_queue(ctx, upload_file, m_upload_file,
+ event_chunk->tag, flb_sds_len(event_chunk->tag));
+ if (ret < 0) {
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ /* Go through upload queue and return error if something went wrong */
+ s3_upload_queue(config, ctx);
+ if (ctx->upload_queue_success == FLB_FALSE) {
+ ctx->upload_queue_success = FLB_TRUE;
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+ FLB_OUTPUT_RETURN(FLB_OK);
+ }
+ else {
+ /* Send upload directly without upload queue */
+ ret = send_upload_request(ctx, chunk, upload_file, m_upload_file,
+ event_chunk->tag,
+ flb_sds_len(event_chunk->tag));
+ if (ret < 0) {
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+ FLB_OUTPUT_RETURN(ret);
+ }
+ }
+
+ /* Buffer current chunk in filesystem and wait for next chunk from engine */
+ ret = buffer_chunk(ctx, upload_file, chunk, chunk_size,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ file_first_log_time);
+
+ if (ret < 0) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int cb_s3_exit(void *data, struct flb_config *config)
+{
+ int ret;
+ struct flb_s3 *ctx = data;
+ struct multipart_upload *m_upload = NULL;
+ struct mk_list *tmp;
+ struct mk_list *head;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ if (s3_store_has_data(ctx) == FLB_TRUE) {
+ flb_plg_info(ctx->ins, "Sending all locally buffered data to S3");
+ ret = put_all_chunks(ctx);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Could not send all chunks on exit");
+ }
+ }
+
+ if (s3_store_has_uploads(ctx) == FLB_TRUE) {
+ mk_list_foreach_safe(head, tmp, &ctx->uploads) {
+ m_upload = mk_list_entry(head, struct multipart_upload, _head);
+
+ if (m_upload->upload_state == MULTIPART_UPLOAD_STATE_NOT_CREATED) {
+ continue;
+ }
+
+ if (m_upload->bytes > 0) {
+ m_upload->upload_state = MULTIPART_UPLOAD_STATE_COMPLETE_IN_PROGRESS;
+ mk_list_del(&m_upload->_head);
+ ret = complete_multipart_upload(ctx, m_upload);
+ if (ret == 0) {
+ multipart_upload_destroy(m_upload);
+ }
+ else {
+ mk_list_add(&m_upload->_head, &ctx->uploads);
+ flb_plg_error(ctx->ins, "Could not complete upload %s",
+ m_upload->s3_key);
+ }
+ }
+ }
+ }
+
+ s3_store_exit(ctx);
+ s3_context_destroy(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "json_date_format", NULL,
+ 0, FLB_FALSE, 0,
+ FBL_PACK_JSON_DATE_FORMAT_DESCRIPTION
+ },
+ {
+ FLB_CONFIG_MAP_STR, "json_date_key", "date",
+ 0, FLB_TRUE, offsetof(struct flb_s3, json_date_key),
+ "Specifies the name of the date field in output."
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "total_file_size", "100000000",
+ 0, FLB_TRUE, offsetof(struct flb_s3, file_size),
+ "Specifies the size of files in S3. Maximum size is 50GB, minimum is 1MB"
+ },
+ {
+ FLB_CONFIG_MAP_SIZE, "upload_chunk_size", "5242880",
+ 0, FLB_TRUE, offsetof(struct flb_s3, upload_chunk_size),
+ "This plugin uses the S3 Multipart Upload API to stream data to S3, "
+ "ensuring your data gets-off-the-box as quickly as possible. "
+ "This parameter configures the size of each “part” in the upload. "
+ "The total_file_size option configures the size of the file you will see "
+ "in S3; this option determines the size of chunks uploaded until that "
+ "size is reached. These chunks are temporarily stored in chunk_buffer_path "
+ "until their size reaches upload_chunk_size, which point the chunk is "
+ "uploaded to S3. Default: 5M, Max: 50M, Min: 5M."
+ },
+
+ {
+ FLB_CONFIG_MAP_TIME, "upload_timeout", "10m",
+ 0, FLB_TRUE, offsetof(struct flb_s3, upload_timeout),
+ "Optionally specify a timeout for uploads. "
+ "Whenever this amount of time has elapsed, Fluent Bit will complete an "
+ "upload and create a new file in S3. For example, set this value to 60m "
+ "and you will get a new file in S3 every hour. Default is 10m."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "bucket", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_s3, bucket),
+ "S3 bucket name."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "region", "us-east-1",
+ 0, FLB_TRUE, offsetof(struct flb_s3, region),
+ "AWS region."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "role_arn", NULL,
+ 0, FLB_FALSE, 0,
+ "ARN of an IAM role to assume (ex. for cross account access)."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "endpoint", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_s3, endpoint),
+ "Custom endpoint for the S3 API."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "sts_endpoint", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_s3, sts_endpoint),
+ "Custom endpoint for the STS API."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "canned_acl", NULL,
+ 0, FLB_FALSE, 0,
+ "Predefined Canned ACL policy for S3 objects."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "compression", NULL,
+ 0, FLB_FALSE, 0,
+ "Compression type for S3 objects. 'gzip' and 'arrow' are the supported values. "
+ "'arrow' is only an available if Apache Arrow was enabled at compile time. "
+ "Defaults to no compression. "
+ "If 'gzip' is selected, the Content-Encoding HTTP Header will be set to 'gzip'."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "content_type", NULL,
+ 0, FLB_FALSE, 0,
+ "A standard MIME type for the S3 object; this will be set "
+ "as the Content-Type HTTP header."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "store_dir", "/tmp/fluent-bit/s3",
+ 0, FLB_TRUE, offsetof(struct flb_s3, store_dir),
+ "Directory to locally buffer data before sending. Plugin uses the S3 Multipart "
+ "upload API to send data in chunks of 5 MB at a time- only a small amount of"
+ " data will be locally buffered at any given point in time."
+ },
+
+ {
+ FLB_CONFIG_MAP_SIZE, "store_dir_limit_size", (char *) NULL,
+ 0, FLB_TRUE, offsetof(struct flb_s3, store_dir_limit_size),
+ "S3 plugin has its own buffering system with files in the `store_dir`. "
+ "Use the `store_dir_limit_size` to limit the amount of data S3 buffers in "
+ "the `store_dir` to limit disk usage. If the limit is reached, "
+ "data will be discarded. Default is 0 which means unlimited."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "s3_key_format", "/fluent-bit-logs/$TAG/%Y/%m/%d/%H/%M/%S",
+ 0, FLB_TRUE, offsetof(struct flb_s3, s3_key_format),
+ "Format string for keys in S3. This option supports strftime time formatters "
+ "and a syntax for selecting parts of the Fluent log tag using a syntax inspired "
+ "by the rewrite_tag filter. Add $TAG in the format string to insert the full "
+ "log tag; add $TAG[0] to insert the first part of the tag in the s3 key. "
+ "The tag is split into “parts” using the characters specified with the "
+ "s3_key_format_tag_delimiters option. Add $INDEX to enable sequential indexing "
+ "for file names. Adding $INDEX will prevent random string being added to end of key"
+ "when $UUID is not provided. See the in depth examples and tutorial in the "
+ "documentation."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "s3_key_format_tag_delimiters", ".",
+ 0, FLB_TRUE, offsetof(struct flb_s3, tag_delimiters),
+ "A series of characters which will be used to split the tag into “parts” for "
+ "use with the s3_key_format option. See the in depth examples and tutorial in "
+ "the documentation."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "auto_retry_requests", "true",
+ 0, FLB_TRUE, offsetof(struct flb_s3, retry_requests),
+ "Immediately retry failed requests to AWS services once. This option "
+ "does not affect the normal Fluent Bit retry mechanism with backoff. "
+ "Instead, it enables an immediate retry with no delay for networking "
+ "errors, which may help improve throughput when there are transient/random "
+ "networking issues."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "use_put_object", "false",
+ 0, FLB_TRUE, offsetof(struct flb_s3, use_put_object),
+ "Use the S3 PutObject API, instead of the multipart upload API"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "send_content_md5", "false",
+ 0, FLB_TRUE, offsetof(struct flb_s3, send_content_md5),
+ "Send the Content-MD5 header with object uploads, as is required when Object Lock is enabled"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "preserve_data_ordering", "true",
+ 0, FLB_TRUE, offsetof(struct flb_s3, preserve_data_ordering),
+ "Normally, when an upload request fails, there is a high chance for the last "
+ "received chunk to be swapped with a later chunk, resulting in data shuffling. "
+ "This feature prevents this shuffling by using a queue logic for uploads."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "log_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_s3, log_key),
+ "By default, the whole log record will be sent to S3. "
+ "If you specify a key name with this option, then only the value of "
+ "that key will be sent to S3."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "external_id", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_s3, external_id),
+ "Specify an external ID for the STS API, can be used with the role_arn parameter if your role "
+ "requires an external ID."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "static_file_path", "false",
+ 0, FLB_TRUE, offsetof(struct flb_s3, static_file_path),
+ "Disables behavior where UUID string is automatically appended to end of S3 key name when "
+ "$UUID is not provided in s3_key_format. $UUID, time formatters, $TAG, and other dynamic "
+ "key formatters all work as expected while this feature is set to true."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "storage_class", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify the storage class for S3 objects. If this option is not specified, objects "
+ "will be stored with the default 'STANDARD' storage class."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "profile", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_s3, profile),
+ "AWS Profile name. AWS Profiles can be configured with AWS CLI and are usually stored in "
+ "$HOME/.aws/ directory."
+ },
+
+ /* EOF */
+ {0}
+};
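+
+/*
+ * Illustrative configuration sketch (bucket name and match pattern are
+ * hypothetical examples, not defaults) showing how the options declared
+ * above map to an [OUTPUT] section:
+ *
+ *   [OUTPUT]
+ *       name            s3
+ *       match           *
+ *       bucket          my-example-bucket
+ *       region          us-east-1
+ *       s3_key_format   /fluent-bit-logs/$TAG/%Y/%m/%d/%H/%M/%S
+ *       upload_timeout  10m
+ *       store_dir       /tmp/fluent-bit/s3
+ *       use_put_object  false
+ */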
+
+/* Plugin registration */
+struct flb_output_plugin out_s3_plugin = {
+ .name = "s3",
+ .description = "Send to S3",
+ .cb_init = cb_s3_init,
+ .cb_flush = cb_s3_flush,
+ .cb_exit = cb_s3_exit,
+ .workers = 1,
+ .flags = FLB_OUTPUT_NET | FLB_IO_TLS,
+ .config_map = config_map
+};
diff --git a/src/fluent-bit/plugins/out_s3/s3.h b/src/fluent-bit/plugins/out_s3/s3.h
new file mode 100644
index 000000000..e145b1ad6
--- /dev/null
+++ b/src/fluent-bit/plugins/out_s3/s3.h
@@ -0,0 +1,203 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_S3
+#define FLB_OUT_S3
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_aws_credentials.h>
+#include <fluent-bit/flb_aws_util.h>
+
+/* Upload data to S3 in 5MB chunks */
+#define MIN_CHUNKED_UPLOAD_SIZE 5242880
+#define MAX_CHUNKED_UPLOAD_SIZE 50000000
+#define MAX_CHUNKED_UPLOAD_COMPRESS_SIZE 5000000000
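+/* 5242880 = 5 MiB (S3's minimum multipart part size); the max values above are in decimal bytes */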
+
+#define UPLOAD_TIMER_MAX_WAIT 60000
+#define UPLOAD_TIMER_MIN_WAIT 6000
+
+#define MULTIPART_UPLOAD_STATE_NOT_CREATED 0
+#define MULTIPART_UPLOAD_STATE_CREATED 1
+#define MULTIPART_UPLOAD_STATE_COMPLETE_IN_PROGRESS 2
+
+#define DEFAULT_FILE_SIZE 100000000
+#define MAX_FILE_SIZE 50000000000
+#define MAX_FILE_SIZE_STR "50,000,000,000"
+
+/* Allowed max file size 1 GB for publishing to S3 */
+#define MAX_FILE_SIZE_PUT_OBJECT 1000000000
+
+#define DEFAULT_UPLOAD_TIMEOUT 3600
+
+/*
+ * If we see repeated errors on an upload/chunk, we will discard it.
+ * This saves us from scenarios where something goes wrong and an upload
+ * cannot proceed (maybe some other process completed it or deleted the
+ * upload); instead of erroring out forever, we eventually discard the upload.
+ *
+ * The same is done for chunks, just to be safe, even though realistically
+ * I can't think of a reason why a chunk could become unsendable.
+ */
+#define MAX_UPLOAD_ERRORS 5
+
+struct upload_queue {
+ struct s3_file *upload_file;
+ struct multipart_upload *m_upload_file;
+ flb_sds_t tag;
+ int tag_len;
+
+ int retry_counter;
+ time_t upload_time;
+
+ struct mk_list _head;
+};
+
+struct multipart_upload {
+ flb_sds_t s3_key;
+ flb_sds_t tag;
+ flb_sds_t upload_id;
+ int upload_state;
+ time_t init_time;
+
+ /*
+ * maximum of 10,000 parts in an upload, for each we need to store mapping
+ * of Part Number to ETag
+ */
+ flb_sds_t etags[10000];
+ int part_number;
+
+ /*
+ * we use async http, so we need to check that all part requests have
+ * completed before we complete the upload
+ */
+ int parts_uploaded;
+
+ /* ongoing tracker of how much data has been sent for this upload */
+ size_t bytes;
+
+ struct mk_list _head;
+
+ /* see note for MAX_UPLOAD_ERRORS */
+ int upload_errors;
+ int complete_errors;
+};
+
+struct flb_s3 {
+ char *bucket;
+ char *region;
+ char *s3_key_format;
+ char *tag_delimiters;
+ char *endpoint;
+ char *sts_endpoint;
+ char *canned_acl;
+ char *content_type;
+ char *storage_class;
+ char *log_key;
+ char *external_id;
+ char *profile;
+ int free_endpoint;
+ int retry_requests;
+ int use_put_object;
+ int send_content_md5;
+ int static_file_path;
+ int compression;
+ int port;
+ int insecure;
+ size_t store_dir_limit_size;
+
+ /* track the total amount of buffered data */
+ size_t current_buffer_size;
+
+ struct flb_aws_provider *provider;
+ struct flb_aws_provider *base_provider;
+ /* tls instances can't be re-used; aws provider requires a separate one */
+ struct flb_tls *provider_tls;
+ /* one for the standard chain provider, one for sts assume role */
+ struct flb_tls *sts_provider_tls;
+ struct flb_tls *client_tls;
+
+ struct flb_aws_client *s3_client;
+ int json_date_format;
+ flb_sds_t json_date_key;
+ flb_sds_t date_key;
+
+ flb_sds_t buffer_dir;
+
+ char *store_dir;
+ struct flb_fstore *fs;
+ struct flb_fstore_stream *stream_active; /* default active stream */
+ struct flb_fstore_stream *stream_upload; /* multipart upload stream */
+ struct flb_fstore_stream *stream_metadata; /* s3 metadata stream */
+
+ /*
+ * used to track whether unsent buffers from a previous run were found
+ * on startup
+ */
+ int has_old_buffers;
+ /* old multipart uploads read on start up */
+ int has_old_uploads;
+
+ struct mk_list uploads;
+
+ int preserve_data_ordering;
+ int upload_queue_success;
+ struct mk_list upload_queue;
+
+ size_t file_size;
+ size_t upload_chunk_size;
+ time_t upload_timeout;
+ time_t retry_time;
+
+ int timer_created;
+ int timer_ms;
+ int key_fmt_has_uuid;
+
+ uint64_t seq_index;
+ int key_fmt_has_seq_index;
+ flb_sds_t metadata_dir;
+ flb_sds_t seq_index_file;
+
+ struct flb_output_instance *ins;
+};
+
+int upload_part(struct flb_s3 *ctx, struct multipart_upload *m_upload,
+ char *body, size_t body_size);
+
+int create_multipart_upload(struct flb_s3 *ctx,
+ struct multipart_upload *m_upload);
+
+int complete_multipart_upload(struct flb_s3 *ctx,
+ struct multipart_upload *m_upload);
+
+void multipart_read_uploads_from_fs(struct flb_s3 *ctx);
+
+void multipart_upload_destroy(struct multipart_upload *m_upload);
+
+struct flb_http_client *mock_s3_call(char *error_env_var, char *api);
+int s3_plugin_under_test();
+
+int get_md5_base64(char *buf, size_t buf_size, char *md5_str, size_t md5_str_size);
+
+int create_headers(struct flb_s3 *ctx, char *body_md5,
+ struct flb_aws_header **headers, int *num_headers,
+ int multipart_upload);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_s3/s3_multipart.c b/src/fluent-bit/plugins/out_s3/s3_multipart.c
new file mode 100644
index 000000000..1eb2a1061
--- /dev/null
+++ b/src/fluent-bit/plugins/out_s3/s3_multipart.c
@@ -0,0 +1,707 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_slist.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_aws_util.h>
+#include <fluent-bit/flb_signv4.h>
+#include <fluent-bit/flb_fstore.h>
+#include <ctype.h>
+
+#include "s3.h"
+#include "s3_store.h"
+
+#define COMPLETE_MULTIPART_UPLOAD_BASE_LEN 100
+#define COMPLETE_MULTIPART_UPLOAD_PART_LEN 124
+
+flb_sds_t get_etag(char *response, size_t size);
+
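+/*
+ * Append 'str' to 'buf' at offset '*off' without exceeding 'left' bytes.
+ * When 'str_len' is zero the length is computed with strlen(). Returns
+ * FLB_TRUE on success or FLB_FALSE if the data would not fit.
+ */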
+static inline int try_to_write(char *buf, int *off, size_t left,
+ const char *str, size_t str_len)
+{
+ if (str_len <= 0){
+ str_len = strlen(str);
+ }
+ if (left <= *off+str_len) {
+ return FLB_FALSE;
+ }
+ memcpy(buf+*off, str, str_len);
+ *off += str_len;
+ return FLB_TRUE;
+}
+
+
+/* the 'tag' or key in the upload_dir is s3_key + \n + upload_id */
+static flb_sds_t upload_key(struct multipart_upload *m_upload)
+{
+ flb_sds_t key;
+ flb_sds_t tmp;
+
+ key = flb_sds_create_size(64);
+
+ tmp = flb_sds_printf(&key, "%s\n%s", m_upload->s3_key, m_upload->upload_id);
+ if (!tmp) {
+ flb_errno();
+ flb_sds_destroy(key);
+ return NULL;
+ }
+ key = tmp;
+
+ return key;
+}
+
+/* the 'tag' or key in the upload_dir is s3_key + \n + upload_id */
+static int upload_data_from_key(struct multipart_upload *m_upload, char *key)
+{
+ flb_sds_t tmp_sds;
+ int len = 0;
+ int original_len;
+ char *tmp;
+
+ original_len = strlen(key);
+
+ tmp = strchr(key, '\n');
+ if (!tmp) {
+ return -1;
+ }
+
+ len = tmp - key;
+ tmp_sds = flb_sds_create_len(key, len);
+ if (!tmp_sds) {
+ flb_errno();
+ return -1;
+ }
+ m_upload->s3_key = tmp_sds;
+
+ tmp++;
+ original_len -= (len + 1);
+
+ tmp_sds = flb_sds_create_len(tmp, original_len);
+ if (!tmp_sds) {
+ flb_errno();
+ return -1;
+ }
+ m_upload->upload_id = tmp_sds;
+
+ return 0;
+}
+
+/* parse etags from file data */
+static void parse_etags(struct multipart_upload *m_upload, char *data)
+{
+ char *line = data;
+ char *start;
+ char *end;
+ flb_sds_t etag;
+ int part_num;
+ int len;
+
+ if (!data) {
+ return;
+ }
+
+ line = strtok(data, "\n");
+
+ if (!line) {
+ return;
+ }
+
+ do {
+ start = strstr(line, "part_number=");
+ if (!start) {
+ return;
+ }
+ start += 12;
+ end = strchr(start, '\t');
+ if (!end) {
+ flb_debug("[s3 restart parser] Did not find tab separator in line %s", start);
+ return;
+ }
+ *end = '\0';
+ part_num = atoi(start);
+ if (part_num <= 0) {
+ flb_debug("[s3 restart parser] Could not parse part_number from %s", start);
+ return;
+ }
+ m_upload->part_number = part_num;
+ *end = '\t';
+
+ start = strstr(line, "tag=");
+ if (!start) {
+ flb_debug("[s3 restart parser] Could not find 'etag=' %s", line);
+ return;
+ }
+
+ start += 4;
+ len = strlen(start);
+
+ if (len <= 0) {
+ flb_debug("[s3 restart parser] Could not find etag %s", line);
+ return;
+ }
+
+ etag = flb_sds_create_len(start, len);
+ if (!etag) {
+ flb_debug("[s3 restart parser] Could create etag");
+ return;
+ }
+ flb_debug("[s3 restart parser] found part number %d=%s", part_num, etag);
+ m_upload->etags[part_num - 1] = etag;
+
+ line = strtok(NULL, "\n");
+ } while (line != NULL);
+}
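+
+/*
+ * Illustrative sample (hypothetical values) of the buffered metadata parsed by
+ * parse_etags() above; each line is written by upload_data() below in the form
+ * part_number=<n>\tetag=<etag>:
+ *
+ *   part_number=1\tetag=a54357aff0632cce46d942af68356b38
+ *   part_number=2\tetag=0f343b0931126a20f133d67c2b018a3b
+ */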
+
+static struct multipart_upload *upload_from_file(struct flb_s3 *ctx,
+ struct flb_fstore_file *fsf)
+{
+ struct multipart_upload *m_upload = NULL;
+ char *buffered_data = NULL;
+ size_t buffer_size = 0;
+ int ret;
+
+ ret = s3_store_file_upload_read(ctx, fsf, &buffered_data, &buffer_size);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Could not read locally buffered data %s",
+ fsf->name);
+ return NULL;
+ }
+
+ /* always make sure we have a fresh copy of metadata */
+ ret = s3_store_file_meta_get(ctx, fsf);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Could not read file metadata: %s",
+ fsf->name);
+ return NULL;
+ }
+
+ m_upload = flb_calloc(1, sizeof(struct multipart_upload));
+ if (!m_upload) {
+ flb_errno();
+ flb_free(buffered_data);
+ return NULL;
+ }
+ m_upload->init_time = time(NULL);
+ m_upload->upload_state = MULTIPART_UPLOAD_STATE_COMPLETE_IN_PROGRESS;
+
+ ret = upload_data_from_key(m_upload, fsf->meta_buf);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "Could not extract upload data from: %s",
+ fsf->name);
+ flb_free(buffered_data);
+ multipart_upload_destroy(m_upload);
+ return NULL;
+ }
+
+ parse_etags(m_upload, buffered_data);
+ flb_free(buffered_data);
+ if (m_upload->part_number == 0) {
+ flb_plg_error(ctx->ins, "Could not extract upload data from %s",
+ fsf->name);
+ multipart_upload_destroy(m_upload);
+ return NULL;
+ }
+
+ /* code expects it to be 1 more than the last part read */
+ m_upload->part_number++;
+
+ return m_upload;
+}
+
+void multipart_read_uploads_from_fs(struct flb_s3 *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct multipart_upload *m_upload = NULL;
+ struct flb_fstore_file *fsf;
+
+ mk_list_foreach_safe(head, tmp, &ctx->stream_upload->files) {
+ fsf = mk_list_entry(head, struct flb_fstore_file, _head);
+ m_upload = upload_from_file(ctx, fsf);
+ if (!m_upload) {
+ flb_plg_error(ctx->ins,
+ "Could not process multipart upload data in %s",
+ fsf->name);
+ continue;
+ }
+ mk_list_add(&m_upload->_head, &ctx->uploads);
+ flb_plg_info(ctx->ins,
+ "Successfully read existing upload from file system, s3_key=%s",
+ m_upload->s3_key);
+ }
+}
+
+/* store list of part number and etag */
+static flb_sds_t upload_data(flb_sds_t etag, int part_num)
+{
+ flb_sds_t data;
+ flb_sds_t tmp;
+
+ data = flb_sds_create_size(64);
+
+ tmp = flb_sds_printf(&data, "part_number=%d\tetag=%s\n", part_num, etag);
+ if (!tmp) {
+ flb_errno();
+ flb_sds_destroy(data);
+ return NULL;
+ }
+ data = tmp;
+
+ return data;
+}
+
+/* persists upload data to the file system */
+static int save_upload(struct flb_s3 *ctx, struct multipart_upload *m_upload,
+ flb_sds_t etag)
+{
+ int ret;
+ flb_sds_t key;
+ flb_sds_t data;
+ struct flb_fstore_file *fsf;
+
+ key = upload_key(m_upload);
+ if (!key) {
+ flb_plg_debug(ctx->ins, "Could not constuct upload key for buffer dir");
+ return -1;
+ }
+
+ data = upload_data(etag, m_upload->part_number);
+ if (!data) {
+ flb_plg_debug(ctx->ins, "Could not constuct upload key for buffer dir");
+ return -1;
+ }
+
+ fsf = s3_store_file_upload_get(ctx, key, flb_sds_len(key));
+
+ /* Write the key to the file */
+ ret = s3_store_file_upload_put(ctx, fsf, key, data);
+
+ flb_sds_destroy(key);
+ flb_sds_destroy(data);
+
+ return ret;
+}
+
+static int remove_upload_from_fs(struct flb_s3 *ctx, struct multipart_upload *m_upload)
+{
+ flb_sds_t key;
+ struct flb_fstore_file *fsf;
+
+ key = upload_key(m_upload);
+ if (!key) {
+ flb_plg_debug(ctx->ins, "Could not construct upload key");
+ return -1;
+ }
+
+ fsf = s3_store_file_upload_get(ctx, key, flb_sds_len(key));
+ if (fsf) {
+ s3_store_file_upload_delete(ctx, fsf);
+ }
+ flb_sds_destroy(key);
+ return 0;
+}
+
+/*
+ * https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+ */
+static int complete_multipart_upload_payload(struct flb_s3 *ctx,
+ struct multipart_upload *m_upload,
+ char **out_buf, size_t *out_size)
+{
+ char *buf;
+ int i;
+ int offset = 0;
+ flb_sds_t etag;
+ size_t size = COMPLETE_MULTIPART_UPLOAD_BASE_LEN;
+ char part_num[7];
+
+ size = size + (COMPLETE_MULTIPART_UPLOAD_PART_LEN * m_upload->part_number);
+
+ buf = flb_malloc(size + 1);
+ if (!buf) {
+ flb_errno();
+ return -1;
+ }
+
+ if (!try_to_write(buf, &offset, size,
+ "<CompleteMultipartUpload xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">", 73)) {
+ goto error;
+ }
+
+ for (i = 0; i < m_upload->part_number; i++) {
+ etag = m_upload->etags[i];
+ if (etag == NULL) {
+ continue;
+ }
+ if (!try_to_write(buf, &offset, size,
+ "<Part><ETag>", 12)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf, &offset, size,
+ etag, 0)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf, &offset, size,
+ "</ETag><PartNumber>", 19)) {
+ goto error;
+ }
+
+ if (!sprintf(part_num, "%d", i + 1)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf, &offset, size,
+ part_num, 0)) {
+ goto error;
+ }
+
+ if (!try_to_write(buf, &offset, size,
+ "</PartNumber></Part>", 20)) {
+ goto error;
+ }
+ }
+
+ if (!try_to_write(buf, &offset, size,
+ "</CompleteMultipartUpload>", 26)) {
+ goto error;
+ }
+
+ buf[offset] = '\0';
+
+ *out_buf = buf;
+ *out_size = offset;
+ return 0;
+
+error:
+ flb_free(buf);
+ flb_plg_error(ctx->ins, "Failed to construct CompleteMultipartUpload "
+ "request body");
+ return -1;
+}
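+
+/*
+ * For reference, a sketch of the body produced above (ETag values are
+ * hypothetical and whitespace is added here for readability only):
+ *
+ *   <CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ *     <Part><ETag>a54357aff0632cce46d942af68356b38</ETag><PartNumber>1</PartNumber></Part>
+ *     <Part><ETag>0f343b0931126a20f133d67c2b018a3b</ETag><PartNumber>2</PartNumber></Part>
+ *   </CompleteMultipartUpload>
+ */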
+
+int complete_multipart_upload(struct flb_s3 *ctx,
+ struct multipart_upload *m_upload)
+{
+ char *body;
+ size_t size;
+ flb_sds_t uri = NULL;
+ flb_sds_t tmp;
+ int ret;
+ struct flb_http_client *c = NULL;
+ struct flb_aws_client *s3_client;
+
+ if (!m_upload->upload_id) {
+ flb_plg_error(ctx->ins, "Cannot complete multipart upload for key %s: "
+ "upload ID is unset ", m_upload->s3_key);
+ return -1;
+ }
+
+ uri = flb_sds_create_size(flb_sds_len(m_upload->s3_key) + 11 +
+ flb_sds_len(m_upload->upload_id));
+ if (!uri) {
+ flb_errno();
+ return -1;
+ }
+
+ tmp = flb_sds_printf(&uri, "/%s%s?uploadId=%s", ctx->bucket,
+ m_upload->s3_key, m_upload->upload_id);
+ if (!tmp) {
+ flb_sds_destroy(uri);
+ return -1;
+ }
+ uri = tmp;
+
+ ret = complete_multipart_upload_payload(ctx, m_upload, &body, &size);
+ if (ret < 0) {
+ flb_sds_destroy(uri);
+ return -1;
+ }
+
+ s3_client = ctx->s3_client;
+ if (s3_plugin_under_test() == FLB_TRUE) {
+ c = mock_s3_call("TEST_COMPLETE_MULTIPART_UPLOAD_ERROR", "CompleteMultipartUpload");
+ }
+ else {
+ c = s3_client->client_vtable->request(s3_client, FLB_HTTP_POST,
+ uri, body, size,
+ NULL, 0);
+ }
+ flb_sds_destroy(uri);
+ flb_free(body);
+ if (c) {
+ flb_plg_debug(ctx->ins, "CompleteMultipartUpload http status=%d",
+ c->resp.status);
+ if (c->resp.status == 200) {
+ flb_plg_info(ctx->ins, "Successfully completed multipart upload "
+ "for %s, UploadId=%s", m_upload->s3_key,
+ m_upload->upload_id);
+ flb_http_client_destroy(c);
+ /* remove this upload from the file system */
+ remove_upload_from_fs(ctx, m_upload);
+ return 0;
+ }
+ flb_aws_print_xml_error(c->resp.payload, c->resp.payload_size,
+ "CompleteMultipartUpload", ctx->ins);
+ if (c->resp.payload != NULL) {
+ flb_plg_debug(ctx->ins, "Raw CompleteMultipartUpload response: %s",
+ c->resp.payload);
+ }
+ flb_http_client_destroy(c);
+ }
+
+ flb_plg_error(ctx->ins, "CompleteMultipartUpload request failed");
+ return -1;
+}
+
+
+int create_multipart_upload(struct flb_s3 *ctx,
+ struct multipart_upload *m_upload)
+{
+ flb_sds_t uri = NULL;
+ flb_sds_t tmp;
+ struct flb_http_client *c = NULL;
+ struct flb_aws_client *s3_client;
+ struct flb_aws_header *headers = NULL;
+ int num_headers = 0;
+ int ret;
+
+ uri = flb_sds_create_size(flb_sds_len(m_upload->s3_key) + 8);
+ if (!uri) {
+ flb_errno();
+ return -1;
+ }
+
+ tmp = flb_sds_printf(&uri, "/%s%s?uploads=", ctx->bucket, m_upload->s3_key);
+ if (!tmp) {
+ flb_sds_destroy(uri);
+ return -1;
+ }
+ uri = tmp;
+
+ s3_client = ctx->s3_client;
+ if (s3_plugin_under_test() == FLB_TRUE) {
+ c = mock_s3_call("TEST_CREATE_MULTIPART_UPLOAD_ERROR", "CreateMultipartUpload");
+ }
+ else {
+ ret = create_headers(ctx, NULL, &headers, &num_headers, FLB_TRUE);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "Failed to create headers");
+ flb_sds_destroy(uri);
+ return -1;
+ }
+ c = s3_client->client_vtable->request(s3_client, FLB_HTTP_POST,
+ uri, NULL, 0, headers, num_headers);
+ if (headers) {
+ flb_free(headers);
+ }
+ }
+ flb_sds_destroy(uri);
+ if (c) {
+ flb_plg_debug(ctx->ins, "CreateMultipartUpload http status=%d",
+ c->resp.status);
+ if (c->resp.status == 200) {
+ tmp = flb_aws_xml_get_val(c->resp.payload, c->resp.payload_size,
+ "<UploadId>", "</UploadId>");
+ if (!tmp) {
+ flb_plg_error(ctx->ins, "Could not find upload ID in "
+ "CreateMultipartUpload response");
+ flb_plg_debug(ctx->ins, "Raw CreateMultipartUpload response: %s",
+ c->resp.payload);
+ flb_http_client_destroy(c);
+ return -1;
+ }
+ m_upload->upload_id = tmp;
+ flb_plg_info(ctx->ins, "Successfully initiated multipart upload "
+ "for %s, UploadId=%s", m_upload->s3_key,
+ m_upload->upload_id);
+ flb_http_client_destroy(c);
+ return 0;
+ }
+ flb_aws_print_xml_error(c->resp.payload, c->resp.payload_size,
+ "CreateMultipartUpload", ctx->ins);
+ if (c->resp.payload != NULL) {
+ flb_plg_debug(ctx->ins, "Raw CreateMultipartUpload response: %s",
+ c->resp.payload);
+ }
+ flb_http_client_destroy(c);
+ }
+
+ flb_plg_error(ctx->ins, "CreateMultipartUpload request failed");
+ return -1;
+}
+
+/* gets the ETag value from response headers */
+flb_sds_t get_etag(char *response, size_t size)
+{
+ char *tmp;
+ int start;
+ int end;
+ int len;
+ int i = 0;
+ flb_sds_t etag;
+
+ if (response == NULL) {
+ return NULL;
+ }
+
+ tmp = strstr(response, "ETag:");
+ if (!tmp) {
+ return NULL;
+ }
+ i = tmp - response;
+
+ /* advance to end of ETag key */
+ i += 5;
+
+ /* advance across any whitespace and the opening quote */
+ while (i < size && (response[i] == '\"' || isspace(response[i]) != 0)) {
+ i++;
+ }
+ start = i;
+ /* advance until we hit whitespace or the end quote */
+ while (i < size && (response[i] != '\"' && isspace(response[i]) == 0)) {
+ i++;
+ }
+ end = i;
+ len = end - start;
+
+ etag = flb_sds_create_len(response + start, len);
+ if (!etag) {
+ flb_errno();
+ return NULL;
+ }
+
+ return etag;
+}
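+
+/*
+ * Example (hypothetical value): given response headers containing the line
+ * 'ETag: "a54357aff0632cce46d942af68356b38"', get_etag() above returns the
+ * unquoted value 'a54357aff0632cce46d942af68356b38'.
+ */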
+
+int upload_part(struct flb_s3 *ctx, struct multipart_upload *m_upload,
+ char *body, size_t body_size)
+{
+ flb_sds_t uri = NULL;
+ flb_sds_t tmp;
+ int ret;
+ struct flb_http_client *c = NULL;
+ struct flb_aws_client *s3_client;
+ struct flb_aws_header *headers = NULL;
+ int num_headers = 0;
+ char body_md5[25];
+
+ uri = flb_sds_create_size(flb_sds_len(m_upload->s3_key) + 8);
+ if (!uri) {
+ flb_errno();
+ return -1;
+ }
+
+ tmp = flb_sds_printf(&uri, "/%s%s?partNumber=%d&uploadId=%s",
+ ctx->bucket, m_upload->s3_key, m_upload->part_number,
+ m_upload->upload_id);
+ if (!tmp) {
+ flb_errno();
+ flb_sds_destroy(uri);
+ return -1;
+ }
+ uri = tmp;
+
+ memset(body_md5, 0, sizeof(body_md5));
+ if (ctx->send_content_md5 == FLB_TRUE) {
+ ret = get_md5_base64(body, body_size, body_md5, sizeof(body_md5));
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "Failed to create Content-MD5 header");
+ flb_sds_destroy(uri);
+ return -1;
+ }
+
+ num_headers = 1;
+ headers = flb_malloc(sizeof(struct flb_aws_header) * num_headers);
+ if (headers == NULL) {
+ flb_errno();
+ flb_sds_destroy(uri);
+ return -1;
+ }
+
+ headers[0].key = "Content-MD5";
+ headers[0].key_len = 11;
+ headers[0].val = body_md5;
+ headers[0].val_len = strlen(body_md5);
+ }
+
+ s3_client = ctx->s3_client;
+ if (s3_plugin_under_test() == FLB_TRUE) {
+ c = mock_s3_call("TEST_UPLOAD_PART_ERROR", "UploadPart");
+ }
+ else {
+ c = s3_client->client_vtable->request(s3_client, FLB_HTTP_PUT,
+ uri, body, body_size,
+ headers, num_headers);
+ }
+ flb_free(headers);
+ flb_sds_destroy(uri);
+ if (c) {
+ flb_plg_info(ctx->ins, "UploadPart http status=%d",
+ c->resp.status);
+ if (c->resp.status == 200) {
+ tmp = get_etag(c->resp.data, c->resp.data_size);
+ if (!tmp) {
+ flb_plg_error(ctx->ins, "Could not find ETag in "
+ "UploadPart response");
+ flb_plg_debug(ctx->ins, "Raw UploadPart response: %s",
+ c->resp.payload);
+ flb_http_client_destroy(c);
+ return -1;
+ }
+ m_upload->etags[m_upload->part_number - 1] = tmp;
+ flb_plg_info(ctx->ins, "Successfully uploaded part #%d "
+ "for %s, UploadId=%s, ETag=%s", m_upload->part_number,
+ m_upload->s3_key, m_upload->upload_id, tmp);
+ flb_http_client_destroy(c);
+ /* track how many bytes have gone toward this upload */
+ m_upload->bytes += body_size;
+
+ /* finally, attempt to persist the data for this upload */
+ ret = save_upload(ctx, m_upload, tmp);
+ if (ret == 0) {
+ flb_plg_debug(ctx->ins, "Successfully persisted upload data, UploadId=%s",
+ m_upload->upload_id);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "Was not able to persisted upload data to disk; "
+ "if fluent bit dies without completing this upload the part "
+ "could be lost, UploadId=%s, ETag=%s",
+ m_upload->upload_id, tmp);
+ }
+ return 0;
+ }
+ flb_aws_print_xml_error(c->resp.payload, c->resp.payload_size,
+ "UploadPart", ctx->ins);
+ if (c->resp.payload != NULL) {
+ flb_plg_debug(ctx->ins, "Raw UploadPart response: %s",
+ c->resp.payload);
+ }
+ flb_http_client_destroy(c);
+ }
+
+ flb_plg_error(ctx->ins, "UploadPart request failed");
+ return -1;
+}
diff --git a/src/fluent-bit/plugins/out_s3/s3_store.c b/src/fluent-bit/plugins/out_s3/s3_store.c
new file mode 100644
index 000000000..8a9640633
--- /dev/null
+++ b/src/fluent-bit/plugins/out_s3/s3_store.c
@@ -0,0 +1,543 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_fstore.h>
+#include <fluent-bit/flb_time.h>
+
+#include "s3.h"
+#include "s3_store.h"
+
+static int s3_store_under_travis_ci()
+{
+
+ if (getenv("CI") != NULL && getenv("TRAVIS") != NULL) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+/*
+ * Simple and fast hashing algorithm to create keys in the local buffer
+ */
+static flb_sds_t gen_store_filename(const char *tag)
+{
+ int c;
+ unsigned long hash = 5381;
+ unsigned long hash2 = 5381;
+ flb_sds_t hash_str;
+ flb_sds_t tmp;
+ struct flb_time tm;
+
+ /* get current time */
+ flb_time_get(&tm);
+
+ /* compose hash */
+ while ((c = *tag++)) {
+ hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
+ }
+ hash2 = (unsigned long) hash2 * tm.tm.tv_sec * tm.tm.tv_nsec;
+
+ /* flb_sds_printf allocs if the incoming sds is not at least 64 bytes */
+ hash_str = flb_sds_create_size(64);
+ if (!hash_str) {
+ flb_errno();
+ return NULL;
+ }
+ tmp = flb_sds_printf(&hash_str, "%lu-%lu", hash, hash2);
+ if (!tmp) {
+ flb_errno();
+ flb_sds_destroy(hash_str);
+ return NULL;
+ }
+ hash_str = tmp;
+
+ return hash_str;
+}
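+
+/*
+ * The generated name has the form "<tag-hash>-<time-hash>", for example a
+ * (hypothetical) "193485963-1651217086000000"; it only needs to be unique
+ * enough to serve as a chunk file name, it is not a cryptographic hash.
+ */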
+
+/* Retrieve a candidate s3 local file using the tag */
+struct s3_file *s3_store_file_get(struct flb_s3 *ctx, const char *tag,
+ int tag_len)
+{
+ struct mk_list *head;
+ struct mk_list *tmp;
+ struct flb_fstore_file *fsf = NULL;
+ struct s3_file *s3_file;
+
+ /*
+ * Based on the current ctx->stream_name, locate a candidate file to
+ * store the incoming data, using the content Tag as the lookup pattern.
+ */
+ mk_list_foreach_safe(head, tmp, &ctx->stream_active->files) {
+ fsf = mk_list_entry(head, struct flb_fstore_file, _head);
+
+ /* skip and warn on partially initialized chunks */
+ if (fsf->data == NULL) {
+ flb_plg_warn(ctx->ins, "BAD: found flb_fstore_file with NULL data reference, tag=%s, file=%s, will try to delete", tag, fsf->name);
+ flb_fstore_file_delete(ctx->fs, fsf);
+ /* the file entry was just deleted; invalidate the reference and keep scanning */
+ fsf = NULL;
+ continue;
+ }
+
+ if (fsf->meta_size != tag_len) {
+ fsf = NULL;
+ continue;
+ }
+
+ /* skip locked chunks */
+ s3_file = fsf->data;
+ if (s3_file->locked == FLB_TRUE) {
+ fsf = NULL;
+ continue;
+ }
+
+ /* compare meta and tag */
+ if (strncmp((char *) fsf->meta_buf, tag, tag_len) == 0) {
+ break;
+ }
+
+ /* not found, invalidate the reference */
+ fsf = NULL;
+ }
+
+ if (!fsf) {
+ return NULL;
+ }
+
+ return fsf->data;
+}
+
+/* Append data to a new or existing fstore file */
+int s3_store_buffer_put(struct flb_s3 *ctx, struct s3_file *s3_file,
+ const char *tag, int tag_len,
+ char *data, size_t bytes,
+ time_t file_first_log_time)
+{
+ int ret;
+ flb_sds_t name;
+ struct flb_fstore_file *fsf;
+ size_t space_remaining;
+
+ if (ctx->store_dir_limit_size > 0 && ctx->current_buffer_size + bytes >= ctx->store_dir_limit_size) {
+ flb_plg_error(ctx->ins, "Buffer is full: current_buffer_size=%zu, new_data=%zu, store_dir_limit_size=%zu bytes",
+ ctx->current_buffer_size, bytes, ctx->store_dir_limit_size);
+ return -1;
+ }
+
+ /* If no target file was found, create a new one */
+ if (!s3_file) {
+ name = gen_store_filename(tag);
+ if (!name) {
+ flb_plg_error(ctx->ins, "could not generate chunk file name");
+ return -1;
+ }
+
+ /* Create the file */
+ fsf = flb_fstore_file_create(ctx->fs, ctx->stream_active, name, bytes);
+ if (!fsf) {
+ flb_plg_error(ctx->ins, "could not create the file '%s' in the store",
+ name);
+ flb_sds_destroy(name);
+ return -1;
+ }
+ flb_sds_destroy(name);
+
+ /* Write tag as metadata */
+ ret = flb_fstore_file_meta_set(ctx->fs, fsf, (char *) tag, tag_len);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error writing tag metadata");
+ flb_plg_warn(ctx->ins, "Deleting buffer file because metadata could not be written");
+ flb_fstore_file_delete(ctx->fs, fsf);
+ return -1;
+ }
+
+ /* Allocate local context */
+ s3_file = flb_calloc(1, sizeof(struct s3_file));
+ if (!s3_file) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "cannot allocate s3 file context");
+ flb_plg_warn(ctx->ins, "Deleting buffer file because S3 context creation failed");
+ flb_fstore_file_delete(ctx->fs, fsf);
+ return -1;
+ }
+ s3_file->fsf = fsf;
+ s3_file->first_log_time = file_first_log_time;
+ s3_file->create_time = time(NULL);
+
+ /* Use fstore opaque 'data' reference to keep our context */
+ fsf->data = s3_file;
+ }
+ else {
+ fsf = s3_file->fsf;
+ }
+
+ /* Append data to the target file */
+ ret = flb_fstore_file_append(fsf, data, bytes);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "error writing data to local s3 file");
+ return -1;
+ }
+ s3_file->size += bytes;
+ ctx->current_buffer_size += bytes;
+
+ /* if buffer is 95% full, warn user */
+ if (ctx->store_dir_limit_size > 0) {
+ space_remaining = ctx->store_dir_limit_size - ctx->current_buffer_size;
+ if ((space_remaining * 20) < ctx->store_dir_limit_size) {
+ flb_plg_warn(ctx->ins, "Buffer is almost full: current_buffer_size=%zu, store_dir_limit_size=%zu bytes",
+ ctx->current_buffer_size, ctx->store_dir_limit_size);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int set_files_context(struct flb_s3 *ctx)
+{
+ struct mk_list *head;
+ struct mk_list *f_head;
+ struct flb_fstore_stream *fs_stream;
+ struct flb_fstore_file *fsf;
+ struct s3_file *s3_file;
+
+ mk_list_foreach(head, &ctx->fs->streams) {
+ fs_stream = mk_list_entry(head, struct flb_fstore_stream, _head);
+
+ /* skip current stream since it's new */
+ if (fs_stream == ctx->stream_active) {
+ continue;
+ }
+
+ /* skip multi-upload */
+ if (fs_stream == ctx->stream_upload) {
+ continue;
+ }
+
+ mk_list_foreach(f_head, &fs_stream->files) {
+ fsf = mk_list_entry(f_head, struct flb_fstore_file, _head);
+ if (fsf->data) {
+ continue;
+ }
+
+ /* Allocate local context */
+ s3_file = flb_calloc(1, sizeof(struct s3_file));
+ if (!s3_file) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "cannot allocate s3 file context");
+ continue;
+ }
+ s3_file->fsf = fsf;
+ s3_file->first_log_time = time(NULL);
+ s3_file->create_time = time(NULL);
+
+ /* Use fstore opaque 'data' reference to keep our context */
+ fsf->data = s3_file;
+ }
+ }
+
+ return 0;
+}
+
+/* Initialize filesystem storage for S3 plugin */
+int s3_store_init(struct flb_s3 *ctx)
+{
+ int type;
+ time_t now;
+ char tmp[64];
+ struct tm *tm;
+ struct flb_fstore *fs;
+ struct flb_fstore_stream *fs_stream;
+
+ if (s3_store_under_travis_ci() == FLB_TRUE) {
+ type = FLB_FSTORE_MEM;
+ flb_plg_warn(ctx->ins, "Travis CI test, using s3 store memory backend");
+ }
+ else {
+ type = FLB_FSTORE_FS;
+ }
+
+ /* Initialize the storage context */
+ fs = flb_fstore_create(ctx->buffer_dir, type);
+ if (!fs) {
+ return -1;
+ }
+ ctx->fs = fs;
+
+ /*
+ * On every start we create a new stream; in the file system this stream
+ * is a directory named after the date, like '2020-10-03T13:00:02'. All
+ * the 'new' data generated by this process is stored there.
+ *
+ * Note that data in similar directories from previous runs is
+ * considered backlog data; in the S3 plugin we need to differentiate
+ * new vs. older buffered data.
+ *
+ * Compose a stream name...
+ */
+ now = time(NULL);
+ tm = localtime(&now);
+
+#ifdef FLB_SYSTEM_WINDOWS
+ /* Windows does not allow ':' in directory names */
+ strftime(tmp, sizeof(tmp) - 1, "%Y-%m-%dT%H-%M-%S", tm);
+#else
+ strftime(tmp, sizeof(tmp) - 1, "%Y-%m-%dT%H:%M:%S", tm);
+#endif
+
+ /* Create the stream */
+ fs_stream = flb_fstore_stream_create(ctx->fs, tmp);
+ if (!fs_stream) {
+ /* Upon exception abort */
+ flb_plg_error(ctx->ins, "could not initialize active stream: %s", tmp);
+ flb_fstore_destroy(fs);
+ ctx->fs = NULL;
+ return -1;
+ }
+ ctx->stream_active = fs_stream;
+
+ /* Multipart upload stream */
+ fs_stream = flb_fstore_stream_create(ctx->fs, "multipart_upload_metadata");
+ if (!fs_stream) {
+ flb_plg_error(ctx->ins, "could not initialize multipart_upload stream");
+ flb_fstore_destroy(fs);
+ ctx->fs = NULL;
+ return -1;
+ }
+ ctx->stream_upload = fs_stream;
+
+ set_files_context(ctx);
+ return 0;
+}
+
+int s3_store_exit(struct flb_s3 *ctx)
+{
+ struct mk_list *head;
+ struct mk_list *f_head;
+ struct flb_fstore_stream *fs_stream;
+ struct flb_fstore_file *fsf;
+ struct s3_file *s3_file;
+
+ if (!ctx->fs) {
+ return 0;
+ }
+
+ /* release local context on non-multi upload files */
+ mk_list_foreach(head, &ctx->fs->streams) {
+ fs_stream = mk_list_entry(head, struct flb_fstore_stream, _head);
+ if (fs_stream == ctx->stream_upload) {
+ continue;
+ }
+
+ mk_list_foreach(f_head, &fs_stream->files) {
+ fsf = mk_list_entry(f_head, struct flb_fstore_file, _head);
+ if (fsf->data != NULL) {
+ s3_file = fsf->data;
+ flb_sds_destroy(s3_file->file_path);
+ flb_free(s3_file);
+ }
+ }
+ }
+
+ if (ctx->fs) {
+ flb_fstore_destroy(ctx->fs);
+ }
+ return 0;
+}
+
+/*
+ * Check if the store has data. This function is only used on plugin
+ * initialization
+ */
+int s3_store_has_data(struct flb_s3 *ctx)
+{
+ struct mk_list *head;
+ struct flb_fstore_stream *fs_stream;
+
+ if (!ctx->fs) {
+ return FLB_FALSE;
+ }
+
+ mk_list_foreach(head, &ctx->fs->streams) {
+ /* skip multi upload stream */
+ fs_stream = mk_list_entry(head, struct flb_fstore_stream, _head);
+ if (fs_stream == ctx->stream_upload) {
+ continue;
+ }
+
+ if (mk_list_size(&fs_stream->files) > 0) {
+ return FLB_TRUE;
+ }
+ }
+
+ return FLB_FALSE;
+}
+
+int s3_store_has_uploads(struct flb_s3 *ctx)
+{
+ if (!ctx || !ctx->stream_upload) {
+ return FLB_FALSE;
+ }
+
+ if (mk_list_size(&ctx->stream_upload->files) > 0) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+int s3_store_file_inactive(struct flb_s3 *ctx, struct s3_file *s3_file)
+{
+ int ret;
+ struct flb_fstore_file *fsf;
+
+ fsf = s3_file->fsf;
+ flb_free(s3_file);
+ ret = flb_fstore_file_inactive(ctx->fs, fsf);
+
+ return ret;
+}
+
+int s3_store_file_delete(struct flb_s3 *ctx, struct s3_file *s3_file)
+{
+ struct flb_fstore_file *fsf;
+
+ fsf = s3_file->fsf;
+ ctx->current_buffer_size -= s3_file->size;
+
+ /* permanent deletion */
+ flb_fstore_file_delete(ctx->fs, fsf);
+ flb_free(s3_file);
+
+ return 0;
+}
+
+int s3_store_file_read(struct flb_s3 *ctx, struct s3_file *s3_file,
+ char **out_buf, size_t *out_size)
+{
+ int ret;
+
+ ret = flb_fstore_file_content_copy(ctx->fs, s3_file->fsf,
+ (void **) out_buf, out_size);
+ return ret;
+}
+
+int s3_store_file_upload_read(struct flb_s3 *ctx, struct flb_fstore_file *fsf,
+ char **out_buf, size_t *out_size)
+{
+ int ret;
+
+ ret = flb_fstore_file_content_copy(ctx->fs, fsf,
+ (void **) out_buf, out_size);
+ return ret;
+}
+
+struct flb_fstore_file *s3_store_file_upload_get(struct flb_s3 *ctx,
+ char *key, int key_len)
+{
+ struct mk_list *head;
+ struct flb_fstore_file *fsf = NULL;
+
+ mk_list_foreach(head, &ctx->stream_upload->files) {
+ fsf = mk_list_entry(head, struct flb_fstore_file, _head);
+ if (fsf->meta_buf == NULL) {
+ continue;
+ }
+
+ if (fsf->meta_size != key_len) {
+ continue;
+ }
+
+ if (strncmp(fsf->meta_buf, key, key_len) == 0) {
+ break;
+ }
+ fsf = NULL;
+ }
+
+ return fsf;
+}
+
+/* param fsf can be NULL if the file has not yet been created */
+int s3_store_file_upload_put(struct flb_s3 *ctx,
+ struct flb_fstore_file *fsf, flb_sds_t key,
+ flb_sds_t data)
+{
+ int ret;
+ flb_sds_t name;
+
+ /* If no target file was found, create a new one */
+ if (!fsf) {
+ name = gen_store_filename(key);
+ if (!name) {
+ flb_plg_error(ctx->ins, "could not generate chunk file name");
+ return -1;
+ }
+
+ /* Create the file */
+ fsf = flb_fstore_file_create(ctx->fs, ctx->stream_upload, name, flb_sds_len(data));
+ if (!fsf) {
+ flb_plg_error(ctx->ins, "could not create the file '%s' in the upload store",
+ name);
+ flb_sds_destroy(name);
+ return -1;
+ }
+ flb_sds_destroy(name);
+
+ /* Write key as metadata */
+ ret = flb_fstore_file_meta_set(ctx->fs, fsf,
+ key, flb_sds_len(key));
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error writing upload metadata");
+ flb_plg_warn(ctx->ins, "Deleting s3 upload cache file because metadata could not be written");
+ flb_fstore_file_delete(ctx->fs, fsf);
+ return -1;
+ }
+ }
+
+ /* Append data to the target file */
+ ret = flb_fstore_file_append(fsf, data, flb_sds_len(data));
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "error writing data to local s3 file");
+ return -1;
+ }
+
+ return 0;
+}
+
+int s3_store_file_upload_delete(struct flb_s3 *ctx, struct flb_fstore_file *fsf)
+{
+ /* permanent deletion */
+ flb_fstore_file_delete(ctx->fs, fsf);
+ return 0;
+}
+
+/* Always set an updated copy of metadata into the fs_store_file entry */
+int s3_store_file_meta_get(struct flb_s3 *ctx, struct flb_fstore_file *fsf)
+{
+ return flb_fstore_file_meta_get(ctx->fs, fsf);
+}
+
+void s3_store_file_lock(struct s3_file *s3_file)
+{
+ s3_file->locked = FLB_TRUE;
+}
+
+void s3_store_file_unlock(struct s3_file *s3_file)
+{
+ s3_file->locked = FLB_FALSE;
+}
diff --git a/src/fluent-bit/plugins/out_s3/s3_store.h b/src/fluent-bit/plugins/out_s3/s3_store.h
new file mode 100644
index 000000000..9caa7bdf4
--- /dev/null
+++ b/src/fluent-bit/plugins/out_s3/s3_store.h
@@ -0,0 +1,68 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_S3_STORE_H
+#define FLB_S3_STORE_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_fstore.h>
+
+struct s3_file {
+ int locked; /* locked chunk is busy, cannot write to it */
+ int failures; /* delivery failures */
+ size_t size; /* file size */
+ time_t create_time; /* creation time */
+ time_t first_log_time; /* first log time */
+ flb_sds_t file_path; /* file path */
+ struct flb_fstore_file *fsf; /* reference to parent flb_fstore_file */
+};
+
+int s3_store_buffer_put(struct flb_s3 *ctx, struct s3_file *s3_file,
+ const char *tag, int tag_len,
+ char *data, size_t bytes,
+ time_t file_first_log_time);
+
+int s3_store_init(struct flb_s3 *ctx);
+int s3_store_exit(struct flb_s3 *ctx);
+
+int s3_store_has_data(struct flb_s3 *ctx);
+int s3_store_has_uploads(struct flb_s3 *ctx);
+
+int s3_store_file_inactive(struct flb_s3 *ctx, struct s3_file *s3_file);
+struct s3_file *s3_store_file_get(struct flb_s3 *ctx, const char *tag,
+ int tag_len);
+int s3_store_file_delete(struct flb_s3 *ctx, struct s3_file *s3_file);
+int s3_store_file_read(struct flb_s3 *ctx, struct s3_file *s3_file,
+ char **out_buf, size_t *out_size);
+int s3_store_file_upload_read(struct flb_s3 *ctx, struct flb_fstore_file *fsf,
+ char **out_buf, size_t *out_size);
+struct flb_fstore_file *s3_store_file_upload_get(struct flb_s3 *ctx,
+ char *key, int key_len);
+
+int s3_store_file_upload_put(struct flb_s3 *ctx,
+ struct flb_fstore_file *fsf, flb_sds_t key,
+ flb_sds_t data);
+int s3_store_file_upload_delete(struct flb_s3 *ctx, struct flb_fstore_file *fsf);
+
+int s3_store_file_meta_get(struct flb_s3 *ctx, struct flb_fstore_file *fsf);
+
+void s3_store_file_lock(struct s3_file *s3_file);
+void s3_store_file_unlock(struct s3_file *s3_file);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_skywalking/CMakeLists.txt b/src/fluent-bit/plugins/out_skywalking/CMakeLists.txt
new file mode 100644
index 000000000..ad5a5845e
--- /dev/null
+++ b/src/fluent-bit/plugins/out_skywalking/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ skywalking.c)
+
+FLB_PLUGIN(out_skywalking "${src}" "")
diff --git a/src/fluent-bit/plugins/out_skywalking/skywalking.c b/src/fluent-bit/plugins/out_skywalking/skywalking.c
new file mode 100644
index 000000000..c5a9a1e2d
--- /dev/null
+++ b/src/fluent-bit/plugins/out_skywalking/skywalking.c
@@ -0,0 +1,427 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include "skywalking.h"
+
+#define DEFAULT_SW_OAP_HOST "127.0.0.1"
+#define DEFAULT_SW_OAP_PORT 12800
+#define DEFAULT_SW_SVC_NAME "sw-service"
+#define DEFAULT_SW_INS_NAME "fluent-bit"
+#define DEFAULT_SW_LOG_PATH "/v3/logs"
+
+static void sw_output_ctx_destroy(struct flb_output_sw* ctx) {
+ if (!ctx) {
+ return;
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ flb_sds_destroy(ctx->http_scheme);
+ flb_sds_destroy(ctx->uri);
+ flb_free(ctx);
+}
+
+static int cb_sw_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ int io_flags;
+ struct flb_output_sw *ctx;
+
+ /* Allocate plugin context */
+ ctx = flb_calloc(1, sizeof(struct flb_output_sw));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+
+ ctx->ins = ins;
+
+ ret = flb_output_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ sw_output_ctx_destroy(ctx);
+ return -1;
+ }
+
+ flb_output_net_default(DEFAULT_SW_OAP_HOST, DEFAULT_SW_OAP_PORT, ctx->ins);
+
+ ctx->uri = flb_sds_create(DEFAULT_SW_LOG_PATH);
+ if (!ctx->uri) {
+ flb_plg_error(ctx->ins, "failed to configure endpoint");
+ sw_output_ctx_destroy(ctx);
+ return -1;
+ }
+
+ if (!ctx->svc_name) {
+ flb_plg_error(ctx->ins, "failed to configure service name");
+ sw_output_ctx_destroy(ctx);
+ return -1;
+ }
+
+ if (!ctx->svc_inst_name) {
+ flb_plg_error(ctx->ins, "failed to configure instance name");
+ sw_output_ctx_destroy(ctx);
+ return -1;
+ }
+
+ flb_plg_debug(ctx->ins, "configured %s/%s", ctx->svc_name, ctx->svc_inst_name);
+ flb_plg_debug(ctx->ins, "OAP address is %s:%d", ins->host.name, ins->host.port);
+
+ /* scheme configuration */
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ ctx->http_scheme = flb_sds_create("https://");
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ ctx->http_scheme = flb_sds_create("http://");
+ }
+
+ /* configure upstream instance */
+ ctx->u = flb_upstream_create(config, ins->host.name, ins->host.port, io_flags, ins->tls);
+ if (!ctx->u) {
+ flb_plg_error(ctx->ins, "failed to create upstream context");
+ sw_output_ctx_destroy(ctx);
+ return -1;
+ }
+
+ flb_output_upstream_set(ctx->u, ins);
+
+ /* Set the plugin context */
+ flb_output_set_context(ins, ctx);
+ flb_output_set_http_debug_callbacks(ins);
+
+ return 0;
+}
+
+static int64_t timestamp_format(const struct flb_time* tms)
+{
+ int64_t timestamp = 0;
+
+ /* Format the time, use milliseconds precision not nanoseconds */
+ timestamp = tms->tm.tv_sec * 1000;
+ timestamp += tms->tm.tv_nsec / 1000000;
+
+ /* round up if necessary */
+ if (tms->tm.tv_nsec % 1000000 >= 500000) {
+ ++timestamp;
+ }
+ return timestamp;
+}
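+
+/*
+ * Example for timestamp_format() above: tv_sec=1, tv_nsec=1500000 (1.5 ms)
+ * gives 1000 + 1 = 1001 ms, then +1 for the 0.5 ms remainder, i.e. 1002 ms.
+ */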
+
+static void sw_msgpack_pack_kv_str(msgpack_packer* pk, const char* key,
+ size_t key_len, const char *value,
+ size_t value_len)
+{
+ msgpack_pack_str(pk, key_len);
+ msgpack_pack_str_body(pk, key, key_len);
+ msgpack_pack_str(pk, value_len);
+ msgpack_pack_str_body(pk, value, value_len);
+}
+
+static void sw_msgpack_pack_kv_int64_t(msgpack_packer* pk, const char* key,
+ size_t key_len, int64_t value)
+{
+ msgpack_pack_str(pk, key_len);
+ msgpack_pack_str_body(pk, key, key_len);
+ msgpack_pack_int64(pk, value);
+}
+
+static void sw_msgpack_pack_log_body(msgpack_packer* pk,
+ msgpack_object* obj, size_t obj_size)
+{
+ int i, j = 0;
+ int log_entry_num = 0;
+ msgpack_sbuffer sbuf;
+ msgpack_packer body_pk;
+ msgpack_object key;
+ msgpack_object value;
+ flb_sds_t out_body_str;
+ size_t out_body_str_len;
+ int* valid_log_entry = NULL;
+
+ valid_log_entry = (int*)flb_malloc(obj_size * sizeof(int));
+ if (!valid_log_entry) {
+ flb_errno();
+ return;
+ }
+
+ msgpack_sbuffer_init(&sbuf);
+ msgpack_packer_init(&body_pk, &sbuf, msgpack_sbuffer_write);
+
+ for (i = 0; i < obj_size; ++i) {
+ key = obj->via.map.ptr[i].key;
+ value = obj->via.map.ptr[i].val;
+
+ if (key.type != MSGPACK_OBJECT_STR ||
+ value.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ valid_log_entry[j] = i;
+ ++j;
+ ++log_entry_num;
+ }
+
+ msgpack_pack_map(&body_pk, log_entry_num);
+
+ for (i = 0; i < log_entry_num; ++i) {
+ key = obj->via.map.ptr[valid_log_entry[i]].key;
+ value = obj->via.map.ptr[valid_log_entry[i]].val;
+ sw_msgpack_pack_kv_str(&body_pk, key.via.str.ptr, key.via.str.size,
+ value.via.str.ptr, value.via.str.size);
+ }
+
+ out_body_str = flb_msgpack_raw_to_json_sds(sbuf.data, sbuf.size);
+ if (!out_body_str) {
+ msgpack_sbuffer_destroy(&sbuf);
+ flb_free(valid_log_entry);
+ return;
+ }
+ out_body_str_len = flb_sds_len(out_body_str);
+
+ msgpack_pack_str(pk, 4);
+ msgpack_pack_str_body(pk, "body", 4);
+ msgpack_pack_map(pk, 1);
+
+ /* body['json'] */
+ msgpack_pack_str(pk, 4);
+ msgpack_pack_str_body(pk, "json", 4);
+ msgpack_pack_map(pk, 1);
+
+ /* body['json']['json'] */
+ msgpack_pack_str(pk, 4);
+ msgpack_pack_str_body(pk, "json", 4);
+ msgpack_pack_str(pk, out_body_str_len);
+ msgpack_pack_str_body(pk, out_body_str, out_body_str_len);
+
+ flb_sds_destroy(out_body_str);
+ msgpack_sbuffer_destroy(&sbuf);
+ flb_free(valid_log_entry);
+}
+
+static int sw_format(struct flb_output_sw* ctx, const void *data, size_t bytes,
+ void** buf, size_t* buf_len)
+{
+ int ret = 0;
+ int chunk_size = 0;
+ uint32_t map_size;
+ msgpack_sbuffer sbuf;
+ msgpack_packer pk;
+ msgpack_object map;
+ int64_t timestamp;
+ flb_sds_t out_str;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ msgpack_sbuffer_init(&sbuf);
+ msgpack_packer_init(&pk, &sbuf, msgpack_sbuffer_write);
+
+ chunk_size = flb_mp_count(data, bytes);
+ flb_plg_debug(ctx->ins, "%i messages flushed", chunk_size);
+
+ msgpack_pack_array(&pk, chunk_size);
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ timestamp = timestamp_format(&log_event.timestamp);
+
+ map = *log_event.body;
+ map_size = map.via.map.size;
+
+ msgpack_pack_map(&pk, 4);
+
+ sw_msgpack_pack_kv_int64_t(&pk, "timestamp", 9, timestamp);
+ sw_msgpack_pack_kv_str(&pk, "service", 7, ctx->svc_name,
+ flb_sds_len(ctx->svc_name));
+ sw_msgpack_pack_kv_str(&pk, "serviceInstance", 15,
+ ctx->svc_inst_name, flb_sds_len(ctx->svc_inst_name));
+ sw_msgpack_pack_log_body(&pk, &map, map_size);
+ }
+
+ out_str = flb_msgpack_raw_to_json_sds(sbuf.data, sbuf.size);
+ if (!out_str) {
+ ret = -1;
+ goto done;
+ }
+ else {
+ ret = 0;
+ }
+
+ *buf = out_str;
+ *buf_len = flb_sds_len(out_str);
+
+done:
+ msgpack_sbuffer_destroy(&sbuf);
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return ret;
+}
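+
+/*
+ * Illustrative shape of the JSON array produced by sw_format() above
+ * (values are hypothetical; service and serviceInstance default to
+ * "sw-service" and "fluent-bit"):
+ *
+ *   [{"timestamp":1700000000000,
+ *     "service":"sw-service",
+ *     "serviceInstance":"fluent-bit",
+ *     "body":{"json":{"json":"{\"log\":\"hello world\"}"}}}]
+ */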
+
+static int mock_oap_request(struct flb_http_client* client, int mock_status)
+{
+ client->resp.status = mock_status;
+ return 0;
+}
+
+static bool check_sw_under_test()
+{
+ if (getenv("FLB_SW_PLUGIN_UNDER_TEST") != NULL) {
+ return FLB_TRUE;
+ }
+ return FLB_FALSE;
+}
+
+static void cb_sw_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context, struct flb_config *config)
+{
+ int flush_ret = -1;
+ int tmp_ret = -1;
+ struct flb_output_sw *ctx = out_context;
+ struct flb_connection *conn = NULL;
+ struct flb_http_client *client = NULL;
+ void* buf = NULL;
+ size_t buf_len;
+ size_t sent_size;
+
+ tmp_ret = sw_format(ctx,
+ event_chunk->data,
+ event_chunk->size,
+ &buf, &buf_len);
+ if (tmp_ret != 0) {
+ flb_plg_error(ctx->ins, "failed to create buffer");
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ conn = flb_upstream_conn_get(ctx->u);
+ if (!conn) {
+ flb_plg_error(ctx->ins, "failed to establish connection to %s:%i",
+ ctx->ins->host.name, ctx->ins->host.port);
+ flb_sds_destroy(buf);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ client = flb_http_client(conn, FLB_HTTP_POST, ctx->uri,
+ (const char*)buf, buf_len, ctx->ins->host.name, ctx->ins->host.port,
+ NULL, 0);
+ if (!client) {
+ flb_plg_error(ctx->ins, "failed to create HTTP client");
+ flb_sds_destroy(buf);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ if (ctx->auth_token && flb_sds_len(ctx->auth_token) != 0) {
+ flb_http_add_header(client, "Authentication", 14,
+ ctx->auth_token, strlen(ctx->auth_token));
+ }
+
+ flb_http_add_header(client, "Content-Type", 12,
+ "application/json", 16);
+ flb_http_add_header(client, "User-Agent", 10,
+ "Fluent-Bit", 10);
+
+ if (check_sw_under_test() == FLB_TRUE) {
+ tmp_ret = mock_oap_request(client, 200);
+ }
+ else {
+ tmp_ret = flb_http_do(client, &sent_size);
+ }
+
+ if (tmp_ret == 0) {
+ flb_plg_debug(ctx->ins, "%s:%i, HTTP status=%i", ctx->ins->host.name,
+ ctx->ins->host.port, client->resp.status);
+
+ if (client->resp.status < 200 || client->resp.status > 205) {
+ flush_ret = FLB_RETRY;
+ }
+ else {
+ flush_ret = FLB_OK;
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "failed to flush buffer to %s:%i",
+ ctx->ins->host.name, ctx->ins->host.port);
+ flush_ret = FLB_RETRY;
+ }
+
+ flb_sds_destroy(buf);
+ flb_http_client_destroy(client);
+ flb_upstream_conn_release(conn);
+
+ FLB_OUTPUT_RETURN(flush_ret);
+}
+
+static int cb_sw_exit(void *data, struct flb_config *config)
+{
+ struct flb_output_sw *ctx;
+
+ ctx = (struct flb_output_sw*)data;
+ sw_output_ctx_destroy(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "auth_token", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_output_sw, auth_token),
+ "Auth token for SkyWalking OAP"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "svc_name", DEFAULT_SW_SVC_NAME,
+ 0, FLB_TRUE, offsetof(struct flb_output_sw, svc_name),
+ "Service name"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "svc_inst_name", DEFAULT_SW_INS_NAME,
+ 0, FLB_TRUE, offsetof(struct flb_output_sw, svc_inst_name),
+ "Instance name"
+ },
+ {0}
+};
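+
+/*
+ * Illustrative configuration sketch (host and token are hypothetical
+ * examples; the defaults are 127.0.0.1:12800 with no auth token):
+ *
+ *   [OUTPUT]
+ *       name           skywalking
+ *       match          *
+ *       host           oap.example.com
+ *       port           12800
+ *       svc_name       my-service
+ *       svc_inst_name  my-instance
+ *       auth_token     my-token
+ */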
+
+struct flb_output_plugin out_skywalking_plugin = {
+ .name = "skywalking",
+ .description = "Send logs into log collector on SkyWalking OAP",
+ .cb_init = cb_sw_init,
+ .cb_flush = cb_sw_flush,
+ .cb_exit = cb_sw_exit,
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+ .config_map = config_map
+};
diff --git a/src/fluent-bit/plugins/out_skywalking/skywalking.h b/src/fluent-bit/plugins/out_skywalking/skywalking.h
new file mode 100644
index 000000000..554ab6912
--- /dev/null
+++ b/src/fluent-bit/plugins/out_skywalking/skywalking.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_SKYWALKING_H
+#define FLB_OUT_SKYWALKING_H
+
+#include <fluent-bit/flb_output_plugin.h>
+
+struct flb_output_sw {
+ /* Configured by user */
+ flb_sds_t auth_token;
+ flb_sds_t svc_name;
+ flb_sds_t svc_inst_name;
+
+ /* Upstream log collector context */
+ struct flb_upstream *u;
+
+ /* Output plugin instance */
+ struct flb_output_instance *ins;
+
+ flb_sds_t http_scheme;
+ flb_sds_t uri;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_slack/CMakeLists.txt b/src/fluent-bit/plugins/out_slack/CMakeLists.txt
new file mode 100644
index 000000000..b62a70472
--- /dev/null
+++ b/src/fluent-bit/plugins/out_slack/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ slack.c
+ )
+
+FLB_PLUGIN(out_slack "${src}" "")
diff --git a/src/fluent-bit/plugins/out_slack/slack.c b/src/fluent-bit/plugins/out_slack/slack.c
new file mode 100644
index 000000000..f014228e9
--- /dev/null
+++ b/src/fluent-bit/plugins/out_slack/slack.c
@@ -0,0 +1,336 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include "slack.h"
+
+#define FLB_HTTP_CONTENT_TYPE "Content-Type"
+#define FLB_HTTP_MIME_JSON "application/json"
+
+static int cb_slack_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ char *protocol = NULL;
+ char *host = NULL;
+ char *port = NULL;
+ char *uri = NULL;
+ struct flb_slack *ctx;
+ (void) config;
+ (void) data;
+
+ /* Allocate context */
+ ctx = flb_calloc(1, sizeof(struct flb_slack));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+
+ /* Set the plugin context */
+ flb_output_set_context(ins, ctx);
+
+ /* Create config map and validate expected parameters */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ /* Validate if the slack webhook is defined */
+ if (!ctx->webhook) {
+ flb_plg_error(ctx->ins, "the 'webhook' address has not been defined");
+ return -1;
+ }
+
+ /* Split the address */
+ ret = flb_utils_url_split(ctx->webhook, &protocol, &host, &port, &uri);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "could not process 'webhook' address");
+ return -1;
+ }
+
+ if (strcasecmp(protocol, "https") != 0) {
+ flb_plg_error(ctx->ins, "invalid protocol '%s', we expected 'https'",
+ protocol);
+ goto error;
+ }
+
+ if (!host) {
+ flb_plg_error(ctx->ins, "invalid slack host");
+ goto error;
+ }
+
+ if (!uri) {
+ flb_plg_error(ctx->ins, "slack webhook uri has not been defined");
+ goto error;
+ }
+
+ ctx->host = flb_sds_create(host);
+ ctx->uri = flb_sds_create(uri);
+
+ if (port) {
+ ctx->port = atoi(port);
+ }
+ else {
+ ctx->port = 443;
+ }
+
+ /* Create upstream context */
+ ctx->u = flb_upstream_create(config,
+ ctx->host,
+ ctx->port,
+ FLB_IO_TLS, ins->tls);
+ if (!ctx->u) {
+ flb_plg_error(ctx->ins, "error creating upstream context");
+ goto error;
+ }
+
+ /* Set instance flags into upstream */
+ flb_output_upstream_set(ctx->u, ins);
+
+ /* Cleanup */
+ if (protocol) {
+ flb_free(protocol);
+ }
+ if (host) {
+ flb_free(host);
+ }
+ if (port) {
+ flb_free(port);
+ }
+ if (uri) {
+ flb_free(uri);
+ }
+
+ return 0;
+
+error:
+ if (protocol) {
+ flb_free(protocol);
+ }
+ if (host) {
+ flb_free(host);
+ }
+ if (port) {
+ flb_free(port);
+ }
+ if (uri) {
+ flb_free(uri);
+ }
+
+ return -1;
+}
+
+static void cb_slack_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int len;
+ int ret;
+ int out_ret = FLB_OK;
+ size_t size;
+ size_t printed = 0;
+ size_t b_sent;
+ flb_sds_t json;
+ flb_sds_t out_buf;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ struct flb_http_client *c;
+ struct flb_connection *u_conn;
+ struct flb_slack *ctx = out_context;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ size = event_chunk->size * 4;
+ json = flb_sds_create_size(size);
+ if (!json) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ memset(json, '\0', size);
+
+ ret = flb_log_event_decoder_init(&log_decoder,
+ (char *) event_chunk->data,
+ event_chunk->size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ flb_sds_destroy(json);
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+
+ ret = snprintf(json + printed, size - printed,
+ "[\"timestamp\": %" PRIu32 ".%09lu, ",
+ (uint32_t) log_event.timestamp.tm.tv_sec,
+ log_event.timestamp.tm.tv_nsec);
+ printed += ret;
+
+ ret = msgpack_object_print_buffer(json + printed,
+ size - printed,
+ *log_event.body);
+ if (ret < 0) {
+ flb_plg_error(ctx->ins, "error formatting payload");
+ flb_sds_destroy(json);
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* the previous call returns the number of bytes written into the buffer */
+ printed += ret;
+ json[printed++] = ']';
+ json[printed++] = '\n';
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ /* Take formatted message and convert it to msgpack */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ len = strlen(json);
+
+ msgpack_pack_map(&mp_pck, 1);
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "text", 4);
+ msgpack_pack_str(&mp_pck, len);
+ msgpack_pack_str_body(&mp_pck, json, len);
+
+ /* Release buffer */
+ flb_sds_destroy(json);
+
+ /* Re-format msgpack as JSON */
+ out_buf = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+ if (!out_buf) {
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ /* Create upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ flb_sds_destroy(out_buf);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Create HTTP client context */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->uri,
+ out_buf, flb_sds_len(out_buf),
+ ctx->host, ctx->port,
+ NULL, 0);
+ flb_http_add_header(c,
+ FLB_HTTP_CONTENT_TYPE,
+ sizeof(FLB_HTTP_CONTENT_TYPE) - 1,
+ FLB_HTTP_MIME_JSON,
+ sizeof(FLB_HTTP_MIME_JSON) - 1);
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+
+ ret = flb_http_do(c, &b_sent);
+ if (ret == 0) {
+ if (c->resp.status < 200 || c->resp.status > 205) {
+ flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->host, ctx->port, c->resp.status);
+ out_ret = FLB_RETRY;
+ }
+ else {
+ if (c->resp.payload) {
+ flb_plg_info(ctx->ins, "%s:%i, HTTP status=%i\n%s",
+ ctx->host, ctx->port,
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_info(ctx->ins, "%s:%i, HTTP status=%i",
+ ctx->host, ctx->port,
+ c->resp.status);
+ }
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "could not flush records to %s:%i (http_do=%i)",
+ ctx->host, ctx->port, ret);
+ out_ret = FLB_RETRY;
+ }
+
+ flb_upstream_conn_release(u_conn);
+ flb_http_client_destroy(c);
+ flb_sds_destroy(out_buf);
+ FLB_OUTPUT_RETURN(out_ret);
+}
+
+static int cb_slack_exit(void *data, struct flb_config *config)
+{
+ struct flb_slack *ctx;
+
+ ctx = (struct flb_slack *) data;
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->host) {
+ flb_sds_destroy(ctx->host);
+ }
+ if (ctx->uri) {
+ flb_sds_destroy(ctx->uri);
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ flb_free(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "webhook", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_slack, webhook),
+ NULL
+ },
+
+ /* EOF */
+ {0}
+};
+
+struct flb_output_plugin out_slack_plugin = {
+ .name = "slack",
+ .description = "Send events to a Slack channel",
+ .cb_init = cb_slack_init,
+ .cb_flush = cb_slack_flush,
+ .cb_exit = cb_slack_exit,
+ .flags = FLB_OUTPUT_NET | FLB_IO_TLS,
+ .config_map = config_map
+};
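
The init path above relies on flb_utils_url_split() to break the webhook address into four heap-allocated parts that the caller must free individually. A minimal sketch of that contract, using only calls that already appear in this file; the example URL, the helper name and the include list are assumptions for illustration and not part of the patch:

    #include <fluent-bit/flb_utils.h>
    #include <fluent-bit/flb_mem.h>

    /* Sketch: split a webhook URL the same way cb_slack_init() does */
    static int split_webhook_sketch(void)
    {
        int ret;
        char *protocol = NULL;  /* e.g. "https" */
        char *host = NULL;      /* e.g. "hooks.example.com" (placeholder host) */
        char *port = NULL;      /* stays NULL when the URL has no explicit port */
        char *uri = NULL;       /* e.g. "/services/T000/B000/XXX" */

        ret = flb_utils_url_split("https://hooks.example.com/services/T000/B000/XXX",
                                  &protocol, &host, &port, &uri);
        if (ret == -1) {
            return -1;
        }

        /* ... validate protocol/host/uri as the init callback does ... */

        /* every non-NULL part is owned by the caller and must be freed */
        flb_free(protocol);
        flb_free(host);
        if (port) {
            flb_free(port);
        }
        flb_free(uri);
        return 0;
    }
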
diff --git a/src/fluent-bit/plugins/out_slack/slack.h b/src/fluent-bit/plugins/out_slack/slack.h
new file mode 100644
index 000000000..e7babc263
--- /dev/null
+++ b/src/fluent-bit/plugins/out_slack/slack.h
@@ -0,0 +1,43 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_SLACK_H
+#define FLB_OUT_SLACK_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_upstream.h>
+
+struct flb_slack {
+ /* full webhook address */
+ flb_sds_t webhook;
+
+ /* processed webhook */
+ flb_sds_t host;
+ int port;
+ flb_sds_t uri;
+
+ /* upstream context */
+ struct flb_upstream *u;
+
+ /* Plugin instance */
+ struct flb_output_instance *ins;
+};
+
+#endif
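
Both cb_slack_flush() above and the Splunk formatter further down walk an event chunk with the same decoder idiom: initialize the decoder over the raw chunk, iterate the records, then destroy it. A stripped-down sketch of that loop with the per-record work left as a comment; the wrapper function and its include list are illustrative, not part of the patch:

    #include <fluent-bit/flb_log_event_decoder.h>

    static int iterate_chunk_sketch(const char *data, size_t bytes)
    {
        int ret;
        struct flb_log_event_decoder log_decoder;
        struct flb_log_event log_event;

        ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
        if (ret != FLB_EVENT_DECODER_SUCCESS) {
            return -1;
        }

        while ((ret = flb_log_event_decoder_next(&log_decoder,
                                                 &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
            /* log_event.timestamp carries the record time and
             * log_event.body points at the msgpack map with the record keys;
             * this is where each output plugin formats its payload */
        }

        flb_log_event_decoder_destroy(&log_decoder);
        return 0;
    }
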
diff --git a/src/fluent-bit/plugins/out_splunk/CMakeLists.txt b/src/fluent-bit/plugins/out_splunk/CMakeLists.txt
new file mode 100644
index 000000000..da66bca70
--- /dev/null
+++ b/src/fluent-bit/plugins/out_splunk/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ splunk_conf.c
+ splunk.c
+ )
+
+FLB_PLUGIN(out_splunk "${src}" "")
diff --git a/src/fluent-bit/plugins/out_splunk/splunk.c b/src/fluent-bit/plugins/out_splunk/splunk.c
new file mode 100644
index 000000000..d9c28380a
--- /dev/null
+++ b/src/fluent-bit/plugins/out_splunk/splunk.c
@@ -0,0 +1,873 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_mp.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_ra_key.h>
+#include <fluent-bit/flb_metrics.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include <msgpack.h>
+#include "splunk.h"
+#include "splunk_conf.h"
+
+static int cb_splunk_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct flb_splunk *ctx;
+
+ ctx = flb_splunk_conf_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "configuration failed");
+ return -1;
+ }
+
+ flb_output_set_context(ins, ctx);
+
+ /*
+ * This plugin instance uses the HTTP client interface, let's register
+ * its debugging callbacks.
+ */
+ flb_output_set_http_debug_callbacks(ins);
+ return 0;
+}
+
+static int pack_map_meta(struct flb_splunk *ctx,
+ struct flb_mp_map_header *mh,
+ msgpack_packer *mp_pck,
+ msgpack_object map,
+ char *tag, int tag_len)
+{
+ int index_key_set = FLB_FALSE;
+ int sourcetype_key_set = FLB_FALSE;
+ flb_sds_t str;
+ struct mk_list *head;
+ struct flb_splunk_field *f;
+ struct flb_mp_map_header mh_fields;
+ struct flb_ra_value *rval;
+
+ /* event host */
+ if (ctx->event_host) {
+ str = flb_ra_translate(ctx->ra_event_host, tag, tag_len,
+ map, NULL);
+ if (str) {
+ if (flb_sds_len(str) > 0) {
+ flb_mp_map_header_append(mh);
+ msgpack_pack_str(mp_pck, sizeof(FLB_SPLUNK_DEFAULT_EVENT_HOST) -1);
+ msgpack_pack_str_body(mp_pck,
+ FLB_SPLUNK_DEFAULT_EVENT_HOST,
+ sizeof(FLB_SPLUNK_DEFAULT_EVENT_HOST) - 1);
+ msgpack_pack_str(mp_pck, flb_sds_len(str));
+ msgpack_pack_str_body(mp_pck, str, flb_sds_len(str));
+ }
+ flb_sds_destroy(str);
+ }
+ }
+
+ /* event source */
+ if (ctx->event_source) {
+ str = flb_ra_translate(ctx->ra_event_source, tag, tag_len,
+ map, NULL);
+ if (str) {
+ if (flb_sds_len(str) > 0) {
+ flb_mp_map_header_append(mh);
+ msgpack_pack_str(mp_pck, sizeof(FLB_SPLUNK_DEFAULT_EVENT_SOURCE) -1);
+ msgpack_pack_str_body(mp_pck,
+ FLB_SPLUNK_DEFAULT_EVENT_SOURCE,
+ sizeof(FLB_SPLUNK_DEFAULT_EVENT_SOURCE) - 1);
+ msgpack_pack_str(mp_pck, flb_sds_len(str));
+ msgpack_pack_str_body(mp_pck, str, flb_sds_len(str));
+ }
+ flb_sds_destroy(str);
+ }
+ }
+
+ /* event sourcetype (key lookup) */
+ if (ctx->event_sourcetype_key) {
+ str = flb_ra_translate(ctx->ra_event_sourcetype_key, tag, tag_len,
+ map, NULL);
+ if (str) {
+ /* sourcetype_key was found */
+ if (flb_sds_len(str) > 0) {
+ flb_mp_map_header_append(mh);
+ msgpack_pack_str(mp_pck, sizeof(FLB_SPLUNK_DEFAULT_EVENT_SOURCET) -1);
+ msgpack_pack_str_body(mp_pck,
+ FLB_SPLUNK_DEFAULT_EVENT_SOURCET,
+ sizeof(FLB_SPLUNK_DEFAULT_EVENT_SOURCET) - 1);
+ msgpack_pack_str(mp_pck, flb_sds_len(str));
+ msgpack_pack_str_body(mp_pck, str, flb_sds_len(str));
+ sourcetype_key_set = FLB_TRUE;
+ }
+ flb_sds_destroy(str);
+ }
+ /* If not found, it will fall back to the value set in event_sourcetype */
+ }
+
+ if (sourcetype_key_set == FLB_FALSE && ctx->event_sourcetype) {
+ flb_mp_map_header_append(mh);
+ msgpack_pack_str(mp_pck, sizeof(FLB_SPLUNK_DEFAULT_EVENT_SOURCET) -1);
+ msgpack_pack_str_body(mp_pck,
+ FLB_SPLUNK_DEFAULT_EVENT_SOURCET,
+ sizeof(FLB_SPLUNK_DEFAULT_EVENT_SOURCET) - 1);
+ msgpack_pack_str(mp_pck, flb_sds_len(ctx->event_sourcetype));
+ msgpack_pack_str_body(mp_pck,
+ ctx->event_sourcetype, flb_sds_len(ctx->event_sourcetype));
+ }
+
+ /* event index (key lookup) */
+ if (ctx->event_index_key) {
+ str = flb_ra_translate(ctx->ra_event_index_key, tag, tag_len,
+ map, NULL);
+ if (str) {
+ /* index_key was found */
+ if (flb_sds_len(str) > 0) {
+ flb_mp_map_header_append(mh);
+ msgpack_pack_str(mp_pck, sizeof(FLB_SPLUNK_DEFAULT_EVENT_INDEX) -1);
+ msgpack_pack_str_body(mp_pck,
+ FLB_SPLUNK_DEFAULT_EVENT_INDEX,
+ sizeof(FLB_SPLUNK_DEFAULT_EVENT_INDEX) - 1);
+ msgpack_pack_str(mp_pck, flb_sds_len(str));
+ msgpack_pack_str_body(mp_pck, str, flb_sds_len(str));
+ index_key_set = FLB_TRUE;
+ }
+ flb_sds_destroy(str);
+ }
+ /* If not found, it will fall back to the value set in event_index */
+ }
+
+ if (index_key_set == FLB_FALSE && ctx->event_index) {
+ flb_mp_map_header_append(mh);
+ msgpack_pack_str(mp_pck, sizeof(FLB_SPLUNK_DEFAULT_EVENT_INDEX) -1);
+ msgpack_pack_str_body(mp_pck,
+ FLB_SPLUNK_DEFAULT_EVENT_INDEX,
+ sizeof(FLB_SPLUNK_DEFAULT_EVENT_INDEX) - 1);
+ msgpack_pack_str(mp_pck, flb_sds_len(ctx->event_index));
+ msgpack_pack_str_body(mp_pck,
+ ctx->event_index, flb_sds_len(ctx->event_index));
+ }
+
+ /* event 'fields' */
+ if (mk_list_size(&ctx->fields) > 0) {
+ flb_mp_map_header_append(mh);
+ msgpack_pack_str(mp_pck, sizeof(FLB_SPLUNK_DEFAULT_EVENT_FIELDS) -1);
+ msgpack_pack_str_body(mp_pck,
+ FLB_SPLUNK_DEFAULT_EVENT_FIELDS,
+ sizeof(FLB_SPLUNK_DEFAULT_EVENT_FIELDS) - 1);
+
+ /* Pack map */
+ flb_mp_map_header_init(&mh_fields, mp_pck);
+
+ mk_list_foreach(head, &ctx->fields) {
+ f = mk_list_entry(head, struct flb_splunk_field, _head);
+ rval = flb_ra_get_value_object(f->ra, map);
+ if (!rval) {
+ continue;
+ }
+
+ flb_mp_map_header_append(&mh_fields);
+
+ /* key */
+ msgpack_pack_str(mp_pck, flb_sds_len(f->key_name));
+ msgpack_pack_str_body(mp_pck, f->key_name, flb_sds_len(f->key_name));
+
+ /* value */
+ msgpack_pack_object(mp_pck, rval->o);
+ flb_ra_key_value_destroy(rval);
+ }
+ flb_mp_map_header_end(&mh_fields);
+ }
+
+ return 0;
+}
+
+static int pack_map(struct flb_splunk *ctx, msgpack_packer *mp_pck,
+ struct flb_time *tm, msgpack_object map,
+ char *tag, int tag_len)
+{
+ int i;
+ double t;
+ int map_size;
+ msgpack_object k;
+ msgpack_object v;
+ struct flb_mp_map_header mh;
+
+ t = flb_time_to_double(tm);
+ map_size = map.via.map.size;
+
+ if (ctx->splunk_send_raw == FLB_TRUE) {
+ msgpack_pack_map(mp_pck, map_size /* all k/v */);
+ }
+ else {
+ flb_mp_map_header_init(&mh, mp_pck);
+
+ /* Append the time key */
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(mp_pck, sizeof(FLB_SPLUNK_DEFAULT_TIME) -1);
+ msgpack_pack_str_body(mp_pck,
+ FLB_SPLUNK_DEFAULT_TIME,
+ sizeof(FLB_SPLUNK_DEFAULT_TIME) - 1);
+ msgpack_pack_double(mp_pck, t);
+
+ /* Pack Splunk metadata */
+ pack_map_meta(ctx, &mh, mp_pck, map, tag, tag_len);
+
+ /* Add k/v pairs under the key 'event' instead of to the top level object */
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(mp_pck, sizeof(FLB_SPLUNK_DEFAULT_EVENT) -1);
+ msgpack_pack_str_body(mp_pck,
+ FLB_SPLUNK_DEFAULT_EVENT,
+ sizeof(FLB_SPLUNK_DEFAULT_EVENT) - 1);
+
+ flb_mp_map_header_end(&mh);
+
+ msgpack_pack_map(mp_pck, map_size);
+ }
+
+ /* Append k/v */
+ for (i = 0; i < map_size; i++) {
+ k = map.via.map.ptr[i].key;
+ v = map.via.map.ptr[i].val;
+
+ msgpack_pack_object(mp_pck, k);
+ msgpack_pack_object(mp_pck, v);
+ }
+
+ return 0;
+}
+
+
+static inline int pack_event_key(struct flb_splunk *ctx, msgpack_packer *mp_pck,
+ struct flb_time *tm, msgpack_object map,
+ char *tag, int tag_len)
+{
+ double t;
+ struct flb_mp_map_header mh;
+ flb_sds_t val;
+
+ t = flb_time_to_double(tm);
+ val = flb_ra_translate(ctx->ra_event_key, tag, tag_len, map, NULL);
+ if (!val || flb_sds_len(val) == 0) {
+ if (val != NULL) {
+ flb_sds_destroy(val);
+ }
+
+ return -1;
+ }
+
+ if (ctx->splunk_send_raw == FLB_FALSE) {
+ flb_mp_map_header_init(&mh, mp_pck);
+
+ /* Append the time key */
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(mp_pck, sizeof(FLB_SPLUNK_DEFAULT_TIME) -1);
+ msgpack_pack_str_body(mp_pck,
+ FLB_SPLUNK_DEFAULT_TIME,
+ sizeof(FLB_SPLUNK_DEFAULT_TIME) - 1);
+ msgpack_pack_double(mp_pck, t);
+
+ /* Pack Splunk metadata */
+ pack_map_meta(ctx, &mh, mp_pck, map, tag, tag_len);
+
+ /* Add k/v pairs under the key 'event' instead of to the top level object */
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(mp_pck, sizeof(FLB_SPLUNK_DEFAULT_EVENT) -1);
+ msgpack_pack_str_body(mp_pck,
+ FLB_SPLUNK_DEFAULT_EVENT,
+ sizeof(FLB_SPLUNK_DEFAULT_EVENT) - 1);
+
+ flb_mp_map_header_end(&mh);
+ }
+
+ msgpack_pack_str(mp_pck, flb_sds_len(val));
+ msgpack_pack_str_body(mp_pck, val, flb_sds_len(val));
+ flb_sds_destroy(val);
+
+ return 0;
+}
+
+#ifdef FLB_HAVE_METRICS
+static inline int splunk_metrics_format(struct flb_output_instance *ins,
+ const void *in_buf, size_t in_bytes,
+ char **out_buf, size_t *out_size,
+ struct flb_splunk *ctx)
+{
+ int ret;
+ size_t off = 0;
+ cfl_sds_t text;
+ cfl_sds_t host;
+ struct cmt *cmt = NULL;
+
+ if (ctx->event_host != NULL) {
+ host = ctx->event_host;
+ }
+ else {
+ host = "localhost";
+ }
+
+ /* get cmetrics context */
+ ret = cmt_decode_msgpack_create(&cmt, (char *) in_buf, in_bytes, &off);
+ if (ret != 0) {
+ flb_plg_error(ins, "could not process metrics payload");
+ return -1;
+ }
+
+ /* convert to text representation */
+ text = cmt_encode_splunk_hec_create(cmt, host, ctx->event_index, ctx->event_source, ctx->event_sourcetype);
+
+ /* destroy cmt context */
+ cmt_destroy(cmt);
+
+ *out_buf = text;
+ *out_size = flb_sds_len(text);
+
+ return 0;
+}
+#endif
+
+static inline int splunk_format(const void *in_buf, size_t in_bytes,
+ char *tag, int tag_len,
+ char **out_buf, size_t *out_size,
+ struct flb_splunk *ctx)
+{
+ int ret;
+ msgpack_object map;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ char *err;
+ flb_sds_t tmp;
+ flb_sds_t record;
+ flb_sds_t json_out;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ json_out = flb_sds_create_size(in_bytes * 1.5);
+ if (!json_out) {
+ flb_errno();
+ return -1;
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) in_buf, in_bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ flb_sds_destroy(json_out);
+
+ return -1;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ map = *log_event.body;
+
+ if (ctx->event_key) {
+ /* Pack the value of an event key */
+ ret = pack_event_key(ctx, &mp_pck, &log_event.timestamp, map, tag, tag_len);
+ if (ret != 0) {
+ /*
+ * if pack_event_key fails due to missing content in the
+ * record, we just warn the user and try to pack it
+ * as a normal map.
+ */
+ ret = pack_map(ctx, &mp_pck, &log_event.timestamp, map, tag, tag_len);
+ }
+ }
+ else {
+ /* Pack as a map */
+ ret = pack_map(ctx, &mp_pck, &log_event.timestamp, map, tag, tag_len);
+ }
+
+ /* Validate packaging */
+ if (ret != 0) {
+ /* Format invalid record */
+ err = flb_msgpack_to_json_str(2048, &map);
+ if (err) {
+ /* Print error and continue processing other records */
+ flb_plg_warn(ctx->ins, "could not process or pack record: %s", err);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ flb_free(err);
+ }
+ continue;
+ }
+
+ /* Format as JSON */
+ record = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+ if (!record) {
+ flb_errno();
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_sds_destroy(json_out);
+ return -1;
+ }
+
+ /* On raw mode, append a breakline to every record */
+ if (ctx->splunk_send_raw) {
+ tmp = flb_sds_cat(record, "\n", 1);
+ if (tmp) {
+ record = tmp;
+ }
+ }
+
+ tmp = flb_sds_cat(json_out, record, flb_sds_len(record));
+ flb_sds_destroy(record);
+ if (tmp) {
+ json_out = tmp;
+ }
+ else {
+ flb_errno();
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_sds_destroy(json_out);
+ return -1;
+ }
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ }
+
+ *out_buf = json_out;
+ *out_size = flb_sds_len(json_out);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return 0;
+}
+
+static void debug_request_response(struct flb_splunk *ctx,
+ struct flb_http_client *c)
+{
+ int ret;
+ int uncompressed = FLB_FALSE;
+ time_t now;
+ void *tmp_buf = NULL;
+ size_t tmp_size;
+ size_t req_size;
+ char *req_buf = NULL;
+ struct tm result;
+ struct tm *current;
+ unsigned char *ptr;
+ flb_sds_t req_headers = NULL;
+ flb_sds_t req_body = NULL;
+
+ if (c->body_len > 3) {
+ ptr = (unsigned char *) c->body_buf;
+ if (ptr[0] == 0x1F && ptr[1] == 0x8B && ptr[2] == 0x08) {
+ /* uncompress payload */
+ ret = flb_gzip_uncompress((void *) c->body_buf, c->body_len,
+ &tmp_buf, &tmp_size);
+ if (ret == -1) {
+ fprintf(stdout, "[out_splunk] could not uncompress data\n");
+ }
+ else {
+ req_buf = (char *) tmp_buf;
+ req_size = tmp_size;
+ uncompressed = FLB_TRUE;
+ }
+ }
+ else {
+ req_buf = (char *) c->body_buf;
+ req_size = c->body_len;
+ }
+
+ /* create a safe buffer */
+ if (req_buf) {
+ req_body = flb_sds_create_len(req_buf, req_size);
+ }
+ }
+
+ req_headers = flb_sds_create_len(c->header_buf, c->header_len);
+
+ now = time(NULL);
+ current = localtime_r(&now, &result);
+
+ fprintf(stdout,
+ "[%i/%02i/%02i %02i:%02i:%02i] "
+ "[out_splunk] debug HTTP 400 (bad request)\n"
+ ">>> request\n"
+ "%s%s\n\n"
+ "<<< response\n"
+ "%s\n\n",
+
+ current->tm_year + 1900,
+ current->tm_mon + 1,
+ current->tm_mday,
+ current->tm_hour,
+ current->tm_min,
+ current->tm_sec,
+
+ req_headers,
+ req_body,
+ c->resp.data);
+
+ if (uncompressed) {
+ flb_free(tmp_buf);
+ }
+
+ if (req_headers) {
+ flb_sds_destroy(req_headers);
+ }
+ if (req_body) {
+ flb_sds_destroy(req_body);
+ }
+}
+
+static void cb_splunk_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ int compressed = FLB_FALSE;
+ size_t b_sent;
+ flb_sds_t buf_data;
+ size_t resp_size;
+ size_t buf_size;
+ char *endpoint;
+ struct flb_splunk *ctx = out_context;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ void *payload_buf;
+ size_t payload_size;
+ (void) i_ins;
+ (void) config;
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+#ifdef FLB_HAVE_METRICS
+ /* Check if the event type is metrics, handle the payload differently */
+ if (event_chunk->type == FLB_EVENT_TYPE_METRICS) {
+ ret = splunk_metrics_format(ctx->ins,
+ event_chunk->data,
+ event_chunk->size,
+ &buf_data, &buf_size, ctx);
+ }
+#endif
+ if (event_chunk->type == FLB_EVENT_TYPE_LOGS) {
+ /* Convert binary logs into a JSON payload */
+ ret = splunk_format(event_chunk->data,
+ event_chunk->size,
+ (char *) event_chunk->tag,
+ flb_sds_len(event_chunk->tag),
+ &buf_data, &buf_size, ctx);
+ }
+
+ if (ret == -1) {
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ /* Map buffer */
+ payload_buf = buf_data;
+ payload_size = buf_size;
+
+ /* Should we compress the payload ? */
+ if (ctx->compress_gzip == FLB_TRUE) {
+ ret = flb_gzip_compress((void *) buf_data, buf_size,
+ &payload_buf, &payload_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins,
+ "cannot gzip payload, disabling compression");
+ }
+ else {
+ compressed = FLB_TRUE;
+
+ /* JSON buffer is no longer needed */
+ flb_sds_destroy(buf_data);
+ }
+ }
+
+ /* Splunk URI endpoint */
+ if (ctx->splunk_send_raw) {
+ endpoint = FLB_SPLUNK_DEFAULT_URI_RAW;
+ }
+ else {
+ endpoint = FLB_SPLUNK_DEFAULT_URI_EVENT;
+ }
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, endpoint,
+ payload_buf, payload_size, NULL, 0, NULL, 0);
+
+ /* HTTP Response buffer size, honor value set by the user */
+ if (ctx->buffer_size > 0) {
+ flb_http_buffer_size(c, ctx->buffer_size);
+ }
+ else {
+ /*
+ * If no value was set, we try to accommodate it by using our post
+ * payload size * 1.5; that way we make room for large responses
+ * if something goes wrong, so we don't get a partial response.
+ */
+ resp_size = payload_size * 1.5;
+ if (resp_size < 4096) {
+ resp_size = 4096;
+ }
+ flb_http_buffer_size(c, resp_size);
+ }
+
+ /* HTTP Client */
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+
+ /* Try to use http_user and http_passwd if not, fallback to auth_header */
+ if (ctx->http_user && ctx->http_passwd) {
+ flb_http_basic_auth(c, ctx->http_user, ctx->http_passwd);
+ }
+ else if (ctx->auth_header) {
+ flb_http_add_header(c, "Authorization", 13,
+ ctx->auth_header, flb_sds_len(ctx->auth_header));
+ }
+
+ /* Append Channel identifier header */
+ if (ctx->channel) {
+ flb_http_add_header(c, FLB_SPLUNK_CHANNEL_IDENTIFIER_HEADER,
+ strlen(FLB_SPLUNK_CHANNEL_IDENTIFIER_HEADER),
+ ctx->channel, ctx->channel_len);
+ }
+
+ /* Content Encoding: gzip */
+ if (compressed == FLB_TRUE) {
+ flb_http_set_content_encoding_gzip(c);
+ }
+
+ /* Map debug callbacks */
+ flb_http_client_debug(c, ctx->ins->callback);
+
+ /* Perform HTTP request */
+ ret = flb_http_do(c, &b_sent);
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "http_do=%i", ret);
+ ret = FLB_RETRY;
+ }
+ else {
+ if (c->resp.status != 200) {
+ if (c->resp.payload_size > 0) {
+ flb_plg_warn(ctx->ins, "http_status=%i:\n%s",
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "http_status=%i", c->resp.status);
+ }
+ /*
+ * Requests that get 4xx responses from the Splunk HTTP Event
+ * Collector will 'always' fail, so there is no point in retrying
+ * them:
+ *
+ * https://docs.splunk.com/Documentation/Splunk/8.0.5/Data/TroubleshootHTTPEventCollector#Possible_error_codes
+ */
+ ret = (c->resp.status < 400 || c->resp.status >= 500) ?
+ FLB_RETRY : FLB_ERROR;
+
+
+ if (c->resp.status == 400 && ctx->http_debug_bad_request) {
+ debug_request_response(ctx, c);
+ }
+ }
+ else {
+ ret = FLB_OK;
+ }
+ }
+
+ /*
+ * If the payload buffer differs from the incoming records buffer, it means
+ * we generated a new payload that must be freed.
+ */
+ if (compressed == FLB_TRUE) {
+ flb_free(payload_buf);
+ }
+ else {
+ flb_sds_destroy(buf_data);
+ }
+
+ /* Cleanup */
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(ret);
+}
+
+static int cb_splunk_exit(void *data, struct flb_config *config)
+{
+ struct flb_splunk *ctx = data;
+
+ flb_splunk_conf_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "compress", NULL,
+ 0, FLB_FALSE, 0,
+ "Set payload compression mechanism. Option available is 'gzip'"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "http_user", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_splunk, http_user),
+ "Set HTTP auth user"
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "http_passwd", "",
+ 0, FLB_TRUE, offsetof(struct flb_splunk, http_passwd),
+ "Set HTTP auth password"
+ },
+
+ {
+ FLB_CONFIG_MAP_SIZE, "http_buffer_size", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify the buffer size used to read the response from the Splunk HTTP "
+ "service. This option is useful for debugging purposes where is required to read "
+ "full responses, note that response size grows depending of the number of records "
+ "inserted. To set an unlimited amount of memory set this value to 'false', "
+ "otherwise the value must be according to the Unit Size specification"
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "http_debug_bad_request", "false",
+ 0, FLB_TRUE, offsetof(struct flb_splunk, http_debug_bad_request),
+ "If the HTTP server response code is 400 (bad request) and this flag is "
+ "enabled, it will print the full HTTP request and response to the stdout "
+ "interface. This feature is available for debugging purposes."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "event_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_splunk, event_key),
+ "Specify the key name that will be used to send a single value as part of the record."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "event_host", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_splunk, event_host),
+ "Set the host value to the event data. The value allows a record accessor "
+ "pattern."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "event_source", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_splunk, event_source),
+ "Set the source value to assign to the event data."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "event_sourcetype", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_splunk, event_sourcetype),
+ "Set the sourcetype value to assign to the event data."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "event_sourcetype_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_splunk, event_sourcetype_key),
+ "Set a record key that will populate 'sourcetype'. If the key is found, it will "
+ "have precedence over the value set in 'event_sourcetype'."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "event_index", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_splunk, event_index),
+ "The name of the index by which the event data is to be indexed."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "event_index_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_splunk, event_index_key),
+ "Set a record key that will populate the 'index' field. If the key is found, "
+ "it will have precedence over the value set in 'event_index'."
+ },
+
+ {
+ FLB_CONFIG_MAP_SLIST_2, "event_field", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_splunk, event_fields),
+ "Set event fields for the record. This option can be set multiple times and "
+ "the format is 'key_name record_accessor_pattern'."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "splunk_token", NULL,
+ 0, FLB_FALSE, 0,
+ "Specify the Authentication Token for the HTTP Event Collector interface."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "splunk_send_raw", "off",
+ 0, FLB_TRUE, offsetof(struct flb_splunk, splunk_send_raw),
+ "When enabled, the record keys and values are set in the top level of the "
+ "map instead of under the event key. Refer to the Sending Raw Events section "
+ "from the docs for more details to make this option work properly."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "channel", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_splunk, channel),
+ "Specify X-Splunk-Request-Channel Header for the HTTP Event Collector interface."
+ },
+
+ /* EOF */
+ {0}
+};
+
+
+static int cb_splunk_format_test(struct flb_config *config,
+ struct flb_input_instance *ins,
+ void *plugin_context,
+ void *flush_ctx,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_data, size_t *out_size)
+{
+ struct flb_splunk *ctx = plugin_context;
+
+ return splunk_format(data, bytes, (char *) tag, tag_len,
+ (char **) out_data, out_size, ctx);
+}
+
+struct flb_output_plugin out_splunk_plugin = {
+ .name = "splunk",
+ .description = "Send events to Splunk HTTP Event Collector",
+ .cb_init = cb_splunk_init,
+ .cb_flush = cb_splunk_flush,
+ .cb_exit = cb_splunk_exit,
+ .config_map = config_map,
+ .workers = 2,
+#ifdef FLB_HAVE_METRICS
+ .event_type = FLB_OUTPUT_LOGS | FLB_OUTPUT_METRICS,
+#endif
+
+ /* for testing */
+ .test_formatter.callback = cb_splunk_format_test,
+ /* Plugin flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+};
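
pack_map() and pack_map_meta() above rely on the flb_mp_map_header helper because the number of key/value pairs is only known after the optional metadata has been evaluated: the header is initialized, appended once per entry actually written, and closed at the end so the msgpack map size can be patched afterwards. A reduced sketch of that pattern with two optional entries; the key names and the helper function are illustrative only:

    #include <string.h>
    #include <msgpack.h>
    #include <fluent-bit/flb_mp.h>

    static void pack_optional_entries_sketch(msgpack_packer *mp_pck,
                                             const char *host, const char *index)
    {
        struct flb_mp_map_header mh;

        flb_mp_map_header_init(&mh, mp_pck);

        if (host) {
            flb_mp_map_header_append(&mh);
            msgpack_pack_str(mp_pck, 4);
            msgpack_pack_str_body(mp_pck, "host", 4);
            msgpack_pack_str(mp_pck, strlen(host));
            msgpack_pack_str_body(mp_pck, host, strlen(host));
        }

        if (index) {
            flb_mp_map_header_append(&mh);
            msgpack_pack_str(mp_pck, 5);
            msgpack_pack_str_body(mp_pck, "index", 5);
            msgpack_pack_str(mp_pck, strlen(index));
            msgpack_pack_str_body(mp_pck, index, strlen(index));
        }

        /* writes the final entry count back into the map header */
        flb_mp_map_header_end(&mh);
    }
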
diff --git a/src/fluent-bit/plugins/out_splunk/splunk.h b/src/fluent-bit/plugins/out_splunk/splunk.h
new file mode 100644
index 000000000..eef8fa8b0
--- /dev/null
+++ b/src/fluent-bit/plugins/out_splunk/splunk.h
@@ -0,0 +1,119 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_SPLUNK
+#define FLB_OUT_SPLUNK
+
+#define FLB_SPLUNK_DEFAULT_HOST "127.0.0.1"
+#define FLB_SPLUNK_DEFAULT_PORT 8088
+#define FLB_SPLUNK_DEFAULT_URI_RAW "/services/collector/raw"
+#define FLB_SPLUNK_DEFAULT_URI_EVENT "/services/collector/event"
+#define FLB_SPLUNK_DEFAULT_TIME "time"
+#define FLB_SPLUNK_DEFAULT_EVENT_HOST "host"
+#define FLB_SPLUNK_DEFAULT_EVENT_SOURCE "source"
+#define FLB_SPLUNK_DEFAULT_EVENT_SOURCET "sourcetype"
+#define FLB_SPLUNK_DEFAULT_EVENT_INDEX "index"
+#define FLB_SPLUNK_DEFAULT_EVENT_FIELDS "fields"
+#define FLB_SPLUNK_DEFAULT_EVENT "event"
+#define FLB_SPLUNK_DEFAULT_HTTP_MAX "2M"
+
+#define FLB_SPLUNK_CHANNEL_IDENTIFIER_HEADER "X-Splunk-Request-Channel"
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_record_accessor.h>
+
+struct flb_splunk_field {
+ flb_sds_t key_name;
+ struct flb_record_accessor *ra;
+ struct mk_list _head;
+};
+
+struct flb_splunk {
+ /* Payload compression */
+ int compress_gzip;
+
+ /* HTTP Auth */
+ char *http_user;
+ char *http_passwd;
+
+ /* Event key */
+ flb_sds_t event_key;
+ struct flb_record_accessor *ra_event_key;
+
+ /* Event host */
+ flb_sds_t event_host;
+ struct flb_record_accessor *ra_event_host;
+
+ /* Event source */
+ flb_sds_t event_source;
+ struct flb_record_accessor *ra_event_source;
+
+ /*
+ * NOTE: EVENT SOURCETYPE
+ * ----------------------
+ * We use two separate variables since we aim to specify a default in case
+ * a record accessor pattern is given but not found. The event_sourcetype_key
+ * has precedence over the 'event_sourcetype' variable.
+ */
+
+ /* Event sourcetype */
+ flb_sds_t event_sourcetype;
+
+ /* Event sourcetype record key */
+ flb_sds_t event_sourcetype_key;
+ struct flb_record_accessor *ra_event_sourcetype_key;
+
+ /* Event index */
+ flb_sds_t event_index;
+
+ /* Event index record key */
+ flb_sds_t event_index_key;
+ struct flb_record_accessor *ra_event_index_key;
+
+ /* Event fields */
+ struct mk_list *event_fields;
+
+ /* Internal/processed event fields */
+ struct mk_list fields;
+
+ /* Token Auth */
+ flb_sds_t auth_header;
+
+ /* Channel identifier */
+ flb_sds_t channel;
+ size_t channel_len;
+
+ /* Send fields directly or pack data into "event" object */
+ int splunk_send_raw;
+
+ /* HTTP Client Setup */
+ size_t buffer_size;
+
+ /* HTTP: Debug bad requests (HTTP status 400) to stdout */
+ int http_debug_bad_request;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *u;
+
+ /* Plugin instance */
+ struct flb_output_instance *ins;
+};
+
+#endif
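
Most of the event_* members above come in pairs: the raw pattern string configured by the user and the record accessor compiled from it in splunk_conf.c, which pack_map_meta() later evaluates per record with flb_ra_translate(). A condensed sketch of that life cycle; the pattern string is only an example, and in the plugin the accessor is compiled once at configuration time rather than on every call:

    #include <msgpack.h>
    #include <fluent-bit/flb_sds.h>
    #include <fluent-bit/flb_record_accessor.h>

    static flb_sds_t resolve_host_sketch(msgpack_object map,
                                         const char *tag, int tag_len)
    {
        flb_sds_t val;
        struct flb_record_accessor *ra;

        /* compile the pattern (done once in flb_splunk_conf_create in the plugin) */
        ra = flb_ra_create((char *) "$kubernetes['host']", FLB_TRUE);
        if (!ra) {
            return NULL;
        }

        /* evaluate it against a single record map at flush time */
        val = flb_ra_translate(ra, (char *) tag, tag_len, map, NULL);

        flb_ra_destroy(ra);
        return val;  /* may be NULL or empty; caller releases it with flb_sds_destroy() */
    }
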
diff --git a/src/fluent-bit/plugins/out_splunk/splunk_conf.c b/src/fluent-bit/plugins/out_splunk/splunk_conf.c
new file mode 100644
index 000000000..cc911cbeb
--- /dev/null
+++ b/src/fluent-bit/plugins/out_splunk/splunk_conf.c
@@ -0,0 +1,313 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+
+#include "splunk.h"
+#include "splunk_conf.h"
+
+static int event_fields_create(struct flb_splunk *ctx)
+{
+ int i = 0;
+ struct mk_list *head;
+ struct flb_slist_entry *kname;
+ struct flb_slist_entry *pattern;
+ struct flb_config_map_val *mv;
+ struct flb_splunk_field *f;
+
+ if (!ctx->event_fields) {
+ return 0;
+ }
+
+ flb_config_map_foreach(head, mv, ctx->event_fields) {
+ kname = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
+ pattern = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
+
+ f = flb_malloc(sizeof(struct flb_splunk_field));
+ if (!f) {
+ flb_errno();
+ return -1;
+ }
+
+ f->key_name = flb_sds_create(kname->str);
+ if (!f->key_name) {
+ flb_free(f);
+ return -1;
+ }
+
+ f->ra = flb_ra_create(pattern->str, FLB_TRUE);
+ if (!f->ra) {
+ flb_plg_error(ctx->ins,
+ "could not process event_field number #%i with "
+ "pattern '%s'",
+ i, pattern->str);
+ flb_sds_destroy(f->key_name);
+ flb_free(f);
+ return -1;
+ }
+
+ mk_list_add(&f->_head, &ctx->fields);
+ }
+
+ return 0;
+}
+
+static void event_fields_destroy(struct flb_splunk *ctx)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_splunk_field *f;
+
+ mk_list_foreach_safe(head, tmp, &ctx->fields) {
+ f = mk_list_entry(head, struct flb_splunk_field, _head);
+ flb_sds_destroy(f->key_name);
+ flb_ra_destroy(f->ra);
+ mk_list_del(&f->_head);
+ flb_free(f);
+ }
+}
+
+struct flb_splunk *flb_splunk_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ int io_flags = 0;
+ size_t size;
+ flb_sds_t t;
+ const char *tmp;
+ struct flb_upstream *upstream;
+ struct flb_splunk *ctx;
+
+ ctx = flb_calloc(1, sizeof(struct flb_splunk));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ mk_list_init(&ctx->fields);
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Set default network configuration */
+ flb_output_net_default(FLB_SPLUNK_DEFAULT_HOST, FLB_SPLUNK_DEFAULT_PORT, ins);
+
+ /* use TLS ? */
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ }
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ /* Prepare an upstream handler */
+ upstream = flb_upstream_create(config,
+ ins->host.name,
+ ins->host.port,
+ io_flags,
+ ins->tls);
+ if (!upstream) {
+ flb_plg_error(ctx->ins, "cannot create Upstream context");
+ flb_splunk_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* Store the upstream context */
+ ctx->u = upstream;
+
+ tmp = flb_output_get_property("http_buffer_size", ins);
+ if (!tmp) {
+ ctx->buffer_size = 0;
+ }
+ else {
+ size = flb_utils_size_to_bytes(tmp);
+ if (size == -1) {
+ flb_plg_error(ctx->ins, "invalid 'buffer_size' value");
+ flb_splunk_conf_destroy(ctx);
+ return NULL;
+ }
+ if (size < 4 * 1024) {
+ size = 4 * 1024;
+ }
+ ctx->buffer_size = size;
+ }
+
+ /* Compress (gzip) */
+ tmp = flb_output_get_property("compress", ins);
+ ctx->compress_gzip = FLB_FALSE;
+ if (tmp) {
+ if (strcasecmp(tmp, "gzip") == 0) {
+ ctx->compress_gzip = FLB_TRUE;
+ }
+ }
+
+ /* Event key */
+ if (ctx->event_key) {
+ if (ctx->event_key[0] != '$') {
+ flb_plg_error(ctx->ins,
+ "invalid event_key pattern, it must start with '$'");
+ flb_splunk_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->ra_event_key = flb_ra_create(ctx->event_key, FLB_TRUE);
+ if (!ctx->ra_event_key) {
+ flb_plg_error(ctx->ins,
+ "cannot create record accessor for event_key pattern: '%s'",
+ ctx->event_key);
+ flb_splunk_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* Event host */
+ if (ctx->event_host) {
+ ctx->ra_event_host = flb_ra_create(ctx->event_host, FLB_TRUE);
+ if (!ctx->ra_event_host) {
+ flb_plg_error(ctx->ins,
+ "cannot create record accessor for event_key pattern: '%s'",
+ ctx->event_host);
+ flb_splunk_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* Event source */
+ if (ctx->event_source) {
+ ctx->ra_event_source = flb_ra_create(ctx->event_source, FLB_TRUE);
+ if (!ctx->ra_event_source) {
+ flb_plg_error(ctx->ins,
+ "cannot create record accessor for event_source pattern: '%s'",
+ ctx->event_source);
+ flb_splunk_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* Event sourcetype (key lookup) */
+ if (ctx->event_sourcetype_key) {
+ ctx->ra_event_sourcetype_key = flb_ra_create(ctx->event_sourcetype_key, FLB_TRUE);
+ if (!ctx->ra_event_sourcetype_key) {
+ flb_plg_error(ctx->ins,
+ "cannot create record accessor for "
+ "event_sourcetype_key pattern: '%s'",
+ ctx->event_sourcetype_key);
+ flb_splunk_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* Event index (key lookup) */
+ if (ctx->event_index_key) {
+ ctx->ra_event_index_key = flb_ra_create(ctx->event_index_key, FLB_TRUE);
+ if (!ctx->ra_event_index_key) {
+ flb_plg_error(ctx->ins,
+ "cannot create record accessor for "
+ "event_index_key pattern: '%s'",
+ ctx->event_index_key);
+ flb_splunk_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* Event fields */
+ ret = event_fields_create(ctx);
+ if (ret == -1) {
+ flb_splunk_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* If no http_user is set, fall back to splunk_token; if splunk_token is unset, fail. */
+ if (!ctx->http_user) {
+ /* Splunk Auth Token */
+ tmp = flb_output_get_property("splunk_token", ins);
+ if (!tmp) {
+ flb_plg_error(ctx->ins, "either splunk_token or http_user should be set");
+ flb_splunk_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->auth_header = flb_sds_create("Splunk ");
+ t = flb_sds_cat(ctx->auth_header, tmp, strlen(tmp));
+ if (t) {
+ ctx->auth_header = t;
+ }
+ else {
+ flb_plg_error(ctx->ins, "error on token generation");
+ flb_splunk_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* channel */
+ if (ctx->channel != NULL) {
+ ctx->channel_len = flb_sds_len(ctx->channel);
+ }
+
+ /* Set instance flags into upstream */
+ flb_output_upstream_set(ctx->u, ins);
+
+ return ctx;
+}
+
+int flb_splunk_conf_destroy(struct flb_splunk *ctx)
+{
+ if (!ctx) {
+ return -1;
+ }
+
+ if (ctx->auth_header) {
+ flb_sds_destroy(ctx->auth_header);
+ }
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ if (ctx->ra_event_key) {
+ flb_ra_destroy(ctx->ra_event_key);
+ }
+
+ if (ctx->ra_event_host) {
+ flb_ra_destroy(ctx->ra_event_host);
+ }
+
+ if (ctx->ra_event_source) {
+ flb_ra_destroy(ctx->ra_event_source);
+ }
+
+ if (ctx->ra_event_sourcetype_key) {
+ flb_ra_destroy(ctx->ra_event_sourcetype_key);
+ }
+
+ if (ctx->ra_event_index_key) {
+ flb_ra_destroy(ctx->ra_event_index_key);
+ }
+
+ event_fields_destroy(ctx);
+
+ flb_free(ctx);
+
+ return 0;
+}
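
flb_splunk_conf_create() above builds the "Splunk <token>" Authorization value with flb_sds_cat(), which may reallocate and hand back a new buffer, so the returned handle has to be captured instead of discarded. A minimal sketch of that idiom; the helper name is illustrative and the token comes from the caller:

    #include <string.h>
    #include <fluent-bit/flb_sds.h>

    static flb_sds_t build_auth_header_sketch(const char *token)
    {
        flb_sds_t header;
        flb_sds_t tmp;

        header = flb_sds_create("Splunk ");
        if (!header) {
            return NULL;
        }

        /* flb_sds_cat() may move the buffer: always keep the returned handle */
        tmp = flb_sds_cat(header, token, strlen(token));
        if (!tmp) {
            flb_sds_destroy(header);
            return NULL;
        }

        return tmp;  /* released later with flb_sds_destroy() */
    }
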
diff --git a/src/fluent-bit/plugins/out_splunk/splunk_conf.h b/src/fluent-bit/plugins/out_splunk/splunk_conf.h
new file mode 100644
index 000000000..c5114b1f9
--- /dev/null
+++ b/src/fluent-bit/plugins/out_splunk/splunk_conf.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_SPLUNK_CONF_H
+#define FLB_OUT_SPLUNK_CONF_H
+
+#include "splunk.h"
+
+struct flb_splunk *flb_splunk_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+int flb_splunk_conf_destroy(struct flb_splunk *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_stackdriver/CMakeLists.txt b/src/fluent-bit/plugins/out_stackdriver/CMakeLists.txt
new file mode 100644
index 000000000..2d7fa71bb
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/CMakeLists.txt
@@ -0,0 +1,13 @@
+set(src
+ gce_metadata.c
+ stackdriver_conf.c
+ stackdriver.c
+ stackdriver_operation.c
+ stackdriver_source_location.c
+ stackdriver_http_request.c
+ stackdriver_timestamp.c
+ stackdriver_helper.c
+ stackdriver_resource_types.c
+ )
+
+FLB_PLUGIN(out_stackdriver "${src}" "")
diff --git a/src/fluent-bit/plugins/out_stackdriver/gce_metadata.c b/src/fluent-bit/plugins/out_stackdriver/gce_metadata.c
new file mode 100644
index 000000000..fb942213b
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/gce_metadata.c
@@ -0,0 +1,222 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_oauth2.h>
+
+#include <msgpack.h>
+
+#include "gce_metadata.h"
+#include "stackdriver.h"
+#include "stackdriver_conf.h"
+
+
+static int fetch_metadata(struct flb_stackdriver *ctx,
+ struct flb_upstream *upstream, char *uri,
+ char *payload)
+{
+ int ret;
+ int ret_code;
+ size_t b_sent;
+ struct flb_connection *metadata_conn;
+ struct flb_http_client *c;
+
+ /* If runtime test mode is enabled, add test data */
+ if (ctx->ins->test_mode == FLB_TRUE) {
+ if (strcmp(uri, FLB_STD_METADATA_PROJECT_ID_URI) == 0) {
+ flb_sds_cat(payload, "fluent-bit-test", 15);
+ return 0;
+ }
+ else if (strcmp(uri, FLB_STD_METADATA_ZONE_URI) == 0) {
+ flb_sds_cat(payload, "projects/0123456789/zones/fluent", 32);
+ return 0;
+ }
+ else if (strcmp(uri, FLB_STD_METADATA_INSTANCE_ID_URI) == 0) {
+ flb_sds_cat(payload, "333222111", 9);
+ return 0;
+ }
+ return -1;
+ }
+
+ /* Get metadata connection */
+ metadata_conn = flb_upstream_conn_get(upstream);
+ if (!metadata_conn) {
+ flb_plg_error(ctx->ins, "failed to create metadata connection");
+ return -1;
+ }
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(metadata_conn, FLB_HTTP_GET, uri,
+ "", 0, NULL, 0, NULL, 0);
+
+ flb_http_buffer_size(c, FLB_STD_METADATA_TOKEN_SIZE_MAX);
+
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ flb_http_add_header(c, "Content-Type", 12, "application/text", 16);
+ flb_http_add_header(c, "Metadata-Flavor", 15, "Google", 6);
+
+ /* Send HTTP request */
+ ret = flb_http_do(c, &b_sent);
+
+ /* validate response */
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "http_do=%i", ret);
+ ret_code = -1;
+ }
+ else {
+ /* The request was issued successfully, validate the 'error' field */
+ flb_plg_debug(ctx->ins, "HTTP Status=%i", c->resp.status);
+ if (c->resp.status == 200) {
+ ret_code = 0;
+ flb_sds_copy(payload, c->resp.payload, c->resp.payload_size);
+ }
+ else {
+ if (c->resp.payload_size > 0) {
+ /* we got an error */
+ flb_plg_warn(ctx->ins, "error\n%s", c->resp.payload);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "response\n%s", c->resp.payload);
+ }
+ ret_code = -1;
+ }
+ }
+
+ /* Cleanup */
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(metadata_conn);
+
+ return ret_code;
+}
+
+int gce_metadata_read_token(struct flb_stackdriver *ctx)
+{
+ int ret;
+ flb_sds_t uri = flb_sds_create(FLB_STD_METADATA_SERVICE_ACCOUNT_URI);
+ flb_sds_t payload = flb_sds_create_size(FLB_STD_METADATA_TOKEN_SIZE_MAX);
+
+ uri = flb_sds_cat(uri, ctx->client_email, flb_sds_len(ctx->client_email));
+ uri = flb_sds_cat(uri, "/token", 6);
+ ret = fetch_metadata(ctx, ctx->metadata_u, uri, payload);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "can't fetch token from the metadata server");
+ flb_sds_destroy(payload);
+ flb_sds_destroy(uri);
+ return -1;
+ }
+
+ ret = flb_oauth2_parse_json_response(payload, flb_sds_len(payload), ctx->o);
+ flb_sds_destroy(payload);
+ flb_sds_destroy(uri);
+
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "unable to parse token body");
+ return -1;
+ }
+ ctx->o->expires = time(NULL) + ctx->o->expires_in;
+ return 0;
+}
+
+int gce_metadata_read_zone(struct flb_stackdriver *ctx)
+{
+ int ret;
+ int i;
+ int j;
+ int part = 0;
+ flb_sds_t payload = flb_sds_create_size(4096);
+ flb_sds_t zone = NULL;
+
+ ret = fetch_metadata(ctx, ctx->metadata_u, FLB_STD_METADATA_ZONE_URI,
+ payload);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "can't fetch zone from the metadata server");
+ flb_sds_destroy(payload);
+ return -1;
+ }
+
+ /* Data returned in the format projects/{project-id}/zones/{name} */
+ for (i = 0; i < flb_sds_len(payload); ++i) {
+ if (payload[i] == '/') {
+ part++;
+ }
+ if (part == 3) {
+ i++;
+ break;
+ }
+ }
+
+ if (part != 3) {
+ flb_plg_error(ctx->ins, "wrong format of zone response");
+ flb_sds_destroy(payload);
+ return -1;
+ }
+
+ zone = flb_sds_create_size(flb_sds_len(payload) - i);
+
+ j = 0;
+ while (i != flb_sds_len(payload)) {
+ zone[j] = payload[i];
+ i++;
+ j++;
+ }
+ zone[j] = '\0';
+ ctx->zone = flb_sds_create(zone);
+ flb_sds_destroy(zone);
+ flb_sds_destroy(payload);
+
+ return 0;
+}
+
+int gce_metadata_read_project_id(struct flb_stackdriver *ctx)
+{
+ int ret;
+ flb_sds_t payload = flb_sds_create_size(4096);
+
+ ret = fetch_metadata(ctx, ctx->metadata_u,
+ FLB_STD_METADATA_PROJECT_ID_URI, payload);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "can't fetch project id from the metadata server");
+ flb_sds_destroy(payload);
+ return -1;
+ }
+ ctx->project_id = flb_sds_create(payload);
+ flb_sds_destroy(payload);
+ return 0;
+}
+
+int gce_metadata_read_instance_id(struct flb_stackdriver *ctx)
+{
+ int ret;
+ flb_sds_t payload = flb_sds_create_size(4096);
+
+ ret = fetch_metadata(ctx, ctx->metadata_u,
+ FLB_STD_METADATA_INSTANCE_ID_URI, payload);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "can't fetch instance id from the metadata server");
+ flb_sds_destroy(payload);
+ return -1;
+ }
+ ctx->instance_id = flb_sds_create(payload);
+ flb_sds_destroy(payload);
+ return 0;
+}
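
gce_metadata_read_zone() above slices the final path segment out of the metadata response, which is documented as "projects/{project-id}/zones/{name}": the loop skips three '/' separators and copies what remains. For that documented shape the result is simply the text after the last slash, so a worked example with plain libc string handling gives the same answer (the strrchr() shortcut is an equivalence for this format, not the code the plugin executes):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* same string the test mode in fetch_metadata() returns for the zone URI */
        const char *payload = "projects/0123456789/zones/fluent";
        const char *zone;

        /* for the documented format, the zone name is everything after the last '/' */
        zone = strrchr(payload, '/');
        zone = zone ? zone + 1 : payload;

        printf("zone=%s\n", zone);  /* prints: zone=fluent */
        return 0;
    }
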
diff --git a/src/fluent-bit/plugins/out_stackdriver/gce_metadata.h b/src/fluent-bit/plugins/out_stackdriver/gce_metadata.h
new file mode 100644
index 000000000..65009588d
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/gce_metadata.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLUENT_BIT_GCE_METADATA_H
+#define FLUENT_BIT_GCE_METADATA_H
+
+#include "stackdriver.h"
+
+/* Metadata server URL */
+#define FLB_STD_METADATA_SERVER "http://metadata.google.internal"
+
+/* Project ID metadata URI */
+#define FLB_STD_METADATA_PROJECT_ID_URI "/computeMetadata/v1/project/project-id"
+
+/* Zone metadata URI */
+#define FLB_STD_METADATA_ZONE_URI "/computeMetadata/v1/instance/zone"
+
+/* Instance ID metadata URI */
+#define FLB_STD_METADATA_INSTANCE_ID_URI "/computeMetadata/v1/instance/id"
+
+/* Service account metadata URI */
+#define FLB_STD_METADATA_SERVICE_ACCOUNT_URI "/computeMetadata/v1/instance/service-accounts/"
+
+/* Max size of token response from metadata server */
+#define FLB_STD_METADATA_TOKEN_SIZE_MAX 14336
+
+int gce_metadata_read_token(struct flb_stackdriver *ctx);
+int gce_metadata_read_zone(struct flb_stackdriver *ctx);
+int gce_metadata_read_project_id(struct flb_stackdriver *ctx);
+int gce_metadata_read_instance_id(struct flb_stackdriver *ctx);
+
+#endif //FLUENT_BIT_GCE_METADATA_H
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver.c b/src/fluent-bit/plugins/out_stackdriver/stackdriver.c
new file mode 100644
index 000000000..5c48a338b
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver.c
@@ -0,0 +1,2867 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_oauth2.h>
+#include <fluent-bit/flb_regex.h>
+#include <fluent-bit/flb_pthread.h>
+#include <fluent-bit/flb_crypto.h>
+#include <fluent-bit/flb_hash.h>
+#include <fluent-bit/flb_base64.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_ra_key.h>
+#include <fluent-bit/flb_record_accessor.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_gzip.h>
+
+#include <msgpack.h>
+
+#include "gce_metadata.h"
+#include "stackdriver.h"
+#include "stackdriver_conf.h"
+#include "stackdriver_operation.h"
+#include "stackdriver_source_location.h"
+#include "stackdriver_http_request.h"
+#include "stackdriver_timestamp.h"
+#include "stackdriver_helper.h"
+#include "stackdriver_resource_types.h"
+
+pthread_key_t oauth2_type;
+pthread_key_t oauth2_token;
+pthread_key_t oauth2_token_expires;
+
+static void oauth2_cache_exit(void *ptr)
+{
+ if (ptr) {
+ flb_sds_destroy(ptr);
+ }
+}
+
+static void oauth2_cache_free_expiration(void *ptr)
+{
+ if (ptr) {
+ flb_free(ptr);
+ }
+}
+
+static void oauth2_cache_init()
+{
+ /* oauth2 pthread key */
+ pthread_key_create(&oauth2_type, oauth2_cache_exit);
+ pthread_key_create(&oauth2_token, oauth2_cache_exit);
+ pthread_key_create(&oauth2_token_expires, oauth2_cache_free_expiration);
+}
+
+/* Set oauth2 type and token in pthread keys */
+static void oauth2_cache_set(char *type, char *token, time_t expires)
+{
+ flb_sds_t tmp;
+ time_t *tmp_expires;
+
+ /* oauth2 type */
+ tmp = pthread_getspecific(oauth2_type);
+ if (tmp) {
+ flb_sds_destroy(tmp);
+ }
+ tmp = flb_sds_create(type);
+ pthread_setspecific(oauth2_type, tmp);
+
+ /* oauth2 access token */
+ tmp = pthread_getspecific(oauth2_token);
+ if (tmp) {
+ flb_sds_destroy(tmp);
+ }
+ tmp = flb_sds_create(token);
+ pthread_setspecific(oauth2_token, tmp);
+
+ /* oauth2 access token expiration */
+ tmp_expires = pthread_getspecific(oauth2_token_expires);
+ if (tmp_expires) {
+ flb_free(tmp_expires);
+ }
+ tmp_expires = flb_calloc(1, sizeof(time_t));
+ if (!tmp_expires) {
+ flb_errno();
+ return;
+ }
+ *tmp_expires = expires;
+ pthread_setspecific(oauth2_token_expires, tmp_expires);
+}
+
+/* Using the values cached in the pthread keys, return the token expiration time */
+static time_t oauth2_cache_get_expiration()
+{
+ time_t *expires = pthread_getspecific(oauth2_token_expires);
+ if (expires) {
+ return *expires;
+ }
+ return 0;
+}
+
+/* Using the values cached in the pthread keys, compose the authorization token */
+static flb_sds_t oauth2_cache_to_token()
+{
+ flb_sds_t type;
+ flb_sds_t token;
+ flb_sds_t output;
+
+ type = pthread_getspecific(oauth2_type);
+ if (!type) {
+ return NULL;
+ }
+
+ output = flb_sds_create(type);
+ if (!output) {
+ return NULL;
+ }
+
+ token = pthread_getspecific(oauth2_token);
+ flb_sds_printf(&output, " %s", token);
+ return output;
+}
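+
+/*
+ * Example of the composed value: with a cached type of "Bearer" the function
+ * returns a string like "Bearer ya29...." (the token shown here is made up),
+ * i.e. "<type> <access_token>".
+ */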
+
+/*
+ * Base64 Encoding in JWT must:
+ *
+ * - remove any trailing padding '=' character
+ * - replace '+' with '-'
+ * - replace '/' with '_'
+ *
+ * ref: https://www.rfc-editor.org/rfc/rfc7515.txt Appendix C
+ */
+int jwt_base64_url_encode(unsigned char *out_buf, size_t out_size,
+ unsigned char *in_buf, size_t in_size,
+ size_t *olen)
+
+{
+ int i;
+ size_t len;
+ int result;
+
+
+ /* do normal base64 encoding */
+ result = flb_base64_encode((unsigned char *) out_buf, out_size - 1,
+ &len, in_buf, in_size);
+ if (result != 0) {
+ return -1;
+ }
+
+ /* Replace '+' and '/' characters */
+ for (i = 0; i < len && out_buf[i] != '='; i++) {
+ if (out_buf[i] == '+') {
+ out_buf[i] = '-';
+ }
+ else if (out_buf[i] == '/') {
+ out_buf[i] = '_';
+ }
+ }
+
+ /* Now 'i' becomes the new length */
+ *olen = i;
+ return 0;
+}
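+
+/*
+ * For instance, a base64 output such as "a+b/c2Q=" would be rewritten to
+ * "a-b_c2Q": '+' and '/' are substituted and the trailing '=' padding is
+ * dropped, as required for the JWT segments.
+ */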
+
+static int jwt_encode(char *payload, char *secret,
+ char **out_signature, size_t *out_size,
+ struct flb_stackdriver *ctx)
+{
+ int ret;
+ int len;
+ int buf_size;
+ size_t olen;
+ char *buf;
+ char *sigd;
+ char *headers = "{\"alg\": \"RS256\", \"typ\": \"JWT\"}";
+ unsigned char sha256_buf[32] = {0};
+ flb_sds_t out;
+ unsigned char sig[256] = {0};
+ size_t sig_len;
+
+ buf_size = (strlen(payload) + strlen(secret)) * 2;
+ buf = flb_malloc(buf_size);
+ if (!buf) {
+ flb_errno();
+ return -1;
+ }
+
+ /* Encode header */
+ len = strlen(headers);
+ ret = flb_base64_encode((unsigned char *) buf, buf_size - 1,
+ &olen, (unsigned char *) headers, len);
+ if (ret != 0) {
+ flb_free(buf);
+
+ return ret;
+ }
+
+ /* Create buffer to store JWT */
+ out = flb_sds_create_size(2048);
+ if (!out) {
+ flb_errno();
+ flb_free(buf);
+ return -1;
+ }
+
+ /* Append header */
+ flb_sds_cat(out, buf, olen);
+ flb_sds_cat(out, ".", 1);
+
+ /* Encode Payload */
+ len = strlen(payload);
+ jwt_base64_url_encode((unsigned char *) buf, buf_size,
+ (unsigned char *) payload, len, &olen);
+
+ /* Append Payload */
+ flb_sds_cat(out, buf, olen);
+
+ /* do sha256() of base64(header).base64(payload) */
+ ret = flb_hash_simple(FLB_HASH_SHA256,
+ (unsigned char *) out, flb_sds_len(out),
+ sha256_buf, sizeof(sha256_buf));
+
+ if (ret != FLB_CRYPTO_SUCCESS) {
+ flb_plg_error(ctx->ins, "error hashing token");
+ flb_free(buf);
+ flb_sds_destroy(out);
+ return -1;
+ }
+
+ len = strlen(secret);
+ sig_len = sizeof(sig);
+
+ ret = flb_crypto_sign_simple(FLB_CRYPTO_PRIVATE_KEY,
+ FLB_CRYPTO_PADDING_PKCS1,
+ FLB_HASH_SHA256,
+ (unsigned char *) secret, len,
+ sha256_buf, sizeof(sha256_buf),
+ sig, &sig_len);
+
+ if (ret != FLB_CRYPTO_SUCCESS) {
+ flb_plg_error(ctx->ins, "error creating RSA context");
+ flb_free(buf);
+ flb_sds_destroy(out);
+ return -1;
+ }
+
+ sigd = flb_malloc(2048);
+ if (!sigd) {
+ flb_errno();
+ flb_free(buf);
+ flb_sds_destroy(out);
+ return -1;
+ }
+
+ jwt_base64_url_encode((unsigned char *) sigd, 2048, sig, 256, &olen);
+
+ flb_sds_cat(out, ".", 1);
+ flb_sds_cat(out, sigd, olen);
+
+ *out_signature = out;
+ *out_size = flb_sds_len(out);
+
+ flb_free(buf);
+ flb_free(sigd);
+
+ return 0;
+}
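+
+/*
+ * The assertion produced above follows the usual JWT layout:
+ * base64(header) "." base64url(payload) "." base64url(RS256 signature).
+ */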
+
+/* Create a new oauth2 context and get a oauth2 token */
+static int get_oauth2_token(struct flb_stackdriver *ctx)
+{
+ int ret;
+ char *token;
+ char *sig_data;
+ size_t sig_size;
+ time_t issued;
+ time_t expires;
+ char payload[1024];
+
+ flb_oauth2_payload_clear(ctx->o);
+
+ /* In case of using metadata server, fetch token from there */
+ if (ctx->metadata_server_auth) {
+ return gce_metadata_read_token(ctx);
+ }
+
+ /* JWT encode for oauth2 */
+ issued = time(NULL);
+ expires = issued + FLB_STD_TOKEN_REFRESH;
+
+ snprintf(payload, sizeof(payload) - 1,
+ "{\"iss\": \"%s\", \"scope\": \"%s\", "
+ "\"aud\": \"%s\", \"exp\": %lu, \"iat\": %lu}",
+ ctx->client_email, FLB_STD_SCOPE,
+ FLB_STD_AUTH_URL,
+ expires, issued);
+
+ /* Compose JWT signature */
+ ret = jwt_encode(payload, ctx->private_key, &sig_data, &sig_size, ctx);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "JWT signature generation failed");
+ return -1;
+ }
+ flb_plg_debug(ctx->ins, "JWT signature:\n%s", sig_data);
+
+ ret = flb_oauth2_payload_append(ctx->o,
+ "grant_type", -1,
+ "urn%3Aietf%3Aparams%3Aoauth%3A"
+ "grant-type%3Ajwt-bearer", -1);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error appending oauth2 params");
+ flb_sds_destroy(sig_data);
+ return -1;
+ }
+
+ ret = flb_oauth2_payload_append(ctx->o,
+ "assertion", -1,
+ sig_data, sig_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error appending oauth2 params");
+ flb_sds_destroy(sig_data);
+ return -1;
+ }
+ flb_sds_destroy(sig_data);
+
+ /* Retrieve access token */
+ token = flb_oauth2_token_get(ctx->o);
+ if (!token) {
+ flb_plg_error(ctx->ins, "error retrieving oauth2 access token");
+ return -1;
+ }
+
+ return 0;
+}
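+
+/*
+ * Note: the retrieved credentials stay inside ctx->o (token_type,
+ * access_token, expires); get_google_token() below copies them into the
+ * thread-local cache before composing the outgoing header value.
+ */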
+
+static flb_sds_t get_google_token(struct flb_stackdriver *ctx)
+{
+ int ret = 0;
+ flb_sds_t output = NULL;
+ time_t cached_expiration = 0;
+
+ ret = pthread_mutex_trylock(&ctx->token_mutex);
+ if (ret == EBUSY) {
+ /*
+ * If the routine is locked we just use our pre-cached values and
+ * compose the expected authorization value.
+ *
+ * If the routine fails it will return NULL and the caller will just
+ * issue a FLB_RETRY.
+ */
+ output = oauth2_cache_to_token();
+ cached_expiration = oauth2_cache_get_expiration();
+ if (time(NULL) < cached_expiration) {
+ return output;
+ } else {
+ /*
+ * Cached token is expired. Wait on the lock to get an up-to-date token,
+ * either by waiting for it to be refreshed or by refreshing it ourselves.
+ */
+ flb_plg_info(ctx->ins, "Cached token is expired. Waiting on lock.");
+ if (output) {
+ flb_sds_destroy(output);
+ output = NULL;
+ }
+ ret = pthread_mutex_lock(&ctx->token_mutex);
+ }
+ }
+
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "error locking mutex");
+ return NULL;
+ }
+
+ if (flb_oauth2_token_expired(ctx->o) == FLB_TRUE) {
+ ret = get_oauth2_token(ctx);
+ }
+
+ /* Copy string to prevent race conditions (get_oauth2 can free the string) */
+ if (ret == 0) {
+ /* Update pthread keys cached values */
+ oauth2_cache_set(ctx->o->token_type, ctx->o->access_token, ctx->o->expires);
+
+ /* Compose outgoing buffer using cached values */
+ output = oauth2_cache_to_token();
+ }
+
+ if (pthread_mutex_unlock(&ctx->token_mutex)){
+ flb_plg_error(ctx->ins, "error unlocking mutex");
+ if (output) {
+ flb_sds_destroy(output);
+ }
+ return NULL;
+ }
+
+
+ return output;
+}
+
+void replace_prefix_dot(flb_sds_t s, int tag_prefix_len)
+{
+ int i;
+ int str_len;
+ char c;
+
+ if (!s) {
+ return;
+ }
+
+ str_len = flb_sds_len(s);
+ if (tag_prefix_len > str_len) {
+ flb_error("[output] tag_prefix shouldn't be longer than local_resource_id");
+ return;
+ }
+
+ for (i = 0; i < tag_prefix_len; i++) {
+ c = s[i];
+
+ if (c == '.') {
+ s[i] = '_';
+ }
+ }
+}
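+
+/*
+ * Example: with a tag_prefix such as "kube.var.log.containers." the dots
+ * inside the prefix portion of local_resource_id are rewritten to '_' so
+ * that the later split on '.' only separates the fields that follow the
+ * prefix.
+ */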
+
+static flb_sds_t get_str_value_from_msgpack_map(msgpack_object_map map,
+ const char *key, int key_size)
+{
+ int i;
+ msgpack_object k;
+ msgpack_object v;
+ flb_sds_t ptr = NULL;
+
+ for (i = 0; i < map.size; i++) {
+ k = map.ptr[i].key;
+ v = map.ptr[i].val;
+
+ if (k.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ if (k.via.str.size == key_size &&
+ strncmp(key, (char *) k.via.str.ptr, k.via.str.size) == 0) {
+ /* make sure to free it after use */
+ ptr = flb_sds_create_len(v.via.str.ptr, v.via.str.size);
+ break;
+ }
+ }
+
+ return ptr;
+}
+
+/* parse_monitored_resource() extracts the monitored resource labels
+ * from "logging.googleapis.com/monitored_resource" in the log data
+ * and appends them to 'resource'/'labels' in the log entry.
+ * The monitored resource type is already read from the 'resource' field in the
+ * stackdriver output plugin configuration parameters.
+ *
+ * The structure of monitored_resource is:
+ * {
+ * "logging.googleapis.com/monitored_resource": {
+ * "labels": {
+ * "resource_label": <label_value>,
+ * }
+ * }
+ * }
+ * See https://cloud.google.com/logging/docs/api/v2/resource-list#resource-types
+ * for required labels for each monitored resource.
+ */
+
+static int parse_monitored_resource(struct flb_stackdriver *ctx, const void *data, size_t bytes, msgpack_packer *mp_pck)
+{
+ int ret = -1;
+ msgpack_object *obj;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ obj = log_event.body;
+
+ msgpack_object_kv *kv = obj->via.map.ptr;
+ msgpack_object_kv *const kvend = obj->via.map.ptr + obj->via.map.size;
+ for (; kv < kvend; ++kv) {
+ if (kv->val.type == MSGPACK_OBJECT_MAP && kv->key.type == MSGPACK_OBJECT_STR
+ && strncmp (MONITORED_RESOURCE_KEY, kv->key.via.str.ptr, kv->key.via.str.size) == 0) {
+ msgpack_object subobj = kv->val;
+ msgpack_object_kv *p = subobj.via.map.ptr;
+ msgpack_object_kv *pend = subobj.via.map.ptr + subobj.via.map.size;
+ for (; p < pend; ++p) {
+ if (p->key.type != MSGPACK_OBJECT_STR || p->val.type != MSGPACK_OBJECT_MAP) {
+ continue;
+ }
+ if (strncmp("labels", p->key.via.str.ptr, p->key.via.str.size) == 0) {
+ msgpack_object labels = p->val;
+ msgpack_object_kv *q = labels.via.map.ptr;
+ msgpack_object_kv *qend = labels.via.map.ptr + labels.via.map.size;
+ int fields = 0;
+ for (; q < qend; ++q) {
+ if (q->key.type != MSGPACK_OBJECT_STR || q->val.type != MSGPACK_OBJECT_STR) {
+ flb_plg_error(ctx->ins, "Key and value should be string in the %s/labels", MONITORED_RESOURCE_KEY);
+ }
+ ++fields;
+ }
+ if (fields > 0) {
+ msgpack_pack_map(mp_pck, fields);
+ q = labels.via.map.ptr;
+ for (; q < qend; ++q) {
+ if (q->key.type != MSGPACK_OBJECT_STR || q->val.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+ flb_plg_debug(ctx->ins, "[%s] found in the payload", MONITORED_RESOURCE_KEY);
+ msgpack_pack_str(mp_pck, q->key.via.str.size);
+ msgpack_pack_str_body(mp_pck, q->key.via.str.ptr, q->key.via.str.size);
+ msgpack_pack_str(mp_pck, q->val.via.str.size);
+ msgpack_pack_str_body(mp_pck, q->val.via.str.ptr, q->val.via.str.size);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return 0;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ flb_plg_debug(ctx->ins, "[%s] not found in the payload", MONITORED_RESOURCE_KEY);
+
+ return ret;
+}
+
+/*
+ * Given a local_resource_id, split the content using the proper separator and
+ * generate a linked list to store the split strings
+ */
+static struct mk_list *parse_local_resource_id_to_list(char *local_resource_id, char *type)
+{
+ int ret = -1;
+ int max_split = -1;
+ int len_k8s_container;
+ int len_k8s_node;
+ int len_k8s_pod;
+ struct mk_list *list;
+
+ len_k8s_container = sizeof(K8S_CONTAINER) - 1;
+ len_k8s_node = sizeof(K8S_NODE) - 1;
+ len_k8s_pod = sizeof(K8S_POD) - 1;
+
+ /* Allocate list head */
+ list = flb_malloc(sizeof(struct mk_list));
+ if (!list) {
+ flb_errno();
+ return NULL;
+ }
+ mk_list_init(list);
+
+ /* Determine the max split value based on type */
+ if (strncmp(type, K8S_CONTAINER, len_k8s_container) == 0) {
+ /* including the prefix of tag */
+ max_split = 4;
+ }
+ else if (strncmp(type, K8S_NODE, len_k8s_node) == 0) {
+ max_split = 2;
+ }
+ else if (strncmp(type, K8S_POD, len_k8s_pod) == 0) {
+ max_split = 3;
+ }
+
+ /* The local_resource_id is split by '.' */
+ ret = flb_slist_split_string(list, local_resource_id, '.', max_split);
+
+ if (ret == -1 || mk_list_size(list) != max_split) {
+ flb_error("error parsing local_resource_id [%s] for type %s", local_resource_id, type);
+ flb_slist_destroy(list);
+ flb_free(list);
+ return NULL;
+ }
+
+ return list;
+}
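+
+/*
+ * Example: for the k8s_container type a local_resource_id of the form
+ * "k8s_container.<namespace_name>.<pod_name>.<container_name>" is split
+ * into four elements: the prefix plus the three label values.
+ */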
+
+/*
+ * extract_local_resource_id():
+ * - extract the value from "logging.googleapis.com/local_resource_id" field
+ * - if local_resource_id is missing from the payload, use the tag of the log
+ */
+static int extract_local_resource_id(const void *data, size_t bytes,
+ struct flb_stackdriver *ctx, const char *tag) {
+ msgpack_object_map map;
+ flb_sds_t local_resource_id;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ if ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ map = log_event.body->via.map;
+ local_resource_id = get_str_value_from_msgpack_map(map, LOCAL_RESOURCE_ID_KEY,
+ LEN_LOCAL_RESOURCE_ID_KEY);
+
+ if (local_resource_id == NULL) {
+ /* if local_resource_id is not found, use the tag of the log */
+ flb_plg_debug(ctx->ins, "local_resource_id not found, "
+ "tag [%s] is assigned for local_resource_id", tag);
+ local_resource_id = flb_sds_create(tag);
+ }
+
+ /* release any local_resource_id kept from a previous log before storing the new one */
+ if (ctx->local_resource_id) {
+ flb_sds_destroy(ctx->local_resource_id);
+ }
+
+ ctx->local_resource_id = flb_sds_create(local_resource_id);
+
+ flb_sds_destroy(local_resource_id);
+
+ ret = 0;
+ }
+ else {
+ flb_plg_error(ctx->ins, "failed to unpack data");
+
+ ret = -1;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return ret;
+}
+
+/*
+ * set_monitored_resource_labels():
+ * - use the extracted local_resource_id to assign the label keys for different
+ * resource types that are specified in the configuration of stackdriver_out plugin
+ */
+static int set_monitored_resource_labels(struct flb_stackdriver *ctx, char *type)
+{
+ int ret = -1;
+ int first = FLB_TRUE;
+ int counter = 0;
+ int len_k8s_container;
+ int len_k8s_node;
+ int len_k8s_pod;
+ size_t prefix_len = 0;
+ struct local_resource_id_list *ptr;
+ struct mk_list *list = NULL;
+ struct mk_list *head;
+ flb_sds_t new_local_resource_id;
+
+ if (!ctx->local_resource_id) {
+ flb_plg_error(ctx->ins, "local_resource_id is not assigned");
+ return -1;
+ }
+
+ len_k8s_container = sizeof(K8S_CONTAINER) - 1;
+ len_k8s_node = sizeof(K8S_NODE) - 1;
+ len_k8s_pod = sizeof(K8S_POD) - 1;
+
+ prefix_len = flb_sds_len(ctx->tag_prefix);
+ if (flb_sds_casecmp(ctx->tag_prefix, ctx->local_resource_id, prefix_len) != 0) {
+ flb_plg_error(ctx->ins, "tag_prefix [%s] doesn't match the prefix of"
+ " local_resource_id [%s]", ctx->tag_prefix,
+ ctx->local_resource_id);
+ return -1;
+ }
+
+ new_local_resource_id = flb_sds_create_len(ctx->local_resource_id,
+ flb_sds_len(ctx->local_resource_id));
+ replace_prefix_dot(new_local_resource_id, prefix_len - 1);
+
+ if (strncmp(type, K8S_CONTAINER, len_k8s_container) == 0) {
+ list = parse_local_resource_id_to_list(new_local_resource_id, K8S_CONTAINER);
+ if (!list) {
+ goto error;
+ }
+
+ /* iterate through the list */
+ mk_list_foreach(head, list) {
+ ptr = mk_list_entry(head, struct local_resource_id_list, _head);
+ if (first) {
+ first = FLB_FALSE;
+ continue;
+ }
+
+ /* Follow the order of fields in local_resource_id */
+ if (counter == 0) {
+ if (ctx->namespace_name) {
+ flb_sds_destroy(ctx->namespace_name);
+ }
+ ctx->namespace_name = flb_sds_create(ptr->val);
+ }
+ else if (counter == 1) {
+ if (ctx->pod_name) {
+ flb_sds_destroy(ctx->pod_name);
+ }
+ ctx->pod_name = flb_sds_create(ptr->val);
+ }
+ else if (counter == 2) {
+ if (ctx->container_name) {
+ flb_sds_destroy(ctx->container_name);
+ }
+ ctx->container_name = flb_sds_create(ptr->val);
+ }
+
+ counter++;
+ }
+
+ if (!ctx->namespace_name || !ctx->pod_name || !ctx->container_name) {
+ goto error;
+ }
+ }
+ else if (strncmp(type, K8S_NODE, len_k8s_node) == 0) {
+ list = parse_local_resource_id_to_list(new_local_resource_id, K8S_NODE);
+ if (!list) {
+ goto error;
+ }
+
+ mk_list_foreach(head, list) {
+ ptr = mk_list_entry(head, struct local_resource_id_list, _head);
+ if (first) {
+ first = FLB_FALSE;
+ continue;
+ }
+
+ if (ptr != NULL) {
+ if (ctx->node_name) {
+ flb_sds_destroy(ctx->node_name);
+ }
+ ctx->node_name = flb_sds_create(ptr->val);
+ }
+ }
+
+ if (!ctx->node_name) {
+ goto error;
+ }
+ }
+ else if (strncmp(type, K8S_POD, len_k8s_pod) == 0) {
+ list = parse_local_resource_id_to_list(new_local_resource_id, K8S_POD);
+ if (!list) {
+ goto error;
+ }
+
+ mk_list_foreach(head, list) {
+ ptr = mk_list_entry(head, struct local_resource_id_list, _head);
+ if (first) {
+ first = FLB_FALSE;
+ continue;
+ }
+
+ /* Follow the order of fields in local_resource_id */
+ if (counter == 0) {
+ if (ctx->namespace_name) {
+ flb_sds_destroy(ctx->namespace_name);
+ }
+ ctx->namespace_name = flb_sds_create(ptr->val);
+ }
+ else if (counter == 1) {
+ if (ctx->pod_name) {
+ flb_sds_destroy(ctx->pod_name);
+ }
+ ctx->pod_name = flb_sds_create(ptr->val);
+ }
+
+ counter++;
+ }
+
+ if (!ctx->namespace_name || !ctx->pod_name) {
+ goto error;
+ }
+ }
+
+ ret = 0;
+
+ if (list) {
+ flb_slist_destroy(list);
+ flb_free(list);
+ }
+ flb_sds_destroy(new_local_resource_id);
+
+ return ret;
+
+ error:
+ if (list) {
+ flb_slist_destroy(list);
+ flb_free(list);
+ }
+
+ if (strncmp(type, K8S_CONTAINER, len_k8s_container) == 0) {
+ if (ctx->namespace_name) {
+ flb_sds_destroy(ctx->namespace_name);
+ }
+
+ if (ctx->pod_name) {
+ flb_sds_destroy(ctx->pod_name);
+ }
+
+ if (ctx->container_name) {
+ flb_sds_destroy(ctx->container_name);
+ }
+ }
+ else if (strncmp(type, K8S_NODE, len_k8s_node) == 0) {
+ if (ctx->node_name) {
+ flb_sds_destroy(ctx->node_name);
+ }
+ }
+ else if (strncmp(type, K8S_POD, len_k8s_pod) == 0) {
+ if (ctx->namespace_name) {
+ flb_sds_destroy(ctx->namespace_name);
+ }
+
+ if (ctx->pod_name) {
+ flb_sds_destroy(ctx->pod_name);
+ }
+ }
+
+ flb_sds_destroy(new_local_resource_id);
+ return -1;
+}
+
+static int is_tag_match_regex(struct flb_stackdriver *ctx,
+ const char *tag, int tag_len)
+{
+ int ret;
+ int tag_prefix_len;
+ int len_to_be_matched;
+ const char *tag_str_to_be_matcheds;
+
+ tag_prefix_len = flb_sds_len(ctx->tag_prefix);
+ if (tag_len > tag_prefix_len &&
+ flb_sds_cmp(ctx->tag_prefix, tag, tag_prefix_len) != 0) {
+ return 0;
+ }
+
+ tag_str_to_be_matcheds = tag + tag_prefix_len;
+ len_to_be_matched = tag_len - tag_prefix_len;
+ ret = flb_regex_match(ctx->regex,
+ (unsigned char *) tag_str_to_be_matcheds,
+ len_to_be_matched);
+
+ /* 1 -> match; 0 -> doesn't match; < 0 -> error */
+ return ret;
+}
+
+static int is_local_resource_id_match_regex(struct flb_stackdriver *ctx)
+{
+ int ret;
+ int prefix_len;
+ int len_to_be_matched;
+ const char *str_to_be_matcheds;
+
+ if (!ctx->local_resource_id) {
+ flb_plg_warn(ctx->ins, "local_resource_id not found in the payload");
+ return -1;
+ }
+
+ prefix_len = flb_sds_len(ctx->tag_prefix);
+ str_to_be_matcheds = ctx->local_resource_id + prefix_len;
+ len_to_be_matched = flb_sds_len(ctx->local_resource_id) - prefix_len;
+
+ ret = flb_regex_match(ctx->regex,
+ (unsigned char *) str_to_be_matcheds,
+ len_to_be_matched);
+
+ /* 1 -> match; 0 -> doesn't match; < 0 -> error */
+ return ret;
+}
+
+static void cb_results(const char *name, const char *value,
+ size_t vlen, void *data);
+/*
+ * extract_resource_labels_from_regex() is only called when the
+ * tag or the local_resource_id field matches the regex rule
+ */
+static int extract_resource_labels_from_regex(struct flb_stackdriver *ctx,
+ const char *tag, int tag_len,
+ int from_tag)
+{
+ int ret = 1;
+ int prefix_len;
+ int len_to_be_matched;
+ int local_resource_id_len;
+ const char *str_to_be_matcheds;
+ struct flb_regex_search result;
+
+ prefix_len = flb_sds_len(ctx->tag_prefix);
+ if (from_tag == FLB_TRUE) {
+ local_resource_id_len = tag_len;
+ str_to_be_matcheds = tag + prefix_len;
+ }
+ else {
+ // this will be called only if the payload contains local_resource_id
+ local_resource_id_len = flb_sds_len(ctx->local_resource_id);
+ str_to_be_matcheds = ctx->local_resource_id + prefix_len;
+ }
+
+ len_to_be_matched = local_resource_id_len - prefix_len;
+ ret = flb_regex_do(ctx->regex, str_to_be_matcheds, len_to_be_matched, &result);
+ if (ret <= 0) {
+ flb_plg_warn(ctx->ins, "invalid pattern for given value %s when"
+ " extracting resource labels", str_to_be_matcheds);
+ return -1;
+ }
+
+ flb_regex_parse(ctx->regex, &result, cb_results, ctx);
+
+ return ret;
+}
+
+static int process_local_resource_id(struct flb_stackdriver *ctx,
+ const char *tag, int tag_len, char *type)
+{
+ int ret;
+
+ // parsing local_resource_id from tag takes higher priority
+ if (is_tag_match_regex(ctx, tag, tag_len) > 0) {
+ ret = extract_resource_labels_from_regex(ctx, tag, tag_len, FLB_TRUE);
+ }
+ else if (is_local_resource_id_match_regex(ctx) > 0) {
+ ret = extract_resource_labels_from_regex(ctx, tag, tag_len, FLB_FALSE);
+ }
+ else {
+ ret = set_monitored_resource_labels(ctx, type);
+ }
+
+ return ret;
+}
+
+/*
+ * get_payload_labels
+ * - Iterate through the original payload (obj) and find the entry that matches
+ * the labels_key
+ * - Used to convert all labels under labels_key to root-level `labels` field
+ */
+static msgpack_object *get_payload_labels(struct flb_stackdriver *ctx, msgpack_object *obj)
+{
+ int i;
+ int len;
+ msgpack_object_kv *kv = NULL;
+
+ if (!obj || obj->type != MSGPACK_OBJECT_MAP) {
+ return NULL;
+ }
+
+ len = flb_sds_len(ctx->labels_key);
+ for (i = 0; i < obj->via.map.size; i++) {
+ kv = &obj->via.map.ptr[i];
+ if (flb_sds_casecmp(ctx->labels_key, kv->key.via.str.ptr, len) == 0) {
+ /* only the first matching entry will be returned */
+ return &kv->val;
+ }
+ }
+
+ //flb_plg_debug(ctx->ins, "labels_key [%s] not found in the payload",
+ // ctx->labels_key);
+ return NULL;
+}
+
+/*
+ * pack_resource_labels():
+ * - Looks through the resource_labels parameter and appends new key value
+ * pair to the log entry.
+ * - Supports field access, plaintext assignment and environment variables.
+ */
+static int pack_resource_labels(struct flb_stackdriver *ctx,
+ struct flb_mp_map_header *mh,
+ msgpack_packer *mp_pck,
+ const void *data,
+ size_t bytes)
+{
+ struct mk_list *head;
+ struct flb_kv *label_kv;
+ struct flb_record_accessor *ra;
+ struct flb_ra_value *rval;
+ int len;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ if (ctx->should_skip_resource_labels_api == FLB_TRUE) {
+ return -1;
+ }
+
+ len = mk_list_size(&ctx->resource_labels_kvs);
+ if (len == 0) {
+ return -1;
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ if ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+
+ flb_mp_map_header_init(mh, mp_pck);
+ mk_list_foreach(head, &ctx->resource_labels_kvs) {
+ label_kv = mk_list_entry(head, struct flb_kv, _head);
+ /*
+ * KVs have the form destination=original, so the original key is the value.
+ * If the value starts with '$', it will be processed using record accessor.
+ * Otherwise, it will be treated as a plaintext assignment.
+ */
+ if (label_kv->val[0] == '$') {
+ ra = flb_ra_create(label_kv->val, FLB_TRUE);
+ rval = flb_ra_get_value_object(ra, *log_event.body);
+
+ if (rval != NULL && rval->o.type == MSGPACK_OBJECT_STR) {
+ flb_mp_map_header_append(mh);
+ msgpack_pack_str(mp_pck, flb_sds_len(label_kv->key));
+ msgpack_pack_str_body(mp_pck, label_kv->key,
+ flb_sds_len(label_kv->key));
+ msgpack_pack_str(mp_pck, flb_sds_len(rval->val.string));
+ msgpack_pack_str_body(mp_pck, rval->val.string,
+ flb_sds_len(rval->val.string));
+ flb_ra_key_value_destroy(rval);
+ } else {
+ flb_plg_warn(ctx->ins, "failed to find a corresponding entry for "
+ "resource label entry [%s=%s]", label_kv->key, label_kv->val);
+ }
+ flb_ra_destroy(ra);
+ } else {
+ flb_mp_map_header_append(mh);
+ msgpack_pack_str(mp_pck, flb_sds_len(label_kv->key));
+ msgpack_pack_str_body(mp_pck, label_kv->key,
+ flb_sds_len(label_kv->key));
+ msgpack_pack_str(mp_pck, flb_sds_len(label_kv->val));
+ msgpack_pack_str_body(mp_pck, label_kv->val,
+ flb_sds_len(label_kv->val));
+ }
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "failed to unpack data");
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return -1;
+ }
+
+ /* project_id should always be packed from config parameter */
+ flb_mp_map_header_append(mh);
+ msgpack_pack_str(mp_pck, 10);
+ msgpack_pack_str_body(mp_pck, "project_id", 10);
+ msgpack_pack_str(mp_pck, flb_sds_len(ctx->project_id));
+ msgpack_pack_str_body(mp_pck,
+ ctx->project_id, flb_sds_len(ctx->project_id));
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_mp_map_header_end(mh);
+
+ return 0;
+}
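+
+/*
+ * Hypothetical example of the resource_labels option: an entry list along the
+ * lines of "location=$kubernetes['zone'],cluster_name=my-cluster" would pack
+ * 'location' from the record through the record accessor and 'cluster_name'
+ * as plain text; 'project_id' is always appended from the plugin
+ * configuration. (The key names used here are only illustrative.)
+ */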
+
+static void pack_labels(struct flb_stackdriver *ctx,
+ msgpack_packer *mp_pck,
+ msgpack_object *payload_labels_ptr)
+{
+ int i;
+ int labels_size = 0;
+ struct mk_list *head;
+ struct flb_kv *list_kv;
+ msgpack_object_kv *obj_kv = NULL;
+
+ /* Determine size of labels map */
+ labels_size = mk_list_size(&ctx->config_labels);
+ if (payload_labels_ptr != NULL &&
+ payload_labels_ptr->type == MSGPACK_OBJECT_MAP) {
+ labels_size += payload_labels_ptr->via.map.size;
+ }
+
+ msgpack_pack_map(mp_pck, labels_size);
+
+ /* pack labels from the payload */
+ if (payload_labels_ptr != NULL &&
+ payload_labels_ptr->type == MSGPACK_OBJECT_MAP) {
+
+ for (i = 0; i < payload_labels_ptr->via.map.size; i++) {
+ obj_kv = &payload_labels_ptr->via.map.ptr[i];
+ msgpack_pack_object(mp_pck, obj_kv->key);
+ msgpack_pack_object(mp_pck, obj_kv->val);
+ }
+ }
+
+ /* pack labels set in configuration */
+ /* in msgpack, duplicate keys are overridden by the last value set */
+ /* static label keys override payload labels */
+ mk_list_foreach(head, &ctx->config_labels){
+ list_kv = mk_list_entry(head, struct flb_kv, _head);
+ msgpack_pack_str(mp_pck, flb_sds_len(list_kv->key));
+ msgpack_pack_str_body(mp_pck, list_kv->key, flb_sds_len(list_kv->key));
+ msgpack_pack_str(mp_pck, flb_sds_len(list_kv->val));
+ msgpack_pack_str_body(mp_pck, list_kv->val, flb_sds_len(list_kv->val));
+ }
+}
+
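+/*
+ * cb_results() receives the named capture groups produced by the k8s regex
+ * (pod_name, namespace_name, container_name, node_name) and stores each
+ * non-empty value in the plugin context, replacing any previous value.
+ */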
+static void cb_results(const char *name, const char *value,
+ size_t vlen, void *data)
+{
+ struct flb_stackdriver *ctx = data;
+
+ if (vlen == 0) {
+ return;
+ }
+
+ if (strcmp(name, "pod_name") == 0) {
+ if (ctx->pod_name != NULL) {
+ flb_sds_destroy(ctx->pod_name);
+ }
+ ctx->pod_name = flb_sds_create_len(value, vlen);
+ }
+ else if (strcmp(name, "namespace_name") == 0) {
+ if (ctx->namespace_name != NULL) {
+ flb_sds_destroy(ctx->namespace_name);
+ }
+ ctx->namespace_name = flb_sds_create_len(value, vlen);
+ }
+ else if (strcmp(name, "container_name") == 0) {
+ if (ctx->container_name != NULL) {
+ flb_sds_destroy(ctx->container_name);
+ }
+ ctx->container_name = flb_sds_create_len(value, vlen);
+ }
+ else if (strcmp(name, "node_name") == 0) {
+ if (ctx->node_name != NULL) {
+ flb_sds_destroy(ctx->node_name);
+ }
+ ctx->node_name = flb_sds_create_len(value, vlen);
+ }
+
+ return;
+}
+
+int flb_stackdriver_regex_init(struct flb_stackdriver *ctx)
+{
+ /* If a custom regex is not set, use the defaults */
+ ctx->regex = flb_regex_create(ctx->custom_k8s_regex);
+ if (!ctx->regex) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static int cb_stackdriver_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ int io_flags = FLB_IO_TLS;
+ char *token;
+ struct flb_stackdriver *ctx;
+
+ /* Create config context */
+ ctx = flb_stackdriver_conf_create(ins, config);
+ if (!ctx) {
+ flb_plg_error(ins, "configuration failed");
+ return -1;
+ }
+
+ /* Load config map */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ return -1;
+ }
+
+ /* Set context */
+ flb_output_set_context(ins, ctx);
+
+ /* Network mode IPv6 */
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ /* Initialize oauth2 cache pthread keys */
+ oauth2_cache_init();
+
+ /* Create mutex for acquiring oauth tokens (they are shared across flush coroutines) */
+ pthread_mutex_init(&ctx->token_mutex, NULL);
+
+ /* Create Upstream context for Stackdriver Logging (no oauth2 service) */
+ ctx->u = flb_upstream_create_url(config, FLB_STD_WRITE_URL,
+ io_flags, ins->tls);
+ ctx->metadata_u = flb_upstream_create_url(config, ctx->metadata_server,
+ FLB_IO_TCP, NULL);
+
+ /* Create oauth2 context */
+ ctx->o = flb_oauth2_create(ctx->config, FLB_STD_AUTH_URL, 3000);
+
+ if (!ctx->u) {
+ flb_plg_error(ctx->ins, "upstream creation failed");
+ return -1;
+ }
+ if (!ctx->metadata_u) {
+ flb_plg_error(ctx->ins, "metadata upstream creation failed");
+ return -1;
+ }
+ if (!ctx->o) {
+ flb_plg_error(ctx->ins, "cannot create oauth2 context");
+ return -1;
+ }
+ flb_output_upstream_set(ctx->u, ins);
+
+ /* Metadata Upstream Sync flags */
+ flb_stream_disable_async_mode(&ctx->metadata_u->base);
+
+ if (ins->test_mode == FLB_FALSE) {
+ /* Retrieve oauth2 token */
+ token = get_google_token(ctx);
+ if (!token) {
+ flb_plg_warn(ctx->ins, "token retrieval failed");
+ }
+ else {
+ flb_sds_destroy(token);
+ }
+ }
+
+ if (ctx->metadata_server_auth) {
+ ret = gce_metadata_read_project_id(ctx);
+ if (ret == -1) {
+ return -1;
+ }
+
+ if (ctx->resource_type != RESOURCE_TYPE_GENERIC_NODE
+ && ctx->resource_type != RESOURCE_TYPE_GENERIC_TASK) {
+ ret = gce_metadata_read_zone(ctx);
+ if (ret == -1) {
+ return -1;
+ }
+
+ ret = gce_metadata_read_instance_id(ctx);
+ if (ret == -1) {
+ return -1;
+ }
+ }
+ }
+
+ /* Validate project_id */
+ if (!ctx->project_id) {
+ flb_plg_error(ctx->ins, "property 'project_id' is not set");
+ return -1;
+ }
+
+ if (!ctx->export_to_project_id) {
+ ctx->export_to_project_id = ctx->project_id;
+ }
+
+ ret = flb_stackdriver_regex_init(ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "failed to init stackdriver custom regex");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int validate_severity_level(severity_t * s,
+ const char * str,
+ const unsigned int str_size)
+{
+ int i = 0;
+
+ const static struct {
+ severity_t s;
+ const unsigned int str_size;
+ const char * str;
+ } enum_mapping[] = {
+ {FLB_STD_EMERGENCY, 9, "EMERGENCY"},
+ {FLB_STD_EMERGENCY, 5, "EMERG" },
+
+ {FLB_STD_ALERT , 1, "A" },
+ {FLB_STD_ALERT , 5, "ALERT" },
+
+ {FLB_STD_CRITICAL , 1, "C" },
+ {FLB_STD_CRITICAL , 1, "F" },
+ {FLB_STD_CRITICAL , 4, "CRIT" },
+ {FLB_STD_CRITICAL , 5, "FATAL" },
+ {FLB_STD_CRITICAL , 8, "CRITICAL" },
+
+ {FLB_STD_ERROR , 1, "E" },
+ {FLB_STD_ERROR , 3, "ERR" },
+ {FLB_STD_ERROR , 5, "ERROR" },
+ {FLB_STD_ERROR , 6, "SEVERE" },
+
+ {FLB_STD_WARNING , 1, "W" },
+ {FLB_STD_WARNING , 4, "WARN" },
+ {FLB_STD_WARNING , 7, "WARNING" },
+
+ {FLB_STD_NOTICE , 1, "N" },
+ {FLB_STD_NOTICE , 6, "NOTICE" },
+
+ {FLB_STD_INFO , 1, "I" },
+ {FLB_STD_INFO , 4, "INFO" },
+
+ {FLB_STD_DEBUG , 1, "D" },
+ {FLB_STD_DEBUG , 5, "DEBUG" },
+ {FLB_STD_DEBUG , 5, "TRACE" },
+ {FLB_STD_DEBUG , 9, "TRACE_INT"},
+ {FLB_STD_DEBUG , 4, "FINE" },
+ {FLB_STD_DEBUG , 5, "FINER" },
+ {FLB_STD_DEBUG , 6, "FINEST" },
+ {FLB_STD_DEBUG , 6, "CONFIG" },
+
+ {FLB_STD_DEFAULT , 7, "DEFAULT" }
+ };
+
+ for (i = 0; i < sizeof (enum_mapping) / sizeof (enum_mapping[0]); ++i) {
+ if (enum_mapping[i].str_size != str_size) {
+ continue;
+ }
+
+ if (strncasecmp(str, enum_mapping[i].str, str_size) == 0) {
+ *s = enum_mapping[i].s;
+ return 0;
+ }
+ }
+ return -1;
+}
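+
+/*
+ * Example: a severity value of "warn" or "W" (case-insensitive) maps to
+ * FLB_STD_WARNING; an unrecognized value returns -1 and the entry is
+ * emitted without an explicit severity.
+ */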
+
+static int get_msgpack_obj(msgpack_object * subobj, const msgpack_object * o,
+ const flb_sds_t key, const int key_size,
+ msgpack_object_type type)
+{
+ int i = 0;
+ msgpack_object_kv * p = NULL;
+
+ if (o == NULL || subobj == NULL) {
+ return -1;
+ }
+
+ for (i = 0; i < o->via.map.size; i++) {
+ p = &o->via.map.ptr[i];
+ if (p->val.type != type) {
+ continue;
+ }
+
+ if (flb_sds_cmp(key, p->key.via.str.ptr, p->key.via.str.size) == 0) {
+ *subobj = p->val;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static int get_string(flb_sds_t * s, const msgpack_object * o, const flb_sds_t key)
+{
+ msgpack_object tmp;
+ if (get_msgpack_obj(&tmp, o, key, flb_sds_len(key), MSGPACK_OBJECT_STR) == 0) {
+ *s = flb_sds_create_len(tmp.via.str.ptr, tmp.via.str.size);
+ return 0;
+ }
+
+ *s = 0;
+ return -1;
+}
+
+static int get_severity_level(severity_t * s, const msgpack_object * o,
+ const flb_sds_t key)
+{
+ msgpack_object tmp;
+ if (get_msgpack_obj(&tmp, o, key, flb_sds_len(key), MSGPACK_OBJECT_STR) == 0
+ && validate_severity_level(s, tmp.via.str.ptr, tmp.via.str.size) == 0) {
+ return 0;
+ }
+ *s = 0;
+ return -1;
+}
+
+static int get_trace_sampled(int * trace_sampled_value, const msgpack_object * src_obj,
+ const flb_sds_t key)
+{
+ msgpack_object tmp;
+ int ret = get_msgpack_obj(&tmp, src_obj, key, flb_sds_len(key), MSGPACK_OBJECT_BOOLEAN);
+
+ if (ret == 0 && tmp.via.boolean == true) {
+ *trace_sampled_value = FLB_TRUE;
+ return 0;
+ } else if (ret == 0 && tmp.via.boolean == false) {
+ *trace_sampled_value = FLB_FALSE;
+ return 0;
+ }
+
+ return -1;
+}
+
+static insert_id_status validate_insert_id(msgpack_object * insert_id_value,
+ const msgpack_object * obj)
+{
+ int i = 0;
+ msgpack_object_kv * p = NULL;
+ insert_id_status ret = INSERTID_NOT_PRESENT;
+
+ if (obj == NULL) {
+ return ret;
+ }
+
+ for (i = 0; i < obj->via.map.size; i++) {
+ p = &obj->via.map.ptr[i];
+ if (p->key.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+ if (validate_key(p->key, DEFAULT_INSERT_ID_KEY, INSERT_ID_SIZE)) {
+ if (p->val.type == MSGPACK_OBJECT_STR && p->val.via.str.size > 0) {
+ *insert_id_value = p->val;
+ ret = INSERTID_VALID;
+ }
+ else {
+ ret = INSERTID_INVALID;
+ }
+ break;
+ }
+ }
+ return ret;
+}
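+
+/*
+ * Example: a record such as {"logging.googleapis.com/insertId": "abc-123"}
+ * yields INSERTID_VALID; an empty or non-string value yields INSERTID_INVALID
+ * and the record is dropped; a missing key yields INSERTID_NOT_PRESENT
+ * (assuming DEFAULT_INSERT_ID_KEY is the logging.googleapis.com/insertId field).
+ */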
+
+static int pack_json_payload(int insert_id_extracted,
+ int operation_extracted, int operation_extra_size,
+ int source_location_extracted,
+ int source_location_extra_size,
+ int http_request_extracted,
+ int http_request_extra_size,
+ timestamp_status tms_status,
+ msgpack_packer *mp_pck, msgpack_object *obj,
+ struct flb_stackdriver *ctx)
+{
+ /* Specified fields include local_resource_id, operation, sourceLocation ... */
+ int i, j;
+ int to_remove = 0;
+ int ret;
+ int map_size;
+ int new_map_size;
+ int len;
+ int len_to_be_removed;
+ int key_not_found;
+ flb_sds_t removed;
+ flb_sds_t monitored_resource_key;
+ flb_sds_t local_resource_id_key;
+ flb_sds_t stream;
+ msgpack_object_kv *kv = obj->via.map.ptr;
+ msgpack_object_kv *const kvend = obj->via.map.ptr + obj->via.map.size;
+
+ monitored_resource_key = flb_sds_create(MONITORED_RESOURCE_KEY);
+ local_resource_id_key = flb_sds_create(LOCAL_RESOURCE_ID_KEY);
+ stream = flb_sds_create("stream");
+ /*
+ * array of elements that need to be removed from payload,
+ * special field 'operation' will be processed individually
+ */
+ flb_sds_t to_be_removed[] =
+ {
+ monitored_resource_key,
+ local_resource_id_key,
+ ctx->labels_key,
+ ctx->severity_key,
+ ctx->trace_key,
+ ctx->span_id_key,
+ ctx->trace_sampled_key,
+ ctx->log_name_key,
+ stream
+ /* more special fields may need to be added here; if this list grows beyond
+ a few entries, it might need to be converted to an flb_hash
+ */
+ };
+
+ if (insert_id_extracted == FLB_TRUE) {
+ to_remove += 1;
+ }
+ if (operation_extracted == FLB_TRUE && operation_extra_size == 0) {
+ to_remove += 1;
+ }
+ if (source_location_extracted == FLB_TRUE && source_location_extra_size == 0) {
+ to_remove += 1;
+ }
+ if (http_request_extracted == FLB_TRUE && http_request_extra_size == 0) {
+ to_remove += 1;
+ }
+ if (tms_status == FORMAT_TIMESTAMP_OBJECT) {
+ to_remove += 1;
+ }
+ if (tms_status == FORMAT_TIMESTAMP_DUO_FIELDS) {
+ to_remove += 2;
+ }
+
+ map_size = obj->via.map.size;
+ len_to_be_removed = sizeof(to_be_removed) / sizeof(to_be_removed[0]);
+ for (i = 0; i < map_size; i++) {
+ kv = &obj->via.map.ptr[i];
+ len = kv->key.via.str.size;
+ for (j = 0; j < len_to_be_removed; j++) {
+ removed = to_be_removed[j];
+ /*
+ * check length of key to avoid partial matching
+ * e.g. labels key = labels && kv->key = labelss
+ */
+ if (removed && flb_sds_cmp(removed, kv->key.via.str.ptr, len) == 0) {
+ to_remove += 1;
+ break;
+ }
+ }
+ }
+
+ new_map_size = map_size - to_remove;
+
+ ret = msgpack_pack_map(mp_pck, new_map_size);
+ if (ret < 0) {
+ goto error;
+ }
+
+ /* points back to the beginning of map */
+ kv = obj->via.map.ptr;
+ for(; kv != kvend; ++kv ) {
+ key_not_found = 1;
+
+ /* processing logging.googleapis.com/insertId */
+ if (insert_id_extracted == FLB_TRUE
+ && validate_key(kv->key, DEFAULT_INSERT_ID_KEY, INSERT_ID_SIZE)) {
+ continue;
+ }
+
+ /* processing logging.googleapis.com/operation */
+ if (validate_key(kv->key, OPERATION_FIELD_IN_JSON,
+ OPERATION_KEY_SIZE)
+ && kv->val.type == MSGPACK_OBJECT_MAP) {
+ if (operation_extra_size > 0) {
+ msgpack_pack_object(mp_pck, kv->key);
+ pack_extra_operation_subfields(mp_pck, &kv->val, operation_extra_size);
+ }
+ continue;
+ }
+
+ if (validate_key(kv->key, SOURCELOCATION_FIELD_IN_JSON,
+ SOURCE_LOCATION_SIZE)
+ && kv->val.type == MSGPACK_OBJECT_MAP) {
+
+ if (source_location_extra_size > 0) {
+ msgpack_pack_object(mp_pck, kv->key);
+ pack_extra_source_location_subfields(mp_pck, &kv->val,
+ source_location_extra_size);
+ }
+ continue;
+ }
+
+ if (validate_key(kv->key, ctx->http_request_key,
+ ctx->http_request_key_size)
+ && kv->val.type == MSGPACK_OBJECT_MAP) {
+
+ if(http_request_extra_size > 0) {
+ msgpack_pack_object(mp_pck, kv->key);
+ pack_extra_http_request_subfields(mp_pck, &kv->val,
+ http_request_extra_size);
+ }
+ continue;
+ }
+
+ if (validate_key(kv->key, "timestamp", 9)
+ && tms_status == FORMAT_TIMESTAMP_OBJECT) {
+ continue;
+ }
+
+ if (validate_key(kv->key, "timestampSeconds", 16)
+ && tms_status == FORMAT_TIMESTAMP_DUO_FIELDS) {
+ continue;
+ }
+ if (validate_key(kv->key, "timestampNanos", 14)
+ && tms_status == FORMAT_TIMESTAMP_DUO_FIELDS) {
+ continue;
+ }
+
+ len = kv->key.via.str.size;
+ for (j = 0; j < len_to_be_removed; j++) {
+ removed = to_be_removed[j];
+ if (removed && flb_sds_cmp(removed, kv->key.via.str.ptr, len) == 0) {
+ key_not_found = 0;
+ break;
+ }
+ }
+
+ if (key_not_found) {
+ ret = msgpack_pack_object(mp_pck, kv->key);
+ if (ret < 0) {
+ goto error;
+ }
+ ret = msgpack_pack_object(mp_pck, kv->val);
+ if (ret < 0) {
+ goto error;
+ }
+ }
+ }
+
+ flb_sds_destroy(monitored_resource_key);
+ flb_sds_destroy(local_resource_id_key);
+ flb_sds_destroy(stream);
+ return 0;
+
+ error:
+ flb_sds_destroy(monitored_resource_key);
+ flb_sds_destroy(local_resource_id_key);
+ flb_sds_destroy(stream);
+ return ret;
+}
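+
+/*
+ * In short, pack_json_payload() re-emits the record map as jsonPayload while
+ * omitting the keys that were consumed elsewhere: the monitored-resource and
+ * local_resource_id hints, insertId, operation, sourceLocation, httpRequest,
+ * the timestamp fields, labels, severity, trace/span/traceSampled keys,
+ * logName and the k8s 'stream' hint.
+ */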
+
+static flb_sds_t stackdriver_format(struct flb_stackdriver *ctx,
+ int total_records,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes)
+{
+ int len;
+ int ret;
+ int array_size = 0;
+ /* The default value is 3: timestamp, jsonPayload, logName. */
+ int entry_size = 3;
+ size_t s;
+ // size_t off = 0;
+ char path[PATH_MAX];
+ char time_formatted[255];
+ const char *newtag;
+ const char *new_log_name;
+ msgpack_object *obj;
+ msgpack_sbuffer mp_sbuf;
+ msgpack_packer mp_pck;
+ flb_sds_t out_buf;
+ struct flb_mp_map_header mh;
+
+ /* Parameters for severity */
+ int severity_extracted = FLB_FALSE;
+ severity_t severity;
+
+ /* Parameters for trace */
+ int trace_extracted = FLB_FALSE;
+ flb_sds_t trace;
+ char stackdriver_trace[PATH_MAX];
+ const char *new_trace;
+
+ /* Parameters for span id */
+ int span_id_extracted = FLB_FALSE;
+ flb_sds_t span_id;
+
+ /* Parameters for trace sampled */
+ int trace_sampled_extracted = FLB_FALSE;
+ int trace_sampled = FLB_FALSE;
+
+ /* Parameters for log name */
+ int log_name_extracted = FLB_FALSE;
+ flb_sds_t log_name = NULL;
+ flb_sds_t stream = NULL;
+ flb_sds_t stream_key;
+
+ /* Parameters for insertId */
+ msgpack_object insert_id_obj;
+ insert_id_status in_status;
+ int insert_id_extracted;
+
+ /* Parameters in Operation */
+ flb_sds_t operation_id;
+ flb_sds_t operation_producer;
+ int operation_first = FLB_FALSE;
+ int operation_last = FLB_FALSE;
+ int operation_extracted = FLB_FALSE;
+ int operation_extra_size = 0;
+
+ /* Parameters for sourceLocation */
+ flb_sds_t source_location_file;
+ int64_t source_location_line = 0;
+ flb_sds_t source_location_function;
+ int source_location_extracted = FLB_FALSE;
+ int source_location_extra_size = 0;
+
+ /* Parameters for httpRequest */
+ struct http_request_field http_request;
+ int http_request_extracted = FLB_FALSE;
+ int http_request_extra_size = 0;
+
+ /* Parameters for Timestamp */
+ struct tm tm;
+ // struct flb_time tms;
+ timestamp_status tms_status;
+ /* Count number of records */
+ array_size = total_records;
+
+ /* Parameters for labels */
+ msgpack_object *payload_labels_ptr;
+ int labels_size = 0;
+
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return NULL;
+ }
+
+ /*
+ * Search each entry and validate insertId.
+ * Reject the entry if insertId is invalid.
+ * If all the entries are rejected, stop formatting.
+ *
+ */
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ /* Extract insertId */
+ in_status = validate_insert_id(&insert_id_obj, log_event.body);
+
+ if (in_status == INSERTID_INVALID) {
+ flb_plg_error(ctx->ins,
+ "Incorrect insertId received. InsertId should be non-empty string.");
+ array_size -= 1;
+ }
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ /* Sounds like this should compare to -1 instead of zero */
+ if (array_size == 0) {
+ return NULL;
+ }
+
+ /* Create temporal msgpack buffer */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ /*
+ * Pack root map (resource & entries):
+ *
+ * {"resource": {"type": "...", "labels": {...},
+ * "entries": []
+ */
+ msgpack_pack_map(&mp_pck, 2);
+
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "resource", 8);
+
+ /* type & labels */
+ msgpack_pack_map(&mp_pck, 2);
+
+ /* type */
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "type", 4);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->resource));
+ msgpack_pack_str_body(&mp_pck, ctx->resource,
+ flb_sds_len(ctx->resource));
+
+ msgpack_pack_str(&mp_pck, 6);
+ msgpack_pack_str_body(&mp_pck, "labels", 6);
+
+ ret = pack_resource_labels(ctx, &mh, &mp_pck, data, bytes);
+ if (ret != 0) {
+ if (ctx->resource_type == RESOURCE_TYPE_K8S) {
+ ret = extract_local_resource_id(data, bytes, ctx, tag);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "fail to construct local_resource_id");
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return NULL;
+ }
+ }
+ ret = parse_monitored_resource(ctx, data, bytes, &mp_pck);
+ if (ret != 0) {
+ if (strcmp(ctx->resource, "global") == 0) {
+ /* global resource has field project_id */
+ msgpack_pack_map(&mp_pck, 1);
+ msgpack_pack_str(&mp_pck, 10);
+ msgpack_pack_str_body(&mp_pck, "project_id", 10);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->project_id));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->project_id, flb_sds_len(ctx->project_id));
+ }
+ else if (ctx->resource_type == RESOURCE_TYPE_GENERIC_NODE
+ || ctx->resource_type == RESOURCE_TYPE_GENERIC_TASK) {
+ flb_mp_map_header_init(&mh, &mp_pck);
+
+ if (ctx->resource_type == RESOURCE_TYPE_GENERIC_NODE && ctx->node_id) {
+ /* generic_node has fields project_id, location, namespace, node_id */
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 7);
+ msgpack_pack_str_body(&mp_pck, "node_id", 7);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->node_id));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->node_id, flb_sds_len(ctx->node_id));
+ }
+ else {
+ /* generic_task has fields project_id, location, namespace, job, task_id */
+ if (ctx->job) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 3);
+ msgpack_pack_str_body(&mp_pck, "job", 3);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->job));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->job, flb_sds_len(ctx->job));
+ }
+
+ if (ctx->task_id) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 7);
+ msgpack_pack_str_body(&mp_pck, "task_id", 7);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->task_id));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->task_id, flb_sds_len(ctx->task_id));
+ }
+ }
+
+ if (ctx->project_id) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 10);
+ msgpack_pack_str_body(&mp_pck, "project_id", 10);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->project_id));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->project_id, flb_sds_len(ctx->project_id));
+ }
+
+ if (ctx->location) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "location", 8);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->location));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->location, flb_sds_len(ctx->location));
+ }
+
+ if (ctx->namespace_id) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 9);
+ msgpack_pack_str_body(&mp_pck, "namespace", 9);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->namespace_id));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->namespace_id, flb_sds_len(ctx->namespace_id));
+ }
+
+ flb_mp_map_header_end(&mh);
+ }
+ else if (strcmp(ctx->resource, "gce_instance") == 0) {
+ /* gce_instance resource has fields project_id, zone, instance_id */
+ flb_mp_map_header_init(&mh, &mp_pck);
+
+ if (ctx->project_id) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 10);
+ msgpack_pack_str_body(&mp_pck, "project_id", 10);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->project_id));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->project_id, flb_sds_len(ctx->project_id));
+ }
+
+ if (ctx->zone) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "zone", 4);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->zone));
+ msgpack_pack_str_body(&mp_pck, ctx->zone, flb_sds_len(ctx->zone));
+ }
+
+ if (ctx->instance_id) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 11);
+ msgpack_pack_str_body(&mp_pck, "instance_id", 11);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->instance_id));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->instance_id, flb_sds_len(ctx->instance_id));
+ }
+ flb_mp_map_header_end(&mh);
+ }
+ else if (strcmp(ctx->resource, K8S_CONTAINER) == 0) {
+ /* k8s_container resource has fields project_id, location, cluster_name,
+ * namespace_name, pod_name, container_name
+ *
+ * The local_resource_id for k8s_container is in format:
+ * k8s_container.<namespace_name>.<pod_name>.<container_name>
+ */
+
+ ret = process_local_resource_id(ctx, tag, tag_len, K8S_CONTAINER);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "fail to extract resource labels "
+ "for k8s_container resource type");
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return NULL;
+ }
+
+ flb_mp_map_header_init(&mh, &mp_pck);
+
+ if (ctx->project_id) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 10);
+ msgpack_pack_str_body(&mp_pck, "project_id", 10);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->project_id));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->project_id, flb_sds_len(ctx->project_id));
+ }
+
+ if (ctx->cluster_location) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "location", 8);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->cluster_location));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->cluster_location,
+ flb_sds_len(ctx->cluster_location));
+ }
+
+ if (ctx->cluster_name) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 12);
+ msgpack_pack_str_body(&mp_pck, "cluster_name", 12);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->cluster_name));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->cluster_name, flb_sds_len(ctx->cluster_name));
+ }
+
+ if (ctx->namespace_name) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 14);
+ msgpack_pack_str_body(&mp_pck, "namespace_name", 14);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->namespace_name));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->namespace_name,
+ flb_sds_len(ctx->namespace_name));
+ }
+
+ if (ctx->pod_name) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "pod_name", 8);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->pod_name));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->pod_name, flb_sds_len(ctx->pod_name));
+ }
+
+ if (ctx->container_name) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 14);
+ msgpack_pack_str_body(&mp_pck, "container_name", 14);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->container_name));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->container_name,
+ flb_sds_len(ctx->container_name));
+ }
+
+ flb_mp_map_header_end(&mh);
+ }
+ else if (strcmp(ctx->resource, K8S_NODE) == 0) {
+ /* k8s_node resource has fields project_id, location, cluster_name, node_name
+ *
+ * The local_resource_id for k8s_node is in format:
+ * k8s_node.<node_name>
+ */
+
+ ret = process_local_resource_id(ctx, tag, tag_len, K8S_NODE);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "fail to process local_resource_id from "
+ "log entry for k8s_node");
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return NULL;
+ }
+
+ flb_mp_map_header_init(&mh, &mp_pck);
+
+ if (ctx->project_id) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 10);
+ msgpack_pack_str_body(&mp_pck, "project_id", 10);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->project_id));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->project_id, flb_sds_len(ctx->project_id));
+ }
+
+ if (ctx->cluster_location) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "location", 8);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->cluster_location));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->cluster_location,
+ flb_sds_len(ctx->cluster_location));
+ }
+
+ if (ctx->cluster_name) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 12);
+ msgpack_pack_str_body(&mp_pck, "cluster_name", 12);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->cluster_name));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->cluster_name, flb_sds_len(ctx->cluster_name));
+ }
+
+ if (ctx->node_name) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 9);
+ msgpack_pack_str_body(&mp_pck, "node_name", 9);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->node_name));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->node_name, flb_sds_len(ctx->node_name));
+ }
+
+ flb_mp_map_header_end(&mh);
+ }
+ else if (strcmp(ctx->resource, K8S_POD) == 0) {
+ /* k8s_pod resource has fields project_id, location, cluster_name,
+ * namespace_name, pod_name.
+ *
+ * The local_resource_id for k8s_pod is in format:
+ * k8s_pod.<namespace_name>.<pod_name>
+ */
+
+ ret = process_local_resource_id(ctx, tag, tag_len, K8S_POD);
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "fail to process local_resource_id from "
+ "log entry for k8s_pod");
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return NULL;
+ }
+
+ flb_mp_map_header_init(&mh, &mp_pck);
+
+ if (ctx->project_id) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 10);
+ msgpack_pack_str_body(&mp_pck, "project_id", 10);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->project_id));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->project_id, flb_sds_len(ctx->project_id));
+ }
+
+ if (ctx->cluster_location) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "location", 8);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->cluster_location));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->cluster_location,
+ flb_sds_len(ctx->cluster_location));
+ }
+
+ if (ctx->cluster_name) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 12);
+ msgpack_pack_str_body(&mp_pck, "cluster_name", 12);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->cluster_name));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->cluster_name, flb_sds_len(ctx->cluster_name));
+ }
+
+ if (ctx->namespace_name) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 14);
+ msgpack_pack_str_body(&mp_pck, "namespace_name", 14);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->namespace_name));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->namespace_name,
+ flb_sds_len(ctx->namespace_name));
+ }
+
+ if (ctx->pod_name) {
+ flb_mp_map_header_append(&mh);
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "pod_name", 8);
+ msgpack_pack_str(&mp_pck, flb_sds_len(ctx->pod_name));
+ msgpack_pack_str_body(&mp_pck,
+ ctx->pod_name, flb_sds_len(ctx->pod_name));
+ }
+
+ flb_mp_map_header_end(&mh);
+ }
+ else {
+ flb_plg_error(ctx->ins, "unsupported resource type '%s'",
+ ctx->resource);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return NULL;
+ }
+ }
+ }
+ msgpack_pack_str(&mp_pck, 7);
+ msgpack_pack_str_body(&mp_pck, "entries", 7);
+
+ /* Append entries */
+ msgpack_pack_array(&mp_pck, array_size);
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ return NULL;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ obj = log_event.body;
+ tms_status = extract_timestamp(obj, &log_event.timestamp);
+
+ /*
+ * Pack entry
+ *
+ * {
+ * "severity": "...",
+ * "labels": "...",
+ * "logName": "...",
+ * "jsonPayload": {...},
+ * "timestamp": "...",
+ * "spanId": "...",
+ * "traceSampled": <true or false>,
+ * "trace": "..."
+ * }
+ */
+ entry_size = 3;
+
+ /* Extract severity */
+ severity_extracted = FLB_FALSE;
+ if (ctx->severity_key
+ && get_severity_level(&severity, obj, ctx->severity_key) == 0) {
+ severity_extracted = FLB_TRUE;
+ entry_size += 1;
+ }
+
+ /* Extract trace */
+ trace_extracted = FLB_FALSE;
+ if (ctx->trace_key
+ && get_string(&trace, obj, ctx->trace_key) == 0) {
+ trace_extracted = FLB_TRUE;
+ entry_size += 1;
+ }
+
+ /* Extract span id */
+ span_id_extracted = FLB_FALSE;
+ if (ctx->span_id_key
+ && get_string(&span_id, obj, ctx->span_id_key) == 0) {
+ span_id_extracted = FLB_TRUE;
+ entry_size += 1;
+ }
+
+ /* Extract trace sampled */
+ trace_sampled_extracted = FLB_FALSE;
+ if (ctx->trace_sampled_key
+ && get_trace_sampled(&trace_sampled, obj, ctx->trace_sampled_key) == 0) {
+ trace_sampled_extracted = FLB_TRUE;
+ entry_size += 1;
+ }
+
+ /* Extract log name */
+ log_name_extracted = FLB_FALSE;
+ if (ctx->log_name_key
+ && get_string(&log_name, obj, ctx->log_name_key) == 0) {
+ log_name_extracted = FLB_TRUE;
+ }
+
+ /* Extract insertId */
+ in_status = validate_insert_id(&insert_id_obj, obj);
+ if (in_status == INSERTID_VALID) {
+ insert_id_extracted = FLB_TRUE;
+ entry_size += 1;
+ }
+ else if (in_status == INSERTID_NOT_PRESENT) {
+ insert_id_extracted = FLB_FALSE;
+ }
+ else {
+ if (log_name_extracted == FLB_TRUE) {
+ flb_sds_destroy(log_name);
+ }
+ continue;
+ }
+
+ /* Extract operation */
+ operation_id = flb_sds_create("");
+ operation_producer = flb_sds_create("");
+ operation_first = FLB_FALSE;
+ operation_last = FLB_FALSE;
+ operation_extra_size = 0;
+ operation_extracted = extract_operation(&operation_id, &operation_producer,
+ &operation_first, &operation_last, obj,
+ &operation_extra_size);
+
+ if (operation_extracted == FLB_TRUE) {
+ entry_size += 1;
+ }
+
+ /* Extract sourceLocation */
+ source_location_file = flb_sds_create("");
+ source_location_line = 0;
+ source_location_function = flb_sds_create("");
+ source_location_extra_size = 0;
+ source_location_extracted = extract_source_location(&source_location_file,
+ &source_location_line,
+ &source_location_function,
+ obj,
+ &source_location_extra_size);
+
+ if (source_location_extracted == FLB_TRUE) {
+ entry_size += 1;
+ }
+
+ /* Extract httpRequest */
+ init_http_request(&http_request);
+ http_request_extra_size = 0;
+ http_request_extracted = extract_http_request(&http_request,
+ ctx->http_request_key,
+ ctx->http_request_key_size,
+ obj, &http_request_extra_size);
+ if (http_request_extracted == FLB_TRUE) {
+ entry_size += 1;
+ }
+
+ /* Extract payload labels */
+ payload_labels_ptr = get_payload_labels(ctx, obj);
+ if (payload_labels_ptr != NULL &&
+ payload_labels_ptr->type != MSGPACK_OBJECT_MAP) {
+ flb_plg_error(ctx->ins, "the type of payload labels should be map");
+ flb_sds_destroy(operation_id);
+ flb_sds_destroy(operation_producer);
+ flb_log_event_decoder_destroy(&log_decoder);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+ return NULL;
+ }
+
+ /* Number of parsed labels */
+ labels_size = mk_list_size(&ctx->config_labels);
+ if (payload_labels_ptr != NULL &&
+ payload_labels_ptr->type == MSGPACK_OBJECT_MAP) {
+ labels_size += payload_labels_ptr->via.map.size;
+ }
+
+ if (labels_size > 0) {
+ entry_size += 1;
+ }
+
+ msgpack_pack_map(&mp_pck, entry_size);
+
+ /* Add severity into the log entry */
+ if (severity_extracted == FLB_TRUE) {
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "severity", 8);
+ msgpack_pack_int(&mp_pck, severity);
+ }
+
+ /* Add trace into the log entry */
+ if (trace_extracted == FLB_TRUE) {
+ msgpack_pack_str(&mp_pck, 5);
+ msgpack_pack_str_body(&mp_pck, "trace", 5);
+
+ if (ctx->autoformat_stackdriver_trace) {
+ len = snprintf(stackdriver_trace, sizeof(stackdriver_trace) - 1,
+ "projects/%s/traces/%s", ctx->project_id, trace);
+ new_trace = stackdriver_trace;
+ }
+ else {
+ len = flb_sds_len(trace);
+ new_trace = trace;
+ }
+
+ msgpack_pack_str(&mp_pck, len);
+ msgpack_pack_str_body(&mp_pck, new_trace, len);
+ flb_sds_destroy(trace);
+ }
+
+ /* Add spanId field into the log entry */
+ if (span_id_extracted == FLB_TRUE) {
+ msgpack_pack_str_with_body(&mp_pck, "spanId", 6);
+ len = flb_sds_len(span_id);
+ msgpack_pack_str_with_body(&mp_pck, span_id, len);
+ flb_sds_destroy(span_id);
+ }
+
+ /* Add traceSampled field into the log entry */
+ if (trace_sampled_extracted == FLB_TRUE) {
+ msgpack_pack_str_with_body(&mp_pck, "traceSampled", 12);
+
+ if (trace_sampled == FLB_TRUE) {
+ msgpack_pack_true(&mp_pck);
+ } else {
+ msgpack_pack_false(&mp_pck);
+ }
+
+ }
+
+ /* Add insertId field into the log entry */
+ if (insert_id_extracted == FLB_TRUE) {
+ msgpack_pack_str(&mp_pck, 8);
+ msgpack_pack_str_body(&mp_pck, "insertId", 8);
+ msgpack_pack_object(&mp_pck, insert_id_obj);
+ }
+
+ /* Add operation field into the log entry */
+ if (operation_extracted == FLB_TRUE) {
+ add_operation_field(&operation_id, &operation_producer,
+ &operation_first, &operation_last, &mp_pck);
+ }
+
+ /* Add sourceLocation field into the log entry */
+ if (source_location_extracted == FLB_TRUE) {
+ add_source_location_field(&source_location_file, source_location_line,
+ &source_location_function, &mp_pck);
+ }
+
+ /* Add httpRequest field into the log entry */
+ if (http_request_extracted == FLB_TRUE) {
+ add_http_request_field(&http_request, &mp_pck);
+ }
+
+ /* labels */
+ if (labels_size > 0) {
+ msgpack_pack_str(&mp_pck, 6);
+ msgpack_pack_str_body(&mp_pck, "labels", 6);
+ pack_labels(ctx, &mp_pck, payload_labels_ptr);
+ }
+
+        /* Clean up operation, sourceLocation and httpRequest buffers */
+ flb_sds_destroy(operation_id);
+ flb_sds_destroy(operation_producer);
+ flb_sds_destroy(source_location_file);
+ flb_sds_destroy(source_location_function);
+ destroy_http_request(&http_request);
+
+ /* jsonPayload */
+ msgpack_pack_str(&mp_pck, 11);
+ msgpack_pack_str_body(&mp_pck, "jsonPayload", 11);
+ pack_json_payload(insert_id_extracted,
+ operation_extracted, operation_extra_size,
+ source_location_extracted,
+ source_location_extra_size,
+ http_request_extracted,
+ http_request_extra_size,
+ tms_status,
+ &mp_pck, obj, ctx);
+
+ /* avoid modifying the original tag */
+ newtag = tag;
+ stream_key = flb_sds_create("stream");
+ if (ctx->resource_type == RESOURCE_TYPE_K8S
+ && get_string(&stream, obj, stream_key) == 0) {
+ if (flb_sds_cmp(stream, STDOUT, flb_sds_len(stream)) == 0) {
+ newtag = "stdout";
+ }
+ else if (flb_sds_cmp(stream, STDERR, flb_sds_len(stream)) == 0) {
+ newtag = "stderr";
+ }
+ }
+
+ if (log_name_extracted == FLB_FALSE) {
+ new_log_name = newtag;
+ }
+ else {
+ new_log_name = log_name;
+ }
+
+ /* logName */
+ len = snprintf(path, sizeof(path) - 1,
+ "projects/%s/logs/%s", ctx->export_to_project_id, new_log_name);
+
+ if (log_name_extracted == FLB_TRUE) {
+ flb_sds_destroy(log_name);
+ }
+
+ msgpack_pack_str(&mp_pck, 7);
+ msgpack_pack_str_body(&mp_pck, "logName", 7);
+ msgpack_pack_str(&mp_pck, len);
+ msgpack_pack_str_body(&mp_pck, path, len);
+ flb_sds_destroy(stream_key);
+ flb_sds_destroy(stream);
+
+ /* timestamp */
+ msgpack_pack_str(&mp_pck, 9);
+ msgpack_pack_str_body(&mp_pck, "timestamp", 9);
+
+ /* Format the time */
+ /*
+ * If format is timestamp_object or timestamp_duo_fields,
+ * tms has been updated.
+ *
+         * If the timestamp is not present,
+         * use the default tms (current time).
+ */
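+        /*
+         * Illustrative result of the strftime/snprintf composition below
+         * (the values are hypothetical): "2024-01-01T00:00:00.123456789Z"
+         */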
+
+ gmtime_r(&log_event.timestamp.tm.tv_sec, &tm);
+ s = strftime(time_formatted, sizeof(time_formatted) - 1,
+ FLB_STD_TIME_FMT, &tm);
+ len = snprintf(time_formatted + s, sizeof(time_formatted) - 1 - s,
+ ".%09" PRIu64 "Z",
+ (uint64_t) log_event.timestamp.tm.tv_nsec);
+ s += len;
+
+ msgpack_pack_str(&mp_pck, s);
+ msgpack_pack_str_body(&mp_pck, time_formatted, s);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ /* Convert from msgpack to JSON */
+ out_buf = flb_msgpack_raw_to_json_sds(mp_sbuf.data, mp_sbuf.size);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ if (!out_buf) {
+ flb_plg_error(ctx->ins, "error formatting JSON payload");
+ return NULL;
+ }
+
+ return out_buf;
+}
+
+static int stackdriver_format_test(struct flb_config *config,
+ struct flb_input_instance *ins,
+ void *plugin_context,
+ void *flush_ctx,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_data, size_t *out_size)
+{
+ int total_records;
+ flb_sds_t payload = NULL;
+ struct flb_stackdriver *ctx = plugin_context;
+
+ /* Count number of records */
+ total_records = flb_mp_count(data, bytes);
+
+ payload = stackdriver_format(ctx, total_records,
+ (char *) tag, tag_len, data, bytes);
+ if (payload == NULL) {
+ return -1;
+ }
+
+ *out_data = payload;
+ *out_size = flb_sds_len(payload);
+
+ return 0;
+
+}
+
+#ifdef FLB_HAVE_METRICS
+static void update_http_metrics(struct flb_stackdriver *ctx,
+ struct flb_event_chunk *event_chunk,
+ uint64_t ts,
+ int http_status)
+{
+ char tmp[32];
+
+ /* convert status to string format */
+ snprintf(tmp, sizeof(tmp) - 1, "%i", http_status);
+ char *name = (char *) flb_output_name(ctx->ins);
+
+ /* processed records total */
+ cmt_counter_add(ctx->cmt_proc_records_total, ts, event_chunk->total_events,
+ 2, (char *[]) {tmp, name});
+
+ /* HTTP status */
+ if (http_status != STACKDRIVER_NET_ERROR) {
+ cmt_counter_inc(ctx->cmt_requests_total, ts, 2, (char *[]) {tmp, name});
+ }
+}
+
+static void update_retry_metric(struct flb_stackdriver *ctx,
+ struct flb_event_chunk *event_chunk,
+ uint64_t ts,
+ int http_status, int ret_code)
+{
+ char tmp[32];
+ char *name = (char *) flb_output_name(ctx->ins);
+
+ if (ret_code != FLB_RETRY) {
+ return;
+ }
+
+ /* convert status to string format */
+ snprintf(tmp, sizeof(tmp) - 1, "%i", http_status);
+ cmt_counter_add(ctx->cmt_retried_records_total,
+ ts, event_chunk->total_events, 2, (char *[]) {tmp, name});
+
+}
+#endif
+
+static void cb_stackdriver_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ (void) i_ins;
+ (void) config;
+ int ret;
+ int ret_code = FLB_RETRY;
+ size_t b_sent;
+ flb_sds_t token;
+ flb_sds_t payload_buf;
+ void *compressed_payload_buffer = NULL;
+ size_t compressed_payload_size;
+ struct flb_stackdriver *ctx = out_context;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ int compressed = FLB_FALSE;
+#ifdef FLB_HAVE_METRICS
+ char *name = (char *) flb_output_name(ctx->ins);
+ uint64_t ts = cfl_time_now();
+#endif
+
+ /* Get upstream connection */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+#ifdef FLB_HAVE_METRICS
+ cmt_counter_inc(ctx->cmt_failed_requests,
+ ts, 1, (char *[]) {name});
+
+ /* OLD api */
+ flb_metrics_sum(FLB_STACKDRIVER_FAILED_REQUESTS, 1, ctx->ins->metrics);
+
+ update_http_metrics(ctx, event_chunk, ts, STACKDRIVER_NET_ERROR);
+ update_retry_metric(ctx, event_chunk, ts, STACKDRIVER_NET_ERROR, FLB_RETRY);
+#endif
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Reformat msgpack to stackdriver JSON payload */
+ payload_buf = stackdriver_format(ctx,
+ event_chunk->total_events,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ event_chunk->data, event_chunk->size);
+ if (!payload_buf) {
+#ifdef FLB_HAVE_METRICS
+ cmt_counter_inc(ctx->cmt_failed_requests,
+ ts, 1, (char *[]) {name});
+
+ /* OLD api */
+ flb_metrics_sum(FLB_STACKDRIVER_FAILED_REQUESTS, 1, ctx->ins->metrics);
+#endif
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Get or renew Token */
+ token = get_google_token(ctx);
+ if (!token) {
+ flb_plg_error(ctx->ins, "cannot retrieve oauth2 token");
+ flb_upstream_conn_release(u_conn);
+ flb_sds_destroy(payload_buf);
+#ifdef FLB_HAVE_METRICS
+ cmt_counter_inc(ctx->cmt_failed_requests,
+ ts, 1, (char *[]) {name});
+
+ /* OLD api */
+ flb_metrics_sum(FLB_STACKDRIVER_FAILED_REQUESTS, 1, ctx->ins->metrics);
+#endif
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ compressed_payload_buffer = payload_buf;
+ compressed_payload_size = flb_sds_len(payload_buf);
+ if (ctx->compress_gzip == FLB_TRUE) {
+ ret = flb_gzip_compress((void *) payload_buf, flb_sds_len(payload_buf),
+ &compressed_payload_buffer, &compressed_payload_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "cannot gzip payload, disabling compression");
+ } else {
+ compressed = FLB_TRUE;
+ flb_sds_destroy(payload_buf);
+ }
+ }
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_POST, FLB_STD_WRITE_URI,
+ compressed_payload_buffer, compressed_payload_size, NULL, 0, NULL, 0);
+
+ flb_http_buffer_size(c, 4192);
+
+ if (ctx->stackdriver_agent) {
+ flb_http_add_header(c, "User-Agent", 10,
+ ctx->stackdriver_agent,
+ flb_sds_len(ctx->stackdriver_agent));
+ }
+ else {
+ flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
+ }
+
+ flb_http_add_header(c, "Content-Type", 12, "application/json", 16);
+ flb_http_add_header(c, "Authorization", 13, token, flb_sds_len(token));
+ /* Content Encoding: gzip */
+ if (compressed == FLB_TRUE) {
+ flb_http_set_content_encoding_gzip(c);
+ }
+
+ /* Send HTTP request */
+ ret = flb_http_do(c, &b_sent);
+
+ /* validate response */
+ if (ret != 0) {
+ flb_plg_warn(ctx->ins, "http_do=%i", ret);
+ ret_code = FLB_RETRY;
+#ifdef FLB_HAVE_METRICS
+ update_http_metrics(ctx, event_chunk, ts, STACKDRIVER_NET_ERROR);
+#endif
+ }
+ else {
+ /* The request was issued successfully, validate the 'error' field */
+ flb_plg_debug(ctx->ins, "HTTP Status=%i", c->resp.status);
+ if (c->resp.status == 200) {
+ ret_code = FLB_OK;
+ }
+ else if (c->resp.status >= 400 && c->resp.status < 500) {
+ ret_code = FLB_ERROR;
+ flb_plg_warn(ctx->ins, "error\n%s",
+ c->resp.payload);
+ }
+ else {
+ if (c->resp.payload_size > 0) {
+ /* we got an error */
+ flb_plg_warn(ctx->ins, "error\n%s",
+ c->resp.payload);
+ }
+ else {
+ flb_plg_debug(ctx->ins, "response\n%s",
+ c->resp.payload);
+ }
+ ret_code = FLB_RETRY;
+ }
+ }
+
+ /* Update specific stackdriver metrics */
+#ifdef FLB_HAVE_METRICS
+ if (ret_code == FLB_OK) {
+ cmt_counter_inc(ctx->cmt_successful_requests,
+ ts, 1, (char *[]) {name});
+
+ /* OLD api */
+ flb_metrics_sum(FLB_STACKDRIVER_SUCCESSFUL_REQUESTS, 1, ctx->ins->metrics);
+ }
+ else {
+ cmt_counter_inc(ctx->cmt_failed_requests,
+ ts, 1, (char *[]) {name});
+
+ /* OLD api */
+ flb_metrics_sum(FLB_STACKDRIVER_FAILED_REQUESTS, 1, ctx->ins->metrics);
+ }
+
+ /* Update metrics counter by using labels/http status code */
+ if (ret == 0) {
+ update_http_metrics(ctx, event_chunk, ts, c->resp.status);
+ }
+
+ /* Update retry count if necessary */
+ update_retry_metric(ctx, event_chunk, ts, c->resp.status, ret_code);
+#endif
+
+
+ /* Cleanup */
+ if (compressed == FLB_TRUE) {
+ flb_free(compressed_payload_buffer);
+ }
+ else {
+ flb_sds_destroy(payload_buf);
+ }
+ flb_sds_destroy(token);
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+
+ /* Done */
+ FLB_OUTPUT_RETURN(ret_code);
+}
+
+static int cb_stackdriver_exit(void *data, struct flb_config *config)
+{
+ struct flb_stackdriver *ctx = data;
+
+ if (!ctx) {
+ return -1;
+ }
+
+ flb_stackdriver_conf_destroy(ctx);
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "google_service_credentials", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, credentials_file),
+ "Set the path for the google service credentials file"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "metadata_server", (char *)NULL,
+ 0, FLB_FALSE, 0,
+ "Set the metadata server"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "service_account_email", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, client_email),
+ "Set the service account email"
+ },
+    /* set in flb_stackdriver_oauth_credentials when a credentials file is used */
+ {
+ FLB_CONFIG_MAP_STR, "service_account_secret", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, private_key),
+ "Set the service account secret"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "export_to_project_id", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, export_to_project_id),
+ "Export to project id"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "resource", FLB_SDS_RESOURCE_TYPE,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, resource),
+ "Set the resource"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "severity_key", DEFAULT_SEVERITY_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, severity_key),
+ "Set the severity key"
+ },
+ {
+ FLB_CONFIG_MAP_BOOL, "autoformat_stackdriver_trace", "false",
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, autoformat_stackdriver_trace),
+ "Autoformat the stackdriver trace"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "trace_key", DEFAULT_TRACE_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, trace_key),
+ "Set the trace key"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "span_id_key", DEFAULT_SPAN_ID_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, span_id_key),
+ "Set the span id key"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "trace_sampled_key", DEFAULT_TRACE_SAMPLED_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, trace_sampled_key),
+ "Set the trace sampled key"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "log_name_key", DEFAULT_LOG_NAME_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, log_name_key),
+ "Set the logname key"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "http_request_key", HTTPREQUEST_FIELD_IN_JSON,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, http_request_key),
+ "Set the http request key"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "k8s_cluster_name", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, cluster_name),
+ "Set the kubernetes cluster name"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "k8s_cluster_location", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, cluster_location),
+ "Set the kubernetes cluster location"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "location", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, location),
+ "Set the resource location"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "namespace", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, namespace_id),
+ "Set the resource namespace"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "node_id", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, node_id),
+ "Set the resource node id"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "job", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, job),
+ "Set the resource job"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "task_id", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, task_id),
+ "Set the resource task id"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "compress", NULL,
+ 0, FLB_FALSE, 0,
+     "Set the log payload compression method. The only available option is 'gzip'"
+ },
+ {
+ FLB_CONFIG_MAP_CLIST, "labels", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, labels),
+ "Set the labels"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "labels_key", DEFAULT_LABELS_KEY,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, labels_key),
+ "Set the labels key"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "tag_prefix", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, tag_prefix),
+ "Set the tag prefix"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "stackdriver_agent", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, stackdriver_agent),
+ "Set the stackdriver agent"
+ },
+ /* Custom Regex */
+ {
+ FLB_CONFIG_MAP_STR, "custom_k8s_regex", DEFAULT_TAG_REGEX,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, custom_k8s_regex),
+ "Set a custom kubernetes regex filter"
+ },
+ {
+ FLB_CONFIG_MAP_CLIST, "resource_labels", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_stackdriver, resource_labels),
+ "Set the resource labels"
+ },
+ /* EOF */
+ {0}
+};
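+
+/*
+ * Illustrative classic-mode configuration exercising some of the options
+ * declared in config_map above (the path, cluster names and label values
+ * are hypothetical):
+ *
+ *   [OUTPUT]
+ *       Name                        stackdriver
+ *       Match                       *
+ *       google_service_credentials  /path/to/service-account.json
+ *       resource                    k8s_container
+ *       k8s_cluster_name            my-cluster
+ *       k8s_cluster_location        us-central1-a
+ *       labels                      env=prod,team=sre
+ *       compress                    gzip
+ */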
+
+struct flb_output_plugin out_stackdriver_plugin = {
+ .name = "stackdriver",
+ .description = "Send events to Google Stackdriver Logging",
+ .cb_init = cb_stackdriver_init,
+ .cb_flush = cb_stackdriver_flush,
+ .cb_exit = cb_stackdriver_exit,
+ .workers = 1,
+ .config_map = config_map,
+
+ /* Test */
+ .test_formatter.callback = stackdriver_format_test,
+
+ /* Plugin flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver.h b/src/fluent-bit/plugins/out_stackdriver/stackdriver.h
new file mode 100644
index 000000000..239a3ee31
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver.h
@@ -0,0 +1,241 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_STACKDRIVER_H
+#define FLB_OUT_STACKDRIVER_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_oauth2.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_pthread.h>
+#include <fluent-bit/flb_regex.h>
+#include <fluent-bit/flb_metrics.h>
+
+/* refresh token every 50 minutes */
+#define FLB_STD_TOKEN_REFRESH 3000
+
+/* Stackdriver Logging write scope */
+#define FLB_STD_SCOPE "https://www.googleapis.com/auth/logging.write"
+
+/* Stackdriver authorization URL */
+#define FLB_STD_AUTH_URL "https://oauth2.googleapis.com/token"
+
+/* Stackdriver Logging 'write' end-point */
+#define FLB_STD_WRITE_URI "/v2/entries:write"
+#define FLB_STD_WRITE_URL "https://logging.googleapis.com" FLB_STD_WRITE_URI
+
+/* Timestamp format */
+#define FLB_STD_TIME_FMT "%Y-%m-%dT%H:%M:%S"
+
+/* Default Resource type */
+#define FLB_SDS_RESOURCE_TYPE "global"
+#define OPERATION_FIELD_IN_JSON "logging.googleapis.com/operation"
+#define MONITORED_RESOURCE_KEY "logging.googleapis.com/monitored_resource"
+#define LOCAL_RESOURCE_ID_KEY "logging.googleapis.com/local_resource_id"
+#define DEFAULT_LABELS_KEY "logging.googleapis.com/labels"
+#define DEFAULT_SEVERITY_KEY "logging.googleapis.com/severity"
+#define DEFAULT_TRACE_KEY "logging.googleapis.com/trace"
+#define DEFAULT_SPAN_ID_KEY "logging.googleapis.com/spanId"
+#define DEFAULT_TRACE_SAMPLED_KEY "logging.googleapis.com/traceSampled"
+#define DEFAULT_LOG_NAME_KEY "logging.googleapis.com/logName"
+#define DEFAULT_INSERT_ID_KEY "logging.googleapis.com/insertId"
+#define SOURCELOCATION_FIELD_IN_JSON "logging.googleapis.com/sourceLocation"
+#define HTTPREQUEST_FIELD_IN_JSON "logging.googleapis.com/http_request"
+#define INSERT_ID_SIZE 31
+#define LEN_LOCAL_RESOURCE_ID_KEY 40
+#define OPERATION_KEY_SIZE 32
+#define SOURCE_LOCATION_SIZE 37
+#define HTTP_REQUEST_KEY_SIZE 35
+
+/*
+ * The plugin reserves a specific HTTP status code that is used internally to keep track of
+ * networking errors that can happen before a successful HTTP request/response. For metrics
+ * counting purposes, every failed network connection is recorded with a 502 HTTP status code,
+ * which can later be used to query the metrics by filtering on that label value.
+ */
+#define STACKDRIVER_NET_ERROR 502
+
+#define K8S_CONTAINER "k8s_container"
+#define K8S_NODE "k8s_node"
+#define K8S_POD "k8s_pod"
+
+#define STDOUT "stdout"
+#define STDERR "stderr"
+
+#define DEFAULT_TAG_REGEX "(?<pod_name>[a-z0-9](?:[-a-z0-9]*[a-z0-9])?(?:\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?<namespace_name>[^_]+)_(?<container_name>.+)-(?<docker_id>[a-z0-9]{64})\\.log$"
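+/*
+ * Illustrative tag matched by DEFAULT_TAG_REGEX (the pod, namespace and
+ * container names are hypothetical; the trailing id is a 64-character
+ * lowercase hex string):
+ *
+ *   "mypod-7d9c_default_app-<64-char-hex-id>.log"
+ *     -> pod_name=mypod-7d9c, namespace_name=default, container_name=app
+ */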
+
+/* Metrics */
+#ifdef FLB_HAVE_METRICS
+#define FLB_STACKDRIVER_SUCCESSFUL_REQUESTS 1000 /* successful requests */
+#define FLB_STACKDRIVER_FAILED_REQUESTS 1001 /* failed requests */
+#endif
+
+struct flb_stackdriver_oauth_credentials {
+ /* parsed credentials file */
+ flb_sds_t type;
+ flb_sds_t private_key_id;
+ flb_sds_t private_key;
+ flb_sds_t client_email;
+ flb_sds_t client_id;
+ flb_sds_t auth_uri;
+ flb_sds_t token_uri;
+};
+
+struct flb_stackdriver_env {
+ flb_sds_t creds_file;
+ flb_sds_t metadata_server;
+};
+
+struct flb_stackdriver {
+ /* credentials */
+ flb_sds_t credentials_file;
+
+ /* parsed credentials file */
+ flb_sds_t type;
+ flb_sds_t project_id;
+ flb_sds_t private_key_id;
+ flb_sds_t private_key;
+ flb_sds_t client_email;
+ flb_sds_t client_id;
+ flb_sds_t auth_uri;
+ flb_sds_t token_uri;
+ bool metadata_server_auth;
+
+ /* metadata server (GCP specific, WIP) */
+ flb_sds_t metadata_server;
+ flb_sds_t zone;
+ flb_sds_t instance_id;
+ flb_sds_t instance_name;
+
+ /* kubernetes specific */
+ flb_sds_t cluster_name;
+ flb_sds_t cluster_location;
+ flb_sds_t namespace_name;
+ flb_sds_t pod_name;
+ flb_sds_t container_name;
+ flb_sds_t node_name;
+
+ flb_sds_t local_resource_id;
+ flb_sds_t tag_prefix;
+ /* shadow tag_prefix for safe deallocation */
+ flb_sds_t tag_prefix_k8s;
+
+ /* labels */
+ flb_sds_t labels_key;
+ struct mk_list *labels;
+ struct mk_list config_labels;
+
+ /* resource type flag */
+ int resource_type;
+
+ /* resource labels api */
+ struct mk_list *resource_labels;
+ struct mk_list resource_labels_kvs;
+ int should_skip_resource_labels_api;
+
+ /* generic resources */
+ flb_sds_t location;
+ flb_sds_t namespace_id;
+
+ /* generic_node specific */
+ flb_sds_t node_id;
+
+ /* generic_task specific */
+ flb_sds_t job;
+ flb_sds_t task_id;
+
+ /* Internal variable to reduce string comparisons */
+ int compress_gzip;
+
+ /* other */
+ flb_sds_t export_to_project_id;
+ flb_sds_t resource;
+ flb_sds_t severity_key;
+ flb_sds_t trace_key;
+ flb_sds_t span_id_key;
+ flb_sds_t trace_sampled_key;
+ flb_sds_t log_name_key;
+ flb_sds_t http_request_key;
+ int http_request_key_size;
+ bool autoformat_stackdriver_trace;
+
+ flb_sds_t stackdriver_agent;
+
+ /* Regex context to parse tags */
+ flb_sds_t custom_k8s_regex;
+ struct flb_regex *regex;
+
+ /* oauth2 context */
+ struct flb_oauth2 *o;
+
+ /* parsed oauth2 credentials */
+ struct flb_stackdriver_oauth_credentials *creds;
+
+ /* environment variable settings */
+ struct flb_stackdriver_env *env;
+
+ /* mutex for acquiring oauth tokens */
+ pthread_mutex_t token_mutex;
+
+ /* upstream context for stackdriver write end-point */
+ struct flb_upstream *u;
+
+ /* upstream context for metadata end-point */
+ struct flb_upstream *metadata_u;
+
+#ifdef FLB_HAVE_METRICS
+ /* metrics */
+ struct cmt_counter *cmt_successful_requests;
+ struct cmt_counter *cmt_failed_requests;
+ struct cmt_counter *cmt_requests_total;
+ struct cmt_counter *cmt_proc_records_total;
+ struct cmt_counter *cmt_retried_records_total;
+#endif
+
+ /* plugin instance */
+ struct flb_output_instance *ins;
+
+ /* Fluent Bit context */
+ struct flb_config *config;
+};
+
+typedef enum {
+ FLB_STD_EMERGENCY = 800,
+ FLB_STD_ALERT = 700,
+ FLB_STD_CRITICAL = 600,
+ FLB_STD_ERROR = 500,
+ FLB_STD_WARNING = 400,
+ FLB_STD_NOTICE = 300,
+ FLB_STD_INFO = 200,
+ FLB_STD_DEBUG = 100,
+ FLB_STD_DEFAULT = 0
+} severity_t;
+
+struct local_resource_id_list {
+ flb_sds_t val;
+ struct mk_list _head;
+};
+
+typedef enum {
+ INSERTID_VALID = 0,
+ INSERTID_INVALID = 1,
+ INSERTID_NOT_PRESENT = 2
+} insert_id_status;
+
+#endif
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver_conf.c b/src/fluent-bit/plugins/out_stackdriver/stackdriver_conf.c
new file mode 100644
index 000000000..9f3f28a35
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver_conf.c
@@ -0,0 +1,667 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_compat.h>
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_unescape.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_jsmn.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_kv.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "gce_metadata.h"
+#include "stackdriver.h"
+#include "stackdriver_conf.h"
+#include "stackdriver_resource_types.h"
+
+static inline int key_cmp(const char *str, int len, const char *cmp) {
+
+ if (strlen(cmp) != len) {
+ return -1;
+ }
+
+ return strncasecmp(str, cmp, len);
+}
+
+static int read_credentials_file(const char *cred_file, struct flb_stackdriver *ctx)
+{
+ int i;
+ int ret;
+ int key_len;
+ int val_len;
+ int tok_size = 32;
+ char *buf;
+ char *key;
+ char *val;
+ flb_sds_t tmp;
+ struct stat st;
+ jsmn_parser parser;
+ jsmntok_t *t;
+ jsmntok_t *tokens;
+
+ /* Validate credentials path */
+ ret = stat(cred_file, &st);
+ if (ret == -1) {
+ flb_errno();
+ flb_plg_error(ctx->ins, "cannot open credentials file: %s",
+ cred_file);
+ return -1;
+ }
+
+ if (!S_ISREG(st.st_mode) && !S_ISLNK(st.st_mode)) {
+ flb_plg_error(ctx->ins, "credentials file "
+ "is not a valid file: %s", cred_file);
+ return -1;
+ }
+
+ /* Read file content */
+ buf = mk_file_to_buffer(cred_file);
+ if (!buf) {
+ flb_plg_error(ctx->ins, "error reading credentials file: %s",
+ cred_file);
+ return -1;
+ }
+
+ /* Parse content */
+ jsmn_init(&parser);
+ tokens = flb_calloc(1, sizeof(jsmntok_t) * tok_size);
+ if (!tokens) {
+ flb_errno();
+ flb_free(buf);
+ return -1;
+ }
+
+ ret = jsmn_parse(&parser, buf, st.st_size, tokens, tok_size);
+ if (ret <= 0) {
+ flb_plg_error(ctx->ins, "invalid JSON credentials file: %s",
+ cred_file);
+ flb_free(buf);
+ flb_free(tokens);
+ return -1;
+ }
+
+ t = &tokens[0];
+ if (t->type != JSMN_OBJECT) {
+ flb_plg_error(ctx->ins, "invalid JSON map on file: %s",
+ cred_file);
+ flb_free(buf);
+ flb_free(tokens);
+ return -1;
+ }
+
+ /* Parse JSON tokens */
+ for (i = 1; i < ret; i++) {
+ t = &tokens[i];
+ if (t->type != JSMN_STRING) {
+ continue;
+ }
+
+ if (t->start == -1 || t->end == -1 || (t->start == 0 && t->end == 0)){
+ break;
+ }
+
+ /* Key */
+ key = buf + t->start;
+ key_len = (t->end - t->start);
+
+ /* Value */
+ i++;
+ t = &tokens[i];
+ val = buf + t->start;
+ val_len = (t->end - t->start);
+
+ if (key_cmp(key, key_len, "type") == 0) {
+ ctx->creds->type = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "project_id") == 0) {
+ ctx->project_id = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "private_key_id") == 0) {
+ ctx->creds->private_key_id = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "private_key") == 0) {
+ tmp = flb_sds_create_len(val, val_len);
+ if (tmp) {
+ /* Unescape private key */
+ ctx->creds->private_key = flb_sds_create_size(val_len);
+ flb_unescape_string(tmp, flb_sds_len(tmp),
+ &ctx->creds->private_key);
+ flb_sds_destroy(tmp);
+ }
+ }
+ else if (key_cmp(key, key_len, "client_email") == 0) {
+ ctx->creds->client_email = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "client_id") == 0) {
+ ctx->creds->client_id = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "auth_uri") == 0) {
+ ctx->creds->auth_uri = flb_sds_create_len(val, val_len);
+ }
+ else if (key_cmp(key, key_len, "token_uri") == 0) {
+ ctx->creds->token_uri = flb_sds_create_len(val, val_len);
+ }
+ }
+
+ flb_free(buf);
+ flb_free(tokens);
+
+ return 0;
+}
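+
+/*
+ * Illustrative shape of the credentials file parsed above; only the keys
+ * handled by read_credentials_file() are shown and all values are
+ * hypothetical:
+ *
+ *   {
+ *     "type": "service_account",
+ *     "project_id": "my-project",
+ *     "private_key_id": "abc123",
+ *     "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
+ *     "client_email": "logger@my-project.iam.gserviceaccount.com",
+ *     "client_id": "1234567890",
+ *     "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ *     "token_uri": "https://oauth2.googleapis.com/token"
+ *   }
+ */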
+
+/*
+ * parse_key_value_list():
+ * - Parses an origin list of comma-separated strings specifying key=value.
+ * - Appends the parsed key value pairs into the destination list.
+ * - Returns the length of the destination list.
+ */
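+/*
+ * Illustrative input/output (the key and value names are hypothetical):
+ *   origin: ["env=prod", " team = sre "] -> dest: [{"env", "prod"}, {"team", "sre"}]
+ *   (whitespace around keys and values is stripped only when shouldTrim is set)
+ */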
+static int parse_key_value_list(struct flb_stackdriver *ctx,
+ struct mk_list *origin,
+ struct mk_list *dest,
+ int shouldTrim)
+{
+ char *p;
+ flb_sds_t key;
+ flb_sds_t val;
+ struct flb_kv *kv;
+ struct mk_list *head;
+ struct flb_slist_entry *entry;
+
+ if (origin) {
+ mk_list_foreach(head, origin) {
+ entry = mk_list_entry(head, struct flb_slist_entry, _head);
+
+ p = strchr(entry->str, '=');
+ if (!p) {
+ flb_plg_error(ctx->ins, "invalid key value pair on '%s'",
+ entry->str);
+ return -1;
+ }
+
+ key = flb_sds_create_size((p - entry->str) + 1);
+ flb_sds_cat(key, entry->str, p - entry->str);
+ val = flb_sds_create(p + 1);
+ if (shouldTrim) {
+ flb_sds_trim(key);
+ flb_sds_trim(val);
+ }
+ if (!key || flb_sds_len(key) == 0) {
+ flb_plg_error(ctx->ins,
+ "invalid key value pair on '%s'",
+ entry->str);
+ return -1;
+ }
+ if (!val || flb_sds_len(val) == 0) {
+ flb_plg_error(ctx->ins,
+ "invalid key value pair on '%s'",
+ entry->str);
+ flb_sds_destroy(key);
+ return -1;
+ }
+
+ kv = flb_kv_item_create(dest, key, val);
+ flb_sds_destroy(key);
+ flb_sds_destroy(val);
+
+ if (!kv) {
+ return -1;
+ }
+ }
+ }
+
+ return mk_list_size(dest);
+}
+
+/*
+ * parse_configuration_labels
+ * - Parse labels set in configuration
+ * - Returns the number of configuration labels
+ */
+static int parse_configuration_labels(struct flb_stackdriver *ctx)
+{
+ return parse_key_value_list(ctx, ctx->labels,
+ &ctx->config_labels, FLB_FALSE);
+}
+
+/*
+ * parse_resource_labels():
+ * - Parses resource labels set in configuration.
+ * - Returns the number of resource label mappings.
+ */
+static int parse_resource_labels(struct flb_stackdriver *ctx)
+{
+ return parse_key_value_list(ctx, ctx->resource_labels,
+ &ctx->resource_labels_kvs, FLB_TRUE);
+}
+
+struct flb_stackdriver *flb_stackdriver_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ const char *tmp;
+ const char *backwards_compatible_env_var;
+ struct flb_stackdriver *ctx;
+ size_t http_request_key_size;
+
+ /* Allocate config context */
+ ctx = flb_calloc(1, sizeof(struct flb_stackdriver));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ ctx->config = config;
+
+ ret = flb_output_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to load configuration");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Compress (gzip) */
+ tmp = flb_output_get_property("compress", ins);
+ ctx->compress_gzip = FLB_FALSE;
+ if (tmp && strcasecmp(tmp, "gzip") == 0) {
+ ctx->compress_gzip = FLB_TRUE;
+ }
+
+ /* labels */
+ flb_kv_init(&ctx->config_labels);
+ ret = parse_configuration_labels((void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to parse configuration labels");
+ flb_kv_release(&ctx->config_labels);
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* resource labels */
+ flb_kv_init(&ctx->resource_labels_kvs);
+ ret = parse_resource_labels((void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to parse resource label list");
+ flb_kv_release(&ctx->resource_labels_kvs);
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Lookup metadata server URL */
+ ctx->metadata_server = NULL;
+ tmp = flb_output_get_property("metadata_server", ins);
+ if (tmp == NULL) {
+ tmp = getenv("METADATA_SERVER");
+        if (tmp) {
+ if (ctx->env == NULL) {
+ ctx->env = flb_calloc(1, sizeof(struct flb_stackdriver_env));
+ if (ctx->env == NULL) {
+ flb_plg_error(ins, "unable to allocate env variables");
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+ ctx->env->metadata_server = flb_sds_create(tmp);
+ ctx->metadata_server = ctx->env->metadata_server;
+ }
+ else {
+ ctx->metadata_server = flb_sds_create(FLB_STD_METADATA_SERVER);
+ }
+ }
+ else {
+ ctx->metadata_server = flb_sds_create(tmp);
+ }
+ flb_plg_info(ctx->ins, "metadata_server set to %s", ctx->metadata_server);
+
+ /* Lookup credentials file */
+ if (ctx->credentials_file == NULL) {
+ /*
+ * Use GOOGLE_APPLICATION_CREDENTIALS to fetch the credentials.
+ * GOOGLE_SERVICE_CREDENTIALS is checked for backwards compatibility.
+ */
+ tmp = getenv("GOOGLE_APPLICATION_CREDENTIALS");
+ backwards_compatible_env_var = getenv("GOOGLE_SERVICE_CREDENTIALS");
+ if (tmp && backwards_compatible_env_var) {
+ flb_plg_warn(ctx->ins, "GOOGLE_APPLICATION_CREDENTIALS and "
+ "GOOGLE_SERVICE_CREDENTIALS are both defined. "
+ "Defaulting to GOOGLE_APPLICATION_CREDENTIALS");
+ }
+ if ((tmp || backwards_compatible_env_var) && (ctx->env == NULL)) {
+ ctx->env = flb_calloc(1, sizeof(struct flb_stackdriver_env));
+ if (ctx->env == NULL) {
+ flb_plg_error(ins, "unable to allocate env variables");
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+ if (tmp) {
+ ctx->env->creds_file = flb_sds_create(tmp);
+ ctx->credentials_file = ctx->env->creds_file;
+ }
+ else if (backwards_compatible_env_var) {
+ ctx->env->creds_file = flb_sds_create(backwards_compatible_env_var);
+ ctx->credentials_file = ctx->env->creds_file;
+ }
+ }
+
+ if (ctx->credentials_file) {
+ ctx->creds = flb_calloc(1, sizeof(struct flb_stackdriver_oauth_credentials));
+ if (ctx->creds == NULL) {
+ flb_plg_error(ctx->ins, "unable to allocate credentials");
+ flb_stackdriver_conf_destroy(ctx);
+ return NULL;
+ }
+ ret = read_credentials_file(ctx->credentials_file, ctx);
+ if (ret != 0) {
+ flb_stackdriver_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->type = ctx->creds->type;
+ ctx->private_key_id = ctx->creds->private_key_id;
+ ctx->private_key = ctx->creds->private_key;
+ ctx->client_email = ctx->creds->client_email;
+ ctx->client_id = ctx->creds->client_id;
+ ctx->auth_uri = ctx->creds->auth_uri;
+ ctx->token_uri = ctx->creds->token_uri;
+ }
+ else {
+ /*
+ * If no credentials file has been defined, do manual lookup of the
+ * client email and the private key
+ */
+ ctx->creds = flb_calloc(1, sizeof(struct flb_stackdriver_oauth_credentials));
+ if (ctx->creds == NULL) {
+ flb_plg_error(ctx->ins, "unable to allocate credentials");
+ flb_stackdriver_conf_destroy(ctx);
+ return NULL;
+ }
+
+ /* Service Account Email */
+ if (ctx->client_email == NULL) {
+ tmp = getenv("SERVICE_ACCOUNT_EMAIL");
+ if (tmp) {
+ ctx->creds->client_email = flb_sds_create(tmp);
+ }
+ }
+
+ /* Service Account Secret */
+ if (ctx->private_key == NULL) {
+ tmp = getenv("SERVICE_ACCOUNT_SECRET");
+ if (tmp) {
+ ctx->creds->private_key = flb_sds_create(tmp);
+ }
+ }
+
+ ctx->private_key = ctx->creds->private_key;
+ ctx->client_email = ctx->creds->client_email;
+ }
+
+ /*
+ * If only client email has been provided, fetch token from
+ * the GCE metadata server.
+ *
+ * If no credentials have been provided, fetch token from the GCE
+ * metadata server for default account.
+ */
+ if (!ctx->client_email && ctx->private_key) {
+ flb_plg_error(ctx->ins, "client_email is not defined");
+ flb_stackdriver_conf_destroy(ctx);
+ return NULL;
+ }
+
+ if (!ctx->client_email) {
+ flb_plg_warn(ctx->ins, "client_email is not defined, using "
+ "a default one");
+ if (ctx->creds == NULL) {
+ ctx->creds = flb_calloc(1, sizeof(struct flb_stackdriver_oauth_credentials));
+ if (ctx->creds == NULL) {
+ flb_plg_error(ctx->ins, "unable to allocate credentials");
+ flb_stackdriver_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+ ctx->creds->client_email = flb_sds_create("default");
+ ctx->client_email = ctx->creds->client_email;
+ }
+ if (!ctx->private_key) {
+ flb_plg_warn(ctx->ins, "private_key is not defined, fetching "
+ "it from metadata server");
+ ctx->metadata_server_auth = true;
+ }
+
+ if (ctx->http_request_key) {
+ http_request_key_size = flb_sds_len(ctx->http_request_key);
+ if (http_request_key_size >= INT_MAX) {
+ flb_plg_error(ctx->ins, "http_request_key is too long");
+ flb_sds_destroy(ctx->http_request_key);
+ ctx->http_request_key = NULL;
+ ctx->http_request_key_size = 0;
+ } else {
+ ctx->http_request_key_size = http_request_key_size;
+ }
+ }
+
+ set_resource_type(ctx);
+
+ if (resource_api_has_required_labels(ctx) == FLB_FALSE) {
+
+ if (ctx->resource_type == RESOURCE_TYPE_K8S) {
+ if (!ctx->cluster_name || !ctx->cluster_location) {
+ flb_plg_error(ctx->ins, "missing k8s_cluster_name "
+ "or k8s_cluster_location in configuration");
+ flb_stackdriver_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ else if (ctx->resource_type == RESOURCE_TYPE_GENERIC_NODE
+ || ctx->resource_type == RESOURCE_TYPE_GENERIC_TASK) {
+
+ if (ctx->location == NULL) {
+ flb_plg_error(ctx->ins, "missing generic resource's location");
+ }
+
+ if (ctx->namespace_id == NULL) {
+ flb_plg_error(ctx->ins, "missing generic resource's namespace");
+ }
+
+ if (ctx->resource_type == RESOURCE_TYPE_GENERIC_NODE) {
+ if (ctx->node_id == NULL) {
+ flb_plg_error(ctx->ins, "missing generic_node's node_id");
+ flb_stackdriver_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+ else {
+ if (ctx->job == NULL) {
+ flb_plg_error(ctx->ins, "missing generic_task's job");
+ }
+
+ if (ctx->task_id == NULL) {
+ flb_plg_error(ctx->ins, "missing generic_task's task_id");
+ }
+
+ if (!ctx->job || !ctx->task_id) {
+ flb_stackdriver_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ if (!ctx->location || !ctx->namespace_id) {
+ flb_stackdriver_conf_destroy(ctx);
+ return NULL;
+ }
+ }
+ }
+
+
+ if (ctx->tag_prefix == NULL && ctx->resource_type == RESOURCE_TYPE_K8S) {
+ /* allocate the flb_sds_t to tag_prefix_k8s so we can safely deallocate it */
+ ctx->tag_prefix_k8s = flb_sds_create(ctx->resource);
+ ctx->tag_prefix_k8s = flb_sds_cat(ctx->tag_prefix_k8s, ".", 1);
+ ctx->tag_prefix = ctx->tag_prefix_k8s;
+ }
+
+ /* Register metrics */
+#ifdef FLB_HAVE_METRICS
+ ctx->cmt_successful_requests = cmt_counter_create(ins->cmt,
+ "fluentbit",
+ "stackdriver",
+ "successful_requests",
+ "Total number of successful "
+ "requests.",
+ 1, (char *[]) {"name"});
+
+ ctx->cmt_failed_requests = cmt_counter_create(ins->cmt,
+ "fluentbit",
+ "stackdriver",
+ "failed_requests",
+ "Total number of failed "
+ "requests.",
+ 1, (char *[]) {"name"});
+
+ ctx->cmt_requests_total = cmt_counter_create(ins->cmt,
+ "fluentbit",
+ "stackdriver",
+ "requests_total",
+ "Total number of requests.",
+ 2, (char *[]) {"status", "name"});
+
+ ctx->cmt_proc_records_total = cmt_counter_create(ins->cmt,
+ "fluentbit",
+ "stackdriver",
+ "proc_records_total",
+ "Total number of processed records.",
+ 2, (char *[]) {"status", "name"});
+
+ ctx->cmt_retried_records_total = cmt_counter_create(ins->cmt,
+ "fluentbit",
+ "stackdriver",
+ "retried_records_total",
+ "Total number of retried records.",
+ 2, (char *[]) {"status", "name"});
+
+ /* OLD api */
+ flb_metrics_add(FLB_STACKDRIVER_SUCCESSFUL_REQUESTS,
+ "stackdriver_successful_requests", ctx->ins->metrics);
+ flb_metrics_add(FLB_STACKDRIVER_FAILED_REQUESTS,
+ "stackdriver_failed_requests", ctx->ins->metrics);
+#endif
+
+ return ctx;
+}
+
+int flb_stackdriver_conf_destroy(struct flb_stackdriver *ctx)
+{
+ if (!ctx) {
+ return -1;
+ }
+
+ if (ctx->creds) {
+ if (ctx->creds->type) {
+ flb_sds_destroy(ctx->creds->type);
+ }
+ if (ctx->creds->private_key_id) {
+ flb_sds_destroy(ctx->creds->private_key_id);
+ }
+ if (ctx->creds->private_key) {
+ flb_sds_destroy(ctx->creds->private_key);
+ }
+ if (ctx->creds->client_email) {
+ flb_sds_destroy(ctx->creds->client_email);
+ }
+ if (ctx->creds->client_id) {
+ flb_sds_destroy(ctx->creds->client_id);
+ }
+ if (ctx->creds->auth_uri) {
+ flb_sds_destroy(ctx->creds->auth_uri);
+ }
+ if (ctx->creds->token_uri) {
+ flb_sds_destroy(ctx->creds->token_uri);
+ }
+ flb_free(ctx->creds);
+ }
+
+ if (ctx->env) {
+ if (ctx->env->creds_file) {
+ flb_sds_destroy(ctx->env->creds_file);
+ }
+ if (ctx->env->metadata_server) {
+ flb_sds_destroy(ctx->env->metadata_server);
+ /*
+ * If ctx->env is not NULL,
+             * ctx->metadata_server points to ctx->env->metadata_server.
+ *
+ * We set ctx->metadata_server to NULL to prevent double free.
+ */
+ ctx->metadata_server = NULL;
+ }
+ flb_free(ctx->env);
+ }
+
+ if (ctx->metadata_server) {
+ flb_sds_destroy(ctx->metadata_server);
+ }
+
+ if (ctx->resource_type == RESOURCE_TYPE_K8S){
+ flb_sds_destroy(ctx->namespace_name);
+ flb_sds_destroy(ctx->pod_name);
+ flb_sds_destroy(ctx->container_name);
+ flb_sds_destroy(ctx->node_name);
+ flb_sds_destroy(ctx->local_resource_id);
+ }
+
+ if (ctx->metadata_server_auth) {
+ flb_sds_destroy(ctx->zone);
+ flb_sds_destroy(ctx->instance_id);
+ }
+
+ if (ctx->metadata_u) {
+ flb_upstream_destroy(ctx->metadata_u);
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ if (ctx->o) {
+ flb_oauth2_destroy(ctx->o);
+ }
+
+ if (ctx->regex) {
+ flb_regex_destroy(ctx->regex);
+ }
+
+ if (ctx->project_id) {
+ flb_sds_destroy(ctx->project_id);
+ }
+
+ if (ctx->tag_prefix_k8s) {
+ flb_sds_destroy(ctx->tag_prefix_k8s);
+ }
+
+ flb_kv_release(&ctx->config_labels);
+ flb_kv_release(&ctx->resource_labels_kvs);
+ flb_free(ctx);
+
+ return 0;
+}
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver_conf.h b/src/fluent-bit/plugins/out_stackdriver/stackdriver_conf.h
new file mode 100644
index 000000000..6244e6851
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver_conf.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_STACKDRIVER_CONF_H
+#define FLB_OUT_STACKDRIVER_CONF_H
+
+#include "stackdriver.h"
+
+struct flb_stackdriver *flb_stackdriver_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+int flb_stackdriver_conf_destroy(struct flb_stackdriver *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver_helper.c b/src/fluent-bit/plugins/out_stackdriver/stackdriver_helper.c
new file mode 100644
index 000000000..e5b94c481
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver_helper.c
@@ -0,0 +1,63 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "stackdriver.h"
+
+int equal_obj_str(msgpack_object obj, const char *str, const int size) {
+ if (obj.type != MSGPACK_OBJECT_STR) {
+ return FLB_FALSE;
+ }
+ if (size != obj.via.str.size
+ || strncmp(str, obj.via.str.ptr, obj.via.str.size) != 0) {
+ return FLB_FALSE;
+ }
+ return FLB_TRUE;
+}
+
+int validate_key(msgpack_object obj, const char *str, const int size) {
+ return equal_obj_str(obj, str, size);
+}
+
+void try_assign_subfield_str(msgpack_object obj, flb_sds_t *subfield) {
+ if (obj.type == MSGPACK_OBJECT_STR) {
+ *subfield = flb_sds_copy(*subfield, obj.via.str.ptr,
+ obj.via.str.size);
+ }
+}
+
+void try_assign_subfield_bool(msgpack_object obj, int *subfield) {
+ if (obj.type == MSGPACK_OBJECT_BOOLEAN) {
+ if (obj.via.boolean) {
+ *subfield = FLB_TRUE;
+ }
+ else {
+ *subfield = FLB_FALSE;
+ }
+ }
+}
+
+void try_assign_subfield_int(msgpack_object obj, int64_t *subfield) {
+ if (obj.type == MSGPACK_OBJECT_STR) {
+ *subfield = atoll(obj.via.str.ptr);
+ }
+ else if (obj.type == MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ *subfield = obj.via.i64;
+ }
+}
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver_helper.h b/src/fluent-bit/plugins/out_stackdriver/stackdriver_helper.h
new file mode 100644
index 000000000..ab8ac30a8
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver_helper.h
@@ -0,0 +1,51 @@
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef FLB_STD_HELPER_H
+#define FLB_STD_HELPER_H
+
+#include "stackdriver.h"
+
+/*
+ * Compare obj->via.str and str.
+ * Return FLB_TRUE if they are equal.
+ * Return FLB_FALSE if obj->type is not string or they are not equal
+ */
+int equal_obj_str(msgpack_object obj, const char *str, const int size);
+
+int validate_key(msgpack_object obj, const char *str, const int size);
+
+/*
+ * if obj->type is string, assign obj->val to subfield
+ * Otherwise leave the subfield untouched
+ */
+void try_assign_subfield_str(msgpack_object obj, flb_sds_t *subfield);
+
+/*
+ * if obj->type is boolean, assign obj->val to subfield
+ * Otherwise leave the subfield untouched
+ */
+void try_assign_subfield_bool(msgpack_object obj, int *subfield);
+
+/*
+ * If obj->type is a string or a positive integer, assign obj->val to subfield.
+ * Otherwise leave the subfield untouched.
+ */
+void try_assign_subfield_int(msgpack_object obj, int64_t *subfield);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver_http_request.c b/src/fluent-bit/plugins/out_stackdriver/stackdriver_http_request.c
new file mode 100644
index 000000000..9a1c814c0
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver_http_request.c
@@ -0,0 +1,393 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <fluent-bit/flb_regex.h>
+#include "stackdriver.h"
+#include "stackdriver_helper.h"
+#include "stackdriver_http_request.h"
+
+#include <ctype.h>
+
+typedef enum {
+ NO_HTTPREQUEST = 1,
+ HTTPREQUEST_EXISTS = 2
+} http_request_status;
+
+void init_http_request(struct http_request_field *http_request)
+{
+ http_request->latency = flb_sds_create("");
+ http_request->protocol = flb_sds_create("");
+ http_request->referer = flb_sds_create("");
+ http_request->remoteIp = flb_sds_create("");
+ http_request->requestMethod = flb_sds_create("");
+ http_request->requestUrl = flb_sds_create("");
+ http_request->serverIp = flb_sds_create("");
+ http_request->userAgent = flb_sds_create("");
+
+ http_request->cacheFillBytes = 0;
+ http_request->requestSize = 0;
+ http_request->responseSize = 0;
+ http_request->status = 0;
+
+ http_request->cacheHit = FLB_FALSE;
+ http_request->cacheLookup = FLB_FALSE;
+ http_request->cacheValidatedWithOriginServer = FLB_FALSE;
+}
+
+void destroy_http_request(struct http_request_field *http_request)
+{
+ flb_sds_destroy(http_request->latency);
+ flb_sds_destroy(http_request->protocol);
+ flb_sds_destroy(http_request->referer);
+ flb_sds_destroy(http_request->remoteIp);
+ flb_sds_destroy(http_request->requestMethod);
+ flb_sds_destroy(http_request->requestUrl);
+ flb_sds_destroy(http_request->serverIp);
+ flb_sds_destroy(http_request->userAgent);
+}
+
+void add_http_request_field(struct http_request_field *http_request,
+ msgpack_packer *mp_pck)
+{
+ msgpack_pack_str(mp_pck, 11);
+ msgpack_pack_str_body(mp_pck, "httpRequest", 11);
+
+ if (flb_sds_is_empty(http_request->latency) == FLB_TRUE) {
+ msgpack_pack_map(mp_pck, 14);
+ }
+ else {
+ msgpack_pack_map(mp_pck, 15);
+
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_LATENCY_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_LATENCY,
+ HTTP_REQUEST_LATENCY_SIZE);
+ msgpack_pack_str(mp_pck, flb_sds_len(http_request->latency));
+ msgpack_pack_str_body(mp_pck, http_request->latency,
+ flb_sds_len(http_request->latency));
+ }
+
+ /* String sub-fields */
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_REQUEST_METHOD_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_REQUEST_METHOD,
+ HTTP_REQUEST_REQUEST_METHOD_SIZE);
+ msgpack_pack_str(mp_pck, flb_sds_len(http_request->requestMethod));
+ msgpack_pack_str_body(mp_pck, http_request->requestMethod,
+ flb_sds_len(http_request->requestMethod));
+
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_REQUEST_URL_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_REQUEST_URL,
+ HTTP_REQUEST_REQUEST_URL_SIZE);
+ msgpack_pack_str(mp_pck, flb_sds_len(http_request->requestUrl));
+ msgpack_pack_str_body(mp_pck, http_request->requestUrl,
+ flb_sds_len(http_request->requestUrl));
+
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_USER_AGENT_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_USER_AGENT,
+ HTTP_REQUEST_USER_AGENT_SIZE);
+ msgpack_pack_str(mp_pck, flb_sds_len(http_request->userAgent));
+ msgpack_pack_str_body(mp_pck, http_request->userAgent,
+ flb_sds_len(http_request->userAgent));
+
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_REMOTE_IP_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_REMOTE_IP,
+ HTTP_REQUEST_REMOTE_IP_SIZE);
+ msgpack_pack_str(mp_pck, flb_sds_len(http_request->remoteIp));
+ msgpack_pack_str_body(mp_pck, http_request->remoteIp,
+ flb_sds_len(http_request->remoteIp));
+
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_SERVER_IP_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_SERVER_IP,
+ HTTP_REQUEST_SERVER_IP_SIZE);
+ msgpack_pack_str(mp_pck, flb_sds_len(http_request->serverIp));
+ msgpack_pack_str_body(mp_pck, http_request->serverIp,
+ flb_sds_len(http_request->serverIp));
+
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_REFERER_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_REFERER,
+ HTTP_REQUEST_REFERER_SIZE);
+ msgpack_pack_str(mp_pck, flb_sds_len(http_request->referer));
+ msgpack_pack_str_body(mp_pck, http_request->referer,
+ flb_sds_len(http_request->referer));
+
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_PROTOCOL_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_PROTOCOL,
+ HTTP_REQUEST_PROTOCOL_SIZE);
+ msgpack_pack_str(mp_pck, flb_sds_len(http_request->protocol));
+ msgpack_pack_str_body(mp_pck, http_request->protocol,
+ flb_sds_len(http_request->protocol));
+
+ /* Integer sub-fields */
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_REQUESTSIZE_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_REQUESTSIZE,
+ HTTP_REQUEST_REQUESTSIZE_SIZE);
+ msgpack_pack_int64(mp_pck, http_request->requestSize);
+
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_RESPONSESIZE_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_RESPONSESIZE,
+ HTTP_REQUEST_RESPONSESIZE_SIZE);
+ msgpack_pack_int64(mp_pck, http_request->responseSize);
+
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_STATUS_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_STATUS, HTTP_REQUEST_STATUS_SIZE);
+ msgpack_pack_int64(mp_pck, http_request->status);
+
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_CACHE_FILL_BYTES_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_CACHE_FILL_BYTES,
+ HTTP_REQUEST_CACHE_FILL_BYTES_SIZE);
+ msgpack_pack_int64(mp_pck, http_request->cacheFillBytes);
+
+ /* Boolean sub-fields */
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_CACHE_LOOKUP_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_CACHE_LOOKUP,
+ HTTP_REQUEST_CACHE_LOOKUP_SIZE);
+ if (http_request->cacheLookup == FLB_TRUE) {
+ msgpack_pack_true(mp_pck);
+ }
+ else {
+ msgpack_pack_false(mp_pck);
+ }
+
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_CACHE_HIT_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_CACHE_HIT,
+ HTTP_REQUEST_CACHE_HIT_SIZE);
+    if (http_request->cacheHit == FLB_TRUE) {
+ msgpack_pack_true(mp_pck);
+ }
+ else {
+ msgpack_pack_false(mp_pck);
+ }
+
+ msgpack_pack_str(mp_pck, HTTP_REQUEST_CACHE_VALIDATE_WITH_ORIGIN_SERVER_SIZE);
+ msgpack_pack_str_body(mp_pck, HTTP_REQUEST_CACHE_VALIDATE_WITH_ORIGIN_SERVER,
+ HTTP_REQUEST_CACHE_VALIDATE_WITH_ORIGIN_SERVER_SIZE);
+ if (http_request->cacheValidatedWithOriginServer == FLB_TRUE) {
+ msgpack_pack_true(mp_pck);
+ }
+ else {
+ msgpack_pack_false(mp_pck);
+ }
+}
+
+/* latency should be in the format:
+ * whitespace (opt.) + integer + point & decimal (opt.)
+ * + whitespace (opt.) + "s" + whitespace (opt.)
+ *
+ * latency is Duration, so the maximum value is "315576000000.999999999s".
+ * (23 characters in length)
+ */
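+/*
+ * Illustrative behaviour (inputs are hypothetical): " 0.27 s " passes the
+ * pattern and is normalized to "0.27s"; a value such as "fast" does not
+ * match and http_request->latency is left untouched.
+ */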
+static void validate_latency(msgpack_object_str latency_in_payload,
+ struct http_request_field *http_request) {
+ int i = 0;
+ int j = 0;
+ int status = 0;
+ char extract_latency[32];
+ flb_sds_t pattern;
+ struct flb_regex *regex;
+
+ pattern = flb_sds_create("^\\s*\\d+(.\\d+)?\\s*s\\s*$");
+ if (!pattern) {
+ return;
+ }
+
+ if (latency_in_payload.size > sizeof(extract_latency)) {
+ flb_sds_destroy(pattern);
+ return;
+ }
+
+ regex = flb_regex_create(pattern);
+ status = flb_regex_match(regex,
+ (unsigned char *) latency_in_payload.ptr,
+ latency_in_payload.size);
+ flb_regex_destroy(regex);
+ flb_sds_destroy(pattern);
+
+ if (status == 1) {
+ for (; i < latency_in_payload.size; ++ i) {
+ if (latency_in_payload.ptr[i] == '.' || latency_in_payload.ptr[i] == 's'
+ || isdigit(latency_in_payload.ptr[i])) {
+ extract_latency[j] = latency_in_payload.ptr[i];
+ ++ j;
+ }
+ }
+ http_request->latency = flb_sds_copy(http_request->latency, extract_latency, j);
+ }
+}
+
+/* Return true if httpRequest extracted */
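+/*
+ * Illustrative record fragment this function looks for, assuming the default
+ * http_request_key (the sub-field values are hypothetical):
+ *
+ *   "logging.googleapis.com/http_request": {
+ *     "requestMethod": "GET",
+ *     "requestUrl": "https://example.com/",
+ *     "status": 200,
+ *     "latency": "0.12s"
+ *   }
+ */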
+int extract_http_request(struct http_request_field *http_request,
+ flb_sds_t http_request_key,
+ int http_request_key_size,
+ msgpack_object *obj, int *extra_subfields)
+{
+ http_request_status op_status = NO_HTTPREQUEST;
+ msgpack_object_kv *p;
+ msgpack_object_kv *pend;
+ msgpack_object_kv *tmp_p;
+ msgpack_object_kv *tmp_pend;
+
+ if (obj->via.map.size == 0) {
+ return FLB_FALSE;
+ }
+
+ p = obj->via.map.ptr;
+ pend = obj->via.map.ptr + obj->via.map.size;
+
+ for (; p < pend && op_status == NO_HTTPREQUEST; ++p) {
+
+ if (p->val.type != MSGPACK_OBJECT_MAP
+ || !validate_key(p->key, http_request_key,
+ http_request_key_size)) {
+
+ continue;
+ }
+
+ op_status = HTTPREQUEST_EXISTS;
+ msgpack_object sub_field = p->val;
+
+ tmp_p = sub_field.via.map.ptr;
+ tmp_pend = sub_field.via.map.ptr + sub_field.via.map.size;
+
+ /* Validate the subfields of httpRequest */
+ for (; tmp_p < tmp_pend; ++tmp_p) {
+ if (tmp_p->key.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ if (validate_key(tmp_p->key, HTTP_REQUEST_LATENCY,
+ HTTP_REQUEST_LATENCY_SIZE)) {
+ if (tmp_p->val.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+ validate_latency(tmp_p->val.via.str, http_request);
+ }
+ else if (validate_key(tmp_p->key, HTTP_REQUEST_PROTOCOL,
+ HTTP_REQUEST_PROTOCOL_SIZE)) {
+ try_assign_subfield_str(tmp_p->val, &http_request->protocol);
+ }
+ else if (validate_key(tmp_p->key, HTTP_REQUEST_REFERER,
+ HTTP_REQUEST_REFERER_SIZE)) {
+ try_assign_subfield_str(tmp_p->val, &http_request->referer);
+ }
+ else if (validate_key(tmp_p->key, HTTP_REQUEST_REMOTE_IP,
+ HTTP_REQUEST_REMOTE_IP_SIZE)) {
+ try_assign_subfield_str(tmp_p->val, &http_request->remoteIp);
+ }
+ else if (validate_key(tmp_p->key, HTTP_REQUEST_REQUEST_METHOD,
+ HTTP_REQUEST_REQUEST_METHOD_SIZE)) {
+ try_assign_subfield_str(tmp_p->val, &http_request->requestMethod);
+ }
+ else if (validate_key(tmp_p->key, HTTP_REQUEST_REQUEST_URL,
+ HTTP_REQUEST_REQUEST_URL_SIZE)) {
+ try_assign_subfield_str(tmp_p->val, &http_request->requestUrl);
+ }
+ else if (validate_key(tmp_p->key, HTTP_REQUEST_SERVER_IP,
+ HTTP_REQUEST_SERVER_IP_SIZE)) {
+ try_assign_subfield_str(tmp_p->val, &http_request->serverIp);
+ }
+ else if (validate_key(tmp_p->key, HTTP_REQUEST_USER_AGENT,
+ HTTP_REQUEST_USER_AGENT_SIZE)) {
+ try_assign_subfield_str(tmp_p->val, &http_request->userAgent);
+ }
+
+ else if (validate_key(tmp_p->key, HTTP_REQUEST_CACHE_FILL_BYTES,
+ HTTP_REQUEST_CACHE_FILL_BYTES_SIZE)) {
+ try_assign_subfield_int(tmp_p->val, &http_request->cacheFillBytes);
+ }
+ else if (validate_key(tmp_p->key, HTTP_REQUEST_REQUESTSIZE,
+ HTTP_REQUEST_REQUESTSIZE_SIZE)) {
+ try_assign_subfield_int(tmp_p->val, &http_request->requestSize);
+ }
+ else if (validate_key(tmp_p->key, HTTP_REQUEST_RESPONSESIZE,
+ HTTP_REQUEST_RESPONSESIZE_SIZE)) {
+ try_assign_subfield_int(tmp_p->val, &http_request->responseSize);
+ }
+ else if (validate_key(tmp_p->key, HTTP_REQUEST_STATUS,
+ HTTP_REQUEST_STATUS_SIZE)) {
+ try_assign_subfield_int(tmp_p->val, &http_request->status);
+ }
+
+ else if (validate_key(tmp_p->key, HTTP_REQUEST_CACHE_HIT,
+ HTTP_REQUEST_CACHE_HIT_SIZE)) {
+ try_assign_subfield_bool(tmp_p->val, &http_request->cacheHit);
+ }
+ else if (validate_key(tmp_p->key, HTTP_REQUEST_CACHE_LOOKUP,
+ HTTP_REQUEST_CACHE_LOOKUP_SIZE)) {
+ try_assign_subfield_bool(tmp_p->val, &http_request->cacheLookup);
+ }
+ else if (validate_key(tmp_p->key, HTTP_REQUEST_CACHE_VALIDATE_WITH_ORIGIN_SERVER,
+ HTTP_REQUEST_CACHE_VALIDATE_WITH_ORIGIN_SERVER_SIZE)) {
+ try_assign_subfield_bool(tmp_p->val,
+ &http_request->cacheValidatedWithOriginServer);
+ }
+
+ else {
+ *extra_subfields += 1;
+ }
+ }
+ }
+
+ return op_status == HTTPREQUEST_EXISTS;
+}
+
+void pack_extra_http_request_subfields(msgpack_packer *mp_pck,
+ msgpack_object *http_request,
+ int extra_subfields) {
+ msgpack_object_kv *p = http_request->via.map.ptr;
+ msgpack_object_kv *const pend = http_request->via.map.ptr + http_request->via.map.size;
+
+ msgpack_pack_map(mp_pck, extra_subfields);
+
+ for (; p < pend; ++p) {
+ if (validate_key(p->key, HTTP_REQUEST_LATENCY,
+ HTTP_REQUEST_LATENCY_SIZE)
+ || validate_key(p->key, HTTP_REQUEST_PROTOCOL,
+ HTTP_REQUEST_PROTOCOL_SIZE)
+ || validate_key(p->key, HTTP_REQUEST_REFERER,
+ HTTP_REQUEST_REFERER_SIZE)
+ || validate_key(p->key, HTTP_REQUEST_REMOTE_IP,
+ HTTP_REQUEST_REMOTE_IP_SIZE)
+ || validate_key(p->key, HTTP_REQUEST_REQUEST_METHOD,
+ HTTP_REQUEST_REQUEST_METHOD_SIZE)
+ || validate_key(p->key, HTTP_REQUEST_REQUEST_URL,
+ HTTP_REQUEST_REQUEST_URL_SIZE)
+ || validate_key(p->key, HTTP_REQUEST_SERVER_IP,
+ HTTP_REQUEST_SERVER_IP_SIZE)
+ || validate_key(p->key, HTTP_REQUEST_USER_AGENT,
+ HTTP_REQUEST_USER_AGENT_SIZE)
+ || validate_key(p->key, HTTP_REQUEST_CACHE_FILL_BYTES,
+ HTTP_REQUEST_CACHE_FILL_BYTES_SIZE)
+ || validate_key(p->key, HTTP_REQUEST_REQUESTSIZE,
+ HTTP_REQUEST_REQUESTSIZE_SIZE)
+ || validate_key(p->key, HTTP_REQUEST_RESPONSESIZE,
+ HTTP_REQUEST_RESPONSESIZE_SIZE)
+ || validate_key(p->key, HTTP_REQUEST_STATUS,
+ HTTP_REQUEST_STATUS_SIZE)
+ || validate_key(p->key, HTTP_REQUEST_CACHE_HIT,
+ HTTP_REQUEST_CACHE_HIT_SIZE)
+ || validate_key(p->key, HTTP_REQUEST_CACHE_LOOKUP,
+ HTTP_REQUEST_CACHE_LOOKUP_SIZE)
+ || validate_key(p->key, HTTP_REQUEST_CACHE_VALIDATE_WITH_ORIGIN_SERVER,
+ HTTP_REQUEST_CACHE_VALIDATE_WITH_ORIGIN_SERVER_SIZE)) {
+
+ continue;
+ }
+
+ msgpack_pack_object(mp_pck, p->key);
+ msgpack_pack_object(mp_pck, p->val);
+ }
+}
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver_http_request.h b/src/fluent-bit/plugins/out_stackdriver/stackdriver_http_request.h
new file mode 100644
index 000000000..8b935c3f7
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver_http_request.h
@@ -0,0 +1,120 @@
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef FLB_STD_HTTPREQUEST_H
+#define FLB_STD_HTTPREQUEST_H
+
+#include "stackdriver.h"
+
+/* subfield name and size */
+#define HTTP_REQUEST_LATENCY "latency"
+#define HTTP_REQUEST_PROTOCOL "protocol"
+#define HTTP_REQUEST_REFERER "referer"
+#define HTTP_REQUEST_REMOTE_IP "remoteIp"
+#define HTTP_REQUEST_REQUEST_METHOD "requestMethod"
+#define HTTP_REQUEST_REQUEST_URL "requestUrl"
+#define HTTP_REQUEST_SERVER_IP "serverIp"
+#define HTTP_REQUEST_USER_AGENT "userAgent"
+#define HTTP_REQUEST_CACHE_FILL_BYTES "cacheFillBytes"
+#define HTTP_REQUEST_REQUESTSIZE "requestSize"
+#define HTTP_REQUEST_RESPONSESIZE "responseSize"
+#define HTTP_REQUEST_STATUS "status"
+#define HTTP_REQUEST_CACHE_HIT "cacheHit"
+#define HTTP_REQUEST_CACHE_LOOKUP "cacheLookup"
+#define HTTP_REQUEST_CACHE_VALIDATE_WITH_ORIGIN_SERVER "cacheValidatedWithOriginServer"
+
+#define HTTP_REQUEST_LATENCY_SIZE 7
+#define HTTP_REQUEST_PROTOCOL_SIZE 8
+#define HTTP_REQUEST_REFERER_SIZE 7
+#define HTTP_REQUEST_REMOTE_IP_SIZE 8
+#define HTTP_REQUEST_REQUEST_METHOD_SIZE 13
+#define HTTP_REQUEST_REQUEST_URL_SIZE 10
+#define HTTP_REQUEST_SERVER_IP_SIZE 8
+#define HTTP_REQUEST_USER_AGENT_SIZE 9
+#define HTTP_REQUEST_CACHE_FILL_BYTES_SIZE 14
+#define HTTP_REQUEST_REQUESTSIZE_SIZE 11
+#define HTTP_REQUEST_RESPONSESIZE_SIZE 12
+#define HTTP_REQUEST_STATUS_SIZE 6
+#define HTTP_REQUEST_CACHE_HIT_SIZE 8
+#define HTTP_REQUEST_CACHE_LOOKUP_SIZE 11
+#define HTTP_REQUEST_CACHE_VALIDATE_WITH_ORIGIN_SERVER_SIZE 30
+
+
+struct http_request_field {
+ flb_sds_t latency;
+ flb_sds_t protocol;
+ flb_sds_t referer;
+ flb_sds_t remoteIp;
+ flb_sds_t requestMethod;
+ flb_sds_t requestUrl;
+ flb_sds_t serverIp;
+ flb_sds_t userAgent;
+
+ int64_t cacheFillBytes;
+ int64_t requestSize;
+ int64_t responseSize;
+ int64_t status;
+
+ int cacheHit;
+ int cacheLookup;
+ int cacheValidatedWithOriginServer;
+};
+
+void init_http_request(struct http_request_field *http_request);
+void destroy_http_request(struct http_request_field *http_request);
+
+/*
+ * Add httpRequest field to the entries.
+ * The structure of httpRequest is as shown in struct http_request_field
+ */
+void add_http_request_field(struct http_request_field *http_request,
+ msgpack_packer *mp_pck);
+
+/*
+ * Extract the httpRequest field from the jsonPayload.
+ * If the httpRequest field exists, return TRUE and store the subfields.
+ * If there are extra subfields, count the number.
+ */
+int extract_http_request(struct http_request_field *http_request,
+ flb_sds_t http_request_key,
+ int http_request_key_size,
+ msgpack_object *obj, int *extra_subfields);
+
+/*
+ * When there are extra subfields, we will preserve the extra subfields inside jsonPayload
+ * For example, if the jsonPayload is as followed:
+ * jsonPayload {
+ * "logging.googleapis.com/http_request": {
+ * "requestMethod": "GET",
+ * "latency": "1s",
+ * "cacheLookup": true,
+ * "extra": "some string" #extra subfield
+ * }
+ * }
+ * We preserve the extra subfields; after extraction the jsonPayload is:
+ * jsonPayload {
+ * "logging.googleapis.com/http_request": {
+ * "extra": "some string"
+ * }
+ * }
+ */
+void pack_extra_http_request_subfields(msgpack_packer *mp_pck,
+ msgpack_object *http_request,
+ int extra_subfields);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver_operation.c b/src/fluent-bit/plugins/out_stackdriver/stackdriver_operation.c
new file mode 100644
index 000000000..548e8b473
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver_operation.c
@@ -0,0 +1,147 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+
+#include "stackdriver.h"
+#include "stackdriver_helper.h"
+#include "stackdriver_operation.h"
+
+typedef enum {
+ NO_OPERATION = 1,
+ OPERATION_EXISTED = 2
+} operation_status;
+
+void add_operation_field(flb_sds_t *operation_id, flb_sds_t *operation_producer,
+ int *operation_first, int *operation_last,
+ msgpack_packer *mp_pck)
+{
+ msgpack_pack_str(mp_pck, 9);
+ msgpack_pack_str_body(mp_pck, "operation", 9);
+
+ msgpack_pack_map(mp_pck, 4);
+
+ msgpack_pack_str(mp_pck, OPERATION_ID_SIZE);
+ msgpack_pack_str_body(mp_pck, OPERATION_ID, OPERATION_ID_SIZE);
+ msgpack_pack_str(mp_pck, flb_sds_len(*operation_id));
+ msgpack_pack_str_body(mp_pck, *operation_id, flb_sds_len(*operation_id));
+
+ msgpack_pack_str(mp_pck, OPERATION_PRODUCER_SIZE);
+ msgpack_pack_str_body(mp_pck, OPERATION_PRODUCER, OPERATION_PRODUCER_SIZE);
+ msgpack_pack_str(mp_pck, flb_sds_len(*operation_producer));
+ msgpack_pack_str_body(mp_pck, *operation_producer,
+ flb_sds_len(*operation_producer));
+
+ msgpack_pack_str(mp_pck, OPERATION_FIRST_SIZE);
+ msgpack_pack_str_body(mp_pck, OPERATION_FIRST, OPERATION_FIRST_SIZE);
+ if (*operation_first == FLB_TRUE) {
+ msgpack_pack_true(mp_pck);
+ }
+ else {
+ msgpack_pack_false(mp_pck);
+ }
+
+ msgpack_pack_str(mp_pck, OPERATION_LAST_SIZE);
+ msgpack_pack_str_body(mp_pck, OPERATION_LAST, OPERATION_LAST_SIZE);
+ if (*operation_last == FLB_TRUE) {
+ msgpack_pack_true(mp_pck);
+ }
+ else {
+ msgpack_pack_false(mp_pck);
+ }
+}
+
+/* Return true if operation extracted */
+int extract_operation(flb_sds_t *operation_id, flb_sds_t *operation_producer,
+ int *operation_first, int *operation_last,
+ msgpack_object *obj, int *extra_subfields)
+{
+ operation_status op_status = NO_OPERATION;
+ msgpack_object_kv *p;
+ msgpack_object_kv *pend;
+ msgpack_object_kv *tmp_p;
+ msgpack_object_kv *tmp_pend;
+
+ if (obj->via.map.size == 0) {
+ return FLB_FALSE;
+ }
+ p = obj->via.map.ptr;
+ pend = obj->via.map.ptr + obj->via.map.size;
+
+ for (; p < pend && op_status == NO_OPERATION; ++p) {
+
+ if (p->val.type != MSGPACK_OBJECT_MAP
+ || !validate_key(p->key, OPERATION_FIELD_IN_JSON,
+ OPERATION_KEY_SIZE)) {
+ continue;
+ }
+
+ op_status = OPERATION_EXISTED;
+ msgpack_object sub_field = p->val;
+
+ tmp_p = sub_field.via.map.ptr;
+ tmp_pend = sub_field.via.map.ptr + sub_field.via.map.size;
+
+ /* Validate the subfields of operation */
+ for (; tmp_p < tmp_pend; ++tmp_p) {
+ if (tmp_p->key.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ if (validate_key(tmp_p->key, OPERATION_ID, OPERATION_ID_SIZE)) {
+ try_assign_subfield_str(tmp_p->val, operation_id);
+ }
+ else if (validate_key(tmp_p->key, OPERATION_PRODUCER,
+ OPERATION_PRODUCER_SIZE)) {
+ try_assign_subfield_str(tmp_p->val, operation_producer);
+ }
+ else if (validate_key(tmp_p->key, OPERATION_FIRST, OPERATION_FIRST_SIZE)) {
+ try_assign_subfield_bool(tmp_p->val, operation_first);
+ }
+ else if (validate_key(tmp_p->key, OPERATION_LAST, OPERATION_LAST_SIZE)) {
+ try_assign_subfield_bool(tmp_p->val, operation_last);
+ }
+ else {
+ *extra_subfields += 1;
+ }
+ }
+ }
+
+ return op_status == OPERATION_EXISTED;
+}
+
+void pack_extra_operation_subfields(msgpack_packer *mp_pck,
+ msgpack_object *operation, int extra_subfields) {
+ msgpack_object_kv *p = operation->via.map.ptr;
+ msgpack_object_kv *const pend = operation->via.map.ptr + operation->via.map.size;
+
+ msgpack_pack_map(mp_pck, extra_subfields);
+
+ for (; p < pend; ++p) {
+ if (validate_key(p->key, OPERATION_ID, OPERATION_ID_SIZE)
+ || validate_key(p->key, OPERATION_PRODUCER, OPERATION_PRODUCER_SIZE)
+ || validate_key(p->key, OPERATION_FIRST, OPERATION_FIRST_SIZE)
+ || validate_key(p->key, OPERATION_LAST, OPERATION_LAST_SIZE)) {
+ continue;
+ }
+
+ msgpack_pack_object(mp_pck, p->key);
+ msgpack_pack_object(mp_pck, p->val);
+ }
+}
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver_operation.h b/src/fluent-bit/plugins/out_stackdriver/stackdriver_operation.h
new file mode 100644
index 000000000..ded886c3b
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver_operation.h
@@ -0,0 +1,82 @@
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef FLB_STD_OPERATION_H
+#define FLB_STD_OPERATION_H
+
+#include "stackdriver.h"
+
+/* subfield name and size */
+#define OPERATION_ID "id"
+#define OPERATION_PRODUCER "producer"
+#define OPERATION_FIRST "first"
+#define OPERATION_LAST "last"
+
+#define OPERATION_ID_SIZE 2
+#define OPERATION_PRODUCER_SIZE 8
+#define OPERATION_FIRST_SIZE 5
+#define OPERATION_LAST_SIZE 4
+
+/*
+ * Add operation field to the entries.
+ * The structure of operation is:
+ * {
+ * "id": string,
+ * "producer": string,
+ * "first": boolean,
+ * "last": boolean
+ * }
+ *
+ */
+void add_operation_field(flb_sds_t *operation_id, flb_sds_t *operation_producer,
+ int *operation_first, int *operation_last,
+ msgpack_packer *mp_pck);
+
+/*
+ * Extract the operation field from the jsonPayload.
+ * If the operation field exists, return TRUE and store the subfields.
+ * If there are extra subfields, count the number.
+ */
+int extract_operation(flb_sds_t *operation_id, flb_sds_t *operation_producer,
+ int *operation_first, int *operation_last,
+ msgpack_object *obj, int *extra_subfields);
+
+/*
+ * When there are extra subfields, we preserve them inside jsonPayload.
+ * For example, if the jsonPayload is as follows:
+ * jsonPayload {
+ * "logging.googleapis.com/operation": {
+ * "id": "id1",
+ * "producer": "id2",
+ * "first": true,
+ * "last": true,
+ * "extra": "some string" #extra subfield
+ * }
+ * }
+ * We preserve the extra subfields; after extraction the jsonPayload is:
+ * jsonPayload {
+ * "logging.googleapis.com/operation": {
+ * "extra": "some string"
+ * }
+ * }
+ */
+void pack_extra_operation_subfields(msgpack_packer *mp_pck, msgpack_object *operation,
+ int extra_subfields);
+
+
+#endif
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver_resource_types.c b/src/fluent-bit/plugins/out_stackdriver/stackdriver_resource_types.c
new file mode 100644
index 000000000..b114e4922
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver_resource_types.c
@@ -0,0 +1,143 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_kv.h>
+
+#include "stackdriver.h"
+#include "stackdriver_resource_types.h"
+
+static const struct resource_type resource_types[] = {
+ {
+ .id = RESOURCE_TYPE_K8S,
+ .resources = {"k8s_container", "k8s_node", "k8s_pod"},
+ .required_labels = {"cluster_name", "location"}
+ },
+ {
+ .id = RESOURCE_TYPE_GENERIC_NODE,
+ .resources = {"generic_node"},
+ .required_labels = {"location", "namespace", "node_id"}
+ },
+ {
+ .id = RESOURCE_TYPE_GENERIC_TASK,
+ .resources = {"generic_task"},
+ .required_labels = {"location", "namespace", "job", "task_id"}
+ }
+};
+
+static char **get_required_labels(int resource_type)
+{
+ int i;
+ int len;
+
+ len = sizeof(resource_types) / sizeof(resource_types[0]);
+ for(i = 0; i < len; i++) {
+ if (resource_types[i].id == resource_type) {
+ return (char **) resource_types[i].required_labels;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * set_resource_type():
+ * - Iterates through resource_types that are set up for validation and sets the
+ * resource_type if it matches one of them.
+ *  - A resource that is not in the resource_types list (e.g. "global") may still
+ *    be accepted and processed; it simply is not subject to label validation.
+ */
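+/*
+ * Example (a sketch based on the resource_types table above): with
+ * "resource k8s_container" configured, ctx->resource_type is set to
+ * RESOURCE_TYPE_K8S; a resource such as "global" is not in the table, so
+ * ctx->resource_type stays unchanged and no resource_labels validation is
+ * applied to it.
+ */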
+void set_resource_type(struct flb_stackdriver *ctx)
+{
+ int i;
+ int j;
+ int len;
+ char *resource;
+ struct resource_type resource_type;
+
+ len = sizeof(resource_types) / sizeof(resource_types[0]);
+ for(i = 0; i < len; i++) {
+ resource_type = resource_types[i];
+ for(j = 0; j < MAX_RESOURCE_ENTRIES; j++) {
+ if (resource_type.resources[j] != NULL) {
+ resource = resource_type.resources[j];
+ if (flb_sds_cmp(ctx->resource, resource, strlen(resource)) == 0) {
+ ctx->resource_type = resource_type.id;
+ return;
+ }
+ }
+ }
+ }
+}
+
+/*
+ * resource_api_has_required_labels():
+ * - Determines if all required labels for the set resource type are present as
+ * keys on the resource labels key-value pairs.
+ */
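+/*
+ * Example (sketch): for RESOURCE_TYPE_GENERIC_NODE the resource_labels
+ * entries must include the keys "location", "namespace" and "node_id". If
+ * any of them is missing, a warning is logged, should_skip_resource_labels_api
+ * is set to FLB_TRUE and the function returns FLB_FALSE.
+ */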
+int resource_api_has_required_labels(struct flb_stackdriver *ctx)
+{
+ struct mk_list *head;
+ struct flb_hash_table *ht;
+ struct flb_kv *label_kv;
+ char** required_labels;
+ int i;
+ int found;
+ void *tmp_buf;
+ size_t tmp_size;
+
+ if (mk_list_size(&ctx->resource_labels_kvs) == 0) {
+ return FLB_FALSE;
+ }
+
+ required_labels = get_required_labels(ctx->resource_type);
+ if (required_labels == NULL) {
+ flb_plg_warn(ctx->ins, "no validation applied to resource_labels "
+ "for set resource type");
+ return FLB_FALSE;
+ }
+
+ ht = flb_hash_table_create(FLB_HASH_TABLE_EVICT_NONE, MAX_REQUIRED_LABEL_ENTRIES, 0);
+ mk_list_foreach(head, &ctx->resource_labels_kvs) {
+ label_kv = mk_list_entry(head, struct flb_kv, _head);
+ for (i = 0; i < MAX_REQUIRED_LABEL_ENTRIES; i++) {
+ if (required_labels[i] != NULL && flb_sds_cmp(label_kv->key,
+ required_labels[i], strlen(required_labels[i])) == 0) {
+ flb_hash_table_add(ht, required_labels[i], strlen(required_labels[i]),
+ NULL, 0);
+ }
+ }
+ }
+
+ for (i = 0; i < MAX_REQUIRED_LABEL_ENTRIES; i++) {
+ if (required_labels[i] != NULL) {
+ found = flb_hash_table_get(ht, required_labels[i], strlen(required_labels[i]),
+ &tmp_buf, &tmp_size);
+ if (found == -1) {
+ flb_plg_warn(ctx->ins, "labels set in resource_labels will not be applied"
+ ", as the required resource label [%s] is missing", required_labels[i]);
+ ctx->should_skip_resource_labels_api = FLB_TRUE;
+ flb_hash_table_destroy(ht);
+ return FLB_FALSE;
+ }
+ }
+ }
+ flb_hash_table_destroy(ht);
+ return FLB_TRUE;
+}
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver_resource_types.h b/src/fluent-bit/plugins/out_stackdriver/stackdriver_resource_types.h
new file mode 100644
index 000000000..5e45c8745
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver_resource_types.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_STACKDRIVER_RESOURCE_TYPES_H
+#define FLB_OUT_STACKDRIVER_RESOURCE_TYPES_H
+
+#include "stackdriver.h"
+
+#define MAX_RESOURCE_ENTRIES 10
+#define MAX_REQUIRED_LABEL_ENTRIES 10
+
+#define RESOURCE_TYPE_K8S 1
+#define RESOURCE_TYPE_GENERIC_NODE 2
+#define RESOURCE_TYPE_GENERIC_TASK 3
+
+struct resource_type {
+ int id;
+ char* resources[MAX_RESOURCE_ENTRIES];
+ char* required_labels[MAX_REQUIRED_LABEL_ENTRIES];
+};
+
+void set_resource_type(struct flb_stackdriver *ctx);
+int resource_api_has_required_labels(struct flb_stackdriver *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver_source_location.c b/src/fluent-bit/plugins/out_stackdriver/stackdriver_source_location.c
new file mode 100644
index 000000000..58102c91e
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver_source_location.c
@@ -0,0 +1,139 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "stackdriver.h"
+#include "stackdriver_helper.h"
+#include "stackdriver_source_location.h"
+
+typedef enum {
+ NO_SOURCELOCATION = 1,
+ SOURCELOCATION_EXISTED = 2
+} source_location_status;
+
+
+void add_source_location_field(flb_sds_t *source_location_file,
+ int64_t source_location_line,
+ flb_sds_t *source_location_function,
+ msgpack_packer *mp_pck)
+{
+ msgpack_pack_str(mp_pck, 14);
+ msgpack_pack_str_body(mp_pck, "sourceLocation", 14);
+ msgpack_pack_map(mp_pck, 3);
+
+ msgpack_pack_str(mp_pck, SOURCE_LOCATION_FILE_SIZE);
+ msgpack_pack_str_body(mp_pck, SOURCE_LOCATION_FILE, SOURCE_LOCATION_FILE_SIZE);
+ msgpack_pack_str(mp_pck, flb_sds_len(*source_location_file));
+ msgpack_pack_str_body(mp_pck, *source_location_file,
+ flb_sds_len(*source_location_file));
+
+ msgpack_pack_str(mp_pck, SOURCE_LOCATION_LINE_SIZE);
+ msgpack_pack_str_body(mp_pck, SOURCE_LOCATION_LINE, SOURCE_LOCATION_LINE_SIZE);
+ msgpack_pack_int64(mp_pck, source_location_line);
+
+ msgpack_pack_str(mp_pck, SOURCE_LOCATION_FUNCTION_SIZE);
+ msgpack_pack_str_body(mp_pck, SOURCE_LOCATION_FUNCTION,
+ SOURCE_LOCATION_FUNCTION_SIZE);
+ msgpack_pack_str(mp_pck, flb_sds_len(*source_location_function));
+ msgpack_pack_str_body(mp_pck, *source_location_function,
+ flb_sds_len(*source_location_function));
+}
+
+/* Return FLB_TRUE if sourceLocation extracted */
+int extract_source_location(flb_sds_t *source_location_file,
+ int64_t *source_location_line,
+ flb_sds_t *source_location_function,
+ msgpack_object *obj, int *extra_subfields)
+{
+ source_location_status op_status = NO_SOURCELOCATION;
+ msgpack_object_kv *p;
+ msgpack_object_kv *pend;
+ msgpack_object_kv *tmp_p;
+ msgpack_object_kv *tmp_pend;
+
+ if (obj->via.map.size == 0) {
+ return FLB_FALSE;
+ }
+ p = obj->via.map.ptr;
+ pend = obj->via.map.ptr + obj->via.map.size;
+
+ for (; p < pend && op_status == NO_SOURCELOCATION; ++p) {
+
+ if (p->val.type != MSGPACK_OBJECT_MAP
+ || p->key.type != MSGPACK_OBJECT_STR
+ || !validate_key(p->key, SOURCELOCATION_FIELD_IN_JSON,
+ SOURCE_LOCATION_SIZE)) {
+
+ continue;
+ }
+
+ op_status = SOURCELOCATION_EXISTED;
+ msgpack_object sub_field = p->val;
+
+ tmp_p = sub_field.via.map.ptr;
+ tmp_pend = sub_field.via.map.ptr + sub_field.via.map.size;
+
+ /* Validate the subfields of sourceLocation */
+ for (; tmp_p < tmp_pend; ++tmp_p) {
+ if (tmp_p->key.type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ if (validate_key(tmp_p->key,
+ SOURCE_LOCATION_FILE,
+ SOURCE_LOCATION_FILE_SIZE)) {
+ try_assign_subfield_str(tmp_p->val, source_location_file);
+ }
+ else if (validate_key(tmp_p->key,
+ SOURCE_LOCATION_FUNCTION,
+ SOURCE_LOCATION_FUNCTION_SIZE)) {
+ try_assign_subfield_str(tmp_p->val, source_location_function);
+ }
+ else if (validate_key(tmp_p->key,
+ SOURCE_LOCATION_LINE,
+ SOURCE_LOCATION_LINE_SIZE)) {
+ try_assign_subfield_int(tmp_p->val, source_location_line);
+ }
+ else {
+ *extra_subfields += 1;
+ }
+ }
+ }
+
+ return op_status == SOURCELOCATION_EXISTED;
+}
+
+void pack_extra_source_location_subfields(msgpack_packer *mp_pck,
+ msgpack_object *source_location,
+ int extra_subfields) {
+ msgpack_object_kv *p = source_location->via.map.ptr;
+ msgpack_object_kv *const pend = source_location->via.map.ptr + source_location->via.map.size;
+
+ msgpack_pack_map(mp_pck, extra_subfields);
+
+ for (; p < pend; ++p) {
+ if (validate_key(p->key, SOURCE_LOCATION_FILE, SOURCE_LOCATION_FILE_SIZE)
+ || validate_key(p->key, SOURCE_LOCATION_LINE, SOURCE_LOCATION_LINE_SIZE)
+ || validate_key(p->key, SOURCE_LOCATION_FUNCTION,
+ SOURCE_LOCATION_FUNCTION_SIZE)) {
+ continue;
+ }
+
+ msgpack_pack_object(mp_pck, p->key);
+ msgpack_pack_object(mp_pck, p->val);
+ }
+}
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver_source_location.h b/src/fluent-bit/plugins/out_stackdriver/stackdriver_source_location.h
new file mode 100644
index 000000000..4f703d330
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver_source_location.h
@@ -0,0 +1,80 @@
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef FLB_STD_SOURCELOCATION_H
+#define FLB_STD_SOURCELOCATION_H
+
+#include "stackdriver.h"
+
+/* subfield name and size */
+#define SOURCE_LOCATION_FILE "file"
+#define SOURCE_LOCATION_LINE "line"
+#define SOURCE_LOCATION_FUNCTION "function"
+
+#define SOURCE_LOCATION_FILE_SIZE 4
+#define SOURCE_LOCATION_LINE_SIZE 4
+#define SOURCE_LOCATION_FUNCTION_SIZE 8
+
+/*
+ * Add sourceLocation field to the entries.
+ * The structure of sourceLocation is:
+ * {
+ * "file": string,
+ * "line": int,
+ * "function": string
+ * }
+ */
+void add_source_location_field(flb_sds_t *source_location_file,
+ int64_t source_location_line,
+ flb_sds_t *source_location_function,
+ msgpack_packer *mp_pck);
+
+/*
+ * Extract the sourceLocation field from the jsonPayload.
+ * If the sourceLocation field exists, return TRUE and store the subfields.
+ * If there are extra subfields, count the number.
+ */
+int extract_source_location(flb_sds_t *source_location_file,
+ int64_t *source_location_line,
+ flb_sds_t *source_location_function,
+ msgpack_object *obj, int *extra_subfields);
+
+/*
+ * When there are extra subfields, we preserve them inside jsonPayload.
+ * For example, if the jsonPayload is as follows:
+ * jsonPayload {
+ * "logging.googleapis.com/sourceLocation": {
+ * "file": "file1",
+ * "line": 1,
+ * "function": "func1",
+ * "extra": "some string" #extra subfield
+ * }
+ * }
+ * We preserve the extra subfields; after extraction the jsonPayload is:
+ * jsonPayload {
+ * "logging.googleapis.com/sourceLocation": {
+ * "extra": "some string"
+ * }
+ * }
+ */
+void pack_extra_source_location_subfields(msgpack_packer *mp_pck,
+ msgpack_object *source_location,
+ int extra_subfields);
+
+
+#endif
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver_timestamp.c b/src/fluent-bit/plugins/out_stackdriver/stackdriver_timestamp.c
new file mode 100644
index 000000000..a9b350d22
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver_timestamp.c
@@ -0,0 +1,180 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+
+#include "stackdriver.h"
+#include "stackdriver_helper.h"
+#include "stackdriver_timestamp.h"
+#include <fluent-bit/flb_regex.h>
+
+#include <ctype.h>
+
+static int is_integer(char *str, int size) {
+ int i;
+ for (i = 0; i < size; ++ i) {
+ if (!isdigit(str[i])) {
+ return FLB_FALSE;
+ }
+ }
+ return FLB_TRUE;
+}
+
+static void try_assign_time(long long seconds, long long nanos,
+ struct flb_time *tms)
+{
+ if (seconds != 0) {
+ tms->tm.tv_sec = seconds;
+ tms->tm.tv_nsec = nanos;
+ }
+}
+
+static long long get_integer(msgpack_object obj)
+{
+ char tmp[32];
+
+ if (obj.type == MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ return obj.via.i64;
+ }
+ else if (obj.type == MSGPACK_OBJECT_STR
+ && is_integer((char *) obj.via.str.ptr,
+ obj.via.str.size)) {
+
+ /*
+         * Use an intermediate buffer for the conversion to avoid any
+         * overflow in atoll(). LLONG_MAX is +9,223,372,036,854,775,807,
+         * so a 32-byte buffer is enough.
+ */
+ if (obj.via.str.size > sizeof(tmp) - 1) {
+ return 0;
+ }
+
+ memcpy(tmp, obj.via.str.ptr, obj.via.str.size);
+ tmp[obj.via.str.size] = '\0';
+
+ return atoll(tmp);
+ }
+
+ return 0;
+}
+
+static int extract_format_timestamp_object(msgpack_object *obj,
+ struct flb_time *tms)
+{
+ int seconds_found = FLB_FALSE;
+ int nanos_found = FLB_FALSE;
+ long long seconds = 0;
+ long long nanos = 0;
+
+ msgpack_object_kv *p;
+ msgpack_object_kv *pend;
+ msgpack_object_kv *tmp_p;
+ msgpack_object_kv *tmp_pend;
+
+ if (obj->via.map.size == 0) {
+ return FLB_FALSE;
+ }
+ p = obj->via.map.ptr;
+ pend = obj->via.map.ptr + obj->via.map.size;
+
+ for (; p < pend; ++p) {
+ if (!validate_key(p->key, "timestamp", 9)
+ || p->val.type != MSGPACK_OBJECT_MAP) {
+ continue;
+ }
+
+ tmp_p = p->val.via.map.ptr;
+ tmp_pend = p->val.via.map.ptr + p->val.via.map.size;
+
+ for (; tmp_p < tmp_pend; ++tmp_p) {
+ if (validate_key(tmp_p->key, "seconds", 7)) {
+ seconds_found = FLB_TRUE;
+ seconds = get_integer(tmp_p->val);
+
+ if (nanos_found == FLB_TRUE) {
+ try_assign_time(seconds, nanos, tms);
+ return FLB_TRUE;
+ }
+ }
+ else if (validate_key(tmp_p->key, "nanos", 5)) {
+ nanos_found = FLB_TRUE;
+ nanos = get_integer(tmp_p->val);
+
+ if (seconds_found == FLB_TRUE) {
+ try_assign_time(seconds, nanos, tms);
+ return FLB_TRUE;
+ }
+ }
+ }
+ }
+ return FLB_FALSE;
+}
+
+static int extract_format_timestamp_duo_fields(msgpack_object *obj,
+ struct flb_time *tms)
+{
+ int seconds_found = FLB_FALSE;
+ int nanos_found = FLB_FALSE;
+ long long seconds = 0;
+ long long nanos = 0;
+
+ msgpack_object_kv *p;
+ msgpack_object_kv *pend;
+
+ if (obj->via.map.size == 0) {
+ return FLB_FALSE;
+ }
+ p = obj->via.map.ptr;
+ pend = obj->via.map.ptr + obj->via.map.size;
+
+ for (; p < pend; ++p) {
+ if (validate_key(p->key, "timestampSeconds", 16)) {
+ seconds_found = FLB_TRUE;
+ seconds = get_integer(p->val);
+
+ if (nanos_found == FLB_TRUE) {
+ try_assign_time(seconds, nanos, tms);
+ return FLB_TRUE;
+ }
+ }
+ else if (validate_key(p->key, "timestampNanos", 14)) {
+ nanos_found = FLB_TRUE;
+ nanos = get_integer(p->val);
+
+ if (seconds_found == FLB_TRUE) {
+ try_assign_time(seconds, nanos, tms);
+ return FLB_TRUE;
+ }
+ }
+ }
+
+ return FLB_FALSE;
+}
+
+timestamp_status extract_timestamp(msgpack_object *obj,
+ struct flb_time *tms)
+{
+ if (extract_format_timestamp_object(obj, tms)) {
+ return FORMAT_TIMESTAMP_OBJECT;
+ }
+ if (extract_format_timestamp_duo_fields(obj, tms)) {
+ return FORMAT_TIMESTAMP_DUO_FIELDS;
+ }
+ return TIMESTAMP_NOT_PRESENT;
+}
diff --git a/src/fluent-bit/plugins/out_stackdriver/stackdriver_timestamp.h b/src/fluent-bit/plugins/out_stackdriver/stackdriver_timestamp.h
new file mode 100644
index 000000000..f3c025864
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stackdriver/stackdriver_timestamp.h
@@ -0,0 +1,47 @@
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef FLB_STD_TIMESTAMP_H
+#define FLB_STD_TIMESTAMP_H
+
+#include "stackdriver.h"
+#include <fluent-bit/flb_time.h>
+
+typedef enum {
+ TIMESTAMP_NOT_PRESENT = 0,
+ FORMAT_TIMESTAMP_OBJECT = 1,
+ FORMAT_TIMESTAMP_DUO_FIELDS = 2
+} timestamp_status;
+
+/*
+ * Two formats of time-related fields are currently supported:
+ * - "timestamp": {"seconds", "nanos"}
+ * - "timestampSeconds"/"timestampNanos"
+ *
+ * If no timestamp field exists, return TIMESTAMP_NOT_PRESENT.
+ * If the timestamp format is "timestamp": {"seconds", "nanos"},
+ * set the time and return FORMAT_TIMESTAMP_OBJECT.
+ *
+ * If the timestamp format is "timestampSeconds"/"timestampNanos",
+ * set the time and return FORMAT_TIMESTAMP_DUO_FIELDS.
+ */
+timestamp_status extract_timestamp(msgpack_object *obj,
+ struct flb_time *tms);
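+
+/*
+ * Illustrative payloads (field values are assumed) for the two supported
+ * formats; both set the entry time to 1596149787.142000000:
+ *
+ *   {"timestamp": {"seconds": 1596149787, "nanos": 142000000}, ...}
+ *   {"timestampSeconds": 1596149787, "timestampNanos": 142000000, ...}
+ */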
+
+
+#endif
diff --git a/src/fluent-bit/plugins/out_stdout/CMakeLists.txt b/src/fluent-bit/plugins/out_stdout/CMakeLists.txt
new file mode 100644
index 000000000..2331680c1
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stdout/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ stdout.c)
+
+FLB_PLUGIN(out_stdout "${src}" "")
diff --git a/src/fluent-bit/plugins/out_stdout/stdout.c b/src/fluent-bit/plugins/out_stdout/stdout.c
new file mode 100644
index 000000000..2d6bff598
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stdout/stdout.c
@@ -0,0 +1,301 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_slist.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_metrics.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include <ctraces/ctraces.h>
+#include <ctraces/ctr_decode_msgpack.h>
+
+#include <msgpack.h>
+#include "stdout.h"
+
+
+static int cb_stdout_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ int ret;
+ const char *tmp;
+ struct flb_stdout *ctx = NULL;
+ (void) ins;
+ (void) config;
+ (void) data;
+
+ ctx = flb_calloc(1, sizeof(struct flb_stdout));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ ctx->out_format = FLB_PACK_JSON_FORMAT_NONE;
+ tmp = flb_output_get_property("format", ins);
+ if (tmp) {
+ ret = flb_pack_to_json_format_type(tmp);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "unrecognized 'format' option. "
+ "Using 'msgpack'");
+ }
+ else {
+ ctx->out_format = ret;
+ }
+ }
+
+ /* Date key */
+ ctx->date_key = ctx->json_date_key;
+ tmp = flb_output_get_property("json_date_key", ins);
+ if (tmp) {
+ /* Just check if we have to disable it */
+ if (flb_utils_bool(tmp) == FLB_FALSE) {
+ ctx->date_key = NULL;
+ }
+ }
+
+ /* Date format for JSON output */
+ ctx->json_date_format = FLB_PACK_JSON_DATE_DOUBLE;
+ tmp = flb_output_get_property("json_date_format", ins);
+ if (tmp) {
+ ret = flb_pack_to_json_date_type(tmp);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "invalid json_date_format '%s'. "
+ "Using 'double' type", tmp);
+ }
+ else {
+ ctx->json_date_format = ret;
+ }
+ }
+
+ /* Export context */
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+}
+
+#ifdef FLB_HAVE_METRICS
+static void print_metrics_text(struct flb_output_instance *ins,
+ const void *data, size_t bytes)
+{
+ int ret;
+ size_t off = 0;
+ cfl_sds_t text;
+ struct cmt *cmt = NULL;
+
+ /* get cmetrics context */
+ ret = cmt_decode_msgpack_create(&cmt, (char *) data, bytes, &off);
+ if (ret != 0) {
+ flb_plg_error(ins, "could not process metrics payload");
+ return;
+ }
+
+ /* convert to text representation */
+ text = cmt_encode_text_create(cmt);
+
+ /* destroy cmt context */
+ cmt_destroy(cmt);
+
+ printf("%s", text);
+ fflush(stdout);
+
+ cmt_encode_text_destroy(text);
+}
+#endif
+
+static void print_traces_text(struct flb_output_instance *ins,
+ const void *data, size_t bytes)
+{
+ int ret;
+ size_t off = 0;
+ cfl_sds_t text;
+ struct ctrace *ctr = NULL;
+
+    /* get ctraces context */
+ ret = ctr_decode_msgpack_create(&ctr, (char *) data, bytes, &off);
+ if (ret != 0) {
+ flb_plg_error(ins, "could not process traces payload (ret=%i)", ret);
+ return;
+ }
+
+ /* convert to text representation */
+ text = ctr_encode_text_create(ctr);
+
+    /* destroy ctrace context */
+ ctr_destroy(ctr);
+
+ printf("%s", text);
+ fflush(stdout);
+
+ ctr_encode_text_destroy(text);
+}
+
+static void cb_stdout_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int result;
+ flb_sds_t json;
+ struct flb_stdout *ctx;
+ size_t cnt;
+
+ (void) config;
+
+ result = FLB_EVENT_DECODER_SUCCESS;
+ ctx = (struct flb_stdout *) out_context;
+ cnt = 0;
+
+#ifdef FLB_HAVE_METRICS
+ /* Check if the event type is metrics, handle the payload differently */
+ if (event_chunk->type == FLB_EVENT_TYPE_METRICS) {
+ print_metrics_text(ctx->ins, (char *)
+ event_chunk->data,
+ event_chunk->size);
+ FLB_OUTPUT_RETURN(FLB_OK);
+ }
+#endif
+
+ if (event_chunk->type == FLB_EVENT_TYPE_TRACES) {
+ print_traces_text(ctx->ins, (char *)
+ event_chunk->data,
+ event_chunk->size);
+ FLB_OUTPUT_RETURN(FLB_OK);
+ }
+
+ /* Assuming data is a log entry...*/
+ if (ctx->out_format != FLB_PACK_JSON_FORMAT_NONE) {
+ json = flb_pack_msgpack_to_json_format(event_chunk->data,
+ event_chunk->size,
+ ctx->out_format,
+ ctx->json_date_format,
+ ctx->date_key);
+ write(STDOUT_FILENO, json, flb_sds_len(json));
+ flb_sds_destroy(json);
+
+ /*
+         * If we are not in json_lines mode, we need to add an extra
+         * line break.
+ */
+ if (ctx->out_format != FLB_PACK_JSON_FORMAT_LINES) {
+ printf("\n");
+ }
+ fflush(stdout);
+ }
+ else {
+ result = flb_log_event_decoder_init(&log_decoder,
+ (char *) event_chunk->data,
+ event_chunk->size);
+
+ if (result != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", result);
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ while (flb_log_event_decoder_next(&log_decoder,
+ &log_event) == FLB_EVENT_DECODER_SUCCESS) {
+ printf("[%zd] %s: [[", cnt++, event_chunk->tag);
+
+ printf("%"PRIu32".%09lu, ",
+ (uint32_t)log_event.timestamp.tm.tv_sec,
+ log_event.timestamp.tm.tv_nsec);
+
+ msgpack_object_print(stdout, *log_event.metadata);
+
+ printf("], ");
+
+ msgpack_object_print(stdout, *log_event.body);
+
+ printf("]\n");
+ }
+ result = flb_log_event_decoder_get_last_result(&log_decoder);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ }
+
+ fflush(stdout);
+
+ if (result != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins, "Log event decoder error : %d", result);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int cb_stdout_exit(void *data, struct flb_config *config)
+{
+ struct flb_stdout *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ flb_free(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "format", NULL,
+ 0, FLB_FALSE, 0,
+        "Specifies the data format to be printed. Supported formats are msgpack, json, json_lines and json_stream."
+ },
+ {
+ FLB_CONFIG_MAP_STR, "json_date_format", NULL,
+ 0, FLB_FALSE, 0,
+ FBL_PACK_JSON_DATE_FORMAT_DESCRIPTION
+ },
+ {
+ FLB_CONFIG_MAP_STR, "json_date_key", "date",
+ 0, FLB_TRUE, offsetof(struct flb_stdout, json_date_key),
+ "Specifies the name of the date field in output."
+ },
+
+ /* EOF */
+ {0}
+};
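+
+/*
+ * A minimal configuration sketch (assumed, classic mode) exercising the
+ * properties declared above:
+ *
+ *   [OUTPUT]
+ *       name              stdout
+ *       match             *
+ *       format            json_lines
+ *       json_date_key     timestamp
+ *       json_date_format  iso8601
+ */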
+
+/* Plugin registration */
+struct flb_output_plugin out_stdout_plugin = {
+ .name = "stdout",
+ .description = "Prints events to STDOUT",
+ .cb_init = cb_stdout_init,
+ .cb_flush = cb_stdout_flush,
+ .cb_exit = cb_stdout_exit,
+ .flags = 0,
+ .workers = 1,
+ .event_type = FLB_OUTPUT_LOGS | FLB_OUTPUT_METRICS | FLB_OUTPUT_TRACES,
+ .config_map = config_map
+};
diff --git a/src/fluent-bit/plugins/out_stdout/stdout.h b/src/fluent-bit/plugins/out_stdout/stdout.h
new file mode 100644
index 000000000..0db82f389
--- /dev/null
+++ b/src/fluent-bit/plugins/out_stdout/stdout.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_STDOUT
+#define FLB_OUT_STDOUT
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_sds.h>
+
+struct flb_stdout {
+ int out_format;
+ int json_date_format;
+ flb_sds_t json_date_key;
+ flb_sds_t date_key;
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_syslog/CMakeLists.txt b/src/fluent-bit/plugins/out_syslog/CMakeLists.txt
new file mode 100644
index 000000000..556d8e1a4
--- /dev/null
+++ b/src/fluent-bit/plugins/out_syslog/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ syslog.c
+ syslog_conf.c)
+
+FLB_PLUGIN(out_syslog "${src}" "")
diff --git a/src/fluent-bit/plugins/out_syslog/syslog.c b/src/fluent-bit/plugins/out_syslog/syslog.c
new file mode 100644
index 000000000..a33351354
--- /dev/null
+++ b/src/fluent-bit/plugins/out_syslog/syslog.c
@@ -0,0 +1,1170 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+
+#include "syslog_conf.h"
+
+#ifndef MSG_DONTWAIT
+ #define MSG_DONTWAIT 0
+#endif
+
+#ifndef MSG_NOSIGNAL
+ #define MSG_NOSIGNAL 0
+#endif
+
+#define RFC5424_MAXSIZE 2048
+#define RFC3164_MAXSIZE 1024
+
+struct syslog_msg {
+ int severity;
+ int facility;
+ flb_sds_t hostname;
+ flb_sds_t appname;
+ flb_sds_t procid;
+ flb_sds_t msgid;
+ flb_sds_t sd;
+ flb_sds_t message;
+};
+
+static const char *rfc3164_mon[] = {"Jan", "Feb", "Mar", "Apr",
+ "May", "Jun", "Jul", "Aug",
+ "Sep", "Oct", "Nov", "Dec"};
+
+static struct {
+ char *name;
+ int len;
+ int value;
+} syslog_severity[] = {
+ { "emerg", 5, 0 },
+ { "alert", 5, 1 },
+ { "crit", 4, 2 },
+ { "err", 3, 3 },
+ { "warning", 7, 4 },
+ { "notice", 6, 5 },
+ { "info", 4, 6 },
+ { "debug", 5, 7 },
+ { NULL, 0,-1 }
+};
+
+static struct {
+ char *name;
+ int len;
+ int value;
+} syslog_facility[] = {
+ { "kern", 4, 0 },
+ { "user", 4, 1 },
+ { "mail", 4, 2 },
+ { "daemon", 6, 3 },
+ { "auth", 4, 4 },
+ { "syslog", 6, 5 },
+ { "lpr", 3, 6 },
+ { "news", 4, 7 },
+ { "uucp", 4, 8 },
+ { "cron", 4, 9 },
+ { "authpriv", 8, 10 },
+ { "ftp", 3, 11 },
+ { "ntp", 3, 12 },
+ { "security", 8, 13 },
+ { "console", 7, 14 },
+ { "local0", 6, 16 },
+ { "local1", 6, 17 },
+ { "local2", 6, 18 },
+ { "local3", 6, 19 },
+ { "local4", 6, 20 },
+ { "local5", 6, 21 },
+ { "local6", 6, 22 },
+ { "local7", 6, 23 },
+ { NULL, 0,-1 },
+};
+
+/* '"', '\' ']' */
+static char rfc5424_sp_value[256] = {
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0,
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0,
+ 0, 0,'"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0,
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0,
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0,
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0,'\\',']', 0, 0,
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0,
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0,
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0,
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0,
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0,
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0,
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0,
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0,
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0,
+ 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , 0 , 0, 0
+};
+
+/* '=', ' ', ']', '"' */
+static char rfc5424_sp_name[256] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static flb_sds_t syslog_rfc5424(flb_sds_t *s, struct flb_time *tms,
+ struct syslog_msg *msg)
+{
+ struct tm tm;
+ flb_sds_t tmp;
+ uint8_t prival;
+
+ prival = (msg->facility << 3) + msg->severity;
+
+ if (gmtime_r(&(tms->tm.tv_sec), &tm) == NULL) {
+ return NULL;
+ }
+
+ tmp = flb_sds_printf(s, "<%i>%i %d-%02d-%02dT%02d:%02d:%02d.%06"PRIu64"Z ",
+ prival, 1, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ (uint64_t) tms->tm.tv_nsec/1000);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+
+ if (msg->hostname) {
+ int len = flb_sds_len(msg->hostname);
+ tmp = flb_sds_cat(*s, msg->hostname, len > 255 ? 255 : len);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+ else {
+ tmp = flb_sds_cat(*s, "-" , 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+
+ tmp = flb_sds_cat(*s, " ", 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+
+ if (msg->appname) {
+ int len = flb_sds_len(msg->appname);
+ tmp = flb_sds_cat(*s, msg->appname, len > 48 ? 48 : len);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+ else {
+ tmp = flb_sds_cat(*s, "-" , 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+
+ tmp = flb_sds_cat(*s, " ", 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+
+ if (msg->procid) {
+ int len = flb_sds_len(msg->procid);
+ tmp = flb_sds_cat(*s, msg->procid, len > 128 ? 128 : len);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+ else {
+ tmp = flb_sds_cat(*s, "-" , 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+
+ tmp = flb_sds_cat(*s, " ", 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+
+ if (msg->msgid) {
+ int len = flb_sds_len(msg->msgid);
+ tmp = flb_sds_cat(*s, msg->msgid, len > 32 ? 32 : len);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+ else {
+ tmp = flb_sds_cat(*s, "-" , 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+
+ tmp = flb_sds_cat(*s, " ", 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+
+ if (msg->sd) {
+ tmp = flb_sds_cat(*s, msg->sd, flb_sds_len(msg->sd));
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+ else {
+ tmp = flb_sds_cat(*s, "-" , 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+
+ if (msg->message) {
+ int len = flb_sds_len(msg->message);
+ tmp = flb_sds_cat(*s, " \xef\xbb\xbf", 4);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ tmp = flb_sds_cat(*s, msg->message, len);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+
+ return *s;
+}
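+
+/*
+ * Illustrative output (values assumed): with facility "user" (1) and severity
+ * "notice" (5), PRI is 1 * 8 + 5 = 13, so a record carrying a hostname,
+ * appname and procid but no msgid or structured data is rendered roughly as:
+ *
+ *   <13>1 2024-05-05T12:08:03.000000Z myhost myapp 4321 - - <BOM>Hello world
+ *
+ * where <BOM> stands for the UTF-8 byte order mark written before the message.
+ */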
+
+static flb_sds_t syslog_rfc3164 (flb_sds_t *s, struct flb_time *tms,
+ struct syslog_msg *msg)
+{
+ struct tm tm;
+ flb_sds_t tmp;
+ uint8_t prival;
+
+ prival = (msg->facility << 3) + msg->severity;
+
+ if (gmtime_r(&(tms->tm.tv_sec), &tm) == NULL) {
+ return NULL;
+ }
+
+ tmp = flb_sds_printf(s, "<%i>%s %2d %02d:%02d:%02d ", prival,
+ rfc3164_mon[tm.tm_mon], tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+
+ if (msg->hostname) {
+ tmp = flb_sds_cat(*s, msg->hostname, flb_sds_len(msg->hostname));
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ tmp = flb_sds_cat(*s, " ", 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+
+ if (msg->appname) {
+ tmp = flb_sds_cat(*s, msg->appname, flb_sds_len(msg->appname));
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ if (msg->procid) {
+ tmp = flb_sds_cat(*s, "[" , 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ tmp = flb_sds_cat(*s, msg->procid, flb_sds_len(msg->procid));
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ tmp = flb_sds_cat(*s, "]" , 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+ tmp = flb_sds_cat(*s, ": " , 2);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+
+ if (msg->message) {
+ tmp = flb_sds_cat(*s, msg->message, flb_sds_len(msg->message));
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+
+ return *s;
+}
+
+static flb_sds_t msgpack_to_sd(struct flb_syslog *ctx,
+ flb_sds_t *s, const char *sd, int sd_len,
+ msgpack_object *o)
+{
+ flb_sds_t tmp;
+ int i;
+ int loop;
+ int n, start_len, end_len;
+
+ if (*s == NULL) {
+ *s = flb_sds_create_size(512);
+ if (*s == NULL) {
+ return NULL;
+ }
+ }
+
+ tmp = flb_sds_cat(*s, "[" , 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+
+ start_len = flb_sds_len(*s);
+ if (ctx->allow_longer_sd_id != FLB_TRUE && sd_len > 32) {
+ /*
+ * RFC5424 defines
+ * SD-NAME = 1*32PRINTUSASCII
+ * ; except '=', SP, ']', %d34 (")
+ *
+ * https://www.rfc-editor.org/rfc/rfc5424#section-6
+ */
+ sd_len = 32;
+ }
+ tmp = flb_sds_cat(*s, sd, sd_len);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+
+ end_len = flb_sds_len(*s);
+ for(n=start_len; n < end_len; n++) {
+ if (!rfc5424_sp_name[(unsigned char)(*s)[n]]) {
+ (*s)[n] = '_';
+ }
+ }
+
+ loop = o->via.map.size;
+ if (loop != 0) {
+ msgpack_object_kv *p = o->via.map.ptr;
+ for (i = 0; i < loop; i++) {
+ char temp[48] = {0};
+ const char *key = NULL;
+ int key_len = 0;
+ const char *val = NULL;
+ int val_len = 0;
+
+ msgpack_object *k = &p[i].key;
+ msgpack_object *v = &p[i].val;
+
+ if (k->type != MSGPACK_OBJECT_BIN && k->type != MSGPACK_OBJECT_STR) {
+ continue;
+ }
+
+ if (k->type == MSGPACK_OBJECT_STR) {
+ key = k->via.str.ptr;
+ key_len = k->via.str.size;
+ }
+ else {
+ key = k->via.bin.ptr;
+ key_len = k->via.bin.size;
+ }
+
+ if (v->type == MSGPACK_OBJECT_BOOLEAN) {
+ val = v->via.boolean ? "true" : "false";
+ val_len = v->via.boolean ? 4 : 5;
+ }
+ else if (v->type == MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ val = temp;
+ val_len = snprintf(temp, sizeof(temp) - 1,
+ "%" PRIu64, v->via.u64);
+ }
+ else if (v->type == MSGPACK_OBJECT_NEGATIVE_INTEGER) {
+ val = temp;
+ val_len = snprintf(temp, sizeof(temp) - 1,
+ "%" PRId64, v->via.i64);
+ }
+ else if (v->type == MSGPACK_OBJECT_FLOAT) {
+ val = temp;
+ val_len = snprintf(temp, sizeof(temp) - 1,
+ "%f", v->via.f64);
+ }
+ else if (v->type == MSGPACK_OBJECT_STR) {
+ /* String value */
+ val = v->via.str.ptr;
+ val_len = v->via.str.size;
+ }
+ else if (v->type == MSGPACK_OBJECT_BIN) {
+ /* Bin value */
+ val = v->via.bin.ptr;
+ val_len = v->via.bin.size;
+ }
+
+ if (!val || !key) {
+ continue;
+ }
+
+ tmp = flb_sds_cat(*s, " " , 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+
+ start_len = flb_sds_len(*s);
+ if (ctx->allow_longer_sd_id != FLB_TRUE && key_len > 32 ) {
+ /*
+ * RFC5424 defines
+ * PARAM-NAME = SD-NAME
+ * SD-NAME = 1*32PRINTUSASCII
+ * ; except '=', SP, ']', %d34 (")
+ *
+ * https://www.rfc-editor.org/rfc/rfc5424#section-6
+ */
+ key_len = 32;
+ }
+ tmp = flb_sds_cat(*s, key, key_len);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+
+ end_len = flb_sds_len(*s);
+ for(n=start_len; n < end_len; n++) {
+ if (!rfc5424_sp_name[(unsigned char)(*s)[n]]) {
+ (*s)[n] = '_';
+ }
+ }
+
+ tmp = flb_sds_cat(*s, "=\"" , 2);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+
+ tmp = flb_sds_cat_esc(*s, val , val_len,
+ rfc5424_sp_value, sizeof(rfc5424_sp_value));
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+
+ tmp = flb_sds_cat(*s, "\"" , 1);
+ if (!tmp) {
+ return NULL;
+ }
+ *s = tmp;
+ }
+ }
+
+ tmp = flb_sds_cat(*s, "]" , 1);
+ if (!tmp) return NULL;
+ *s = tmp;
+
+ return *s;
+}
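+
+/*
+ * Illustrative sketch (assumed input): with the SD-ID "mySDID" and a map
+ * {"key": "value", "n": 7}, the generated structured data element is:
+ *
+ *   [mySDID key="value" n="7"]
+ *
+ * Characters not allowed in SD-NAMEs are replaced with '_', and '"', '\' and
+ * ']' inside parameter values are escaped.
+ */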
+
+static int msgpack_to_syslog(struct flb_syslog *ctx, msgpack_object *o,
+ struct syslog_msg *msg)
+{
+ int i;
+ int loop;
+ struct mk_list *head;
+ struct flb_config_map_val *mv;
+
+ if (o == NULL) {
+ return -1;
+ }
+
+ loop = o->via.map.size;
+ if (loop != 0) {
+ msgpack_object_kv *p = o->via.map.ptr;
+
+ for (i = 0; i < loop; i++) {
+ char temp[48] = {0};
+ const char *key = NULL;
+ int key_len = 0;
+ const char *val = NULL;
+ int val_len = 0;
+
+ msgpack_object *k = &p[i].key;
+ msgpack_object *v = &p[i].val;
+
+ if (k->type != MSGPACK_OBJECT_BIN && k->type != MSGPACK_OBJECT_STR){
+ continue;
+ }
+
+ if (k->type == MSGPACK_OBJECT_STR) {
+ key = k->via.str.ptr;
+ key_len = k->via.str.size;
+ }
+ else {
+ key = k->via.bin.ptr;
+ key_len = k->via.bin.size;
+ }
+
+ if (v->type == MSGPACK_OBJECT_MAP) {
+ if (ctx->sd_keys) {
+ flb_config_map_foreach(head, mv, ctx->sd_keys) {
+ if ((key_len == flb_sds_len(mv->val.str)) &&
+ strncmp(key, mv->val.str, flb_sds_len(mv->val.str)) == 0) {
+ msgpack_to_sd(ctx, &(msg->sd), key, key_len, v);
+ break;
+ }
+ }
+ }
+ continue;
+ }
+
+ if (v->type == MSGPACK_OBJECT_BOOLEAN) {
+ val = v->via.boolean ? "true" : "false";
+ val_len = v->via.boolean ? 4 : 5;
+ }
+ else if (v->type == MSGPACK_OBJECT_POSITIVE_INTEGER) {
+ val = temp;
+ val_len = snprintf(temp, sizeof(temp) - 1,
+ "%" PRIu64, v->via.u64);
+ }
+ else if (v->type == MSGPACK_OBJECT_NEGATIVE_INTEGER) {
+ val = temp;
+ val_len = snprintf(temp, sizeof(temp) - 1,
+ "%" PRId64, v->via.i64);
+ }
+ else if (v->type == MSGPACK_OBJECT_FLOAT) {
+ val = temp;
+ val_len = snprintf(temp, sizeof(temp) - 1,
+ "%f", v->via.f64);
+ }
+ else if (v->type == MSGPACK_OBJECT_STR) {
+ /* String value */
+ val = v->via.str.ptr;
+ val_len = v->via.str.size;
+ }
+ else if (v->type == MSGPACK_OBJECT_BIN) {
+ /* Bin value */
+ val = v->via.bin.ptr;
+ val_len = v->via.bin.size;
+ }
+
+ if (!val || !key) {
+ continue;
+ }
+
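+            /*
+             * Route well-known keys (severity, facility, hostname, appname,
+             * procid, msgid, message) into the corresponding syslog fields;
+             * only the first occurrence of each key is used.
+             */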
+ if ((ctx->severity_key != NULL) &&
+ flb_sds_cmp(ctx->severity_key, key, key_len) == 0) {
+ if (msg->severity == -1) {
+ if ((val_len == 1) && (val[0] >= '0' && val[0] <= '7')) {
+ msg->severity = val[0]-'0';
+ }
+ else {
+                    int i;
+                    for (i=0; syslog_severity[i].name != NULL; i++) {
+                        if ((syslog_severity[i].len == val_len) &&
+                            (!strncasecmp(syslog_severity[i].name, val, val_len))) {
+                            msg->severity = syslog_severity[i].value;
+                            break;
+                        }
+                    }
+                    if (!syslog_severity[i].name) {
+                        flb_plg_warn(ctx->ins, "invalid severity: '%.*s'",
+                            val_len, val);
+                    }
+ }
+ }
+ }
+ else if ((ctx->facility_key != NULL) &&
+ flb_sds_cmp(ctx->facility_key, key, key_len) == 0) {
+ if (msg->facility == -1) {
+ if ((val_len == 1) && (val[0] >= '0' && val[0] <= '9')) {
+ msg->facility = val[0]-'0';
+ }
+ else if ((val_len == 2) &&
+ (val[0] >= '0' && val[0] <= '2') &&
+ (val[1] >= '0' && val[1] <= '9')) {
+ msg->facility = (val[0]-'0')*10;
+ msg->facility += (val[1]-'0');
+ if (!((msg->facility >= 0) && (msg->facility <=23))) {
+ flb_plg_warn(ctx->ins, "invalid facility: '%.*s'",
+ val_len, val);
+ msg->facility= -1;
+ }
+ }
+ else {
+                    int i;
+                    for (i=0; syslog_facility[i].name != NULL; i++) {
+                        if ((syslog_facility[i].len == val_len) &&
+                            (!strncasecmp(syslog_facility[i].name, val, val_len))) {
+                            msg->facility = syslog_facility[i].value;
+                            break;
+                        }
+                    }
+                    if (!syslog_facility[i].name) {
+                        flb_plg_warn(ctx->ins, "invalid facility: '%.*s'",
+                            val_len, val);
+                    }
+ }
+ }
+ }
+ else if ((ctx->hostname_key != NULL) &&
+ flb_sds_cmp(ctx->hostname_key, key, key_len) == 0) {
+ if (!msg->hostname) {
+ msg->hostname = flb_sds_create_len(val, val_len);
+ }
+ }
+ else if ((ctx->appname_key != NULL) &&
+ flb_sds_cmp(ctx->appname_key, key, key_len) == 0) {
+ if (!msg->appname) {
+ msg->appname = flb_sds_create_len(val, val_len);
+ }
+ }
+ else if ((ctx->procid_key != NULL) &&
+ flb_sds_cmp(ctx->procid_key, key, key_len) == 0) {
+ if (!msg->procid) {
+ msg->procid = flb_sds_create_len(val, val_len);
+ }
+ }
+ else if ((ctx->msgid_key != NULL) &&
+ flb_sds_cmp(ctx->msgid_key, key, key_len) == 0) {
+ if (!msg->msgid) {
+ msg->msgid = flb_sds_create_len(val, val_len);
+ }
+ }
+ else if ((ctx->message_key != NULL) &&
+ flb_sds_cmp(ctx->message_key, key, key_len) == 0) {
+ if (!msg->message) {
+ msg->message = flb_sds_create_len(val, val_len);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static flb_sds_t syslog_format(struct flb_syslog *ctx, msgpack_object *o,
+ flb_sds_t *s, struct flb_time *tm)
+{
+ struct syslog_msg msg;
+ flb_sds_t tmp;
+ flb_sds_t ret_sds;
+ int ret;
+
+ msg.severity = -1;
+ msg.facility = -1;
+ msg.hostname = NULL;
+ msg.appname = NULL;
+ msg.procid = NULL;
+ msg.msgid = NULL;
+ msg.sd = NULL;
+ msg.message = NULL;
+
+ ret = msgpack_to_syslog(ctx, o, &msg);
+ if (!ret) {
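+        /* Fall back to the configured presets for any field missing from the record */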
+ if (msg.severity < 0) {
+ msg.severity = ctx->severity_preset;
+ }
+ if (msg.facility < 0) {
+ msg.facility = ctx->facility_preset;
+ }
+ if (msg.hostname == NULL && ctx->hostname_preset) {
+ msg.hostname = flb_sds_create(ctx->hostname_preset);
+ }
+ if (msg.appname == NULL && ctx->appname_preset) {
+ msg.appname = flb_sds_create(ctx->appname_preset);
+ }
+ if (msg.procid == NULL && ctx->procid_preset) {
+ msg.procid = flb_sds_create(ctx->procid_preset);
+ }
+ if (msg.msgid == NULL && ctx->msgid_preset) {
+ msg.msgid = flb_sds_create(ctx->msgid_preset);
+ }
+
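+        /*
+         * Render the header for the selected RFC; an RFC 5424 message has the
+         * shape: <PRI>1 TIMESTAMP HOSTNAME APP-NAME PROCID MSGID [SD] MSG
+         */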
+ if (ctx->parsed_format == FLB_SYSLOG_RFC3164) {
+ tmp = syslog_rfc3164(s, tm, &msg);
+ }
+ else {
+ tmp = syslog_rfc5424(s, tm, &msg);
+ }
+
+ if (!tmp) {
+ ret_sds = NULL;
+ goto clean;
+ }
+ *s = tmp;
+
+ if (flb_sds_len(*s) > ctx->maxsize) {
+ flb_sds_len_set(*s, ctx->maxsize);
+ }
+
+ if (ctx->parsed_mode != FLB_SYSLOG_UDP) {
+ tmp = flb_sds_cat(*s, "\n", 1);
+ if (!tmp) {
+ ret_sds = NULL;
+ goto clean;
+ }
+ *s = tmp;
+ }
+ }
+ else {
+ ret_sds = NULL;
+ goto clean;
+ }
+
+ ret_sds = *s;
+clean:
+ flb_sds_destroy(msg.hostname);
+ flb_sds_destroy(msg.appname);
+ flb_sds_destroy(msg.procid);
+ flb_sds_destroy(msg.msgid);
+ flb_sds_destroy(msg.sd);
+ flb_sds_destroy(msg.message);
+
+ return ret_sds;
+}
+
+static void cb_syslog_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ struct flb_syslog *ctx = out_context;
+ flb_sds_t s;
+ flb_sds_t tmp;
+ size_t bytes_sent;
+ msgpack_object map;
+ struct flb_connection *u_conn = NULL;
+ int ret;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ if (ctx->parsed_mode != FLB_SYSLOG_UDP) {
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "no upstream connections available");
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ }
+
+ s = flb_sds_create_size(ctx->maxsize);
+ if (s == NULL) {
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder,
+ (char *) event_chunk->data,
+ event_chunk->size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ flb_sds_destroy(s);
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ map = *log_event.body;
+
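+        /* Reuse the working buffer: reset its length and format this record */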
+ flb_sds_len_set(s, 0);
+
+ tmp = syslog_format(ctx, &map, &s, &log_event.timestamp);
+ if (tmp != NULL) {
+ s = tmp;
+ if (ctx->parsed_mode == FLB_SYSLOG_UDP) {
+ ret = send(ctx->fd, s, flb_sds_len(s), MSG_DONTWAIT | MSG_NOSIGNAL);
+ if (ret == -1) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_sds_destroy(s);
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ }
+ else {
+ ret = flb_io_net_write(u_conn,
+ s, flb_sds_len(s), &bytes_sent);
+ if (ret == -1) {
+ flb_errno();
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_upstream_conn_release(u_conn);
+ flb_sds_destroy(s);
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ }
+ }
+ else {
+            flb_plg_error(ctx->ins, "error formatting message");
+ }
+ }
+
+ flb_sds_destroy(s);
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ if (ctx->parsed_mode != FLB_SYSLOG_UDP) {
+ flb_upstream_conn_release(u_conn);
+ }
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int cb_syslog_init(struct flb_output_instance *ins, struct flb_config *config,
+ void *data)
+{
+ int io_flags;
+ struct flb_syslog *ctx = NULL;
+
+ /* Set default network configuration */
+ flb_output_net_default("127.0.0.1", 514, ins);
+
+ /* Create config context */
+ ctx = flb_syslog_config_create(ins, config);
+ if (ctx == NULL) {
+ flb_plg_error(ins, "error configuring plugin");
+ return -1;
+ }
+
+ if (ctx->maxsize < 0) {
+ if (ctx->parsed_format == FLB_SYSLOG_RFC3164) {
+ ctx->maxsize = RFC3164_MAXSIZE;
+ }
+ else {
+ ctx->maxsize = RFC5424_MAXSIZE;
+ }
+ }
+
+ ctx->fd = -1;
+ if (ctx->parsed_mode == FLB_SYSLOG_UDP) {
+ ctx->fd = flb_net_udp_connect(ins->host.name, ins->host.port,
+ ins->net_setup.source_address);
+ if (ctx->fd < 0) {
+ flb_syslog_config_destroy(ctx);
+ return -1;
+ }
+ }
+ else {
+
+ /* use TLS ? */
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ }
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ ctx->u = flb_upstream_create(config, ins->host.name, ins->host.port,
+ io_flags, ins->tls);
+ if (!(ctx->u)) {
+ flb_syslog_config_destroy(ctx);
+ return -1;
+ }
+ flb_output_upstream_set(ctx->u, ins);
+ }
+
+ /* Set the plugin context */
+ flb_output_set_context(ins, ctx);
+
+ flb_plg_info(ctx->ins, "setup done for %s:%i (TLS=%s)",
+ ins->host.name, ins->host.port,
+ ins->use_tls ? "on" : "off");
+ return 0;
+}
+
+static int cb_syslog_exit(void *data, struct flb_config *config)
+{
+ struct flb_syslog *ctx = data;
+
+ if (ctx == NULL) {
+ return 0;
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ if (ctx->fd > 0) {
+ close(ctx->fd);
+ }
+
+ flb_syslog_config_destroy(ctx);
+
+ return 0;
+}
+
+
+/* for testing */
+static int cb_syslog_format_test(struct flb_config *config,
+ struct flb_input_instance *ins,
+ void *plugin_context,
+ void *flush_ctx,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_data, size_t *out_size)
+{
+ struct flb_syslog *ctx = plugin_context;
+ flb_sds_t tmp;
+ flb_sds_t s;
+ msgpack_object map;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ s = flb_sds_create_size(ctx->maxsize);
+ if (s == NULL) {
+ flb_error("flb_sds_create_size failed");
+ return -1;
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ flb_sds_destroy(s);
+
+ return -1;
+ }
+
+ flb_log_event_decoder_next(&log_decoder, &log_event);
+ ret = flb_log_event_decoder_get_last_result(&log_decoder);
+    if (ret != FLB_EVENT_DECODER_SUCCESS) {
+        flb_error("flb_log_event_decoder_next failed");
+
+        flb_log_event_decoder_destroy(&log_decoder);
+        flb_sds_destroy(s);
+
+        return -1;
+    }
+
+ map = *log_event.body;
+ flb_sds_len_set(s, 0);
+ tmp = syslog_format(ctx, &map, &s, &log_event.timestamp);
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+    if (tmp == NULL) {
+        flb_error("syslog_format returned NULL");
+        flb_sds_destroy(s);
+        return -1;
+    }
+
+ *out_data = tmp;
+ *out_size = flb_sds_len(tmp);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "mode", "udp",
+ 0, FLB_TRUE, offsetof(struct flb_syslog, mode),
+     "Set the desired transport type; the available options are 'tcp' and 'udp'. If you need "
+     "a TLS secure channel, choose 'tcp' mode here and enable the 'tls' option separately."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "syslog_format", "rfc5424",
+ 0, FLB_TRUE, offsetof(struct flb_syslog, format),
+     "Specify the Syslog protocol format to use; the available options are 'rfc3164' "
+     "and 'rfc5424'."
+ },
+
+ {
+ FLB_CONFIG_MAP_SIZE, "syslog_maxsize", "0",
+ 0, FLB_TRUE, offsetof(struct flb_syslog, maxsize),
+     "Set the maximum size allowed per message. The value must be an integer "
+     "representing the number of bytes allowed. If no value is provided, the "
+     "default size depends on the protocol version specified by syslog_format: "
+     "rfc3164 sets the maximum size to 1024 bytes, while rfc5424 sets it to "
+     "2048 bytes."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "syslog_severity_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, severity_key),
+ "Specify the name of the key from the original record that contains the Syslog "
+ "severity number. This configuration is optional."
+ },
+
+ {
+ FLB_CONFIG_MAP_INT, "syslog_severity_preset", "6",
+ 0, FLB_TRUE, offsetof(struct flb_syslog, severity_preset),
+ "Specify the preset severity number. It must be 0-7. "
+     "This configuration is optional."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "syslog_facility_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, facility_key),
+ "Specify the name of the key from the original record that contains the Syslog "
+ "facility number. This configuration is optional."
+ },
+
+ {
+ FLB_CONFIG_MAP_INT, "syslog_facility_preset", "1",
+ 0, FLB_TRUE, offsetof(struct flb_syslog, facility_preset),
+ "Specify the preset facility number. It must be 0-23. "
+     "This configuration is optional."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "syslog_hostname_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, hostname_key),
+ "Specify the key name from the original record that contains the hostname that "
+ "generated the message. This configuration is optional."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "syslog_hostname_preset", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, hostname_preset),
+ "Specify the preset hostname. This configuration is optional."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "syslog_appname_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, appname_key),
+ "Specify the key name from the original record that contains the application "
+ "name that generated the message. This configuration is optional."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "syslog_appname_preset", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, appname_preset),
+ "Specify the preset appname. This configuration is optional."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "syslog_procid_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, procid_key),
+ "Specify the key name from the original record that contains the Process ID "
+ "that generated the message. This configuration is optional."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "syslog_procid_preset", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, procid_preset),
+ "Specify the preset procid. This configuration is optional."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "syslog_msgid_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, msgid_key),
+ "Specify the key name from the original record that contains the Message ID "
+ "associated to the message. This configuration is optional."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "syslog_msgid_preset", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, msgid_preset),
+ "Specify the preset msgid. This configuration is optional."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "syslog_sd_key", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_syslog, sd_keys),
+ "Specify the key name from the original record that contains the "
+     "Structured Data (SD) content. If set, the value of the key must be a map. "
+     "This option can be set multiple times."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "syslog_message_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_syslog, message_key),
+ "Specify the key name that contains the message to deliver. Note that if "
+     "this property is not set, the message will be empty."
+ },
+
+ {
+ FLB_CONFIG_MAP_BOOL, "allow_longer_sd_id", "false",
+ 0, FLB_TRUE, offsetof(struct flb_syslog, allow_longer_sd_id),
+     "If true, Fluent Bit allows an SD-ID longer than 32 characters. "
+     "Note that such an SD-ID violates RFC 5424."
+ },
+
+ /* EOF */
+ {0}
+};
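+
+/*
+ * Illustrative configuration sketch (not part of the original sources): the
+ * snippet below shows how the properties declared in config_map above can be
+ * combined in a fluent-bit configuration file. The host, key names and values
+ * are placeholders assumed for the example only.
+ *
+ *   [OUTPUT]
+ *       name                 syslog
+ *       match                *
+ *       host                 syslog.example.com
+ *       port                 514
+ *       mode                 tcp
+ *       syslog_format        rfc5424
+ *       syslog_maxsize       2048
+ *       syslog_severity_key  severity
+ *       syslog_hostname_key  hostname
+ *       syslog_appname_key   app
+ *       syslog_message_key   log
+ */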
+
+/* Plugin reference */
+struct flb_output_plugin out_syslog_plugin = {
+ .name = "syslog",
+ .description = "Syslog",
+ .cb_init = cb_syslog_init,
+ .cb_pre_run = NULL,
+ .cb_flush = cb_syslog_flush,
+ .cb_exit = cb_syslog_exit,
+
+ /* Configuration */
+ .config_map = config_map,
+
+ /* for testing */
+ .test_formatter.callback = cb_syslog_format_test,
+
+ /* Plugin flags */
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_syslog/syslog_conf.c b/src/fluent-bit/plugins/out_syslog/syslog_conf.c
new file mode 100644
index 000000000..ea16fce26
--- /dev/null
+++ b/src/fluent-bit/plugins/out_syslog/syslog_conf.c
@@ -0,0 +1,162 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_kv.h>
+
+#include "syslog_conf.h"
+
+static int is_valid_severity(struct flb_output_instance *ins, int val, int format)
+{
+    /* check the instance pointer before it is used for logging */
+    if (ins == NULL) {
+        flb_error("[%s] arg is null. ins=%p", __FUNCTION__, ins);
+        return -1;
+    }
+
+    if (format != FLB_SYSLOG_RFC5424 && format != FLB_SYSLOG_RFC3164) {
+        flb_plg_error(ins, "[%s] unknown syslog format.", __FUNCTION__);
+        return -1;
+    }
+
+    if (val < 0 || val > 7) {
+        flb_plg_error(ins, "[%s] invalid severity level %d. It should be 0-7.", __FUNCTION__, val);
+        return -1;
+    }
+
+    return 0;
+}
+
+static int is_valid_facility(struct flb_output_instance *ins, int val, int format)
+{
+    /* check the instance pointer before it is used for logging */
+    if (ins == NULL) {
+        flb_error("[%s] arg is null. ins=%p", __FUNCTION__, ins);
+        return -1;
+    }
+
+    if (format != FLB_SYSLOG_RFC5424 && format != FLB_SYSLOG_RFC3164) {
+        flb_plg_error(ins, "[%s] unknown syslog format.", __FUNCTION__);
+        return -1;
+    }
+
+    if (val < 0 || val > 23) {
+        flb_plg_error(ins, "[%s] invalid facility level %d. It should be 0-23.", __FUNCTION__, val);
+        return -1;
+    }
+    return 0;
+}
+
+
+struct flb_syslog *flb_syslog_config_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ const char *tmp;
+ struct flb_syslog *ctx = NULL;
+
+ /* Allocate plugin context */
+ ctx = flb_calloc(1, sizeof(struct flb_syslog));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ ctx->parsed_mode = FLB_SYSLOG_UDP;
+ ctx->parsed_format = FLB_SYSLOG_RFC5424;
+ ctx->maxsize = -1;
+
+ /* Populate context with config map defaults and incoming properties */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "configuration error");
+ flb_syslog_config_destroy(ctx);
+ return NULL;
+ }
+
+ /* Set context */
+ flb_output_set_context(ins, ctx);
+
+ /* Config Mode */
+ tmp = flb_output_get_property("mode", ins);
+ if (tmp) {
+ if (!strcasecmp(tmp, "tcp")) {
+ ctx->parsed_mode = FLB_SYSLOG_TCP;
+ }
+ else if (!strcasecmp(tmp, "tls")) {
+ ctx->parsed_mode = FLB_SYSLOG_TLS;
+ }
+ else if (!strcasecmp(tmp, "udp")) {
+ ctx->parsed_mode = FLB_SYSLOG_UDP;
+ }
+ else {
+ flb_plg_error(ctx->ins, "unknown syslog mode %s", tmp);
+ flb_syslog_config_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ /* syslog_format */
+ tmp = flb_output_get_property("syslog_format", ins);
+ if (tmp) {
+ if (strcasecmp(tmp, "rfc3164") == 0) {
+ ctx->parsed_format = FLB_SYSLOG_RFC3164;
+ }
+ else if (strcasecmp(tmp, "rfc5424") == 0) {
+ ctx->parsed_format = FLB_SYSLOG_RFC5424;
+ }
+ else {
+ flb_plg_error(ctx->ins, "unknown syslog format %s", tmp);
+ flb_syslog_config_destroy(ctx);
+ return NULL;
+ }
+ }
+
+ if (ctx->parsed_format == FLB_SYSLOG_RFC5424 && ctx->allow_longer_sd_id == FLB_TRUE) {
+        flb_plg_warn(ctx->ins, "allow_longer_sd_id is enabled; SD-IDs longer than 32 characters violate RFC 5424.");
+ }
+
+ /* validate preset values */
+ ret = is_valid_severity(ctx->ins, ctx->severity_preset, ctx->parsed_format);
+ if (ret != 0) {
+ flb_syslog_config_destroy(ctx);
+ return NULL;
+ }
+
+ ret = is_valid_facility(ctx->ins, ctx->facility_preset, ctx->parsed_format);
+ if (ret != 0) {
+ flb_syslog_config_destroy(ctx);
+ return NULL;
+ }
+
+
+ /* syslog maxsize */
+ if (ctx->maxsize <= 0) {
+ if (ctx->parsed_format == FLB_SYSLOG_RFC3164) {
+ ctx->maxsize = 1024;
+ }
+ else if (ctx->parsed_format == FLB_SYSLOG_RFC5424) {
+ ctx->maxsize = 2048;
+ }
+ }
+
+ return ctx;
+}
+
+void flb_syslog_config_destroy(struct flb_syslog *ctx)
+{
+ flb_free(ctx);
+}
diff --git a/src/fluent-bit/plugins/out_syslog/syslog_conf.h b/src/fluent-bit/plugins/out_syslog/syslog_conf.h
new file mode 100644
index 000000000..8bdb027a5
--- /dev/null
+++ b/src/fluent-bit/plugins/out_syslog/syslog_conf.h
@@ -0,0 +1,70 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_SYSLOG_CONF_H
+#define FLB_OUT_SYSLOG_CONF_H
+
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_pipe.h>
+
+
+#define FLB_SYSLOG_UDP 0
+#define FLB_SYSLOG_TCP 1
+#define FLB_SYSLOG_TLS 2
+
+#define FLB_SYSLOG_RFC3164 0
+#define FLB_SYSLOG_RFC5424 1
+
+struct flb_syslog {
+ flb_sockfd_t fd;
+ struct flb_upstream *u;
+ flb_sds_t mode;
+ flb_sds_t format;
+ size_t maxsize;
+ flb_sds_t severity_key;
+ flb_sds_t facility_key;
+ flb_sds_t timestamp_key;
+ flb_sds_t hostname_key;
+ flb_sds_t appname_key;
+ flb_sds_t procid_key;
+ flb_sds_t msgid_key;
+ struct mk_list *sd_keys;
+ int allow_longer_sd_id;
+ flb_sds_t message_key;
+
+ /* Preset */
+ int severity_preset;
+ int facility_preset;
+ flb_sds_t hostname_preset;
+ flb_sds_t appname_preset;
+ flb_sds_t procid_preset;
+ flb_sds_t msgid_preset;
+
+ /* Internal */
+ int parsed_mode;
+ int parsed_format;
+ struct flb_output_instance *ins;
+};
+
+struct flb_syslog *flb_syslog_config_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+void flb_syslog_config_destroy(struct flb_syslog *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_tcp/CMakeLists.txt b/src/fluent-bit/plugins/out_tcp/CMakeLists.txt
new file mode 100644
index 000000000..6dd000fdf
--- /dev/null
+++ b/src/fluent-bit/plugins/out_tcp/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ tcp.c
+ tcp_conf.c
+ )
+
+FLB_PLUGIN(out_tcp "${src}" "mk_core")
diff --git a/src/fluent-bit/plugins/out_tcp/tcp.c b/src/fluent-bit/plugins/out_tcp/tcp.c
new file mode 100644
index 000000000..f74730b70
--- /dev/null
+++ b/src/fluent-bit/plugins/out_tcp/tcp.c
@@ -0,0 +1,269 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <msgpack.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#include "tcp.h"
+#include "tcp_conf.h"
+
+static int cb_tcp_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct flb_out_tcp *ctx = NULL;
+ (void) data;
+
+ ctx = flb_tcp_conf_create(ins, config);
+ if (!ctx) {
+ return -1;
+ }
+
+ /* Set the plugin context */
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+}
+
+static int compose_payload(struct flb_out_tcp *ctx,
+ const char *tag, int tag_len,
+ const void *in_data, size_t in_size,
+ void **out_payload, size_t *out_size)
+{
+ int ret;
+ flb_sds_t buf = NULL;
+ flb_sds_t json = NULL;
+ flb_sds_t str;
+ msgpack_object map;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ /* raw message key by using a record accessor */
+ if (ctx->ra_raw_message_key) {
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) in_data, in_size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return -1;
+ }
+
+ buf = flb_sds_create_size(in_size);
+ if (!buf) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ return FLB_ERROR;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+
+ map = *log_event.body;
+
+ str = flb_ra_translate(ctx->ra_raw_message_key, (char *) tag, tag_len, map, NULL);
+ if (!str) {
+ continue;
+ }
+
+ ret = flb_sds_cat_safe(&buf, str, flb_sds_len(str));
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "failed to compose payload from '%s'", str);
+ }
+ flb_sds_destroy(str);
+
+ /* append a new line */
+ flb_sds_cat_safe(&buf, "\n", 1);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ if (flb_sds_len(buf) == 0) {
+ flb_sds_destroy(buf);
+ return FLB_ERROR;
+ }
+
+ *out_payload = buf;
+ *out_size = flb_sds_len(buf);
+ return FLB_OK;
+ }
+
+ if (ctx->out_format == FLB_PACK_JSON_FORMAT_NONE) {
+ /* nothing to do */
+ *out_payload = (void*)in_data;
+ *out_size = in_size;
+ return FLB_OK;
+ }
+
+ json = flb_pack_msgpack_to_json_format(in_data,
+ in_size,
+ ctx->out_format,
+ ctx->json_date_format,
+ ctx->date_key);
+ if (!json) {
+ flb_plg_error(ctx->ins, "error formatting JSON payload");
+ return FLB_ERROR;
+ }
+ *out_payload = (void*)json;
+ *out_size = flb_sds_len(json);
+
+ return FLB_OK;
+}
+
+static void cb_tcp_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret = FLB_ERROR;
+ size_t bytes_sent;
+ struct flb_upstream *u;
+ struct flb_connection *u_conn;
+ struct flb_out_tcp *ctx = out_context;
+ void *out_payload = NULL;
+ size_t out_size = 0;
+ (void) i_ins;
+
+ /* Get upstream context and connection */
+ u = ctx->u;
+ u_conn = flb_upstream_conn_get(u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "no upstream connections available to %s:%i",
+ u->tcp_host, u->tcp_port);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ ret = compose_payload(ctx,
+ event_chunk->tag, flb_sds_len(event_chunk->tag),
+ event_chunk->data, event_chunk->size,
+ &out_payload, &out_size);
+ if (ret != FLB_OK) {
+ flb_upstream_conn_release(u_conn);
+ return FLB_OUTPUT_RETURN(ret);
+ }
+
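+    /*
+     * Write the payload. The raw-key and JSON paths allocated a buffer in
+     * compose_payload() that must be destroyed after writing; the msgpack
+     * path writes the event chunk as-is.
+     */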
+ if (ctx->ra_raw_message_key) {
+ ret = flb_io_net_write(u_conn, out_payload, out_size, &bytes_sent);
+ flb_sds_destroy(out_payload);
+ }
+ else if (ctx->out_format == FLB_PACK_JSON_FORMAT_NONE) {
+ ret = flb_io_net_write(u_conn,
+ event_chunk->data, event_chunk->size,
+ &bytes_sent);
+ }
+ else {
+ ret = flb_io_net_write(u_conn, out_payload, out_size, &bytes_sent);
+ flb_sds_destroy(out_payload);
+ }
+ if (ret == -1) {
+ flb_errno();
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
+static int cb_tcp_exit(void *data, struct flb_config *config)
+{
+ struct flb_out_tcp *ctx = data;
+
+ flb_tcp_conf_destroy(ctx);
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "format", "msgpack",
+ 0, FLB_FALSE, 0,
+ "Specify the payload format, supported formats: msgpack, json, "
+ "json_lines or json_stream."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "json_date_format", "double",
+ 0, FLB_FALSE, 0,
+ FBL_PACK_JSON_DATE_FORMAT_DESCRIPTION
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "json_date_key", "date",
+ 0, FLB_TRUE, offsetof(struct flb_out_tcp, json_date_key),
+ "Specify the name of the date field in output."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "raw_message_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_tcp, raw_message_key),
+        "Specify a key whose value is sent as the raw message payload, one record per line."
+ },
+
+ /* EOF */
+ {0}
+};
+
+static int cb_tcp_format_test(struct flb_config *config,
+ struct flb_input_instance *ins,
+ void *plugin_context,
+ void *flush_ctx,
+ int event_type,
+ const char *tag, int tag_len,
+ const void *data, size_t bytes,
+ void **out_data, size_t *out_size)
+{
+ struct flb_out_tcp *ctx = plugin_context;
+ int ret;
+
+ ret = compose_payload(ctx, tag, tag_len, data, bytes, out_data, out_size);
+ if (ret != FLB_OK) {
+ flb_error("ret=%d", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Plugin reference */
+struct flb_output_plugin out_tcp_plugin = {
+ .name = "tcp",
+ .description = "TCP Output",
+ .cb_init = cb_tcp_init,
+ .cb_flush = cb_tcp_flush,
+ .cb_exit = cb_tcp_exit,
+ .config_map = config_map,
+ /* for testing */
+ .test_formatter.callback = cb_tcp_format_test,
+
+ .workers = 2,
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_tcp/tcp.h b/src/fluent-bit/plugins/out_tcp/tcp.h
new file mode 100644
index 000000000..a133bb3f8
--- /dev/null
+++ b/src/fluent-bit/plugins/out_tcp/tcp.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_TCP_H
+#define FLB_OUT_TCP_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_record_accessor.h>
+
+struct flb_out_tcp {
+ /* Output format */
+ int out_format;
+ flb_sds_t raw_message_key;
+ struct flb_record_accessor *ra_raw_message_key;
+
+ char *host;
+ int port;
+
+ /* Timestamp format */
+ int json_date_format;
+ flb_sds_t json_date_key;
+ flb_sds_t date_key;
+
+ /* Upstream connection to the backend server */
+ struct flb_upstream *u;
+
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_tcp/tcp_conf.c b/src/fluent-bit/plugins/out_tcp/tcp_conf.c
new file mode 100644
index 000000000..7d6e4b0b3
--- /dev/null
+++ b/src/fluent-bit/plugins/out_tcp/tcp_conf.c
@@ -0,0 +1,154 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "tcp.h"
+#include "tcp_conf.h"
+
+struct flb_out_tcp *flb_tcp_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ int io_flags = 0;
+ const char *tmp;
+ struct flb_upstream *upstream;
+ struct flb_out_tcp *ctx = NULL;
+
+ /* Allocate plugin context */
+ ctx = flb_calloc(1, sizeof(struct flb_out_tcp));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Set default network configuration if not set */
+ flb_output_net_default("127.0.0.1", 5170, ins);
+
+ /* Check if SSL/TLS is enabled */
+#ifdef FLB_HAVE_TLS
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ }
+#else
+ io_flags = FLB_IO_TCP;
+#endif
+
+ if (ins->host.ipv6 == FLB_TRUE) {
+ io_flags |= FLB_IO_IPV6;
+ }
+
+ /* raw message key mode */
+ if (ctx->raw_message_key) {
+ ctx->ra_raw_message_key = flb_ra_create(ctx->raw_message_key, FLB_TRUE);
+ if (!ctx->ra_raw_message_key) {
+ flb_plg_error(ctx->ins, "could not create record accessor for raw_message_key");
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+
+ /* Upstream context */
+ upstream = flb_upstream_create(config,
+ ins->host.name,
+ ins->host.port,
+ io_flags, ins->tls);
+ if (!upstream) {
+ flb_plg_error(ctx->ins, "could not create upstream context");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Output format */
+ ctx->out_format = FLB_PACK_JSON_FORMAT_NONE;
+ tmp = flb_output_get_property("format", ins);
+ if (tmp) {
+ ret = flb_pack_to_json_format_type(tmp);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "unrecognized 'format' option '%s'. "
+ "Using 'msgpack'", tmp);
+ }
+ else {
+ ctx->out_format = ret;
+ }
+ }
+
+ /* Date key */
+ ctx->date_key = ctx->json_date_key;
+ tmp = flb_output_get_property("json_date_key", ins);
+ if (tmp) {
+ /* Just check if we have to disable it */
+ if (flb_utils_bool(tmp) == FLB_FALSE) {
+ ctx->date_key = NULL;
+ }
+ }
+
+ /* Date format for JSON output */
+ ctx->json_date_format = FLB_PACK_JSON_DATE_DOUBLE;
+ tmp = flb_output_get_property("json_date_format", ins);
+ if (tmp) {
+ ret = flb_pack_to_json_date_type(tmp);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "unrecognized 'json_date_format' option '%s'. "
+ "Using 'double'", tmp);
+ }
+ else {
+ ctx->json_date_format = ret;
+ }
+ }
+
+ ctx->u = upstream;
+ flb_output_upstream_set(ctx->u, ins);
+
+ ctx->host = ins->host.name;
+ ctx->port = ins->host.port;
+
+ return ctx;
+}
+
+void flb_tcp_conf_destroy(struct flb_out_tcp *ctx)
+{
+ if (!ctx) {
+ return;
+ }
+
+ if (ctx->ra_raw_message_key) {
+ flb_ra_destroy(ctx->ra_raw_message_key);
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ flb_free(ctx);
+ ctx = NULL;
+}
diff --git a/src/fluent-bit/plugins/out_tcp/tcp_conf.h b/src/fluent-bit/plugins/out_tcp/tcp_conf.h
new file mode 100644
index 000000000..c91def5ca
--- /dev/null
+++ b/src/fluent-bit/plugins/out_tcp/tcp_conf.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_TCP_CONF_H
+#define FLB_OUT_TCP_CONF_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+
+#include "tcp.h"
+
+struct flb_out_tcp *flb_tcp_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+void flb_tcp_conf_destroy(struct flb_out_tcp *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_td/CMakeLists.txt b/src/fluent-bit/plugins/out_td/CMakeLists.txt
new file mode 100644
index 000000000..3641be389
--- /dev/null
+++ b/src/fluent-bit/plugins/out_td/CMakeLists.txt
@@ -0,0 +1,7 @@
+set(src
+ td_http.c
+ td_config.c
+ td.c)
+
+FLB_PLUGIN(out_td "${src}" "mk_core")
+target_link_libraries(flb-plugin-out_td)
diff --git a/src/fluent-bit/plugins/out_td/td.c b/src/fluent-bit/plugins/out_td/td.c
new file mode 100644
index 000000000..7836c384a
--- /dev/null
+++ b/src/fluent-bit/plugins/out_td/td.c
@@ -0,0 +1,271 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <msgpack.h>
+
+#include "td.h"
+#include "td_http.h"
+#include "td_config.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <errno.h>
+
+/*
+ * Convert the internal Fluent Bit data representation to the one required
+ * by the Treasure Data cloud service.
+ *
+ * This function returns a new msgpack buffer and stores the bytes length
+ * in the out_size variable.
+ */
+static char *td_format(struct flb_td *ctx, const void *data, size_t bytes, int *out_size)
+{
+ int i;
+ int ret;
+ int n_size;
+ time_t atime;
+ char *buf;
+ struct msgpack_sbuffer mp_sbuf;
+ struct msgpack_packer mp_pck;
+ msgpack_object map;
+ msgpack_sbuffer *sbuf;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ /* Initialize contexts for new output */
+ msgpack_sbuffer_init(&mp_sbuf);
+ msgpack_packer_init(&mp_pck, &mp_sbuf, msgpack_sbuffer_write);
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) data, bytes);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return NULL;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ atime = log_event.timestamp.tm.tv_sec;
+ map = *log_event.body;
+
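+        /* Repack the record, prepending the "time" key expected by Treasure Data */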
+ n_size = map.via.map.size + 1;
+ msgpack_pack_map(&mp_pck, n_size);
+ msgpack_pack_str(&mp_pck, 4);
+ msgpack_pack_str_body(&mp_pck, "time", 4);
+ msgpack_pack_int32(&mp_pck, atime);
+
+ for (i = 0; i < n_size - 1; i++) {
+ msgpack_pack_object(&mp_pck, map.via.map.ptr[i].key);
+ msgpack_pack_object(&mp_pck, map.via.map.ptr[i].val);
+ }
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ /* Create new buffer */
+ sbuf = &mp_sbuf;
+ *out_size = sbuf->size;
+    buf = flb_malloc(sbuf->size);
+    if (!buf) {
+        flb_errno();
+        msgpack_sbuffer_destroy(&mp_sbuf);
+        return NULL;
+    }
+
+ /* set a new buffer and re-initialize our MessagePack context */
+ memcpy(buf, sbuf->data, sbuf->size);
+ msgpack_sbuffer_destroy(&mp_sbuf);
+
+ return buf;
+}
+
+static int cb_td_init(struct flb_output_instance *ins, struct flb_config *config,
+ void *data)
+{
+ struct flb_td *ctx;
+ struct flb_upstream *upstream;
+ (void) data;
+
+ ctx = td_config_init(ins);
+ if (!ctx) {
+ flb_plg_warn(ins, "Error reading configuration");
+ return -1;
+ }
+
+ if (ctx->region == FLB_TD_REGION_US) {
+ flb_output_net_default("api.treasuredata.com", 443, ins);
+ }
+ else if (ctx->region == FLB_TD_REGION_JP) {
+ flb_output_net_default("api.treasuredata.co.jp", 443, ins);
+ }
+
+ upstream = flb_upstream_create(config,
+ ins->host.name,
+ ins->host.port,
+ FLB_IO_TLS, ins->tls);
+ if (!upstream) {
+ flb_free(ctx);
+ return -1;
+ }
+ ctx->u = upstream;
+ flb_output_upstream_set(ctx->u, ins);
+
+ flb_output_set_context(ins, ctx);
+ return 0;
+}
+
+static void cb_td_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret;
+ int bytes_out;
+ char *pack;
+ size_t b_sent;
+ char *body = NULL;
+ struct flb_td *ctx = out_context;
+ struct flb_connection *u_conn;
+ struct flb_http_client *c;
+ (void) i_ins;
+
+ /* Convert format */
+ pack = td_format(ctx, event_chunk->data, event_chunk->size, &bytes_out);
+ if (!pack) {
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+
+ /* Lookup an available connection context */
+ u_conn = flb_upstream_conn_get(ctx->u);
+ if (!u_conn) {
+ flb_plg_error(ctx->ins, "no upstream connections available");
+ flb_free(pack);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Compose request */
+ c = td_http_client(u_conn, pack, bytes_out, &body, ctx, config);
+ if (!c) {
+ flb_free(pack);
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Issue HTTP request */
+ ret = flb_http_do(c, &b_sent);
+
+ /* Release Resources */
+ flb_free(pack);
+ flb_free(body);
+
+ /* Validate HTTP status */
+ if (ret == 0) {
+ /* We expect a HTTP 200 OK */
+ if (c->resp.status != 200) {
+ if (c->resp.payload_size > 0) {
+ flb_plg_warn(ctx->ins, "HTTP status %i\n%s",
+ c->resp.status, c->resp.payload);
+ }
+ else {
+ flb_plg_warn(ctx->ins, "HTTP status %i", c->resp.status);
+ }
+ goto retry;
+ }
+ else {
+ flb_plg_info(ctx->ins, "HTTP status 200 OK");
+ }
+ }
+ else {
+ flb_plg_error(ctx->ins, "http_do=%i", ret);
+ goto retry;
+ }
+
+ /* release */
+ flb_upstream_conn_release(u_conn);
+ flb_http_client_destroy(c);
+
+ FLB_OUTPUT_RETURN(FLB_OK);
+
+ retry:
+ flb_upstream_conn_release(u_conn);
+ flb_http_client_destroy(c);
+
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+}
+
+static int cb_td_exit(void *data, struct flb_config *config)
+{
+ struct flb_td *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ flb_upstream_destroy(ctx->u);
+ flb_free(ctx);
+
+ return 0;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "API", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_td, api),
+ "Set the API key"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Database", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_td, db_name),
+        "Set the database name"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "Table", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_td, db_table),
+ "Set the Database Table"
+        "Set the database table name"
+ {
+ FLB_CONFIG_MAP_STR, "Region", (char *)NULL,
+ 0, FLB_TRUE, offsetof(struct flb_td, region_str),
+ "Set the Region: us or jp"
+ },
+ /* EOF */
+ {0}
+};
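+
+/*
+ * Illustrative configuration sketch (not part of the original sources): the
+ * properties above map to a fluent-bit configuration section roughly like the
+ * following; the API key, database and table names are placeholders.
+ *
+ *   [OUTPUT]
+ *       name      td
+ *       match     *
+ *       API       <your-treasure-data-api-key>
+ *       Database  fluentbit_db
+ *       Table     logs
+ *       Region    us
+ */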
+
+/* Plugin reference */
+struct flb_output_plugin out_td_plugin = {
+ .name = "td",
+ .description = "Treasure Data",
+ .cb_init = cb_td_init,
+ .cb_pre_run = NULL,
+ .cb_flush = cb_td_flush,
+ .cb_exit = cb_td_exit,
+ .config_map = config_map,
+ .flags = FLB_IO_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_td/td.h b/src/fluent-bit/plugins/out_td/td.h
new file mode 100644
index 000000000..1050289ce
--- /dev/null
+++ b/src/fluent-bit/plugins/out_td/td.h
@@ -0,0 +1,24 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_TD_H
+#define FLB_OUT_TD_H
+
+
+#endif
diff --git a/src/fluent-bit/plugins/out_td/td_config.c b/src/fluent-bit/plugins/out_td/td_config.c
new file mode 100644
index 000000000..ac5be6693
--- /dev/null
+++ b/src/fluent-bit/plugins/out_td/td_config.c
@@ -0,0 +1,86 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include "td_config.h"
+#include <stdlib.h>
+
+struct flb_td *td_config_init(struct flb_output_instance *ins)
+{
+ int ret;
+ struct flb_td *ctx;
+
+
+ /* Allocate context */
+ ctx = flb_calloc(1, sizeof(struct flb_td));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+ ctx->fd = -1;
+
+ ret = flb_output_config_map_set(ins, (void *)ctx);
+ if (ret == -1) {
+ flb_plg_error(ins, "unable to load configuration");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ if (ctx->api == NULL) {
+ flb_plg_error(ins, "error reading API key value");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ if (ctx->db_name == NULL) {
+ flb_plg_error(ins, "error reading Database name");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ if (ctx->db_table == NULL) {
+ flb_plg_error(ins, "error reading Table name");
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Lookup desired region */
+ if (ctx->region_str) {
+ if (strcasecmp(ctx->region_str, "us") == 0) {
+ ctx->region = FLB_TD_REGION_US;
+ }
+ else if (strcasecmp(ctx->region_str, "jp") == 0) {
+ ctx->region = FLB_TD_REGION_JP;
+ }
+ else {
+ flb_plg_error(ctx->ins, "invalid region in configuration");
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+ else {
+ ctx->region = FLB_TD_REGION_US;
+ }
+
+ flb_plg_info(ctx->ins, "Treasure Data / database='%s' table='%s'",
+ ctx->db_name, ctx->db_table);
+
+ return ctx;
+}
diff --git a/src/fluent-bit/plugins/out_td/td_config.h b/src/fluent-bit/plugins/out_td/td_config.h
new file mode 100644
index 000000000..f2412fddb
--- /dev/null
+++ b/src/fluent-bit/plugins/out_td/td_config.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_TD_CONFIG_H
+#define FLB_TD_CONFIG_H
+
+#include <fluent-bit/flb_output_plugin.h>
+
+#define FLB_TD_REGION_US 0
+#define FLB_TD_REGION_JP 1
+
+struct flb_td {
+ int fd; /* Socket to destination/backend */
+ int region; /* TD Region end-point */
+ flb_sds_t region_str;
+ const char *api;
+ const char *db_name;
+ const char *db_table;
+ struct flb_upstream *u;
+ struct flb_output_instance *ins;
+};
+
+struct flb_td *td_config_init(struct flb_output_instance *ins);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_td/td_http.c b/src/fluent-bit/plugins/out_td/td_http.c
new file mode 100644
index 000000000..539408044
--- /dev/null
+++ b/src/fluent-bit/plugins/out_td/td_http.c
@@ -0,0 +1,94 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_gzip.h>
+#include <fluent-bit/flb_config.h>
+#include <fluent-bit/flb_http_client.h>
+
+#include "td_config.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+#define TD_HTTP_HEADER_SIZE 512
+
+struct flb_http_client *td_http_client(struct flb_connection *u_conn,
+ void *data, size_t len,
+ char **body,
+ struct flb_td *ctx,
+ struct flb_config *config)
+{
+ int ret;
+ int pos = 0;
+ int api_len;
+ size_t gz_size;
+ void *gz_data;
+ char *tmp;
+ struct flb_http_client *c;
+
+ /* Compress data */
+ ret = flb_gzip_compress(data, len, &gz_data, &gz_size);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "error compressing data");
+ return NULL;
+ }
+
+ /* Compose URI */
+    tmp = flb_malloc(TD_HTTP_HEADER_SIZE);
+    if (!tmp) {
+        flb_free(gz_data);
+        return NULL;
+    }
+    snprintf(tmp, TD_HTTP_HEADER_SIZE,
+             "/v3/table/import/%s/%s/msgpack.gz",
+             ctx->db_name, ctx->db_table);
+
+ /* Create client */
+ c = flb_http_client(u_conn, FLB_HTTP_PUT, tmp,
+ gz_data, gz_size, NULL, 0, NULL, 0);
+ if (!c) {
+ flb_free(tmp);
+ flb_free(gz_data);
+ return NULL;
+ }
+
+ /* Add custom headers */
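+    /* The Authorization header value has the form: "TD1 <api key>" */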
+ tmp[pos++] = 'T';
+ tmp[pos++] = 'D';
+ tmp[pos++] = '1';
+ tmp[pos++] = ' ';
+
+ api_len = strlen(ctx->api);
+ memcpy(tmp + pos, ctx->api, api_len);
+ pos += api_len;
+
+ flb_http_add_header(c,
+ "Authorization", 13,
+ tmp, pos);
+ flb_http_add_header(c,
+ "Content-Type", 12,
+ "application/gzip", 16);
+ flb_free(tmp);
+ *body = gz_data;
+
+ return c;
+}
diff --git a/src/fluent-bit/plugins/out_td/td_http.h b/src/fluent-bit/plugins/out_td/td_http.h
new file mode 100644
index 000000000..30d398fe1
--- /dev/null
+++ b/src/fluent-bit/plugins/out_td/td_http.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_TD_HTTP_H
+#define FLB_OUT_TD_HTTP_H
+
+#include <fluent-bit/flb_config.h>
+#include "td_config.h"
+
+char *td_http_request(void *data, size_t len,
+ size_t *out_len,
+ struct flb_td *ctx, struct flb_config *config);
+
+struct flb_http_client *td_http_client(struct flb_connection *u_conn,
+ void *data, size_t len,
+ char **body,
+ struct flb_td *ctx,
+ struct flb_config *config);
+#endif
diff --git a/src/fluent-bit/plugins/out_udp/CMakeLists.txt b/src/fluent-bit/plugins/out_udp/CMakeLists.txt
new file mode 100644
index 000000000..ad4f7e411
--- /dev/null
+++ b/src/fluent-bit/plugins/out_udp/CMakeLists.txt
@@ -0,0 +1,6 @@
+set(src
+ udp.c
+ udp_conf.c
+ )
+
+FLB_PLUGIN(out_udp "${src}" "mk_core")
diff --git a/src/fluent-bit/plugins/out_udp/udp.c b/src/fluent-bit/plugins/out_udp/udp.c
new file mode 100644
index 000000000..8e0044bfe
--- /dev/null
+++ b/src/fluent-bit/plugins/out_udp/udp.c
@@ -0,0 +1,351 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_str.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_config_map.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <msgpack.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#include "udp.h"
+#include "udp_conf.h"
+
+static int cb_udp_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct flb_out_udp *ctx = NULL;
+ (void) data;
+
+ ctx = flb_udp_conf_create(ins, config);
+ if (!ctx) {
+ return -1;
+ }
+
+ /* Set the plugin context */
+ flb_output_set_context(ins, ctx);
+
+ return 0;
+}
+
+static int deliver_chunks_raw(struct flb_out_udp *ctx,
+ const char *tag, int tag_len,
+ const void *in_data, size_t in_size)
+{
+ int ret;
+ flb_sds_t buf = NULL;
+ flb_sds_t str;
+ msgpack_object map;
+ ssize_t send_result;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ buf = flb_sds_create_size(in_size);
+ if (!buf) {
+ return FLB_ERROR;
+ }
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) in_data, in_size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ flb_sds_destroy(buf);
+
+ return -1;
+ }
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ map = *log_event.body;
+
+ str = flb_ra_translate(ctx->ra_raw_message_key, (char *) tag, tag_len, map, NULL);
+ if (!str) {
+ continue;
+ }
+
+ ret = flb_sds_cat_safe(&buf, str, flb_sds_len(str));
+ if (ret != 0) {
+ flb_plg_error(ctx->ins, "failed to compose payload from '%s'", str);
+ }
+ flb_sds_destroy(str);
+
+ /* append a new line */
+ flb_sds_cat_safe(&buf, "\n", 1);
+
+ if (flb_sds_len(buf) > 65535) {
+ flb_plg_debug(ctx->ins, "record size exceeds maximum datagram size : %zu", flb_sds_len(buf));
+ }
+
+ send_result = send(ctx->endpoint_descriptor,
+ buf,
+ flb_sds_len(buf),
+ 0);
+
+ if (send_result == -1) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_sds_destroy(buf);
+
+ return FLB_RETRY;
+ }
+
+ flb_sds_len_set(buf, 0);
+ buf[0] = '\0';
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_sds_destroy(buf);
+
+ return FLB_OK;
+}
+
+static int deliver_chunks_json(struct flb_out_udp *ctx,
+ const char *tag, int tag_len,
+ const void *in_data, size_t in_size)
+{
+ int ret;
+ size_t off = 0;
+ flb_sds_t json = NULL;
+ ssize_t send_result;
+ size_t previous_offset;
+ int append_new_line;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) in_data, in_size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_ERROR;
+ }
+
+ previous_offset = 0;
+
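+    /*
+     * Format and send one record per datagram: each iteration converts the
+     * msgpack slice between the previous and current decoder offsets into
+     * JSON and writes it to the UDP socket.
+     */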
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ off = log_decoder.offset;
+
+ json = flb_pack_msgpack_to_json_format(&((char *) in_data)[previous_offset],
+ off - previous_offset,
+ ctx->out_format,
+ ctx->json_date_format,
+ ctx->date_key);
+ if (!json) {
+ flb_plg_error(ctx->ins, "error formatting JSON payload");
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_ERROR;
+ }
+
+ previous_offset = off;
+ append_new_line = FLB_FALSE;
+
+ if (flb_sds_len(json) > 0) {
+ if (json[flb_sds_len(json) - 1] != '\n') {
+ append_new_line = FLB_TRUE;
+ }
+
+ if (append_new_line) {
+ ret = flb_sds_cat_safe(&json, "\n", 1);
+
+ if (ret != 0) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_sds_destroy(json);
+
+ return FLB_RETRY;
+ }
+ }
+
+ if (flb_sds_len(json) > 65535) {
+ flb_plg_debug(ctx->ins, "record size exceeds maximum datagram size : %zu", flb_sds_len(json));
+ }
+
+ send_result = send(ctx->endpoint_descriptor,
+ json,
+ flb_sds_len(json),
+ 0);
+
+ if (send_result == -1) {
+ flb_log_event_decoder_destroy(&log_decoder);
+ flb_sds_destroy(json);
+
+ return FLB_RETRY;
+ }
+ }
+
+ flb_sds_destroy(json);
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_OK;
+}
+
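+/*
+ * deliver_chunks_msgpack: forward the original msgpack payload, sending the
+ * raw bytes of each record as an individual UDP datagram without re-encoding.
+ */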
+static int deliver_chunks_msgpack(struct flb_out_udp *ctx,
+ const char *tag, int tag_len,
+ const void *in_data, size_t in_size)
+{
+ size_t off = 0;
+ ssize_t send_result;
+ size_t previous_offset;
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int ret;
+
+ ret = flb_log_event_decoder_init(&log_decoder, (char *) in_data, in_size);
+
+ if (ret != FLB_EVENT_DECODER_SUCCESS) {
+ flb_plg_error(ctx->ins,
+ "Log event decoder initialization error : %d", ret);
+
+ return FLB_RETRY;
+ }
+
+ previous_offset = 0;
+
+ while ((ret = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ off = log_decoder.offset;
+
+ if ((off - previous_offset) > 65535) {
+ flb_plg_debug(ctx->ins, "record size exceeds maximum datagram size : %zu", (off - previous_offset));
+ }
+
+ send_result = send(ctx->endpoint_descriptor,
+ &((char *) in_data)[previous_offset],
+ off - previous_offset,
+ 0);
+
+ if (send_result == -1) {
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_RETRY;
+ }
+
+ previous_offset = off;
+ }
+
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ return FLB_OK;
+}
+
+static void cb_udp_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret = FLB_ERROR;
+ struct flb_out_udp *ctx = out_context;
+
+ (void) i_ins;
+
+ if (ctx->ra_raw_message_key != NULL) {
+ ret = deliver_chunks_raw(ctx,
+ event_chunk->tag,
+ flb_sds_len(event_chunk->tag),
+ event_chunk->data,
+ event_chunk->size);
+ }
+ else if (ctx->out_format == FLB_PACK_JSON_FORMAT_NONE) {
+ ret = deliver_chunks_msgpack(ctx,
+ event_chunk->tag,
+ flb_sds_len(event_chunk->tag),
+ event_chunk->data,
+ event_chunk->size);
+ }
+ else {
+ ret = deliver_chunks_json(ctx,
+ event_chunk->tag,
+ flb_sds_len(event_chunk->tag),
+ event_chunk->data,
+ event_chunk->size);
+ }
+
+ FLB_OUTPUT_RETURN(ret);
+}
+
+static int cb_udp_exit(void *data, struct flb_config *config)
+{
+ struct flb_out_udp *ctx = data;
+
+ flb_udp_conf_destroy(ctx);
+
+ return 0;
+}
+
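+/*
+ * Example usage (illustrative sketch only, the values below are assumptions):
+ *
+ *   [OUTPUT]
+ *       name    udp
+ *       match   *
+ *       host    127.0.0.1
+ *       port    5170
+ *       format  json_lines
+ */
+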
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "format", "json_lines",
+ 0, FLB_FALSE, 0,
+ "Specify the payload format, supported formats: msgpack, json, "
+ "json_lines or json_stream."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "json_date_format", "double",
+ 0, FLB_FALSE, 0,
+ FBL_PACK_JSON_DATE_FORMAT_DESCRIPTION
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "json_date_key", "date",
+ 0, FLB_TRUE, offsetof(struct flb_out_udp, json_date_key),
+ "Specify the name of the date field in output."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "raw_message_key", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_udp, raw_message_key),
+ "use a raw message key for the message."
+ },
+
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_output_plugin out_udp_plugin = {
+ .name = "udp",
+ .description = "UDP Output",
+ .cb_init = cb_udp_init,
+ .cb_flush = cb_udp_flush,
+ .cb_exit = cb_udp_exit,
+ .config_map = config_map,
+
+ .workers = 2,
+ .flags = FLB_OUTPUT_NET,
+};
diff --git a/src/fluent-bit/plugins/out_udp/udp.h b/src/fluent-bit/plugins/out_udp/udp.h
new file mode 100644
index 000000000..522367ee0
--- /dev/null
+++ b/src/fluent-bit/plugins/out_udp/udp.h
@@ -0,0 +1,47 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_UDP_H
+#define FLB_OUT_UDP_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_stream.h>
+#include <fluent-bit/flb_connection.h>
+#include <fluent-bit/flb_record_accessor.h>
+
+struct flb_out_udp {
+ /* Output format */
+ int out_format;
+ flb_sds_t raw_message_key;
+ struct flb_record_accessor *ra_raw_message_key;
+
+ char *host;
+ int port;
+
+ flb_sockfd_t endpoint_descriptor;
+
+ /* Timestamp format */
+ int json_date_format;
+ flb_sds_t json_date_key;
+ flb_sds_t date_key;
+
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_udp/udp_conf.c b/src/fluent-bit/plugins/out_udp/udp_conf.c
new file mode 100644
index 000000000..2f5509d3a
--- /dev/null
+++ b/src/fluent-bit/plugins/out_udp/udp_conf.c
@@ -0,0 +1,135 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_pack.h>
+
+#include "udp.h"
+#include "udp_conf.h"
+
+struct flb_out_udp *flb_udp_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ const char *tmp;
+ struct flb_out_udp *ctx = NULL;
+
+ /* Allocate plugin context */
+ ctx = flb_calloc(1, sizeof(struct flb_out_udp));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Set default network configuration if not set */
+ flb_output_net_default("127.0.0.1", 5170, ins);
+
+ /* raw message key mode */
+ if (ctx->raw_message_key) {
+ ctx->ra_raw_message_key = flb_ra_create(ctx->raw_message_key, FLB_TRUE);
+ if (!ctx->ra_raw_message_key) {
+ flb_plg_error(ctx->ins, "could not create record accessor for raw_message_key");
+ flb_free(ctx);
+ return NULL;
+ }
+ }
+
+ /* Output format */
+ ctx->out_format = FLB_PACK_JSON_FORMAT_NONE;
+ tmp = flb_output_get_property("format", ins);
+ if (tmp) {
+ ret = flb_pack_to_json_format_type(tmp);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "unrecognized 'format' option '%s'. "
+ "Using 'msgpack'", tmp);
+ }
+ else {
+ ctx->out_format = ret;
+ }
+ }
+
+ /* Date key */
+ ctx->date_key = ctx->json_date_key;
+ tmp = flb_output_get_property("json_date_key", ins);
+ if (tmp) {
+ /* Just check if we have to disable it */
+ if (flb_utils_bool(tmp) == FLB_FALSE) {
+ ctx->date_key = NULL;
+ }
+ }
+
+ /* Date format for JSON output */
+ ctx->json_date_format = FLB_PACK_JSON_DATE_DOUBLE;
+ tmp = flb_output_get_property("json_date_format", ins);
+ if (tmp) {
+ ret = flb_pack_to_json_date_type(tmp);
+ if (ret == -1) {
+ flb_plg_error(ctx->ins, "unrecognized 'json_date_format' option '%s'. "
+ "Using 'double'", tmp);
+ }
+ else {
+ ctx->json_date_format = ret;
+ }
+ }
+
+ ctx->host = ins->host.name;
+ ctx->port = ins->host.port;
+
+ ctx->endpoint_descriptor = flb_net_udp_connect(ins->host.name,
+ ins->host.port,
+ ins->net_setup.source_address);
+
+ if (ctx->endpoint_descriptor < 0) {
+ flb_udp_conf_destroy(ctx);
+
+ flb_plg_error(ctx->ins, "Error creating upstream socket");
+
+ ctx = NULL;
+ }
+
+ return ctx;
+}
+
+void flb_udp_conf_destroy(struct flb_out_udp *ctx)
+{
+ if (!ctx) {
+ return;
+ }
+
+ if (ctx->ra_raw_message_key) {
+ flb_ra_destroy(ctx->ra_raw_message_key);
+ }
+
+ if (ctx->endpoint_descriptor >= 0) {
+ flb_socket_close(ctx->endpoint_descriptor);
+ }
+
+ flb_free(ctx);
+
+ ctx = NULL;
+}
diff --git a/src/fluent-bit/plugins/out_udp/udp_conf.h b/src/fluent-bit/plugins/out_udp/udp_conf.h
new file mode 100644
index 000000000..58cba4564
--- /dev/null
+++ b/src/fluent-bit/plugins/out_udp/udp_conf.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_UDP_CONF_H
+#define FLB_OUT_UDP_CONF_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+
+#include "udp.h"
+
+struct flb_out_udp *flb_udp_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+void flb_udp_conf_destroy(struct flb_out_udp *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_vivo_exporter/CMakeLists.txt b/src/fluent-bit/plugins/out_vivo_exporter/CMakeLists.txt
new file mode 100644
index 000000000..e458b1ff4
--- /dev/null
+++ b/src/fluent-bit/plugins/out_vivo_exporter/CMakeLists.txt
@@ -0,0 +1,15 @@
+if(NOT FLB_HTTP_SERVER)
+ message(
+ FATAL_ERROR
+ "Vivo Exporter output plugin requires built-in HTTP Server be enabled:
+ Use -DFLB_HTTP_SERVER=On option to enable it"
+ )
+endif()
+
+set(src
+ vivo_http.c
+ vivo_stream.c
+ vivo.c
+ )
+
+FLB_PLUGIN(out_vivo_exporter "${src}" "")
diff --git a/src/fluent-bit/plugins/out_vivo_exporter/vivo.c b/src/fluent-bit/plugins/out_vivo_exporter/vivo.c
new file mode 100644
index 000000000..85e1e0159
--- /dev/null
+++ b/src/fluent-bit/plugins/out_vivo_exporter/vivo.c
@@ -0,0 +1,343 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_kv.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include "vivo.h"
+#include "vivo_http.h"
+#include "vivo_stream.h"
+
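+/*
+ * format_logs: walk the decoded log events of the chunk, convert each record
+ * to JSON and concatenate the results into a newline-delimited buffer that is
+ * later queued on the logs stream.
+ */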
+static flb_sds_t format_logs(struct flb_event_chunk *event_chunk)
+{
+ struct flb_log_event_decoder log_decoder;
+ struct flb_log_event log_event;
+ int result;
+ int i;
+ flb_sds_t out_js;
+ flb_sds_t out_buf = NULL;
+ msgpack_sbuffer tmp_sbuf;
+ msgpack_packer tmp_pck;
+
+ result = flb_log_event_decoder_init(&log_decoder,
+ (char *) event_chunk->data,
+ event_chunk->size);
+
+ if (result != FLB_EVENT_DECODER_SUCCESS) {
+ return NULL;
+ }
+
+ out_buf = flb_sds_create_size((event_chunk->size * 2) / 4);
+ if (!out_buf) {
+ flb_errno();
+ return NULL;
+ }
+
+ /* Create temporary msgpack buffer */
+ msgpack_sbuffer_init(&tmp_sbuf);
+ msgpack_packer_init(&tmp_pck, &tmp_sbuf, msgpack_sbuffer_write);
+
+ while ((result = flb_log_event_decoder_next(
+ &log_decoder,
+ &log_event)) == FLB_EVENT_DECODER_SUCCESS) {
+ /*
+ * Each record is serialized by using the following structure:
+ *
+ * [[TIMESTAMP, {"_tag": "...", ...MORE_METADATA}], {RECORD CONTENT}]
+ */
+ msgpack_pack_array(&tmp_pck, 2);
+ msgpack_pack_array(&tmp_pck, 2);
+ msgpack_pack_uint64(&tmp_pck, flb_time_to_nanosec(&log_event.timestamp));
+
+ /* map holding the '_tag' entry plus the record metadata */
+ msgpack_pack_map(&tmp_pck, 1 + log_event.metadata->via.map.size);
+
+ msgpack_pack_str(&tmp_pck, 4);
+ msgpack_pack_str_body(&tmp_pck, "_tag", 4);
+
+ msgpack_pack_str(&tmp_pck, flb_sds_len(event_chunk->tag));
+ msgpack_pack_str_body(&tmp_pck, event_chunk->tag, flb_sds_len(event_chunk->tag));
+
+ /* Append remaining keys/values */
+ for (i = 0;
+ i < log_event.metadata->via.map.size;
+ i++) {
+ msgpack_pack_object(&tmp_pck,
+ log_event.metadata->via.map.ptr[i].key);
+ msgpack_pack_object(&tmp_pck,
+ log_event.metadata->via.map.ptr[i].val);
+ }
+
+ /* pack the remaining content */
+ msgpack_pack_map(&tmp_pck, log_event.body->via.map.size);
+
+ /* Append remaining keys/values */
+ for (i = 0;
+ i < log_event.body->via.map.size;
+ i++) {
+ msgpack_pack_object(&tmp_pck,
+ log_event.body->via.map.ptr[i].key);
+ msgpack_pack_object(&tmp_pck,
+ log_event.body->via.map.ptr[i].val);
+ }
+
+ /* Concatenate by using break lines */
+ out_js = flb_msgpack_raw_to_json_sds(tmp_sbuf.data, tmp_sbuf.size);
+ if (!out_js) {
+ flb_sds_destroy(out_buf);
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+ flb_log_event_decoder_destroy(&log_decoder);
+ return NULL;
+ }
+
+ /*
+ * One map record has been converted, now append it to the
+ * outgoing out_buf sds variable.
+ */
+ flb_sds_cat_safe(&out_buf, out_js, flb_sds_len(out_js));
+ flb_sds_cat_safe(&out_buf, "\n", 1);
+
+ flb_sds_destroy(out_js);
+ msgpack_sbuffer_clear(&tmp_sbuf);
+ }
+
+ /* Release the unpacker */
+ flb_log_event_decoder_destroy(&log_decoder);
+
+ msgpack_sbuffer_destroy(&tmp_sbuf);
+
+ return out_buf;
+}
+
+static int logs_event_chunk_append(struct vivo_exporter *ctx,
+ struct flb_event_chunk *event_chunk)
+{
+ size_t len;
+ flb_sds_t json;
+ struct vivo_stream_entry *entry;
+
+
+ json = format_logs(event_chunk);
+ if (!json) {
+ flb_plg_error(ctx->ins, "cannot convert logs chunk to JSON");
+ return -1;
+ }
+
+ /* append content to the stream */
+ len = flb_sds_len(json);
+ entry = vivo_stream_append(ctx->stream_logs, json, len);
+
+ flb_sds_destroy(json);
+
+ if (!entry) {
+ flb_plg_error(ctx->ins, "cannot append JSON log to stream");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int metrics_traces_event_chunk_append(struct vivo_exporter *ctx,
+ struct vivo_stream *vs,
+ struct flb_event_chunk *event_chunk)
+{
+ size_t len;
+ flb_sds_t json;
+ struct vivo_stream_entry *entry;
+
+ /* Convert msgpack to readable JSON format */
+ json = flb_msgpack_raw_to_json_sds(event_chunk->data, event_chunk->size);
+ if (!json) {
+ flb_plg_error(ctx->ins, "cannot convert metrics chunk to JSON");
+ return -1;
+ }
+
+ flb_sds_cat_safe(&json, "\n", 1);
+
+ /* append content to the stream */
+ len = flb_sds_len(json);
+ entry = vivo_stream_append(vs, json, len);
+
+ flb_sds_destroy(json);
+
+ if (!entry) {
+ flb_plg_error(ctx->ins, "cannot append JSON log to stream");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int cb_vivo_init(struct flb_output_instance *ins,
+ struct flb_config *config,
+ void *data)
+{
+ int ret;
+ struct vivo_exporter *ctx;
+
+ flb_output_net_default("0.0.0.0", 2025 , ins);
+
+ ctx = flb_calloc(1, sizeof(struct vivo_exporter));
+ if (!ctx) {
+ flb_errno();
+ return -1;
+ }
+ ctx->ins = ins;
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return -1;
+ }
+
+ flb_output_set_context(ins, ctx);
+
+ /* Load config map */
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ return -1;
+ }
+
+ /* Create Streams */
+ ctx->stream_logs = vivo_stream_create(ctx);
+ if (!ctx->stream_logs) {
+ return -1;
+ }
+
+ ctx->stream_metrics = vivo_stream_create(ctx);
+ if (!ctx->stream_metrics) {
+ return -1;
+ }
+
+ ctx->stream_traces = vivo_stream_create(ctx);
+ if (!ctx->stream_traces) {
+ return -1;
+ }
+
+ /* HTTP Server context */
+ ctx->http = vivo_http_server_create(ctx,
+ ins->host.name, ins->host.port, config);
+ if (!ctx->http) {
+ flb_plg_error(ctx->ins, "could not initialize HTTP server, aborting");
+ return -1;
+ }
+
+ /* Start HTTP Server */
+ ret = vivo_http_server_start(ctx->http);
+ if (ret == -1) {
+ return -1;
+ }
+
+ flb_plg_info(ctx->ins, "listening iface=%s tcp_port=%d",
+ ins->host.name, ins->host.port);
+
+ return 0;
+}
+
+static void cb_vivo_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *ins, void *out_context,
+ struct flb_config *config)
+{
+ int ret = -1;
+ struct vivo_exporter *ctx = out_context;
+
+#ifdef FLB_HAVE_METRICS
+ if (event_chunk->type == FLB_EVENT_TYPE_METRICS) {
+ ret = metrics_traces_event_chunk_append(ctx, ctx->stream_metrics, event_chunk);
+ }
+#endif
+ if (event_chunk->type == FLB_EVENT_TYPE_LOGS) {
+ ret = logs_event_chunk_append(ctx, event_chunk);
+ }
+ else if (event_chunk->type == FLB_EVENT_TYPE_TRACES) {
+ ret = metrics_traces_event_chunk_append(ctx, ctx->stream_traces, event_chunk);
+ }
+
+ if (ret == 0) {
+ FLB_OUTPUT_RETURN(FLB_OK);
+ }
+
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+}
+
+static int cb_vivo_exit(void *data, struct flb_config *config)
+{
+ struct vivo_exporter *ctx = data;
+
+ if (!ctx) {
+ return 0;
+ }
+
+ if (ctx->http) {
+ vivo_http_server_stop(ctx->http);
+ vivo_http_server_destroy(ctx->http);
+ }
+
+ vivo_stream_destroy(ctx->stream_logs);
+ vivo_stream_destroy(ctx->stream_metrics);
+ vivo_stream_destroy(ctx->stream_traces);
+
+ flb_free(ctx);
+
+ return 0;
+}
+
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_BOOL, "empty_stream_on_read", "off",
+ 0, FLB_TRUE, offsetof(struct vivo_exporter, empty_stream_on_read),
+ "If enabled, when an HTTP client consumes the data from a stream, the queue "
+ "content will be removed"
+ },
+
+ {
+ FLB_CONFIG_MAP_SIZE, "stream_queue_size", "20M",
+ 0, FLB_TRUE, offsetof(struct vivo_exporter, stream_queue_size),
+ "Specify the maximum queue size per stream. Each specific stream for logs, metrics "
+ "and traces can hold up to 'stream_queue_size' bytes."
+ },
+
+ {
+ FLB_CONFIG_MAP_STR, "http_cors_allow_origin", NULL,
+ 0, FLB_TRUE, offsetof(struct vivo_exporter, http_cors_allow_origin),
+ "Specify the value for the HTTP Access-Control-Allow-Origin header (CORS)"
+ },
+
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_output_plugin out_vivo_exporter_plugin = {
+ .name = "vivo_exporter",
+ .description = "Vivo Exporter",
+ .cb_init = cb_vivo_init,
+ .cb_flush = cb_vivo_flush,
+ .cb_exit = cb_vivo_exit,
+ .flags = FLB_OUTPUT_NET,
+ .event_type = FLB_OUTPUT_LOGS | FLB_OUTPUT_METRICS | FLB_OUTPUT_TRACES,
+ .config_map = config_map,
+ .workers = 1,
+};
diff --git a/src/fluent-bit/plugins/out_vivo_exporter/vivo.h b/src/fluent-bit/plugins/out_vivo_exporter/vivo.h
new file mode 100644
index 000000000..943c40364
--- /dev/null
+++ b/src/fluent-bit/plugins/out_vivo_exporter/vivo.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_VIVO_EXPORTER_H
+#define FLB_VIVO_EXPORTER_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_ring_buffer.h>
+
+#define VIVO_RING_BUFFER_SIZE 10
+
+/* Plugin context */
+struct vivo_exporter {
+ void *http;
+
+ void *stream_logs;
+ void *stream_metrics;
+ void *stream_traces;
+
+ /* options */
+ int empty_stream_on_read;
+ size_t stream_queue_size;
+ flb_sds_t http_cors_allow_origin;
+
+ /* instance context */
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_vivo_exporter/vivo_http.c b/src/fluent-bit/plugins/out_vivo_exporter/vivo_http.c
new file mode 100644
index 000000000..efd39dcc8
--- /dev/null
+++ b/src/fluent-bit/plugins/out_vivo_exporter/vivo_http.c
@@ -0,0 +1,266 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <fluent-bit/flb_http_server.h>
+
+#include "vivo.h"
+#include "vivo_http.h"
+#include "vivo_stream.h"
+
+#define VIVO_CONTENT_TYPE "Content-Type"
+#define VIVO_CONTENT_TYPE_JSON "application/json"
+#define VIVO_STREAM_START_ID "Vivo-Stream-Start-ID"
+#define VIVO_STREAM_END_ID "Vivo-Stream-End-ID"
+
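+/*
+ * stream_get_uri_properties: parse the optional 'from', 'to' and 'limit'
+ * query string parameters (e.g. /logs?from=0&to=100&limit=50); any value
+ * that is not present is reported as -1.
+ */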
+static int stream_get_uri_properties(mk_request_t *request,
+ int64_t *from, int64_t *to, int64_t *limit)
+{
+ char *ptr;
+ flb_sds_t buf;
+
+ *from = -1;
+ *to = -1;
+ *limit = -1;
+
+ buf = flb_sds_create_len(request->query_string.data, request->query_string.len);
+ if (!buf) {
+ return -1;
+ }
+
+ ptr = strstr(buf, "from=");
+ if (ptr) {
+ *from = atol(ptr + 5);
+ }
+
+ ptr = strstr(buf, "to=");
+ if (ptr) {
+ *to = atol(ptr + 3);
+ }
+
+ ptr = strstr(buf, "limit=");
+ if (ptr) {
+ *limit = atol(ptr + 6);
+ }
+
+ flb_sds_destroy(buf);
+
+ return 0;
+}
+
+static void headers_set(mk_request_t *request, struct vivo_stream *vs)
+{
+ struct vivo_exporter *ctx;
+
+
+ /* parent context */
+ ctx = vs->parent;
+
+ /* content type */
+ mk_http_header(request,
+ VIVO_CONTENT_TYPE, sizeof(VIVO_CONTENT_TYPE) - 1,
+ VIVO_CONTENT_TYPE_JSON, sizeof(VIVO_CONTENT_TYPE_JSON) - 1);
+
+ /* CORS */
+ if (ctx->http_cors_allow_origin) {
+ mk_http_header(request,
+ "Access-Control-Allow-Origin",
+ sizeof("Access-Control-Allow-Origin") - 1,
+ ctx->http_cors_allow_origin,
+ flb_sds_len(ctx->http_cors_allow_origin));
+
+ mk_http_header(request,
+ "Access-Control-Allow-Headers",
+ sizeof("Access-Control-Allow-Headers") - 1,
+ "Origin, X-Requested-With, Content-Type, Accept",
+ sizeof("Origin, X-Requested-With, Content-Type, Accept") - 1);
+
+ mk_http_header(request,
+ "Access-Control-Expose-Headers",
+ sizeof("Access-Control-Expose-Headers") - 1,
+ "vivo-stream-start-id, vivo-stream-end-id",
+ sizeof("vivo-stream-start-id, vivo-stream-end-id") - 1);
+
+ }
+}
+
+static void serve_content(mk_request_t *request, struct vivo_stream *vs)
+{
+ int64_t from = -1;
+ int64_t to = -1;
+ int64_t limit = -1;
+ int64_t stream_start_id = -1;
+ int64_t stream_end_id = -1;
+ flb_sds_t payload;
+ flb_sds_t str_start;
+ flb_sds_t str_end;
+
+
+ if (request->query_string.len > 0) {
+ stream_get_uri_properties(request, &from, &to, &limit);
+ }
+
+ payload = vivo_stream_get_content(vs, from, to, limit,
+ &stream_start_id, &stream_end_id);
+ if (!payload) {
+ mk_http_status(request, 500);
+ return;
+ }
+
+ if (flb_sds_len(payload) == 0) {
+ mk_http_status(request, 200);
+ headers_set(request, vs);
+ flb_sds_destroy(payload);
+ return;
+ }
+
+ mk_http_status(request, 200);
+
+ /* set response headers */
+ headers_set(request, vs);
+
+ /* stream ids served: compose buffer and set headers */
+ str_start = flb_sds_create_size(32);
+ flb_sds_printf(&str_start, "%" PRId64, stream_start_id);
+
+ str_end = flb_sds_create_size(32);
+ flb_sds_printf(&str_end, "%" PRId64, stream_end_id);
+
+ mk_http_header(request,
+ VIVO_STREAM_START_ID, sizeof(VIVO_STREAM_START_ID) - 1,
+ str_start, flb_sds_len(str_start));
+
+ mk_http_header(request,
+ VIVO_STREAM_END_ID, sizeof(VIVO_STREAM_END_ID) - 1,
+ str_end, flb_sds_len(str_end));
+
+ /* send payload */
+ mk_http_send(request, payload, flb_sds_len(payload), NULL);
+
+ /* release */
+ flb_sds_destroy(payload);
+ flb_sds_destroy(str_start);
+ flb_sds_destroy(str_end);
+}
+
+/* HTTP endpoint: /logs */
+static void cb_logs(mk_request_t *request, void *data)
+{
+ struct vivo_exporter *ctx;
+
+ ctx = (struct vivo_exporter *) data;
+
+ serve_content(request, ctx->stream_logs);
+ mk_http_done(request);
+}
+
+/* HTTP endpoint: /metrics */
+static void cb_metrics(mk_request_t *request, void *data)
+{
+ struct vivo_exporter *ctx;
+
+ ctx = (struct vivo_exporter *) data;
+
+ serve_content(request, ctx->stream_metrics);
+ mk_http_done(request);
+}
+
+static void cb_traces(mk_request_t *request, void *data)
+{
+ struct vivo_exporter *ctx;
+
+ ctx = (struct vivo_exporter *) data;
+
+ serve_content(request, ctx->stream_traces);
+ mk_http_done(request);
+}
+
+/* HTTP endpoint: / (root) */
+static void cb_root(mk_request_t *request, void *data)
+{
+ (void) data;
+
+ mk_http_status(request, 200);
+ mk_http_send(request, "Fluent Bit Vivo Exporter\n", sizeof("Fluent Bit Vivo Exporter\n") - 1, NULL);
+ mk_http_done(request);
+}
+
+struct vivo_http *vivo_http_server_create(struct vivo_exporter *ctx,
+ const char *listen,
+ int tcp_port,
+ struct flb_config *config)
+{
+ int vid;
+ char tmp[32];
+ struct vivo_http *ph;
+
+ ph = flb_malloc(sizeof(struct vivo_http));
+ if (!ph) {
+ flb_errno();
+ return NULL;
+ }
+ ph->config = config;
+
+ /* HTTP Server context */
+ ph->ctx = mk_create();
+ if (!ph->ctx) {
+ flb_free(ph);
+ return NULL;
+ }
+
+ /* Compose listen address */
+ snprintf(tmp, sizeof(tmp) -1, "%s:%d", listen, tcp_port);
+ mk_config_set(ph->ctx,
+ "Listen", tmp,
+ "Workers", "1",
+ NULL);
+
+ /* Virtual host */
+ vid = mk_vhost_create(ph->ctx, NULL);
+ ph->vid = vid;
+
+ /* Set HTTP URI callbacks */
+ mk_vhost_handler(ph->ctx, vid, "/logs", cb_logs, ctx);
+ mk_vhost_handler(ph->ctx, vid, "/metrics", cb_metrics, ctx);
+ mk_vhost_handler(ph->ctx, vid, "/traces", cb_traces, ctx);
+ mk_vhost_handler(ph->ctx, vid, "/", cb_root, NULL);
+
+ return ph;
+}
+
+void vivo_http_server_destroy(struct vivo_http *ph)
+{
+ if (ph) {
+ /* TODO: release mk_vhost */
+ if (ph->ctx) {
+ mk_destroy(ph->ctx);
+ }
+ flb_free(ph);
+ }
+}
+
+int vivo_http_server_start(struct vivo_http *ph)
+{
+ return mk_start(ph->ctx);
+}
+
+int vivo_http_server_stop(struct vivo_http *ph)
+{
+ return mk_stop(ph->ctx);
+}
diff --git a/src/fluent-bit/plugins/out_vivo_exporter/vivo_http.h b/src/fluent-bit/plugins/out_vivo_exporter/vivo_http.h
new file mode 100644
index 000000000..77453d289
--- /dev/null
+++ b/src/fluent-bit/plugins/out_vivo_exporter/vivo_http.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_VIVO_EXPORTER_HTTP_H
+#define FLB_VIVO_EXPORTER_HTTP_H
+
+#include <fluent-bit/flb_output_plugin.h>
+#include <monkey/mk_lib.h>
+
+#include "vivo.h"
+
+/* HTTP response payload received through a Message Queue */
+struct vivo_http_buf {
+ int users;
+ char *buf_data;
+ size_t buf_size;
+ struct mk_list _head;
+};
+
+/* Vivo HTTP Server context */
+struct vivo_http {
+ mk_ctx_t *ctx; /* Monkey HTTP Context */
+ int vid; /* Virtual host ID */
+ int qid_metrics; /* Queue ID for Metrics buffer */
+ struct flb_config *config; /* Fluent Bit context */
+};
+
+struct vivo_http *vivo_http_server_create(struct vivo_exporter *ctx,
+ const char *listen,
+ int tcp_port,
+ struct flb_config *config);
+void vivo_http_server_destroy(struct vivo_http *ph);
+
+int vivo_http_server_start(struct vivo_http *ph);
+int vivo_http_server_stop(struct vivo_http *ph);
+
+int vivo_http_server_mq_push_metrics(struct vivo_http *ph,
+ void *data, size_t size);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_vivo_exporter/vivo_stream.c b/src/fluent-bit/plugins/out_vivo_exporter/vivo_stream.c
new file mode 100644
index 000000000..9c8edb9ea
--- /dev/null
+++ b/src/fluent-bit/plugins/out_vivo_exporter/vivo_stream.c
@@ -0,0 +1,239 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_mem.h>
+#include <fluent-bit/flb_log.h>
+#include <fluent-bit/flb_sds.h>
+
+#include "vivo.h"
+#include "vivo_stream.h"
+
+static inline void stream_lock(struct vivo_stream *vs)
+{
+ pthread_mutex_lock(&vs->stream_mutex);
+}
+
+static inline void stream_unlock(struct vivo_stream *vs)
+{
+ pthread_mutex_unlock(&vs->stream_mutex);
+}
+
+struct vivo_stream *vivo_stream_create(struct vivo_exporter *ctx)
+{
+ struct vivo_stream *vs;
+
+ vs = flb_calloc(1, sizeof(struct vivo_stream));
+ if (!vs) {
+ flb_errno();
+ return NULL;
+ }
+ vs->parent = ctx;
+ vs->entries_added = 0;
+ pthread_mutex_init(&vs->stream_mutex, NULL);
+ mk_list_init(&vs->entries);
+ mk_list_init(&vs->purge);
+
+ return vs;
+}
+
+static uint64_t vivo_stream_get_new_id(struct vivo_stream *vs)
+{
+ uint64_t id = 0;
+
+ stream_lock(vs);
+
+ /* to get the next id, we simply use the current value of the 'entries_added' counter */
+ id = vs->entries_added;
+
+ stream_unlock(vs);
+
+ return id;
+}
+
+
+struct vivo_stream_entry *vivo_stream_entry_create(struct vivo_stream *vs,
+ void *data, size_t size)
+{
+ struct vivo_stream_entry *e;
+
+ if (size == 0) {
+ return NULL;
+ }
+
+ e = flb_calloc(1, sizeof(struct vivo_stream_entry));
+ if (!e) {
+ flb_errno();
+ return NULL;
+ }
+ e->id = vivo_stream_get_new_id(vs);
+
+ e->data = flb_sds_create_len(data, size);
+ if (!e->data) {
+ flb_free(e);
+ return NULL;
+ }
+
+ return e;
+}
+
+/*
+ * NOTE: this function must always be invoked with stream_mutex held; we don't take the lock
+ * inside the function since the caller might be iterating the parent list
+ */
+static void vivo_stream_entry_destroy(struct vivo_stream *vs, struct vivo_stream_entry *e)
+{
+ mk_list_del(&e->_head);
+ vs->current_bytes_size -= flb_sds_len(e->data);
+ flb_sds_destroy(e->data);
+ flb_free(e);
+}
+
+/* NOTE: this function must run inside a stream_lock()/stream_unlock() protection */
+static void vivo_stream_cleanup(struct vivo_stream *vs)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct vivo_stream_entry *e;
+
+ mk_list_foreach_safe(head, tmp, &vs->entries) {
+ e = mk_list_entry(head, struct vivo_stream_entry, _head);
+ vivo_stream_entry_destroy(vs, e);
+ }
+}
+
+void vivo_stream_destroy(struct vivo_stream *vs)
+{
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct vivo_stream_entry *e;
+
+ stream_lock(vs);
+ mk_list_foreach_safe(head, tmp, &vs->entries) {
+ e = mk_list_entry(head, struct vivo_stream_entry, _head);
+ vivo_stream_entry_destroy(vs, e);
+ }
+ stream_unlock(vs);
+
+ flb_free(vs);
+}
+
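+/*
+ * vivo_stream_get_content: concatenate the stored entries whose ids fall in
+ * the [from, to] range (up to 'limit' entries) into a single buffer and report
+ * the first and last ids served; when 'empty_stream_on_read' is enabled the
+ * stream is drained after being read.
+ */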
+flb_sds_t vivo_stream_get_content(struct vivo_stream *vs, int64_t from, int64_t to,
+ int64_t limit,
+ int64_t *stream_start_id, int64_t *stream_end_id)
+{
+ int64_t count = 0;
+ flb_sds_t buf;
+ struct mk_list *head;
+ struct vivo_stream_entry *e;
+ struct vivo_exporter *ctx = vs->parent;
+
+ buf = flb_sds_create_size(vs->current_bytes_size);
+ if (!buf) {
+ return NULL;
+ }
+
+ stream_lock(vs);
+
+ mk_list_foreach(head, &vs->entries) {
+ e = mk_list_entry(head, struct vivo_stream_entry, _head);
+
+ if (e->id < from && from != -1) {
+ continue;
+ }
+
+ if (e->id > to && to != -1 && to != 0) {
+ break;
+ }
+
+ if (count == 0) {
+ *stream_start_id = e->id;
+ }
+
+ flb_sds_cat_safe(&buf, e->data, flb_sds_len(e->data));
+
+ *stream_end_id = e->id;
+ count++;
+
+ if (limit > 0 && count >= limit) {
+ break;
+ }
+ }
+
+ if (ctx->empty_stream_on_read) {
+ vivo_stream_cleanup(vs);
+ }
+
+ stream_unlock(vs);
+
+ return buf;
+}
+
+/* Remove entries from the stream until at least 'size' bytes have been freed. This function must run inside a stream_lock()/stream_unlock() protection */
+static void vivo_stream_make_room(struct vivo_stream *vs, size_t size)
+{
+ size_t deleted = 0;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct vivo_stream_entry *e;
+
+ mk_list_foreach_safe(head, tmp, &vs->entries) {
+ e = mk_list_entry(head, struct vivo_stream_entry, _head);
+ deleted += flb_sds_len(e->data);
+ vivo_stream_entry_destroy(vs, e);
+ if (deleted >= size) {
+ break;
+ }
+ }
+}
+
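+/*
+ * vivo_stream_append: queue a new entry at the end of the stream; if adding it
+ * would exceed the configured 'stream_queue_size', older entries are evicted
+ * first through vivo_stream_make_room().
+ */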
+struct vivo_stream_entry *vivo_stream_append(struct vivo_stream *vs, void *data, size_t size)
+{
+ struct vivo_stream_entry *e;
+ struct vivo_exporter *ctx = vs->parent;
+
+ e = vivo_stream_entry_create(vs, data, size);
+ if (!e) {
+ return NULL;
+ }
+
+ stream_lock(vs);
+
+ /* check queue space */
+ if (vs->current_bytes_size + size > ctx->stream_queue_size) {
+ /* free up some space */
+ if (mk_list_size(&vs->entries) == 0) {
+ /* do nothing: the configured queue size is smaller than the incoming entry, let it pass */
+ }
+ else {
+ /* release at least 'size' bytes */
+ vivo_stream_make_room(vs, size);
+ }
+ }
+
+ /* add entry to the end of the list */
+ mk_list_add(&e->_head, &vs->entries);
+
+ vs->entries_added++;
+ vs->current_bytes_size += size;
+
+ stream_unlock(vs);
+
+ return e;
+}
diff --git a/src/fluent-bit/plugins/out_vivo_exporter/vivo_stream.h b/src/fluent-bit/plugins/out_vivo_exporter/vivo_stream.h
new file mode 100644
index 000000000..fb0ca6053
--- /dev/null
+++ b/src/fluent-bit/plugins/out_vivo_exporter/vivo_stream.h
@@ -0,0 +1,59 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2023 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_VIVO_STREAM_H
+#define FLB_VIVO_STREAM_H
+
+#include <fluent-bit/flb_info.h>
+
+#include "vivo.h"
+
+struct vivo_stream_entry {
+ int64_t id;
+ flb_sds_t data;
+ struct mk_list _head;
+};
+
+struct vivo_stream {
+ size_t entries_added;
+
+ size_t current_bytes_size;
+
+ struct mk_list entries;
+ struct mk_list purge;
+
+ /* mutex to protect the context */
+ pthread_mutex_t stream_mutex;
+
+ /* back reference to struct vivo_exporter context */
+ void *parent;
+};
+
+
+struct vivo_stream *vivo_stream_create(struct vivo_exporter *ctx);
+void vivo_stream_destroy(struct vivo_stream *vs);
+struct vivo_stream_entry *vivo_stream_entry_create(struct vivo_stream *vs,
+ void *data, size_t size);
+struct vivo_stream_entry *vivo_stream_append(struct vivo_stream *vs, void *data,
+ size_t size);
+flb_sds_t vivo_stream_get_content(struct vivo_stream *vs, int64_t from, int64_t to,
+ int64_t limit,
+ int64_t *stream_start_id, int64_t *stream_end_id);
+
+#endif
diff --git a/src/fluent-bit/plugins/out_websocket/CMakeLists.txt b/src/fluent-bit/plugins/out_websocket/CMakeLists.txt
new file mode 100644
index 000000000..5f715a173
--- /dev/null
+++ b/src/fluent-bit/plugins/out_websocket/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(src
+ websocket_conf.c
+ websocket.c)
+
+FLB_PLUGIN(out_websocket "${src}" "")
diff --git a/src/fluent-bit/plugins/out_websocket/websocket.c b/src/fluent-bit/plugins/out_websocket/websocket.c
new file mode 100644
index 000000000..6a196e16d
--- /dev/null
+++ b/src/fluent-bit/plugins/out_websocket/websocket.c
@@ -0,0 +1,331 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_network.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_upstream.h>
+#include <fluent-bit/flb_crypto.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_sds.h>
+#include <fluent-bit/flb_http_client.h>
+#include <fluent-bit/flb_config_map.h>
+#include <msgpack.h>
+
+#include "websocket.h"
+#include "websocket_conf.h"
+struct flb_output_plugin out_websocket_plugin;
+
+#define SECURED_BY "Fluent Bit"
+
+
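+/*
+ * flb_ws_handshake: perform the HTTP Upgrade handshake by issuing a GET
+ * request with the 'Upgrade', 'Connection', 'Sec-WebSocket-Key' and
+ * 'Sec-WebSocket-Version' headers and expecting a '101 Switching Protocols'
+ * response from the server.
+ */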
+static int flb_ws_handshake(struct flb_connection *u_conn,
+ struct flb_out_ws *ctx)
+{
+ int ret;
+ size_t bytes_sent;
+ struct flb_http_client *c;
+
+ if (!u_conn) {
+ flb_error("[output_ws] upstream connection error");
+ return -1;
+ }
+
+ /* Compose HTTP Client request */
+ c = flb_http_client(u_conn, FLB_HTTP_GET, ctx->uri,
+ NULL, 0, NULL, 0, NULL, 0);
+ if (!c) {
+ flb_upstream_conn_release(u_conn);
+ return -1;
+ }
+
+ flb_http_buffer_size(c, ctx->buffer_size);
+ flb_http_add_header(c, "Upgrade", 7, "websocket", 9);
+ flb_http_add_header(c, "Connection", 10, "Upgrade", 7);
+ flb_http_add_header(c, "Sec-WebSocket-Key", 17, "dGhlIHNhbXBsZSBub25jZQ==", 24);
+ flb_http_add_header(c, "Sec-WebSocket-Version", 21, "13", 2);
+
+ /* Perform request*/
+ ret = flb_http_do(c, &bytes_sent);
+
+ if (ret != 0 || c->resp.status != 101) {
+ if (c->resp.payload_size > 0) {
+ flb_debug("[output_ws] Websocket Server Response\n%s",
+ c->resp.payload);
+ }
+ flb_http_client_destroy(c);
+ flb_upstream_conn_release(u_conn);
+ flb_debug("[out_ws] Http Get Operation ret = %i, http resp = %i", ret, c->resp.status);
+ return -1;
+ }
+ flb_http_client_destroy(c);
+ return 0;
+}
+
+static void flb_ws_mask(char *data, int len, char *mask)
+{
+ int i;
+ for (i = 0; i < len; ++i) {
+ data[i] ^= mask[i % 4];
+ }
+}
+
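+/*
+ * flb_ws_sendDataFrameHeader: build and send the RFC 6455 frame header for a
+ * masked text frame (first byte 0x81 = FIN + text opcode). The payload length
+ * is encoded in 7 bits, or as a 16-bit / 64-bit extended length for payloads
+ * of 126 bytes or more, followed by the 4-byte masking key.
+ */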
+static int flb_ws_sendDataFrameHeader(struct flb_connection *u_conn,
+ struct flb_out_ws *ctx, const void *data, size_t bytes)
+{
+ int ret = -1;
+ char* data_frame_head;
+ size_t bytes_sent;
+ int data_frame_head_len = 0;
+ //TODO use random function to generate masking_key
+ char masking_key[4] = {0x12, 0x34, 0x56, 0x78};
+ unsigned long long payloadSize = bytes;
+
+ flb_ws_mask((char *)data, payloadSize, masking_key);
+ if (payloadSize < 126) {
+ data_frame_head = (char *)flb_malloc(6);
+ if (!data_frame_head) {
+ flb_errno();
+ return -1;
+ }
+ data_frame_head[0] = 0x81;
+ data_frame_head[1] = (payloadSize & 0xff) | 0x80;
+ data_frame_head[2] = masking_key[0];
+ data_frame_head[3] = masking_key[1];
+ data_frame_head[4] = masking_key[2];
+ data_frame_head[5] = masking_key[3];
+ data_frame_head_len = 6;
+ }
+ else if (payloadSize < 65536) {
+ data_frame_head = (char *)flb_malloc(8);
+ if (!data_frame_head) {
+ flb_errno();
+ return -1;
+ }
+ data_frame_head[0] = 0x81;
+ data_frame_head[1] = 126 | 0x80;
+ data_frame_head[2] = (payloadSize >> 8) & 0xff;
+ data_frame_head[3] = (payloadSize >> 0) & 0xff;
+ data_frame_head[4] = masking_key[0];
+ data_frame_head[5] = masking_key[1];
+ data_frame_head[6] = masking_key[2];
+ data_frame_head[7] = masking_key[3];
+ data_frame_head_len = 8;
+ }
+ else {
+ data_frame_head = (char *)flb_malloc(14);
+ if (!data_frame_head) {
+ flb_errno();
+ return -1;
+ }
+ data_frame_head[0] = 0x81;
+ data_frame_head[1] = 127 | 0x80;
+ data_frame_head[2] = (payloadSize >> 56) & 0xff;
+ data_frame_head[3] = (payloadSize >> 48) & 0xff;
+ data_frame_head[4] = (payloadSize >> 40) & 0xff;
+ data_frame_head[5] = (payloadSize >> 32) & 0xff;
+ data_frame_head[6] = (payloadSize >> 24) & 0xff;
+ data_frame_head[7] = (payloadSize >> 16) & 0xff;
+ data_frame_head[8] = (payloadSize >> 8) & 0xff;
+ data_frame_head[9] = (payloadSize >> 0) & 0xff;
+ data_frame_head[10] = masking_key[0];
+ data_frame_head[11] = masking_key[1];
+ data_frame_head[12] = masking_key[2];
+ data_frame_head[13] = masking_key[3];
+ data_frame_head_len = 14;
+ }
+ ret = flb_io_net_write(u_conn, data_frame_head, data_frame_head_len, &bytes_sent);
+ if (ret == -1) {
+ flb_error("[out_ws] could not write dataframe header");
+ goto error;
+ }
+ flb_free(data_frame_head);
+ return 0;
+
+error:
+ flb_free(data_frame_head);
+ return -1;
+}
+
+static int cb_ws_init(struct flb_output_instance *ins,
+ struct flb_config *config, void *data)
+{
+ struct flb_out_ws *ctx = NULL;
+
+ ctx = flb_ws_conf_create(ins, config);
+ if (!ctx) {
+ return -1;
+ }
+
+ ctx->handshake = 1;
+ ctx->last_input_timestamp = time(NULL);
+ flb_output_set_context(ins, ctx);
+ return 0;
+}
+
+static int cb_ws_exit(void *data, struct flb_config *config)
+{
+ struct flb_out_ws *ctx = data;
+ flb_ws_conf_destroy(ctx);
+ return 0;
+}
+
+static void cb_ws_flush(struct flb_event_chunk *event_chunk,
+ struct flb_output_flush *out_flush,
+ struct flb_input_instance *i_ins,
+ void *out_context,
+ struct flb_config *config)
+{
+ int ret = -1;
+ size_t bytes_sent;
+ flb_sds_t json = NULL;
+ struct flb_upstream *u;
+ struct flb_connection *u_conn;
+ struct flb_out_ws *ctx = out_context;
+ time_t now;
+
+ /* Get upstream context and connection */
+ u = ctx->u;
+ u_conn = flb_upstream_conn_get(u);
+
+ if (!u_conn) {
+ flb_error("[out_ws] no upstream connections available to %s:%i", u->tcp_host, u->tcp_port);
+ ctx->handshake = 1;
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ now = time(NULL);
+
+ //TODO how to determine the interval? the connection disconnects after about 30 seconds, so we set 20 seconds here.
+ flb_debug("[out_ws] interval is %ld and handshake is %d", now - ctx->last_input_timestamp, ctx->handshake);
+ if ((now - ctx->last_input_timestamp > ctx->idle_interval) && (ctx->handshake == 0)) {
+ ctx->handshake = 1;
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ ctx->last_input_timestamp = now;
+
+ if (ctx->handshake == 1) {
+ /* Handshake with websocket server*/
+ flb_info("[out_ws] handshake for ws");
+ ret = flb_ws_handshake(u_conn, ctx);
+ if (ret == -1) {
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+ ctx->handshake = 0;
+ }
+
+ /* Data format process*/
+ if (ctx->out_format != FLB_PACK_JSON_FORMAT_NONE) {
+ json = flb_pack_msgpack_to_json_format(event_chunk->data,
+ event_chunk->size,
+ ctx->out_format,
+ ctx->json_date_format,
+ ctx->json_date_key);
+
+ if (!json) {
+ flb_error("[out_ws] error formatting JSON payload");
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_ERROR);
+ }
+ }
+
+ /* Write message header */
+ if (ctx->out_format == FLB_PACK_JSON_FORMAT_NONE) {
+ ret = flb_ws_sendDataFrameHeader(u_conn, ctx,
+ event_chunk->data,
+ event_chunk->size);
+ }
+ else {
+ ret = flb_ws_sendDataFrameHeader(u_conn, ctx, json, flb_sds_len(json));
+ }
+
+ if (ret == -1) {
+ flb_error("[out_ws] dataFrameHeader sent failed");
+ ctx->handshake = 1;
+ if (json) {
+ flb_sds_destroy(json);
+ }
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Write message body*/
+ if (ctx->out_format == FLB_PACK_JSON_FORMAT_NONE) {
+ ret = flb_io_net_write(u_conn,
+ event_chunk->data,
+ event_chunk->size,
+ &bytes_sent);
+ }
+ else {
+ ret = flb_io_net_write(u_conn, json, flb_sds_len(json), &bytes_sent);
+ flb_sds_destroy(json);
+ }
+
+ //flb_info("[out_ws] sendDataFrame number of bytes sent = %i", ret);
+ if (ret == -1) {
+ ctx->handshake = 1;
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_RETRY);
+ }
+
+ /* Release the connection */
+ flb_upstream_conn_release(u_conn);
+ FLB_OUTPUT_RETURN(FLB_OK);
+}
+
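+/*
+ * Example usage (illustrative sketch only, the values below are assumptions):
+ *
+ *   [OUTPUT]
+ *       name    websocket
+ *       match   *
+ *       host    127.0.0.1
+ *       port    80
+ *       uri     /stream
+ *       format  json
+ */
+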
+/* Configuration properties map */
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_STR, "uri", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_out_ws, uri),
+ "Specify an optional URI for the target web socket server, e.g: /something"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "format", NULL,
+ 0, FLB_FALSE, 0,
+ "Set desired payload format: json, json_stream, json_lines, gelf or msgpack"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "json_date_format", "double",
+ 0, FLB_FALSE, 0,
+ "Specify the format of the date"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "json_date_key", "date",
+ 0, FLB_TRUE, offsetof(struct flb_out_ws, json_date_key),
+ "Specify the name of the date field in output"
+ },
+ /* EOF */
+ {0}
+};
+
+/* Plugin reference */
+struct flb_output_plugin out_websocket_plugin = {
+ .name = "websocket",
+ .description = "Websocket",
+ .cb_init = cb_ws_init,
+ .cb_flush = cb_ws_flush,
+ .cb_exit = cb_ws_exit,
+ .config_map = config_map,
+ .flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
+};
diff --git a/src/fluent-bit/plugins/out_websocket/websocket.h b/src/fluent-bit/plugins/out_websocket/websocket.h
new file mode 100644
index 000000000..69007294e
--- /dev/null
+++ b/src/fluent-bit/plugins/out_websocket/websocket.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_WS
+#define FLB_OUT_WS
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_sds.h>
+
+/*
+ * Configuration: we put this separate from the main
+ * context so every Upstream Node can have its own configuration
+ * reference and pass it smoothly to the required caller.
+ *
+ * On simple mode (no HA), the structure is referenced
+ * by flb_forward->config. In HA mode the structure is referenced
+ * by the Upstream node context as an opaque data type.
+ */
+struct flb_out_ws {
+ int out_format;
+ char *uri;
+ char *host;
+ int port;
+ /* Timestamp format */
+ int json_date_format;
+
+ flb_sds_t json_date_key;
+ size_t buffer_size;
+ struct flb_upstream *u;
+ int handshake;
+ time_t last_input_timestamp;
+ int idle_interval;
+
+ /* Plugin instance */
+ struct flb_output_instance *ins;
+};
+
+#endif
diff --git a/src/fluent-bit/plugins/out_websocket/websocket_conf.c b/src/fluent-bit/plugins/out_websocket/websocket_conf.c
new file mode 100644
index 000000000..9c397a93c
--- /dev/null
+++ b/src/fluent-bit/plugins/out_websocket/websocket_conf.c
@@ -0,0 +1,159 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_sds.h>
+
+#include "websocket.h"
+#include "websocket_conf.h"
+
+struct flb_out_ws *flb_ws_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config)
+{
+ int ret;
+ int ulen;
+ int io_flags = 0;
+ char *uri = NULL;
+ char *tmp_uri = NULL;
+ const char *tmp;
+ int idle_interval;
+ struct flb_upstream *upstream;
+ struct flb_out_ws *ctx = NULL;
+
+ /* Allocate plugin context */
+ ctx = flb_calloc(1, sizeof(struct flb_out_ws));
+ if (!ctx) {
+ flb_errno();
+ return NULL;
+ }
+ ctx->ins = ins;
+
+ ret = flb_output_config_map_set(ins, (void *) ctx);
+ if (ret == -1) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ flb_output_net_default("127.0.0.1", 80, ins);
+
+ /* Check if SSL/TLS is enabled */
+#ifdef FLB_HAVE_TLS
+ if (ins->use_tls == FLB_TRUE) {
+ io_flags = FLB_IO_TLS;
+ }
+ else {
+ io_flags = FLB_IO_TCP;
+ }
+#else
+ io_flags = FLB_IO_TCP;
+#endif
+
+ upstream = flb_upstream_create(config, ins->host.name, ins->host.port, io_flags, ins->tls);
+ if (!upstream) {
+ flb_free(ctx);
+ return NULL;
+ }
+
+ /* Output format */
+ ctx->out_format = FLB_PACK_JSON_FORMAT_NONE;
+ tmp = flb_output_get_property("format", ins);
+ if (tmp) {
+ ret = flb_pack_to_json_format_type(tmp);
+ if (ret == -1) {
+ flb_error("[out_ws] unrecognized 'format' option '%s'. Using 'msgpack'", tmp);
+ }
+ else {
+ ctx->out_format = ret;
+ }
+ }
+
+ /* Date format for JSON output */
+ ctx->json_date_format = FLB_PACK_JSON_DATE_DOUBLE;
+ tmp = flb_output_get_property("json_date_format", ins);
+ if (tmp) {
+ ret = flb_pack_to_json_date_type(tmp);
+ if (ret == -1) {
+ flb_error("[out_ws] unrecognized 'json_date_format' option '%s'. Using 'double'", tmp);
+ }
+ else {
+ ctx->json_date_format = ret;
+ }
+ }
+
+ if (ins->host.uri) {
+ uri = flb_strdup(ins->host.uri->full);
+ }
+ else {
+ tmp = flb_output_get_property("uri", ins);
+ if (tmp) {
+ uri = flb_strdup(tmp);
+ }
+ }
+
+ if (!uri) {
+ uri = flb_strdup("/");
+ }
+ else if (uri[0] != '/') {
+ ulen = strlen(uri);
+ tmp_uri = flb_malloc(ulen + 2);
+ if (!tmp_uri) {
+ flb_errno();
+ flb_free(uri);
+ flb_upstream_destroy(upstream);
+ flb_free(ctx);
+ return NULL;
+ }
+ tmp_uri[0] = '/';
+ memcpy(tmp_uri + 1, uri, ulen);
+ tmp_uri[ulen + 1] = '\0';
+ flb_free(uri);
+ uri = tmp_uri;
+ }
+
+ idle_interval = ins->net_setup.keepalive_idle_timeout;
+ if (idle_interval > 5) {
+ ctx->idle_interval = idle_interval - 5;
+ } else if (idle_interval <= 2) {
+ flb_error("[out_ws] the keepalive timeout value is smaller than 2, which is meaningless! Please set it higher than 10 seconds. Current value will bring disorder for websocket plugin.");
+ ctx->idle_interval = idle_interval;
+ } else {
+ ctx->idle_interval = idle_interval - 2;
+ }
+
+ ctx->u = upstream;
+ ctx->uri = uri;
+ ctx->host = ins->host.name;
+ ctx->port = ins->host.port;
+
+ flb_output_upstream_set(ctx->u, ins);
+
+ flb_info("[out_ws] we have following parameter %s, %s, %d, %d", ctx->uri, ctx->host, ctx->port, ctx->idle_interval);
+ return ctx;
+}
+
+void flb_ws_conf_destroy(struct flb_out_ws *ctx)
+{
+ flb_info("[out_ws] flb_ws_conf_destroy ");
+ if (!ctx) {
+ return;
+ }
+
+ if (ctx->u) {
+ flb_upstream_destroy(ctx->u);
+ }
+
+ flb_free(ctx->uri);
+ flb_free(ctx);
+}
diff --git a/src/fluent-bit/plugins/out_websocket/websocket_conf.h b/src/fluent-bit/plugins/out_websocket/websocket_conf.h
new file mode 100644
index 000000000..969f84aa5
--- /dev/null
+++ b/src/fluent-bit/plugins/out_websocket/websocket_conf.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLB_OUT_WS_CONF_H
+#define FLB_OUT_WS_CONF_H
+
+#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_output.h>
+
+#include "websocket.h"
+
+struct flb_out_ws *flb_ws_conf_create(struct flb_output_instance *ins,
+ struct flb_config *config);
+void flb_ws_conf_destroy(struct flb_out_ws *ctx);
+
+#endif
diff --git a/src/fluent-bit/plugins/processor_attributes/CMakeLists.txt b/src/fluent-bit/plugins/processor_attributes/CMakeLists.txt
new file mode 100644
index 000000000..db01390ed
--- /dev/null
+++ b/src/fluent-bit/plugins/processor_attributes/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ attributes.c)
+
+FLB_PLUGIN(processor_attributes "${src}" "")
diff --git a/src/fluent-bit/plugins/processor_attributes/attributes.c b/src/fluent-bit/plugins/processor_attributes/attributes.c
new file mode 100644
index 000000000..a59c07ae2
--- /dev/null
+++ b/src/fluent-bit/plugins/processor_attributes/attributes.c
@@ -0,0 +1,1408 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <math.h>
+
+#include <fluent-bit/flb_regex.h>
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_processor_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_hash.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_processor.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <cmetrics/cmetrics.h>
+#include <cmetrics/cmt_histogram.h>
+#include <cmetrics/cmt_summary.h>
+#include <cmetrics/cmt_untyped.h>
+#include <cmetrics/cmt_counter.h>
+#include <cmetrics/cmt_gauge.h>
+#include <cmetrics/cmt_map.h>
+
+#include <cfl/cfl.h>
+
+#include "variant_utils.h"
+
+typedef int (*attribute_transformer)(void *, struct cfl_variant *value);
+
+struct internal_processor_context {
+ struct mk_list *update_list;
+ struct mk_list *insert_list;
+ struct mk_list *upsert_list;
+ struct mk_list *convert_list;
+ struct mk_list *extract_list;
+ struct mk_list *delete_list;
+ struct mk_list *hash_list;
+
+ /* internal attributes ready to append */
+ struct cfl_list update_attributes;
+ struct cfl_list insert_attributes;
+ struct cfl_list upsert_attributes;
+ struct cfl_list convert_attributes;
+ struct cfl_list extract_attributes;
+ struct mk_list delete_attributes;
+ struct mk_list hash_attributes;
+
+ struct flb_processor_instance *instance;
+ struct flb_config *config;
+};
+
+/*
+ * LOCAL
+ */
+static int hex_encode(unsigned char *input_buffer,
+ size_t input_length,
+ cfl_sds_t *output_buffer)
+{
+ const char hex[] = "0123456789abcdef";
+ cfl_sds_t result;
+ size_t index;
+
+ if (cfl_sds_alloc(*output_buffer) <= (input_length * 2)) {
+ result = cfl_sds_increase(*output_buffer,
+ (input_length * 2) -
+ cfl_sds_alloc(*output_buffer));
+
+ if (result == NULL) {
+ return FLB_FALSE;
+ }
+
+ *output_buffer = result;
+ }
+
+ for (index = 0; index < input_length; index++) {
+ (*output_buffer)[index * 2 + 0] = hex[(input_buffer[index] >> 4) & 0xF];
+ (*output_buffer)[index * 2 + 1] = hex[(input_buffer[index] >> 0) & 0xF];
+ }
+
+ cfl_sds_set_len(*output_buffer, input_length * 2);
+
+ (*output_buffer)[index * 2] = '\0';
+
+ return FLB_TRUE;
+}
+
+static int process_attribute_modification_list_setting(
+ struct flb_processor_instance *plugin_instance,
+ const char *setting_name,
+ struct mk_list *source_list,
+ struct mk_list *destination_list)
+{
+ struct flb_config_map_val *source_entry;
+ struct mk_list *iterator;
+ int result;
+
+ if (source_list == NULL ||
+ mk_list_is_empty(source_list) == 0) {
+
+ return 0;
+ }
+
+ flb_config_map_foreach(iterator, source_entry, source_list) {
+ result = flb_slist_add(destination_list, source_entry->val.str);
+
+ if (result != 0) {
+ flb_plg_error(plugin_instance,
+ "could not append attribute name %s\n",
+ source_entry->val.str);
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int process_attribute_modification_kvlist_setting(
+ struct flb_processor_instance *plugin_instance,
+ const char *setting_name,
+ struct mk_list *source_list,
+ struct cfl_list *destination_list)
+{
+ struct cfl_kv *processed_pair;
+ struct flb_config_map_val *source_entry;
+ struct mk_list *iterator;
+ struct flb_slist_entry *value;
+ struct flb_slist_entry *key;
+
+ if (source_list == NULL ||
+ mk_list_is_empty(source_list) == 0) {
+
+ return 0;
+ }
+
+ flb_config_map_foreach(iterator, source_entry, source_list) {
+ if (mk_list_size(source_entry->val.list) != 2) {
+ flb_plg_error(plugin_instance,
+ "'%s' expects a key and a value, "
+ "e.g: '%s version 1.8.0'",
+ setting_name, setting_name);
+
+ return -1;
+ }
+
+ key = mk_list_entry_first(source_entry->val.list,
+ struct flb_slist_entry, _head);
+
+ value = mk_list_entry_last(source_entry->val.list,
+ struct flb_slist_entry, _head);
+
+ processed_pair = cfl_kv_item_create(destination_list,
+ key->str,
+ value->str);
+
+ if (processed_pair == NULL) {
+ flb_plg_error(plugin_instance,
+ "could not append attribute %s=%s\n",
+ key->str,
+ value->str);
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void destroy_context(struct internal_processor_context *context)
+{
+ if (context != NULL) {
+ cfl_kv_release(&context->update_attributes);
+ cfl_kv_release(&context->insert_attributes);
+ cfl_kv_release(&context->upsert_attributes);
+ cfl_kv_release(&context->convert_attributes);
+ cfl_kv_release(&context->extract_attributes);
+ flb_slist_destroy(&context->delete_attributes);
+ flb_slist_destroy(&context->hash_attributes);
+
+ flb_free(context);
+ }
+}
+
+static struct internal_processor_context *
+ create_context(struct flb_processor_instance *processor_instance,
+ struct flb_config *config)
+{
+ struct internal_processor_context *context;
+ int result;
+
+ context = flb_calloc(1, sizeof(struct internal_processor_context));
+
+ if (context != NULL) {
+ context->instance = processor_instance;
+ context->config = config;
+
+ cfl_kv_init(&context->update_attributes);
+ cfl_kv_init(&context->insert_attributes);
+ cfl_kv_init(&context->upsert_attributes);
+ cfl_kv_init(&context->convert_attributes);
+ cfl_kv_init(&context->extract_attributes);
+ flb_slist_create(&context->delete_attributes);
+ flb_slist_create(&context->hash_attributes);
+
+ result = flb_processor_instance_config_map_set(processor_instance, (void *) context);
+
+ if (result == 0) {
+ result = process_attribute_modification_kvlist_setting(
+ processor_instance,
+ "update",
+ context->update_list,
+ &context->update_attributes);
+ }
+
+ if (result == 0) {
+ result = process_attribute_modification_kvlist_setting(
+ processor_instance,
+ "insert",
+ context->insert_list,
+ &context->insert_attributes);
+ }
+
+ if (result == 0) {
+ result = process_attribute_modification_kvlist_setting(
+ processor_instance,
+ "convert",
+ context->convert_list,
+ &context->convert_attributes);
+ }
+
+ if (result == 0) {
+ result = process_attribute_modification_kvlist_setting(
+ processor_instance,
+ "extract",
+ context->extract_list,
+ &context->extract_attributes);
+ }
+
+ if (result == 0) {
+ result = process_attribute_modification_kvlist_setting(
+ processor_instance,
+ "upsert",
+ context->upsert_list,
+ &context->upsert_attributes);
+ }
+
+ if (result == 0) {
+ result = process_attribute_modification_list_setting(
+ processor_instance,
+ "delete",
+ context->delete_list,
+ &context->delete_attributes);
+ }
+
+ if (result == 0) {
+ result = process_attribute_modification_list_setting(
+ processor_instance,
+ "hash",
+ context->hash_list,
+ &context->hash_attributes);
+ }
+
+ if (result != 0) {
+ destroy_context(context);
+
+ context = NULL;
+ }
+ }
+ else {
+ flb_errno();
+ }
+
+ return context;
+}
+
+static int cb_init(struct flb_processor_instance *processor_instance,
+ void *source_plugin_instance,
+ int source_plugin_type,
+ struct flb_config *config)
+{
+ processor_instance->context = (void *) create_context(
+ processor_instance, config);
+
+ if (processor_instance->context == NULL) {
+ return FLB_PROCESSOR_FAILURE;
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
+
+static int cb_exit(struct flb_processor_instance *processor_instance)
+{
+ if (processor_instance != NULL &&
+ processor_instance->context != NULL) {
+ destroy_context(processor_instance->context);
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
+static int cfl_kvlist_contains(struct cfl_kvlist *kvlist,
+ char *name)
+{
+ struct cfl_list *iterator;
+ struct cfl_kvpair *pair;
+
+ cfl_list_foreach(iterator, &kvlist->list) {
+ pair = cfl_list_entry(iterator,
+ struct cfl_kvpair, _head);
+
+ if (strcasecmp(pair->key, name) == 0) {
+ return FLB_TRUE;
+ }
+ }
+
+ return FLB_FALSE;
+}
+
+static void cfl_kvpair_destroy(struct cfl_kvpair *pair)
+{
+ if (pair != NULL) {
+ if (!cfl_list_entry_is_orphan(&pair->_head)) {
+ cfl_list_del(&pair->_head);
+ }
+
+ if (pair->key != NULL) {
+ cfl_sds_destroy(pair->key);
+ }
+
+ if (pair->val != NULL) {
+ cfl_variant_destroy(pair->val);
+ }
+
+ free(pair);
+ }
+}
+
+static int cfl_kvlist_remove(struct cfl_kvlist *kvlist,
+ char *name)
+{
+ struct cfl_list *iterator_backup;
+ struct cfl_list *iterator;
+ struct cfl_kvpair *pair;
+
+ cfl_list_foreach_safe(iterator, iterator_backup, &kvlist->list) {
+ pair = cfl_list_entry(iterator,
+ struct cfl_kvpair, _head);
+
+ if (strcasecmp(pair->key, name) == 0) {
+ cfl_kvpair_destroy(pair);
+ }
+ }
+
+ return FLB_TRUE;
+}
+
+
+/* local declarations */
+
+
+static cfl_sds_t cfl_variant_convert_to_json(struct cfl_variant *value)
+{
+ cfl_sds_t json_result;
+ mpack_writer_t writer;
+ char *data;
+ size_t size;
+
+ data = NULL;
+ size = 0;
+
+ mpack_writer_init_growable(&writer, &data, &size);
+
+ pack_cfl_variant(&writer, value);
+
+ mpack_writer_destroy(&writer);
+
+ json_result = flb_msgpack_raw_to_json_sds(data, size);
+
+ return json_result;
+}
+
+
+
+static int cfl_variant_convert(struct cfl_variant *input_value,
+ struct cfl_variant **output_value,
+ int output_type)
+{
+ char *conversion_canary;
+ struct cfl_variant temporary_value;
+ int errno_backup;
+
+ errno_backup = errno;
+ *output_value = cfl_variant_create();
+
+ if (*output_value == NULL) {
+ return CFL_FALSE;
+ }
+
+ memset(&temporary_value, 0, sizeof(struct cfl_variant));
+
+ temporary_value.type = output_type;
+
+ if (input_value->type == CFL_VARIANT_STRING ||
+ input_value->type == CFL_VARIANT_BYTES ||
+ input_value->type == CFL_VARIANT_REFERENCE) {
+ if (output_type == CFL_VARIANT_STRING ||
+ output_type == CFL_VARIANT_BYTES) {
+ temporary_value.data.as_string =
+ cfl_sds_create_len(
+ input_value->data.as_string,
+ cfl_sds_len(input_value->data.as_string));
+
+ if (temporary_value.data.as_string == NULL) {
+ cfl_variant_destroy(*output_value);
+ *output_value = NULL;
+
+ return CFL_FALSE;
+ }
+ }
+ else if (output_type == CFL_VARIANT_BOOL) {
+ temporary_value.data.as_bool = CFL_FALSE;
+
+ if (strcasecmp(input_value->data.as_string, "true") == 0) {
+ temporary_value.data.as_bool = CFL_TRUE;
+ }
+ else if (strcasecmp(input_value->data.as_string, "on") == 0) {
+ temporary_value.data.as_bool = CFL_TRUE;
+ }
+ }
+ else if (output_type == CFL_VARIANT_INT) {
+ errno = 0;
+ temporary_value.data.as_int64 = strtoimax(input_value->data.as_string,
+ &conversion_canary,
+ 10);
+
+ if (errno == ERANGE || errno == EINVAL) {
+ cfl_variant_destroy(*output_value);
+ *output_value = NULL;
+
+ errno = errno_backup;
+
+ return CFL_FALSE;
+ }
+ }
+ else if (output_type == CFL_VARIANT_DOUBLE) {
+ errno = 0;
+ conversion_canary = NULL;
+ temporary_value.data.as_double = strtod(input_value->data.as_string,
+ &conversion_canary);
+
+ if (errno == ERANGE) {
+ cfl_variant_destroy(*output_value);
+ *output_value = NULL;
+
+ errno = errno_backup;
+
+ return CFL_FALSE;
+ }
+ else if (temporary_value.data.as_double == 0 &&
+ conversion_canary == input_value->data.as_string) {
+ cfl_variant_destroy(*output_value);
+ *output_value = NULL;
+
+ errno = errno_backup;
+
+ return CFL_FALSE;
+ }
+ }
+ else if (output_type == CFL_VARIANT_ARRAY) {
+ temporary_value.data.as_array = cfl_array_create(1);
+
+ if (temporary_value.data.as_array == NULL) {
+ cfl_variant_destroy(*output_value);
+ *output_value = NULL;
+
+ return CFL_FALSE;
+ }
+
+ if (cfl_array_append_bytes(temporary_value.data.as_array,
+ input_value->data.as_bytes,
+ cfl_sds_len(input_value->data.as_bytes)) != 0) {
+ cfl_array_destroy(temporary_value.data.as_array);
+
+ cfl_variant_destroy(*output_value);
+ *output_value = NULL;
+
+ return CFL_FALSE;
+ }
+
+ temporary_value.data.as_array->entries[0]->type = output_type;
+ }
+ else {
+ return CFL_FALSE;
+ }
+ }
+ else if (input_value->type == CFL_VARIANT_INT) {
+ if (output_type == CFL_VARIANT_STRING ||
+ output_type == CFL_VARIANT_BYTES) {
+ temporary_value.data.as_string = cfl_sds_create_size(64);
+
+ if (temporary_value.data.as_string == NULL) {
+ return CFL_FALSE;
+ }
+
+ /* We still need to apply Wesley's truncation PR to cfl */
+ conversion_canary = (char *) cfl_sds_printf(
+ &temporary_value.data.as_string,
+ "%" PRIi64,
+ input_value->data.as_int64);
+
+ if (conversion_canary == NULL) {
+ cfl_sds_destroy(temporary_value.data.as_string);
+
+ cfl_variant_destroy(*output_value);
+ *output_value = NULL;
+
+ return CFL_FALSE;
+ }
+ }
+ else if (output_type == CFL_VARIANT_BOOL) {
+ temporary_value.data.as_bool = CFL_FALSE;
+
+ if (input_value->data.as_int64 != 0) {
+ temporary_value.data.as_bool = CFL_TRUE;
+ }
+ }
+ else if (output_type == CFL_VARIANT_INT) {
+ temporary_value.data.as_int64 = input_value->data.as_int64;
+ }
+ else if (output_type == CFL_VARIANT_DOUBLE) {
+ temporary_value.data.as_double = (double) input_value->data.as_int64;
+
+ /* This conversion could be lossy (e.g. 2^53 + 1 cannot be represented
+ * exactly as a double); we still need to decide what to do in that case.
+ */
+ if ((int64_t) temporary_value.data.as_double != input_value->data.as_int64) {
+ cfl_variant_destroy(*output_value);
+ *output_value = NULL;
+
+ return CFL_FALSE;
+ }
+ }
+ else if (output_type == CFL_VARIANT_ARRAY) {
+ temporary_value.data.as_array = cfl_array_create(1);
+
+ if (temporary_value.data.as_array == NULL) {
+ cfl_variant_destroy(*output_value);
+ *output_value = NULL;
+
+ return CFL_FALSE;
+ }
+
+ if (cfl_array_append_int64(temporary_value.data.as_array,
+ input_value->data.as_int64) != 0) {
+ cfl_array_destroy(temporary_value.data.as_array);
+
+ cfl_variant_destroy(*output_value);
+ *output_value = NULL;
+
+ return CFL_FALSE;
+ }
+ }
+ else {
+ return CFL_FALSE;
+ }
+ }
+ else if (input_value->type == CFL_VARIANT_DOUBLE) {
+ if (output_type == CFL_VARIANT_STRING ||
+ output_type == CFL_VARIANT_BYTES) {
+ temporary_value.data.as_string = cfl_sds_create_size(64);
+
+ if (temporary_value.data.as_string == NULL) {
+ return CFL_FALSE;
+ }
+
+ /* We still need to apply Wesley's truncation PR to cfl */
+ conversion_canary = (char *) cfl_sds_printf(
+ &temporary_value.data.as_string,
+ "%.17g",
+ input_value->data.as_double);
+
+ if (conversion_canary == NULL) {
+ cfl_sds_destroy(temporary_value.data.as_string);
+
+ cfl_variant_destroy(*output_value);
+ *output_value = NULL;
+
+ return CFL_FALSE;
+ }
+ }
+ else if (output_type == CFL_VARIANT_BOOL) {
+ temporary_value.data.as_bool = CFL_FALSE;
+
+ if (input_value->data.as_double != 0) {
+ temporary_value.data.as_bool = CFL_TRUE;
+ }
+ }
+ else if (output_type == CFL_VARIANT_INT) {
+ temporary_value.data.as_int64 = (int64_t) round(input_value->data.as_double);
+ }
+ else if (output_type == CFL_VARIANT_DOUBLE) {
+ temporary_value.data.as_double = input_value->data.as_double;
+ }
+ else if (output_type == CFL_VARIANT_ARRAY) {
+ temporary_value.data.as_array = cfl_array_create(1);
+
+ if (temporary_value.data.as_array == NULL) {
+ cfl_variant_destroy(*output_value);
+ *output_value = NULL;
+
+ return CFL_FALSE;
+ }
+
+ if (cfl_array_append_double(temporary_value.data.as_array,
+ input_value->data.as_double) != 0) {
+ cfl_array_destroy(temporary_value.data.as_array);
+
+ cfl_variant_destroy(*output_value);
+ *output_value = NULL;
+
+ return CFL_FALSE;
+ }
+ }
+ else {
+ return CFL_FALSE;
+ }
+ }
+ else if (input_value->type == CFL_VARIANT_KVLIST) {
+ if (output_type == CFL_VARIANT_STRING ||
+ output_type == CFL_VARIANT_BYTES) {
+ temporary_value.data.as_string = cfl_variant_convert_to_json(input_value);
+
+ if (temporary_value.data.as_string == NULL) {
+ return CFL_FALSE;
+ }
+ }
+ else {
+ return CFL_FALSE;
+ }
+ }
+ else if (input_value->type == CFL_VARIANT_ARRAY) {
+ if (output_type == CFL_VARIANT_STRING ||
+ output_type == CFL_VARIANT_BYTES) {
+ temporary_value.data.as_string = cfl_variant_convert_to_json(input_value);
+
+ if (temporary_value.data.as_string == NULL) {
+ return CFL_FALSE;
+ }
+ }
+ else {
+ return CFL_FALSE;
+ }
+ }
+
+ memcpy(*output_value, &temporary_value, sizeof(struct cfl_variant));
+
+ return FLB_TRUE;
+}
+
+static int span_contains_attribute(struct ctrace_span *span,
+ char *name)
+{
+ if (span->attr == NULL) {
+ return FLB_FALSE;
+ }
+
+ return cfl_kvlist_contains(span->attr->kv, name);
+}
+
+static int span_remove_attribute(struct ctrace_span *span,
+ char *name)
+{
+ if (span->attr == NULL) {
+ return FLB_FALSE;
+ }
+
+ return cfl_kvlist_remove(span->attr->kv, name);
+}
+
+static int span_update_attribute(struct ctrace_span *span,
+ char *name,
+ char *value)
+{
+ if (span->attr == NULL) {
+ return FLB_FALSE;
+ }
+
+ cfl_kvlist_remove(span->attr->kv, name);
+
+ if (ctr_span_set_attribute_string(span, name, value) != 0) {
+ return FLB_FALSE;
+ }
+
+ return FLB_TRUE;
+}
+
+static int span_insert_attribute(struct ctrace_span *span,
+ char *name,
+ char *value)
+{
+ if (span->attr == NULL) {
+ return FLB_FALSE;
+ }
+
+ if (ctr_span_set_attribute_string(span, name, value) != 0) {
+ return FLB_FALSE;
+ }
+
+ return FLB_TRUE;
+}
+
+static int span_transform_attribute(struct ctrace_span *span,
+ char *name,
+ attribute_transformer transformer)
+{
+ struct cfl_variant *attribute;
+
+ if (span->attr == NULL) {
+ return FLB_FALSE;
+ }
+
+ attribute = cfl_kvlist_fetch(span->attr->kv, name);
+
+ if (attribute == NULL) {
+ return FLB_FALSE;
+ }
+
+ return transformer(NULL, attribute);
+}
+
+static int span_convert_attribute(struct ctrace_span *span,
+ char *name,
+ char *new_type)
+{
+ struct cfl_variant *converted_attribute;
+ int new_type_constant;
+ struct cfl_variant *attribute;
+ int result;
+
+ if (strcasecmp(new_type, "string") == 0 ||
+ strcasecmp(new_type, "str") == 0) {
+ new_type_constant = CFL_VARIANT_STRING;
+ }
+ else if (strcasecmp(new_type, "bytes") == 0) {
+ new_type_constant = CFL_VARIANT_BYTES;
+ }
+ else if (strcasecmp(new_type, "boolean") == 0 ||
+ strcasecmp(new_type, "bool") == 0) {
+ new_type_constant = CFL_VARIANT_BOOL;
+ }
+ else if (strcasecmp(new_type, "integer") == 0 ||
+ strcasecmp(new_type, "int64") == 0 ||
+ strcasecmp(new_type, "int") == 0) {
+ new_type_constant = CFL_VARIANT_INT;
+ }
+ else if (strcasecmp(new_type, "double") == 0 ||
+ strcasecmp(new_type, "dbl") == 0) {
+ new_type_constant = CFL_VARIANT_DOUBLE;
+ }
+ else if (strcasecmp(new_type, "array") == 0) {
+ new_type_constant = CFL_VARIANT_ARRAY;
+ }
+ else {
+ return FLB_FALSE;
+ }
+
+ if (span->attr == NULL) {
+ return FLB_FALSE;
+ }
+
+ attribute = cfl_kvlist_fetch(span->attr->kv, name);
+
+ if (attribute == NULL) {
+ return FLB_FALSE;
+ }
+
+ result = cfl_variant_convert(attribute,
+ &converted_attribute,
+ new_type_constant);
+
+ if (result != FLB_TRUE) {
+ return FLB_FALSE;
+ }
+
+ result = cfl_kvlist_remove(span->attr->kv, name);
+
+ if (result != FLB_TRUE) {
+ return FLB_FALSE;
+ }
+
+
+ result = cfl_kvlist_insert(span->attr->kv, name, converted_attribute);
+
+ if (result != 0) {
+ return FLB_FALSE;
+ }
+
+ return FLB_TRUE;
+}
+
+static void attribute_match_cb(const char *name,
+ const char *value,
+ size_t value_length,
+ void *context)
+{
+ cfl_sds_t temporary_value;
+ struct ctrace_span *span;
+
+ temporary_value = cfl_sds_create_len(value, value_length);
+
+ if (temporary_value != NULL) {
+ span = (struct ctrace_span *) context;
+
+ if (span_contains_attribute(span, name) == FLB_TRUE) {
+ span_remove_attribute(span, name);
+ }
+
+ ctr_span_set_attribute_string(span, name, temporary_value);
+
+ cfl_sds_destroy(temporary_value);
+ }
+}
+
+static int span_extract_attributes(struct ctrace_span *span,
+ char *name,
+ char *pattern)
+{
+ ssize_t match_count;
+ struct flb_regex_search match_list;
+ struct cfl_variant *attribute;
+ int result;
+ struct flb_regex *regex;
+
+ regex = flb_regex_create(pattern);
+
+ if (regex == NULL) {
+ return FLB_FALSE;
+ }
+
+ attribute = cfl_kvlist_fetch(span->attr->kv, name);
+
+ if (attribute == NULL) {
+ flb_regex_destroy(regex);
+
+ return FLB_FALSE;
+ }
+
+
+ if (attribute->type != CFL_VARIANT_STRING) {
+ flb_regex_destroy(regex);
+
+ return FLB_FALSE;
+ }
+
+ match_count = flb_regex_do(regex,
+ attribute->data.as_string,
+ cfl_sds_len(attribute->data.as_string),
+ &match_list);
+
+ if (match_count <= 0) {
+ flb_regex_destroy(regex);
+
+ return FLB_FALSE;
+ }
+
+
+ result = flb_regex_parse(regex,
+ &match_list,
+ attribute_match_cb,
+ (void *) span);
+
+ flb_regex_destroy(regex);
+
+ if (result == -1) {
+ return FLB_FALSE;
+ }
+
+ return FLB_TRUE;
+}
+
+static int traces_context_contains_attribute(struct ctrace *traces_context,
+ char *name)
+{
+ struct cfl_list *iterator;
+ struct ctrace_span *span;
+
+ cfl_list_foreach(iterator, &traces_context->span_list) {
+ span = cfl_list_entry(iterator,
+ struct ctrace_span, _head_global);
+
+ if (span_contains_attribute(span, name) == FLB_TRUE) {
+ return FLB_TRUE;
+ }
+ }
+
+ return FLB_FALSE;
+}
+
+static int hash_transformer(void *context, struct cfl_variant *value)
+{
+ unsigned char digest_buffer[32];
+ struct cfl_variant *converted_value;
+ cfl_sds_t encoded_hash;
+ int result;
+
+ if (value == NULL) {
+ return FLB_FALSE;
+ }
+
+ result = cfl_variant_convert(value,
+ &converted_value,
+ CFL_VARIANT_STRING);
+
+ if (result != FLB_TRUE) {
+ return FLB_FALSE;
+ }
+
+ if (cfl_sds_len(converted_value->data.as_string) == 0) {
+ cfl_variant_destroy(converted_value);
+
+ return FLB_TRUE;
+ }
+
+ result = flb_hash_simple(FLB_HASH_SHA256,
+ (unsigned char *) converted_value->data.as_string,
+ cfl_sds_len(converted_value->data.as_string),
+ digest_buffer,
+ sizeof(digest_buffer));
+
+ if (result != FLB_CRYPTO_SUCCESS) {
+ cfl_variant_destroy(converted_value);
+
+ return FLB_FALSE;
+ }
+
+ result = hex_encode(digest_buffer,
+ sizeof(digest_buffer),
+ &converted_value->data.as_string);
+
+ if (result != FLB_TRUE) {
+ cfl_variant_destroy(converted_value);
+
+ return FLB_FALSE;
+ }
+
+ encoded_hash = cfl_sds_create(converted_value->data.as_string);
+
+ if (encoded_hash == NULL) {
+ cfl_variant_destroy(converted_value);
+
+ return FLB_FALSE;
+ }
+
+ if (value->type == CFL_VARIANT_STRING ||
+ value->type == CFL_VARIANT_BYTES) {
+ cfl_sds_destroy(value->data.as_string);
+ }
+ else if (value->type == CFL_VARIANT_ARRAY) {
+ cfl_array_destroy(value->data.as_array);
+ }
+ else if (value->type == CFL_VARIANT_KVLIST) {
+ cfl_kvlist_destroy(value->data.as_kvlist);
+ }
+
+ value->type = CFL_VARIANT_STRING;
+ value->data.as_string = encoded_hash;
+
+ return FLB_TRUE;
+}
+
+static int traces_context_hash_attribute(struct ctrace *traces_context,
+ char *name)
+{
+ struct cfl_list *iterator;
+ struct ctrace_span *span;
+
+ cfl_list_foreach(iterator, &traces_context->span_list) {
+ span = cfl_list_entry(iterator,
+ struct ctrace_span, _head_global);
+
+ if (span_contains_attribute(span, name) == FLB_TRUE) {
+ if (span_transform_attribute(span, name, hash_transformer) != FLB_TRUE) {
+ return FLB_FALSE;
+ }
+ }
+ }
+
+ return FLB_TRUE;
+}
+
+static int traces_context_remove_attribute(struct ctrace *traces_context,
+ char *name)
+{
+ struct cfl_list *iterator;
+ struct ctrace_span *span;
+
+ cfl_list_foreach(iterator, &traces_context->span_list) {
+ span = cfl_list_entry(iterator,
+ struct ctrace_span, _head_global);
+
+ if (span_contains_attribute(span, name) == FLB_TRUE) {
+ if (span_remove_attribute(span, name) != FLB_TRUE) {
+ return FLB_FALSE;
+ }
+ }
+ }
+
+ return FLB_TRUE;
+}
+
+static int traces_context_update_attribute(struct ctrace *traces_context,
+ char *name,
+ char *value)
+{
+ struct cfl_list *iterator;
+ struct ctrace_span *span;
+
+ cfl_list_foreach(iterator, &traces_context->span_list) {
+ span = cfl_list_entry(iterator,
+ struct ctrace_span, _head_global);
+
+ if (span_contains_attribute(span, name) == FLB_TRUE) {
+ if (span_update_attribute(span, name, value) != FLB_TRUE) {
+ return FLB_FALSE;
+ }
+ }
+ }
+
+ return FLB_TRUE;
+}
+
+static int traces_context_insert_attribute(struct ctrace *traces_context,
+ char *name,
+ char *value)
+{
+ struct cfl_list *iterator;
+ struct ctrace_span *span;
+
+ cfl_list_foreach(iterator, &traces_context->span_list) {
+ span = cfl_list_entry(iterator,
+ struct ctrace_span, _head_global);
+
+ if (span_contains_attribute(span, name) == FLB_FALSE) {
+ if (span_insert_attribute(span, name, value) != FLB_TRUE) {
+ return FLB_FALSE;
+ }
+ }
+ }
+
+ return FLB_TRUE;
+}
+
+static int traces_context_upsert_attribute(struct ctrace *traces_context,
+ char *name,
+ char *value)
+{
+ struct cfl_list *iterator;
+ struct ctrace_span *span;
+
+ cfl_list_foreach(iterator, &traces_context->span_list) {
+ span = cfl_list_entry(iterator,
+ struct ctrace_span, _head_global);
+
+ if (span_contains_attribute(span, name) == FLB_TRUE) {
+ if (span_update_attribute(span, name, value) != FLB_TRUE) {
+ return FLB_FALSE;
+ }
+ }
+ else {
+ if (span_insert_attribute(span, name, value) != FLB_TRUE) {
+ return FLB_FALSE;
+ }
+ }
+ }
+
+ return FLB_TRUE;
+}
+
+static int traces_context_convert_attribute(struct ctrace *traces_context,
+ char *name,
+ char *new_type)
+{
+ struct cfl_list *iterator;
+ struct ctrace_span *span;
+
+ cfl_list_foreach(iterator, &traces_context->span_list) {
+ span = cfl_list_entry(iterator,
+ struct ctrace_span, _head_global);
+
+ if (span_contains_attribute(span, name) == FLB_TRUE) {
+ if (span_convert_attribute(span, name, new_type) != FLB_TRUE) {
+ return FLB_FALSE;
+ }
+ }
+ }
+
+ return FLB_TRUE;
+}
+
+static int traces_context_extract_attribute(struct ctrace *traces_context,
+ char *name,
+ char *pattern)
+{
+ struct cfl_list *iterator;
+ struct ctrace_span *span;
+
+ cfl_list_foreach(iterator, &traces_context->span_list) {
+ span = cfl_list_entry(iterator,
+ struct ctrace_span, _head_global);
+
+ if (span_contains_attribute(span, name) == FLB_TRUE) {
+ if (span_extract_attributes(span, name, pattern) != FLB_TRUE) {
+ return FLB_FALSE;
+ }
+ }
+ }
+
+ return FLB_TRUE;
+}
+
+static int delete_attributes(struct ctrace *traces_context,
+ struct mk_list *attributes)
+{
+ struct mk_list *iterator;
+ int result;
+ struct flb_slist_entry *entry;
+
+ mk_list_foreach(iterator, attributes) {
+ entry = mk_list_entry(iterator, struct flb_slist_entry, _head);
+
+ result = traces_context_contains_attribute(traces_context,
+ entry->str);
+
+ if (result == FLB_TRUE) {
+ result = traces_context_remove_attribute(traces_context,
+ entry->str);
+
+ if (result == FLB_FALSE) {
+ return FLB_PROCESSOR_FAILURE;
+ }
+ }
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
+static int update_attributes(struct ctrace *traces_context,
+ struct cfl_list *attributes)
+{
+ struct cfl_list *iterator;
+ int result;
+ struct cfl_kv *pair;
+
+ cfl_list_foreach(iterator, attributes) {
+ pair = cfl_list_entry(iterator, struct cfl_kv, _head);
+
+ result = traces_context_update_attribute(traces_context,
+ pair->key,
+ pair->val);
+
+ if (result == FLB_FALSE) {
+ return FLB_PROCESSOR_FAILURE;
+ }
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
+static int upsert_attributes(struct ctrace *traces_context,
+ struct cfl_list *attributes)
+{
+ struct cfl_list *iterator;
+ int result;
+ struct cfl_kv *pair;
+
+ cfl_list_foreach(iterator, attributes) {
+ pair = cfl_list_entry(iterator, struct cfl_kv, _head);
+
+ result = traces_context_upsert_attribute(traces_context,
+ pair->key,
+ pair->val);
+
+ if (result == FLB_FALSE) {
+ return FLB_PROCESSOR_FAILURE;
+ }
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
+static int convert_attributes(struct ctrace *traces_context,
+ struct cfl_list *attributes)
+{
+ struct cfl_list *iterator;
+ int result;
+ struct cfl_kv *pair;
+
+ cfl_list_foreach(iterator, attributes) {
+ pair = cfl_list_entry(iterator, struct cfl_kv, _head);
+
+ result = traces_context_convert_attribute(traces_context,
+ pair->key,
+ pair->val);
+
+ if (result == FLB_FALSE) {
+ return FLB_PROCESSOR_FAILURE;
+ }
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
+static int extract_attributes(struct ctrace *traces_context,
+ struct cfl_list *attributes)
+{
+ struct cfl_list *iterator;
+ int result;
+ struct cfl_kv *pair;
+
+ cfl_list_foreach(iterator, attributes) {
+ pair = cfl_list_entry(iterator, struct cfl_kv, _head);
+
+ result = traces_context_extract_attribute(traces_context,
+ pair->key,
+ pair->val);
+
+ if (result == FLB_FALSE) {
+ return FLB_PROCESSOR_FAILURE;
+ }
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
+static int insert_attributes(struct ctrace *traces_context,
+ struct cfl_list *attributes)
+{
+ struct cfl_list *iterator;
+ int result;
+ struct cfl_kv *pair;
+
+ cfl_list_foreach(iterator, attributes) {
+ pair = cfl_list_entry(iterator, struct cfl_kv, _head);
+
+ result = traces_context_insert_attribute(traces_context,
+ pair->key,
+ pair->val);
+
+ if (result == FLB_FALSE) {
+ return FLB_PROCESSOR_FAILURE;
+ }
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
+static int hash_attributes(struct ctrace *traces_context,
+ struct mk_list *attributes)
+{
+ struct mk_list *iterator;
+ int result;
+ struct flb_slist_entry *entry;
+
+ mk_list_foreach(iterator, attributes) {
+ entry = mk_list_entry(iterator, struct flb_slist_entry, _head);
+
+ result = traces_context_contains_attribute(traces_context,
+ entry->str);
+
+ if (result == FLB_TRUE) {
+ result = traces_context_hash_attribute(traces_context,
+ entry->str);
+
+ if (result == FLB_FALSE) {
+ return FLB_PROCESSOR_FAILURE;
+ }
+ }
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
+static int cb_process_traces(struct flb_processor_instance *processor_instance,
+ struct ctrace *traces_context,
+ const char *tag,
+ int tag_len)
+{
+ struct internal_processor_context *processor_context;
+ int result;
+
+ processor_context =
+ (struct internal_processor_context *) processor_instance->context;
+
+ result = delete_attributes(traces_context,
+ &processor_context->delete_attributes);
+
+ if (result == FLB_PROCESSOR_SUCCESS) {
+ result = update_attributes(traces_context,
+ &processor_context->update_attributes);
+ }
+
+ if (result == FLB_PROCESSOR_SUCCESS) {
+ result = upsert_attributes(traces_context,
+ &processor_context->upsert_attributes);
+ }
+
+ if (result == FLB_PROCESSOR_SUCCESS) {
+ result = insert_attributes(traces_context,
+ &processor_context->insert_attributes);
+ }
+
+ if (result == FLB_PROCESSOR_SUCCESS) {
+ result = convert_attributes(traces_context,
+ &processor_context->convert_attributes);
+
+ /* note: the result of convert_attributes is discarded here,
+ * so a failed type conversion does not stop the remaining
+ * attribute modifications from being applied.
+ */
+ result = FLB_PROCESSOR_SUCCESS;
+ }
+
+ if (result == FLB_PROCESSOR_SUCCESS) {
+ result = extract_attributes(traces_context,
+ &processor_context->extract_attributes);
+ }
+
+ if (result == FLB_PROCESSOR_SUCCESS) {
+ result = hash_attributes(traces_context,
+ &processor_context->hash_attributes);
+ }
+
+ if (result != FLB_PROCESSOR_SUCCESS) {
+ return FLB_PROCESSOR_FAILURE;
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_SLIST_1, "update", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct internal_processor_context,
+ update_list),
+ "Updates an attribute. Usage : 'update name value'"
+ },
+ {
+ FLB_CONFIG_MAP_SLIST_1, "insert", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct internal_processor_context,
+ insert_list),
+ "Inserts an attribute. Usage : 'insert name value'"
+ },
+ {
+ FLB_CONFIG_MAP_SLIST_1, "upsert", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct internal_processor_context,
+ upsert_list),
+ "Inserts or updates an attribute. Usage : 'upsert name value'"
+ },
+ {
+ FLB_CONFIG_MAP_SLIST_1, "convert", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct internal_processor_context,
+ convert_list),
+ "Converts an attribute. Usage : 'convert name new_type'"
+ },
+ {
+ FLB_CONFIG_MAP_SLIST_1, "extract", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct internal_processor_context,
+ extract_list),
+ "Extracts regular expression match groups as individual attributes. Usage : 'extract (?P<first_word>[^ ]*) (?P<second_word>[^ ]*)'"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "delete", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct internal_processor_context,
+ delete_list),
+ "Deletes an attribute. Usage : 'delete name'"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "hash", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct internal_processor_context,
+ hash_list),
+ "Replaces an attributes value with its SHA256 hash. Usage : 'hash name'"
+ },
+
+ /* EOF */
+ {0}
+};
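+
+/* Configuration sketch (editor's illustration, not from the original
+ * source): when this processor is attached to a traces pipeline in a
+ * YAML configuration, the directives registered above are used as
+ * shown below; the attribute names and values are placeholders.
+ *
+ *   pipeline:
+ *     inputs:
+ *       - name: opentelemetry
+ *         processors:
+ *           traces:
+ *             - name: attributes
+ *               update: service.name payments
+ *               upsert: deployment.environment production
+ *               convert: http.status_code int
+ *               delete: internal.debug_id
+ *               hash: user.email
+ */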
+
+struct flb_processor_plugin processor_attributes_plugin = {
+ .name = "attributes",
+ .description = "Modifies metrics attributes",
+ .cb_init = cb_init,
+ .cb_process_logs = NULL,
+ .cb_process_metrics = NULL,
+ .cb_process_traces = cb_process_traces,
+ .cb_exit = cb_exit,
+ .config_map = config_map,
+ .flags = 0
+};
diff --git a/src/fluent-bit/plugins/processor_attributes/variant_utils.h b/src/fluent-bit/plugins/processor_attributes/variant_utils.h
new file mode 100644
index 000000000..7ba376273
--- /dev/null
+++ b/src/fluent-bit/plugins/processor_attributes/variant_utils.h
@@ -0,0 +1,626 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* CMetrics
+ * ========
+ * Copyright 2021-2022 The CMetrics Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VARIANT_UTILS_H
+#define VARIANT_UTILS_H
+
+#include <mpack/mpack.h>
+
+/* These are the only functions meant for general use.
+ * The kvlist packing and unpacking functions are exposed
+ * because the internal and external metadata kvlists in the
+ * cmetrics context are not contained by a variant instance.
+ *
+ * Result :
+ * Upon success all of these return 0, otherwise they
+ * return the innermost error code, which should be treated
+ * as an opaque value.
+ *
+ * Notes :
+ * When decoding, -1 means the check after mpack_read_tag
+ * failed and -2 means the type was not the one expected.
+ */
+
+static inline int pack_cfl_variant(mpack_writer_t *writer,
+ struct cfl_variant *value);
+
+static inline int pack_cfl_variant_kvlist(mpack_writer_t *writer,
+ struct cfl_kvlist *kvlist);
+
+static inline int unpack_cfl_variant(mpack_reader_t *reader,
+ struct cfl_variant **value);
+
+static inline int unpack_cfl_kvlist(mpack_reader_t *reader,
+ struct cfl_kvlist **result_kvlist);
+
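+/* Usage sketch (editor's illustration, not part of the original header):
+ * a minimal round trip for a kvlist that was populated elsewhere.
+ * Error handling is reduced to early returns and the msgpack buffer
+ * must eventually be released with MPACK_FREE().
+ *
+ *   mpack_writer_t writer;
+ *   mpack_reader_t reader;
+ *   struct cfl_kvlist *decoded;
+ *   char *data = NULL;
+ *   size_t size = 0;
+ *
+ *   mpack_writer_init_growable(&writer, &data, &size);
+ *   pack_cfl_variant_kvlist(&writer, kvlist);
+ *
+ *   if (mpack_writer_destroy(&writer) != mpack_ok) {
+ *       return -1;
+ *   }
+ *
+ *   mpack_reader_init_data(&reader, data, size);
+ *
+ *   if (unpack_cfl_kvlist(&reader, &decoded) != 0) {
+ *       mpack_reader_destroy(&reader);
+ *       return -1;
+ *   }
+ *
+ *   mpack_reader_destroy(&reader);
+ *   cfl_kvlist_destroy(decoded);
+ */
+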
+/* Packers */
+static inline int pack_cfl_variant_string(mpack_writer_t *writer,
+ char *value)
+{
+ mpack_write_cstr(writer, value);
+
+ return 0;
+}
+
+static inline int pack_cfl_variant_binary(mpack_writer_t *writer,
+ char *value,
+ size_t length)
+{
+ mpack_write_bin(writer, value, length);
+
+ return 0;
+}
+
+static inline int pack_cfl_variant_boolean(mpack_writer_t *writer,
+ unsigned int value)
+{
+ mpack_write_bool(writer, value);
+
+ return 0;
+}
+
+static inline int pack_cfl_variant_int64(mpack_writer_t *writer,
+ int64_t value)
+{
+ mpack_write_int(writer, value);
+
+ return 0;
+}
+
+static inline int pack_cfl_variant_double(mpack_writer_t *writer,
+ double value)
+{
+ mpack_write_double(writer, value);
+
+ return 0;
+}
+
+static inline int pack_cfl_variant_array(mpack_writer_t *writer,
+ struct cfl_array *array)
+{
+ size_t entry_count;
+ struct cfl_variant *entry_value;
+ int result;
+ size_t index;
+
+ entry_count = array->entry_count;
+
+ mpack_start_array(writer, entry_count);
+
+ for (index = 0 ; index < entry_count ; index++) {
+ entry_value = cfl_array_fetch_by_index(array, index);
+
+ if (entry_value == NULL) {
+ return -1;
+ }
+
+ result = pack_cfl_variant(writer, entry_value);
+
+ if (result != 0) {
+ return result;
+ }
+ }
+
+ mpack_finish_array(writer);
+
+ return 0;
+}
+
+static inline int pack_cfl_variant_kvlist(mpack_writer_t *writer,
+ struct cfl_kvlist *kvlist)
+{
+ size_t entry_count;
+ struct cfl_list *iterator;
+ struct cfl_kvpair *kvpair;
+ int result;
+
+ entry_count = cfl_kvlist_count(kvlist);
+
+ mpack_start_map(writer, entry_count);
+
+ cfl_list_foreach(iterator, &kvlist->list) {
+ kvpair = cfl_list_entry(iterator, struct cfl_kvpair, _head);
+
+ mpack_write_cstr(writer, kvpair->key);
+
+ result = pack_cfl_variant(writer, kvpair->val);
+
+ if (result != 0) {
+ return result;
+ }
+ }
+
+ mpack_finish_map(writer);
+
+ return 0;
+}
+
+static inline int pack_cfl_variant(mpack_writer_t *writer,
+ struct cfl_variant *value)
+{
+ int result;
+
+ if (value->type == CFL_VARIANT_STRING) {
+ result = pack_cfl_variant_string(writer, value->data.as_string);
+ }
+ else if (value->type == CFL_VARIANT_BOOL) {
+ result = pack_cfl_variant_boolean(writer, value->data.as_bool);
+ }
+ else if (value->type == CFL_VARIANT_INT) {
+ result = pack_cfl_variant_int64(writer, value->data.as_int64);
+ }
+ else if (value->type == CFL_VARIANT_DOUBLE) {
+ result = pack_cfl_variant_double(writer, value->data.as_double);
+ }
+ else if (value->type == CFL_VARIANT_ARRAY) {
+ result = pack_cfl_variant_array(writer, value->data.as_array);
+ }
+ else if (value->type == CFL_VARIANT_KVLIST) {
+ result = pack_cfl_variant_kvlist(writer, value->data.as_kvlist);
+ }
+ else if (value->type == CFL_VARIANT_BYTES) {
+ result = pack_cfl_variant_binary(writer,
+ value->data.as_bytes,
+ cfl_sds_len(value->data.as_bytes));
+ }
+ else if (value->type == CFL_VARIANT_REFERENCE) {
+ result = pack_cfl_variant_string(writer, value->data.as_string);
+ }
+ else {
+ result = -1;
+ }
+
+ return result;
+}
+
+/* Unpackers */
+
+static inline int unpack_cfl_variant_read_tag(mpack_reader_t *reader,
+ mpack_tag_t *tag,
+ mpack_type_t expected_type)
+{
+ *tag = mpack_read_tag(reader);
+
+ if (mpack_ok != mpack_reader_error(reader)) {
+ return -1;
+ }
+
+ if (mpack_tag_type(tag) != expected_type) {
+ return -2;
+ }
+
+ return 0;
+}
+
+static inline int unpack_cfl_array(mpack_reader_t *reader,
+ struct cfl_array **result_array)
+{
+ struct cfl_array *internal_array;
+ size_t entry_count;
+ struct cfl_variant *entry_value;
+ int result;
+ size_t index;
+ mpack_tag_t tag;
+
+ result = unpack_cfl_variant_read_tag(reader, &tag, mpack_type_array);
+
+ if (result != 0) {
+ return result;
+ }
+
+ entry_count = mpack_tag_array_count(&tag);
+
+ internal_array = cfl_array_create(entry_count);
+
+ if (internal_array == NULL) {
+ return -3;
+ }
+
+ for (index = 0 ; index < entry_count ; index++) {
+ result = unpack_cfl_variant(reader, &entry_value);
+
+ if (result != 0) {
+ cfl_array_destroy(internal_array);
+
+ return -4;
+ }
+
+ result = cfl_array_append(internal_array, entry_value);
+
+ if (result != 0) {
+ cfl_array_destroy(internal_array);
+
+ return -5;
+ }
+ }
+
+ mpack_done_array(reader);
+
+ if (mpack_reader_error(reader) != mpack_ok) {
+ cfl_array_destroy(internal_array);
+
+ return -6;
+ }
+
+ *result_array = internal_array;
+
+ return 0;
+}
+
+static inline int unpack_cfl_kvlist(mpack_reader_t *reader,
+ struct cfl_kvlist **result_kvlist)
+{
+ struct cfl_kvlist *internal_kvlist;
+ char key_name[256];
+ size_t entry_count;
+ size_t key_length;
+ struct cfl_variant *key_value;
+ mpack_tag_t key_tag;
+ int result;
+ size_t index;
+ mpack_tag_t tag;
+
+ result = unpack_cfl_variant_read_tag(reader, &tag, mpack_type_map);
+
+ if (result != 0) {
+ return result;
+ }
+
+ entry_count = mpack_tag_map_count(&tag);
+
+ internal_kvlist = cfl_kvlist_create();
+
+ if (internal_kvlist == NULL) {
+ return -3;
+ }
+
+ result = 0;
+ key_value = NULL;
+
+ for (index = 0 ; index < entry_count ; index++) {
+ result = unpack_cfl_variant_read_tag(reader, &key_tag, mpack_type_str);
+
+ if (result != 0) {
+ result = -4;
+
+ break;
+ }
+
+ key_length = mpack_tag_str_length(&key_tag);
+
+ if (key_length >= sizeof(key_name)) {
+ result = -5;
+
+ break;
+ }
+
+ mpack_read_cstr(reader, key_name, sizeof(key_name), key_length);
+
+ key_name[key_length] = '\0';
+
+ mpack_done_str(reader);
+
+ if (mpack_ok != mpack_reader_error(reader)) {
+ result = -6;
+
+ break;
+ }
+
+ result = unpack_cfl_variant(reader, &key_value);
+
+ if (result != 0) {
+ result = -7;
+
+ break;
+ }
+
+ result = cfl_kvlist_insert(internal_kvlist, key_name, key_value);
+
+ if (result != 0) {
+ result = -8;
+
+ break;
+ }
+
+ key_value = NULL;
+ }
+
+ mpack_done_map(reader);
+
+ if (mpack_reader_error(reader) != mpack_ok) {
+ result = -9;
+ }
+
+ if (result != 0) {
+ cfl_kvlist_destroy(internal_kvlist);
+
+ if (key_value != NULL) {
+ cfl_variant_destroy(key_value);
+ }
+ }
+ else {
+ *result_kvlist = internal_kvlist;
+ }
+
+ return result;
+}
+
+static inline int unpack_cfl_variant_string(mpack_reader_t *reader,
+ struct cfl_variant **value)
+{
+ size_t value_length;
+ char *value_data;
+ int result;
+ mpack_tag_t tag;
+
+ result = unpack_cfl_variant_read_tag(reader, &tag, mpack_type_str);
+
+ if (result != 0) {
+ return result;
+ }
+
+ value_length = mpack_tag_str_length(&tag);
+
+ value_data = cfl_sds_create_size(value_length + 1);
+
+ if (value_data == NULL) {
+ return -3;
+ }
+
+ cfl_sds_set_len(value_data, value_length);
+
+ mpack_read_cstr(reader, value_data, value_length + 1, value_length);
+
+ mpack_done_str(reader);
+
+ if (mpack_reader_error(reader) != mpack_ok) {
+ cfl_sds_destroy(value_data);
+
+ return -4;
+ }
+
+ *value = cfl_variant_create_from_reference(value_data);
+
+ if (*value == NULL) {
+ return -5;
+ }
+
+ (*value)->type = CFL_VARIANT_STRING;
+
+ return 0;
+}
+
+static inline int unpack_cfl_variant_binary(mpack_reader_t *reader,
+ struct cfl_variant **value)
+{
+ size_t value_length;
+ char *value_data;
+ int result;
+ mpack_tag_t tag;
+
+ result = unpack_cfl_variant_read_tag(reader, &tag, mpack_type_bin);
+
+ if (result != 0) {
+ return result;
+ }
+
+ value_length = mpack_tag_bin_length(&tag);
+
+ value_data = cfl_sds_create_size(value_length);
+
+ if (value_data == NULL) {
+ return -3;
+ }
+
+ cfl_sds_set_len(value_data, value_length);
+
+ mpack_read_bytes(reader, value_data, value_length);
+
+ mpack_done_bin(reader);
+
+ if (mpack_reader_error(reader) != mpack_ok) {
+ cfl_sds_destroy(value_data);
+
+ return -4;
+ }
+
+ *value = cfl_variant_create_from_reference(value_data);
+
+ if (*value == NULL) {
+ return -5;
+ }
+
+ (*value)->type = CFL_VARIANT_BYTES;
+
+ return 0;
+}
+
+static inline int unpack_cfl_variant_boolean(mpack_reader_t *reader,
+ struct cfl_variant **value)
+{
+ int result;
+ mpack_tag_t tag;
+
+ result = unpack_cfl_variant_read_tag(reader, &tag, mpack_type_bool);
+
+ if (result != 0) {
+ return result;
+ }
+
+ *value = cfl_variant_create_from_bool((unsigned int) mpack_tag_bool_value(&tag));
+
+ if (*value == NULL) {
+ return -3;
+ }
+
+ return 0;
+}
+
+static inline int unpack_cfl_variant_uint64(mpack_reader_t *reader,
+ struct cfl_variant **value)
+{
+ int result;
+ mpack_tag_t tag;
+
+ result = unpack_cfl_variant_read_tag(reader, &tag, mpack_type_uint);
+
+ if (result != 0) {
+ return result;
+ }
+
+ *value = cfl_variant_create_from_int64((int64_t) mpack_tag_uint_value(&tag));
+
+ if (*value == NULL) {
+ return -3;
+ }
+
+ return 0;
+}
+
+static inline int unpack_cfl_variant_int64(mpack_reader_t *reader,
+ struct cfl_variant **value)
+{
+ int result;
+ mpack_tag_t tag;
+
+ result = unpack_cfl_variant_read_tag(reader, &tag, mpack_type_int);
+
+ if (result != 0) {
+ return result;
+ }
+
+ *value = cfl_variant_create_from_int64((int64_t) mpack_tag_int_value(&tag));
+
+ if (*value == NULL) {
+ return -3;
+ }
+
+ return 0;
+}
+
+static inline int unpack_cfl_variant_double(mpack_reader_t *reader,
+ struct cfl_variant **value)
+{
+ int result;
+ mpack_tag_t tag;
+
+ result = unpack_cfl_variant_read_tag(reader, &tag, mpack_type_double);
+
+ if (result != 0) {
+ return result;
+ }
+
+ *value = cfl_variant_create_from_double(mpack_tag_double_value(&tag));
+
+ if (*value == NULL) {
+ return -3;
+ }
+
+ return 0;
+}
+
+static inline int unpack_cfl_variant_array(mpack_reader_t *reader,
+ struct cfl_variant **value)
+{
+ struct cfl_array *unpacked_array;
+ int result;
+
+ result = unpack_cfl_array(reader, &unpacked_array);
+
+ if (result != 0) {
+ return result;
+ }
+
+ *value = cfl_variant_create_from_array(unpacked_array);
+
+ if (*value == NULL) {
+ return -3;
+ }
+
+ return 0;
+}
+
+static inline int unpack_cfl_variant_kvlist(mpack_reader_t *reader,
+ struct cfl_variant **value)
+{
+ struct cfl_kvlist *unpacked_kvlist;
+ int result;
+
+ result = unpack_cfl_kvlist(reader, &unpacked_kvlist);
+
+ if (result != 0) {
+ return result;
+ }
+
+ *value = cfl_variant_create_from_kvlist(unpacked_kvlist);
+
+ if (*value == NULL) {
+ return -3;
+ }
+
+ return 0;
+}
+
+static inline int unpack_cfl_variant(mpack_reader_t *reader,
+ struct cfl_variant **value)
+{
+ mpack_type_t value_type;
+ int result;
+ mpack_tag_t tag;
+
+ tag = mpack_peek_tag(reader);
+
+ if (mpack_ok != mpack_reader_error(reader)) {
+ return -1;
+ }
+
+ value_type = mpack_tag_type(&tag);
+
+ if (value_type == mpack_type_str) {
+ result = unpack_cfl_variant_string(reader, value);
+ }
+ else if (value_type == mpack_type_bool) {
+ result = unpack_cfl_variant_boolean(reader, value);
+ }
+ else if (value_type == mpack_type_int) {
+ result = unpack_cfl_variant_int64(reader, value);
+ }
+ else if (value_type == mpack_type_uint) {
+ result = unpack_cfl_variant_uint64(reader, value);
+ }
+ else if (value_type == mpack_type_double) {
+ result = unpack_cfl_variant_double(reader, value);
+ }
+ else if (value_type == mpack_type_array) {
+ result = unpack_cfl_variant_array(reader, value);
+ }
+ else if (value_type == mpack_type_map) {
+ result = unpack_cfl_variant_kvlist(reader, value);
+ }
+ else if (value_type == mpack_type_bin) {
+ result = unpack_cfl_variant_binary(reader, value);
+ }
+ else {
+ result = -1;
+ }
+
+ return result;
+}
+
+#endif
diff --git a/src/fluent-bit/plugins/processor_labels/CMakeLists.txt b/src/fluent-bit/plugins/processor_labels/CMakeLists.txt
new file mode 100644
index 000000000..93adb1190
--- /dev/null
+++ b/src/fluent-bit/plugins/processor_labels/CMakeLists.txt
@@ -0,0 +1,4 @@
+set(src
+ labels.c)
+
+FLB_PLUGIN(processor_labels "${src}" "")
diff --git a/src/fluent-bit/plugins/processor_labels/labels.c b/src/fluent-bit/plugins/processor_labels/labels.c
new file mode 100644
index 000000000..2caaadc31
--- /dev/null
+++ b/src/fluent-bit/plugins/processor_labels/labels.c
@@ -0,0 +1,1784 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+
+/* Fluent Bit
+ * ==========
+ * Copyright (C) 2015-2022 The Fluent Bit Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#include <fluent-bit/flb_filter.h>
+#include <fluent-bit/flb_processor_plugin.h>
+#include <fluent-bit/flb_utils.h>
+#include <fluent-bit/flb_time.h>
+#include <fluent-bit/flb_hash.h>
+#include <fluent-bit/flb_pack.h>
+#include <fluent-bit/flb_processor.h>
+#include <fluent-bit/flb_log_event_decoder.h>
+#include <fluent-bit/flb_log_event_encoder.h>
+
+#include <cmetrics/cmetrics.h>
+#include <cmetrics/cmt_histogram.h>
+#include <cmetrics/cmt_summary.h>
+#include <cmetrics/cmt_untyped.h>
+#include <cmetrics/cmt_counter.h>
+#include <cmetrics/cmt_gauge.h>
+#include <cmetrics/cmt_map.h>
+
+#include <cfl/cfl.h>
+
+#define PROMOTE_STATIC_METRICS_ON_LABEL_INSERT
+
+typedef int (*label_transformer)(struct cmt_metric *, cfl_sds_t *value);
+
+struct internal_processor_context {
+ struct mk_list *update_list;
+ struct mk_list *insert_list;
+ struct mk_list *upsert_list;
+ struct mk_list *delete_list;
+ struct mk_list *hash_list;
+
+ /* internal labels ready to append */
+ struct cfl_list update_labels;
+ struct cfl_list insert_labels;
+ struct cfl_list upsert_labels;
+ struct mk_list delete_labels;
+ struct mk_list hash_labels;
+
+ struct flb_processor_instance *instance;
+ struct flb_config *config;
+};
+
+
+/*
+ * CMETRICS
+ */
+
+static void cmt_label_destroy(struct cmt_label *label)
+{
+ if (label != NULL) {
+ if (!cfl_list_entry_is_orphan(&label->_head)) {
+ cfl_list_del(&label->_head);
+ }
+
+ if (label->key != NULL) {
+ cfl_sds_destroy(label->key);
+ }
+
+ if (label->val != NULL) {
+ cfl_sds_destroy(label->val);
+ }
+
+ free(label);
+ }
+}
+
+/* We can't use the flb_* memory functions here because these
+ * objects will be released by cmetrics using the standard
+ * allocator.
+ */
+
+static struct cmt_map_label *cmt_map_label_create(char *name)
+{
+ struct cmt_map_label *label;
+
+ label = calloc(1, sizeof(struct cmt_map_label));
+
+ if (label != NULL) {
+ label->name = cfl_sds_create(name);
+
+ if (label->name == NULL) {
+ free(label);
+
+ label = NULL;
+ }
+
+ }
+
+ return label;
+}
+
+static void cmt_map_label_destroy(struct cmt_map_label *label)
+{
+ if (label != NULL) {
+ if (!cfl_list_entry_is_orphan(&label->_head)) {
+ cfl_list_del(&label->_head);
+ }
+
+ if (label->name != NULL) {
+ cfl_sds_destroy(label->name);
+ }
+
+ free(label);
+ }
+}
+
+static struct cmt_metric *map_metric_create(uint64_t hash,
+ int labels_count, char **labels_val)
+{
+ int i;
+ char *name;
+ struct cmt_metric *metric;
+ struct cmt_map_label *label;
+
+ metric = calloc(1, sizeof(struct cmt_metric));
+ if (!metric) {
+ cmt_errno();
+ return NULL;
+ }
+ cfl_list_init(&metric->labels);
+ metric->val = 0.0;
+ metric->hash = hash;
+
+ for (i = 0; i < labels_count; i++) {
+ label = malloc(sizeof(struct cmt_map_label));
+ if (!label) {
+ cmt_errno();
+ goto error;
+ }
+
+ name = labels_val[i];
+ label->name = cfl_sds_create(name);
+ if (!label->name) {
+ cmt_errno();
+ free(label);
+ goto error;
+ }
+ cfl_list_add(&label->_head, &metric->labels);
+ }
+
+ return metric;
+
+ error:
+ free(metric);
+ return NULL;
+}
+
+static void map_metric_destroy(struct cmt_metric *metric)
+{
+ struct cfl_list *tmp;
+ struct cfl_list *head;
+ struct cmt_map_label *label;
+
+ cfl_list_foreach_safe(head, tmp, &metric->labels) {
+ label = cfl_list_entry(head, struct cmt_map_label, _head);
+ cfl_sds_destroy(label->name);
+ cfl_list_del(&label->_head);
+ free(label);
+ }
+
+ if (metric->hist_buckets) {
+ free(metric->hist_buckets);
+ }
+ if (metric->sum_quantiles) {
+ free(metric->sum_quantiles);
+ }
+
+ cfl_list_del(&metric->_head);
+ free(metric);
+}
+
+
+/*
+ * LOCAL
+ */
+static int hex_encode(unsigned char *input_buffer,
+ size_t input_length,
+ cfl_sds_t *output_buffer)
+{
+ const char hex[] = "0123456789abcdef";
+ cfl_sds_t result;
+ size_t index;
+
+ if (cfl_sds_alloc(*output_buffer) <= (input_length * 2)) {
+ result = cfl_sds_increase(*output_buffer,
+ (input_length * 2) -
+ cfl_sds_alloc(*output_buffer));
+
+ if (result == NULL) {
+ return FLB_FALSE;
+ }
+
+ *output_buffer = result;
+ }
+
+ for (index = 0; index < input_length; index++) {
+ (*output_buffer)[index * 2 + 0] = hex[(input_buffer[index] >> 4) & 0xF];
+ (*output_buffer)[index * 2 + 1] = hex[(input_buffer[index] >> 0) & 0xF];
+ }
+
+ cfl_sds_set_len(*output_buffer, input_length * 2);
+
+ (*output_buffer)[index * 2] = '\0';
+
+ return FLB_TRUE;
+}
+
+static int process_label_modification_list_setting(
+ struct flb_processor_instance *plugin_instance,
+ const char *setting_name,
+ struct mk_list *source_list,
+ struct mk_list *destination_list)
+{
+ struct flb_config_map_val *source_entry;
+ struct mk_list *iterator;
+ int result;
+
+ if (source_list == NULL ||
+ mk_list_is_empty(source_list) == 0) {
+
+ return 0;
+ }
+
+ flb_config_map_foreach(iterator, source_entry, source_list) {
+ result = flb_slist_add(destination_list, source_entry->val.str);
+
+ if (result != 0) {
+ flb_plg_error(plugin_instance,
+ "could not append label name %s\n",
+ source_entry->val.str);
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int process_label_modification_kvlist_setting(
+ struct flb_processor_instance *plugin_instance,
+ const char *setting_name,
+ struct mk_list *source_list,
+ struct cfl_list *destination_list)
+{
+ struct cfl_kv *processed_pair;
+ struct flb_config_map_val *source_entry;
+ struct mk_list *iterator;
+ struct flb_slist_entry *value;
+ struct flb_slist_entry *key;
+
+ if (source_list == NULL ||
+ mk_list_is_empty(source_list) == 0) {
+
+ return 0;
+ }
+
+ flb_config_map_foreach(iterator, source_entry, source_list) {
+ if (mk_list_size(source_entry->val.list) != 2) {
+ flb_plg_error(plugin_instance,
+ "'%s' expects a key and a value, "
+ "e.g: '%s version 1.8.0'",
+ setting_name, setting_name);
+
+ return -1;
+ }
+
+ key = mk_list_entry_first(source_entry->val.list,
+ struct flb_slist_entry, _head);
+
+ value = mk_list_entry_last(source_entry->val.list,
+ struct flb_slist_entry, _head);
+
+ processed_pair = cfl_kv_item_create(destination_list,
+ key->str,
+ value->str);
+
+ if (processed_pair == NULL) {
+ flb_plg_error(plugin_instance,
+ "could not append label %s=%s\n",
+ key->str,
+ value->str);
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void destroy_context(struct internal_processor_context *context)
+{
+ if (context != NULL) {
+ cfl_kv_release(&context->update_labels);
+ cfl_kv_release(&context->insert_labels);
+ cfl_kv_release(&context->upsert_labels);
+ flb_slist_destroy(&context->delete_labels);
+ flb_slist_destroy(&context->hash_labels);
+
+ flb_free(context);
+ }
+}
+
+static struct internal_processor_context *
+ create_context(struct flb_processor_instance *processor_instance,
+ struct flb_config *config)
+{
+ struct internal_processor_context *context;
+ int result;
+
+ context = flb_calloc(1, sizeof(struct internal_processor_context));
+
+ if (context != NULL) {
+ context->instance = processor_instance;
+ context->config = config;
+
+ cfl_kv_init(&context->update_labels);
+ cfl_kv_init(&context->insert_labels);
+ cfl_kv_init(&context->upsert_labels);
+ flb_slist_create(&context->delete_labels);
+ flb_slist_create(&context->hash_labels);
+
+ result = flb_processor_instance_config_map_set(processor_instance, (void *) context);
+
+ if (result == 0) {
+ result = process_label_modification_kvlist_setting(processor_instance,
+ "update",
+ context->update_list,
+ &context->update_labels);
+ }
+
+ if (result == 0) {
+ result = process_label_modification_kvlist_setting(processor_instance,
+ "insert",
+ context->insert_list,
+ &context->insert_labels);
+ }
+
+ if (result == 0) {
+ result = process_label_modification_kvlist_setting(processor_instance,
+ "upsert",
+ context->upsert_list,
+ &context->upsert_labels);
+ }
+
+ if (result == 0) {
+ result = process_label_modification_list_setting(processor_instance,
+ "delete",
+ context->delete_list,
+ &context->delete_labels);
+ }
+
+ if (result == 0) {
+ result = process_label_modification_list_setting(processor_instance,
+ "hash",
+ context->hash_list,
+ &context->hash_labels);
+ }
+
+ if (result != 0) {
+ destroy_context(context);
+
+ context = NULL;
+ }
+ }
+ else {
+ flb_errno();
+ }
+
+ return context;
+}
+
+static int cb_init(struct flb_processor_instance *processor_instance,
+ void *source_plugin_instance,
+ int source_plugin_type,
+ struct flb_config *config)
+{
+ processor_instance->context = (void *) create_context(
+ processor_instance, config);
+
+ if (processor_instance->context == NULL) {
+ return FLB_PROCESSOR_FAILURE;
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
+
+static int cb_exit(struct flb_processor_instance *processor_instance)
+{
+ if (processor_instance != NULL &&
+ processor_instance->context != NULL) {
+ destroy_context(processor_instance->context);
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
+static int metrics_context_contains_static_label(struct cmt *metrics_context,
+ char *label_name)
+{
+ struct cfl_list *label_iterator;
+ struct cmt_label *label;
+
+ cfl_list_foreach(label_iterator, &metrics_context->static_labels->list) {
+ label = cfl_list_entry(label_iterator,
+ struct cmt_label, _head);
+
+ if (strcasecmp(label_name, label->key) == 0) {
+ return FLB_TRUE;
+ }
+ }
+
+ return FLB_FALSE;
+}
+
+static int metrics_context_insert_static_label(struct cmt *metrics_context,
+ char *label_name,
+ char *label_value)
+{
+ if (cmt_label_add(metrics_context, label_name, label_value) != 0) {
+ return FLB_FALSE;
+ }
+
+ return FLB_TRUE;
+}
+
+static int metrics_context_update_static_label(struct cmt *metrics_context,
+ char *label_name,
+ char *label_value)
+{
+ struct cfl_list *iterator;
+ cfl_sds_t result;
+ struct cmt_label *label;
+
+ cfl_list_foreach(iterator, &metrics_context->static_labels->list) {
+ label = cfl_list_entry(iterator,
+ struct cmt_label, _head);
+
+ if (strcasecmp(label_name, label->key) == 0) {
+ cfl_sds_set_len(label->val, 0);
+
+ result = cfl_sds_cat(label->val, label_value, strlen(label_value));
+
+ if (result == NULL) {
+ return FLB_FALSE;
+ }
+
+ label->val = result;
+
+ return FLB_TRUE;
+ }
+ }
+
+ return FLB_FALSE;
+}
+
+static int metrics_context_transform_static_label(struct cmt *metrics_context,
+ char *label_name,
+ label_transformer transformer)
+{
+ struct cfl_list *iterator;
+ struct cmt_label *label;
+
+ cfl_list_foreach(iterator, &metrics_context->static_labels->list) {
+ label = cfl_list_entry(iterator,
+ struct cmt_label, _head);
+
+ if (strcasecmp(label_name, label->key) == 0) {
+ return transformer(NULL, &label->val);
+ }
+ }
+
+ return FLB_FALSE;
+}
+
+static int metrics_context_upsert_static_label(struct cmt *metrics_context,
+ char *label_name,
+ char *label_value)
+{
+ int result;
+
+ result = metrics_context_contains_static_label(metrics_context,
+ label_name);
+
+ if (result == FLB_TRUE) {
+ return metrics_context_update_static_label(metrics_context,
+ label_name,
+ label_value);
+ }
+
+ return metrics_context_insert_static_label(metrics_context,
+ label_name,
+ label_value);
+}
+
+static int metrics_context_remove_static_label(struct cmt *metrics_context,
+ char *label_name)
+{
+ struct cfl_list *iterator;
+ struct cmt_label *label;
+
+ cfl_list_foreach(iterator,
+ &metrics_context->static_labels->list) {
+ label = cfl_list_entry(iterator, struct cmt_label, _head);
+
+ if (strcasecmp(label_name, label->key) == 0) {
+ cmt_label_destroy(label);
+
+ return FLB_TRUE;
+ }
+ }
+
+ return FLB_FALSE;
+}
+
+static ssize_t metrics_map_get_label_index(struct cmt_map *map, char *label_name)
+{
+ struct cfl_list *iterator;
+ struct cmt_map_label *label;
+ ssize_t index;
+
+ index = 0;
+
+ cfl_list_foreach(iterator, &map->label_keys) {
+ label = cfl_list_entry(iterator, struct cmt_map_label, _head);
+
+ if (strcasecmp(label_name, label->name) == 0) {
+ return index;
+ }
+
+ index++;
+ }
+
+ return -1;
+}
+
+static ssize_t metrics_map_insert_label_name(struct cmt_map *map, char *label_name)
+{
+ struct cmt_map_label *label;
+ ssize_t index;
+
+ label = cmt_map_label_create(label_name);
+
+ if (label == NULL) {
+ return -1;
+ }
+
+ map->label_count++;
+
+ cfl_list_add(&label->_head, &map->label_keys);
+
+ index = (ssize_t) cfl_list_size(&map->label_keys);
+ index--;
+
+ return index;
+}
+
+static int metrics_map_contains_label(struct cmt_map *map, char *label_name)
+{
+ ssize_t result;
+
+ result = metrics_map_get_label_index(map, label_name);
+
+ if (result != -1) {
+ return FLB_TRUE;
+ }
+
+ return FLB_FALSE;
+}
+
+static int metrics_map_remove_label_name(struct cmt_map *map,
+ size_t label_index)
+{
+ struct cfl_list *iterator;
+ struct cmt_map_label *label;
+ size_t index;
+
+ index = 0;
+
+ cfl_list_foreach(iterator, &map->label_keys) {
+ label = cfl_list_entry(iterator, struct cmt_map_label, _head);
+
+ if (label_index == index) {
+ cmt_map_label_destroy(label);
+
+ return FLB_TRUE;
+ }
+
+ index++;
+ }
+
+ return FLB_FALSE;
+}
+
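+/* Removes the label value stored at label_index in a single data point. */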
+int metrics_data_point_remove_label_value(struct cmt_metric *metric,
+ size_t label_index)
+{
+ struct cfl_list *iterator;
+ struct cmt_map_label *label;
+ size_t index;
+
+ index = 0;
+
+ cfl_list_foreach(iterator, &metric->labels) {
+ label = cfl_list_entry(iterator, struct cmt_map_label, _head);
+
+ if (label_index == index) {
+ cmt_map_label_destroy(label);
+
+ return FLB_TRUE;
+ }
+
+ index++;
+ }
+
+ return FLB_FALSE;
+}
+
+int metrics_data_point_transform_label_value(struct cmt_metric *metric,
+ size_t label_index,
+ label_transformer transformer)
+{
+ struct cfl_list *iterator;
+ struct cmt_map_label *label;
+ size_t index;
+
+ index = 0;
+
+ cfl_list_foreach(iterator, &metric->labels) {
+ label = cfl_list_entry(iterator, struct cmt_map_label, _head);
+
+ if (label_index == index) {
+ return transformer(metric, &label->name);
+ }
+
+ index++;
+ }
+
+ return FLB_FALSE;
+}
+
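+/*
+ * Sets the label value at label_index for a single data point. With insert
+ * set to FLB_TRUE a new value entry is created at that position; otherwise
+ * the existing entry is replaced only when overwrite is set or the current
+ * value is empty.
+ */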
+int metrics_data_point_set_label_value(struct cmt_metric *metric,
+ size_t label_index,
+ char *label_value,
+ int overwrite,
+ int insert)
+{
+ struct cmt_map_label *new_label;
+ struct cfl_list *iterator;
+ cfl_sds_t result;
+ size_t index;
+ struct cmt_map_label *label;
+
+ label = NULL;
+ index = 0;
+
+ cfl_list_foreach(iterator, &metric->labels) {
+ label = cfl_list_entry(iterator, struct cmt_map_label, _head);
+
+ if (label_index == index) {
+ break;
+ }
+
+ index++;
+ }
+
+ if (label_index != index) {
+ return FLB_FALSE;
+ }
+
+ if (insert == FLB_TRUE) {
+ new_label = cmt_map_label_create(label_value);
+
+ if (new_label == NULL) {
+ return FLB_FALSE;
+ }
+
+ if (label != NULL) {
+ cfl_list_add_after(&new_label->_head,
+ &label->_head,
+ &metric->labels);
+ }
+ else {
+ cfl_list_append(&new_label->_head,
+ &metric->labels);
+ }
+ }
+ else {
+ if (label == NULL) {
+ return FLB_FALSE;
+ }
+
+ if (label->name == NULL) {
+ label->name = cfl_sds_create(label_value);
+
+ if (label->name == NULL) {
+ return FLB_FALSE;
+ }
+ }
+ else {
+ if (overwrite == FLB_TRUE ||
+ cfl_sds_len(label->name) == 0) {
+ cfl_sds_set_len(label->name, 0);
+
+ result = cfl_sds_cat(label->name,
+ label_value,
+ strlen(label_value));
+
+ if (result == NULL) {
+ return FLB_FALSE;
+ }
+
+ label->name = result;
+ }
+ }
+ }
+
+ return FLB_TRUE;
+}
+
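+/*
+ * Promotes the map's static (label-less) metric to a regular labeled data
+ * point: a new metric is created with the matching hash, the stored values
+ * are copied over and the static slot is cleared.
+ */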
+int metrics_map_convert_static_metric(struct cmt_map *map,
+ size_t label_index,
+ char *label_value)
+{
+ struct cmt_metric *metric;
+ int result;
+ size_t index;
+ cfl_hash_state_t state;
+ uint64_t hash;
+
+ cfl_hash_64bits_reset(&state);
+
+ cfl_hash_64bits_update(&state,
+ map->opts->fqname,
+ cfl_sds_len(map->opts->fqname));
+
+ for (index = 0 ; index < map->label_count ; index++) {
+ if (index != label_index) {
+ cfl_hash_64bits_update(&state,
+ "_NULL_",
+ 6);
+ }
+ else {
+ cfl_hash_64bits_update(&state,
+ label_value,
+ strlen(label_value));
+ }
+ }
+
+ hash = cfl_hash_64bits_digest(&state);
+
+ metric = map_metric_create(hash, 0, NULL);
+
+ if (metric == NULL) {
+ return FLB_FALSE;
+ }
+
+ for (index = 0 ; index < map->label_count ; index++) {
+ if (index != label_index) {
+ result = metrics_data_point_set_label_value(metric,
+ index,
+ "",
+ FLB_TRUE,
+ FLB_TRUE);
+ }
+ else {
+ result = metrics_data_point_set_label_value(metric,
+ index,
+ label_value,
+ FLB_TRUE,
+ FLB_TRUE);
+ }
+
+ if (result != FLB_TRUE) {
+ map_metric_destroy(metric);
+
+ return FLB_FALSE;
+ }
+ }
+
+ metric->val = map->metric.val;
+
+ metric->hist_buckets = map->metric.hist_buckets;
+ metric->hist_count = map->metric.hist_count;
+ metric->hist_sum = map->metric.hist_sum;
+
+ metric->sum_quantiles_set = map->metric.sum_quantiles_set;
+ metric->sum_quantiles = map->metric.sum_quantiles;
+ metric->sum_quantiles_count = map->metric.sum_quantiles_count;
+ metric->sum_count = map->metric.sum_count;
+ metric->sum_sum = map->metric.sum_sum;
+
+ metric->timestamp = map->metric.timestamp;
+
+ map->metric_static_set = 0;
+
+ cfl_list_add(&metric->_head, &map->metrics);
+
+ memset(&map->metric, 0, sizeof(struct cmt_metric));
+
+ return FLB_TRUE;
+}
+
+int metrics_map_remove_label_value(struct cmt_map *map,
+ size_t label_index)
+{
+ struct cfl_list *iterator;
+ struct cmt_metric *metric;
+ int result;
+
+ result = FLB_TRUE;
+
+ cfl_list_foreach(iterator, &map->metrics) {
+ metric = cfl_list_entry(iterator, struct cmt_metric, _head);
+
+ result = metrics_data_point_remove_label_value(metric, label_index);
+
+ if (result == FLB_FALSE) {
+ break;
+ }
+ }
+
+ return result;
+}
+
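+/*
+ * Applies a label value at label_index to every data point in the map.
+ * When PROMOTE_STATIC_METRICS_ON_LABEL_INSERT is defined, a pending static
+ * metric is converted into a labeled data point as well.
+ */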
+int metrics_map_set_label_value(struct cmt_map *map,
+ size_t label_index,
+ char *label_value,
+ int overwrite,
+ int insert)
+{
+ struct cfl_list *iterator;
+ struct cmt_metric *metric;
+ int result;
+
+ result = FLB_TRUE;
+
+ cfl_list_foreach(iterator, &map->metrics) {
+ metric = cfl_list_entry(iterator, struct cmt_metric, _head);
+
+ result = metrics_data_point_set_label_value(metric,
+ label_index,
+ label_value,
+ overwrite,
+ insert);
+
+ if (result == FLB_FALSE) {
+ break;
+ }
+ }
+
+#ifdef PROMOTE_STATIC_METRICS_ON_LABEL_INSERT
+ if (map->metric_static_set == 1) {
+ result = metrics_map_convert_static_metric(map,
+ label_index,
+ label_value);
+
+        if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+#endif
+
+ return result;
+}
+
+int metrics_map_transform_label_value(struct cmt_map *map,
+ size_t label_index,
+ label_transformer transformer)
+{
+ struct cfl_list *iterator;
+ struct cmt_metric *metric;
+ int result;
+
+ result = FLB_TRUE;
+
+ cfl_list_foreach(iterator, &map->metrics) {
+ metric = cfl_list_entry(iterator, struct cmt_metric, _head);
+
+ result = metrics_data_point_transform_label_value(metric,
+ label_index,
+ transformer);
+
+ if (result == FLB_FALSE) {
+ break;
+ }
+ }
+
+ return result;
+}
+
+int metrics_map_update_label(struct cmt_map *map,
+ char *label_name,
+ char *label_value)
+{
+ ssize_t label_index;
+ int result;
+
+ label_index = metrics_map_get_label_index(map, label_name);
+
+ if (label_index == -1) {
+ return FLB_TRUE;
+ }
+
+ result = metrics_map_set_label_value(map,
+ label_index,
+ label_value,
+ FLB_TRUE,
+ FLB_FALSE);
+
+    if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+
+ return FLB_TRUE;
+}
+
+int metrics_map_transform_label(struct cmt_map *map,
+ char *label_name,
+ label_transformer transformer)
+{
+ ssize_t label_index;
+ int result;
+
+ label_index = metrics_map_get_label_index(map, label_name);
+
+ if (label_index == -1) {
+ return FLB_TRUE;
+ }
+
+ result = metrics_map_transform_label_value(map,
+ label_index,
+ transformer);
+
+    if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+
+ return FLB_TRUE;
+}
+
+int metrics_map_insert_label(struct cmt_map *map,
+ char *label_name,
+ char *label_value)
+{
+ ssize_t label_index;
+ int label_added;
+ int result;
+
+ label_added = FLB_FALSE;
+ label_index = metrics_map_get_label_index(map, label_name);
+
+ if (label_index == -1) {
+ label_index = metrics_map_insert_label_name(map, label_name);
+ label_added = FLB_TRUE;
+ }
+
+ if (label_index == -1) {
+ return FLB_FALSE;
+ }
+
+ result = metrics_map_set_label_value(map,
+ label_index,
+ label_value,
+ FLB_FALSE,
+ label_added);
+
+    if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+
+ return FLB_TRUE;
+}
+
+int metrics_map_upsert_label(struct cmt_map *map,
+ char *label_name,
+ char *label_value)
+{
+ ssize_t label_index;
+ int label_added;
+ int result;
+
+ label_added = FLB_FALSE;
+ label_index = metrics_map_get_label_index(map, label_name);
+
+ if (label_index == -1) {
+ label_index = metrics_map_insert_label_name(map, label_name);
+ label_added = FLB_TRUE;
+ }
+
+ if (label_index == -1) {
+ return FLB_FALSE;
+ }
+
+ result = metrics_map_set_label_value(map,
+ label_index,
+ label_value,
+ FLB_TRUE,
+ label_added);
+
+    if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+
+ return FLB_TRUE;
+}
+
+int metrics_map_remove_label(struct cmt_map *map,
+ char *label_name)
+{
+ ssize_t label_index;
+ int result;
+
+ label_index = metrics_map_get_label_index(map, label_name);
+
+ if (label_index == -1) {
+ return FLB_TRUE;
+ }
+
+ map->label_count--;
+
+ result = metrics_map_remove_label_name(map, label_index);
+
+    if (result == FLB_TRUE) {
+ result = metrics_map_remove_label_value(map, label_index);
+ }
+
+ return result;
+}
+
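+/*
+ * Dynamic label helpers: these operate on the per-metric label keys kept in
+ * each cmt_map, so they walk every histogram, summary, untyped, counter and
+ * gauge registered in the metrics context.
+ */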
+static int metrics_context_contains_dynamic_label(struct cmt *metrics_context,
+ char *label_name)
+{
+ struct cfl_list *metric_iterator;
+ struct cmt_histogram *histogram;
+ struct cmt_summary *summary;
+ struct cmt_untyped *untyped;
+ struct cmt_counter *counter;
+ struct cmt_gauge *gauge;
+
+ cfl_list_foreach(metric_iterator, &metrics_context->histograms) {
+ histogram = cfl_list_entry(metric_iterator, struct cmt_histogram, _head);
+
+        if (metrics_map_contains_label(histogram->map, label_name) == FLB_TRUE) {
+ return FLB_TRUE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->summaries) {
+ summary = cfl_list_entry(metric_iterator, struct cmt_summary, _head);
+
+        if (metrics_map_contains_label(summary->map, label_name) == FLB_TRUE) {
+ return FLB_TRUE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->untypeds) {
+ untyped = cfl_list_entry(metric_iterator, struct cmt_untyped, _head);
+
+        if (metrics_map_contains_label(untyped->map, label_name) == FLB_TRUE) {
+ return FLB_TRUE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->counters) {
+ counter = cfl_list_entry(metric_iterator, struct cmt_counter, _head);
+
+        if (metrics_map_contains_label(counter->map, label_name) == FLB_TRUE) {
+ return FLB_TRUE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->gauges) {
+ gauge = cfl_list_entry(metric_iterator, struct cmt_gauge, _head);
+
+        if (metrics_map_contains_label(gauge->map, label_name) == FLB_TRUE) {
+ return FLB_TRUE;
+ }
+ }
+
+ return FLB_FALSE;
+}
+
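+/* Inserts the label into every metric map of the context. */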
+static int metrics_context_insert_dynamic_label(struct cmt *metrics_context,
+ char *label_name,
+ char *label_value)
+{
+ struct cfl_list *metric_iterator;
+ struct cmt_histogram *histogram;
+ struct cmt_summary *summary;
+ struct cmt_untyped *untyped;
+ struct cmt_counter *counter;
+ int result;
+ struct cmt_gauge *gauge;
+
+ cfl_list_foreach(metric_iterator, &metrics_context->histograms) {
+ histogram = cfl_list_entry(metric_iterator, struct cmt_histogram, _head);
+
+ result = metrics_map_insert_label(histogram->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->summaries) {
+ summary = cfl_list_entry(metric_iterator, struct cmt_summary, _head);
+
+ result = metrics_map_insert_label(summary->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->untypeds) {
+ untyped = cfl_list_entry(metric_iterator, struct cmt_untyped, _head);
+
+ result = metrics_map_insert_label(untyped->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->counters) {
+ counter = cfl_list_entry(metric_iterator, struct cmt_counter, _head);
+
+ result = metrics_map_insert_label(counter->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->gauges) {
+ gauge = cfl_list_entry(metric_iterator, struct cmt_gauge, _head);
+
+ result = metrics_map_insert_label(gauge->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ return FLB_TRUE;
+}
+
+static int metrics_context_update_dynamic_label(struct cmt *metrics_context,
+ char *label_name,
+ char *label_value)
+{
+ struct cfl_list *metric_iterator;
+ struct cmt_histogram *histogram;
+ struct cmt_summary *summary;
+ struct cmt_untyped *untyped;
+ struct cmt_counter *counter;
+ int result;
+ struct cmt_gauge *gauge;
+
+ cfl_list_foreach(metric_iterator, &metrics_context->histograms) {
+ histogram = cfl_list_entry(metric_iterator, struct cmt_histogram, _head);
+
+ result = metrics_map_update_label(histogram->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->summaries) {
+ summary = cfl_list_entry(metric_iterator, struct cmt_summary, _head);
+
+ result = metrics_map_update_label(summary->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->untypeds) {
+ untyped = cfl_list_entry(metric_iterator, struct cmt_untyped, _head);
+
+ result = metrics_map_update_label(untyped->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->counters) {
+ counter = cfl_list_entry(metric_iterator, struct cmt_counter, _head);
+
+ result = metrics_map_update_label(counter->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->gauges) {
+ gauge = cfl_list_entry(metric_iterator, struct cmt_gauge, _head);
+
+ result = metrics_map_update_label(gauge->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ return FLB_TRUE;
+}
+
+static int metrics_context_transform_dynamic_label(struct cmt *metrics_context,
+ char *label_name,
+ label_transformer transformer)
+{
+ struct cfl_list *metric_iterator;
+ struct cmt_histogram *histogram;
+ struct cmt_summary *summary;
+ struct cmt_untyped *untyped;
+ struct cmt_counter *counter;
+ int result;
+ struct cmt_gauge *gauge;
+
+ cfl_list_foreach(metric_iterator, &metrics_context->histograms) {
+ histogram = cfl_list_entry(metric_iterator, struct cmt_histogram, _head);
+
+ result = metrics_map_transform_label(histogram->map,
+ label_name,
+ transformer);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->summaries) {
+ summary = cfl_list_entry(metric_iterator, struct cmt_summary, _head);
+
+ result = metrics_map_transform_label(summary->map,
+ label_name,
+ transformer);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->untypeds) {
+ untyped = cfl_list_entry(metric_iterator, struct cmt_untyped, _head);
+
+ result = metrics_map_transform_label(untyped->map,
+ label_name,
+ transformer);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->counters) {
+ counter = cfl_list_entry(metric_iterator, struct cmt_counter, _head);
+
+ result = metrics_map_transform_label(counter->map,
+ label_name,
+ transformer);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->gauges) {
+ gauge = cfl_list_entry(metric_iterator, struct cmt_gauge, _head);
+
+ result = metrics_map_transform_label(gauge->map,
+ label_name,
+ transformer);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ return FLB_TRUE;
+}
+
+static int metrics_context_upsert_dynamic_label(struct cmt *metrics_context,
+ char *label_name,
+ char *label_value)
+{
+ struct cfl_list *metric_iterator;
+ struct cmt_histogram *histogram;
+ struct cmt_summary *summary;
+ struct cmt_untyped *untyped;
+ struct cmt_counter *counter;
+ int result;
+ struct cmt_gauge *gauge;
+
+ cfl_list_foreach(metric_iterator, &metrics_context->histograms) {
+ histogram = cfl_list_entry(metric_iterator, struct cmt_histogram, _head);
+
+ result = metrics_map_upsert_label(histogram->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->summaries) {
+ summary = cfl_list_entry(metric_iterator, struct cmt_summary, _head);
+
+ result = metrics_map_upsert_label(summary->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->untypeds) {
+ untyped = cfl_list_entry(metric_iterator, struct cmt_untyped, _head);
+
+ result = metrics_map_upsert_label(untyped->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->counters) {
+ counter = cfl_list_entry(metric_iterator, struct cmt_counter, _head);
+
+ result = metrics_map_upsert_label(counter->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->gauges) {
+ gauge = cfl_list_entry(metric_iterator, struct cmt_gauge, _head);
+
+ result = metrics_map_upsert_label(gauge->map,
+ label_name,
+ label_value);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ return FLB_TRUE;
+}
+
+static int metrics_context_remove_dynamic_label(struct cmt *metrics_context,
+ char *label_name)
+{
+ struct cfl_list *metric_iterator;
+ struct cmt_histogram *histogram;
+ struct cmt_summary *summary;
+ struct cmt_untyped *untyped;
+ struct cmt_counter *counter;
+ int result;
+ struct cmt_gauge *gauge;
+
+ cfl_list_foreach(metric_iterator, &metrics_context->histograms) {
+ histogram = cfl_list_entry(metric_iterator, struct cmt_histogram, _head);
+
+ result = metrics_map_remove_label(histogram->map,
+ label_name);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->summaries) {
+ summary = cfl_list_entry(metric_iterator, struct cmt_summary, _head);
+
+ result = metrics_map_remove_label(summary->map,
+ label_name);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->untypeds) {
+ untyped = cfl_list_entry(metric_iterator, struct cmt_untyped, _head);
+
+ result = metrics_map_remove_label(untyped->map,
+ label_name);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->counters) {
+ counter = cfl_list_entry(metric_iterator, struct cmt_counter, _head);
+
+ result = metrics_map_remove_label(counter->map,
+ label_name);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ cfl_list_foreach(metric_iterator, &metrics_context->gauges) {
+ gauge = cfl_list_entry(metric_iterator, struct cmt_gauge, _head);
+
+ result = metrics_map_remove_label(gauge->map,
+ label_name);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ return FLB_TRUE;
+}
+
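+/*
+ * Label operations applied to the whole metrics context. "update" only
+ * touches labels that already exist, either as dynamic (per-metric) or
+ * static (context-wide) labels.
+ */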
+static int update_labels(struct cmt *metrics_context,
+ struct cfl_list *labels)
+{
+ struct cfl_list *iterator;
+ int result;
+ struct cfl_kv *pair;
+
+ cfl_list_foreach(iterator, labels) {
+ pair = cfl_list_entry(iterator, struct cfl_kv, _head);
+
+ result = metrics_context_contains_dynamic_label(metrics_context,
+ pair->key);
+
+ if (result == FLB_TRUE) {
+ result = metrics_context_update_dynamic_label(metrics_context,
+ pair->key,
+ pair->val);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+
+ result = metrics_context_contains_static_label(metrics_context,
+ pair->key);
+
+ if (result == FLB_TRUE) {
+ result = metrics_context_update_static_label(metrics_context,
+ pair->key,
+ pair->val);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
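+/* "insert" adds labels without overwriting values that are already set. */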
+static int insert_labels(struct cmt *metrics_context,
+ struct cfl_list *labels)
+{
+ struct cfl_list *iterator;
+ int result;
+ struct cfl_kv *pair;
+
+ cfl_list_foreach(iterator, labels) {
+ pair = cfl_list_entry(iterator, struct cfl_kv, _head);
+
+ result = metrics_context_contains_dynamic_label(metrics_context,
+ pair->key);
+
+ if (result == FLB_TRUE) {
+ result = metrics_context_insert_dynamic_label(metrics_context,
+ pair->key,
+ pair->val);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+ else {
+ result = metrics_context_contains_static_label(metrics_context,
+ pair->key);
+
+ if (result == FLB_FALSE) {
+ result = metrics_context_insert_static_label(metrics_context,
+ pair->key,
+ pair->val);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+ }
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
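+/* "upsert" updates labels that already exist and inserts the ones that do not. */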
+static int upsert_labels(struct cmt *metrics_context,
+ struct cfl_list *labels)
+{
+ struct cfl_list *iterator;
+ int result;
+ struct cfl_kv *pair;
+
+ cfl_list_foreach(iterator, labels) {
+ pair = cfl_list_entry(iterator, struct cfl_kv, _head);
+
+ result = metrics_context_contains_dynamic_label(metrics_context,
+ pair->key);
+
+ if (result == FLB_TRUE) {
+ result = metrics_context_upsert_dynamic_label(metrics_context,
+ pair->key,
+ pair->val);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+ else {
+ result = metrics_context_upsert_static_label(metrics_context,
+ pair->key,
+ pair->val);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
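+/* "delete" removes the listed label names, dynamic or static. */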
+static int delete_labels(struct cmt *metrics_context,
+ struct mk_list *labels)
+{
+ struct mk_list *iterator;
+ int result;
+ struct flb_slist_entry *entry;
+
+ mk_list_foreach(iterator, labels) {
+ entry = mk_list_entry(iterator, struct flb_slist_entry, _head);
+
+ result = metrics_context_contains_dynamic_label(metrics_context,
+ entry->str);
+
+ if (result == FLB_TRUE) {
+ result = metrics_context_remove_dynamic_label(metrics_context,
+ entry->str);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+ else {
+ result = metrics_context_contains_static_label(metrics_context,
+ entry->str);
+
+ if (result == FLB_TRUE) {
+ result = metrics_context_remove_static_label(metrics_context,
+ entry->str);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+ }
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
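+/*
+ * Label transformer used by the "hash" operation: replaces the value with
+ * the hex encoded SHA-256 digest of its current content. Empty values are
+ * left untouched.
+ */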
+static int hash_transformer(struct cmt_metric *metric, cfl_sds_t *value)
+{
+ unsigned char digest_buffer[32];
+ int result;
+
+ if (value == NULL) {
+ return FLB_FALSE;
+ }
+
+ if (cfl_sds_len(*value) == 0) {
+ return FLB_TRUE;
+ }
+
+ result = flb_hash_simple(FLB_HASH_SHA256,
+ (unsigned char *) *value,
+ cfl_sds_len(*value),
+ digest_buffer,
+ sizeof(digest_buffer));
+
+ if (result != FLB_CRYPTO_SUCCESS) {
+ return FLB_FALSE;
+ }
+
+ return hex_encode(digest_buffer, sizeof(digest_buffer), value);
+}
+
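+/* "hash" applies hash_transformer to the listed labels, dynamic or static. */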
+static int hash_labels(struct cmt *metrics_context,
+ struct mk_list *labels)
+{
+ struct mk_list *iterator;
+ int result;
+ struct flb_slist_entry *entry;
+
+ mk_list_foreach(iterator, labels) {
+ entry = mk_list_entry(iterator, struct flb_slist_entry, _head);
+
+ result = metrics_context_contains_dynamic_label(metrics_context,
+ entry->str);
+
+ if (result == FLB_TRUE) {
+ result = metrics_context_transform_dynamic_label(metrics_context,
+ entry->str,
+ hash_transformer);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+ else {
+ result = metrics_context_contains_static_label(metrics_context,
+ entry->str);
+
+ if (result == FLB_TRUE) {
+ result = metrics_context_transform_static_label(metrics_context,
+ entry->str,
+ hash_transformer);
+
+ if (result == FLB_FALSE) {
+ return FLB_FALSE;
+ }
+ }
+ }
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
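+/*
+ * Metrics processing callback: applies the configured operations to the
+ * incoming metrics context in a fixed order (delete, update, upsert,
+ * insert, hash).
+ */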
+static int cb_process_metrics(struct flb_processor_instance *processor_instance,
+ struct cmt *metrics_context,
+ const char *tag,
+ int tag_len)
+{
+ struct internal_processor_context *processor_context;
+ int result;
+
+ processor_context =
+ (struct internal_processor_context *) processor_instance->context;
+
+ result = delete_labels(metrics_context,
+ &processor_context->delete_labels);
+
+ if (result == FLB_PROCESSOR_SUCCESS) {
+ result = update_labels(metrics_context,
+ &processor_context->update_labels);
+ }
+
+ if (result == FLB_PROCESSOR_SUCCESS) {
+ result = upsert_labels(metrics_context,
+ &processor_context->upsert_labels);
+ }
+
+ if (result == FLB_PROCESSOR_SUCCESS) {
+ result = insert_labels(metrics_context,
+ &processor_context->insert_labels);
+ }
+
+ if (result == FLB_PROCESSOR_SUCCESS) {
+ result = hash_labels(metrics_context,
+ &processor_context->hash_labels);
+ }
+
+ if (result != FLB_PROCESSOR_SUCCESS) {
+ return FLB_PROCESSOR_FAILURE;
+ }
+
+ return FLB_PROCESSOR_SUCCESS;
+}
+
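+/* Configuration directives exposed by the processor. */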
+static struct flb_config_map config_map[] = {
+ {
+ FLB_CONFIG_MAP_SLIST_1, "update", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct internal_processor_context,
+ update_list),
+     "Updates a label. Usage: 'update label_name value'"
+ },
+ {
+ FLB_CONFIG_MAP_SLIST_1, "insert", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct internal_processor_context,
+ insert_list),
+     "Inserts a label. Usage: 'insert label_name value'"
+ },
+ {
+ FLB_CONFIG_MAP_SLIST_1, "upsert", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct internal_processor_context,
+ upsert_list),
+     "Inserts or updates a label. Usage: 'upsert label_name value'"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "delete", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct internal_processor_context,
+ delete_list),
+     "Deletes a label. Usage: 'delete label_name'"
+ },
+ {
+ FLB_CONFIG_MAP_STR, "hash", NULL,
+ FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct internal_processor_context,
+ hash_list),
+     "Replaces a label's value with its SHA-256 hash. Usage: 'hash label_name'"
+ },
+
+ /* EOF */
+ {0}
+};
+
+struct flb_processor_plugin processor_labels_plugin = {
+ .name = "labels",
+ .description = "Modifies metrics labels",
+ .cb_init = cb_init,
+ .cb_process_logs = NULL,
+ .cb_process_metrics = cb_process_metrics,
+ .cb_process_traces = NULL,
+ .cb_exit = cb_exit,
+ .config_map = config_map,
+ .flags = 0
+};