path: root/fluent-bit/lib/librdkafka-2.1.0/examples
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 02:57:58 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 02:57:58 +0000
commit    be1c7e50e1e8809ea56f2c9d472eccd8ffd73a97 (patch)
tree      9754ff1ca740f6346cf8483ec915d4054bc5da2d /fluent-bit/lib/librdkafka-2.1.0/examples
parent    Initial commit. (diff)
Adding upstream version 1.44.3. (upstream/1.44.3, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'fluent-bit/lib/librdkafka-2.1.0/examples')
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/.gitignore  19
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/CMakeLists.txt  40
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/Makefile  137
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/README.md  38
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/alter_consumer_group_offsets.c  338
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/consumer.c  260
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/delete_records.c  233
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/describe_consumer_groups.c  373
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/globals.json  11
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/idempotent_producer.c  344
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/kafkatest_verifiable_client.cpp  961
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_group_offsets.c  359
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_groups.c  330
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/misc.c  287
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/openssl_engine_example.cpp  249
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/producer.c  251
-rwxr-xr-x  fluent-bit/lib/librdkafka-2.1.0/examples/producer.cpp  228
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.c  617
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.cpp  467
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_consume_batch.cpp  264
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.c  853
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.cpp  679
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_performance.c  1780
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/transactions-older-broker.c  668
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/transactions.c  665
-rw-r--r--  fluent-bit/lib/librdkafka-2.1.0/examples/win_ssl_cert_store.cpp  395
26 files changed, 10846 insertions, 0 deletions
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/examples/.gitignore
new file mode 100644
index 00000000..4190608c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/.gitignore
@@ -0,0 +1,19 @@
+consumer
+delete_records
+idempotent_producer
+kafkatest_verifiable_client
+misc
+openssl_engine_example_cpp
+producer
+producer_cpp
+rdkafka_complex_consumer_example
+rdkafka_complex_consumer_example_cpp
+rdkafka_consume_batch
+rdkafka_example
+rdkafka_example_cpp
+rdkafka_performance
+transactions
+list_consumer_groups
+describe_consumer_groups
+list_consumer_group_offsets
+alter_consumer_group_offsets
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/CMakeLists.txt b/fluent-bit/lib/librdkafka-2.1.0/examples/CMakeLists.txt
new file mode 100644
index 00000000..bbbb89ad
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/CMakeLists.txt
@@ -0,0 +1,40 @@
+if(WIN32)
+ set(win32_sources ../win32/wingetopt.c ../win32/wingetopt.h)
+endif(WIN32)
+
+add_executable(producer producer.c ${win32_sources})
+target_link_libraries(producer PUBLIC rdkafka)
+
+add_executable(producer_cpp producer.cpp ${win32_sources})
+target_link_libraries(producer_cpp PUBLIC rdkafka++)
+
+add_executable(consumer consumer.c ${win32_sources})
+target_link_libraries(consumer PUBLIC rdkafka)
+
+add_executable(rdkafka_performance rdkafka_performance.c ${win32_sources})
+target_link_libraries(rdkafka_performance PUBLIC rdkafka)
+
+add_executable(rdkafka_example_cpp rdkafka_example.cpp ${win32_sources})
+target_link_libraries(rdkafka_example_cpp PUBLIC rdkafka++)
+
+add_executable(rdkafka_complex_consumer_example_cpp rdkafka_complex_consumer_example.cpp ${win32_sources})
+target_link_libraries(rdkafka_complex_consumer_example_cpp PUBLIC rdkafka++)
+
+add_executable(openssl_engine_example_cpp openssl_engine_example.cpp ${win32_sources})
+target_link_libraries(openssl_engine_example_cpp PUBLIC rdkafka++)
+
+add_executable(misc misc.c ${win32_sources})
+target_link_libraries(misc PUBLIC rdkafka)
+
+
+# The targets below have Unix include dirs and do not compile on Windows.
+if(NOT WIN32)
+ add_executable(rdkafka_example rdkafka_example.c)
+ target_link_libraries(rdkafka_example PUBLIC rdkafka)
+
+ add_executable(rdkafka_complex_consumer_example rdkafka_complex_consumer_example.c)
+ target_link_libraries(rdkafka_complex_consumer_example PUBLIC rdkafka)
+
+ add_executable(kafkatest_verifiable_client kafkatest_verifiable_client.cpp)
+ target_link_libraries(kafkatest_verifiable_client PUBLIC rdkafka++)
+endif(NOT WIN32)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/Makefile b/fluent-bit/lib/librdkafka-2.1.0/examples/Makefile
new file mode 100644
index 00000000..15fba3c2
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/Makefile
@@ -0,0 +1,137 @@
+EXAMPLES ?= rdkafka_example rdkafka_performance rdkafka_example_cpp \
+ rdkafka_complex_consumer_example rdkafka_complex_consumer_example_cpp \
+ kafkatest_verifiable_client \
+ producer consumer idempotent_producer transactions \
+ delete_records \
+ openssl_engine_example_cpp \
+ list_consumer_groups \
+ describe_consumer_groups \
+ list_consumer_group_offsets \
+ alter_consumer_group_offsets \
+ misc
+
+all: $(EXAMPLES)
+
+include ../mklove/Makefile.base
+
+CFLAGS += -I../src
+CXXFLAGS += -I../src-cpp
+
+# librdkafka must be compiled with -gstrict-dwarf, but rdkafka_example must not,
+# due to some clang bug on OSX 10.9
+CPPFLAGS := $(subst strict-dwarf,,$(CPPFLAGS))
+
+rdkafka_example: ../src/librdkafka.a rdkafka_example.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_example.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+ @echo "# $@ is ready"
+ @echo "#"
+ @echo "# Run producer (write messages on stdin)"
+ @echo "./$@ -P -t <topic> -p <partition>"
+ @echo ""
+ @echo "# or consumer"
+ @echo "./$@ -C -t <topic> -p <partition>"
+ @echo ""
+ @echo "#"
+ @echo "# More usage options:"
+ @echo "./$@ -h"
+
+producer: ../src/librdkafka.a producer.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+producer_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a producer.cpp
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) producer.cpp -o $@ $(LDFLAGS) \
+ ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS)
+
+consumer: ../src/librdkafka.a consumer.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+idempotent_producer: ../src/librdkafka.a idempotent_producer.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+transactions: ../src/librdkafka.a transactions.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+transactions-older-broker: ../src/librdkafka.a transactions-older-broker.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+delete_records: ../src/librdkafka.a delete_records.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+list_consumer_groups: ../src/librdkafka.a list_consumer_groups.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+describe_consumer_groups: ../src/librdkafka.a describe_consumer_groups.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+list_consumer_group_offsets: ../src/librdkafka.a list_consumer_group_offsets.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+alter_consumer_group_offsets: ../src/librdkafka.a alter_consumer_group_offsets.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+rdkafka_complex_consumer_example: ../src/librdkafka.a rdkafka_complex_consumer_example.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_complex_consumer_example.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+ @echo "# $@ is ready"
+ @echo "#"
+ @echo "./$@ <topic[:part]> <topic2[:part]> .."
+ @echo ""
+ @echo "#"
+ @echo "# More usage options:"
+ @echo "./$@ -h"
+
+rdkafka_performance: ../src/librdkafka.a rdkafka_performance.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_performance.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+ @echo "# $@ is ready"
+ @echo "#"
+ @echo "# Run producer"
+ @echo "./$@ -P -t <topic> -p <partition> -s <msgsize>"
+ @echo ""
+ @echo "# or consumer"
+ @echo "./$@ -C -t <topic> -p <partition>"
+ @echo ""
+ @echo "#"
+ @echo "# More usage options:"
+ @echo "./$@ -h"
+
+
+rdkafka_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_example.cpp
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_example.cpp -o $@ $(LDFLAGS) \
+ ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS)
+
+kafkatest_verifiable_client: ../src-cpp/librdkafka++.a ../src/librdkafka.a kafkatest_verifiable_client.cpp
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) kafkatest_verifiable_client.cpp -o $@ $(LDFLAGS) \
+ ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS)
+
+
+rdkafka_complex_consumer_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_complex_consumer_example.cpp
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_complex_consumer_example.cpp -o $@ $(LDFLAGS) \
+ ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS)
+
+rdkafka_consume_batch: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_consume_batch.cpp
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_consume_batch.cpp -o $@ $(LDFLAGS) \
+ ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS)
+
+openssl_engine_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a openssl_engine_example.cpp
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) openssl_engine_example.cpp -o $@ $(LDFLAGS) \
+ ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS)
+
+misc: ../src/librdkafka.a misc.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+clean:
+ rm -f $(EXAMPLES)
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/README.md b/fluent-bit/lib/librdkafka-2.1.0/examples/README.md
new file mode 100644
index 00000000..3caee3b8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/README.md
@@ -0,0 +1,38 @@
+# librdkafka examples
+
+This directory contains example applications utilizing librdkafka.
+The examples are built by running `make` and they will be linked
+statically or dynamically to librdkafka in the parent `../src` directory.
+
+Begin with the following examples:
+
+ * [consumer.c](consumer.c) - a typical C high-level consumer application.
+ * [producer.c](producer.c) - a typical C producer application.
+ * [producer.cpp](producer.cpp) - a typical C++ producer application.
+ * [idempotent_producer.c](idempotent_producer.c) - Idempotent producer.
+ * [transactions.c](transactions.c) - Full exactly-once semantics (EOS)
+   transactional consumer-producer example.
+ Requires Apache Kafka 2.5 or later.
+ * [transactions-older-broker.c](transactions-older-broker.c) - Same as
+ `transactions.c` but for Apache Kafka versions 2.4.x and older which
+ lack KIP-447 support.
+ * [misc.c](misc.c) - a collection of miscellaneous usage examples.
+
+
+For more complex uses, see:
+ * [rdkafka_example.c](rdkafka_example.c) - simple consumer, producer, metadata listing, kitchen sink, etc.
+ * [rdkafka_example.cpp](rdkafka_example.cpp) - simple consumer, producer, metadata listing in C++.
+ * [rdkafka_complex_consumer_example.c](rdkafka_complex_consumer_example.c) - a more contrived high-level C consumer example.
+ * [rdkafka_complex_consumer_example.cpp](rdkafka_complex_consumer_example.cpp) - a more contrived high-level C++ consumer example.
+ * [rdkafka_consume_batch.cpp](rdkafka_consume_batch.cpp) - batching high-level C++ consumer example.
+ * [rdkafka_performance.c](rdkafka_performance.c) - performance, benchmark, latency producer and consumer tool.
+ * [kafkatest_verifiable_client.cpp](kafkatest_verifiable_client.cpp) - for use with the official Apache Kafka client system tests.
+ * [openssl_engine_example.cpp](openssl_engine_example.cpp) - metadata listing in C++ over SSL channel established using OpenSSL engine.
+
+
+For Admin API examples see:
+ * [delete_records.c](delete_records.c) - Delete records.
+ * [list_consumer_groups.c](list_consumer_groups.c) - List consumer groups.
+ * [describe_consumer_groups.c](describe_consumer_groups.c) - Describe consumer groups.
+ * [list_consumer_group_offsets.c](list_consumer_group_offsets.c) - List offsets of a consumer group.
+ * [alter_consumer_group_offsets.c](alter_consumer_group_offsets.c) - Alter offsets of a consumer group.
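Note: producer.c is listed in the README above but its body is not part of this excerpt. As a rough orientation, here is a minimal sketch (not part of the patch) of the produce/poll/flush pattern the producer examples follow; the broker address localhost:9092 and topic name "test" are placeholders, and error handling is trimmed:

/* Minimal producer sketch illustrating the pattern used by producer.c
 * and idempotent_producer.c. Not part of this patch. */
#include <stdio.h>
#include <string.h>
#include <librdkafka/rdkafka.h>

int main(void) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        if (rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%s\n", errstr);
                return 1;
        }

        /* rd_kafka_new() takes ownership of conf. */
        rd_kafka_t *rk =
            rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr, "%s\n", errstr);
                return 1;
        }

        const char *payload = "hello librdkafka";
        rd_kafka_resp_err_t err = rd_kafka_producev(
            rk, RD_KAFKA_V_TOPIC("test"),
            RD_KAFKA_V_VALUE((void *)payload, strlen(payload)),
            RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
        if (err)
                fprintf(stderr, "%% Produce failed: %s\n",
                        rd_kafka_err2str(err));

        rd_kafka_poll(rk, 0);          /* serve delivery reports */
        rd_kafka_flush(rk, 10 * 1000); /* wait up to 10s for delivery */
        rd_kafka_destroy(rk);
        return 0;
}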
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/alter_consumer_group_offsets.c b/fluent-bit/lib/librdkafka-2.1.0/examples/alter_consumer_group_offsets.c
new file mode 100644
index 00000000..09a52fd7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/alter_consumer_group_offsets.c
@@ -0,0 +1,338 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * AlterConsumerGroupOffsets usage example.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#else
+#include <getopt.h>
+#endif
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+const char *argv0;
+
+static rd_kafka_queue_t *queue; /** Admin result queue.
+ * This is a global so we can
+ * yield in stop() */
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ if (!run) {
+ fprintf(stderr, "%% Forced termination\n");
+ exit(2);
+ }
+ run = 0;
+ rd_kafka_queue_yield(queue);
+}
+
+
+static void usage(const char *reason, ...) {
+
+ fprintf(stderr,
+ "Alter consumer group offsets usage examples\n"
+ "\n"
+ "Usage: %s <options> <group_id> <topic>\n"
+ " <partition1> <offset1>\n"
+ " <partition2> <offset2>\n"
+ " ...\n"
+ "\n"
+ "Options:\n"
+ " -b <brokers> Bootstrap server list to connect to.\n"
+ " -X <prop=val> Set librdkafka configuration property.\n"
+ " See CONFIGURATION.md for full list.\n"
+ " -d <dbg,..> Enable librdkafka debugging (%s).\n"
+ "\n",
+ argv0, rd_kafka_get_debug_contexts());
+
+ if (reason) {
+ va_list ap;
+ char reasonbuf[512];
+
+ va_start(ap, reason);
+ vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap);
+ va_end(ap);
+
+ fprintf(stderr, "ERROR: %s\n", reasonbuf);
+ }
+
+ exit(reason ? 1 : 0);
+}
+
+
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(2); \
+ } while (0)
+
+
+/**
+ * @brief Set config property. Exit on failure.
+ */
+static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
+ char errstr[512];
+
+ if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK)
+ fatal("Failed to set %s=%s: %s", name, val, errstr);
+}
+
+
+static void
+print_partition_list(FILE *fp,
+ const rd_kafka_topic_partition_list_t *partitions,
+ int print_offset,
+ const char *prefix) {
+ int i;
+
+ if (partitions->cnt == 0) {
+ fprintf(fp, "%sNo partition found", prefix);
+ }
+ for (i = 0; i < partitions->cnt; i++) {
+               char offset_string[512] = {0};
+ *offset_string = '\0';
+ if (print_offset) {
+ snprintf(offset_string, sizeof(offset_string),
+ " offset %" PRId64,
+ partitions->elems[i].offset);
+ }
+ fprintf(fp, "%s%s %s [%" PRId32 "]%s error %s",
+ i > 0 ? "\n" : "", prefix, partitions->elems[i].topic,
+ partitions->elems[i].partition, offset_string,
+ rd_kafka_err2str(partitions->elems[i].err));
+ }
+ fprintf(fp, "\n");
+}
+
+/**
+ * @brief Parse an integer or fail.
+ */
+int64_t parse_int(const char *what, const char *str) {
+ char *end;
+        unsigned long long n = strtoull(str, &end, 0);
+
+ if (end != str + strlen(str)) {
+ fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n",
+ what, str);
+ exit(1);
+ }
+
+ return (int64_t)n;
+}
+
+static void
+cmd_alter_consumer_group_offsets(rd_kafka_conf_t *conf, int argc, char **argv) {
+ char errstr[512]; /* librdkafka API error reporting buffer */
+ rd_kafka_t *rk; /* Admin client instance */
+ rd_kafka_AdminOptions_t *options; /* (Optional) Options for
+ * AlterConsumerGroupOffsets() */
+ rd_kafka_event_t *event; /* AlterConsumerGroupOffsets result event */
+ const int min_argc = 2;
+ int i, num_partitions = 0;
+ const char *group_id, *topic;
+ rd_kafka_AlterConsumerGroupOffsets_t *alter_consumer_group_offsets;
+
+ /*
+ * Argument validation
+ */
+ if (argc < min_argc || (argc - min_argc) % 2 != 0) {
+ usage("Wrong number of arguments");
+ }
+
+ num_partitions = (argc - min_argc) / 2;
+ group_id = argv[0];
+ topic = argv[1];
+
+ /*
+ * Create an admin client, it can be created using any client type,
+ * so we choose producer since it requires no extra configuration
+ * and is more light-weight than the consumer.
+ *
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ fprintf(stderr, "%% Failed to create new producer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ /* The Admin API is completely asynchronous, results are emitted
+ * on the result queue that is passed to AlterConsumerGroupOffsets() */
+ queue = rd_kafka_queue_new(rk);
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ /* Set timeout (optional) */
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS);
+ if (rd_kafka_AdminOptions_set_request_timeout(
+ options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) {
+ fprintf(stderr, "%% Failed to set timeout: %s\n", errstr);
+ exit(1);
+ }
+
+ /* Read passed partition-offsets */
+ rd_kafka_topic_partition_list_t *partitions =
+ rd_kafka_topic_partition_list_new(num_partitions);
+ for (i = 0; i < num_partitions; i++) {
+ rd_kafka_topic_partition_list_add(
+ partitions, topic,
+ parse_int("partition", argv[min_argc + i * 2]))
+ ->offset = parse_int("offset", argv[min_argc + 1 + i * 2]);
+ }
+
+ /* Create argument */
+ alter_consumer_group_offsets =
+ rd_kafka_AlterConsumerGroupOffsets_new(group_id, partitions);
+ /* Call AlterConsumerGroupOffsets */
+ rd_kafka_AlterConsumerGroupOffsets(rk, &alter_consumer_group_offsets, 1,
+ options, queue);
+
+ /* Clean up input arguments */
+ rd_kafka_AlterConsumerGroupOffsets_destroy(
+ alter_consumer_group_offsets);
+ rd_kafka_AdminOptions_destroy(options);
+ rd_kafka_topic_partition_list_destroy(partitions);
+
+
+ /* Wait for results */
+ event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by
+ * the request timeout set
+ * above (30s) */);
+
+ if (!event) {
+ /* User hit Ctrl-C,
+ * see yield call in stop() signal handler */
+ fprintf(stderr, "%% Cancelled by user\n");
+
+ } else if (rd_kafka_event_error(event)) {
+ /* AlterConsumerGroupOffsets request failed */
+ fprintf(stderr, "%% AlterConsumerGroupOffsets failed: %s\n",
+ rd_kafka_event_error_string(event));
+ exit(1);
+
+ } else {
+ /* AlterConsumerGroupOffsets request succeeded, but individual
+ * partitions may have errors. */
+ const rd_kafka_AlterConsumerGroupOffsets_result_t *result;
+ const rd_kafka_group_result_t **groups;
+ size_t n_groups, i;
+
+ result = rd_kafka_event_AlterConsumerGroupOffsets_result(event);
+ groups = rd_kafka_AlterConsumerGroupOffsets_result_groups(
+ result, &n_groups);
+
+ printf("AlterConsumerGroupOffsets results:\n");
+ for (i = 0; i < n_groups; i++) {
+ const rd_kafka_group_result_t *group = groups[i];
+ const rd_kafka_topic_partition_list_t *partitions =
+ rd_kafka_group_result_partitions(group);
+ print_partition_list(stderr, partitions, 1, " ");
+ }
+ }
+
+ /* Destroy event object when we're done with it.
+ * Note: rd_kafka_event_destroy() allows a NULL event. */
+ rd_kafka_event_destroy(event);
+
+ /* Destroy queue */
+ rd_kafka_queue_destroy(queue);
+
+ /* Destroy the producer instance */
+ rd_kafka_destroy(rk);
+}
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf; /**< Client configuration object */
+ int opt;
+ argv0 = argv[0];
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+
+ /*
+ * Parse common options
+ */
+ while ((opt = getopt(argc, argv, "b:X:d:")) != -1) {
+ switch (opt) {
+ case 'b':
+ conf_set(conf, "bootstrap.servers", optarg);
+ break;
+
+ case 'X': {
+ char *name = optarg, *val;
+
+ if (!(val = strchr(name, '=')))
+ fatal("-X expects a name=value argument");
+
+ *val = '\0';
+ val++;
+
+ conf_set(conf, name, val);
+ break;
+ }
+
+ case 'd':
+ conf_set(conf, "debug", optarg);
+ break;
+
+ default:
+ usage("Unknown option %c", (char)opt);
+ }
+ }
+
+ cmd_alter_consumer_group_offsets(conf, argc - optind, &argv[optind]);
+
+ return 0;
+}
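The natural companion to altering offsets is reading them back, which list_consumer_group_offsets.c (added by this patch but not excerpted here) does. A condensed sketch of its core — an assumption that the ListConsumerGroupOffsets Admin API mirrors the Alter flow above, reusing rk, queue, group_id and print_partition_list() from that program:

/* Sketch, not part of the patch: fetch a group's committed offsets. */
rd_kafka_ListConsumerGroupOffsets_t *req =
    rd_kafka_ListConsumerGroupOffsets_new(
        group_id, NULL /* NULL = all committed partitions */);
rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new(
    rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS);

rd_kafka_ListConsumerGroupOffsets(rk, &req, 1, options, queue);
rd_kafka_ListConsumerGroupOffsets_destroy(req);
rd_kafka_AdminOptions_destroy(options);

/* Same asynchronous result handling as in the Alter example above. */
rd_kafka_event_t *event = rd_kafka_queue_poll(queue, -1);
if (event && !rd_kafka_event_error(event)) {
        size_t n, i;
        const rd_kafka_ListConsumerGroupOffsets_result_t *result =
            rd_kafka_event_ListConsumerGroupOffsets_result(event);
        const rd_kafka_group_result_t **groups =
            rd_kafka_ListConsumerGroupOffsets_result_groups(result, &n);
        for (i = 0; i < n; i++)
                print_partition_list(
                    stdout, rd_kafka_group_result_partitions(groups[i]), 1,
                    "  ");
}
rd_kafka_event_destroy(event); /* NULL-safe */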
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/consumer.c b/fluent-bit/lib/librdkafka-2.1.0/examples/consumer.c
new file mode 100644
index 00000000..21b27ca7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/consumer.c
@@ -0,0 +1,260 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Simple high-level balanced Apache Kafka consumer
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <ctype.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+//#include <librdkafka/rdkafka.h>
+#include "rdkafka.h"
+
+
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ run = 0;
+}
+
+
+
+/**
+ * @returns 1 if all bytes are printable, else 0.
+ */
+static int is_printable(const char *buf, size_t size) {
+ size_t i;
+
+ for (i = 0; i < size; i++)
+               if (!isprint((unsigned char)buf[i]))
+ return 0;
+
+ return 1;
+}
+
+
+int main(int argc, char **argv) {
+ rd_kafka_t *rk; /* Consumer instance handle */
+ rd_kafka_conf_t *conf; /* Temporary configuration object */
+ rd_kafka_resp_err_t err; /* librdkafka API error code */
+ char errstr[512]; /* librdkafka API error reporting buffer */
+ const char *brokers; /* Argument: broker list */
+ const char *groupid; /* Argument: Consumer group id */
+ char **topics; /* Argument: list of topics to subscribe to */
+ int topic_cnt; /* Number of topics to subscribe to */
+ rd_kafka_topic_partition_list_t *subscription; /* Subscribed topics */
+ int i;
+
+ /*
+ * Argument validation
+ */
+ if (argc < 4) {
+ fprintf(stderr,
+ "%% Usage: "
+ "%s <broker> <group.id> <topic1> <topic2>..\n",
+ argv[0]);
+ return 1;
+ }
+
+ brokers = argv[1];
+ groupid = argv[2];
+ topics = &argv[3];
+ topic_cnt = argc - 3;
+
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+ /* Set bootstrap broker(s) as a comma-separated list of
+ * host or host:port (default port 9092).
+ * librdkafka will use the bootstrap brokers to acquire the full
+ * set of brokers from the cluster. */
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%s\n", errstr);
+ rd_kafka_conf_destroy(conf);
+ return 1;
+ }
+
+ /* Set the consumer group id.
+ * All consumers sharing the same group id will join the same
+ * group, and the subscribed topics' partitions will be assigned
+ * according to the partition.assignment.strategy
+ * (consumer config property) to the consumers in the group. */
+ if (rd_kafka_conf_set(conf, "group.id", groupid, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%s\n", errstr);
+ rd_kafka_conf_destroy(conf);
+ return 1;
+ }
+
+ /* If there is no previously committed offset for a partition
+ * the auto.offset.reset strategy will be used to decide where
+ * in the partition to start fetching messages.
+ * By setting this to earliest the consumer will read all messages
+ * in the partition if there was no previously committed offset. */
+ if (rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%s\n", errstr);
+ rd_kafka_conf_destroy(conf);
+ return 1;
+ }
+
+ /*
+ * Create consumer instance.
+ *
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ fprintf(stderr, "%% Failed to create new consumer: %s\n",
+ errstr);
+ return 1;
+ }
+
+ conf = NULL; /* Configuration object is now owned, and freed,
+ * by the rd_kafka_t instance. */
+
+
+ /* Redirect all messages from per-partition queues to
+ * the main queue so that messages can be consumed with one
+ * call from all assigned partitions.
+ *
+ * The alternative is to poll the main queue (for events)
+ * and each partition queue separately, which requires setting
+ * up a rebalance callback and keeping track of the assignment:
+ * but that is more complex and typically not recommended. */
+ rd_kafka_poll_set_consumer(rk);
+
+
+ /* Convert the list of topics to a format suitable for librdkafka */
+ subscription = rd_kafka_topic_partition_list_new(topic_cnt);
+ for (i = 0; i < topic_cnt; i++)
+ rd_kafka_topic_partition_list_add(subscription, topics[i],
+ /* the partition is ignored
+ * by subscribe() */
+ RD_KAFKA_PARTITION_UA);
+
+ /* Subscribe to the list of topics */
+ err = rd_kafka_subscribe(rk, subscription);
+ if (err) {
+ fprintf(stderr, "%% Failed to subscribe to %d topics: %s\n",
+ subscription->cnt, rd_kafka_err2str(err));
+ rd_kafka_topic_partition_list_destroy(subscription);
+ rd_kafka_destroy(rk);
+ return 1;
+ }
+
+ fprintf(stderr,
+ "%% Subscribed to %d topic(s), "
+ "waiting for rebalance and messages...\n",
+ subscription->cnt);
+
+ rd_kafka_topic_partition_list_destroy(subscription);
+
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ /* Subscribing to topics will trigger a group rebalance
+ * which may take some time to finish, but there is no need
+ * for the application to handle this idle period in a special way
+ * since a rebalance may happen at any time.
+ * Start polling for messages. */
+
+ while (run) {
+ rd_kafka_message_t *rkm;
+
+ rkm = rd_kafka_consumer_poll(rk, 100);
+ if (!rkm)
+ continue; /* Timeout: no message within 100ms,
+ * try again. This short timeout allows
+ * checking for `run` at frequent intervals.
+ */
+
+ /* consumer_poll() will return either a proper message
+ * or a consumer error (rkm->err is set). */
+ if (rkm->err) {
+ /* Consumer errors are generally to be considered
+ * informational as the consumer will automatically
+ * try to recover from all types of errors. */
+ fprintf(stderr, "%% Consumer error: %s\n",
+ rd_kafka_message_errstr(rkm));
+ rd_kafka_message_destroy(rkm);
+ continue;
+ }
+
+ /* Proper message. */
+ printf("Message on %s [%" PRId32 "] at offset %" PRId64
+ " (leader epoch %" PRId32 "):\n",
+ rd_kafka_topic_name(rkm->rkt), rkm->partition,
+ rkm->offset, rd_kafka_message_leader_epoch(rkm));
+
+ /* Print the message key. */
+ if (rkm->key && is_printable(rkm->key, rkm->key_len))
+ printf(" Key: %.*s\n", (int)rkm->key_len,
+ (const char *)rkm->key);
+ else if (rkm->key)
+ printf(" Key: (%d bytes)\n", (int)rkm->key_len);
+
+ /* Print the message value/payload. */
+ if (rkm->payload && is_printable(rkm->payload, rkm->len))
+ printf(" Value: %.*s\n", (int)rkm->len,
+ (const char *)rkm->payload);
+ else if (rkm->payload)
+ printf(" Value: (%d bytes)\n", (int)rkm->len);
+
+ rd_kafka_message_destroy(rkm);
+ }
+
+
+ /* Close the consumer: commit final offsets and leave the group. */
+ fprintf(stderr, "%% Closing consumer\n");
+ rd_kafka_consumer_close(rk);
+
+
+ /* Destroy the consumer */
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
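consumer.c leaves enable.auto.commit at its default (true), so offsets are committed automatically in the background. For at-least-once processing it is common to commit explicitly after a message has been handled; a sketch (not part of the patch) of the same poll loop with manual commits, assuming enable.auto.commit was set to "false" on the conf before rd_kafka_new():

/* Manual-commit variant of the poll loop above; rk and run as in main(). */
while (run) {
        rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 100);
        if (!rkm)
                continue;

        if (!rkm->err) {
                /* ... process the message here ... */

                /* Synchronously commit this message's offset + 1 so a
                 * restarted consumer resumes after it. */
                rd_kafka_resp_err_t err =
                    rd_kafka_commit_message(rk, rkm, 0 /* sync */);
                if (err)
                        fprintf(stderr, "%% Commit failed: %s\n",
                                rd_kafka_err2str(err));
        }
        rd_kafka_message_destroy(rkm);
}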
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/delete_records.c b/fluent-bit/lib/librdkafka-2.1.0/examples/delete_records.c
new file mode 100644
index 00000000..2660996a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/delete_records.c
@@ -0,0 +1,233 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Example utility that shows how to use DeleteRecords (Admin API)
+ * to delete all messages/records up to (but not including) a specific offset
+ * from one or more topic partitions.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+static rd_kafka_queue_t *queue; /** Admin result queue.
+ * This is a global so we can
+ * yield in stop() */
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ if (!run) {
+ fprintf(stderr, "%% Forced termination\n");
+ exit(2);
+ }
+ run = 0;
+ rd_kafka_queue_yield(queue);
+}
+
+
+/**
+ * @brief Parse an integer or fail.
+ */
+int64_t parse_int(const char *what, const char *str) {
+ char *end;
+        unsigned long long n = strtoull(str, &end, 0);
+
+ if (end != str + strlen(str)) {
+ fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n",
+ what, str);
+ exit(1);
+ }
+
+ return (int64_t)n;
+}
+
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf; /* Temporary configuration object */
+ char errstr[512]; /* librdkafka API error reporting buffer */
+ const char *brokers; /* Argument: broker list */
+ rd_kafka_t *rk; /* Admin client instance */
+ rd_kafka_topic_partition_list_t *offsets_before; /* Delete messages up
+ * to but not
+ * including these
+ * offsets */
+ rd_kafka_DeleteRecords_t *del_records; /* Container for offsets_before*/
+ rd_kafka_AdminOptions_t *options; /* (Optional) Options for
+ * DeleteRecords() */
+ rd_kafka_event_t *event; /* DeleteRecords result event */
+ int exitcode = 0;
+ int i;
+
+ /*
+ * Argument validation
+ */
+ if (argc < 5 || (argc - 2) % 3 != 0) {
+ fprintf(stderr,
+ "%% Usage: %s <broker> "
+ "<topic> <partition> <offset_before> "
+ "<topic2> <partition2> <offset_before2> ...\n"
+ "\n"
+ "Delete all messages up to but not including the "
+ "specified offset(s).\n"
+ "\n",
+ argv[0]);
+ return 1;
+ }
+
+ brokers = argv[1];
+
+ /* Parse topic partition offset tuples and add to offsets list */
+ offsets_before = rd_kafka_topic_partition_list_new((argc - 2) / 3);
+ for (i = 2; i < argc; i += 3) {
+ const char *topic = argv[i];
+ int partition = parse_int("partition", argv[i + 1]);
+ int64_t offset = parse_int("offset_before", argv[i + 2]);
+
+ rd_kafka_topic_partition_list_add(offsets_before, topic,
+ partition)
+ ->offset = offset;
+ }
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+ /* Set bootstrap broker(s) as a comma-separated list of
+ * host or host:port (default port 9092).
+ * librdkafka will use the bootstrap brokers to acquire the full
+ * set of brokers from the cluster. */
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%s\n", errstr);
+ return 1;
+ }
+ rd_kafka_conf_set(conf, "debug", "admin,topic,metadata", NULL, 0);
+
+ /*
+ * Create an admin client, it can be created using any client type,
+ * so we choose producer since it requires no extra configuration
+ * and is more light-weight than the consumer.
+ *
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ fprintf(stderr, "%% Failed to create new producer: %s\n",
+ errstr);
+ return 1;
+ }
+
+ /* The Admin API is completely asynchronous, results are emitted
+ * on the result queue that is passed to DeleteRecords() */
+ queue = rd_kafka_queue_new(rk);
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ /* Set timeout (optional) */
+ options =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETERECORDS);
+ if (rd_kafka_AdminOptions_set_request_timeout(
+ options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) {
+ fprintf(stderr, "%% Failed to set timeout: %s\n", errstr);
+ return 1;
+ }
+
+ /* Create argument */
+ del_records = rd_kafka_DeleteRecords_new(offsets_before);
+ /* We're now done with offsets_before */
+ rd_kafka_topic_partition_list_destroy(offsets_before);
+
+ /* Call DeleteRecords */
+ rd_kafka_DeleteRecords(rk, &del_records, 1, options, queue);
+
+ /* Clean up input arguments */
+ rd_kafka_DeleteRecords_destroy(del_records);
+ rd_kafka_AdminOptions_destroy(options);
+
+
+ /* Wait for results */
+ event = rd_kafka_queue_poll(queue, -1 /*indefinitely*/);
+
+ if (!event) {
+ /* User hit Ctrl-C */
+ fprintf(stderr, "%% Cancelled by user\n");
+
+ } else if (rd_kafka_event_error(event)) {
+ /* DeleteRecords request failed */
+ fprintf(stderr, "%% DeleteRecords failed: %s\n",
+ rd_kafka_event_error_string(event));
+ exitcode = 2;
+
+ } else {
+ /* DeleteRecords request succeeded, but individual
+ * partitions may have errors. */
+ const rd_kafka_DeleteRecords_result_t *result;
+ const rd_kafka_topic_partition_list_t *offsets;
+ int i;
+
+ result = rd_kafka_event_DeleteRecords_result(event);
+ offsets = rd_kafka_DeleteRecords_result_offsets(result);
+
+ printf("DeleteRecords results:\n");
+ for (i = 0; i < offsets->cnt; i++)
+ printf(" %s [%" PRId32 "] offset %" PRId64 ": %s\n",
+ offsets->elems[i].topic,
+ offsets->elems[i].partition,
+ offsets->elems[i].offset,
+ rd_kafka_err2str(offsets->elems[i].err));
+ }
+
+ /* Destroy event object when we're done with it.
+ * Note: rd_kafka_event_destroy() allows a NULL event. */
+ rd_kafka_event_destroy(event);
+
+ signal(SIGINT, SIG_DFL);
+
+ /* Destroy queue */
+ rd_kafka_queue_destroy(queue);
+
+ /* Destroy the producer instance */
+ rd_kafka_destroy(rk);
+
+ return exitcode;
+}
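delete_records.c always takes explicit cut-off offsets. librdkafka also documents RD_KAFKA_OFFSET_END as a valid offset_before value, meaning "delete everything up to the high watermark"; a sketch (not part of the patch) of how the request argument would be built to truncate one whole partition, with topic name "mytopic" as a placeholder:

/* Truncate partition 0 of "mytopic" entirely. */
rd_kafka_topic_partition_list_t *offsets_before =
    rd_kafka_topic_partition_list_new(1);
rd_kafka_topic_partition_list_add(offsets_before, "mytopic", 0)->offset =
    RD_KAFKA_OFFSET_END;

rd_kafka_DeleteRecords_t *del_records =
    rd_kafka_DeleteRecords_new(offsets_before);
rd_kafka_topic_partition_list_destroy(offsets_before);

/* Then rd_kafka_DeleteRecords(rk, &del_records, 1, options, queue) and
 * result handling proceed exactly as in main() above. */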
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/describe_consumer_groups.c b/fluent-bit/lib/librdkafka-2.1.0/examples/describe_consumer_groups.c
new file mode 100644
index 00000000..45b6b8d0
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/describe_consumer_groups.c
@@ -0,0 +1,373 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * DescribeConsumerGroups usage example.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#else
+#include <getopt.h>
+#endif
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+const char *argv0;
+
+static rd_kafka_queue_t *queue; /** Admin result queue.
+ * This is a global so we can
+ * yield in stop() */
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ if (!run) {
+ fprintf(stderr, "%% Forced termination\n");
+ exit(2);
+ }
+ run = 0;
+ rd_kafka_queue_yield(queue);
+}
+
+
+static void usage(const char *reason, ...) {
+
+ fprintf(stderr,
+ "Describe groups usage examples\n"
+ "\n"
+ "Usage: %s <options> <group1> <group2> ...\n"
+ "\n"
+ "Options:\n"
+ " -b <brokers> Bootstrap server list to connect to.\n"
+ " -X <prop=val> Set librdkafka configuration property.\n"
+ " See CONFIGURATION.md for full list.\n"
+ " -d <dbg,..> Enable librdkafka debugging (%s).\n"
+ "\n",
+ argv0, rd_kafka_get_debug_contexts());
+
+ if (reason) {
+ va_list ap;
+ char reasonbuf[512];
+
+ va_start(ap, reason);
+ vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap);
+ va_end(ap);
+
+ fprintf(stderr, "ERROR: %s\n", reasonbuf);
+ }
+
+ exit(reason ? 1 : 0);
+}
+
+
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(2); \
+ } while (0)
+
+
+/**
+ * @brief Set config property. Exit on failure.
+ */
+static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
+ char errstr[512];
+
+ if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK)
+ fatal("Failed to set %s=%s: %s", name, val, errstr);
+}
+
+
+static void
+print_partition_list(FILE *fp,
+ const rd_kafka_topic_partition_list_t *partitions,
+ int print_offset,
+ const char *prefix) {
+ int i;
+
+ if (partitions->cnt == 0) {
+ fprintf(fp, "%sNo partition found", prefix);
+ }
+ for (i = 0; i < partitions->cnt; i++) {
+               char offset_string[512] = {0};
+ *offset_string = '\0';
+ if (print_offset) {
+ snprintf(offset_string, sizeof(offset_string),
+ " offset %" PRId64,
+ partitions->elems[i].offset);
+ }
+ fprintf(fp, "%s%s %s [%" PRId32 "]%s error %s",
+ i > 0 ? "\n" : "", prefix, partitions->elems[i].topic,
+ partitions->elems[i].partition, offset_string,
+ rd_kafka_err2str(partitions->elems[i].err));
+ }
+ fprintf(fp, "\n");
+}
+
+/**
+ * @brief Print group information.
+ */
+static int
+print_groups_info(const rd_kafka_DescribeConsumerGroups_result_t *grpdesc,
+ int groups_cnt) {
+ size_t i;
+ const rd_kafka_ConsumerGroupDescription_t **result_groups;
+ size_t result_groups_cnt;
+ result_groups = rd_kafka_DescribeConsumerGroups_result_groups(
+ grpdesc, &result_groups_cnt);
+
+ if (result_groups_cnt == 0) {
+ if (groups_cnt > 0) {
+ fprintf(stderr, "No matching groups found\n");
+ return 1;
+ } else {
+ fprintf(stderr, "No groups in cluster\n");
+ }
+ }
+
+ for (i = 0; i < result_groups_cnt; i++) {
+ int j, member_cnt;
+ const rd_kafka_error_t *error;
+ const rd_kafka_ConsumerGroupDescription_t *group =
+ result_groups[i];
+ char coordinator_desc[512];
+ const rd_kafka_Node_t *coordinator = NULL;
+ const char *group_id =
+ rd_kafka_ConsumerGroupDescription_group_id(group);
+ const char *partition_assignor =
+ rd_kafka_ConsumerGroupDescription_partition_assignor(group);
+ rd_kafka_consumer_group_state_t state =
+ rd_kafka_ConsumerGroupDescription_state(group);
+ member_cnt =
+ rd_kafka_ConsumerGroupDescription_member_count(group);
+ error = rd_kafka_ConsumerGroupDescription_error(group);
+ coordinator =
+ rd_kafka_ConsumerGroupDescription_coordinator(group);
+ *coordinator_desc = '\0';
+
+ if (coordinator != NULL) {
+ snprintf(coordinator_desc, sizeof(coordinator_desc),
+ ", coordinator [id: %" PRId32
+ ", host: %s"
+ ", port: %" PRIu16 "]",
+ rd_kafka_Node_id(coordinator),
+ rd_kafka_Node_host(coordinator),
+ rd_kafka_Node_port(coordinator));
+ }
+ printf(
+ "Group \"%s\", partition assignor \"%s\", "
+ "state %s%s, with %" PRId32 " member(s)",
+ group_id, partition_assignor,
+ rd_kafka_consumer_group_state_name(state), coordinator_desc,
+ member_cnt);
+ if (error)
+ printf(" error[%" PRId32 "]: %s",
+ rd_kafka_error_code(error),
+ rd_kafka_error_string(error));
+ printf("\n");
+ for (j = 0; j < member_cnt; j++) {
+ const rd_kafka_MemberDescription_t *member =
+ rd_kafka_ConsumerGroupDescription_member(group, j);
+ printf(
+ " Member \"%s\" with client-id %s,"
+ " group instance id: %s, host %s\n",
+ rd_kafka_MemberDescription_consumer_id(member),
+ rd_kafka_MemberDescription_client_id(member),
+ rd_kafka_MemberDescription_group_instance_id(
+ member),
+ rd_kafka_MemberDescription_host(member));
+ const rd_kafka_MemberAssignment_t *assignment =
+ rd_kafka_MemberDescription_assignment(member);
+ const rd_kafka_topic_partition_list_t
+ *topic_partitions =
+ rd_kafka_MemberAssignment_partitions(
+ assignment);
+ if (!topic_partitions) {
+ printf(" No assignment\n");
+ } else if (topic_partitions->cnt == 0) {
+ printf(" Empty assignment\n");
+ } else {
+ printf(" Assignment:\n");
+ print_partition_list(stdout, topic_partitions,
+ 0, " ");
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * @brief Call rd_kafka_DescribeConsumerGroups() with a list of
+ * groups.
+ */
+static void
+cmd_describe_consumer_groups(rd_kafka_conf_t *conf, int argc, char **argv) {
+ rd_kafka_t *rk;
+ const char **groups = NULL;
+ char errstr[512];
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_event_t *event = NULL;
+ int retval = 0;
+ int groups_cnt = 0;
+
+ if (argc >= 1) {
+ groups = (const char **)&argv[0];
+ groups_cnt = argc;
+ }
+
+ /*
+ * Create consumer instance
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ if (!rk)
+ fatal("Failed to create new consumer: %s", errstr);
+
+ /*
+ * Describe consumer groups
+ */
+ queue = rd_kafka_queue_new(rk);
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS);
+
+ if (rd_kafka_AdminOptions_set_request_timeout(
+ options, 10 * 1000 /* 10s */, errstr, sizeof(errstr))) {
+ fprintf(stderr, "%% Failed to set timeout: %s\n", errstr);
+ goto exit;
+ }
+
+ rd_kafka_DescribeConsumerGroups(rk, groups, groups_cnt, options, queue);
+
+ /* Wait for results */
+ event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by
+ * the request timeout set
+ * above (10s) */);
+
+ if (!event) {
+ /* User hit Ctrl-C,
+ * see yield call in stop() signal handler */
+ fprintf(stderr, "%% Cancelled by user\n");
+
+ } else if (rd_kafka_event_error(event)) {
+ rd_kafka_resp_err_t err = rd_kafka_event_error(event);
+ /* DescribeConsumerGroups request failed */
+ fprintf(stderr,
+ "%% DescribeConsumerGroups failed[%" PRId32 "]: %s\n",
+ err, rd_kafka_event_error_string(event));
+ goto exit;
+
+ } else {
+ /* DescribeConsumerGroups request succeeded, but individual
+ * groups may have errors. */
+ const rd_kafka_DescribeConsumerGroups_result_t *result;
+
+ result = rd_kafka_event_DescribeConsumerGroups_result(event);
+ printf("DescribeConsumerGroups results:\n");
+ retval = print_groups_info(result, groups_cnt);
+ }
+
+
+exit:
+ if (event)
+ rd_kafka_event_destroy(event);
+ rd_kafka_AdminOptions_destroy(options);
+ rd_kafka_queue_destroy(queue);
+ /* Destroy the client instance */
+ rd_kafka_destroy(rk);
+
+ exit(retval);
+}
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf; /**< Client configuration object */
+ int opt;
+ argv0 = argv[0];
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+
+ /*
+ * Parse common options
+ */
+ while ((opt = getopt(argc, argv, "b:X:d:")) != -1) {
+ switch (opt) {
+ case 'b':
+ conf_set(conf, "bootstrap.servers", optarg);
+ break;
+
+ case 'X': {
+ char *name = optarg, *val;
+
+ if (!(val = strchr(name, '=')))
+ fatal("-X expects a name=value argument");
+
+ *val = '\0';
+ val++;
+
+ conf_set(conf, name, val);
+ break;
+ }
+
+ case 'd':
+ conf_set(conf, "debug", optarg);
+ break;
+
+ default:
+ usage("Unknown option %c", (char)opt);
+ }
+ }
+
+ cmd_describe_consumer_groups(conf, argc - optind, &argv[optind]);
+
+ return 0;
+}
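Describing groups presumes their ids are known; list_consumer_groups.c (added by this patch but not excerpted here) discovers them. A condensed sketch of that call — assuming the 2.1.0 ListConsumerGroups API — reusing rk and queue as set up in cmd_describe_consumer_groups():

/* Sketch, not part of the patch: enumerate consumer group ids, which can
 * then be passed to rd_kafka_DescribeConsumerGroups() as above. */
rd_kafka_AdminOptions_t *options =
    rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
rd_kafka_ListConsumerGroups(rk, options, queue);
rd_kafka_AdminOptions_destroy(options);

rd_kafka_event_t *event = rd_kafka_queue_poll(queue, -1);
if (event && !rd_kafka_event_error(event)) {
        size_t n, i;
        const rd_kafka_ListConsumerGroups_result_t *result =
            rd_kafka_event_ListConsumerGroups_result(event);
        const rd_kafka_ConsumerGroupListing_t **valid =
            rd_kafka_ListConsumerGroups_result_valid(result, &n);
        for (i = 0; i < n; i++)
                printf("Group \"%s\", state %s\n",
                       rd_kafka_ConsumerGroupListing_group_id(valid[i]),
                       rd_kafka_consumer_group_state_name(
                           rd_kafka_ConsumerGroupListing_state(valid[i])));
}
rd_kafka_event_destroy(event); /* NULL-safe */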
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/globals.json b/fluent-bit/lib/librdkafka-2.1.0/examples/globals.json
new file mode 100644
index 00000000..527e1262
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/globals.json
@@ -0,0 +1,11 @@
+{"VerifiableConsumer":
+ {
+ "class": "kafkatest.services.verifiable_client.VerifiableClientApp",
+ "exec_cmd": "/vagrant/tests/c/kafkatest_verifiable_client --consumer --debug cgrp,topic,protocol,broker"
+ },
+ "VerifiableProducer":
+ {
+ "class": "kafkatest.services.verifiable_client.VerifiableClientApp",
+ "exec_cmd": "/vagrant/tests/c/kafkatest_verifiable_client --producer --debug topic,broker"
+ }
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/idempotent_producer.c b/fluent-bit/lib/librdkafka-2.1.0/examples/idempotent_producer.c
new file mode 100644
index 00000000..91b42a4b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/idempotent_producer.c
@@ -0,0 +1,344 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Idempotent Producer example.
+ *
+ * The idempotent producer provides strict ordering and
+ * exactly-once producing guarantees.
+ *
+ * From the application developer's perspective, the only difference
+ * from a standard producer is the enabling of the feature by setting
+ * the `enable.idempotence` configuration property to `true`, and
+ * handling fatal (RD_KAFKA_RESP_ERR__FATAL) errors which are raised when
+ * the idempotent guarantees can't be satisfied.
+ */
+
+#define _DEFAULT_SOURCE /* avoid glibc deprecation warning of _BSD_SOURCE */
+#define _BSD_SOURCE /* vsnprintf() */
+#include <stdio.h>
+#include <signal.h>
+#include <unistd.h>
+#include <string.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ run = 0;
+}
+
+
+static int deliveredcnt = 0;
+static int msgerrcnt = 0;
+
+/**
+ * @brief Message delivery report callback.
+ *
+ * This callback is called exactly once per message, indicating if
+ * the message was successfully delivered
+ * (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR) or permanently
+ * failed delivery (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
+ *
+ * The callback is triggered from rd_kafka_poll() or rd_kafka_flush() and
+ * executes on the application's thread.
+ */
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ if (rkmessage->err) {
+ fprintf(stderr, "%% Message delivery failed: %s\n",
+ rd_kafka_err2str(rkmessage->err));
+ msgerrcnt++;
+ } else {
+ fprintf(stderr,
+                       "%% Message delivered (%zu bytes, topic %s, "
+ "partition %" PRId32 ", offset %" PRId64 ")\n",
+ rkmessage->len, rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset);
+ deliveredcnt++;
+ }
+
+ /* The rkmessage is destroyed automatically by librdkafka */
+}
+
+
+/**
+ * @brief Generic error handling callback.
+ *
+ * This callback is triggered by rd_kafka_poll() or rd_kafka_flush()
+ * for client instance-level errors, such as broker connection failures,
+ * authentication issues, etc.
+ *
+ * These errors should generally be considered informational as
+ * the underlying client will automatically try to recover from
+ * any errors encountered, the application does not need to take
+ * action on them.
+ *
+ * But with idempotence truly fatal errors can be raised when
+ * the idempotence guarantees can't be satisfied; these errors
+ * are identified by the `RD_KAFKA_RESP_ERR__FATAL` error code.
+ */
+static void
+error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
+ rd_kafka_resp_err_t orig_err;
+ char errstr[512];
+
+ fprintf(stderr, "%% Error: %s: %s\n", rd_kafka_err2name(err), reason);
+
+ if (err != RD_KAFKA_RESP_ERR__FATAL)
+ return;
+
+ /* Fatal error handling.
+ *
+ * When a fatal error is detected by the producer instance,
+ * it will trigger an error_cb with ERR__FATAL set.
+ * The application should use rd_kafka_fatal_error() to extract
+ * the actual underlying error code and description, propagate it
+ * to the user (for troubleshooting), and then terminate the
+ * producer since it will no longer accept any new messages to
+ * produce().
+ *
+ * Note:
+ * After a fatal error has been raised, rd_kafka_produce*() will
+ * fail with the original error code.
+ *
+ * Note:
+ * As an alternative to an error_cb, the application may call
+ * rd_kafka_fatal_error() at any time to check if a fatal error
+ * has occurred, typically after a failing rd_kafka_produce*() call.
+ */
+
+ orig_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
+ fprintf(stderr, "%% FATAL ERROR: %s: %s\n", rd_kafka_err2name(orig_err),
+ errstr);
+
+ /* Clean termination to get delivery results (from rd_kafka_flush())
+ * for all outstanding/in-transit/queued messages. */
+ fprintf(stderr, "%% Terminating on fatal error\n");
+ run = 0;
+}
+
+
+int main(int argc, char **argv) {
+ rd_kafka_t *rk; /* Producer instance handle */
+ rd_kafka_conf_t *conf; /* Temporary configuration object */
+ char errstr[512]; /* librdkafka API error reporting buffer */
+ rd_kafka_resp_err_t err; /* librdkafka API error code */
+ const char *brokers; /* Argument: broker list */
+ const char *topic; /* Argument: topic to produce to */
+ int msgcnt = 0; /* Number of messages produced */
+
+ /*
+ * Argument validation
+ */
+ if (argc != 3) {
+ fprintf(stderr, "%% Usage: %s <broker> <topic>\n", argv[0]);
+ return 1;
+ }
+
+ brokers = argv[1];
+ topic = argv[2];
+
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+ /* Set bootstrap broker(s) as a comma-separated list of
+ * host or host:port (default port 9092).
+ * librdkafka will use the bootstrap brokers to acquire the full
+ * set of brokers from the cluster. */
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%s\n", errstr);
+ rd_kafka_conf_destroy(conf);
+ return 1;
+ }
+
+ /* Enable the idempotent producer */
+ if (rd_kafka_conf_set(conf, "enable.idempotence", "true", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%s\n", errstr);
+ rd_kafka_conf_destroy(conf);
+ return 1;
+ }
+
+ /* Set the delivery report callback.
+ * This callback will be called once per message to inform
+ * the application if delivery succeeded or failed.
+ * See dr_msg_cb() above. */
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+ /* Set an error handler callback to catch generic instance-level
+ * errors.
+ *
+ * See the `error_cb()` handler above for how to handle the
+ * fatal errors.
+ */
+ rd_kafka_conf_set_error_cb(conf, error_cb);
+
+
+ /*
+ * Create producer instance.
+ *
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ fprintf(stderr, "%% Failed to create new producer: %s\n",
+ errstr);
+ return 1;
+ }
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ fprintf(stderr, "%% Running producer loop. Press Ctrl-C to exit\n");
+
+ while (run) {
+ char buf[64];
+
+ snprintf(buf, sizeof(buf),
+ "Idempotent Producer example message #%d", msgcnt);
+
+ /*
+ * Produce message.
+ * This is an asynchronous call, on success it will only
+ * enqueue the message on the internal producer queue.
+ * The actual delivery attempts to the broker are handled
+ * by background threads.
+ * The previously registered delivery report callback
+ * (dr_msg_cb) is used to signal back to the application
+ * when the message has been delivered (or failed),
+ * and is triggered when the application calls
+ * rd_kafka_poll() or rd_kafka_flush().
+ */
+ retry:
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_VALUE(buf, strlen(buf)),
+ /* Copy the message payload so `buf` can
+ * be reused for the next message. */
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
+
+ if (err) {
+ /**
+ * Failed to *enqueue* message for producing.
+ */
+ fprintf(stderr,
+ "%% Failed to produce to topic %s: %s\n", topic,
+ rd_kafka_err2str(err));
+
+ if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+ /* If the internal queue is full, wait for
+ * messages to be delivered and then retry.
+ * The internal queue represents both
+ * messages to be sent and messages that have
+ * been sent or failed, awaiting their
+ * delivery report callback to be called.
+ *
+ * The internal queue is limited by the
+ * configuration property
+ * queue.buffering.max.messages and
+ * queue.buffering.max.kbytes */
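+
+				/* A sketch of raising those limits
+				 * before rd_kafka_new() (the value is
+				 * illustrative):
+				 *
+				 *   rd_kafka_conf_set(conf,
+				 *       "queue.buffering.max.messages",
+				 *       "500000", errstr,
+				 *       sizeof(errstr));
+				 */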
+ rd_kafka_poll(rk,
+ 1000 /*block for max 1000ms*/);
+ goto retry;
+ } else {
+ /* Produce failed, most likely due to a
+ * fatal error (will be handled by error_cb()),
+ * bail out. */
+
+ /* Instead of using the error_cb(), an
+ * application may check for fatal errors here
+ * by calling rd_kafka_fatal_error(). */
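+
+			/* A minimal sketch of that check (fatal_errstr
+			 * is local to this sketch):
+			 *
+			 *   char fatal_errstr[512];
+			 *   if (rd_kafka_fatal_error(
+			 *           rk, fatal_errstr,
+			 *           sizeof(fatal_errstr)))
+			 *           fprintf(stderr, "%% %s\n",
+			 *                   fatal_errstr);
+			 */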
+ break;
+ }
+ }
+
+ /* A producer application should continually serve
+ * the delivery report queue by calling rd_kafka_poll()
+ * at frequent intervals.
+ * Either put the poll call in your main loop, or in a
+ * dedicated thread, or call it after or before every
+ * rd_kafka_produce*() call.
+ * Just make sure that rd_kafka_poll() is still called
+ * during periods where you are not producing any messages
+ * to make sure previously produced messages have their
+ * delivery report callback served (and any other callbacks
+ * you register). */
+ rd_kafka_poll(rk, 0 /*non-blocking*/);
+
+ msgcnt++;
+
+ /* Since fatal errors are hard to trigger in practice,
+ * use the test API to inject a fabricated fatal error
+ * after some messages have been produced. */
+ if (msgcnt == 13)
+ rd_kafka_test_fatal_error(
+ rk, RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER,
+ "This is a fabricated error to test the "
+ "fatal error handling");
+
+ /* Short sleep to rate-limit this example.
+ * A real application should not do this. */
+ usleep(500 * 1000); /* 500ms */
+ }
+
+
+ /* Wait for final messages to be delivered or fail.
+ * rd_kafka_flush() is an abstraction over rd_kafka_poll() which
+ * waits for all messages to be delivered. */
+ fprintf(stderr, "%% Flushing outstanding messages..\n");
+ rd_kafka_flush(rk, 10 * 1000 /* wait for max 10 seconds */);
+ fprintf(stderr, "%% %d message(s) produced, %d delivered, %d failed\n",
+ msgcnt, deliveredcnt, msgerrcnt);
+
+ /* Save the fatal error code, if any, for use with the exit
+ * status below. */
+ err = rd_kafka_fatal_error(rk, NULL, 0);
+
+ /* Destroy the producer instance */
+ rd_kafka_destroy(rk);
+
+ /* Exit application with an error (1) if there was a fatal error. */
+ if (err)
+ return 1;
+ else
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/kafkatest_verifiable_client.cpp b/fluent-bit/lib/librdkafka-2.1.0/examples/kafkatest_verifiable_client.cpp
new file mode 100644
index 00000000..bdb8607a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/kafkatest_verifiable_client.cpp
@@ -0,0 +1,961 @@
+/*
+ * Copyright (c) 2015, Confluent Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * librdkafka version of the Java VerifiableProducer and VerifiableConsumer
+ * for use with the official Kafka client tests.
+ */
+
+
+#include <iostream>
+#include <fstream>
+#include <sstream>
+#include <map>
+#include <string>
+#include <algorithm>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+#include <unistd.h>
+#include <sys/time.h>
+#include <assert.h>
+#include <ctype.h>
+#include <strings.h>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#elif _AIX
+#include <unistd.h>
+#else
+#include <getopt.h>
+#endif
+
+/*
+ * Typically the include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+static volatile sig_atomic_t run = 1;
+static bool exit_eof = false;
+static int verbosity = 1;
+static std::string value_prefix;
+
+class Assignment {
+ public:
+ static std::string name(const std::string &t, int partition) {
+ std::stringstream stm;
+ stm << t << "." << partition;
+ return stm.str();
+ }
+
+ Assignment() :
+ topic(""),
+ partition(-1),
+ consumedMessages(0),
+ minOffset(-1),
+ maxOffset(0) {
+ printf("Created assignment\n");
+ }
+ Assignment(const Assignment &a) {
+ topic = a.topic;
+ partition = a.partition;
+ consumedMessages = a.consumedMessages;
+ minOffset = a.minOffset;
+ maxOffset = a.maxOffset;
+ }
+
+ Assignment &operator=(const Assignment &a) {
+ this->topic = a.topic;
+ this->partition = a.partition;
+ this->consumedMessages = a.consumedMessages;
+ this->minOffset = a.minOffset;
+ this->maxOffset = a.maxOffset;
+ return *this;
+ }
+
+ int operator==(const Assignment &a) const {
+ return this->topic == a.topic && this->partition == a.partition;
+ }
+
+ int operator<(const Assignment &a) const {
+ if (this->topic < a.topic)
+ return 1;
+ if (this->topic > a.topic)
+ return 0;
+ return (this->partition < a.partition);
+ }
+
+ void setup(std::string t, int32_t p) {
+ assert(!t.empty());
+ assert(topic.empty() || topic == t);
+ assert(partition == -1 || partition == p);
+ topic = t;
+ partition = p;
+ }
+
+ std::string topic;
+ int partition;
+ int consumedMessages;
+ int64_t minOffset;
+ int64_t maxOffset;
+};
+
+
+
+static struct {
+ int maxMessages;
+
+ struct {
+ int numAcked;
+ int numSent;
+ int numErr;
+ } producer;
+
+ struct {
+ int consumedMessages;
+ int consumedMessagesLastReported;
+ int consumedMessagesAtLastCommit;
+ bool useAutoCommit;
+ std::map<std::string, Assignment> assignments;
+ } consumer;
+} state = {
+ /* .maxMessages = */ -1};
+
+
+static RdKafka::KafkaConsumer *consumer;
+
+
+static std::string now() {
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ time_t t = tv.tv_sec;
+ struct tm tm;
+ char buf[64];
+
+ localtime_r(&t, &tm);
+ strftime(buf, sizeof(buf), "%H:%M:%S", &tm);
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ".%03d",
+ (int)(tv.tv_usec / 1000));
+
+ return buf;
+}
+
+
+static time_t watchdog_last_kick;
+static const int watchdog_timeout = 20; /* Must be > socket.timeout.ms */
+static void sigwatchdog(int sig) {
+ time_t t = time(NULL);
+ if (watchdog_last_kick + watchdog_timeout <= t) {
+ std::cerr << now() << ": WATCHDOG TIMEOUT ("
+ << (int)(t - watchdog_last_kick) << "s): TERMINATING"
+ << std::endl;
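+    /* Deliberately write through a NULL pointer so the process
+     * crashes with a core dump; abort() follows as a fallback. */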
+ int *i = NULL;
+ *i = 100;
+ abort();
+ }
+}
+
+static void watchdog_kick() {
+ watchdog_last_kick = time(NULL);
+
+ /* Safeguard against hangs on exit */
+ alarm(watchdog_timeout);
+}
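+
+/* The watchdog pattern above: every kick re-arms alarm(), and if no
+ * further kick arrives within watchdog_timeout seconds, SIGALRM fires
+ * and sigwatchdog() terminates the stuck process. */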
+
+
+
+static void errorString(const std::string &name,
+ const std::string &errmsg,
+ const std::string &topic,
+ const std::string *key,
+ const std::string &value) {
+ std::cout << "{ "
+ << "\"name\": \"" << name << "\", "
+ << "\"_time\": \"" << now() << "\", "
+ << "\"message\": \"" << errmsg << "\", "
+ << "\"topic\": \"" << topic << "\", "
+ << "\"key\": \"" << (key ? *key : "NULL") << "\", "
+ << "\"value\": \"" << value << "\" "
+ << "}" << std::endl;
+}
+
+
+static void successString(const std::string &name,
+ const std::string &topic,
+ int partition,
+ int64_t offset,
+ const std::string *key,
+ const std::string &value) {
+ std::cout << "{ "
+ << "\"name\": \"" << name << "\", "
+ << "\"_time\": \"" << now() << "\", "
+ << "\"topic\": \"" << topic << "\", "
+ << "\"partition\": " << partition << ", "
+ << "\"offset\": " << offset << ", "
+ << "\"key\": \"" << (key ? *key : "NULL") << "\", "
+ << "\"value\": \"" << value << "\" "
+ << "}" << std::endl;
+}
+
+
+#if FIXME
+static void offsetStatus(bool success,
+ const std::string &topic,
+ int partition,
+ int64_t offset,
+ const std::string &errstr) {
+ std::cout << "{ "
+ "\"name\": \"offsets_committed\", "
+ << "\"success\": " << success << ", "
+ << "\"offsets\": [ "
+ << " { "
+ << " \"topic\": \"" << topic << "\", "
+ << " \"partition\": " << partition << ", "
+ << " \"offset\": " << (int)offset << ", "
+ << " \"error\": \"" << errstr << "\" "
+ << " } "
+ << "] }" << std::endl;
+}
+#endif
+
+
+static void sigterm(int sig) {
+ std::cerr << now() << ": Terminating because of signal " << sig << std::endl;
+
+ if (!run) {
+ std::cerr << now() << ": Forced termination" << std::endl;
+ exit(1);
+ }
+ run = 0;
+}
+
+
+class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
+ public:
+ void dr_cb(RdKafka::Message &message) {
+ if (message.err()) {
+ state.producer.numErr++;
+ errorString("producer_send_error", message.errstr(), message.topic_name(),
+ message.key(),
+ std::string(static_cast<const char *>(message.payload()),
+ message.len()));
+ } else {
+ successString("producer_send_success", message.topic_name(),
+ (int)message.partition(), message.offset(), message.key(),
+ std::string(static_cast<const char *>(message.payload()),
+ message.len()));
+ state.producer.numAcked++;
+ }
+ }
+};
+
+
+class ExampleEventCb : public RdKafka::EventCb {
+ public:
+ void event_cb(RdKafka::Event &event) {
+ switch (event.type()) {
+ case RdKafka::Event::EVENT_ERROR:
+ std::cerr << now() << ": ERROR (" << RdKafka::err2str(event.err())
+ << "): " << event.str() << std::endl;
+ break;
+
+ case RdKafka::Event::EVENT_STATS:
+ std::cerr << now() << ": \"STATS\": " << event.str() << std::endl;
+ break;
+
+ case RdKafka::Event::EVENT_LOG:
+ std::cerr << now() << ": LOG-" << event.severity() << "-" << event.fac()
+ << ": " << event.str() << std::endl;
+ break;
+
+ default:
+ std::cerr << now() << ": EVENT " << event.type() << " ("
+ << RdKafka::err2str(event.err()) << "): " << event.str()
+ << std::endl;
+ break;
+ }
+ }
+};
+
+
+/* Use of this partitioner is pretty pointless since no key is provided
+ * in the produce() call. */
+class MyHashPartitionerCb : public RdKafka::PartitionerCb {
+ public:
+ int32_t partitioner_cb(const RdKafka::Topic *topic,
+ const std::string *key,
+ int32_t partition_cnt,
+ void *msg_opaque) {
+ return djb_hash(key->c_str(), key->size()) % partition_cnt;
+ }
+
+ private:
+ static inline unsigned int djb_hash(const char *str, size_t len) {
+ unsigned int hash = 5381;
+ for (size_t i = 0; i < len; i++)
+ hash = ((hash << 5) + hash) + str[i];
+ return hash;
+ }
+};
+
+
+
+/**
+ * Print the number of records consumed, every 1000 messages or on
+ * timeout.
+ */
+static void report_records_consumed(int immediate) {
+ std::map<std::string, Assignment> *assignments = &state.consumer.assignments;
+
+ if (state.consumer.consumedMessages <=
+ state.consumer.consumedMessagesLastReported + (immediate ? 0 : 999))
+ return;
+
+ std::cout << "{ "
+ "\"name\": \"records_consumed\", "
+ << "\"_totcount\": " << state.consumer.consumedMessages << ", "
+ << "\"count\": "
+ << (state.consumer.consumedMessages -
+ state.consumer.consumedMessagesLastReported)
+ << ", "
+ << "\"partitions\": [ ";
+
+ for (std::map<std::string, Assignment>::iterator ii = assignments->begin();
+ ii != assignments->end(); ii++) {
+ Assignment *a = &(*ii).second;
+ assert(!a->topic.empty());
+ std::cout << (ii == assignments->begin() ? "" : ", ") << " { "
+ << " \"topic\": \"" << a->topic << "\", "
+ << " \"partition\": " << a->partition << ", "
+ << " \"minOffset\": " << a->minOffset << ", "
+ << " \"maxOffset\": " << a->maxOffset << " "
+ << " } ";
+ a->minOffset = -1;
+ }
+
+ std::cout << "] }" << std::endl;
+
+ state.consumer.consumedMessagesLastReported = state.consumer.consumedMessages;
+}
+
+
+class ExampleOffsetCommitCb : public RdKafka::OffsetCommitCb {
+ public:
+ void offset_commit_cb(RdKafka::ErrorCode err,
+ std::vector<RdKafka::TopicPartition *> &offsets) {
+ std::cerr << now() << ": Propagate offset for " << offsets.size()
+ << " partitions, error: " << RdKafka::err2str(err) << std::endl;
+
+ /* No offsets to commit, don't report anything. */
+ if (err == RdKafka::ERR__NO_OFFSET)
+ return;
+
+ /* Send an up-to-date records_consumed report to make sure
+ * consumed > committed. */
+ report_records_consumed(1);
+
+ std::cout << "{ "
+ << "\"name\": \"offsets_committed\", "
+ << "\"success\": " << (err ? "false" : "true") << ", "
+ << "\"error\": \"" << (err ? RdKafka::err2str(err) : "") << "\", "
+ << "\"_autocommit\": "
+ << (state.consumer.useAutoCommit ? "true" : "false") << ", "
+ << "\"offsets\": [ ";
+ assert(offsets.size() > 0);
+ for (unsigned int i = 0; i < offsets.size(); i++) {
+ std::cout << (i == 0 ? "" : ", ") << "{ "
+ << " \"topic\": \"" << offsets[i]->topic() << "\", "
+ << " \"partition\": " << offsets[i]->partition() << ", "
+ << " \"offset\": " << (int)offsets[i]->offset() << ", "
+ << " \"error\": \""
+ << (offsets[i]->err() ? RdKafka::err2str(offsets[i]->err())
+ : "")
+ << "\" "
+ << " }";
+ }
+ std::cout << " ] }" << std::endl;
+ }
+};
+
+static ExampleOffsetCommitCb ex_offset_commit_cb;
+
+
+/**
+ * Commit every 1000 messages or whenever there is a consume timeout.
+ */
+static void do_commit(RdKafka::KafkaConsumer *consumer, int immediate) {
+ if (!immediate && (state.consumer.useAutoCommit ||
+ state.consumer.consumedMessagesAtLastCommit + 1000 >
+ state.consumer.consumedMessages))
+ return;
+
+ /* Make sure we report consumption before commit,
+ * otherwise tests may fail because of commit > consumed. */
+ if (state.consumer.consumedMessagesLastReported <
+ state.consumer.consumedMessages)
+ report_records_consumed(1);
+
+ std::cerr << now() << ": committing "
+ << (state.consumer.consumedMessages -
+ state.consumer.consumedMessagesAtLastCommit)
+ << " messages" << std::endl;
+
+ RdKafka::ErrorCode err;
+ err = consumer->commitSync(&ex_offset_commit_cb);
+
+ std::cerr << now() << ": "
+ << "sync commit returned " << RdKafka::err2str(err) << std::endl;
+
+ state.consumer.consumedMessagesAtLastCommit = state.consumer.consumedMessages;
+}
+
+
+void msg_consume(RdKafka::KafkaConsumer *consumer,
+ RdKafka::Message *msg,
+ void *opaque) {
+ switch (msg->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ /* Try reporting consumed messages */
+ report_records_consumed(1);
+ /* Commit once per consume() timeout instead of on every message.
+ * Also commit every 1000 messages, whichever comes first. */
+ do_commit(consumer, 1);
+ break;
+
+
+ case RdKafka::ERR_NO_ERROR: {
+ /* Real message */
+ if (verbosity > 2)
+ std::cerr << now() << ": Read msg from " << msg->topic_name() << " ["
+ << (int)msg->partition() << "] at offset " << msg->offset()
+ << std::endl;
+
+ if (state.maxMessages >= 0 &&
+ state.consumer.consumedMessages >= state.maxMessages)
+ return;
+
+
+ Assignment *a = &state.consumer.assignments[Assignment::name(
+ msg->topic_name(), msg->partition())];
+ a->setup(msg->topic_name(), msg->partition());
+
+ a->consumedMessages++;
+ if (a->minOffset == -1)
+ a->minOffset = msg->offset();
+ if (a->maxOffset < msg->offset())
+ a->maxOffset = msg->offset();
+
+ if (msg->key()) {
+ if (verbosity >= 3)
+ std::cerr << now() << ": Key: " << *msg->key() << std::endl;
+ }
+
+ if (verbosity >= 3)
+ fprintf(stderr, "%.*s\n", static_cast<int>(msg->len()),
+ static_cast<const char *>(msg->payload()));
+
+ state.consumer.consumedMessages++;
+
+ report_records_consumed(0);
+
+ do_commit(consumer, 0);
+ } break;
+
+ case RdKafka::ERR__PARTITION_EOF:
+ /* Last message */
+ if (exit_eof) {
+ std::cerr << now() << ": Terminate: exit on EOF" << std::endl;
+ run = 0;
+ }
+ break;
+
+ case RdKafka::ERR__UNKNOWN_TOPIC:
+ case RdKafka::ERR__UNKNOWN_PARTITION:
+ std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl;
+ run = 0;
+ break;
+
+ case RdKafka::ERR_GROUP_COORDINATOR_NOT_AVAILABLE:
+ std::cerr << now() << ": Warning: " << msg->errstr() << std::endl;
+ break;
+
+ default:
+ /* Errors */
+ std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl;
+ run = 0;
+ }
+}
+
+
+
+class ExampleConsumeCb : public RdKafka::ConsumeCb {
+ public:
+ void consume_cb(RdKafka::Message &msg, void *opaque) {
+ msg_consume(consumer_, &msg, opaque);
+ }
+ RdKafka::KafkaConsumer *consumer_;
+};
+
+class ExampleRebalanceCb : public RdKafka::RebalanceCb {
+ private:
+ static std::string part_list_json(
+ const std::vector<RdKafka::TopicPartition *> &partitions) {
+ std::ostringstream out;
+ for (unsigned int i = 0; i < partitions.size(); i++)
+ out << (i == 0 ? "" : ", ") << "{ "
+ << " \"topic\": \"" << partitions[i]->topic() << "\", "
+ << " \"partition\": " << partitions[i]->partition() << " }";
+ return out.str();
+ }
+
+ public:
+ void rebalance_cb(RdKafka::KafkaConsumer *consumer,
+ RdKafka::ErrorCode err,
+ std::vector<RdKafka::TopicPartition *> &partitions) {
+ std::cerr << now() << ": rebalance_cb " << RdKafka::err2str(err) << " for "
+ << partitions.size() << " partitions" << std::endl;
+ /* Send message report prior to rebalancing event to make sure they
+ * are accounted for on the "right side" of the rebalance. */
+ report_records_consumed(1);
+
+ if (err == RdKafka::ERR__ASSIGN_PARTITIONS)
+ consumer->assign(partitions);
+ else {
+ do_commit(consumer, 1);
+ consumer->unassign();
+ }
+
+ std::cout << "{ "
+ << "\"name\": \"partitions_"
+ << (err == RdKafka::ERR__ASSIGN_PARTITIONS ? "assigned"
+ : "revoked")
+ << "\", "
+ << "\"partitions\": [ " << part_list_json(partitions) << "] }"
+ << std::endl;
+ }
+};
+
+
+
+/**
+ * @brief Read (Java client) configuration file
+ */
+static void read_conf_file(RdKafka::Conf *conf, const std::string &conf_file) {
+ std::ifstream inf(conf_file.c_str());
+
+ if (!inf) {
+ std::cerr << now() << ": " << conf_file << ": could not open file"
+ << std::endl;
+ exit(1);
+ }
+
+ std::cerr << now() << ": " << conf_file << ": read config file" << std::endl;
+
+ std::string line;
+ int linenr = 0;
+
+ while (std::getline(inf, line)) {
+ linenr++;
+
+ // Ignore empty lines and comments
+ if (line.length() == 0 || line[0] == '#')
+ continue;
+
+ // Match on key=value..
+ size_t d = line.find("=");
+ if (d == 0 || d == std::string::npos) {
+ std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << line
+ << ": ignoring invalid line (expected key=value)"
+ << std::endl;
+ continue;
+ }
+
+ std::string key = line.substr(0, d);
+ std::string val = line.substr(d + 1);
+
+ std::string errstr;
+ if (conf->set(key, val, errstr)) {
+ std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key
+ << "=" << val << ": " << errstr << ": ignoring error"
+ << std::endl;
+ } else {
+ std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key
+ << "=" << val << ": applied to configuration" << std::endl;
+ }
+ }
+
+ inf.close();
+}
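+
+/* A configuration file in the Java client's .properties style might
+ * look like this (hypothetical values):
+ *
+ *   bootstrap.servers=broker1:9092
+ *   security.protocol=PLAINTEXT
+ */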
+
+
+
+int main(int argc, char **argv) {
+ std::string brokers = "localhost";
+ std::string errstr;
+ std::vector<std::string> topics;
+ std::string mode = "P";
+ int throughput = 0;
+ int32_t partition = RdKafka::Topic::PARTITION_UA;
+ MyHashPartitionerCb hash_partitioner;
+ int64_t create_time = -1;
+
+ std::cerr << now() << ": librdkafka version " << RdKafka::version_str()
+ << " (" << RdKafka::version() << ")" << std::endl;
+
+ /*
+ * Create configuration objects
+ */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+ /* Java VerifiableProducer defaults to acks=all */
+ if (conf->set("acks", "all", errstr)) {
+ std::cerr << now() << ": " << errstr << std::endl;
+ exit(1);
+ }
+
+ /* Avoid slow shutdown on error */
+ if (conf->set("message.timeout.ms", "60000", errstr)) {
+ std::cerr << now() << ": " << errstr << std::endl;
+ exit(1);
+ }
+
+ {
+ char hostname[128];
+ gethostname(hostname, sizeof(hostname) - 1);
+ hostname[sizeof(hostname) - 1] = '\0'; /* Ensure NUL-termination */
+ conf->set("client.id", std::string("rdkafka@") + hostname, errstr);
+ }
+
+ conf->set("log.thread.name", "true", errstr);
+
+ /* auto commit is explicitly enabled with --enable-autocommit */
+ conf->set("enable.auto.commit", "false", errstr);
+
+ /* Keep protocol request timeouts under the watchdog timeout to make
+ * sure things like commitSync() don't fall victim to the watchdog. */
+ conf->set("socket.timeout.ms", "10000", errstr);
+
+ conf->set("fetch.wait.max.ms", "500", errstr);
+ conf->set("fetch.min.bytes", "4096", errstr);
+
+ conf->set("enable.partition.eof", "true", errstr);
+
+ for (int i = 1; i < argc; i++) {
+ const char *name = argv[i];
+ const char *val = i + 1 < argc ? argv[i + 1] : NULL;
+
+ if (val && !strncmp(val, "-", 1))
+ val = NULL;
+
+ std::cout << now() << ": argument: " << name << " " << (val ? val : "")
+ << std::endl;
+
+ if (val) {
+ if (!strcmp(name, "--topic"))
+ topics.push_back(val);
+ else if (!strcmp(name, "--broker-list"))
+ brokers = val;
+ else if (!strcmp(name, "--max-messages"))
+ state.maxMessages = atoi(val);
+ else if (!strcmp(name, "--throughput"))
+ throughput = atoi(val);
+ else if (!strcmp(name, "--producer.config") ||
+ !strcmp(name, "--consumer.config"))
+ read_conf_file(conf, val);
+ else if (!strcmp(name, "--group-id"))
+ conf->set("group.id", val, errstr);
+ else if (!strcmp(name, "--session-timeout"))
+ conf->set("session.timeout.ms", val, errstr);
+ else if (!strcmp(name, "--reset-policy")) {
+ if (conf->set("auto.offset.reset", val, errstr)) {
+ std::cerr << now() << ": " << errstr << std::endl;
+ exit(1);
+ }
+ } else if (!strcmp(name, "--assignment-strategy")) {
+ /* The system tests pass the Java class name(s) rather than
+ * the configuration value. Fix it.
+ * "org.apache.kafka.clients.consumer.RangeAssignor,.." -> "range,.."
+ */
+ std::string s = val;
+ size_t pos;
+
+ while ((pos = s.find("org.apache.kafka.clients.consumer.")) !=
+ std::string::npos)
+ s.erase(pos, strlen("org.apache.kafka.clients.consumer."));
+
+ while ((pos = s.find("Assignor")) != std::string::npos)
+ s.erase(pos, strlen("Assignor"));
+
+ std::transform(s.begin(), s.end(), s.begin(), ::tolower);
+
+ std::cerr << now() << ": converted " << name << " " << val << " to "
+ << s << std::endl;
+
+ if (conf->set("partition.assignment.strategy", s.c_str(), errstr)) {
+ std::cerr << now() << ": " << errstr << std::endl;
+ exit(1);
+ }
+ } else if (!strcmp(name, "--value-prefix")) {
+ value_prefix = std::string(val) + ".";
+ } else if (!strcmp(name, "--acks")) {
+ if (conf->set("acks", val, errstr)) {
+ std::cerr << now() << ": " << errstr << std::endl;
+ exit(1);
+ }
+ } else if (!strcmp(name, "--message-create-time")) {
+ create_time = (int64_t)atoi(val);
+ } else if (!strcmp(name, "--debug")) {
+ conf->set("debug", val, errstr);
+ } else if (!strcmp(name, "-X")) {
+ char *s = strdup(val);
+ char *t = strchr(s, '=');
+ if (!t)
+ t = (char *)"";
+ else {
+ *t = '\0';
+ t++;
+ }
+ if (conf->set(s, t, errstr)) {
+ std::cerr << now() << ": " << errstr << std::endl;
+ exit(1);
+ }
+ free(s);
+ } else {
+ std::cerr << now() << ": Unknown option " << name << std::endl;
+ exit(1);
+ }
+
+ i++;
+
+ } else {
+ if (!strcmp(name, "--consumer"))
+ mode = "C";
+ else if (!strcmp(name, "--producer"))
+ mode = "P";
+ else if (!strcmp(name, "--enable-autocommit")) {
+ state.consumer.useAutoCommit = true;
+ conf->set("enable.auto.commit", "true", errstr);
+ } else if (!strcmp(name, "-v"))
+ verbosity++;
+ else if (!strcmp(name, "-q"))
+ verbosity--;
+ else {
+ std::cerr << now() << ": Unknown option or missing argument to " << name
+ << std::endl;
+ exit(1);
+ }
+ }
+ }
+
+ if (topics.empty() || brokers.empty()) {
+ std::cerr << now() << ": Missing --topic and --broker-list" << std::endl;
+ exit(1);
+ }
+
+
+ /*
+ * Set configuration properties
+ */
+ conf->set("metadata.broker.list", brokers, errstr);
+
+ ExampleEventCb ex_event_cb;
+ conf->set("event_cb", &ex_event_cb, errstr);
+
+ signal(SIGINT, sigterm);
+ signal(SIGTERM, sigterm);
+ signal(SIGALRM, sigwatchdog);
+
+
+ if (mode == "P") {
+ /*
+ * Producer mode
+ */
+
+ ExampleDeliveryReportCb ex_dr_cb;
+
+ /* Set delivery report callback */
+ conf->set("dr_cb", &ex_dr_cb, errstr);
+
+ /*
+ * Create producer using accumulated global configuration.
+ */
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer) {
+ std::cerr << now() << ": Failed to create producer: " << errstr
+ << std::endl;
+ exit(1);
+ }
+
+ std::cerr << now() << ": % Created producer " << producer->name()
+ << std::endl;
+
+ /*
+ * Create topic handle.
+ */
+ RdKafka::Topic *topic =
+ RdKafka::Topic::create(producer, topics[0], NULL, errstr);
+ if (!topic) {
+ std::cerr << now() << ": Failed to create topic: " << errstr << std::endl;
+ exit(1);
+ }
+
+ static const int delay_us = throughput ? 1000000 / throughput : 10;
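+
+    /* delay_us roughly paces production at --throughput msgs/s:
+     * e.g. --throughput 100 gives 1000000/100 = 10000 us between
+     * messages; without --throughput a 10 us delay is used. */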
+
+ if (state.maxMessages == -1)
+ state.maxMessages = 1000000; /* Avoid infinite produce */
+
+ for (int i = 0; run && i < state.maxMessages; i++) {
+ /*
+ * Produce message
+ */
+ std::ostringstream msg;
+ msg << value_prefix << i;
+ while (true) {
+ RdKafka::ErrorCode resp;
+ if (create_time == -1) {
+ resp = producer->produce(
+ topic, partition,
+ RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
+ const_cast<char *>(msg.str().c_str()), msg.str().size(), NULL,
+ NULL);
+ } else {
+ resp = producer->produce(
+ topics[0], partition,
+ RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
+ const_cast<char *>(msg.str().c_str()), msg.str().size(), NULL, 0,
+ create_time, NULL);
+ }
+
+ if (resp == RdKafka::ERR__QUEUE_FULL) {
+ producer->poll(100);
+ continue;
+ } else if (resp != RdKafka::ERR_NO_ERROR) {
+ errorString("producer_send_error", RdKafka::err2str(resp),
+ topic->name(), NULL, msg.str());
+ state.producer.numErr++;
+ } else {
+ state.producer.numSent++;
+ }
+ break;
+ }
+
+ producer->poll(delay_us / 1000);
+ usleep(1000);
+ watchdog_kick();
+ }
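+
+    /* Re-arm the run flag so the flush loop below executes even if
+     * the produce loop was interrupted by a first signal. */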
+ run = 1;
+
+ while (run && producer->outq_len() > 0) {
+ std::cerr << now() << ": Waiting for " << producer->outq_len()
+ << std::endl;
+ producer->poll(1000);
+ watchdog_kick();
+ }
+
+ std::cerr << now() << ": " << state.producer.numAcked << "/"
+ << state.producer.numSent << "/" << state.maxMessages
+ << " msgs acked/sent/max, " << state.producer.numErr << " errored"
+ << std::endl;
+
+ delete topic;
+ delete producer;
+
+
+ } else if (mode == "C") {
+ /*
+ * Consumer mode
+ */
+
+ conf->set("auto.offset.reset", "smallest", errstr);
+
+ ExampleRebalanceCb ex_rebalance_cb;
+ conf->set("rebalance_cb", &ex_rebalance_cb, errstr);
+
+ conf->set("offset_commit_cb", &ex_offset_commit_cb, errstr);
+
+
+ /*
+ * Create consumer using accumulated global configuration.
+ */
+ consumer = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!consumer) {
+ std::cerr << now() << ": Failed to create consumer: " << errstr
+ << std::endl;
+ exit(1);
+ }
+
+ std::cerr << now() << ": % Created consumer " << consumer->name()
+ << std::endl;
+
+ /*
+ * Subscribe to topic(s)
+ */
+ RdKafka::ErrorCode resp = consumer->subscribe(topics);
+ if (resp != RdKafka::ERR_NO_ERROR) {
+ std::cerr << now() << ": Failed to subscribe to " << topics.size()
+ << " topics: " << RdKafka::err2str(resp) << std::endl;
+ exit(1);
+ }
+
+ watchdog_kick();
+
+ /*
+ * Consume messages
+ */
+ while (run) {
+ RdKafka::Message *msg = consumer->consume(500);
+ msg_consume(consumer, msg, NULL);
+ delete msg;
+ watchdog_kick();
+ }
+
+ std::cerr << now() << ": Final commit on termination" << std::endl;
+
+ /* Final commit */
+ do_commit(consumer, 1);
+
+ /*
+ * Stop consumer
+ */
+ consumer->close();
+
+ delete consumer;
+ }
+
+ std::cout << "{ \"name\": \"shutdown_complete\" }" << std::endl;
+
+ /*
+ * Wait for RdKafka to decommission.
+ * This is not strictly needed (given the outq_len() check above), but
+ * allows RdKafka to clean up all its resources before the application
+ * exits so that memory profilers such as valgrind won't complain about
+ * memory leaks.
+ */
+ RdKafka::wait_destroyed(5000);
+
+ std::cerr << now() << ": EXITING WITH RETURN VALUE 0" << std::endl;
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_group_offsets.c b/fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_group_offsets.c
new file mode 100644
index 00000000..03e878ee
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_group_offsets.c
@@ -0,0 +1,359 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * ListConsumerGroupOffsets usage example.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#else
+#include <getopt.h>
+#endif
+
+
+/* The typical include path would be <librdkafka/rdkafka.h>, but this
+ * program is built from within the librdkafka source tree and thus
+ * differs. */
+#include "rdkafka.h"
+
+
+const char *argv0;
+
+static rd_kafka_queue_t *queue; /** Admin result queue.
+ * This is a global so we can
+ * yield in stop() */
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ if (!run) {
+ fprintf(stderr, "%% Forced termination\n");
+ exit(2);
+ }
+ run = 0;
+ rd_kafka_queue_yield(queue);
+}
+
+
+static void usage(const char *reason, ...) {
+
+ fprintf(stderr,
+ "List consumer group offsets usage examples\n"
+ "\n"
+ "Usage: %s <options> <group_id> "
+ "<require_stable_offsets>\n"
+ " <topic1> <partition1>\n"
+ " <topic2> <partition2>\n"
+ " ...\n"
+ "\n"
+ "Options:\n"
+ " -b <brokers> Bootstrap server list to connect to.\n"
+ " -X <prop=val> Set librdkafka configuration property.\n"
+ " See CONFIGURATION.md for full list.\n"
+ " -d <dbg,..> Enable librdkafka debugging (%s).\n"
+ "\n",
+ argv0, rd_kafka_get_debug_contexts());
+
+ if (reason) {
+ va_list ap;
+ char reasonbuf[512];
+
+ va_start(ap, reason);
+ vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap);
+ va_end(ap);
+
+ fprintf(stderr, "ERROR: %s\n", reasonbuf);
+ }
+
+ exit(reason ? 1 : 0);
+}
+
+
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(2); \
+ } while (0)
+
+
+/**
+ * @brief Set config property. Exit on failure.
+ */
+static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
+ char errstr[512];
+
+ if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK)
+ fatal("Failed to set %s=%s: %s", name, val, errstr);
+}
+
+
+static void
+print_partition_list(FILE *fp,
+ const rd_kafka_topic_partition_list_t *partitions,
+ int print_offset,
+ const char *prefix) {
+ int i;
+
+ if (partitions->cnt == 0) {
+ fprintf(fp, "%sNo partition found", prefix);
+ }
+ for (i = 0; i < partitions->cnt; i++) {
+ char offset_string[512] = "";
+ if (print_offset) {
+ snprintf(offset_string, sizeof(offset_string),
+ " offset %" PRId64,
+ partitions->elems[i].offset);
+ }
+ fprintf(fp, "%s%s %s [%" PRId32 "]%s error %s",
+ i > 0 ? "\n" : "", prefix, partitions->elems[i].topic,
+ partitions->elems[i].partition, offset_string,
+ rd_kafka_err2str(partitions->elems[i].err));
+ }
+ fprintf(fp, "\n");
+}
+
+/**
+ * @brief Parse an integer or fail.
+ */
+int64_t parse_int(const char *what, const char *str) {
+ char *end;
+ unsigned long long n = strtoull(str, &end, 0);
+
+ if (end != str + strlen(str)) {
+ fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n",
+ what, str);
+ exit(1);
+ }
+
+ return (int64_t)n;
+}
+
+static void
+cmd_list_consumer_group_offsets(rd_kafka_conf_t *conf, int argc, char **argv) {
+ char errstr[512]; /* librdkafka API error reporting buffer */
+ rd_kafka_t *rk; /* Admin client instance */
+ rd_kafka_AdminOptions_t *options; /* (Optional) Options for
+ * ListConsumerGroupOffsets() */
+ rd_kafka_event_t *event; /* ListConsumerGroupOffsets result event */
+ const int min_argc = 2;
+ char *topic;
+ int partition;
+ int require_stable_offsets = 0, num_partitions = 0;
+ rd_kafka_ListConsumerGroupOffsets_t *list_cgrp_offsets;
+ rd_kafka_error_t *error;
+ const char *group;
+
+ /*
+ * Argument validation
+ */
+ if (argc < min_argc || (argc - min_argc) % 2 != 0)
+ usage("Wrong number of arguments");
+ else {
+ require_stable_offsets =
+ parse_int("require_stable_offsets", argv[1]);
+ if (require_stable_offsets < 0 || require_stable_offsets > 1)
+ usage("require_stable_offsets must be 0 or 1");
+ }
+
+ num_partitions = (argc - min_argc) / 2;
+ group = argv[0];
+
+ /*
+ * Create an admin client. It can be created from any client type,
+ * so we choose a producer since it requires no extra configuration
+ * and is more lightweight than the consumer.
+ *
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ fprintf(stderr, "%% Failed to create new producer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ /* The Admin API is completely asynchronous, results are emitted
+ * on the result queue that is passed to ListConsumerGroupOffsets() */
+ queue = rd_kafka_queue_new(rk);
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ /* Set timeout (optional) */
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS);
+ if (rd_kafka_AdminOptions_set_request_timeout(
+ options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) {
+ fprintf(stderr, "%% Failed to set timeout: %s\n", errstr);
+ exit(1);
+ }
+ /* Set requested require stable offsets */
+ if ((error = rd_kafka_AdminOptions_set_require_stable_offsets(
+ options, require_stable_offsets))) {
+ fprintf(stderr, "%% Failed to set require stable offsets: %s\n",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ exit(1);
+ }
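+
+	/* Background (general Kafka semantics, not specific to this
+	 * file): "stable" offsets matter when offsets are committed
+	 * within transactions; requiring them makes the broker defer
+	 * its response until no pending transactional commit could
+	 * still change the result. */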
+
+ /* Read passed partition-offsets */
+ rd_kafka_topic_partition_list_t *partitions = NULL;
+ if (num_partitions > 0) {
+ int i;
+ partitions = rd_kafka_topic_partition_list_new(num_partitions);
+ for (i = 0; i < num_partitions; i++) {
+ topic = argv[min_argc + i * 2];
+ partition =
+ parse_int("partition", argv[min_argc + i * 2 + 1]);
+ rd_kafka_topic_partition_list_add(partitions, topic,
+ partition);
+ }
+ }
+
+ /* Create argument */
+ list_cgrp_offsets =
+ rd_kafka_ListConsumerGroupOffsets_new(group, partitions);
+ /* Call ListConsumerGroupOffsets */
+ rd_kafka_ListConsumerGroupOffsets(rk, &list_cgrp_offsets, 1, options,
+ queue);
+
+ /* Clean up input arguments */
+ rd_kafka_ListConsumerGroupOffsets_destroy(list_cgrp_offsets);
+ rd_kafka_AdminOptions_destroy(options);
+
+
+ /* Wait for results */
+ event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by
+ * the request timeout set
+ * above (30s) */);
+
+ if (!event) {
+ /* User hit Ctrl-C,
+ * see yield call in stop() signal handler */
+ fprintf(stderr, "%% Cancelled by user\n");
+
+ } else if (rd_kafka_event_error(event)) {
+ /* ListConsumerGroupOffsets request failed */
+ fprintf(stderr, "%% ListConsumerGroupOffsets failed: %s\n",
+ rd_kafka_event_error_string(event));
+ exit(1);
+
+ } else {
+ /* ListConsumerGroupOffsets request succeeded, but individual
+ * partitions may have errors. */
+ const rd_kafka_ListConsumerGroupOffsets_result_t *result;
+ const rd_kafka_group_result_t **groups;
+ size_t n_groups, i;
+
+ result = rd_kafka_event_ListConsumerGroupOffsets_result(event);
+ groups = rd_kafka_ListConsumerGroupOffsets_result_groups(
+ result, &n_groups);
+
+ printf("ListConsumerGroupOffsets results:\n");
+ for (i = 0; i < n_groups; i++) {
+ const rd_kafka_group_result_t *group = groups[i];
+ const rd_kafka_topic_partition_list_t *partitions =
+ rd_kafka_group_result_partitions(group);
+ print_partition_list(stderr, partitions, 1, " ");
+ }
+ }
+
+ if (partitions)
+ rd_kafka_topic_partition_list_destroy(partitions);
+
+ /* Destroy event object when we're done with it.
+ * Note: rd_kafka_event_destroy() allows a NULL event. */
+ rd_kafka_event_destroy(event);
+
+ /* Destroy queue */
+ rd_kafka_queue_destroy(queue);
+
+ /* Destroy the producer instance */
+ rd_kafka_destroy(rk);
+}
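+
+/* Example invocation (hypothetical broker and names), matching the
+ * usage() text above:
+ *
+ *   $ ./list_consumer_group_offsets -b localhost:9092 my_group 0 \
+ *         my_topic 0
+ *
+ * i.e. group "my_group", require_stable_offsets=0, followed by
+ * topic/partition pairs. */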
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf; /**< Client configuration object */
+ int opt;
+ argv0 = argv[0];
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+
+ /*
+ * Parse common options
+ */
+ while ((opt = getopt(argc, argv, "b:X:d:")) != -1) {
+ switch (opt) {
+ case 'b':
+ conf_set(conf, "bootstrap.servers", optarg);
+ break;
+
+ case 'X': {
+ char *name = optarg, *val;
+
+ if (!(val = strchr(name, '=')))
+ fatal("-X expects a name=value argument");
+
+ *val = '\0';
+ val++;
+
+ conf_set(conf, name, val);
+ break;
+ }
+
+ case 'd':
+ conf_set(conf, "debug", optarg);
+ break;
+
+ default:
+ usage("Unknown option %c", (char)opt);
+ }
+ }
+
+ cmd_list_consumer_group_offsets(conf, argc - optind, &argv[optind]);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_groups.c b/fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_groups.c
new file mode 100644
index 00000000..13656cd6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_groups.c
@@ -0,0 +1,330 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * ListConsumerGroups usage example.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#else
+#include <getopt.h>
+#endif
+
+
+/* The typical include path would be <librdkafka/rdkafka.h>, but this
+ * program is built from within the librdkafka source tree and thus
+ * differs. */
+#include "rdkafka.h"
+
+
+const char *argv0;
+
+static rd_kafka_queue_t *queue; /** Admin result queue.
+ * This is a global so we can
+ * yield in stop() */
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ if (!run) {
+ fprintf(stderr, "%% Forced termination\n");
+ exit(2);
+ }
+ run = 0;
+ rd_kafka_queue_yield(queue);
+}
+
+
+static void usage(const char *reason, ...) {
+
+ fprintf(stderr,
+ "List groups usage examples\n"
+ "\n"
+ "Usage: %s <options> <state1> <state2> ...\n"
+ "\n"
+ "Options:\n"
+ " -b <brokers> Bootstrap server list to connect to.\n"
+ " -X <prop=val> Set librdkafka configuration property.\n"
+ " See CONFIGURATION.md for full list.\n"
+ " -d <dbg,..> Enable librdkafka debugging (%s).\n"
+ "\n",
+ argv0, rd_kafka_get_debug_contexts());
+
+ if (reason) {
+ va_list ap;
+ char reasonbuf[512];
+
+ va_start(ap, reason);
+ vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap);
+ va_end(ap);
+
+ fprintf(stderr, "ERROR: %s\n", reasonbuf);
+ }
+
+ exit(reason ? 1 : 0);
+}
+
+
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(2); \
+ } while (0)
+
+
+/**
+ * @brief Set config property. Exit on failure.
+ */
+static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
+ char errstr[512];
+
+ if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK)
+ fatal("Failed to set %s=%s: %s", name, val, errstr);
+}
+
+/**
+ * @brief Print group information.
+ */
+static int print_groups_info(const rd_kafka_ListConsumerGroups_result_t *list) {
+ size_t i;
+ const rd_kafka_ConsumerGroupListing_t **result_groups;
+ const rd_kafka_error_t **errors;
+ size_t result_groups_cnt;
+ size_t result_error_cnt;
+ result_groups =
+ rd_kafka_ListConsumerGroups_result_valid(list, &result_groups_cnt);
+ errors =
+ rd_kafka_ListConsumerGroups_result_errors(list, &result_error_cnt);
+
+ if (result_groups_cnt == 0) {
+ fprintf(stderr, "No matching groups found\n");
+ }
+
+ for (i = 0; i < result_groups_cnt; i++) {
+ const rd_kafka_ConsumerGroupListing_t *group = result_groups[i];
+ const char *group_id =
+ rd_kafka_ConsumerGroupListing_group_id(group);
+ rd_kafka_consumer_group_state_t state =
+ rd_kafka_ConsumerGroupListing_state(group);
+ int is_simple_consumer_group =
+ rd_kafka_ConsumerGroupListing_is_simple_consumer_group(
+ group);
+
+ printf("Group \"%s\", is simple %" PRId32
+ ", "
+ "state %s",
+ group_id, is_simple_consumer_group,
+ rd_kafka_consumer_group_state_name(state));
+ printf("\n");
+ }
+ for (i = 0; i < result_error_cnt; i++) {
+ const rd_kafka_error_t *error = errors[i];
+ printf("Error[%" PRId32 "]: %s\n", rd_kafka_error_code(error),
+ rd_kafka_error_string(error));
+ }
+ return 0;
+}
+
+/**
+ * @brief Parse an integer or fail.
+ */
+int64_t parse_int(const char *what, const char *str) {
+ char *end;
+ unsigned long long n = strtoull(str, &end, 0);
+
+ if (end != str + strlen(str)) {
+ fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n",
+ what, str);
+ exit(1);
+ }
+
+ return (int64_t)n;
+}
+
+/**
+ * @brief Call rd_kafka_ListConsumerGroups() with a list of
+ * groups.
+ */
+static void
+cmd_list_consumer_groups(rd_kafka_conf_t *conf, int argc, char **argv) {
+ rd_kafka_t *rk;
+ const char **states_str = NULL;
+ char errstr[512];
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_event_t *event = NULL;
+ rd_kafka_error_t *error = NULL;
+ int i;
+ int retval = 0;
+ int states_cnt = 0;
+ rd_kafka_consumer_group_state_t *states;
+
+
+ if (argc >= 1) {
+ states_str = (const char **)&argv[0];
+ states_cnt = argc;
+ }
+ states = calloc(states_cnt, sizeof(rd_kafka_consumer_group_state_t));
+ for (i = 0; i < states_cnt; i++) {
+ states[i] = parse_int("state code", states_str[i]);
+ }
+
+ /*
+ * Create consumer instance
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ if (!rk)
+ fatal("Failed to create new consumer: %s", errstr);
+
+ /*
+ * List consumer groups
+ */
+ queue = rd_kafka_queue_new(rk);
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ options =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
+
+ if (rd_kafka_AdminOptions_set_request_timeout(
+ options, 10 * 1000 /* 10s */, errstr, sizeof(errstr))) {
+ fprintf(stderr, "%% Failed to set timeout: %s\n", errstr);
+ goto exit;
+ }
+
+ if ((error = rd_kafka_AdminOptions_set_match_consumer_group_states(
+ options, states, states_cnt))) {
+ fprintf(stderr, "%% Failed to set states: %s\n",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ goto exit;
+ }
+ free(states);
+
+ rd_kafka_ListConsumerGroups(rk, options, queue);
+ rd_kafka_AdminOptions_destroy(options);
+
+ /* Wait for results */
+ event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by
+ * the request timeout set
+ * above (10s) */);
+
+ if (!event) {
+ /* User hit Ctrl-C,
+ * see yield call in stop() signal handler */
+ fprintf(stderr, "%% Cancelled by user\n");
+
+ } else if (rd_kafka_event_error(event)) {
+ rd_kafka_resp_err_t err = rd_kafka_event_error(event);
+ /* ListConsumerGroups request failed */
+ fprintf(stderr, "%% ListConsumerGroups failed[%d]: %s\n",
+ (int)err, rd_kafka_event_error_string(event));
+ retval = 1;
+ goto exit;
+
+ } else {
+ /* ListConsumerGroups request succeeded, but individual
+ * groups may have errors. */
+ const rd_kafka_ListConsumerGroups_result_t *result;
+
+ result = rd_kafka_event_ListConsumerGroups_result(event);
+ printf("ListConsumerGroups results:\n");
+ retval = print_groups_info(result);
+ }
+
+
+exit:
+ if (event)
+ rd_kafka_event_destroy(event);
+ rd_kafka_queue_destroy(queue);
+ /* Destroy the client instance */
+ rd_kafka_destroy(rk);
+
+ exit(retval);
+}
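+
+/* Example invocation (hypothetical broker): list only groups in given
+ * states, passed as integer rd_kafka_consumer_group_state_t codes,
+ * e.g. assuming STABLE maps to 3 in this librdkafka version:
+ *
+ *   $ ./list_consumer_groups -b localhost:9092 3
+ */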
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf; /**< Client configuration object */
+ int opt;
+ argv0 = argv[0];
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+
+ /*
+ * Parse common options
+ */
+ while ((opt = getopt(argc, argv, "b:X:d:")) != -1) {
+ switch (opt) {
+ case 'b':
+ conf_set(conf, "bootstrap.servers", optarg);
+ break;
+
+ case 'X': {
+ char *name = optarg, *val;
+
+ if (!(val = strchr(name, '=')))
+ fatal("-X expects a name=value argument");
+
+ *val = '\0';
+ val++;
+
+ conf_set(conf, name, val);
+ break;
+ }
+
+ case 'd':
+ conf_set(conf, "debug", optarg);
+ break;
+
+ default:
+ usage("Unknown option %c", (char)opt);
+ }
+ }
+
+ cmd_list_consumer_groups(conf, argc - optind, &argv[optind]);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/misc.c b/fluent-bit/lib/librdkafka-2.1.0/examples/misc.c
new file mode 100644
index 00000000..b63ab577
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/misc.c
@@ -0,0 +1,287 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * A collection of smaller usage examples
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#else
+#include <getopt.h>
+#endif
+
+
+/* The typical include path would be <librdkafka/rdkafka.h>, but this
+ * program is built from within the librdkafka source tree and thus
+ * differs. */
+#include "rdkafka.h"
+
+
+const char *argv0;
+
+
+static void usage(const char *reason, ...) {
+
+ fprintf(stderr,
+ "Miscellaneous librdkafka usage examples\n"
+ "\n"
+ "Usage: %s <options> <command> [<command arguments>]\n"
+ "\n"
+ "Commands:\n"
+ " List groups:\n"
+ " %s -b <brokers> list_groups <group>\n"
+ "\n"
+ " Show librdkafka version:\n"
+ " %s version\n"
+ "\n"
+ "Common options for all commands:\n"
+ " -b <brokers> Bootstrap server list to connect to.\n"
+ " -X <prop=val> Set librdkafka configuration property.\n"
+ " See CONFIGURATION.md for full list.\n"
+ " -d <dbg,..> Enable librdkafka debugging (%s).\n"
+ "\n",
+ argv0, argv0, argv0, rd_kafka_get_debug_contexts());
+
+ if (reason) {
+ va_list ap;
+ char reasonbuf[512];
+
+ va_start(ap, reason);
+ vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap);
+ va_end(ap);
+
+ fprintf(stderr, "ERROR: %s\n", reasonbuf);
+ }
+
+ exit(reason ? 1 : 0);
+}
+
+
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(2); \
+ } while (0)
+
+
+/**
+ * @brief Set config property. Exit on failure.
+ */
+static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
+ char errstr[512];
+
+ if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK)
+ fatal("Failed to set %s=%s: %s", name, val, errstr);
+}
+
+
+/**
+ * Commands
+ *
+ */
+
+/**
+ * @brief Just print the librdkafka version
+ */
+static void cmd_version(rd_kafka_conf_t *conf, int argc, char **argv) {
+ if (argc)
+ usage("version command takes no arguments");
+
+ printf("librdkafka v%s\n", rd_kafka_version_str());
+ rd_kafka_conf_destroy(conf);
+}
+
+
+/**
+ * @brief Call rd_kafka_list_groups() with an optional groupid argument.
+ */
+static void cmd_list_groups(rd_kafka_conf_t *conf, int argc, char **argv) {
+ rd_kafka_t *rk;
+ const char *groupid = NULL;
+ char errstr[512];
+ rd_kafka_resp_err_t err;
+ const struct rd_kafka_group_list *grplist;
+ int i;
+ int retval = 0;
+
+ if (argc > 1)
+ usage("too many arguments to list_groups");
+
+ if (argc == 1)
+ groupid = argv[0];
+
+ /*
+ * Create consumer instance
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ if (!rk)
+ fatal("Failed to create new consumer: %s", errstr);
+
+ /*
+ * List groups
+ */
+ err = rd_kafka_list_groups(rk, groupid, &grplist, 10 * 1000 /*10s*/);
+ if (err)
+ fatal("rd_kafka_list_groups(%s) failed: %s",
+ groupid ? groupid : "(all)", rd_kafka_err2str(err));
+
+ if (grplist->group_cnt == 0) {
+ if (groupid) {
+ fprintf(stderr, "Group %s not found\n", groupid);
+ retval = 1;
+ } else {
+ fprintf(stderr, "No groups in cluster\n");
+ }
+ }
+
+ /*
+ * Print group information
+ */
+ for (i = 0; i < grplist->group_cnt; i++) {
+ int j;
+ const struct rd_kafka_group_info *grp = &grplist->groups[i];
+
+ printf(
+ "Group \"%s\" protocol-type %s, protocol %s, "
+ "state %s, with %d member(s))",
+ grp->group, grp->protocol_type, grp->protocol, grp->state,
+ grp->member_cnt);
+ if (grp->err)
+ printf(" error: %s", rd_kafka_err2str(grp->err));
+ printf("\n");
+ for (j = 0; j < grp->member_cnt; j++) {
+ const struct rd_kafka_group_member_info *mb =
+ &grp->members[j];
+ printf(
+ " Member \"%s\" with client-id %s, host %s, "
+ "%d bytes of metadat, %d bytes of assignment\n",
+ mb->member_id, mb->client_id, mb->client_host,
+ mb->member_metadata_size,
+ mb->member_assignment_size);
+ }
+ }
+
+ rd_kafka_group_list_destroy(grplist);
+
+ /* Destroy the client instance */
+ rd_kafka_destroy(rk);
+
+ exit(retval);
+}
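+
+/* Example invocations (hypothetical broker), per the usage() text:
+ *
+ *   $ ./misc -b localhost:9092 list_groups            # all groups
+ *   $ ./misc -b localhost:9092 list_groups my_group   # one group
+ *   $ ./misc version
+ */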
+
+
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf; /**< Client configuration object */
+ int opt, i;
+ const char *cmd;
+ static const struct {
+ const char *cmd;
+ void (*func)(rd_kafka_conf_t *conf, int argc, char **argv);
+ } cmds[] = {
+ {"version", cmd_version},
+ {"list_groups", cmd_list_groups},
+ {NULL},
+ };
+
+ argv0 = argv[0];
+
+ if (argc == 1)
+ usage(NULL);
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+
+ /*
+ * Parse common options
+ */
+ while ((opt = getopt(argc, argv, "b:X:d:")) != -1) {
+ switch (opt) {
+ case 'b':
+ conf_set(conf, "bootstrap.servers", optarg);
+ break;
+
+ case 'X': {
+ char *name = optarg, *val;
+
+ if (!(val = strchr(name, '=')))
+ fatal("-X expects a name=value argument");
+
+ *val = '\0';
+ val++;
+
+ conf_set(conf, name, val);
+ break;
+ }
+
+ case 'd':
+ conf_set(conf, "debug", optarg);
+ break;
+
+ default:
+ usage("Unknown option %c", (char)opt);
+ }
+ }
+
+
+ if (optind == argc)
+ usage("No command specified");
+
+
+ cmd = argv[optind++];
+
+ /*
+ * Find matching command and run it
+ */
+ for (i = 0; cmds[i].cmd; i++) {
+ if (!strcmp(cmds[i].cmd, cmd)) {
+ cmds[i].func(conf, argc - optind, &argv[optind]);
+ exit(0);
+ }
+ }
+
+ usage("Unknown command: %s", cmd);
+
+ /* NOTREACHED */
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/openssl_engine_example.cpp b/fluent-bit/lib/librdkafka-2.1.0/examples/openssl_engine_example.cpp
new file mode 100644
index 00000000..401857e6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/openssl_engine_example.cpp
@@ -0,0 +1,249 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2021, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * OpenSSL engine integration example. This example fetches metadata
+ * over an SSL connection to the broker, established using an
+ * OpenSSL engine.
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#elif _AIX
+#include <unistd.h>
+#else
+#include <getopt.h>
+#endif
+
+/*
+ * Typical include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+static void metadata_print(const RdKafka::Metadata *metadata) {
+ std::cout << "Number of topics: " << metadata->topics()->size() << std::endl;
+
+ /* Iterate topics */
+ RdKafka::Metadata::TopicMetadataIterator it;
+ for (it = metadata->topics()->begin(); it != metadata->topics()->end(); ++it)
+ std::cout << " " << (*it)->topic() << " has "
+ << (*it)->partitions()->size() << " partitions." << std::endl;
+}
+
+
+class PrintingSSLVerifyCb : public RdKafka::SslCertificateVerifyCb {
+  /* This SSL cert verification callback simply prints the incoming
+   * parameters. It performs no validation; every certificate is
+   * accepted as valid. */
+ public:
+ bool ssl_cert_verify_cb(const std::string &broker_name,
+ int32_t broker_id,
+ int *x509_error,
+ int depth,
+ const char *buf,
+ size_t size,
+ std::string &errstr) {
+    std::cout << "ssl_cert_verify_cb"
+              << ": broker_name=" << broker_name << ", broker_id=" << broker_id
+ << ", x509_error=" << *x509_error << ", depth=" << depth
+ << ", buf size=" << size << std::endl;
+
+ return true;
+ }
+};
+
+
+int main(int argc, char **argv) {
+ std::string brokers;
+ std::string errstr;
+ std::string engine_path;
+ std::string ca_location;
+
+ /*
+ * Create configuration objects
+ */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+ std::string engine_id;
+ std::string engine_callback_data;
+ int opt;
+
+ if (conf->set("security.protocol", "ssl", errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+  while ((opt = getopt(argc, argv, "b:p:c:d:i:e:X:")) != -1) {
+ switch (opt) {
+ case 'b':
+ brokers = optarg;
+ break;
+ case 'p':
+ engine_path = optarg;
+ break;
+ case 'c':
+ ca_location = optarg;
+ break;
+ case 'i':
+ engine_id = optarg;
+ break;
+ case 'e':
+ engine_callback_data = optarg;
+ break;
+ case 'd':
+ if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+ case 'X': {
+ char *name, *val;
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ std::cerr << "%% Expected -X property=value, not " << name << std::endl;
+ exit(1);
+ }
+
+ *val = '\0';
+ val++;
+
+ if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ } break;
+
+ default:
+ goto usage;
+ }
+ }
+
+ if (brokers.empty() || engine_path.empty() || optind != argc) {
+ usage:
+ std::string features;
+ conf->get("builtin.features", features);
+ fprintf(stderr,
+ "Usage: %s [options] -b <brokers> -p <engine-path> \n"
+ "\n"
+            "OpenSSL engine integration example. This example fetches\n"
+            "metadata over an SSL connection to the broker, established\n"
+            "using an OpenSSL engine.\n"
+ "\n"
+ "librdkafka version %s (0x%08x, builtin.features \"%s\")\n"
+ "\n"
+ " Options:\n"
+ " -b <brokers> Broker address\n"
+ " -p <engine-path> Path to OpenSSL engine\n"
+ " -i <engine-id> OpenSSL engine id\n"
+ " -e <engine-callback-data> OpenSSL engine callback_data\n"
+ " -c <ca-cert-location> File path to ca cert\n"
+ " -d [facs..] Enable debugging contexts: %s\n"
+            " -X <prop=value>           Set arbitrary librdkafka configuration"
+ " property\n"
+ "\n",
+ argv[0], RdKafka::version_str().c_str(), RdKafka::version(),
+ features.c_str(), RdKafka::get_debug_contexts().c_str());
+ exit(1);
+ }
+
+ if (conf->set("bootstrap.servers", brokers, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ if (conf->set("ssl.engine.location", engine_path, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ if (ca_location.length() > 0 && conf->set("ssl.ca.location", ca_location,
+ errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ if (engine_id.length() > 0 &&
+ conf->set("ssl.engine.id", engine_id, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ /* engine_callback_data needs to be persistent
+ * and outlive the lifetime of the Kafka client handle. */
+ if (engine_callback_data.length() > 0 &&
+ conf->set_engine_callback_data((void *)engine_callback_data.c_str(),
+ errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+  /* We use the certificate verification callback to print the
+   * certificate name being used. */
+ PrintingSSLVerifyCb ssl_verify_cb;
+
+ if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ /*
+ * Create producer using accumulated global configuration.
+ */
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer) {
+ std::cerr << "Failed to create producer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ std::cout << "% Created producer " << producer->name() << std::endl;
+
+ class RdKafka::Metadata *metadata;
+
+ /* Fetch metadata */
+ RdKafka::ErrorCode err = producer->metadata(true, NULL, &metadata, 5000);
+  if (err != RdKafka::ERR_NO_ERROR) {
+    std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err)
+              << std::endl;
+  } else {
+    /* Only print and free the metadata object on success:
+     * on failure it has not been set. */
+    metadata_print(metadata);
+    delete metadata;
+  }
+ delete producer;
+ delete conf;
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/producer.c b/fluent-bit/lib/librdkafka-2.1.0/examples/producer.c
new file mode 100644
index 00000000..b6fb7115
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/producer.c
@@ -0,0 +1,251 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Simple Apache Kafka producer
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ run = 0;
+ fclose(stdin); /* abort fgets() */
+}
+
+
+/**
+ * @brief Message delivery report callback.
+ *
+ * This callback is called exactly once per message, indicating if
+ * the message was successfully delivered
+ * (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR) or permanently
+ * failed delivery (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
+ *
+ * The callback is triggered from rd_kafka_poll() and executes on
+ * the application's thread.
+ */
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ if (rkmessage->err)
+ fprintf(stderr, "%% Message delivery failed: %s\n",
+ rd_kafka_err2str(rkmessage->err));
+ else
+ fprintf(stderr,
+ "%% Message delivered (%zd bytes, "
+ "partition %" PRId32 ")\n",
+ rkmessage->len, rkmessage->partition);
+
+ /* The rkmessage is destroyed automatically by librdkafka */
+}
+
+
+
+int main(int argc, char **argv) {
+ rd_kafka_t *rk; /* Producer instance handle */
+ rd_kafka_conf_t *conf; /* Temporary configuration object */
+ char errstr[512]; /* librdkafka API error reporting buffer */
+ char buf[512]; /* Message value temporary buffer */
+ const char *brokers; /* Argument: broker list */
+ const char *topic; /* Argument: topic to produce to */
+
+ /*
+ * Argument validation
+ */
+ if (argc != 3) {
+ fprintf(stderr, "%% Usage: %s <broker> <topic>\n", argv[0]);
+ return 1;
+ }
+
+ brokers = argv[1];
+ topic = argv[2];
+
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+ /* Set bootstrap broker(s) as a comma-separated list of
+ * host or host:port (default port 9092).
+ * librdkafka will use the bootstrap brokers to acquire the full
+ * set of brokers from the cluster. */
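+	/* e.g. "kafka1:9092,kafka2:9092" (hypothetical hostnames). */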
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%s\n", errstr);
+ return 1;
+ }
+
+ /* Set the delivery report callback.
+ * This callback will be called once per message to inform
+ * the application if delivery succeeded or failed.
+ * See dr_msg_cb() above.
+ * The callback is only triggered from rd_kafka_poll() and
+ * rd_kafka_flush(). */
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+ /*
+ * Create producer instance.
+ *
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ fprintf(stderr, "%% Failed to create new producer: %s\n",
+ errstr);
+ return 1;
+ }
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ fprintf(stderr,
+ "%% Type some text and hit enter to produce message\n"
+ "%% Or just hit enter to only serve delivery reports\n"
+ "%% Press Ctrl-C or Ctrl-D to exit\n");
+
+ while (run && fgets(buf, sizeof(buf), stdin)) {
+ size_t len = strlen(buf);
+ rd_kafka_resp_err_t err;
+
+ if (buf[len - 1] == '\n') /* Remove newline */
+ buf[--len] = '\0';
+
+ if (len == 0) {
+ /* Empty line: only serve delivery reports */
+ rd_kafka_poll(rk, 0 /*non-blocking */);
+ continue;
+ }
+
+ /*
+ * Send/Produce message.
+ * This is an asynchronous call, on success it will only
+ * enqueue the message on the internal producer queue.
+ * The actual delivery attempts to the broker are handled
+ * by background threads.
+ * The previously registered delivery report callback
+ * (dr_msg_cb) is used to signal back to the application
+ * when the message has been delivered (or failed).
+ */
+ retry:
+ err = rd_kafka_producev(
+ /* Producer handle */
+ rk,
+ /* Topic name */
+ RD_KAFKA_V_TOPIC(topic),
+ /* Make a copy of the payload. */
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ /* Message value and length */
+ RD_KAFKA_V_VALUE(buf, len),
+ /* Per-Message opaque, provided in
+ * delivery report callback as
+ * msg_opaque. */
+ RD_KAFKA_V_OPAQUE(NULL),
+ /* End sentinel */
+ RD_KAFKA_V_END);
+
+ if (err) {
+ /*
+ * Failed to *enqueue* message for producing.
+ */
+ fprintf(stderr,
+ "%% Failed to produce to topic %s: %s\n", topic,
+ rd_kafka_err2str(err));
+
+ if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+ /* If the internal queue is full, wait for
+ * messages to be delivered and then retry.
+ * The internal queue represents both
+ * messages to be sent and messages that have
+ * been sent or failed, awaiting their
+ * delivery report callback to be called.
+ *
+ * The internal queue is limited by the
+ * configuration property
+ * queue.buffering.max.messages and
+ * queue.buffering.max.kbytes */
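+				/* These limits can be raised with e.g.
+				 * rd_kafka_conf_set(conf,
+				 *     "queue.buffering.max.messages",
+				 *     "500000", errstr, sizeof(errstr));
+				 * before rd_kafka_new() (the value shown
+				 * is illustrative only). */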
+ rd_kafka_poll(rk,
+ 1000 /*block for max 1000ms*/);
+ goto retry;
+ }
+ } else {
+ fprintf(stderr,
+ "%% Enqueued message (%zd bytes) "
+ "for topic %s\n",
+ len, topic);
+ }
+
+
+ /* A producer application should continually serve
+ * the delivery report queue by calling rd_kafka_poll()
+ * at frequent intervals.
+ * Either put the poll call in your main loop, or in a
+ * dedicated thread, or call it after every
+ * rd_kafka_produce() call.
+ * Just make sure that rd_kafka_poll() is still called
+ * during periods where you are not producing any messages
+ * to make sure previously produced messages have their
+ * delivery report callback served (and any other callbacks
+ * you register). */
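+		/* A minimal sketch of serving delivery reports from a
+		 * dedicated thread instead (assuming POSIX-style threads;
+		 * names are illustrative and not part of this example):
+		 *
+		 *   static void *poller_thread(void *arg) {
+		 *           rd_kafka_t *rk = arg;
+		 *           while (thread_should_run)  (hypothetical flag)
+		 *                   rd_kafka_poll(rk, 100);
+		 *           return NULL;
+		 *   }
+		 */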
+ rd_kafka_poll(rk, 0 /*non-blocking*/);
+ }
+
+
+ /* Wait for final messages to be delivered or fail.
+ * rd_kafka_flush() is an abstraction over rd_kafka_poll() which
+ * waits for all messages to be delivered. */
+ fprintf(stderr, "%% Flushing final messages..\n");
+ rd_kafka_flush(rk, 10 * 1000 /* wait for max 10 seconds */);
+
+	/* If the output queue is still not empty there is an issue
+	 * with producing messages to the cluster. */
+ if (rd_kafka_outq_len(rk) > 0)
+ fprintf(stderr, "%% %d message(s) were not delivered\n",
+ rd_kafka_outq_len(rk));
+
+ /* Destroy the producer instance */
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/producer.cpp b/fluent-bit/lib/librdkafka-2.1.0/examples/producer.cpp
new file mode 100755
index 00000000..d4a8a0c4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/producer.cpp
@@ -0,0 +1,228 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka producer
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+
+#if _AIX
+#include <unistd.h>
+#endif
+
+/*
+ * Typical include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+
+static volatile sig_atomic_t run = 1;
+
+static void sigterm(int sig) {
+ run = 0;
+}
+
+
+class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
+ public:
+ void dr_cb(RdKafka::Message &message) {
+    /* If message.err() is non-zero the message delivery failed
+     * permanently. */
+ if (message.err())
+ std::cerr << "% Message delivery failed: " << message.errstr()
+ << std::endl;
+ else
+ std::cerr << "% Message delivered to topic " << message.topic_name()
+ << " [" << message.partition() << "] at offset "
+ << message.offset() << std::endl;
+ }
+};
+
+int main(int argc, char **argv) {
+ if (argc != 3) {
+ std::cerr << "Usage: " << argv[0] << " <brokers> <topic>\n";
+ exit(1);
+ }
+
+ std::string brokers = argv[1];
+ std::string topic = argv[2];
+
+ /*
+ * Create configuration object
+ */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+ std::string errstr;
+
+ /* Set bootstrap broker(s) as a comma-separated list of
+ * host or host:port (default port 9092).
+ * librdkafka will use the bootstrap brokers to acquire the full
+ * set of brokers from the cluster. */
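+  /* e.g. "kafka1:9092,kafka2:9092" (hypothetical hostnames). */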
+ if (conf->set("bootstrap.servers", brokers, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ signal(SIGINT, sigterm);
+ signal(SIGTERM, sigterm);
+
+  /* Set the delivery report callback.
+   * This callback will be called once per message to inform
+   * the application if delivery succeeded or failed.
+   * See ExampleDeliveryReportCb::dr_cb() above.
+   * The callback is only triggered from ::poll() and ::flush().
+   *
+   * IMPORTANT:
+   * Make sure the delivery report callback instance outlives the
+   * Producer object, either by putting it on the heap or, as in this
+   * case, as a stack variable that will NOT go out of scope for the
+   * duration of the Producer object.
+   */
+ ExampleDeliveryReportCb ex_dr_cb;
+
+ if (conf->set("dr_cb", &ex_dr_cb, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ /*
+ * Create producer instance.
+ */
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer) {
+ std::cerr << "Failed to create producer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ delete conf;
+
+ /*
+ * Read messages from stdin and produce to broker.
+ */
+ std::cout << "% Type message value and hit enter "
+ << "to produce message." << std::endl;
+
+ for (std::string line; run && std::getline(std::cin, line);) {
+ if (line.empty()) {
+ producer->poll(0);
+ continue;
+ }
+
+ /*
+ * Send/Produce message.
+ * This is an asynchronous call, on success it will only
+ * enqueue the message on the internal producer queue.
+ * The actual delivery attempts to the broker are handled
+ * by background threads.
+ * The previously registered delivery report callback
+ * is used to signal back to the application when the message
+ * has been delivered (or failed permanently after retries).
+ */
+ retry:
+ RdKafka::ErrorCode err = producer->produce(
+ /* Topic name */
+ topic,
+        /* Any Partition: the builtin partitioner will be
+         * used to assign the message to a partition based
+         * on the message key, or to a random partition if
+         * the key is not set. */
+ RdKafka::Topic::PARTITION_UA,
+ /* Make a copy of the value */
+ RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
+ /* Value */
+ const_cast<char *>(line.c_str()), line.size(),
+ /* Key */
+ NULL, 0,
+ /* Timestamp (defaults to current time) */
+ 0,
+ /* Message headers, if any */
+ NULL,
+ /* Per-message opaque value passed to
+ * delivery report */
+ NULL);
+
+ if (err != RdKafka::ERR_NO_ERROR) {
+ std::cerr << "% Failed to produce to topic " << topic << ": "
+ << RdKafka::err2str(err) << std::endl;
+
+ if (err == RdKafka::ERR__QUEUE_FULL) {
+ /* If the internal queue is full, wait for
+ * messages to be delivered and then retry.
+ * The internal queue represents both
+ * messages to be sent and messages that have
+ * been sent or failed, awaiting their
+ * delivery report callback to be called.
+ *
+ * The internal queue is limited by the
+ * configuration property
+ * queue.buffering.max.messages and queue.buffering.max.kbytes */
+ producer->poll(1000 /*block for max 1000ms*/);
+ goto retry;
+ }
+
+ } else {
+ std::cerr << "% Enqueued message (" << line.size() << " bytes) "
+ << "for topic " << topic << std::endl;
+ }
+
+ /* A producer application should continually serve
+ * the delivery report queue by calling poll()
+ * at frequent intervals.
+ * Either put the poll call in your main loop, or in a
+ * dedicated thread, or call it after every produce() call.
+ * Just make sure that poll() is still called
+ * during periods where you are not producing any messages
+ * to make sure previously produced messages have their
+ * delivery report callback served (and any other callbacks
+ * you register). */
+ producer->poll(0);
+ }
+
+ /* Wait for final messages to be delivered or fail.
+ * flush() is an abstraction over poll() which
+ * waits for all messages to be delivered. */
+ std::cerr << "% Flushing final messages..." << std::endl;
+ producer->flush(10 * 1000 /* wait for max 10 seconds */);
+
+ if (producer->outq_len() > 0)
+ std::cerr << "% " << producer->outq_len()
+ << " message(s) were not delivered" << std::endl;
+
+ delete producer;
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.c b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.c
new file mode 100644
index 00000000..1632b303
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.c
@@ -0,0 +1,617 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka high level consumer example program
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <ctype.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <syslog.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <getopt.h>
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static volatile sig_atomic_t run = 1;
+static rd_kafka_t *rk;
+static int exit_eof = 0;
+static int wait_eof = 0; /* number of partitions awaiting EOF */
+static int quiet = 0;
+static enum {
+ OUTPUT_HEXDUMP,
+ OUTPUT_RAW,
+} output = OUTPUT_HEXDUMP;
+
+static void stop(int sig) {
+ if (!run)
+ exit(1);
+ run = 0;
+ fclose(stdin); /* abort fgets() */
+}
+
+
+static void hexdump(FILE *fp, const char *name, const void *ptr, size_t len) {
+ const char *p = (const char *)ptr;
+ unsigned int of = 0;
+
+
+ if (name)
+ fprintf(fp, "%s hexdump (%zd bytes):\n", name, len);
+
+ for (of = 0; of < len; of += 16) {
+ char hexen[16 * 3 + 1];
+ char charen[16 + 1];
+ int hof = 0;
+
+ int cof = 0;
+ int i;
+
+ for (i = of; i < (int)of + 16 && i < (int)len; i++) {
+ hof += sprintf(hexen + hof, "%02x ", p[i] & 0xff);
+ cof += sprintf(charen + cof, "%c",
+ isprint((int)p[i]) ? p[i] : '.');
+ }
+ fprintf(fp, "%08x: %-48s %-16s\n", of, hexen, charen);
+ }
+}
+
+/**
+ * Kafka logger callback (optional)
+ */
+static void
+logger(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ fprintf(stdout, "%u.%03u RDKAFKA-%i-%s: %s: %s\n", (int)tv.tv_sec,
+ (int)(tv.tv_usec / 1000), level, fac, rd_kafka_name(rk), buf);
+}
+
+
+
+/**
+ * Handle and print a consumed message.
+ * Internally crafted messages are also used to propagate state from
+ * librdkafka to the application. The application needs to check
+ * the `rkmessage->err` field for this purpose.
+ */
+static void msg_consume(rd_kafka_message_t *rkmessage) {
+ if (rkmessage->err) {
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ fprintf(stderr,
+ "%% Consumer reached end of %s [%" PRId32
+ "] "
+ "message queue at offset %" PRId64 "\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset);
+
+ if (exit_eof && --wait_eof == 0) {
+ fprintf(stderr,
+ "%% All partition(s) reached EOF: "
+ "exiting\n");
+ run = 0;
+ }
+
+ return;
+ }
+
+ if (rkmessage->rkt)
+ fprintf(stderr,
+ "%% Consume error for "
+ "topic \"%s\" [%" PRId32
+ "] "
+ "offset %" PRId64 ": %s\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset,
+ rd_kafka_message_errstr(rkmessage));
+ else
+ fprintf(stderr, "%% Consumer error: %s: %s\n",
+ rd_kafka_err2str(rkmessage->err),
+ rd_kafka_message_errstr(rkmessage));
+
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
+ rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+ run = 0;
+ return;
+ }
+
+ if (!quiet)
+ fprintf(stdout,
+ "%% Message (topic %s [%" PRId32
+ "], "
+ "offset %" PRId64 ", %zd bytes):\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset,
+ rkmessage->len);
+
+ if (rkmessage->key_len) {
+ if (output == OUTPUT_HEXDUMP)
+ hexdump(stdout, "Message Key", rkmessage->key,
+ rkmessage->key_len);
+ else
+ printf("Key: %.*s\n", (int)rkmessage->key_len,
+ (char *)rkmessage->key);
+ }
+
+ if (output == OUTPUT_HEXDUMP)
+ hexdump(stdout, "Message Payload", rkmessage->payload,
+ rkmessage->len);
+ else
+ printf("%.*s\n", (int)rkmessage->len,
+ (char *)rkmessage->payload);
+}
+
+
+static void
+print_partition_list(FILE *fp,
+ const rd_kafka_topic_partition_list_t *partitions) {
+ int i;
+ for (i = 0; i < partitions->cnt; i++) {
+ fprintf(fp, "%s %s [%" PRId32 "] offset %" PRId64,
+ i > 0 ? "," : "", partitions->elems[i].topic,
+ partitions->elems[i].partition,
+ partitions->elems[i].offset);
+ }
+ fprintf(fp, "\n");
+}
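+
+/**
+ * @brief Consumer group rebalance callback.
+ *
+ * Handles both the EAGER protocol (assign()/unassign() of the full
+ * partition set) and the COOPERATIVE protocol (incremental
+ * assign/unassign of only the changed partitions).
+ */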
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque) {
+ rd_kafka_error_t *error = NULL;
+ rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ fprintf(stderr, "%% Consumer group rebalanced: ");
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ fprintf(stderr, "assigned (%s):\n",
+ rd_kafka_rebalance_protocol(rk));
+ print_partition_list(stderr, partitions);
+
+ if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
+ error = rd_kafka_incremental_assign(rk, partitions);
+ else
+ ret_err = rd_kafka_assign(rk, partitions);
+ wait_eof += partitions->cnt;
+ break;
+
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ fprintf(stderr, "revoked (%s):\n",
+ rd_kafka_rebalance_protocol(rk));
+ print_partition_list(stderr, partitions);
+
+ if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) {
+ error = rd_kafka_incremental_unassign(rk, partitions);
+ wait_eof -= partitions->cnt;
+ } else {
+ ret_err = rd_kafka_assign(rk, NULL);
+ wait_eof = 0;
+ }
+ break;
+
+ default:
+ fprintf(stderr, "failed: %s\n", rd_kafka_err2str(err));
+ rd_kafka_assign(rk, NULL);
+ break;
+ }
+
+ if (error) {
+ fprintf(stderr, "incremental assign failure: %s\n",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ } else if (ret_err) {
+ fprintf(stderr, "assign failure: %s\n",
+ rd_kafka_err2str(ret_err));
+ }
+}
+
+
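+/**
+ * @brief List and print information for consumer group \p group,
+ *        or for all groups if \p group is NULL.
+ *
+ * @returns 0 on success, -1 on failure.
+ */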
+static int describe_groups(rd_kafka_t *rk, const char *group) {
+ rd_kafka_resp_err_t err;
+ const struct rd_kafka_group_list *grplist;
+ int i;
+
+ err = rd_kafka_list_groups(rk, group, &grplist, 10000);
+
+ if (err) {
+ fprintf(stderr, "%% Failed to acquire group list: %s\n",
+ rd_kafka_err2str(err));
+ return -1;
+ }
+
+ for (i = 0; i < grplist->group_cnt; i++) {
+ const struct rd_kafka_group_info *gi = &grplist->groups[i];
+ int j;
+
+ printf("Group \"%s\" in state %s on broker %d (%s:%d)\n",
+ gi->group, gi->state, gi->broker.id, gi->broker.host,
+ gi->broker.port);
+ if (gi->err)
+ printf(" Error: %s\n", rd_kafka_err2str(gi->err));
+ printf(
+ " Protocol type \"%s\", protocol \"%s\", "
+ "with %d member(s):\n",
+ gi->protocol_type, gi->protocol, gi->member_cnt);
+
+ for (j = 0; j < gi->member_cnt; j++) {
+ const struct rd_kafka_group_member_info *mi;
+ mi = &gi->members[j];
+
+ printf(" \"%s\", client id \"%s\" on host %s\n",
+ mi->member_id, mi->client_id, mi->client_host);
+ printf(" metadata: %d bytes\n",
+ mi->member_metadata_size);
+ printf(" assignment: %d bytes\n",
+ mi->member_assignment_size);
+ }
+ printf("\n");
+ }
+
+ if (group && !grplist->group_cnt)
+ fprintf(stderr, "%% No matching group (%s)\n", group);
+
+ rd_kafka_group_list_destroy(grplist);
+
+ return 0;
+}
+
+
+
+static void sig_usr1(int sig) {
+ rd_kafka_dump(stdout, rk);
+}
+
+int main(int argc, char **argv) {
+ char mode = 'C';
+ char *brokers = "localhost:9092";
+ int opt;
+ rd_kafka_conf_t *conf;
+ char errstr[512];
+ const char *debug = NULL;
+ int do_conf_dump = 0;
+ char tmp[16];
+ rd_kafka_resp_err_t err;
+ char *group = NULL;
+ rd_kafka_topic_partition_list_t *topics;
+ int is_subscription;
+ int i;
+
+ quiet = !isatty(STDIN_FILENO);
+
+ /* Kafka configuration */
+ conf = rd_kafka_conf_new();
+
+ /* Set logger */
+ rd_kafka_conf_set_log_cb(conf, logger);
+
+ /* Quick termination */
+ snprintf(tmp, sizeof(tmp), "%i", SIGIO);
+ rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
+
+ while ((opt = getopt(argc, argv, "g:b:qd:eX:ADO")) != -1) {
+ switch (opt) {
+ case 'b':
+ brokers = optarg;
+ break;
+ case 'g':
+ group = optarg;
+ break;
+ case 'e':
+ exit_eof = 1;
+ break;
+ case 'd':
+ debug = optarg;
+ break;
+ case 'q':
+ quiet = 1;
+ break;
+ case 'A':
+ output = OUTPUT_RAW;
+ break;
+ case 'X': {
+ char *name, *val;
+ rd_kafka_conf_res_t res;
+
+ if (!strcmp(optarg, "list") ||
+ !strcmp(optarg, "help")) {
+ rd_kafka_conf_properties_show(stdout);
+ exit(0);
+ }
+
+ if (!strcmp(optarg, "dump")) {
+ do_conf_dump = 1;
+ continue;
+ }
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ fprintf(stderr,
+ "%% Expected "
+ "-X property=value, not %s\n",
+ name);
+ exit(1);
+ }
+
+ *val = '\0';
+ val++;
+
+ res = rd_kafka_conf_set(conf, name, val, errstr,
+ sizeof(errstr));
+
+ if (res != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+ } break;
+
+ case 'D':
+ case 'O':
+ mode = opt;
+ break;
+
+ default:
+ goto usage;
+ }
+ }
+
+
+ if (do_conf_dump) {
+ const char **arr;
+ size_t cnt;
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ if (pass == 0) {
+ arr = rd_kafka_conf_dump(conf, &cnt);
+ printf("# Global config\n");
+ } else {
+ rd_kafka_topic_conf_t *topic_conf =
+ rd_kafka_conf_get_default_topic_conf(conf);
+ if (topic_conf) {
+ printf("# Topic config\n");
+ arr = rd_kafka_topic_conf_dump(
+ topic_conf, &cnt);
+ } else {
+ arr = NULL;
+ }
+ }
+
+ if (!arr)
+ continue;
+
+ for (i = 0; i < (int)cnt; i += 2)
+ printf("%s = %s\n", arr[i], arr[i + 1]);
+
+ printf("\n");
+ rd_kafka_conf_dump_free(arr, cnt);
+ }
+
+ exit(0);
+ }
+
+
+ if (strchr("OC", mode) && optind == argc) {
+ usage:
+ fprintf(stderr,
+ "Usage: %s [options] <topic[:part]> <topic[:part]>..\n"
+ "\n"
+ "librdkafka version %s (0x%08x)\n"
+ "\n"
+ " Options:\n"
+ " -g <group> Consumer group (%s)\n"
+ " -b <brokers> Broker address (%s)\n"
+ " -e Exit consumer when last message\n"
+ " in partition has been received.\n"
+ " -D Describe group.\n"
+		    " -O              Get committed offset(s)\n"
+ " -d [facs..] Enable debugging contexts:\n"
+ " %s\n"
+ " -q Be quiet\n"
+ " -A Raw payload output (consumer)\n"
+		    " -X <prop=value> Set arbitrary librdkafka "
+ "configuration property\n"
+ " Use '-X list' to see the full list\n"
+ " of supported properties.\n"
+ "\n"
+ "For balanced consumer groups use the 'topic1 topic2..'"
+ " format\n"
+ "and for static assignment use "
+ "'topic1:part1 topic1:part2 topic2:part1..'\n"
+ "\n",
+ argv[0], rd_kafka_version_str(), rd_kafka_version(),
+ group, brokers, RD_KAFKA_DEBUG_CONTEXTS);
+ exit(1);
+ }
+
+
+ signal(SIGINT, stop);
+ signal(SIGUSR1, sig_usr1);
+
+ if (debug && rd_kafka_conf_set(conf, "debug", debug, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
+ errstr, debug);
+ exit(1);
+ }
+
+ /*
+ * Client/Consumer group
+ */
+
+ if (strchr("CO", mode)) {
+ /* Consumer groups require a group id */
+ if (!group)
+ group = "rdkafka_consumer_example";
+ if (rd_kafka_conf_set(conf, "group.id", group, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+
+ /* Callback called on partition assignment changes */
+ rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
+
+ rd_kafka_conf_set(conf, "enable.partition.eof", "true", NULL,
+ 0);
+ }
+
+ /* Set bootstrap servers */
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+
+ /* Create Kafka handle */
+ if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr,
+ sizeof(errstr)))) {
+ fprintf(stderr, "%% Failed to create new consumer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ if (mode == 'D') {
+ int r;
+ /* Describe groups */
+ r = describe_groups(rk, group);
+
+ rd_kafka_destroy(rk);
+ exit(r == -1 ? 1 : 0);
+ }
+
+ /* Redirect rd_kafka_poll() to consumer_poll() */
+ rd_kafka_poll_set_consumer(rk);
+
+ topics = rd_kafka_topic_partition_list_new(argc - optind);
+ is_subscription = 1;
+ for (i = optind; i < argc; i++) {
+		/* Parse "topic[:part]" */
+ char *topic = argv[i];
+ char *t;
+ int32_t partition = -1;
+
+ if ((t = strstr(topic, ":"))) {
+ *t = '\0';
+ partition = atoi(t + 1);
+ is_subscription = 0; /* is assignment */
+ wait_eof++;
+ }
+
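+		/* partition -1 means the whole topic: it becomes part of
+		 * a balanced group subscription, while an explicit
+		 * partition makes this a static assignment instead. */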
+ rd_kafka_topic_partition_list_add(topics, topic, partition);
+ }
+
+ if (mode == 'O') {
+ /* Offset query */
+
+ err = rd_kafka_committed(rk, topics, 5000);
+ if (err) {
+ fprintf(stderr, "%% Failed to fetch offsets: %s\n",
+ rd_kafka_err2str(err));
+ exit(1);
+ }
+
+ for (i = 0; i < topics->cnt; i++) {
+ rd_kafka_topic_partition_t *p = &topics->elems[i];
+ printf("Topic \"%s\" partition %" PRId32, p->topic,
+ p->partition);
+ if (p->err)
+ printf(" error %s", rd_kafka_err2str(p->err));
+ else {
+ printf(" offset %" PRId64 "", p->offset);
+
+ if (p->metadata_size)
+ printf(" (%d bytes of metadata)",
+ (int)p->metadata_size);
+ }
+ printf("\n");
+ }
+
+ goto done;
+ }
+
+
+ if (is_subscription) {
+ fprintf(stderr, "%% Subscribing to %d topics\n", topics->cnt);
+
+ if ((err = rd_kafka_subscribe(rk, topics))) {
+ fprintf(stderr,
+ "%% Failed to start consuming topics: %s\n",
+ rd_kafka_err2str(err));
+ exit(1);
+ }
+ } else {
+ fprintf(stderr, "%% Assigning %d partitions\n", topics->cnt);
+
+ if ((err = rd_kafka_assign(rk, topics))) {
+ fprintf(stderr, "%% Failed to assign partitions: %s\n",
+ rd_kafka_err2str(err));
+ }
+ }
+
+ while (run) {
+ rd_kafka_message_t *rkmessage;
+
+ rkmessage = rd_kafka_consumer_poll(rk, 1000);
+ if (rkmessage) {
+ msg_consume(rkmessage);
+ rd_kafka_message_destroy(rkmessage);
+ }
+ }
+
+done:
+ err = rd_kafka_consumer_close(rk);
+ if (err)
+ fprintf(stderr, "%% Failed to close consumer: %s\n",
+ rd_kafka_err2str(err));
+ else
+ fprintf(stderr, "%% Consumer closed\n");
+
+ rd_kafka_topic_partition_list_destroy(topics);
+
+ /* Destroy handle */
+ rd_kafka_destroy(rk);
+
+	/* Let background threads clean up and terminate cleanly.
+	 * Note: the handle was destroyed above and must not be
+	 * referenced again here. */
+	run = 5;
+	while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
+		printf("Waiting for librdkafka to decommission\n");
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.cpp b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.cpp
new file mode 100644
index 00000000..b4f158cb
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.cpp
@@ -0,0 +1,467 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2014, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka high-level consumer example program
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+
+#ifndef _WIN32
+#include <sys/time.h>
+#else
+#include <windows.h> /* for GetLocalTime */
+#endif
+
+#ifdef _MSC_VER
+#include "../win32/wingetopt.h"
+#elif _AIX
+#include <unistd.h>
+#else
+#include <getopt.h>
+#include <unistd.h>
+#endif
+
+/*
+ * Typical include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+
+
+static volatile sig_atomic_t run = 1;
+static bool exit_eof = false;
+static int eof_cnt = 0;
+static int partition_cnt = 0;
+static int verbosity = 1;
+static long msg_cnt = 0;
+static int64_t msg_bytes = 0;
+static void sigterm(int sig) {
+ run = 0;
+}
+
+
+/**
+ * @brief Format and print a timestamp for the current time.
+ */
+static void print_time() {
+#ifndef _WIN32
+ struct timeval tv;
+ char buf[64];
+ gettimeofday(&tv, NULL);
+ strftime(buf, sizeof(buf) - 1, "%Y-%m-%d %H:%M:%S", localtime(&tv.tv_sec));
+ fprintf(stderr, "%s.%03d: ", buf, (int)(tv.tv_usec / 1000));
+#else
+ SYSTEMTIME lt = {0};
+ GetLocalTime(&lt);
+ // %Y-%m-%d %H:%M:%S.xxx:
+ fprintf(stderr, "%04d-%02d-%02d %02d:%02d:%02d.%03d: ", lt.wYear, lt.wMonth,
+ lt.wDay, lt.wHour, lt.wMinute, lt.wSecond, lt.wMilliseconds);
+#endif
+}
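+
+/**
+ * @brief Event callback: logs errors, statistics, log lines and
+ *        throttle events reported by librdkafka.
+ */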
+class ExampleEventCb : public RdKafka::EventCb {
+ public:
+ void event_cb(RdKafka::Event &event) {
+ print_time();
+
+ switch (event.type()) {
+ case RdKafka::Event::EVENT_ERROR:
+ if (event.fatal()) {
+ std::cerr << "FATAL ";
+ run = 0;
+ }
+ std::cerr << "ERROR (" << RdKafka::err2str(event.err())
+ << "): " << event.str() << std::endl;
+ break;
+
+ case RdKafka::Event::EVENT_STATS:
+ std::cerr << "\"STATS\": " << event.str() << std::endl;
+ break;
+
+ case RdKafka::Event::EVENT_LOG:
+ fprintf(stderr, "LOG-%i-%s: %s\n", event.severity(), event.fac().c_str(),
+ event.str().c_str());
+ break;
+
+ case RdKafka::Event::EVENT_THROTTLE:
+ std::cerr << "THROTTLED: " << event.throttle_time() << "ms by "
+ << event.broker_name() << " id " << (int)event.broker_id()
+ << std::endl;
+ break;
+
+ default:
+ std::cerr << "EVENT " << event.type() << " ("
+ << RdKafka::err2str(event.err()) << "): " << event.str()
+ << std::endl;
+ break;
+ }
+ }
+};
+
+
+class ExampleRebalanceCb : public RdKafka::RebalanceCb {
+ private:
+ static void part_list_print(
+ const std::vector<RdKafka::TopicPartition *> &partitions) {
+ for (unsigned int i = 0; i < partitions.size(); i++)
+ std::cerr << partitions[i]->topic() << "[" << partitions[i]->partition()
+ << "], ";
+ std::cerr << "\n";
+ }
+
+ public:
+ void rebalance_cb(RdKafka::KafkaConsumer *consumer,
+ RdKafka::ErrorCode err,
+ std::vector<RdKafka::TopicPartition *> &partitions) {
+ std::cerr << "RebalanceCb: " << RdKafka::err2str(err) << ": ";
+
+ part_list_print(partitions);
+
+ RdKafka::Error *error = NULL;
+ RdKafka::ErrorCode ret_err = RdKafka::ERR_NO_ERROR;
+
+ if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
+ if (consumer->rebalance_protocol() == "COOPERATIVE")
+ error = consumer->incremental_assign(partitions);
+ else
+ ret_err = consumer->assign(partitions);
+ partition_cnt += (int)partitions.size();
+ } else {
+ if (consumer->rebalance_protocol() == "COOPERATIVE") {
+ error = consumer->incremental_unassign(partitions);
+ partition_cnt -= (int)partitions.size();
+ } else {
+ ret_err = consumer->unassign();
+ partition_cnt = 0;
+ }
+ }
+ eof_cnt = 0; /* FIXME: Won't work with COOPERATIVE */
+
+ if (error) {
+ std::cerr << "incremental assign failed: " << error->str() << "\n";
+ delete error;
+ } else if (ret_err)
+ std::cerr << "assign failed: " << RdKafka::err2str(ret_err) << "\n";
+ }
+};
+
+
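+/**
+ * @brief Handle one consumed message or event returned by consume().
+ */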
+void msg_consume(RdKafka::Message *message, void *opaque) {
+ switch (message->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ break;
+
+ case RdKafka::ERR_NO_ERROR:
+ /* Real message */
+ msg_cnt++;
+ msg_bytes += message->len();
+ if (verbosity >= 3)
+ std::cerr << "Read msg at offset " << message->offset() << std::endl;
+ RdKafka::MessageTimestamp ts;
+ ts = message->timestamp();
+ if (verbosity >= 2 &&
+ ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) {
+ std::string tsname = "?";
+ if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME)
+ tsname = "create time";
+ else if (ts.type ==
+ RdKafka::MessageTimestamp::MSG_TIMESTAMP_LOG_APPEND_TIME)
+ tsname = "log append time";
+ std::cout << "Timestamp: " << tsname << " " << ts.timestamp << std::endl;
+ }
+ if (verbosity >= 2 && message->key()) {
+ std::cout << "Key: " << *message->key() << std::endl;
+ }
+ if (verbosity >= 1) {
+ printf("%.*s\n", static_cast<int>(message->len()),
+ static_cast<const char *>(message->payload()));
+ }
+ break;
+
+ case RdKafka::ERR__PARTITION_EOF:
+ /* Last message */
+ if (exit_eof && ++eof_cnt == partition_cnt) {
+ std::cerr << "%% EOF reached for all " << partition_cnt << " partition(s)"
+ << std::endl;
+ run = 0;
+ }
+ break;
+
+ case RdKafka::ERR__UNKNOWN_TOPIC:
+ case RdKafka::ERR__UNKNOWN_PARTITION:
+ std::cerr << "Consume failed: " << message->errstr() << std::endl;
+ run = 0;
+ break;
+
+ default:
+ /* Errors */
+ std::cerr << "Consume failed: " << message->errstr() << std::endl;
+ run = 0;
+ }
+}
+
+int main(int argc, char **argv) {
+ std::string brokers = "localhost";
+ std::string errstr;
+ std::string topic_str;
+ std::string mode;
+ std::string debug;
+ std::vector<std::string> topics;
+ bool do_conf_dump = false;
+ int opt;
+
+ /*
+ * Create configuration objects
+ */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+ ExampleRebalanceCb ex_rebalance_cb;
+ conf->set("rebalance_cb", &ex_rebalance_cb, errstr);
+
+ conf->set("enable.partition.eof", "true", errstr);
+
+  while ((opt = getopt(argc, argv, "g:b:z:qd:eX:M:v")) != -1) {
+ switch (opt) {
+ case 'g':
+ if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+ case 'b':
+ brokers = optarg;
+ break;
+ case 'z':
+ if (conf->set("compression.codec", optarg, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+ case 'e':
+ exit_eof = true;
+ break;
+ case 'd':
+ debug = optarg;
+ break;
+ case 'M':
+ if (conf->set("statistics.interval.ms", optarg, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+ case 'X': {
+ char *name, *val;
+
+ if (!strcmp(optarg, "dump")) {
+ do_conf_dump = true;
+ continue;
+ }
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ std::cerr << "%% Expected -X property=value, not " << name << std::endl;
+ exit(1);
+ }
+
+ *val = '\0';
+ val++;
+
+ RdKafka::Conf::ConfResult res = conf->set(name, val, errstr);
+ if (res != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ } break;
+
+ case 'q':
+ verbosity--;
+ break;
+
+ case 'v':
+ verbosity++;
+ break;
+
+ default:
+ goto usage;
+ }
+ }
+
+ for (; optind < argc; optind++)
+ topics.push_back(std::string(argv[optind]));
+
+ if (topics.empty() || optind != argc) {
+ usage:
+ fprintf(stderr,
+ "Usage: %s -g <group-id> [options] topic1 topic2..\n"
+ "\n"
+ "librdkafka version %s (0x%08x)\n"
+ "\n"
+ " Options:\n"
+ " -g <group-id> Consumer group id\n"
+ " -b <brokers> Broker address (localhost:9092)\n"
+ " -z <codec> Enable compression:\n"
+ " none|gzip|snappy\n"
+ " -e Exit consumer when last message\n"
+ " in partition has been received.\n"
+ " -d [facs..] Enable debugging contexts:\n"
+ " %s\n"
+ " -M <intervalms> Enable statistics\n"
+            " -X <prop=value> Set arbitrary librdkafka "
+ "configuration property\n"
+ " Use '-X list' to see the full list\n"
+ " of supported properties.\n"
+ " -q Quiet / Decrease verbosity\n"
+ " -v Increase verbosity\n"
+ "\n"
+ "\n",
+ argv[0], RdKafka::version_str().c_str(), RdKafka::version(),
+ RdKafka::get_debug_contexts().c_str());
+ exit(1);
+ }
+
+ if (exit_eof) {
+ std::string strategy;
+ if (conf->get("partition.assignment.strategy", strategy) ==
+ RdKafka::Conf::CONF_OK &&
+ strategy == "cooperative-sticky") {
+ std::cerr
+ << "Error: this example has not been modified to "
+ << "support -e (exit on EOF) when the partition.assignment.strategy "
+ << "is set to " << strategy << ": remove -e from the command line\n";
+ exit(1);
+ }
+ }
+
+ /*
+ * Set configuration properties
+ */
+ conf->set("metadata.broker.list", brokers, errstr);
+
+ if (!debug.empty()) {
+ if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ }
+
+ ExampleEventCb ex_event_cb;
+ conf->set("event_cb", &ex_event_cb, errstr);
+
+ if (do_conf_dump) {
+ std::list<std::string> *dump;
+ dump = conf->dump();
+ std::cout << "# Global config" << std::endl;
+
+ for (std::list<std::string>::iterator it = dump->begin();
+ it != dump->end();) {
+ std::cout << *it << " = ";
+ it++;
+ std::cout << *it << std::endl;
+ it++;
+ }
+ std::cout << std::endl;
+
+ exit(0);
+ }
+
+ signal(SIGINT, sigterm);
+ signal(SIGTERM, sigterm);
+
+
+ /*
+ * Consumer mode
+ */
+
+ /*
+ * Create consumer using accumulated global configuration.
+ */
+ RdKafka::KafkaConsumer *consumer =
+ RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!consumer) {
+ std::cerr << "Failed to create consumer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ delete conf;
+
+ std::cout << "% Created consumer " << consumer->name() << std::endl;
+
+
+ /*
+ * Subscribe to topics
+ */
+ RdKafka::ErrorCode err = consumer->subscribe(topics);
+ if (err) {
+ std::cerr << "Failed to subscribe to " << topics.size()
+ << " topics: " << RdKafka::err2str(err) << std::endl;
+ exit(1);
+ }
+
+ /*
+ * Consume messages
+ */
+ while (run) {
+ RdKafka::Message *msg = consumer->consume(1000);
+ msg_consume(msg, NULL);
+ delete msg;
+ }
+
+#ifndef _WIN32
+ alarm(10);
+#endif
+
+ /*
+ * Stop consumer
+ */
+ consumer->close();
+ delete consumer;
+
+ std::cerr << "% Consumed " << msg_cnt << " messages (" << msg_bytes
+ << " bytes)" << std::endl;
+
+  /*
+   * Wait for RdKafka to decommission.
+   * This is not strictly needed, but allows RdKafka to clean up all
+   * its resources before the application exits so that memory
+   * profilers such as valgrind won't complain about memory leaks.
+   */
+ RdKafka::wait_destroyed(5000);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_consume_batch.cpp b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_consume_batch.cpp
new file mode 100644
index 00000000..576b396f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_consume_batch.cpp
@@ -0,0 +1,264 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer example program
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ *
+ * This example shows how to read batches of messages.
+ * Note that messages are fetched from the broker in batches regardless
+ * of how the application polls messages from librdkafka, this example
+ * merely shows how to accumulate a set of messages in the application.
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+
+#ifndef _WIN32
+#include <sys/time.h>
+#endif
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#include <atltime.h>
+#elif _AIX
+#include <unistd.h>
+#else
+#include <getopt.h>
+#include <unistd.h>
+#endif
+
+/*
+ * Typical include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+
+
+static volatile sig_atomic_t run = 1;
+
+static void sigterm(int sig) {
+ run = 0;
+}
+
+
+
+/**
+ * @returns the current wall-clock time in milliseconds
+ */
+static int64_t now() {
+#ifndef _WIN32
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return ((int64_t)tv.tv_sec * 1000) + (tv.tv_usec / 1000);
+#else
+#error "now() not implemented for Windows, please submit a PR"
+#endif
+}
+
+
+
+/**
+ * @brief Accumulate a batch of \p batch_size messages, but wait
+ * no longer than \p batch_tmout milliseconds.
+ */
+static std::vector<RdKafka::Message *> consume_batch(
+ RdKafka::KafkaConsumer *consumer,
+ size_t batch_size,
+ int batch_tmout) {
+ std::vector<RdKafka::Message *> msgs;
+ msgs.reserve(batch_size);
+
+ int64_t end = now() + batch_tmout;
+ int remaining_timeout = batch_tmout;
+
+ while (msgs.size() < batch_size) {
+ RdKafka::Message *msg = consumer->consume(remaining_timeout);
+
+ switch (msg->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ delete msg;
+ return msgs;
+
+ case RdKafka::ERR_NO_ERROR:
+ msgs.push_back(msg);
+ break;
+
+ default:
+ std::cerr << "%% Consumer error: " << msg->errstr() << std::endl;
+ run = 0;
+ delete msg;
+ return msgs;
+ }
+
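+    /* Shrink the timeout for the next consume() call by the time
+     * already spent, so the whole batch waits at most batch_tmout. */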
+ remaining_timeout = end - now();
+ if (remaining_timeout < 0)
+ break;
+ }
+
+ return msgs;
+}
+
+
+int main(int argc, char **argv) {
+ std::string errstr;
+ std::string topic_str;
+ std::vector<std::string> topics;
+ int batch_size = 100;
+ int batch_tmout = 1000;
+
+ /* Create configuration objects */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+ if (conf->set("enable.partition.eof", "false", errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ /* Read command line arguments */
+ int opt;
+ while ((opt = getopt(argc, argv, "g:B:T:b:X:")) != -1) {
+ switch (opt) {
+ case 'g':
+ if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+
+ case 'B':
+ batch_size = atoi(optarg);
+ break;
+
+ case 'T':
+ batch_tmout = atoi(optarg);
+ break;
+
+ case 'b':
+ if (conf->set("bootstrap.servers", optarg, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+
+ case 'X': {
+ char *name, *val;
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ std::cerr << "%% Expected -X property=value, not " << name << std::endl;
+ exit(1);
+ }
+
+ *val = '\0';
+ val++;
+
+ if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ } break;
+
+ default:
+ goto usage;
+ }
+ }
+
+ /* Topics to consume */
+ for (; optind < argc; optind++)
+ topics.push_back(std::string(argv[optind]));
+
+ if (topics.empty() || optind != argc) {
+ usage:
+ fprintf(
+ stderr,
+ "Usage: %s -g <group-id> -B <batch-size> [options] topic1 topic2..\n"
+ "\n"
+ "librdkafka version %s (0x%08x)\n"
+ "\n"
+ " Options:\n"
+ " -g <group-id> Consumer group id\n"
+ " -B <batch-size> How many messages to batch (default: 100).\n"
+ " -T <batch-tmout> How long to wait for batch-size to accumulate in "
+ "milliseconds. (default 1000 ms)\n"
+ " -b <brokers> Broker address (localhost:9092)\n"
+      " -X <prop=value>  Set arbitrary librdkafka configuration property\n"
+ "\n",
+ argv[0], RdKafka::version_str().c_str(), RdKafka::version());
+ exit(1);
+ }
+
+
+ signal(SIGINT, sigterm);
+ signal(SIGTERM, sigterm);
+
+ /* Create consumer */
+ RdKafka::KafkaConsumer *consumer =
+ RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!consumer) {
+ std::cerr << "Failed to create consumer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ delete conf;
+
+ /* Subscribe to topics */
+ RdKafka::ErrorCode err = consumer->subscribe(topics);
+ if (err) {
+ std::cerr << "Failed to subscribe to " << topics.size()
+ << " topics: " << RdKafka::err2str(err) << std::endl;
+ exit(1);
+ }
+
+ /* Consume messages in batches of \p batch_size */
+ while (run) {
+ auto msgs = consume_batch(consumer, batch_size, batch_tmout);
+ std::cout << "Accumulated " << msgs.size() << " messages:" << std::endl;
+
+ for (auto &msg : msgs) {
+ std::cout << " Message in " << msg->topic_name() << " ["
+ << msg->partition() << "] at offset " << msg->offset()
+ << std::endl;
+ delete msg;
+ }
+ }
+
+ /* Close and destroy consumer */
+ consumer->close();
+ delete consumer;
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.c b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.c
new file mode 100644
index 00000000..91415318
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.c
@@ -0,0 +1,853 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer & producer example programs
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <ctype.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <syslog.h>
+#include <time.h>
+#include <sys/time.h>
+#include <getopt.h>
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static volatile sig_atomic_t run = 1;
+static rd_kafka_t *rk;
+static int exit_eof = 0;
+static int quiet = 0;
+static enum {
+ OUTPUT_HEXDUMP,
+ OUTPUT_RAW,
+} output = OUTPUT_HEXDUMP;
+
+static void stop(int sig) {
+ run = 0;
+ fclose(stdin); /* abort fgets() */
+}
+
+
+static void hexdump(FILE *fp, const char *name, const void *ptr, size_t len) {
+ const char *p = (const char *)ptr;
+ size_t of = 0;
+
+
+ if (name)
+ fprintf(fp, "%s hexdump (%zd bytes):\n", name, len);
+
+ for (of = 0; of < len; of += 16) {
+ char hexen[16 * 3 + 1];
+ char charen[16 + 1];
+ int hof = 0;
+
+ int cof = 0;
+ int i;
+
+ for (i = of; i < (int)of + 16 && i < (int)len; i++) {
+ hof += sprintf(hexen + hof, "%02x ", p[i] & 0xff);
+ cof += sprintf(charen + cof, "%c",
+ isprint((int)p[i]) ? p[i] : '.');
+ }
+ fprintf(fp, "%08zx: %-48s %-16s\n", of, hexen, charen);
+ }
+}
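+
+/* Example: for the 12-byte payload "hello world\n", hexdump() emits one row
+ * (48-column hex field, 16-column printable field, non-printables as '.'):
+ *
+ *   00000000: 68 65 6c 6c 6f 20 77 6f 72 6c 64 0a              hello world.
+ */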
+
+/**
+ * Kafka logger callback (optional)
+ */
+static void
+logger(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ fprintf(stderr, "%u.%03u RDKAFKA-%i-%s: %s: %s\n", (int)tv.tv_sec,
+ (int)(tv.tv_usec / 1000), level, fac,
+	        rk ? rd_kafka_name(rk) : "(null)", buf);
+}
+
+
+/**
+ * Message delivery report callback using the richer rd_kafka_message_t object.
+ */
+static void msg_delivered(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ void *opaque) {
+ if (rkmessage->err)
+ fprintf(stderr,
+ "%% Message delivery failed (broker %" PRId32 "): %s\n",
+ rd_kafka_message_broker_id(rkmessage),
+ rd_kafka_err2str(rkmessage->err));
+ else if (!quiet)
+ fprintf(stderr,
+ "%% Message delivered (%zd bytes, offset %" PRId64
+ ", "
+ "partition %" PRId32 ", broker %" PRId32 "): %.*s\n",
+ rkmessage->len, rkmessage->offset, rkmessage->partition,
+ rd_kafka_message_broker_id(rkmessage),
+ (int)rkmessage->len, (const char *)rkmessage->payload);
+}
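+
+/* Delivery reports are not fired from a background thread: msg_delivered()
+ * above is invoked from within the application's rd_kafka_poll() calls,
+ * exactly once per produced message. */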
+
+
+static void msg_consume(rd_kafka_message_t *rkmessage, void *opaque) {
+ if (rkmessage->err) {
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ fprintf(stderr,
+ "%% Consumer reached end of %s [%" PRId32
+ "] "
+ "message queue at offset %" PRId64 "\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset);
+
+ if (exit_eof)
+ run = 0;
+
+ return;
+ }
+
+ fprintf(stderr,
+ "%% Consume error for topic \"%s\" [%" PRId32
+ "] "
+ "offset %" PRId64 ": %s\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset,
+ rd_kafka_message_errstr(rkmessage));
+
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
+ rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+ run = 0;
+ return;
+ }
+
+ if (!quiet) {
+ rd_kafka_timestamp_type_t tstype;
+ int64_t timestamp;
+ rd_kafka_headers_t *hdrs;
+
+ fprintf(stdout,
+ "%% Message (offset %" PRId64
+ ", %zd bytes, "
+ "broker %" PRId32 "):\n",
+ rkmessage->offset, rkmessage->len,
+ rd_kafka_message_broker_id(rkmessage));
+
+ timestamp = rd_kafka_message_timestamp(rkmessage, &tstype);
+ if (tstype != RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) {
+ const char *tsname = "?";
+ if (tstype == RD_KAFKA_TIMESTAMP_CREATE_TIME)
+ tsname = "create time";
+ else if (tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
+ tsname = "log append time";
+
+ fprintf(stdout,
+ "%% Message timestamp: %s %" PRId64
+ " (%ds ago)\n",
+ tsname, timestamp,
+ !timestamp ? 0
+ : (int)time(NULL) -
+ (int)(timestamp / 1000));
+ }
+
+ if (!rd_kafka_message_headers(rkmessage, &hdrs)) {
+ size_t idx = 0;
+ const char *name;
+ const void *val;
+ size_t size;
+
+ fprintf(stdout, "%% Headers:");
+
+ while (!rd_kafka_header_get_all(hdrs, idx++, &name,
+ &val, &size)) {
+ fprintf(stdout, "%s%s=", idx == 1 ? " " : ", ",
+ name);
+ if (val)
+ fprintf(stdout, "\"%.*s\"", (int)size,
+ (const char *)val);
+ else
+ fprintf(stdout, "NULL");
+ }
+ fprintf(stdout, "\n");
+ }
+ }
+
+ if (rkmessage->key_len) {
+ if (output == OUTPUT_HEXDUMP)
+ hexdump(stdout, "Message Key", rkmessage->key,
+ rkmessage->key_len);
+ else
+ printf("Key: %.*s\n", (int)rkmessage->key_len,
+ (char *)rkmessage->key);
+ }
+
+ if (output == OUTPUT_HEXDUMP)
+ hexdump(stdout, "Message Payload", rkmessage->payload,
+ rkmessage->len);
+ else
+ printf("%.*s\n", (int)rkmessage->len,
+ (char *)rkmessage->payload);
+}
+
+
+static void metadata_print(const char *topic,
+ const struct rd_kafka_metadata *metadata) {
+ int i, j, k;
+ int32_t controllerid;
+
+ printf("Metadata for %s (from broker %" PRId32 ": %s):\n",
+ topic ?: "all topics", metadata->orig_broker_id,
+ metadata->orig_broker_name);
+
+ controllerid = rd_kafka_controllerid(rk, 0);
+
+
+ /* Iterate brokers */
+ printf(" %i brokers:\n", metadata->broker_cnt);
+ for (i = 0; i < metadata->broker_cnt; i++)
+ printf(" broker %" PRId32 " at %s:%i%s\n",
+ metadata->brokers[i].id, metadata->brokers[i].host,
+ metadata->brokers[i].port,
+ controllerid == metadata->brokers[i].id ? " (controller)"
+ : "");
+
+ /* Iterate topics */
+ printf(" %i topics:\n", metadata->topic_cnt);
+ for (i = 0; i < metadata->topic_cnt; i++) {
+ const struct rd_kafka_metadata_topic *t = &metadata->topics[i];
+ printf(" topic \"%s\" with %i partitions:", t->topic,
+ t->partition_cnt);
+ if (t->err) {
+ printf(" %s", rd_kafka_err2str(t->err));
+ if (t->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
+ printf(" (try again)");
+ }
+ printf("\n");
+
+ /* Iterate topic's partitions */
+ for (j = 0; j < t->partition_cnt; j++) {
+ const struct rd_kafka_metadata_partition *p;
+ p = &t->partitions[j];
+ printf(" partition %" PRId32
+ ", "
+ "leader %" PRId32 ", replicas: ",
+ p->id, p->leader);
+
+ /* Iterate partition's replicas */
+ for (k = 0; k < p->replica_cnt; k++)
+ printf("%s%" PRId32, k > 0 ? "," : "",
+ p->replicas[k]);
+
+ /* Iterate partition's ISRs */
+ printf(", isrs: ");
+ for (k = 0; k < p->isr_cnt; k++)
+ printf("%s%" PRId32, k > 0 ? "," : "",
+ p->isrs[k]);
+ if (p->err)
+ printf(", %s\n", rd_kafka_err2str(p->err));
+ else
+ printf("\n");
+ }
+ }
+}
+
+
+static void sig_usr1(int sig) {
+ rd_kafka_dump(stdout, rk);
+}
+
+int main(int argc, char **argv) {
+ rd_kafka_topic_t *rkt;
+ char *brokers = "localhost:9092";
+ char mode = 'C';
+ char *topic = NULL;
+ int partition = RD_KAFKA_PARTITION_UA;
+ int opt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char errstr[512];
+ int64_t start_offset = 0;
+ int do_conf_dump = 0;
+ char tmp[16];
+ int64_t seek_offset = 0;
+ int64_t tmp_offset = 0;
+ int get_wmarks = 0;
+ rd_kafka_headers_t *hdrs = NULL;
+ rd_kafka_resp_err_t err;
+
+ /* Kafka configuration */
+ conf = rd_kafka_conf_new();
+
+ /* Set logger */
+ rd_kafka_conf_set_log_cb(conf, logger);
+
+ /* Quick termination */
+ snprintf(tmp, sizeof(tmp), "%i", SIGIO);
+ rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
+
+ /* Topic configuration */
+ topic_conf = rd_kafka_topic_conf_new();
+
+ while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:As:H:")) != -1) {
+ switch (opt) {
+ case 'P':
+ case 'C':
+ case 'L':
+ mode = opt;
+ break;
+ case 't':
+ topic = optarg;
+ break;
+ case 'p':
+ partition = atoi(optarg);
+ break;
+ case 'b':
+ brokers = optarg;
+ break;
+ case 'z':
+ if (rd_kafka_conf_set(conf, "compression.codec", optarg,
+ errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+ break;
+ case 'o':
+ case 's':
+ if (!strcmp(optarg, "end"))
+ tmp_offset = RD_KAFKA_OFFSET_END;
+ else if (!strcmp(optarg, "beginning"))
+ tmp_offset = RD_KAFKA_OFFSET_BEGINNING;
+ else if (!strcmp(optarg, "stored"))
+ tmp_offset = RD_KAFKA_OFFSET_STORED;
+ else if (!strcmp(optarg, "wmark"))
+ get_wmarks = 1;
+ else {
+ tmp_offset = strtoll(optarg, NULL, 10);
+
+ if (tmp_offset < 0)
+ tmp_offset =
+ RD_KAFKA_OFFSET_TAIL(-tmp_offset);
+ }
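+			/* A negative numeric offset was mapped above to
+			 * RD_KAFKA_OFFSET_TAIL(N), which starts consuming
+			 * N messages before the current end of the
+			 * partition. */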
+
+ if (opt == 'o')
+ start_offset = tmp_offset;
+ else if (opt == 's')
+ seek_offset = tmp_offset;
+ break;
+ case 'e':
+ exit_eof = 1;
+ break;
+ case 'd':
+ if (rd_kafka_conf_set(conf, "debug", optarg, errstr,
+ sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK) {
+ fprintf(stderr,
+ "%% Debug configuration failed: "
+ "%s: %s\n",
+ errstr, optarg);
+ exit(1);
+ }
+ break;
+ case 'q':
+ quiet = 1;
+ break;
+ case 'A':
+ output = OUTPUT_RAW;
+ break;
+ case 'H': {
+ char *name, *val;
+ size_t name_sz = -1;
+
+ name = optarg;
+ val = strchr(name, '=');
+ if (val) {
+ name_sz = (size_t)(val - name);
+ val++; /* past the '=' */
+ }
+
+ if (!hdrs)
+ hdrs = rd_kafka_headers_new(8);
+
+ err = rd_kafka_header_add(hdrs, name, name_sz, val, -1);
+ if (err) {
+ fprintf(stderr,
+ "%% Failed to add header %s: %s\n",
+ name, rd_kafka_err2str(err));
+ exit(1);
+ }
+ } break;
+
+ case 'X': {
+ char *name, *val;
+ rd_kafka_conf_res_t res;
+
+ if (!strcmp(optarg, "list") ||
+ !strcmp(optarg, "help")) {
+ rd_kafka_conf_properties_show(stdout);
+ exit(0);
+ }
+
+ if (!strcmp(optarg, "dump")) {
+ do_conf_dump = 1;
+ continue;
+ }
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ char dest[512];
+ size_t dest_size = sizeof(dest);
+ /* Return current value for property. */
+
+ res = RD_KAFKA_CONF_UNKNOWN;
+ if (!strncmp(name, "topic.", strlen("topic.")))
+ res = rd_kafka_topic_conf_get(
+ topic_conf, name + strlen("topic."),
+ dest, &dest_size);
+ if (res == RD_KAFKA_CONF_UNKNOWN)
+ res = rd_kafka_conf_get(
+ conf, name, dest, &dest_size);
+
+ if (res == RD_KAFKA_CONF_OK) {
+ printf("%s = %s\n", name, dest);
+ exit(0);
+ } else {
+ fprintf(stderr, "%% %s property\n",
+ res == RD_KAFKA_CONF_UNKNOWN
+ ? "Unknown"
+ : "Invalid");
+ exit(1);
+ }
+ }
+
+ *val = '\0';
+ val++;
+
+ res = RD_KAFKA_CONF_UNKNOWN;
+ /* Try "topic." prefixed properties on topic
+ * conf first, and then fall through to global if
+			 * it didn't match a topic configuration property. */
+ if (!strncmp(name, "topic.", strlen("topic.")))
+ res = rd_kafka_topic_conf_set(
+ topic_conf, name + strlen("topic."), val,
+ errstr, sizeof(errstr));
+
+ if (res == RD_KAFKA_CONF_UNKNOWN)
+ res = rd_kafka_conf_set(conf, name, val, errstr,
+ sizeof(errstr));
+
+ if (res != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+ } break;
+
+ default:
+ goto usage;
+ }
+ }
+
+
+ if (do_conf_dump) {
+ const char **arr;
+ size_t cnt;
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ int i;
+
+ if (pass == 0) {
+ arr = rd_kafka_conf_dump(conf, &cnt);
+ printf("# Global config\n");
+ } else {
+ printf("# Topic config\n");
+ arr =
+ rd_kafka_topic_conf_dump(topic_conf, &cnt);
+ }
+
+ for (i = 0; i < (int)cnt; i += 2)
+ printf("%s = %s\n", arr[i], arr[i + 1]);
+
+ printf("\n");
+
+ rd_kafka_conf_dump_free(arr, cnt);
+ }
+
+ exit(0);
+ }
+
+
+ if (optind != argc || (mode != 'L' && !topic)) {
+ usage:
+ fprintf(stderr,
+ "Usage: %s -C|-P|-L -t <topic> "
+ "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
+ "\n"
+ "librdkafka version %s (0x%08x)\n"
+ "\n"
+ " Options:\n"
+ " -C | -P Consumer or Producer mode\n"
+ " -L Metadata list mode\n"
+ " -t <topic> Topic to fetch / produce\n"
+ " -p <num> Partition (random partitioner)\n"
+ " -b <brokers> Broker address (localhost:9092)\n"
+ " -z <codec> Enable compression:\n"
+ " none|gzip|snappy|lz4|zstd\n"
+ " -o <offset> Start offset (consumer):\n"
+ " beginning, end, NNNNN or -NNNNN\n"
+ " wmark returns the current hi&lo "
+ "watermarks.\n"
+ " -e Exit consumer when last message\n"
+ " in partition has been received.\n"
+ " -d [facs..] Enable debugging contexts:\n"
+ " %s\n"
+ " -q Be quiet\n"
+ " -A Raw payload output (consumer)\n"
+ " -H <name[=value]> Add header to message (producer)\n"
+ " -X <prop=name> Set arbitrary librdkafka "
+ "configuration property\n"
+ " Properties prefixed with \"topic.\" "
+ "will be set on topic object.\n"
+ " -X list Show full list of supported "
+ "properties.\n"
+ " -X dump Show configuration\n"
+ " -X <prop> Get single property value\n"
+ "\n"
+ " In Consumer mode:\n"
+ " writes fetched messages to stdout\n"
+ " In Producer mode:\n"
+ " reads messages from stdin and sends to broker\n"
+ " In List mode:\n"
+ " queries broker for metadata information, "
+ "topic is optional.\n"
+ "\n"
+ "\n"
+ "\n",
+ argv[0], rd_kafka_version_str(), rd_kafka_version(),
+ RD_KAFKA_DEBUG_CONTEXTS);
+ exit(1);
+ }
+
+ if ((mode == 'C' && !isatty(STDIN_FILENO)) ||
+ (mode != 'C' && !isatty(STDOUT_FILENO)))
+ quiet = 1;
+
+
+ signal(SIGINT, stop);
+ signal(SIGUSR1, sig_usr1);
+
+ /* Set bootstrap servers */
+ if (brokers &&
+ rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+
+ if (mode == 'P') {
+ /*
+ * Producer
+ */
+ char buf[2048];
+ int sendcnt = 0;
+
+ /* Set up a message delivery report callback.
+ * It will be called once for each message, either on successful
+ * delivery to broker, or upon failure to deliver to broker. */
+ rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered);
+
+ /* Create Kafka handle */
+ if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
+ sizeof(errstr)))) {
+ fprintf(stderr,
+ "%% Failed to create new producer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ /* Create topic */
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ topic_conf = NULL; /* Now owned by topic */
+
+ if (!quiet)
+ fprintf(stderr,
+ "%% Type stuff and hit enter to send\n");
+
+ while (run && fgets(buf, sizeof(buf), stdin)) {
+ size_t len = strlen(buf);
+			if (len > 0 && buf[len - 1] == '\n')
+ buf[--len] = '\0';
+
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ /* Send/Produce message. */
+ if (hdrs) {
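+				/* On success rd_kafka_producev() takes
+				 * ownership of the headers, so pass a fresh
+				 * copy with each message and destroy the
+				 * copy ourselves only if producing failed. */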
+ rd_kafka_headers_t *hdrs_copy;
+
+ hdrs_copy = rd_kafka_headers_copy(hdrs);
+
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_RKT(rkt),
+ RD_KAFKA_V_PARTITION(partition),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ RD_KAFKA_V_VALUE(buf, len),
+ RD_KAFKA_V_HEADERS(hdrs_copy),
+ RD_KAFKA_V_END);
+
+ if (err)
+ rd_kafka_headers_destroy(hdrs_copy);
+
+ } else {
+ if (rd_kafka_produce(
+ rkt, partition, RD_KAFKA_MSG_F_COPY,
+ /* Payload and length */
+ buf, len,
+ /* Optional key and its length */
+ NULL, 0,
+ /* Message opaque, provided in
+ * delivery report callback as
+ * msg_opaque. */
+ NULL) == -1) {
+ err = rd_kafka_last_error();
+ }
+ }
+
+ if (err) {
+ fprintf(stderr,
+ "%% Failed to produce to topic %s "
+ "partition %i: %s\n",
+ rd_kafka_topic_name(rkt), partition,
+ rd_kafka_err2str(err));
+
+ /* Poll to handle delivery reports */
+ rd_kafka_poll(rk, 0);
+ continue;
+ }
+
+ if (!quiet)
+ fprintf(stderr,
+ "%% Sent %zd bytes to topic "
+ "%s partition %i\n",
+ len, rd_kafka_topic_name(rkt),
+ partition);
+ sendcnt++;
+ /* Poll to handle delivery reports */
+ rd_kafka_poll(rk, 0);
+ }
+
+ /* Poll to handle delivery reports */
+ rd_kafka_poll(rk, 0);
+
+ /* Wait for messages to be delivered */
+ while (run && rd_kafka_outq_len(rk) > 0)
+ rd_kafka_poll(rk, 100);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy the handle */
+ rd_kafka_destroy(rk);
+
+ } else if (mode == 'C') {
+ /*
+ * Consumer
+ */
+
+ rd_kafka_conf_set(conf, "enable.partition.eof", "true", NULL,
+ 0);
+
+ /* Create Kafka handle */
+ if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr,
+ sizeof(errstr)))) {
+ fprintf(stderr,
+ "%% Failed to create new consumer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ if (get_wmarks) {
+ int64_t lo, hi;
+
+ /* Only query for hi&lo partition watermarks */
+
+ if ((err = rd_kafka_query_watermark_offsets(
+ rk, topic, partition, &lo, &hi, 5000))) {
+ fprintf(stderr,
+ "%% query_watermark_offsets() "
+ "failed: %s\n",
+ rd_kafka_err2str(err));
+ exit(1);
+ }
+
+ printf(
+ "%s [%d]: low - high offsets: "
+ "%" PRId64 " - %" PRId64 "\n",
+ topic, partition, lo, hi);
+
+ rd_kafka_destroy(rk);
+ exit(0);
+ }
+
+
+ /* Create topic */
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ topic_conf = NULL; /* Now owned by topic */
+
+ /* Start consuming */
+ if (rd_kafka_consume_start(rkt, partition, start_offset) ==
+ -1) {
+ err = rd_kafka_last_error();
+ fprintf(stderr, "%% Failed to start consuming: %s\n",
+ rd_kafka_err2str(err));
+ if (err == RD_KAFKA_RESP_ERR__INVALID_ARG)
+ fprintf(stderr,
+ "%% Broker based offset storage "
+ "requires a group.id, "
+ "add: -X group.id=yourGroup\n");
+ exit(1);
+ }
+
+ while (run) {
+ rd_kafka_message_t *rkmessage;
+
+ /* Poll for errors, etc. */
+ rd_kafka_poll(rk, 0);
+
+ /* Consume single message.
+ * See rdkafka_performance.c for high speed
+ * consuming of messages. */
+ rkmessage = rd_kafka_consume(rkt, partition, 1000);
+ if (!rkmessage) /* timeout */
+ continue;
+
+ msg_consume(rkmessage, NULL);
+
+ /* Return message to rdkafka */
+ rd_kafka_message_destroy(rkmessage);
+
+ if (seek_offset) {
+ err = rd_kafka_seek(rkt, partition, seek_offset,
+ 2000);
+ if (err)
+ printf("Seek failed: %s\n",
+ rd_kafka_err2str(err));
+ else
+ printf("Seeked to %" PRId64 "\n",
+ seek_offset);
+ seek_offset = 0;
+ }
+ }
+
+ /* Stop consuming */
+ rd_kafka_consume_stop(rkt, partition);
+
+ while (rd_kafka_outq_len(rk) > 0)
+ rd_kafka_poll(rk, 10);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy handle */
+ rd_kafka_destroy(rk);
+
+ } else if (mode == 'L') {
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ /* Create Kafka handle */
+ if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
+ sizeof(errstr)))) {
+ fprintf(stderr,
+ "%% Failed to create new producer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ /* Create topic */
+ if (topic) {
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ topic_conf = NULL; /* Now owned by topic */
+ } else
+ rkt = NULL;
+
+ while (run) {
+ const struct rd_kafka_metadata *metadata;
+
+ /* Fetch metadata */
+ err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt, &metadata,
+ 5000);
+ if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
+ fprintf(stderr,
+ "%% Failed to acquire metadata: %s\n",
+ rd_kafka_err2str(err));
+ run = 0;
+ break;
+ }
+
+ metadata_print(topic, metadata);
+
+ rd_kafka_metadata_destroy(metadata);
+ run = 0;
+ }
+
+ /* Destroy topic */
+ if (rkt)
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy the handle */
+ rd_kafka_destroy(rk);
+
+ if (topic_conf)
+ rd_kafka_topic_conf_destroy(topic_conf);
+
+
+		/* Exit right away, don't wait for background cleanup, we haven't
+ * done anything important anyway. */
+ exit(err ? 2 : 0);
+ }
+
+ if (hdrs)
+ rd_kafka_headers_destroy(hdrs);
+
+ if (topic_conf)
+ rd_kafka_topic_conf_destroy(topic_conf);
+
+ /* Let background threads clean up and terminate cleanly. */
+ run = 5;
+ while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
+ printf("Waiting for librdkafka to decommission\n");
+ if (run <= 0)
+ rd_kafka_dump(stdout, rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.cpp b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.cpp
new file mode 100644
index 00000000..91c3440b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.cpp
@@ -0,0 +1,679 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2014, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer & producer example programs
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#elif _AIX
+#include <unistd.h>
+#else
+#include <getopt.h>
+#endif
+
+/*
+ * Typically the include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+
+static void metadata_print(const std::string &topic,
+ const RdKafka::Metadata *metadata) {
+ std::cout << "Metadata for " << (topic.empty() ? "" : "all topics")
+ << "(from broker " << metadata->orig_broker_id() << ":"
+ << metadata->orig_broker_name() << std::endl;
+
+ /* Iterate brokers */
+ std::cout << " " << metadata->brokers()->size() << " brokers:" << std::endl;
+ RdKafka::Metadata::BrokerMetadataIterator ib;
+ for (ib = metadata->brokers()->begin(); ib != metadata->brokers()->end();
+ ++ib) {
+ std::cout << " broker " << (*ib)->id() << " at " << (*ib)->host() << ":"
+ << (*ib)->port() << std::endl;
+ }
+ /* Iterate topics */
+ std::cout << metadata->topics()->size() << " topics:" << std::endl;
+ RdKafka::Metadata::TopicMetadataIterator it;
+ for (it = metadata->topics()->begin(); it != metadata->topics()->end();
+ ++it) {
+ std::cout << " topic \"" << (*it)->topic() << "\" with "
+ << (*it)->partitions()->size() << " partitions:";
+
+ if ((*it)->err() != RdKafka::ERR_NO_ERROR) {
+ std::cout << " " << err2str((*it)->err());
+ if ((*it)->err() == RdKafka::ERR_LEADER_NOT_AVAILABLE)
+ std::cout << " (try again)";
+ }
+ std::cout << std::endl;
+
+ /* Iterate topic's partitions */
+ RdKafka::TopicMetadata::PartitionMetadataIterator ip;
+ for (ip = (*it)->partitions()->begin(); ip != (*it)->partitions()->end();
+ ++ip) {
+ std::cout << " partition " << (*ip)->id() << ", leader "
+ << (*ip)->leader() << ", replicas: ";
+
+ /* Iterate partition's replicas */
+ RdKafka::PartitionMetadata::ReplicasIterator ir;
+ for (ir = (*ip)->replicas()->begin(); ir != (*ip)->replicas()->end();
+ ++ir) {
+ std::cout << (ir == (*ip)->replicas()->begin() ? "" : ",") << *ir;
+ }
+
+ /* Iterate partition's ISRs */
+ std::cout << ", isrs: ";
+ RdKafka::PartitionMetadata::ISRSIterator iis;
+ for (iis = (*ip)->isrs()->begin(); iis != (*ip)->isrs()->end(); ++iis)
+ std::cout << (iis == (*ip)->isrs()->begin() ? "" : ",") << *iis;
+
+ if ((*ip)->err() != RdKafka::ERR_NO_ERROR)
+ std::cout << ", " << RdKafka::err2str((*ip)->err()) << std::endl;
+ else
+ std::cout << std::endl;
+ }
+ }
+}
+
+static volatile sig_atomic_t run = 1;
+static bool exit_eof = false;
+
+static void sigterm(int sig) {
+ run = 0;
+}
+
+
+class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
+ public:
+ void dr_cb(RdKafka::Message &message) {
+ std::string status_name;
+ switch (message.status()) {
+ case RdKafka::Message::MSG_STATUS_NOT_PERSISTED:
+ status_name = "NotPersisted";
+ break;
+ case RdKafka::Message::MSG_STATUS_POSSIBLY_PERSISTED:
+ status_name = "PossiblyPersisted";
+ break;
+ case RdKafka::Message::MSG_STATUS_PERSISTED:
+ status_name = "Persisted";
+ break;
+ default:
+ status_name = "Unknown?";
+ break;
+ }
+ std::cout << "Message delivery for (" << message.len()
+ << " bytes): " << status_name << ": " << message.errstr()
+ << std::endl;
+ if (message.key())
+ std::cout << "Key: " << *(message.key()) << ";" << std::endl;
+ }
+};
+
+
+class ExampleEventCb : public RdKafka::EventCb {
+ public:
+ void event_cb(RdKafka::Event &event) {
+ switch (event.type()) {
+ case RdKafka::Event::EVENT_ERROR:
+ if (event.fatal()) {
+ std::cerr << "FATAL ";
+ run = 0;
+ }
+ std::cerr << "ERROR (" << RdKafka::err2str(event.err())
+ << "): " << event.str() << std::endl;
+ break;
+
+ case RdKafka::Event::EVENT_STATS:
+ std::cerr << "\"STATS\": " << event.str() << std::endl;
+ break;
+
+ case RdKafka::Event::EVENT_LOG:
+ fprintf(stderr, "LOG-%i-%s: %s\n", event.severity(), event.fac().c_str(),
+ event.str().c_str());
+ break;
+
+ default:
+ std::cerr << "EVENT " << event.type() << " ("
+ << RdKafka::err2str(event.err()) << "): " << event.str()
+ << std::endl;
+ break;
+ }
+ }
+};
+
+
+/* Use of this partitioner is pretty pointless since no key is provided
+ * in the produce() call. */
+class MyHashPartitionerCb : public RdKafka::PartitionerCb {
+ public:
+ int32_t partitioner_cb(const RdKafka::Topic *topic,
+ const std::string *key,
+ int32_t partition_cnt,
+ void *msg_opaque) {
+ return djb_hash(key->c_str(), key->size()) % partition_cnt;
+ }
+
+ private:
+ static inline unsigned int djb_hash(const char *str, size_t len) {
+ unsigned int hash = 5381;
+ for (size_t i = 0; i < len; i++)
+ hash = ((hash << 5) + hash) + str[i];
+ return hash;
+ }
+};
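+
+/* A partitioner callback must be deterministic for a given key and return
+ * a partition in the range [0, partition_cnt-1]. djb_hash() above is the
+ * classic djb2 string hash: hash = hash * 33 + c, seeded with 5381. */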
+
+void msg_consume(RdKafka::Message *message, void *opaque) {
+ const RdKafka::Headers *headers;
+
+ switch (message->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ break;
+
+ case RdKafka::ERR_NO_ERROR:
+ /* Real message */
+ std::cout << "Read msg at offset " << message->offset() << std::endl;
+ if (message->key()) {
+ std::cout << "Key: " << *message->key() << std::endl;
+ }
+ headers = message->headers();
+ if (headers) {
+ std::vector<RdKafka::Headers::Header> hdrs = headers->get_all();
+ for (size_t i = 0; i < hdrs.size(); i++) {
+ const RdKafka::Headers::Header hdr = hdrs[i];
+
+ if (hdr.value() != NULL)
+ printf(" Header: %s = \"%.*s\"\n", hdr.key().c_str(),
+ (int)hdr.value_size(), (const char *)hdr.value());
+ else
+ printf(" Header: %s = NULL\n", hdr.key().c_str());
+ }
+ }
+ printf("%.*s\n", static_cast<int>(message->len()),
+ static_cast<const char *>(message->payload()));
+ break;
+
+ case RdKafka::ERR__PARTITION_EOF:
+ /* Last message */
+ if (exit_eof) {
+ run = 0;
+ }
+ break;
+
+ case RdKafka::ERR__UNKNOWN_TOPIC:
+ case RdKafka::ERR__UNKNOWN_PARTITION:
+ std::cerr << "Consume failed: " << message->errstr() << std::endl;
+ run = 0;
+ break;
+
+ default:
+ /* Errors */
+ std::cerr << "Consume failed: " << message->errstr() << std::endl;
+ run = 0;
+ }
+}
+
+
+class ExampleConsumeCb : public RdKafka::ConsumeCb {
+ public:
+ void consume_cb(RdKafka::Message &msg, void *opaque) {
+ msg_consume(&msg, opaque);
+ }
+};
+
+
+
+int main(int argc, char **argv) {
+ std::string brokers = "localhost";
+ std::string errstr;
+ std::string topic_str;
+ std::string mode;
+ std::string debug;
+ int32_t partition = RdKafka::Topic::PARTITION_UA;
+ int64_t start_offset = RdKafka::Topic::OFFSET_BEGINNING;
+ bool do_conf_dump = false;
+ int opt;
+ MyHashPartitionerCb hash_partitioner;
+ int use_ccb = 0;
+
+ /*
+ * Create configuration objects
+ */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+ RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
+
+
+ while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:AM:f:")) != -1) {
+ switch (opt) {
+ case 'P':
+ case 'C':
+ case 'L':
+ mode = opt;
+ break;
+ case 't':
+ topic_str = optarg;
+ break;
+ case 'p':
+ if (!strcmp(optarg, "random"))
+ /* default */;
+ else if (!strcmp(optarg, "hash")) {
+ if (tconf->set("partitioner_cb", &hash_partitioner, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ } else
+ partition = std::atoi(optarg);
+ break;
+ case 'b':
+ brokers = optarg;
+ break;
+ case 'z':
+ if (conf->set("compression.codec", optarg, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+ case 'o':
+ if (!strcmp(optarg, "end"))
+ start_offset = RdKafka::Topic::OFFSET_END;
+ else if (!strcmp(optarg, "beginning"))
+ start_offset = RdKafka::Topic::OFFSET_BEGINNING;
+ else if (!strcmp(optarg, "stored"))
+ start_offset = RdKafka::Topic::OFFSET_STORED;
+ else
+ start_offset = strtoll(optarg, NULL, 10);
+ break;
+ case 'e':
+ exit_eof = true;
+ break;
+ case 'd':
+ debug = optarg;
+ break;
+ case 'M':
+ if (conf->set("statistics.interval.ms", optarg, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+ case 'X': {
+ char *name, *val;
+
+ if (!strcmp(optarg, "dump")) {
+ do_conf_dump = true;
+ continue;
+ }
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ std::cerr << "%% Expected -X property=value, not " << name << std::endl;
+ exit(1);
+ }
+
+ *val = '\0';
+ val++;
+
+ /* Try "topic." prefixed properties on topic
+ * conf first, and then fall through to global if
+       * it didn't match a topic configuration property. */
+ RdKafka::Conf::ConfResult res;
+ if (!strncmp(name, "topic.", strlen("topic.")))
+ res = tconf->set(name + strlen("topic."), val, errstr);
+ else
+ res = conf->set(name, val, errstr);
+
+ if (res != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ } break;
+
+ case 'f':
+ if (!strcmp(optarg, "ccb"))
+ use_ccb = 1;
+ else {
+ std::cerr << "Unknown option: " << optarg << std::endl;
+ exit(1);
+ }
+ break;
+
+ default:
+ goto usage;
+ }
+ }
+
+ if (mode.empty() || (topic_str.empty() && mode != "L") || optind != argc) {
+ usage:
+ std::string features;
+ conf->get("builtin.features", features);
+ fprintf(stderr,
+ "Usage: %s [-C|-P] -t <topic> "
+ "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
+ "\n"
+ "librdkafka version %s (0x%08x, builtin.features \"%s\")\n"
+ "\n"
+ " Options:\n"
+ " -C | -P Consumer or Producer mode\n"
+ " -L Metadata list mode\n"
+ " -t <topic> Topic to fetch / produce\n"
+ " -p <num> Partition (random partitioner)\n"
+ " -p <func> Use partitioner:\n"
+ " random (default), hash\n"
+ " -b <brokers> Broker address (localhost:9092)\n"
+ " -z <codec> Enable compression:\n"
+ " none|gzip|snappy|lz4|zstd\n"
+ " -o <offset> Start offset (consumer)\n"
+ " -e Exit consumer when last message\n"
+ " in partition has been received.\n"
+ " -d [facs..] Enable debugging contexts:\n"
+ " %s\n"
+ " -M <intervalms> Enable statistics\n"
+ " -X <prop=name> Set arbitrary librdkafka "
+ "configuration property\n"
+ " Properties prefixed with \"topic.\" "
+ "will be set on topic object.\n"
+ " Use '-X list' to see the full list\n"
+ " of supported properties.\n"
+ " -f <flag> Set option:\n"
+ " ccb - use consume_callback\n"
+ "\n"
+ " In Consumer mode:\n"
+ " writes fetched messages to stdout\n"
+ " In Producer mode:\n"
+ " reads messages from stdin and sends to broker\n"
+ "\n"
+ "\n"
+ "\n",
+ argv[0], RdKafka::version_str().c_str(), RdKafka::version(),
+ features.c_str(), RdKafka::get_debug_contexts().c_str());
+ exit(1);
+ }
+
+
+ /*
+ * Set configuration properties
+ */
+ conf->set("metadata.broker.list", brokers, errstr);
+
+ if (!debug.empty()) {
+ if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ }
+
+ ExampleEventCb ex_event_cb;
+ conf->set("event_cb", &ex_event_cb, errstr);
+
+ if (do_conf_dump) {
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ std::list<std::string> *dump;
+ if (pass == 0) {
+ dump = conf->dump();
+ std::cout << "# Global config" << std::endl;
+ } else {
+ dump = tconf->dump();
+ std::cout << "# Topic config" << std::endl;
+ }
+
+ for (std::list<std::string>::iterator it = dump->begin();
+ it != dump->end();) {
+ std::cout << *it << " = ";
+ it++;
+ std::cout << *it << std::endl;
+ it++;
+ }
+ std::cout << std::endl;
+ }
+ exit(0);
+ }
+
+ signal(SIGINT, sigterm);
+ signal(SIGTERM, sigterm);
+
+
+ if (mode == "P") {
+ /*
+ * Producer mode
+ */
+
+ if (topic_str.empty())
+ goto usage;
+
+ ExampleDeliveryReportCb ex_dr_cb;
+
+ /* Set delivery report callback */
+ conf->set("dr_cb", &ex_dr_cb, errstr);
+
+ conf->set("default_topic_conf", tconf, errstr);
+
+ /*
+ * Create producer using accumulated global configuration.
+ */
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer) {
+ std::cerr << "Failed to create producer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ std::cout << "% Created producer " << producer->name() << std::endl;
+
+
+ /*
+ * Read messages from stdin and produce to broker.
+ */
+ for (std::string line; run && std::getline(std::cin, line);) {
+ if (line.empty()) {
+ producer->poll(0);
+ continue;
+ }
+
+ RdKafka::Headers *headers = RdKafka::Headers::create();
+ headers->add("my header", "header value");
+ headers->add("other header", "yes");
+
+ /*
+ * Produce message
+ */
+ RdKafka::ErrorCode resp =
+ producer->produce(topic_str, partition,
+ RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
+ /* Value */
+ const_cast<char *>(line.c_str()), line.size(),
+ /* Key */
+ NULL, 0,
+ /* Timestamp (defaults to now) */
+ 0,
+ /* Message headers, if any */
+ headers,
+ /* Per-message opaque value passed to
+ * delivery report */
+ NULL);
+ if (resp != RdKafka::ERR_NO_ERROR) {
+ std::cerr << "% Produce failed: " << RdKafka::err2str(resp)
+ << std::endl;
+ delete headers; /* Headers are automatically deleted on produce()
+ * success. */
+ } else {
+ std::cerr << "% Produced message (" << line.size() << " bytes)"
+ << std::endl;
+ }
+
+ producer->poll(0);
+ }
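+
+    /* A signal during the produce loop clears run; re-arm it so the flush
+     * loop below can drain the outbound queue, while a second signal can
+     * still abort the wait. */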
+ run = 1;
+
+ while (run && producer->outq_len() > 0) {
+ std::cerr << "Waiting for " << producer->outq_len() << std::endl;
+ producer->poll(1000);
+ }
+
+ delete producer;
+
+
+ } else if (mode == "C") {
+ /*
+ * Consumer mode
+ */
+
+ conf->set("enable.partition.eof", "true", errstr);
+
+ if (topic_str.empty())
+ goto usage;
+
+ /*
+ * Create consumer using accumulated global configuration.
+ */
+ RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr);
+ if (!consumer) {
+ std::cerr << "Failed to create consumer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ std::cout << "% Created consumer " << consumer->name() << std::endl;
+
+ /*
+ * Create topic handle.
+ */
+ RdKafka::Topic *topic =
+ RdKafka::Topic::create(consumer, topic_str, tconf, errstr);
+ if (!topic) {
+ std::cerr << "Failed to create topic: " << errstr << std::endl;
+ exit(1);
+ }
+
+ /*
+ * Start consumer for topic+partition at start offset
+ */
+ RdKafka::ErrorCode resp = consumer->start(topic, partition, start_offset);
+ if (resp != RdKafka::ERR_NO_ERROR) {
+ std::cerr << "Failed to start consumer: " << RdKafka::err2str(resp)
+ << std::endl;
+ exit(1);
+ }
+
+ ExampleConsumeCb ex_consume_cb;
+
+ /*
+ * Consume messages
+ */
+ while (run) {
+ if (use_ccb) {
+ consumer->consume_callback(topic, partition, 1000, &ex_consume_cb,
+ &use_ccb);
+ } else {
+ RdKafka::Message *msg = consumer->consume(topic, partition, 1000);
+ msg_consume(msg, NULL);
+ delete msg;
+ }
+ consumer->poll(0);
+ }
+
+ /*
+ * Stop consumer
+ */
+ consumer->stop(topic, partition);
+
+ consumer->poll(1000);
+
+ delete topic;
+ delete consumer;
+ } else {
+ /* Metadata mode */
+
+ /*
+ * Create producer using accumulated global configuration.
+ */
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer) {
+ std::cerr << "Failed to create producer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ std::cout << "% Created producer " << producer->name() << std::endl;
+
+ /*
+ * Create topic handle.
+ */
+ RdKafka::Topic *topic = NULL;
+ if (!topic_str.empty()) {
+ topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr);
+ if (!topic) {
+ std::cerr << "Failed to create topic: " << errstr << std::endl;
+ exit(1);
+ }
+ }
+
+ while (run) {
+ class RdKafka::Metadata *metadata;
+
+ /* Fetch metadata */
+ RdKafka::ErrorCode err =
+ producer->metadata(!topic, topic, &metadata, 5000);
+ if (err != RdKafka::ERR_NO_ERROR) {
+ std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err)
+ << std::endl;
+ run = 0;
+ break;
+ }
+
+ metadata_print(topic_str, metadata);
+
+ delete metadata;
+ run = 0;
+ }
+ }
+
+ delete conf;
+ delete tconf;
+
+ /*
+ * Wait for RdKafka to decommission.
+   * This is not strictly needed (given the outq_len() check above), but
+   * allows RdKafka to clean up all its resources before the application
+   * exits so that memory profilers such as valgrind won't complain about
+ * memory leaks.
+ */
+ RdKafka::wait_destroyed(5000);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_performance.c b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_performance.c
new file mode 100644
index 00000000..a12bb747
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_performance.c
@@ -0,0 +1,1780 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer & producer performance tester
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#ifdef _MSC_VER
+#define _CRT_SECURE_NO_WARNINGS /* Silence nonsense on MSVC */
+#endif
+
+#include "../src/rd.h"
+
+#define _GNU_SOURCE /* for strndup() */
+#include <ctype.h>
+#include <signal.h>
+#include <string.h>
+#include <errno.h>
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+/* Do not include these headers in your own program; they are not
+ * provided by an installed librdkafka. */
+#include "rd.h"
+#include "rdtime.h"
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#include "../win32/wintime.h"
+#endif
+
+
+static volatile sig_atomic_t run = 1;
+static int forever = 1;
+static rd_ts_t dispintvl = 1000;
+static int do_seq = 0;
+static int exit_after = 0;
+static int exit_eof = 0;
+static FILE *stats_fp;
+static int dr_disp_div;
+static int verbosity = 1;
+static int latency_mode = 0;
+static FILE *latency_fp = NULL;
+static int msgcnt = -1;
+static int incremental_mode = 0;
+static int partition_cnt = 0;
+static int eof_cnt = 0;
+static int with_dr = 1;
+static int read_hdrs = 0;
+
+
+static void stop(int sig) {
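+	/* The first signal requests an orderly shutdown; a second
+	 * signal while shutdown is in progress exits immediately. */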
+ if (!run)
+ exit(0);
+ run = 0;
+}
+
+static long int msgs_wait_cnt = 0;
+static long int msgs_wait_produce_cnt = 0;
+static rd_ts_t t_end;
+static rd_kafka_t *global_rk;
+
+struct avg {
+ int64_t val;
+ int cnt;
+ uint64_t ts_start;
+};
+
+static struct {
+ rd_ts_t t_start;
+ rd_ts_t t_end;
+ rd_ts_t t_end_send;
+ uint64_t msgs;
+ uint64_t msgs_last;
+ uint64_t msgs_dr_ok;
+ uint64_t msgs_dr_err;
+ uint64_t bytes_dr_ok;
+ uint64_t bytes;
+ uint64_t bytes_last;
+ uint64_t tx;
+ uint64_t tx_err;
+ uint64_t avg_rtt;
+ uint64_t offset;
+ rd_ts_t t_fetch_latency;
+ rd_ts_t t_last;
+ rd_ts_t t_enobufs_last;
+ rd_ts_t t_total;
+ rd_ts_t latency_last;
+ rd_ts_t latency_lo;
+ rd_ts_t latency_hi;
+ rd_ts_t latency_sum;
+ int latency_cnt;
+ int64_t last_offset;
+} cnt;
+
+
+uint64_t wall_clock(void) {
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return ((uint64_t)tv.tv_sec * 1000000LLU) + ((uint64_t)tv.tv_usec);
+}
+
+static void err_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
+ if (err == RD_KAFKA_RESP_ERR__FATAL) {
+ char errstr[512];
+ err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
+ printf("%% FATAL ERROR CALLBACK: %s: %s: %s\n",
+ rd_kafka_name(rk), rd_kafka_err2str(err), errstr);
+ } else {
+ printf("%% ERROR CALLBACK: %s: %s: %s\n", rd_kafka_name(rk),
+ rd_kafka_err2str(err), reason);
+ }
+}
+
+static void throttle_cb(rd_kafka_t *rk,
+ const char *broker_name,
+ int32_t broker_id,
+ int throttle_time_ms,
+ void *opaque) {
+ printf("%% THROTTLED %dms by %s (%" PRId32 ")\n", throttle_time_ms,
+ broker_name, broker_id);
+}
+
+static void offset_commit_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets,
+ void *opaque) {
+ int i;
+
+ if (err || verbosity >= 2)
+ printf("%% Offset commit of %d partition(s): %s\n",
+ offsets->cnt, rd_kafka_err2str(err));
+
+ for (i = 0; i < offsets->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
+ if (rktpar->err || verbosity >= 2)
+ printf("%% %s [%" PRId32 "] @ %" PRId64 ": %s\n",
+ rktpar->topic, rktpar->partition, rktpar->offset,
+ rd_kafka_err2str(err));
+ }
+}
+
+/**
+ * @brief Add latency measurement
+ */
+static void latency_add(int64_t ts, const char *who) {
+ if (ts > cnt.latency_hi)
+ cnt.latency_hi = ts;
+ if (!cnt.latency_lo || ts < cnt.latency_lo)
+ cnt.latency_lo = ts;
+ cnt.latency_last = ts;
+ cnt.latency_cnt++;
+ cnt.latency_sum += ts;
+ if (latency_fp)
+ fprintf(latency_fp, "%" PRIu64 "\n", ts);
+}
+
+
+static void msg_delivered(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ void *opaque) {
+ static rd_ts_t last;
+ rd_ts_t now = rd_clock();
+ static int msgs;
+
+ msgs++;
+
+ msgs_wait_cnt--;
+
+ if (rkmessage->err)
+ cnt.msgs_dr_err++;
+ else {
+ cnt.msgs_dr_ok++;
+ cnt.bytes_dr_ok += rkmessage->len;
+ }
+
+ if (latency_mode) {
+ /* Extract latency */
+ int64_t source_ts;
+ if (sscanf(rkmessage->payload, "LATENCY:%" SCNd64,
+ &source_ts) == 1)
+ latency_add(wall_clock() - source_ts, "producer");
+ }
+
+
+ if ((rkmessage->err && (cnt.msgs_dr_err < 50 ||
+ !(cnt.msgs_dr_err % (dispintvl / 1000)))) ||
+ !last || msgs_wait_cnt < 5 || !(msgs_wait_cnt % dr_disp_div) ||
+ (now - last) >= dispintvl * 1000 || verbosity >= 3) {
+ if (rkmessage->err && verbosity >= 2)
+ printf("%% Message delivery failed (broker %" PRId32
+ "): "
+ "%s [%" PRId32
+ "]: "
+ "%s (%li remain)\n",
+ rd_kafka_message_broker_id(rkmessage),
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition,
+ rd_kafka_err2str(rkmessage->err), msgs_wait_cnt);
+ else if (verbosity > 2)
+ printf("%% Message delivered (offset %" PRId64
+ ", broker %" PRId32
+ "): "
+ "%li remain\n",
+ rkmessage->offset,
+ rd_kafka_message_broker_id(rkmessage),
+ msgs_wait_cnt);
+ if (verbosity >= 3 && do_seq)
+ printf(" --> \"%.*s\"\n", (int)rkmessage->len,
+ (const char *)rkmessage->payload);
+ last = now;
+ }
+
+ cnt.last_offset = rkmessage->offset;
+
+ if (msgs_wait_produce_cnt == 0 && msgs_wait_cnt == 0 && !forever) {
+ if (verbosity >= 2 && cnt.msgs > 0) {
+ double error_percent =
+ (double)(cnt.msgs - cnt.msgs_dr_ok) / cnt.msgs *
+ 100;
+ printf(
+ "%% Messages delivered with failure "
+ "percentage of %.5f%%\n",
+ error_percent);
+ }
+ t_end = rd_clock();
+ run = 0;
+ }
+
+ if (exit_after && exit_after <= msgs) {
+ printf("%% Hard exit after %i messages, as requested\n",
+ exit_after);
+ exit(0);
+ }
+}
+
+
+static void msg_consume(rd_kafka_message_t *rkmessage, void *opaque) {
+
+ if (rkmessage->err) {
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ cnt.offset = rkmessage->offset;
+
+ if (verbosity >= 1)
+ printf(
+ "%% Consumer reached end of "
+ "%s [%" PRId32
+ "] "
+ "message queue at offset %" PRId64 "\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset);
+
+ if (exit_eof && ++eof_cnt == partition_cnt)
+ run = 0;
+
+ return;
+ }
+
+ printf("%% Consume error for topic \"%s\" [%" PRId32
+ "] "
+ "offset %" PRId64 ": %s\n",
+ rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt)
+ : "",
+ rkmessage->partition, rkmessage->offset,
+ rd_kafka_message_errstr(rkmessage));
+
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
+ rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+ run = 0;
+
+ cnt.msgs_dr_err++;
+ return;
+ }
+
+ /* Start measuring from first message received */
+ if (!cnt.t_start)
+ cnt.t_start = cnt.t_last = rd_clock();
+
+ cnt.offset = rkmessage->offset;
+ cnt.msgs++;
+ cnt.bytes += rkmessage->len;
+
+ if (verbosity >= 3 || (verbosity >= 2 && !(cnt.msgs % 1000000)))
+ printf("@%" PRId64 ": %.*s: %.*s\n", rkmessage->offset,
+ (int)rkmessage->key_len, (char *)rkmessage->key,
+ (int)rkmessage->len, (char *)rkmessage->payload);
+
+
+ if (latency_mode) {
+ int64_t remote_ts, ts;
+
+ if (rkmessage->len > 8 &&
+ !memcmp(rkmessage->payload, "LATENCY:", 8) &&
+ sscanf(rkmessage->payload, "LATENCY:%" SCNd64,
+ &remote_ts) == 1) {
+ ts = wall_clock() - remote_ts;
+ if (ts > 0 && ts < (1000000 * 60 * 5)) {
+ latency_add(ts, "consumer");
+ } else {
+ if (verbosity >= 1)
+ printf(
+ "Received latency timestamp is too "
+ "far off: %" PRId64
+ "us (message offset %" PRId64
+ "): ignored\n",
+ ts, rkmessage->offset);
+ }
+ } else if (verbosity > 1)
+ printf("not a LATENCY payload: %.*s\n",
+ (int)rkmessage->len, (char *)rkmessage->payload);
+ }
+
+ if (read_hdrs) {
+ rd_kafka_headers_t *hdrs;
+ /* Force parsing of headers but don't do anything with them. */
+ rd_kafka_message_headers(rkmessage, &hdrs);
+ }
+
+ if (msgcnt != -1 && (int)cnt.msgs >= msgcnt)
+ run = 0;
+}
+
+
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque) {
+ rd_kafka_error_t *error = NULL;
+ rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ if (exit_eof && !strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
+ fprintf(stderr,
+ "%% This example has not been modified to "
+ "support -e (exit on EOF) when "
+ "partition.assignment.strategy "
+ "is set to an incremental/cooperative strategy: "
+ "-e will not behave as expected\n");
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ fprintf(stderr,
+ "%% Group rebalanced (%s): "
+ "%d new partition(s) assigned\n",
+ rd_kafka_rebalance_protocol(rk), partitions->cnt);
+
+ if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) {
+ error = rd_kafka_incremental_assign(rk, partitions);
+ } else {
+ ret_err = rd_kafka_assign(rk, partitions);
+ eof_cnt = 0;
+ }
+
+ partition_cnt += partitions->cnt;
+ break;
+
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ fprintf(stderr,
+ "%% Group rebalanced (%s): %d partition(s) revoked\n",
+ rd_kafka_rebalance_protocol(rk), partitions->cnt);
+
+ if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) {
+ error = rd_kafka_incremental_unassign(rk, partitions);
+ partition_cnt -= partitions->cnt;
+ } else {
+ ret_err = rd_kafka_assign(rk, NULL);
+ partition_cnt = 0;
+ }
+
+ eof_cnt = 0; /* FIXME: Not correct for incremental case */
+ break;
+
+ default:
+ break;
+ }
+
+ if (error) {
+ fprintf(stderr, "%% incremental assign failure: %s\n",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ } else if (ret_err) {
+ fprintf(stderr, "%% assign failure: %s\n",
+ rd_kafka_err2str(ret_err));
+ }
+}
+
+
+/**
+ * Find and extract a single value via a two-level search:
+ * first find 'field1', then find 'field2' and extract its value.
+ * Returns 0 on a miss, else the value.
+ */
+static uint64_t json_parse_fields(const char *json,
+ const char **end,
+ const char *field1,
+ const char *field2) {
+ const char *t = json;
+ const char *t2;
+ int len1 = (int)strlen(field1);
+ int len2 = (int)strlen(field2);
+
+ while ((t2 = strstr(t, field1))) {
+ uint64_t v;
+
+ t = t2;
+ t += len1;
+
+ /* Find field */
+ if (!(t2 = strstr(t, field2)))
+ continue;
+ t2 += len2;
+
+ while (isspace((int)*t2))
+ t2++;
+
+ v = strtoull(t2, (char **)&t, 10);
+ if (t2 == t)
+ continue;
+
+ *end = t;
+ return v;
+ }
+
+ *end = t + strlen(t);
+ return 0;
+}
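+
+/* For example, json_parse_fields(json, &t, "\"rtt\":", "\"avg\":") returns
+ * the number following the first "avg": key that appears after an "rtt":
+ * key, i.e. a broker's average round-trip time from the stats JSON. */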
+
+/**
+ * Parse various values from rdkafka stats
+ */
+static void json_parse_stats(const char *json) {
+ const char *t;
+#define MAX_AVGS 100 /* max number of brokers to scan for rtt */
+ uint64_t avg_rtt[MAX_AVGS + 1];
+ int avg_rtt_i = 0;
+
+ /* Store totals at end of array */
+ avg_rtt[MAX_AVGS] = 0;
+
+ /* Extract all broker RTTs */
+ t = json;
+ while (avg_rtt_i < MAX_AVGS && *t) {
+ avg_rtt[avg_rtt_i] =
+ json_parse_fields(t, &t, "\"rtt\":", "\"avg\":");
+
+		/* Skip low RTT values; they mean no messages are passing */
+ if (avg_rtt[avg_rtt_i] < 100 /*0.1ms*/)
+ continue;
+
+
+ avg_rtt[MAX_AVGS] += avg_rtt[avg_rtt_i];
+ avg_rtt_i++;
+ }
+
+ if (avg_rtt_i > 0)
+ avg_rtt[MAX_AVGS] /= avg_rtt_i;
+
+ cnt.avg_rtt = avg_rtt[MAX_AVGS];
+}
+
+
+static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) {
+
+ /* Extract values for our own stats */
+ json_parse_stats(json);
+
+ if (stats_fp)
+ fprintf(stats_fp, "%s\n", json);
+ return 0;
+}
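+
+/* stats_cb is invoked from poll at the configured statistics.interval.ms.
+ * Returning 0 tells librdkafka to free the JSON buffer immediately;
+ * returning non-zero would instead pass ownership of it to the
+ * application. */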
+
+#define _OTYPE_TAB 0x1 /* tabular format */
+#define _OTYPE_SUMMARY 0x2 /* summary format */
+#define _OTYPE_FORCE 0x4 /* force output regardless of interval timing */
+static void
+print_stats(rd_kafka_t *rk, int mode, int otype, const char *compression) {
+ rd_ts_t now = rd_clock();
+ rd_ts_t t_total;
+ static int rows_written = 0;
+ int print_header;
+ double latency_avg = 0.0f;
+ char extra[512];
+ int extra_of = 0;
+ *extra = '\0';
+
+ if (!(otype & _OTYPE_FORCE) &&
+ (((otype & _OTYPE_SUMMARY) && verbosity == 0) ||
+ cnt.t_last + dispintvl > now))
+ return;
+
+ print_header = !rows_written || (verbosity > 0 && !(rows_written % 20));
+
+ if (cnt.t_end_send)
+ t_total = cnt.t_end_send - cnt.t_start;
+ else if (cnt.t_end)
+ t_total = cnt.t_end - cnt.t_start;
+ else if (cnt.t_start)
+ t_total = now - cnt.t_start;
+ else
+ t_total = 1;
+
+ if (latency_mode && cnt.latency_cnt)
+ latency_avg = (double)cnt.latency_sum / (double)cnt.latency_cnt;
+
+ if (mode == 'P') {
+
+ if (otype & _OTYPE_TAB) {
+#define ROW_START() \
+ do { \
+ } while (0)
+#define COL_HDR(NAME) printf("| %10.10s ", (NAME))
+#define COL_PR64(NAME, VAL) printf("| %10" PRIu64 " ", (VAL))
+#define COL_PRF(NAME, VAL) printf("| %10.2f ", (VAL))
+#define ROW_END() \
+ do { \
+ printf("\n"); \
+ rows_written++; \
+ } while (0)
+
+ if (print_header) {
+ /* First time, print header */
+ ROW_START();
+ COL_HDR("elapsed");
+ COL_HDR("msgs");
+ COL_HDR("bytes");
+ COL_HDR("rtt");
+ COL_HDR("dr");
+ COL_HDR("dr_m/s");
+ COL_HDR("dr_MB/s");
+ COL_HDR("dr_err");
+ COL_HDR("tx_err");
+ COL_HDR("outq");
+ COL_HDR("offset");
+ if (latency_mode) {
+ COL_HDR("lat_curr");
+ COL_HDR("lat_avg");
+ COL_HDR("lat_lo");
+ COL_HDR("lat_hi");
+ }
+
+ ROW_END();
+ }
+
+ ROW_START();
+ COL_PR64("elapsed", t_total / 1000);
+ COL_PR64("msgs", cnt.msgs);
+ COL_PR64("bytes", cnt.bytes);
+ COL_PR64("rtt", cnt.avg_rtt / 1000);
+ COL_PR64("dr", cnt.msgs_dr_ok);
+ COL_PR64("dr_m/s",
+ ((cnt.msgs_dr_ok * 1000000) / t_total));
+ COL_PRF("dr_MB/s",
+ (float)((cnt.bytes_dr_ok) / (float)t_total));
+ COL_PR64("dr_err", cnt.msgs_dr_err);
+ COL_PR64("tx_err", cnt.tx_err);
+ COL_PR64("outq",
+ rk ? (uint64_t)rd_kafka_outq_len(rk) : 0);
+ COL_PR64("offset", (uint64_t)cnt.last_offset);
+ if (latency_mode) {
+ COL_PRF("lat_curr", cnt.latency_last / 1000.0f);
+ COL_PRF("lat_avg", latency_avg / 1000.0f);
+ COL_PRF("lat_lo", cnt.latency_lo / 1000.0f);
+ COL_PRF("lat_hi", cnt.latency_hi / 1000.0f);
+ }
+ ROW_END();
+ }
+
+ if (otype & _OTYPE_SUMMARY) {
+ printf("%% %" PRIu64
+ " messages produced "
+ "(%" PRIu64
+ " bytes), "
+ "%" PRIu64
+ " delivered "
+ "(offset %" PRId64 ", %" PRIu64
+ " failed) "
+ "in %" PRIu64 "ms: %" PRIu64
+ " msgs/s and "
+ "%.02f MB/s, "
+ "%" PRIu64
+ " produce failures, %i in queue, "
+ "%s compression\n",
+ cnt.msgs, cnt.bytes, cnt.msgs_dr_ok,
+ cnt.last_offset, cnt.msgs_dr_err, t_total / 1000,
+ ((cnt.msgs_dr_ok * 1000000) / t_total),
+ (float)((cnt.bytes_dr_ok) / (float)t_total),
+ cnt.tx_err, rk ? rd_kafka_outq_len(rk) : 0,
+ compression);
+ }
+
+ } else {
+
+ if (otype & _OTYPE_TAB) {
+ if (print_header) {
+ /* First time, print header */
+ ROW_START();
+ COL_HDR("elapsed");
+ COL_HDR("msgs");
+ COL_HDR("bytes");
+ COL_HDR("rtt");
+ COL_HDR("m/s");
+ COL_HDR("MB/s");
+ COL_HDR("rx_err");
+ COL_HDR("offset");
+ if (latency_mode) {
+ COL_HDR("lat_curr");
+ COL_HDR("lat_avg");
+ COL_HDR("lat_lo");
+ COL_HDR("lat_hi");
+ }
+ ROW_END();
+ }
+
+ ROW_START();
+ COL_PR64("elapsed", t_total / 1000);
+ COL_PR64("msgs", cnt.msgs);
+ COL_PR64("bytes", cnt.bytes);
+ COL_PR64("rtt", cnt.avg_rtt / 1000);
+ COL_PR64("m/s", ((cnt.msgs * 1000000) / t_total));
+ COL_PRF("MB/s", (float)((cnt.bytes) / (float)t_total));
+ COL_PR64("rx_err", cnt.msgs_dr_err);
+ COL_PR64("offset", cnt.offset);
+ if (latency_mode) {
+ COL_PRF("lat_curr", cnt.latency_last / 1000.0f);
+ COL_PRF("lat_avg", latency_avg / 1000.0f);
+ COL_PRF("lat_lo", cnt.latency_lo / 1000.0f);
+ COL_PRF("lat_hi", cnt.latency_hi / 1000.0f);
+ }
+ ROW_END();
+ }
+
+ if (otype & _OTYPE_SUMMARY) {
+ if (latency_avg >= 1.0f)
+ extra_of += rd_snprintf(
+ extra + extra_of, sizeof(extra) - extra_of,
+ ", latency "
+ "curr/avg/lo/hi "
+ "%.2f/%.2f/%.2f/%.2fms",
+ cnt.latency_last / 1000.0f,
+ latency_avg / 1000.0f,
+ cnt.latency_lo / 1000.0f,
+ cnt.latency_hi / 1000.0f);
+ printf("%% %" PRIu64 " messages (%" PRIu64
+ " bytes) "
+ "consumed in %" PRIu64 "ms: %" PRIu64
+ " msgs/s "
+ "(%.02f MB/s)"
+ "%s\n",
+ cnt.msgs, cnt.bytes, t_total / 1000,
+ ((cnt.msgs * 1000000) / t_total),
+ (float)((cnt.bytes) / (float)t_total), extra);
+ }
+
+		/* Skip the interval report until t_last has been set, to
+		 * avoid dividing by an i_time of zero on the first pass. */
+		if (incremental_mode && cnt.t_last && now > cnt.t_last) {
+ uint64_t i_msgs = cnt.msgs - cnt.msgs_last;
+ uint64_t i_bytes = cnt.bytes - cnt.bytes_last;
+ uint64_t i_time = cnt.t_last ? now - cnt.t_last : 0;
+
+ printf("%% INTERVAL: %" PRIu64
+ " messages "
+ "(%" PRIu64
+ " bytes) "
+ "consumed in %" PRIu64 "ms: %" PRIu64
+ " msgs/s "
+ "(%.02f MB/s)"
+ "%s\n",
+ i_msgs, i_bytes, i_time / 1000,
+			       i_time ? ((i_msgs * 1000000) / i_time) : 0,
+			       i_time ? (float)((i_bytes) / (float)i_time)
+				      : 0.0f,
+			       extra);
+ }
+ }
+
+ cnt.t_last = now;
+ cnt.msgs_last = cnt.msgs;
+ cnt.bytes_last = cnt.bytes;
+}
+
+
+static void sig_usr1(int sig) {
+ rd_kafka_dump(stdout, global_rk);
+}
+
+
+/**
+ * @brief Read config from file
+ * @returns -1 on error, else 0.
+ */
+static int read_conf_file(rd_kafka_conf_t *conf, const char *path) {
+ FILE *fp;
+ char buf[512];
+ int line = 0;
+ char errstr[512];
+
+ if (!(fp = fopen(path, "r"))) {
+ fprintf(stderr, "%% Failed to open %s: %s\n", path,
+ strerror(errno));
+ return -1;
+ }
+
+ while (fgets(buf, sizeof(buf), fp)) {
+ char *s = buf;
+ char *t;
+ rd_kafka_conf_res_t r = RD_KAFKA_CONF_UNKNOWN;
+
+ line++;
+
+ while (isspace((int)*s))
+ s++;
+
+ if (!*s || *s == '#')
+ continue;
+
+ if ((t = strchr(buf, '\n')))
+ *t = '\0';
+
+ t = strchr(buf, '=');
+ if (!t || t == s || !*(t + 1)) {
+ fprintf(stderr, "%% %s:%d: expected key=value\n", path,
+ line);
+ fclose(fp);
+ return -1;
+ }
+
+ *(t++) = '\0';
+
+ /* Try global config */
+ r = rd_kafka_conf_set(conf, s, t, errstr, sizeof(errstr));
+
+ if (r == RD_KAFKA_CONF_OK)
+ continue;
+
+ fprintf(stderr, "%% %s:%d: %s=%s: %s\n", path, line, s, t,
+ errstr);
+ fclose(fp);
+ return -1;
+ }
+
+ fclose(fp);
+
+ return 0;
+}
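+
+/*
+ * Illustrative only: read_conf_file() expects plain `key=value` lines;
+ * blank lines and lines starting with '#' are ignored. A hypothetical
+ * file passed with `-X file=<path>` could look like:
+ *
+ *   # example.conf (hypothetical)
+ *   bootstrap.servers=localhost:9092
+ *   compression.codec=snappy
+ *   queued.min.messages=500000
+ */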
+
+
+static rd_kafka_resp_err_t do_produce(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t size,
+ const void *key,
+ size_t key_size,
+ const rd_kafka_headers_t *hdrs) {
+
+ /* Send/Produce message. */
+ if (hdrs) {
+ rd_kafka_headers_t *hdrs_copy;
+ rd_kafka_resp_err_t err;
+
+ hdrs_copy = rd_kafka_headers_copy(hdrs);
+
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_RKT(rkt), RD_KAFKA_V_PARTITION(partition),
+ RD_KAFKA_V_MSGFLAGS(msgflags),
+ RD_KAFKA_V_VALUE(payload, size),
+ RD_KAFKA_V_KEY(key, key_size),
+ RD_KAFKA_V_HEADERS(hdrs_copy), RD_KAFKA_V_END);
+
+ if (err)
+ rd_kafka_headers_destroy(hdrs_copy);
+
+ return err;
+
+ } else {
+ if (rd_kafka_produce(rkt, partition, msgflags, payload, size,
+ key, key_size, NULL) == -1)
+ return rd_kafka_last_error();
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
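+
+/*
+ * Note on header ownership: rd_kafka_producev() takes ownership of the
+ * headers on success, which is why do_produce() passes a copy of the
+ * caller's header template for each message and destroys that copy
+ * itself only when the produce call fails.
+ */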
+
+/**
+ * @brief Sleep for \p sleep_us microseconds.
+ */
+static void do_sleep(int sleep_us) {
+ if (sleep_us > 100) {
+#ifdef _WIN32
+ Sleep(sleep_us / 1000);
+#else
+ usleep(sleep_us);
+#endif
+	} else {
+		/* OS timers are too coarse for sub-100us sleeps,
+		 * so busy-wait on the high-resolution clock instead. */
+		rd_ts_t next = rd_clock() + (rd_ts_t)sleep_us;
+		while (next > rd_clock())
+			;
+ }
+}
+
+
+int main(int argc, char **argv) {
+ char *brokers = NULL;
+ char mode = 'C';
+ char *topic = NULL;
+ const char *key = NULL;
+ int *partitions = NULL;
+ int opt;
+ int sendflags = 0;
+ char *msgpattern = "librdkafka_performance testing!";
+ int msgsize = -1;
+ const char *debug = NULL;
+ int do_conf_dump = 0;
+ rd_ts_t now;
+ char errstr[512];
+ uint64_t seq = 0;
+ int seed = (int)time(NULL);
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_queue_t *rkqu = NULL;
+ const char *compression = "no";
+ int64_t start_offset = 0;
+ int batch_size = 0;
+ int idle = 0;
+ const char *stats_cmd = NULL;
+ char *stats_intvlstr = NULL;
+ char tmp[128];
+ char *tmp2;
+ int otype = _OTYPE_SUMMARY;
+ double dtmp;
+ int rate_sleep = 0;
+ rd_kafka_topic_partition_list_t *topics;
+ int exitcode = 0;
+ rd_kafka_headers_t *hdrs = NULL;
+ rd_kafka_resp_err_t err;
+
+ /* Kafka configuration */
+ conf = rd_kafka_conf_new();
+ rd_kafka_conf_set_error_cb(conf, err_cb);
+ rd_kafka_conf_set_throttle_cb(conf, throttle_cb);
+ rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
+
+#ifdef SIGIO
+ /* Quick termination */
+ rd_snprintf(tmp, sizeof(tmp), "%i", SIGIO);
+ rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
+#endif
+
+ /* Producer config */
+ rd_kafka_conf_set(conf, "linger.ms", "1000", NULL, 0);
+ rd_kafka_conf_set(conf, "message.send.max.retries", "3", NULL, 0);
+ rd_kafka_conf_set(conf, "retry.backoff.ms", "500", NULL, 0);
+
+ /* Consumer config */
+ /* Tell rdkafka to (try to) maintain 1M messages
+ * in its internal receive buffers. This is to avoid
+ * application -> rdkafka -> broker per-message ping-pong
+ * latency.
+ * The larger the local queue, the higher the performance.
+ * Try other values with: ... -X queued.min.messages=1000
+ */
+ rd_kafka_conf_set(conf, "queued.min.messages", "1000000", NULL, 0);
+ rd_kafka_conf_set(conf, "session.timeout.ms", "6000", NULL, 0);
+ rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", NULL, 0);
+
+ topics = rd_kafka_topic_partition_list_new(1);
+
+ while ((opt = getopt(argc, argv,
+ "PCG:t:p:b:s:k:c:fi:MDd:m:S:x:"
+ "R:a:z:o:X:B:eT:Y:qvIur:lA:OwNH:")) != -1) {
+ switch (opt) {
+ case 'G':
+ if (rd_kafka_conf_set(conf, "group.id", optarg, errstr,
+ sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+ /* FALLTHRU */
+ case 'P':
+ case 'C':
+ mode = opt;
+ break;
+ case 't':
+ rd_kafka_topic_partition_list_add(
+ topics, optarg, RD_KAFKA_PARTITION_UA);
+ break;
+ case 'p':
+ partition_cnt++;
+ partitions = realloc(partitions, sizeof(*partitions) *
+ partition_cnt);
+ partitions[partition_cnt - 1] = atoi(optarg);
+ break;
+
+ case 'b':
+ brokers = optarg;
+ break;
+ case 's':
+ msgsize = atoi(optarg);
+ break;
+ case 'k':
+ key = optarg;
+ break;
+ case 'c':
+ msgcnt = atoi(optarg);
+ break;
+ case 'D':
+ sendflags |= RD_KAFKA_MSG_F_FREE;
+ break;
+ case 'i':
+ dispintvl = atoi(optarg);
+ break;
+ case 'm':
+ msgpattern = optarg;
+ break;
+ case 'S':
+ seq = strtoull(optarg, NULL, 10);
+ do_seq = 1;
+ break;
+ case 'x':
+ exit_after = atoi(optarg);
+ break;
+ case 'R':
+ seed = atoi(optarg);
+ break;
+ case 'a':
+ if (rd_kafka_conf_set(conf, "acks", optarg, errstr,
+ sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+ break;
+ case 'B':
+ batch_size = atoi(optarg);
+ break;
+ case 'z':
+ if (rd_kafka_conf_set(conf, "compression.codec", optarg,
+ errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+ compression = optarg;
+ break;
+ case 'o':
+ if (!strcmp(optarg, "end"))
+ start_offset = RD_KAFKA_OFFSET_END;
+ else if (!strcmp(optarg, "beginning"))
+ start_offset = RD_KAFKA_OFFSET_BEGINNING;
+ else if (!strcmp(optarg, "stored"))
+ start_offset = RD_KAFKA_OFFSET_STORED;
+ else {
+ start_offset = strtoll(optarg, NULL, 10);
+
+ if (start_offset < 0)
+ start_offset =
+ RD_KAFKA_OFFSET_TAIL(-start_offset);
+ }
+
+ break;
+ case 'e':
+ exit_eof = 1;
+ break;
+ case 'd':
+ debug = optarg;
+ break;
+ case 'H':
+ if (!strcmp(optarg, "parse"))
+ read_hdrs = 1;
+ else {
+ char *name, *val;
+ size_t name_sz = -1;
+
+ name = optarg;
+ val = strchr(name, '=');
+ if (val) {
+ name_sz = (size_t)(val - name);
+ val++; /* past the '=' */
+ }
+
+ if (!hdrs)
+ hdrs = rd_kafka_headers_new(8);
+
+ err = rd_kafka_header_add(hdrs, name, name_sz,
+ val, -1);
+ if (err) {
+ fprintf(
+ stderr,
+ "%% Failed to add header %s: %s\n",
+ name, rd_kafka_err2str(err));
+ exit(1);
+ }
+ }
+ break;
+ case 'X': {
+ char *name, *val;
+ rd_kafka_conf_res_t res;
+
+ if (!strcmp(optarg, "list") ||
+ !strcmp(optarg, "help")) {
+ rd_kafka_conf_properties_show(stdout);
+ exit(0);
+ }
+
+ if (!strcmp(optarg, "dump")) {
+ do_conf_dump = 1;
+ continue;
+ }
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ fprintf(stderr,
+ "%% Expected "
+ "-X property=value, not %s\n",
+ name);
+ exit(1);
+ }
+
+ *val = '\0';
+ val++;
+
+ if (!strcmp(name, "file")) {
+ if (read_conf_file(conf, val) == -1)
+ exit(1);
+ break;
+ }
+
+ res = rd_kafka_conf_set(conf, name, val, errstr,
+ sizeof(errstr));
+
+ if (res != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+ } break;
+
+ case 'T':
+ stats_intvlstr = optarg;
+ break;
+ case 'Y':
+ stats_cmd = optarg;
+ break;
+
+ case 'q':
+ verbosity--;
+ break;
+
+ case 'v':
+ verbosity++;
+ break;
+
+ case 'I':
+ idle = 1;
+ break;
+
+ case 'u':
+ otype = _OTYPE_TAB;
+ verbosity--; /* remove some fluff */
+ break;
+
+ case 'r':
+ dtmp = strtod(optarg, &tmp2);
+ if (tmp2 == optarg ||
+ (dtmp >= -0.001 && dtmp <= 0.001)) {
+ fprintf(stderr, "%% Invalid rate: %s\n",
+ optarg);
+ exit(1);
+ }
+
+ rate_sleep = (int)(1000000.0 / dtmp);
+ break;
+
+ case 'l':
+ latency_mode = 1;
+ break;
+
+ case 'A':
+ if (!(latency_fp = fopen(optarg, "w"))) {
+			fprintf(stderr, "%% Can't open %s: %s\n", optarg,
+ strerror(errno));
+ exit(1);
+ }
+ break;
+
+ case 'M':
+ incremental_mode = 1;
+ break;
+
+ case 'N':
+ with_dr = 0;
+ break;
+
+ default:
+ fprintf(stderr, "Unknown option: %c\n", opt);
+ goto usage;
+ }
+ }
+
+ if (topics->cnt == 0 || optind != argc) {
+ if (optind < argc)
+ fprintf(stderr, "Unknown argument: %s\n", argv[optind]);
+ usage:
+ fprintf(
+ stderr,
+ "Usage: %s [-C|-P] -t <topic> "
+ "[-p <partition>] [-b <broker,broker..>] [options..]\n"
+ "\n"
+ "librdkafka version %s (0x%08x)\n"
+ "\n"
+ " Options:\n"
+ " -C | -P | Consumer or Producer mode\n"
+ " -G <groupid> High-level Kafka Consumer mode\n"
+ " -t <topic> Topic to consume / produce\n"
+ " -p <num> Partition (defaults to random). "
+ "Multiple partitions are allowed in -C consumer mode.\n"
+ " -M Print consumer interval stats\n"
+ " -b <brokers> Broker address list (host[:port],..)\n"
+ " -s <size> Message size (producer)\n"
+ " -k <key> Message key (producer)\n"
+ " -H <name[=value]> Add header to message (producer)\n"
+ " -H parse Read message headers (consumer)\n"
+ " -c <cnt> Messages to transmit/receive\n"
+ " -x <cnt> Hard exit after transmitting <cnt> "
+ "messages (producer)\n"
+ " -D Copy/Duplicate data buffer (producer)\n"
+ " -i <ms> Display interval\n"
+ " -m <msg> Message payload pattern\n"
+ " -S <start> Send a sequence number starting at "
+ "<start> as payload\n"
+ " -R <seed> Random seed value (defaults to time)\n"
+ " -a <acks> Required acks (producer): "
+ "-1, 0, 1, >1\n"
+ " -B <size> Consume batch size (# of msgs)\n"
+ " -z <codec> Enable compression:\n"
+ " none|gzip|snappy\n"
+ " -o <offset> Start offset (consumer)\n"
+ " beginning, end, NNNNN or -NNNNN\n"
+ " -d [facs..] Enable debugging contexts:\n"
+ " %s\n"
+		    "  -X <prop=value> Set arbitrary librdkafka "
+ "configuration property\n"
+ " -X file=<path> Read config from file.\n"
+ " -X list Show full list of supported properties.\n"
+ " -X dump Show configuration\n"
+ " -T <intvl> Enable statistics from librdkafka at "
+ "specified interval (ms)\n"
+ " -Y <command> Pipe statistics to <command>\n"
+		    "  -I           Idle: don't produce any messages\n"
+ " -q Decrease verbosity\n"
+ " -v Increase verbosity (default 1)\n"
+ " -u Output stats in table format\n"
+ " -r <rate> Producer msg/s limit\n"
+		    "  -l           Latency measurement; on the producer "
+		    "this also\n"
+		    "               enables per-message latency stats.\n"
+		    "               Needs two matching instances, one\n"
+		    "               consumer and one producer, both\n"
+		    "               running with the -l switch.\n"
+		    "  -A <file>    Write per-message latency stats to "
+		    "<file>. Requires -l\n"
+ " -O Report produced offset (producer)\n"
+ " -N No delivery reports (producer)\n"
+ "\n"
+ " In Consumer mode:\n"
+		    "    consumes messages and prints throughput\n"
+		    "    If -B <..> is supplied the batch consumer\n"
+		    "    mode is used, else the callback mode is used.\n"
+		    "\n"
+		    "  In Producer mode:\n"
+		    "    writes messages of size -s <..> and prints throughput\n"
+ "\n",
+ argv[0], rd_kafka_version_str(), rd_kafka_version(),
+ RD_KAFKA_DEBUG_CONTEXTS);
+ exit(1);
+ }
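+
+	/*
+	 * Example invocations (illustrative; broker and topic names are
+	 * placeholders):
+	 *
+	 *   Produce 1M 100-byte messages to topic "perf":
+	 *     rdkafka_performance -P -t perf -b localhost:9092 -s 100 -c 1000000
+	 *
+	 *   Consume them from the beginning with table-format stats:
+	 *     rdkafka_performance -C -t perf -p 0 -b localhost:9092 -o beginning -u
+	 */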
+
+
+ dispintvl *= 1000; /* us */
+
+ if (verbosity > 1)
+ printf("%% Using random seed %i, verbosity level %i\n", seed,
+ verbosity);
+ srand(seed);
+ signal(SIGINT, stop);
+#ifdef SIGUSR1
+ signal(SIGUSR1, sig_usr1);
+#endif
+
+
+ if (debug && rd_kafka_conf_set(conf, "debug", debug, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ printf("%% Debug configuration failed: %s: %s\n", errstr,
+ debug);
+ exit(1);
+ }
+
+	/* Always enable stats (for RTT extraction), and if the user supplied
+	 * the -T <intvl> option, let them see the stats as well. */
+ rd_kafka_conf_set_stats_cb(conf, stats_cb);
+
+ if (!stats_intvlstr) {
+		/* If the user did not request stats (-T), match the stats
+		 * interval to the display interval. */
+ rd_snprintf(tmp, sizeof(tmp), "%" PRId64, dispintvl / 1000);
+ }
+
+ if (rd_kafka_conf_set(conf, "statistics.interval.ms",
+ stats_intvlstr ? stats_intvlstr : tmp, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+
+ if (do_conf_dump) {
+ const char **arr;
+ size_t cnt;
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ int i;
+
+ if (pass == 0) {
+ arr = rd_kafka_conf_dump(conf, &cnt);
+ printf("# Global config\n");
+ } else {
+ rd_kafka_topic_conf_t *topic_conf =
+ rd_kafka_conf_get_default_topic_conf(conf);
+
+ if (topic_conf) {
+ printf("# Topic config\n");
+ arr = rd_kafka_topic_conf_dump(
+ topic_conf, &cnt);
+ } else {
+ arr = NULL;
+ }
+ }
+
+ if (!arr)
+ continue;
+
+ for (i = 0; i < (int)cnt; i += 2)
+ printf("%s = %s\n", arr[i], arr[i + 1]);
+
+ printf("\n");
+
+ rd_kafka_conf_dump_free(arr, cnt);
+ }
+
+ exit(0);
+ }
+
+ if (latency_mode)
+ do_seq = 0;
+
+ if (stats_intvlstr) {
+ /* User enabled stats (-T) */
+
+#ifndef _WIN32
+ if (stats_cmd) {
+ if (!(stats_fp = popen(stats_cmd,
+#ifdef __linux__
+ "we"
+#else
+ "w"
+#endif
+ ))) {
+ fprintf(stderr,
+ "%% Failed to start stats command: "
+ "%s: %s",
+ stats_cmd, strerror(errno));
+ exit(1);
+ }
+ } else
+#endif
+ stats_fp = stdout;
+ }
+
+ if (msgcnt != -1)
+ forever = 0;
+
+ if (msgsize == -1)
+ msgsize = (int)strlen(msgpattern);
+
+ topic = topics->elems[0].topic;
+
+ if (mode == 'C' || mode == 'G')
+ rd_kafka_conf_set(conf, "enable.partition.eof", "true", NULL,
+ 0);
+
+ if (read_hdrs && mode == 'P') {
+		fprintf(stderr, "%% producer cannot read headers\n");
+ exit(1);
+ }
+
+ if (hdrs && mode != 'P') {
+		fprintf(stderr, "%% consumer cannot add headers\n");
+ exit(1);
+ }
+
+ /* Set bootstrap servers */
+ if (brokers &&
+ rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+
+ if (mode == 'P') {
+ /*
+ * Producer
+ */
+ char *sbuf;
+ char *pbuf;
+ int outq;
+ int keylen = key ? (int)strlen(key) : 0;
+ off_t rof = 0;
+ size_t plen = strlen(msgpattern);
+ int partition =
+ partitions ? partitions[0] : RD_KAFKA_PARTITION_UA;
+
+ if (latency_mode) {
+ int minlen = (int)(strlen("LATENCY:") +
+ strlen("18446744073709551615 ") + 1);
+ msgsize = RD_MAX(minlen, msgsize);
+ sendflags |= RD_KAFKA_MSG_F_COPY;
+ } else if (do_seq) {
+ int minlen = (int)strlen("18446744073709551615 ") + 1;
+ if (msgsize < minlen)
+ msgsize = minlen;
+
+ /* Force duplication of payload */
+ sendflags |= RD_KAFKA_MSG_F_FREE;
+ }
+
+ sbuf = malloc(msgsize);
+
+ /* Copy payload content to new buffer */
+ while (rof < msgsize) {
+ size_t xlen = RD_MIN((size_t)msgsize - rof, plen);
+ memcpy(sbuf + rof, msgpattern, xlen);
+ rof += (off_t)xlen;
+ }
+
+ if (msgcnt == -1)
+ printf("%% Sending messages of size %i bytes\n",
+ msgsize);
+ else
+ printf("%% Sending %i messages of size %i bytes\n",
+ msgcnt, msgsize);
+
+ if (with_dr)
+ rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered);
+
+ /* Create Kafka handle */
+ if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
+ sizeof(errstr)))) {
+ fprintf(stderr,
+ "%% Failed to create Kafka producer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ global_rk = rk;
+
+ /* Explicitly create topic to avoid per-msg lookups. */
+ rkt = rd_kafka_topic_new(rk, topic, NULL);
+
+
+ if (rate_sleep && verbosity >= 2)
+ fprintf(stderr,
+				"%% Inter-message rate limiter sleep %ius\n",
+ rate_sleep);
+
+ dr_disp_div = msgcnt / 50;
+ if (dr_disp_div == 0)
+ dr_disp_div = 10;
+
+ cnt.t_start = cnt.t_last = rd_clock();
+
+ msgs_wait_produce_cnt = msgcnt;
+
+ while (run && (msgcnt == -1 || (int)cnt.msgs < msgcnt)) {
+ /* Send/Produce message. */
+
+ if (idle) {
+ rd_kafka_poll(rk, 1000);
+ continue;
+ }
+
+ if (latency_mode) {
+ rd_snprintf(sbuf, msgsize - 1,
+ "LATENCY:%" PRIu64, wall_clock());
+ } else if (do_seq) {
+ rd_snprintf(sbuf, msgsize - 1, "%" PRIu64 ": ",
+ seq);
+ seq++;
+ }
+
+ if (sendflags & RD_KAFKA_MSG_F_FREE) {
+ /* Duplicate memory */
+ pbuf = malloc(msgsize);
+ memcpy(pbuf, sbuf, msgsize);
+ } else
+ pbuf = sbuf;
+
+ if (msgsize == 0)
+ pbuf = NULL;
+
+ cnt.tx++;
+ while (run && (err = do_produce(
+ rk, rkt, partition, sendflags, pbuf,
+ msgsize, key, keylen, hdrs))) {
+ if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
+ printf(
+ "%% No such partition: "
+ "%" PRId32 "\n",
+ partition);
+ else if (verbosity >= 3 ||
+ (err !=
+ RD_KAFKA_RESP_ERR__QUEUE_FULL &&
+ verbosity >= 1))
+ printf(
+ "%% produce error: %s%s\n",
+ rd_kafka_err2str(err),
+ err == RD_KAFKA_RESP_ERR__QUEUE_FULL
+ ? " (backpressure)"
+ : "");
+
+ cnt.tx_err++;
+ if (err != RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+ run = 0;
+ break;
+ }
+ now = rd_clock();
+ if (verbosity >= 2 &&
+ cnt.t_enobufs_last + dispintvl <= now) {
+ printf(
+ "%% Backpressure %i "
+ "(tx %" PRIu64
+ ", "
+ "txerr %" PRIu64 ")\n",
+ rd_kafka_outq_len(rk), cnt.tx,
+ cnt.tx_err);
+ cnt.t_enobufs_last = now;
+ }
+
+ /* Poll to handle delivery reports */
+ rd_kafka_poll(rk, 10);
+
+ print_stats(rk, mode, otype, compression);
+ }
+
+ msgs_wait_cnt++;
+ if (msgs_wait_produce_cnt != -1)
+ msgs_wait_produce_cnt--;
+ cnt.msgs++;
+ cnt.bytes += msgsize;
+
+ /* Must poll to handle delivery reports */
+ if (rate_sleep) {
+ rd_ts_t next = rd_clock() + (rd_ts_t)rate_sleep;
+ do {
+ rd_kafka_poll(
+ rk,
+ (int)RD_MAX(0, (next - rd_clock()) /
+ 1000));
+ } while (next > rd_clock());
+ } else if (cnt.msgs % 1000 == 0) {
+ rd_kafka_poll(rk, 0);
+ }
+
+ print_stats(rk, mode, otype, compression);
+ }
+
+ forever = 0;
+ if (verbosity >= 2)
+ printf(
+ "%% All messages produced, "
+ "now waiting for %li deliveries\n",
+ msgs_wait_cnt);
+
+ /* Wait for messages to be delivered */
+ while (run && rd_kafka_poll(rk, 1000) != -1)
+ print_stats(rk, mode, otype, compression);
+
+
+ outq = rd_kafka_outq_len(rk);
+ if (verbosity >= 2)
+ printf("%% %i messages in outq\n", outq);
+ cnt.msgs -= outq;
+ cnt.t_end = t_end;
+
+ if (cnt.tx_err > 0)
+ printf("%% %" PRIu64 " backpressures for %" PRIu64
+ " produce calls: %.3f%% backpressure rate\n",
+ cnt.tx_err, cnt.tx,
+ ((double)cnt.tx_err / (double)cnt.tx) * 100.0);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy the handle */
+ rd_kafka_destroy(rk);
+ global_rk = rk = NULL;
+
+ free(sbuf);
+
+ exitcode = cnt.msgs == cnt.msgs_dr_ok ? 0 : 1;
+
+ } else if (mode == 'C') {
+ /*
+ * Consumer
+ */
+
+ rd_kafka_message_t **rkmessages = NULL;
+ size_t i = 0;
+
+ /* Create Kafka handle */
+ if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr,
+ sizeof(errstr)))) {
+ fprintf(stderr,
+ "%% Failed to create Kafka consumer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ global_rk = rk;
+
+ /* Create topic to consume from */
+ rkt = rd_kafka_topic_new(rk, topic, NULL);
+
+ /* Batch consumer */
+ if (batch_size)
+ rkmessages = malloc(sizeof(*rkmessages) * batch_size);
+
+ /* Start consuming */
+ rkqu = rd_kafka_queue_new(rk);
+ for (i = 0; i < (size_t)partition_cnt; ++i) {
+ const int r = rd_kafka_consume_start_queue(
+ rkt, partitions[i], start_offset, rkqu);
+
+ if (r == -1) {
+ fprintf(
+ stderr, "%% Error creating queue: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+ exit(1);
+ }
+ }
+
+ while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) {
+ /* Consume messages.
+ * A message may either be a real message, or
+			 * an error signal (if rkmessage->err is set).
+ */
+ uint64_t fetch_latency;
+ ssize_t r;
+
+ fetch_latency = rd_clock();
+
+ if (batch_size) {
+ int partition = partitions
+ ? partitions[0]
+ : RD_KAFKA_PARTITION_UA;
+
+ /* Batch fetch mode */
+ r = rd_kafka_consume_batch(rkt, partition, 1000,
+ rkmessages,
+ batch_size);
+ if (r != -1) {
+ for (i = 0; (ssize_t)i < r; i++) {
+ msg_consume(rkmessages[i],
+ NULL);
+ rd_kafka_message_destroy(
+ rkmessages[i]);
+ }
+ }
+ } else {
+ /* Queue mode */
+ r = rd_kafka_consume_callback_queue(
+ rkqu, 1000, msg_consume, NULL);
+ }
+
+ cnt.t_fetch_latency += rd_clock() - fetch_latency;
+ if (r == -1)
+ fprintf(
+ stderr, "%% Error: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+ else if (r > 0 && rate_sleep) {
+ /* Simulate processing time
+ * if `-r <rate>` was set. */
+ do_sleep(rate_sleep);
+ }
+
+
+ print_stats(rk, mode, otype, compression);
+
+ /* Poll to handle stats callbacks */
+ rd_kafka_poll(rk, 0);
+ }
+ cnt.t_end = rd_clock();
+
+		/* Stop consuming the partitions that were started above */
+		for (i = 0; i < (size_t)partition_cnt; ++i) {
+			int r = rd_kafka_consume_stop(
+			    rkt, (int32_t)partitions[i]);
+ if (r == -1) {
+ fprintf(
+ stderr, "%% Error in consume_stop: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+ }
+ }
+ rd_kafka_queue_destroy(rkqu);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ if (batch_size)
+ free(rkmessages);
+
+ /* Destroy the handle */
+ rd_kafka_destroy(rk);
+
+ global_rk = rk = NULL;
+
+ } else if (mode == 'G') {
+ /*
+ * High-level balanced Consumer
+ */
+ rd_kafka_message_t **rkmessages = NULL;
+
+ rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
+
+ /* Create Kafka handle */
+ if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr,
+ sizeof(errstr)))) {
+ fprintf(stderr,
+ "%% Failed to create Kafka consumer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ /* Forward all events to consumer queue */
+ rd_kafka_poll_set_consumer(rk);
+
+ global_rk = rk;
+
+ err = rd_kafka_subscribe(rk, topics);
+ if (err) {
+ fprintf(stderr, "%% Subscribe failed: %s\n",
+ rd_kafka_err2str(err));
+ exit(1);
+ }
+ fprintf(stderr, "%% Waiting for group rebalance..\n");
+
+ if (batch_size) {
+ rkmessages = malloc(sizeof(*rkmessages) * batch_size);
+ } else {
+ rkmessages = malloc(sizeof(*rkmessages));
+ }
+
+ rkqu = rd_kafka_queue_get_consumer(rk);
+
+ while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) {
+ /* Consume messages.
+ * A message may either be a real message, or
+ * an event (if rkmessage->err is set).
+ */
+ uint64_t fetch_latency;
+ ssize_t r;
+
+ fetch_latency = rd_clock();
+
+ if (batch_size) {
+ /* Batch fetch mode */
+ ssize_t i = 0;
+ r = rd_kafka_consume_batch_queue(
+ rkqu, 1000, rkmessages, batch_size);
+ if (r != -1) {
+ for (i = 0; i < r; i++) {
+ msg_consume(rkmessages[i],
+ NULL);
+ rd_kafka_message_destroy(
+ rkmessages[i]);
+ }
+ }
+
+ if (r == -1)
+ fprintf(stderr, "%% Error: %s\n",
+ rd_kafka_err2str(
+ rd_kafka_last_error()));
+ else if (r > 0 && rate_sleep) {
+ /* Simulate processing time
+ * if `-r <rate>` was set. */
+ do_sleep(rate_sleep);
+ }
+
+ } else {
+ rkmessages[0] =
+ rd_kafka_consumer_poll(rk, 1000);
+ if (rkmessages[0]) {
+ msg_consume(rkmessages[0], NULL);
+ rd_kafka_message_destroy(rkmessages[0]);
+
+ /* Simulate processing time
+ * if `-r <rate>` was set. */
+ if (rate_sleep)
+ do_sleep(rate_sleep);
+ }
+ }
+
+ cnt.t_fetch_latency += rd_clock() - fetch_latency;
+
+ print_stats(rk, mode, otype, compression);
+ }
+ cnt.t_end = rd_clock();
+
+ err = rd_kafka_consumer_close(rk);
+ if (err)
+ fprintf(stderr, "%% Failed to close consumer: %s\n",
+ rd_kafka_err2str(err));
+
+ free(rkmessages);
+ rd_kafka_queue_destroy(rkqu);
+ rd_kafka_destroy(rk);
+ }
+
+ if (hdrs)
+ rd_kafka_headers_destroy(hdrs);
+
+ print_stats(NULL, mode, otype | _OTYPE_FORCE, compression);
+
+ if (cnt.t_fetch_latency && cnt.msgs)
+ printf("%% Average application fetch latency: %" PRIu64 "us\n",
+ cnt.t_fetch_latency / cnt.msgs);
+
+ if (latency_fp)
+ fclose(latency_fp);
+
+ if (stats_fp) {
+#ifndef _WIN32
+ pclose(stats_fp);
+#endif
+ stats_fp = NULL;
+ }
+
+ if (partitions)
+ free(partitions);
+
+ rd_kafka_topic_partition_list_destroy(topics);
+
+ /* Let background threads clean up and terminate cleanly. */
+ rd_kafka_wait_destroyed(2000);
+
+ return exitcode;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/transactions-older-broker.c b/fluent-bit/lib/librdkafka-2.1.0/examples/transactions-older-broker.c
new file mode 100644
index 00000000..e9f8d06f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/transactions-older-broker.c
@@ -0,0 +1,668 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @name Transactions example for Apache Kafka <= 2.4.0 (no KIP-447 support).
+ *
+ * This example showcases a simple transactional consume-process-produce
+ * application that reads messages from an input topic, extracts all
+ * numbers from the message's value string, adds them up, and sends
+ * the sum to the output topic as part of a transaction.
+ * The transaction is committed every 5 seconds or 100 messages, whichever
+ * comes first. When a transaction is committed a new one is started.
+ *
+ * @remark This example does not yet support incremental rebalancing and
+ *         therefore does not support the cooperative-sticky
+ *         partition.assignment.strategy.
+ */
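+
+/*
+ * Example run (broker address and topic names are placeholders):
+ *
+ *   $ ./transactions-older-broker localhost:9092 input-topic output-topic
+ *
+ * Input messages can be generated with the plain producer example:
+ *
+ *   $ seq 1 100 | examples/producer localhost:9092 input-topic
+ */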
+
+#include <stdio.h>
+#include <signal.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <time.h>
+#include <ctype.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+static volatile sig_atomic_t run = 1;
+
+static rd_kafka_t *consumer;
+
+/* From command-line arguments */
+static const char *brokers, *input_topic, *output_topic;
+
+
+/**
+ * @struct Per-input-partition state, consisting of a transactional producer
+ * and the in-memory state for the current transaction.
+ * This demo simply finds all numbers (ASCII digit strings) in the message
+ * payload and adds them up.
+ */
+struct state {
+ rd_kafka_t *producer; /**< Per-input partition output producer */
+ rd_kafka_topic_partition_t *rktpar; /**< Back-pointer to the
+ * input partition. */
+ time_t last_commit; /**< Last transaction commit */
+ int msgcnt; /**< Number of messages processed in current txn */
+};
+/* Current assignment for the input consumer.
+ * The .opaque field of each partition points to an allocated 'struct state'.
+ */
+static rd_kafka_topic_partition_list_t *assigned_partitions;
+
+
+
+/**
+ * @brief A fatal error has occurred, immediately exit the application.
+ */
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "FATAL ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(1); \
+ } while (0)
+
+/**
+ * @brief Same as fatal() but takes an rd_kafka_error_t object, prints its
+ * error message, destroys the object and then exits fatally.
+ */
+#define fatal_error(what, error) \
+ do { \
+ fprintf(stderr, "FATAL ERROR: %s: %s: %s\n", what, \
+ rd_kafka_error_name(error), \
+ rd_kafka_error_string(error)); \
+ rd_kafka_error_destroy(error); \
+ exit(1); \
+ } while (0)
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ run = 0;
+}
+
+
+/**
+ * @brief Message delivery report callback.
+ *
+ * This callback is called exactly once per message, indicating if
+ * the message was successfully delivered
+ * (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR) or permanently
+ * failed delivery (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
+ *
+ * The callback is triggered from rd_kafka_poll(), rd_kafka_flush(),
+ * rd_kafka_abort_transaction() and rd_kafka_commit_transaction() and
+ * executes on the application's thread.
+ *
+ * The current transaction will enter the abortable state if any
+ * message permanently fails delivery and the application must then
+ * call rd_kafka_abort_transaction(). This does not need to be done from
+ * here, though: the state is checked by all the transactional APIs, so it
+ * is better to perform this error checking when calling
+ * rd_kafka_send_offsets_to_transaction() and rd_kafka_commit_transaction().
+ * For transactional producing, the delivery report callback is
+ * mostly useful for logging produce failures.
+ */
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ if (rkmessage->err)
+ fprintf(stderr, "%% Message delivery failed: %s\n",
+ rd_kafka_err2str(rkmessage->err));
+
+ /* The rkmessage is destroyed automatically by librdkafka */
+}
+
+
+
+/**
+ * @brief Create a transactional producer for the given input partition
+ * and begin a new transaction.
+ */
+static rd_kafka_t *
+create_transactional_producer(const rd_kafka_topic_partition_t *rktpar) {
+ rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ rd_kafka_t *rk;
+ char errstr[256];
+ rd_kafka_error_t *error;
+ char transactional_id[256];
+
+ snprintf(transactional_id, sizeof(transactional_id),
+ "librdkafka_transactions_older_example_%s-%d", rktpar->topic,
+ rktpar->partition);
+
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ rd_kafka_conf_set(conf, "transactional.id", transactional_id,
+ errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ rd_kafka_conf_set(conf, "transaction.timeout.ms", "60000", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ fatal("Failed to configure producer: %s", errstr);
+
+ /* This callback will be called once per message to indicate
+ * final delivery status. */
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+ /* Create producer */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ rd_kafka_conf_destroy(conf);
+ fatal("Failed to create producer: %s", errstr);
+ }
+
+	/* Initialize transactions; this is only performed once
+	 * per transactional producer, to acquire its producer id, etc. */
+ error = rd_kafka_init_transactions(rk, -1);
+ if (error)
+ fatal_error("init_transactions()", error);
+
+
+ /* Begin a new transaction */
+ error = rd_kafka_begin_transaction(rk);
+ if (error)
+ fatal_error("begin_transaction()", error);
+
+ return rk;
+}
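+
+/*
+ * Note: the transactional.id embeds the input topic and partition so that
+ * each input partition maps to exactly one transactional producer. Prior
+ * to KIP-447 this mapping is what provides fencing: when a new consumer
+ * takes over the partition it re-initializes the same transactional.id,
+ * which bumps the producer epoch and fences off any older ("zombie")
+ * producer instance for that partition.
+ */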
+
+
+/**
+ * @brief Abort the current transaction and destroy the producer.
+ */
+static void destroy_transactional_producer(rd_kafka_t *rk) {
+ rd_kafka_error_t *error;
+
+ fprintf(stdout, "%s: aborting transaction and terminating producer\n",
+ rd_kafka_name(rk));
+
+ /* Abort the current transaction, ignore any errors
+ * since we're terminating the producer anyway. */
+ error = rd_kafka_abort_transaction(rk, -1);
+ if (error) {
+ fprintf(stderr,
+ "WARNING: Ignoring abort_transaction() error since "
+ "producer is being destroyed: %s\n",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ }
+
+ rd_kafka_destroy(rk);
+}
+
+
+
+/**
+ * @brief Abort the current transaction and rewind consumer offsets to
+ * position where the transaction last started, i.e., the committed
+ * consumer offset.
+ */
+static void abort_transaction_and_rewind(struct state *state) {
+ rd_kafka_topic_t *rkt =
+ rd_kafka_topic_new(consumer, state->rktpar->topic, NULL);
+ rd_kafka_topic_partition_list_t *offset;
+ rd_kafka_resp_err_t err;
+ rd_kafka_error_t *error;
+
+ fprintf(stdout,
+ "Aborting transaction and rewinding offset for %s [%d]\n",
+ state->rktpar->topic, state->rktpar->partition);
+
+ /* Abort the current transaction */
+ error = rd_kafka_abort_transaction(state->producer, -1);
+ if (error)
+ fatal_error("Failed to abort transaction", error);
+
+ /* Begin a new transaction */
+ error = rd_kafka_begin_transaction(state->producer);
+ if (error)
+ fatal_error("Failed to begin transaction", error);
+
+ /* Get committed offset for this partition */
+ offset = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offset, state->rktpar->topic,
+ state->rktpar->partition);
+
+ /* Note: Timeout must be lower than max.poll.interval.ms */
+ err = rd_kafka_committed(consumer, offset, 10 * 1000);
+ if (err)
+ fatal("Failed to acquire committed offset for %s [%d]: %s",
+ state->rktpar->topic, (int)state->rktpar->partition,
+ rd_kafka_err2str(err));
+
+	/* Seek to the committed offset, or to the start of the partition
+	 * if no committed offset is available. */
+ err = rd_kafka_seek(rkt, state->rktpar->partition,
+ offset->elems[0].offset < 0
+ ?
+ /* No committed offset, start from beginning */
+ RD_KAFKA_OFFSET_BEGINNING
+ :
+ /* Use committed offset */
+ offset->elems[0].offset,
+ 0);
+
+ if (err)
+ fatal("Failed to seek %s [%d]: %s", state->rktpar->topic,
+ (int)state->rktpar->partition, rd_kafka_err2str(err));
+
+ rd_kafka_topic_destroy(rkt);
+}
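+
+/*
+ * Note: rewinding to the committed offset after an abort makes the
+ * consumer re-deliver every input message whose output was part of the
+ * aborted transaction, so the next transaction can reprocess them and
+ * no input is lost.
+ */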
+
+
+/**
+ * @brief Commit the current transaction and start a new transaction.
+ */
+static void commit_transaction_and_start_new(struct state *state) {
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+ rd_kafka_consumer_group_metadata_t *cgmd;
+ rd_kafka_topic_partition_list_t *offset;
+
+ fprintf(stdout, "Committing transaction for %s [%d]\n",
+ state->rktpar->topic, state->rktpar->partition);
+
+ /* Send the input consumer's offset to transaction
+ * to commit those offsets along with the transaction itself,
+ * this is what guarantees exactly-once-semantics (EOS), that
+ * input (offsets) and output (messages) are committed atomically. */
+
+ /* Get the consumer's current group state */
+ cgmd = rd_kafka_consumer_group_metadata(consumer);
+
+ /* Get consumer's current position for this partition */
+ offset = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offset, state->rktpar->topic,
+ state->rktpar->partition);
+ err = rd_kafka_position(consumer, offset);
+ if (err)
+ fatal("Failed to get consumer position for %s [%d]: %s",
+ state->rktpar->topic, state->rktpar->partition,
+ rd_kafka_err2str(err));
+
+ /* Send offsets to transaction coordinator */
+ error = rd_kafka_send_offsets_to_transaction(state->producer, offset,
+ cgmd, -1);
+ rd_kafka_consumer_group_metadata_destroy(cgmd);
+ rd_kafka_topic_partition_list_destroy(offset);
+ if (error) {
+ if (rd_kafka_error_txn_requires_abort(error)) {
+ fprintf(stderr,
+ "WARNING: Failed to send offsets to "
+ "transaction: %s: %s: aborting transaction\n",
+ rd_kafka_error_name(error),
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ abort_transaction_and_rewind(state);
+ return;
+ } else {
+ fatal_error("Failed to send offsets to transaction",
+ error);
+ }
+ }
+
+ /* Commit the transaction */
+ error = rd_kafka_commit_transaction(state->producer, -1);
+ if (error) {
+ if (rd_kafka_error_txn_requires_abort(error)) {
+ fprintf(stderr,
+ "WARNING: Failed to commit transaction: "
+ "%s: %s: aborting transaction\n",
+ rd_kafka_error_name(error),
+ rd_kafka_error_string(error));
+ abort_transaction_and_rewind(state);
+ rd_kafka_error_destroy(error);
+ return;
+ } else {
+ fatal_error("Failed to commit transaction", error);
+ }
+ }
+
+ /* Begin new transaction */
+ error = rd_kafka_begin_transaction(state->producer);
+ if (error)
+ fatal_error("Failed to begin new transaction", error);
+}
+
+/**
+ * @brief The rebalance will be triggered (from rd_kafka_consumer_poll())
+ * when the consumer's partition assignment is assigned or revoked.
+ *
+ * Prior to KIP-447 support there must be one transactional output
+ * producer for each consumed input partition, so we create and destroy
+ * these producers from this callback.
+ */
+static void
+consumer_group_rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque) {
+ int i;
+
+ if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
+ fatal(
+ "This example has not yet been modified to work with "
+ "cooperative incremental rebalancing "
+ "(partition.assignment.strategy=cooperative-sticky)");
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ assigned_partitions =
+ rd_kafka_topic_partition_list_copy(partitions);
+
+ fprintf(stdout, "Consumer group rebalanced, new assignment:\n");
+
+ /* Create a transactional producer for each input partition */
+ for (i = 0; i < assigned_partitions->cnt; i++) {
+ /* Store the partition-to-producer mapping
+ * in the partition's opaque field. */
+ rd_kafka_topic_partition_t *rktpar =
+ &assigned_partitions->elems[i];
+ struct state *state = calloc(1, sizeof(*state));
+
+ state->producer = create_transactional_producer(rktpar);
+ state->rktpar = rktpar;
+ rktpar->opaque = state;
+ state->last_commit = time(NULL);
+
+ fprintf(stdout,
+ " %s [%d] with transactional producer %s\n",
+ rktpar->topic, rktpar->partition,
+ rd_kafka_name(state->producer));
+ }
+
+ /* Let the consumer know the rebalance has been handled
+ * by calling assign.
+ * This will also tell the consumer to start fetching messages
+ * for the assigned partitions. */
+ rd_kafka_assign(rk, partitions);
+ break;
+
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ fprintf(stdout,
+ "Consumer group rebalanced, assignment revoked\n");
+
+ /* Abort the current transactions and destroy all producers */
+ for (i = 0; i < assigned_partitions->cnt; i++) {
+			/* Retrieve the partition-to-producer mapping
+			 * from the partition's opaque field. */
+ struct state *state =
+ (struct state *)assigned_partitions->elems[i]
+ .opaque;
+
+ destroy_transactional_producer(state->producer);
+ free(state);
+ }
+
+ rd_kafka_topic_partition_list_destroy(assigned_partitions);
+ assigned_partitions = NULL;
+
+ /* Let the consumer know the rebalance has been handled
+ * and revoke the current assignment. */
+ rd_kafka_assign(rk, NULL);
+ break;
+
+ default:
+ /* NOTREACHED */
+ fatal("Unexpected rebalance event: %s", rd_kafka_err2name(err));
+ }
+}
+
+
+/**
+ * @brief Create the input consumer.
+ */
+static rd_kafka_t *create_input_consumer(const char *brokers,
+ const char *input_topic) {
+ rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ rd_kafka_t *rk;
+ char errstr[256];
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *topics;
+
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ rd_kafka_conf_set(conf, "group.id",
+ "librdkafka_transactions_older_example_group",
+ errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ /* The input consumer's offsets are explicitly committed with the
+ * output producer's transaction using
+ * rd_kafka_send_offsets_to_transaction(), so auto commits
+ * must be disabled. */
+ rd_kafka_conf_set(conf, "enable.auto.commit", "false", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fatal("Failed to configure consumer: %s", errstr);
+ }
+
+ /* This callback will be called when the consumer group is rebalanced
+ * and the consumer's partition assignment is assigned or revoked. */
+ rd_kafka_conf_set_rebalance_cb(conf, consumer_group_rebalance_cb);
+
+ /* Create consumer */
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ rd_kafka_conf_destroy(conf);
+ fatal("Failed to create consumer: %s", errstr);
+ }
+
+ /* Forward all partition messages to the main queue and
+ * rd_kafka_consumer_poll(). */
+ rd_kafka_poll_set_consumer(rk);
+
+ /* Subscribe to the input topic */
+ topics = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(topics, input_topic,
+ /* The partition is ignored in
+ * rd_kafka_subscribe() */
+ RD_KAFKA_PARTITION_UA);
+ err = rd_kafka_subscribe(rk, topics);
+ rd_kafka_topic_partition_list_destroy(topics);
+ if (err) {
+ rd_kafka_destroy(rk);
+ fatal("Failed to subscribe to %s: %s\n", input_topic,
+ rd_kafka_err2str(err));
+ }
+
+ return rk;
+}
+
+
+/**
+ * @brief Find and parse next integer string in \p start.
+ * @returns Pointer after found integer string, or NULL if not found.
+ */
+static const void *
+find_next_int(const void *start, const void *end, int *intp) {
+ const char *p;
+ int collecting = 0;
+ int num = 0;
+
+ for (p = (const char *)start; p < (const char *)end; p++) {
+ if (isdigit((int)(*p))) {
+ collecting = 1;
+ num = (num * 10) + ((int)*p - ((int)'0'));
+ } else if (collecting)
+ break;
+ }
+
+ if (!collecting)
+ return NULL; /* No integer string found */
+
+ *intp = num;
+
+ return p;
+}
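+
+/*
+ * Worked example (illustrative): for the payload "12 apples, 7 pears",
+ * a first call to find_next_int() sets *intp to 12 and returns a pointer
+ * just past the "12"; a second call on the remainder sets *intp to 7.
+ * A payload with no digits returns NULL and leaves *intp untouched.
+ */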
+
+
+/**
+ * @brief Process a message from the input consumer by parsing all
+ *        integer strings, adding them up, and then producing the sum to
+ *        the output topic using the transactional producer for the given
+ *        input partition.
+ */
+static void process_message(struct state *state,
+ const rd_kafka_message_t *rkmessage) {
+ int num;
+ long unsigned sum = 0;
+ const void *p, *end;
+ rd_kafka_resp_err_t err;
+ char value[64];
+
+ if (rkmessage->len == 0)
+ return; /* Ignore empty messages */
+
+ p = rkmessage->payload;
+ end = ((const char *)rkmessage->payload) + rkmessage->len;
+
+ /* Find and sum all numbers in the message */
+ while ((p = find_next_int(p, end, &num)))
+ sum += num;
+
+ if (sum == 0)
+ return; /* No integers in message, ignore it. */
+
+ snprintf(value, sizeof(value), "%lu", sum);
+
+ /* Emit output message on transactional producer */
+ while (1) {
+ err = rd_kafka_producev(
+ state->producer, RD_KAFKA_V_TOPIC(output_topic),
+ /* Use same key as input message */
+ RD_KAFKA_V_KEY(rkmessage->key, rkmessage->key_len),
+ /* Value is the current sum of this
+ * transaction. */
+ RD_KAFKA_V_VALUE(value, strlen(value)),
+ /* Copy value since it is allocated on the stack */
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
+
+ if (!err)
+ break;
+ else if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+ /* If output queue fills up we need to wait for
+ * some delivery reports and then retry. */
+ rd_kafka_poll(state->producer, 100);
+ continue;
+ } else {
+ fprintf(stderr,
+ "WARNING: Failed to produce message to %s: "
+ "%s: aborting transaction\n",
+ output_topic, rd_kafka_err2str(err));
+ abort_transaction_and_rewind(state);
+ return;
+ }
+ }
+}
+
+
+int main(int argc, char **argv) {
+ /*
+ * Argument validation
+ */
+ if (argc != 4) {
+ fprintf(stderr,
+ "%% Usage: %s <broker> <input-topic> <output-topic>\n",
+ argv[0]);
+ return 1;
+ }
+
+ brokers = argv[1];
+ input_topic = argv[2];
+ output_topic = argv[3];
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ consumer = create_input_consumer(brokers, input_topic);
+
+ fprintf(stdout,
+ "Expecting integers to sum on input topic %s ...\n"
+ "To generate input messages you can use:\n"
+ " $ seq 1 100 | examples/producer %s %s\n",
+ input_topic, brokers, input_topic);
+
+ while (run) {
+ rd_kafka_message_t *msg;
+ struct state *state;
+ rd_kafka_topic_partition_t *rktpar;
+
+		/* Wait for new messages or error events */
+ msg = rd_kafka_consumer_poll(consumer, 1000 /*1 second*/);
+ if (!msg)
+ continue;
+
+ if (msg->err) {
+ /* Client errors are typically just informational
+ * since the client will automatically try to recover
+ * from all types of errors.
+ * It is thus sufficient for the application to log and
+ * continue operating when an error is received. */
+ fprintf(stderr, "WARNING: Consumer error: %s\n",
+ rd_kafka_message_errstr(msg));
+ rd_kafka_message_destroy(msg);
+ continue;
+ }
+
+ /* Find output producer for this input partition */
+ rktpar = rd_kafka_topic_partition_list_find(
+ assigned_partitions, rd_kafka_topic_name(msg->rkt),
+ msg->partition);
+ if (!rktpar)
+ fatal(
+ "BUG: No output producer for assigned "
+ "partition %s [%d]",
+ rd_kafka_topic_name(msg->rkt), (int)msg->partition);
+
+ /* Get state struct for this partition */
+ state = (struct state *)rktpar->opaque;
+
+ /* Process message */
+ process_message(state, msg);
+
+ rd_kafka_message_destroy(msg);
+
+ /* Commit transaction every 100 messages or 5 seconds */
+ if (++state->msgcnt > 100 ||
+ state->last_commit + 5 <= time(NULL)) {
+ commit_transaction_and_start_new(state);
+ state->msgcnt = 0;
+ state->last_commit = time(NULL);
+ }
+ }
+
+ fprintf(stdout, "Closing consumer\n");
+ rd_kafka_consumer_close(consumer);
+
+ rd_kafka_destroy(consumer);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/transactions.c b/fluent-bit/lib/librdkafka-2.1.0/examples/transactions.c
new file mode 100644
index 00000000..0a8b9a8c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/transactions.c
@@ -0,0 +1,665 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @name Transactions example for Apache Kafka 2.5.0 (KIP-447) and later.
+ *
+ * This example showcases a simple transactional consume-process-produce
+ * application that reads messages from an input topic, extracts all
+ * numbers from the message's value string, adds them up, and sends
+ * the sum to the output topic as part of a transaction.
+ * The transaction is committed every 5 seconds or 100 messages, whichever
+ * comes first. When a transaction is committed a new one is started.
+ *
+ * This example makes use of incremental rebalancing (KIP-429) and the
+ * cooperative-sticky partition.assignment.strategy on the consumer, providing
+ * hitless rebalances.
+ */
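+
+/*
+ * Example run (assuming the same command line as the older-broker
+ * variant; broker address and topic names are placeholders):
+ *
+ *   $ ./transactions localhost:9092 input-topic output-topic
+ */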
+
+#include <stdio.h>
+#include <signal.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <time.h>
+#include <ctype.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief A fatal error has occurred, immediately exit the application.
+ */
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "FATAL ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(1); \
+ } while (0)
+
+/**
+ * @brief Same as fatal() but takes an rd_kafka_error_t object, prints its
+ * error message, destroys the object and then exits fatally.
+ */
+#define fatal_error(what, error) \
+ do { \
+ fprintf(stderr, "FATAL ERROR: %s: %s: %s\n", what, \
+ rd_kafka_error_name(error), \
+ rd_kafka_error_string(error)); \
+ rd_kafka_error_destroy(error); \
+ exit(1); \
+ } while (0)
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ run = 0;
+}
+
+
+/**
+ * @brief Message delivery report callback.
+ *
+ * This callback is called exactly once per message, indicating if
+ * the message was successfully delivered
+ * (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR) or permanently
+ * failed delivery (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
+ *
+ * The callback is triggered from rd_kafka_poll(), rd_kafka_flush(),
+ * rd_kafka_abort_transaction() and rd_kafka_commit_transaction() and
+ * executes on the application's thread.
+ *
+ * The current transaction will enter the abortable state if any
+ * message permanently fails delivery and the application must then
+ * call rd_kafka_abort_transaction(). This does not need to be done from
+ * here, though: the state is checked by all the transactional APIs, so it
+ * is better to perform this error checking when calling
+ * rd_kafka_send_offsets_to_transaction() and rd_kafka_commit_transaction().
+ * For transactional producing, the delivery report callback is
+ * mostly useful for logging produce failures.
+ */
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ if (rkmessage->err)
+ fprintf(stderr, "%% Message delivery failed: %s\n",
+ rd_kafka_err2str(rkmessage->err));
+
+ /* The rkmessage is destroyed automatically by librdkafka */
+}
+
+
+
+/**
+ * @brief Create a transactional producer.
+ */
+static rd_kafka_t *create_transactional_producer(const char *brokers,
+ const char *output_topic) {
+ rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ rd_kafka_t *rk;
+ char errstr[256];
+ rd_kafka_error_t *error;
+
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ rd_kafka_conf_set(conf, "transactional.id",
+ "librdkafka_transactions_example", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ fatal("Failed to configure producer: %s", errstr);
+
+ /* This callback will be called once per message to indicate
+ * final delivery status. */
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+ /* Create producer */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ rd_kafka_conf_destroy(conf);
+ fatal("Failed to create producer: %s", errstr);
+ }
+
+	/* Initialize transactions; this is only performed once
+	 * per transactional producer, to acquire its producer id, etc. */
+ error = rd_kafka_init_transactions(rk, -1);
+ if (error)
+ fatal_error("init_transactions()", error);
+
+ return rk;
+}
+
+
+/**
+ * @brief Rewind consumer's consume position to the last committed offsets
+ * for the current assignment.
+ */
+static void rewind_consumer(rd_kafka_t *consumer) {
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_resp_err_t err;
+ rd_kafka_error_t *error;
+ int i;
+
+ /* Get committed offsets for the current assignment, if there
+ * is a current assignment. */
+ err = rd_kafka_assignment(consumer, &offsets);
+ if (err) {
+ fprintf(stderr, "No current assignment to rewind: %s\n",
+ rd_kafka_err2str(err));
+ return;
+ }
+
+ if (offsets->cnt == 0) {
+ fprintf(stderr, "No current assignment to rewind\n");
+ rd_kafka_topic_partition_list_destroy(offsets);
+ return;
+ }
+
+ /* Note: Timeout must be lower than max.poll.interval.ms */
+ err = rd_kafka_committed(consumer, offsets, 10 * 1000);
+ if (err)
+ fatal("Failed to acquire committed offsets: %s",
+ rd_kafka_err2str(err));
+
+ /* Seek to committed offset, or start of partition if no
+ * committed offset is available. */
+ for (i = 0; i < offsets->cnt; i++) {
+ /* No committed offset, start from beginning */
+ if (offsets->elems[i].offset < 0)
+ offsets->elems[i].offset = RD_KAFKA_OFFSET_BEGINNING;
+ }
+
+ /* Perform seek */
+ error = rd_kafka_seek_partitions(consumer, offsets, -1);
+ if (error)
+ fatal_error("Failed to seek", error);
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+}
+
+/**
+ * @brief Abort the current transaction and rewind consumer offsets to
+ * position where the transaction last started, i.e., the committed
+ * consumer offset, then begin a new transaction.
+ */
+static void abort_transaction_and_rewind(rd_kafka_t *consumer,
+ rd_kafka_t *producer) {
+ rd_kafka_error_t *error;
+
+ fprintf(stdout, "Aborting transaction and rewinding offsets\n");
+
+ /* Abort the current transaction */
+ error = rd_kafka_abort_transaction(producer, -1);
+ if (error)
+ fatal_error("Failed to abort transaction", error);
+
+ /* Rewind consumer */
+ rewind_consumer(consumer);
+
+ /* Begin a new transaction */
+ error = rd_kafka_begin_transaction(producer);
+ if (error)
+ fatal_error("Failed to begin transaction", error);
+}
+
+
+/**
+ * @brief Commit the current transaction.
+ *
+ * @returns 1 if transaction was successfully committed, or 0
+ * if the current transaction was aborted.
+ */
+static int commit_transaction(rd_kafka_t *consumer, rd_kafka_t *producer) {
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+ rd_kafka_consumer_group_metadata_t *cgmd;
+ rd_kafka_topic_partition_list_t *offsets;
+
+ fprintf(stdout, "Committing transaction\n");
+
+ /* Send the input consumer's offset to transaction
+ * to commit those offsets along with the transaction itself,
+ * this is what guarantees exactly-once-semantics (EOS), that
+ * input (offsets) and output (messages) are committed atomically. */
+
+ /* Get the consumer's current group metadata state */
+ cgmd = rd_kafka_consumer_group_metadata(consumer);
+
+ /* Get consumer's current assignment */
+ err = rd_kafka_assignment(consumer, &offsets);
+ if (err || offsets->cnt == 0) {
+ /* No partition offsets to commit because consumer
+ * (most likely) lost the assignment, abort transaction. */
+ if (err)
+ fprintf(stderr,
+ "Failed to get consumer assignment to commit: "
+ "%s\n",
+ rd_kafka_err2str(err));
+ else
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ error = rd_kafka_abort_transaction(producer, -1);
+ if (error)
+ fatal_error("Failed to abort transaction", error);
+
+ return 0;
+ }
+
+ /* Get consumer's current position for this partition */
+ err = rd_kafka_position(consumer, offsets);
+ if (err)
+ fatal("Failed to get consumer position: %s",
+ rd_kafka_err2str(err));
+
+ /* Send offsets to transaction coordinator */
+ error =
+ rd_kafka_send_offsets_to_transaction(producer, offsets, cgmd, -1);
+ rd_kafka_consumer_group_metadata_destroy(cgmd);
+ rd_kafka_topic_partition_list_destroy(offsets);
+ if (error) {
+ if (rd_kafka_error_txn_requires_abort(error)) {
+ fprintf(stderr,
+ "WARNING: Failed to send offsets to "
+ "transaction: %s: %s: aborting transaction\n",
+ rd_kafka_error_name(error),
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+
+ /* Abort transaction */
+ error = rd_kafka_abort_transaction(producer, -1);
+ if (error)
+ fatal_error("Failed to abort transaction",
+ error);
+ return 0;
+ } else {
+ fatal_error("Failed to send offsets to transaction",
+ error);
+ }
+ }
+
+ /* Commit the transaction */
+ error = rd_kafka_commit_transaction(producer, -1);
+ if (error) {
+ if (rd_kafka_error_txn_requires_abort(error)) {
+ fprintf(stderr,
+ "WARNING: Failed to commit transaction: "
+ "%s: %s: aborting transaction\n",
+ rd_kafka_error_name(error),
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+
+ /* Abort transaction */
+ error = rd_kafka_abort_transaction(producer, -1);
+ if (error)
+ fatal_error("Failed to abort transaction",
+ error);
+ return 0;
+ } else {
+ fatal_error("Failed to commit transaction", error);
+ }
+ }
+
+ return 1;
+}
+
+/**
+ * @brief Commit the current transaction and start a new transaction.
+ */
+static void commit_transaction_and_start_new(rd_kafka_t *consumer,
+ rd_kafka_t *producer) {
+ rd_kafka_error_t *error;
+
+ /* Commit transaction.
+ * If commit failed the transaction is aborted and we need
+ * to rewind the consumer to the last committed offsets. */
+ if (!commit_transaction(consumer, producer))
+ rewind_consumer(consumer);
+
+ /* Begin new transaction */
+ error = rd_kafka_begin_transaction(producer);
+ if (error)
+ fatal_error("Failed to begin new transaction", error);
+}
+
+/**
+ * @brief The rebalance will be triggered (from rd_kafka_consumer_poll())
+ * when the consumer's partition assignment is assigned or revoked.
+ */
+static void
+consumer_group_rebalance_cb(rd_kafka_t *consumer,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque) {
+ rd_kafka_t *producer = (rd_kafka_t *)opaque;
+ rd_kafka_error_t *error;
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ fprintf(stdout,
+ "Consumer group rebalanced: "
+ "%d new partition(s) assigned\n",
+ partitions->cnt);
+
+ /* Start fetching messages for the assigned partitions
+ * and add them to the consumer's local assignment. */
+ error = rd_kafka_incremental_assign(consumer, partitions);
+ if (error)
+ fatal_error("Incremental assign failed", error);
+ break;
+
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ if (rd_kafka_assignment_lost(consumer)) {
+ fprintf(stdout,
+ "Consumer group rebalanced: assignment lost: "
+ "aborting current transaction\n");
+
+ error = rd_kafka_abort_transaction(producer, -1);
+ if (error)
+ fatal_error("Failed to abort transaction",
+ error);
+ } else {
+ fprintf(stdout,
+ "Consumer group rebalanced: %d partition(s) "
+ "revoked: committing current transaction\n",
+ partitions->cnt);
+
+ commit_transaction(consumer, producer);
+ }
+
+ /* Begin new transaction */
+ error = rd_kafka_begin_transaction(producer);
+ if (error)
+ fatal_error("Failed to begin transaction", error);
+
+ /* Stop fetching messages for the revoked partitions
+ * and remove them from the consumer's local assignment. */
+ error = rd_kafka_incremental_unassign(consumer, partitions);
+ if (error)
+ fatal_error("Incremental unassign failed", error);
+ break;
+
+ default:
+ /* NOTREACHED */
+ fatal("Unexpected rebalance event: %s", rd_kafka_err2name(err));
+ }
+}
+
+
+/**
+ * @brief Create the input consumer.
+ */
+static rd_kafka_t *create_input_consumer(const char *brokers,
+ const char *input_topic,
+ rd_kafka_t *producer) {
+ rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ rd_kafka_t *rk;
+ char errstr[256];
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *topics;
+
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ rd_kafka_conf_set(conf, "group.id",
+ "librdkafka_transactions_example_group", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ rd_kafka_conf_set(conf, "partition.assignment.strategy",
+ "cooperative-sticky", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ /* The input consumer's offsets are explicitly committed with the
+ * output producer's transaction using
+ * rd_kafka_send_offsets_to_transaction(), so auto commits
+ * must be disabled. */
+ rd_kafka_conf_set(conf, "enable.auto.commit", "false", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fatal("Failed to configure consumer: %s", errstr);
+ }
+
+ /* This callback is called when the consumer group is rebalanced
+ * and partitions are assigned to or revoked from this consumer. */
+ rd_kafka_conf_set_rebalance_cb(conf, consumer_group_rebalance_cb);
+
+ /* The producer handle is needed in the consumer's rebalance callback
+ * to be able to abort and commit transactions, so we pass the
+ * producer as the consumer's opaque. */
+ rd_kafka_conf_set_opaque(conf, producer);
+
+ /* Create consumer */
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ rd_kafka_conf_destroy(conf);
+ fatal("Failed to create consumer: %s", errstr);
+ }
+
+ /* Forward all partition messages to the main queue and
+ * rd_kafka_consumer_poll(). */
+ rd_kafka_poll_set_consumer(rk);
+
+ /* Subscribe to the input topic */
+ topics = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(topics, input_topic,
+ /* The partition is ignored in
+ * rd_kafka_subscribe() */
+ RD_KAFKA_PARTITION_UA);
+ err = rd_kafka_subscribe(rk, topics);
+ rd_kafka_topic_partition_list_destroy(topics);
+ if (err) {
+ rd_kafka_destroy(rk);
+ fatal("Failed to subscribe to %s: %s\n", input_topic,
+ rd_kafka_err2str(err));
+ }
+
+ return rk;
+}
+
+
+/**
+ * @brief Find and parse next integer string in \p start.
+ * @returns Pointer after found integer string, or NULL if not found.
+ */
+static const void *
+find_next_int(const void *start, const void *end, int *intp) {
+ const char *p;
+ int collecting = 0;
+ int num = 0;
+
+ for (p = (const char *)start; p < (const char *)end; p++) {
+ if (isdigit((unsigned char)*p)) { /* unsigned cast avoids UB
+ * for negative char values */
+ collecting = 1;
+ num = (num * 10) + ((int)*p - ((int)'0'));
+ } else if (collecting)
+ break;
+ }
+
+ if (!collecting)
+ return NULL; /* No integer string found */
+
+ *intp = num;
+
+ return p;
+}
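+
+/* An illustrative, compiled-out sketch (not part of the example's flow)
+ * of what find_next_int() yields for a sample payload; the buffer
+ * contents below are made up for illustration only. */
+#if 0
+static void find_next_int_example(void) {
+ const char buf[] = "a1 b2 c39";
+ const void *p = buf, *end = buf + strlen(buf);
+ int num;
+ unsigned long sum = 0;
+
+ /* find_next_int() yields 1, 2 and 39 in turn, then returns NULL */
+ while ((p = find_next_int(p, end, &num)))
+ sum += num; /* sum == 42 */
+}
+#endif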
+
+
+/**
+ * @brief Process a message from the input consumer by parsing all
+ * integer strings, adding them up, and then producing the sum to
+ * the output topic using the transactional producer for the given
+ * input partition.
+ */
+static void process_message(rd_kafka_t *consumer,
+ rd_kafka_t *producer,
+ const char *output_topic,
+ const rd_kafka_message_t *rkmessage) {
+ int num;
+ long unsigned sum = 0;
+ const void *p, *end;
+ rd_kafka_resp_err_t err;
+ char value[64];
+
+ if (rkmessage->len == 0)
+ return; /* Ignore empty messages */
+
+ p = rkmessage->payload;
+ end = ((const char *)rkmessage->payload) + rkmessage->len;
+
+ /* Find and sum all numbers in the message */
+ while ((p = find_next_int(p, end, &num)))
+ sum += num;
+
+ if (sum == 0)
+ return; /* No integers in message, ignore it. */
+
+ snprintf(value, sizeof(value), "%lu", sum);
+
+ /* Emit output message on transactional producer */
+ while (1) {
+ err = rd_kafka_producev(
+ producer, RD_KAFKA_V_TOPIC(output_topic),
+ /* Use same key as input message */
+ RD_KAFKA_V_KEY(rkmessage->key, rkmessage->key_len),
+ /* Value is the sum of the integers
+ * in this input message. */
+ RD_KAFKA_V_VALUE(value, strlen(value)),
+ /* Copy value since it is allocated on the stack */
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
+
+ if (!err)
+ break;
+ else if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+ /* If output queue fills up we need to wait for
+ * some delivery reports and then retry. */
+ rd_kafka_poll(producer, 100);
+ continue;
+ } else {
+ fprintf(stderr,
+ "WARNING: Failed to produce message to %s: "
+ "%s: aborting transaction\n",
+ output_topic, rd_kafka_err2str(err));
+ abort_transaction_and_rewind(consumer, producer);
+ return;
+ }
+ }
+}
+
+
+int main(int argc, char **argv) {
+ rd_kafka_t *producer, *consumer;
+ int msgcnt = 0;
+ time_t last_commit = 0;
+ const char *brokers, *input_topic, *output_topic;
+ rd_kafka_error_t *error;
+
+ /*
+ * Argument validation
+ */
+ if (argc != 4) {
+ fprintf(stderr,
+ "%% Usage: %s <broker> <input-topic> <output-topic>\n",
+ argv[0]);
+ return 1;
+ }
+
+ brokers = argv[1];
+ input_topic = argv[2];
+ output_topic = argv[3];
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ producer = create_transactional_producer(brokers, output_topic);
+
+ consumer = create_input_consumer(brokers, input_topic, producer);
+
+ fprintf(stdout,
+ "Expecting integers to sum on input topic %s ...\n"
+ "To generate input messages you can use:\n"
+ " $ seq 1 100 | examples/producer %s %s\n"
+ "Observe summed integers on output topic %s:\n"
+ " $ examples/consumer %s just-watching %s\n"
+ "\n",
+ input_topic, brokers, input_topic, output_topic, brokers,
+ output_topic);
+
+ /* Begin transaction and start waiting for messages */
+ error = rd_kafka_begin_transaction(producer);
+ if (error)
+ fatal_error("Failed to begin transaction", error);
+
+ while (run) {
+ rd_kafka_message_t *msg;
+
+ /* Commit transaction every 100 messages or 5 seconds */
+ if (msgcnt > 0 &&
+ (msgcnt > 100 || last_commit + 5 <= time(NULL))) {
+ printf("msgcnt %d, elapsed %d\n", msgcnt,
+ (int)(time(NULL) - last_commit));
+ commit_transaction_and_start_new(consumer, producer);
+ msgcnt = 0;
+ last_commit = time(NULL);
+ }
+
+ /* Wait for new messages or error events */
+ msg = rd_kafka_consumer_poll(consumer, 1000 /*1 second*/);
+ if (!msg)
+ continue; /* Poll timeout */
+
+ if (msg->err) {
+ /* Client errors are typically just informational
+ * since the client will automatically try to recover
+ * from all types of errors.
+ * It is thus sufficient for the application to log and
+ * continue operating when a consumer error is
+ * encountered. */
+ fprintf(stderr, "WARNING: Consumer error: %s\n",
+ rd_kafka_message_errstr(msg));
+ rd_kafka_message_destroy(msg);
+ continue;
+ }
+
+ /* Process message */
+ process_message(consumer, producer, output_topic, msg);
+
+ rd_kafka_message_destroy(msg);
+
+ msgcnt++;
+ }
+
+ fprintf(stdout, "Closing consumer\n");
+ rd_kafka_consumer_close(consumer);
+ rd_kafka_destroy(consumer);
+
+ fprintf(stdout, "Closing producer\n");
+ rd_kafka_destroy(producer);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/win_ssl_cert_store.cpp b/fluent-bit/lib/librdkafka-2.1.0/examples/win_ssl_cert_store.cpp
new file mode 100644
index 00000000..a80dfea3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/win_ssl_cert_store.cpp
@@ -0,0 +1,395 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Example of utilizing the Windows Certificate store with SSL.
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+#include <sstream>
+
+#include "../win32/wingetopt.h"
+#include <windows.h>
+#include <wincrypt.h>
+
+/*
+ * Typically include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+
+
+class ExampleStoreRetriever {
+ public:
+ ExampleStoreRetriever(std::string const &subject, std::string const &pass) :
+ m_cert_subject(subject),
+ m_password(pass),
+ m_cert_store(NULL),
+ m_cert_ctx(NULL) {
+ load_certificate();
+ }
+
+ ~ExampleStoreRetriever() {
+ if (m_cert_ctx)
+ CertFreeCertificateContext(m_cert_ctx);
+
+ if (m_cert_store)
+ CertCloseStore(m_cert_store, 0);
+ }
+
+ /* @returns the public key in DER format */
+ const std::vector<unsigned char> get_public_key() {
+ std::vector<unsigned char> buf((size_t)m_cert_ctx->cbCertEncoded);
+ buf.assign((const char *)m_cert_ctx->pbCertEncoded,
+ (const char *)m_cert_ctx->pbCertEncoded +
+ (size_t)m_cert_ctx->cbCertEncoded);
+ return buf;
+ }
+
+ /* @returns the private key in PKCS#12 format */
+ const std::vector<unsigned char> get_private_key() {
+ /*
+ * In order to export the private key the certificate
+ * must first be marked as exportable.
+ *
+ * Steps to export the private key
+ * 1) Create an in-memory cert store
+ * 2) Add the certificate to the store
+ * 3) Export the private key from the in-memory store
+ */
+
+ /* Create an in-memory cert store */
+ HCERTSTORE hMemStore =
+ CertOpenStore(CERT_STORE_PROV_MEMORY, 0, NULL, 0, NULL);
+ if (!hMemStore)
+ throw "Failed to create in-memory cert store: " +
+ GetErrorMsg(GetLastError());
+
+ /* Add certificate to store */
+ if (!CertAddCertificateContextToStore(hMemStore, m_cert_ctx,
+ CERT_STORE_ADD_USE_EXISTING, NULL))
+ throw "Failed to add certificate to store: " +
+ GetErrorMsg(GetLastError());
+
+ /*
+ * Export private key from cert
+ */
+ CRYPT_DATA_BLOB db = {NULL};
+
+ std::wstring w_password(m_password.begin(), m_password.end());
+
+ /* Acquire output size */
+ if (!PFXExportCertStoreEx(hMemStore, &db, w_password.c_str(), NULL,
+ EXPORT_PRIVATE_KEYS | REPORT_NO_PRIVATE_KEY |
+ REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY))
+ throw "Failed to export private key: " + GetErrorMsg(GetLastError());
+
+ std::vector<unsigned char> buf;
+
+ buf.resize(db.cbData);
+ db.pbData = &buf[0];
+
+ /* Extract key */
+ if (!PFXExportCertStoreEx(hMemStore, &db, w_password.c_str(), NULL,
+ EXPORT_PRIVATE_KEYS | REPORT_NO_PRIVATE_KEY |
+ REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY))
+ throw "Failed to export private key (PFX): " +
+ GetErrorMsg(GetLastError());
+
+ CertCloseStore(hMemStore, 0);
+
+ buf.resize(db.cbData);
+
+ return buf;
+ }
+
+ private:
+ void load_certificate() {
+ if (m_cert_ctx)
+ return;
+
+ m_cert_store = CertOpenStore(CERT_STORE_PROV_SYSTEM, 0, NULL,
+ CERT_SYSTEM_STORE_CURRENT_USER, L"My");
+ if (!m_cert_store)
+ throw "Failed to open cert store: " + GetErrorMsg(GetLastError());
+
+ m_cert_ctx = CertFindCertificateInStore(
+ m_cert_store, X509_ASN_ENCODING, 0, CERT_FIND_SUBJECT_STR,
+ /* A better std::string to std::wstring conversion should
+ * probably be used here (see the widen_utf8() sketch
+ * after this class). */
+ std::wstring(m_cert_subject.begin(), m_cert_subject.end()).c_str(),
+ NULL);
+ if (!m_cert_ctx) {
+ CertCloseStore(m_cert_store, 0);
+ m_cert_store = NULL;
+ throw "Certificate " + m_cert_subject +
+ " not found in cert store: " + GetErrorMsg(GetLastError());
+ }
+ }
+
+ std::string GetErrorMsg(unsigned long error) {
+ char *message = NULL;
+ size_t ret = FormatMessageA(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, nullptr,
+ error, 0, (char *)&message, 0, nullptr);
+ if (ret == 0) {
+ std::stringstream ss;
+
+ ss << std::string("could not format message for ") << error;
+ return ss.str();
+ } else {
+ std::string result(message, ret);
+ LocalFree(message);
+ return result;
+ }
+ }
+
+ private:
+ std::string m_cert_subject;
+ std::string m_password;
+ PCCERT_CONTEXT m_cert_ctx;
+ HCERTSTORE m_cert_store;
+};
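+
+/* A minimal sketch of the "better conversion" hinted at in
+ * load_certificate() above: converting a std::string to std::wstring
+ * with MultiByteToWideChar(), assuming the input is UTF-8. The helper
+ * name widen_utf8 is ours and not part of librdkafka. */
+static std::wstring widen_utf8(const std::string &s) {
+ if (s.empty())
+ return std::wstring();
+
+ /* First call computes the required length, second call converts. */
+ int n = MultiByteToWideChar(CP_UTF8, 0, s.c_str(), (int)s.size(), NULL, 0);
+ std::wstring w((size_t)n, L'\0');
+ MultiByteToWideChar(CP_UTF8, 0, s.c_str(), (int)s.size(), &w[0], n);
+ return w;
+}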
+
+
+class PrintingSSLVerifyCb : public RdKafka::SslCertificateVerifyCb {
+ /* This SSL cert verification callback simply prints the certificates
+ * in the certificate chain. It performs no validation; every
+ * certificate is accepted. (A stricter variant is sketched after
+ * this class.) */
+ public:
+ bool ssl_cert_verify_cb(const std::string &broker_name,
+ int32_t broker_id,
+ int *x509_error,
+ int depth,
+ const char *buf,
+ size_t size,
+ std::string &errstr) {
+ PCCERT_CONTEXT ctx = CertCreateCertificateContext(
+ X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, (const uint8_t *)buf,
+ static_cast<unsigned long>(size));
+
+ if (!ctx) {
+ std::cerr << "Failed to parse certificate" << std::endl;
+ /* This callback accepts everything, so keep accepting even
+ * though the certificate could not be parsed for printing. */
+ return true;
+ }
+
+ char subject[256] = "n/a";
+ char issuer[256] = "n/a";
+
+ CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE, 0, NULL, subject,
+ sizeof(subject));
+
+ CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE,
+ CERT_NAME_ISSUER_FLAG, NULL, issuer, sizeof(issuer));
+
+ std::cerr << "Broker " << broker_name << " (" << broker_id << "): "
+ << "certificate depth " << depth << ", X509 error " << *x509_error
+ << ", subject " << subject << ", issuer " << issuer << std::endl;
+
+ if (ctx)
+ CertFreeCertificateContext(ctx);
+
+ return true;
+ }
+};
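+
+/* A sketch (our addition, not used by this example) of a verifier that,
+ * unlike PrintingSSLVerifyCb, enforces OpenSSL's pre-verification
+ * result: librdkafka passes OpenSSL's verification status in
+ * *x509_error, and returning false with errstr set fails the
+ * SSL handshake. */
+class RejectingSSLVerifyCb : public RdKafka::SslCertificateVerifyCb {
+ public:
+ bool ssl_cert_verify_cb(const std::string &broker_name,
+ int32_t broker_id,
+ int *x509_error,
+ int depth,
+ const char *buf,
+ size_t size,
+ std::string &errstr) {
+ if (*x509_error != 0) {
+ errstr = "certificate verification failed for broker " + broker_name;
+ return false; /* Fail the handshake */
+ }
+ return true; /* Certificate chain accepted so far */
+ }
+};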
+
+
+/**
+ * @brief Print the brokers in the cluster.
+ */
+static void print_brokers(RdKafka::Handle *handle,
+ const RdKafka::Metadata *md) {
+ std::cout << md->brokers()->size() << " broker(s) in cluster "
+ << handle->clusterid(0) << std::endl;
+
+ /* Iterate brokers */
+ RdKafka::Metadata::BrokerMetadataIterator ib;
+ for (ib = md->brokers()->begin(); ib != md->brokers()->end(); ++ib)
+ std::cout << " broker " << (*ib)->id() << " at " << (*ib)->host() << ":"
+ << (*ib)->port() << std::endl;
+}
+
+
+int main(int argc, char **argv) {
+ std::string brokers;
+ std::string errstr;
+ std::string cert_subject;
+ std::string priv_key_pass;
+
+ /*
+ * Create configuration objects
+ */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+ RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
+
+ int opt;
+ while ((opt = getopt(argc, argv, "b:d:X:s:p:")) != -1) {
+ switch (opt) {
+ case 'b':
+ brokers = optarg;
+ break;
+ case 'd':
+ if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+ case 'X': {
+ char *name, *val;
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ std::cerr << "%% Expected -X property=value, not " << name << std::endl;
+ exit(1);
+ }
+
+ *val = '\0';
+ val++;
+
+ if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ } break;
+
+ case 's':
+ cert_subject = optarg;
+ break;
+
+ case 'p':
+ priv_key_pass = optarg;
+ if (conf->set("ssl.key.password", optarg, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ break;
+
+ default:
+ goto usage;
+ }
+ }
+
+ if (brokers.empty() || optind != argc) {
+ usage:
+ std::string features;
+ conf->get("builtin.features", features);
+ fprintf(stderr,
+ "Usage: %s [options] -b <brokers> -s <cert-subject> -p "
+ "<priv-key-password>\n"
+ "\n"
+ "Windows Certificate Store integration example.\n"
+ "Use certlm.msc or mmc to view your certificates.\n"
+ "\n"
+ "librdkafka version %s (0x%08x, builtin.features \"%s\")\n"
+ "\n"
+ " Options:\n"
+ " -b <brokers> Broker address\n"
+ " -s <cert> The subject name of the client's SSL "
+ "certificate to use\n"
+ " -p <pass> The private key password\n"
+ " -d [facs..] Enable debugging contexts: %s\n"
+ " -X <prop=name> Set arbitrary librdkafka "
+ "configuration property\n"
+ "\n",
+ argv[0], RdKafka::version_str().c_str(), RdKafka::version(),
+ features.c_str(), RdKafka::get_debug_contexts().c_str());
+ exit(1);
+ }
+
+ if (!cert_subject.empty()) {
+ try {
+ /* Load certificates from the Windows store */
+ ExampleStoreRetriever certStore(cert_subject, priv_key_pass);
+
+ std::vector<unsigned char> pubkey, privkey;
+
+ pubkey = certStore.get_public_key();
+ privkey = certStore.get_private_key();
+
+ if (conf->set_ssl_cert(RdKafka::CERT_PUBLIC_KEY, RdKafka::CERT_ENC_DER,
+ &pubkey[0], pubkey.size(),
+ errstr) != RdKafka::Conf::CONF_OK)
+ throw "Failed to set public key: " + errstr;
+
+ if (conf->set_ssl_cert(RdKafka::CERT_PRIVATE_KEY,
+ RdKafka::CERT_ENC_PKCS12, &privkey[0],
+ privkey.size(), errstr) != RdKafka::Conf::CONF_OK)
+ throw "Failed to set private key: " + errstr;
+
+ } catch (const std::string &ex) {
+ std::cerr << ex << std::endl;
+ exit(1);
+ }
+ }
+
+
+ /*
+ * Set configuration properties
+ */
+ conf->set("bootstrap.servers", brokers, errstr);
+
+ /* We use the certificate verification callback to print the
+ * certificate chains being used. */
+ PrintingSSLVerifyCb ssl_verify_cb;
+
+ if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ /* Create any type of client; a producer is the cheapest. */
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer) {
+ std::cerr << "Failed to create producer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ RdKafka::Metadata *metadata;
+
+ /* Fetch metadata */
+ RdKafka::ErrorCode err = producer->metadata(false, NULL, &metadata, 5000);
+ if (err != RdKafka::ERR_NO_ERROR) {
+ std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err)
+ << std::endl;
+ exit(1);
+ }
+
+ print_brokers(producer, metadata);
+
+ delete metadata;
+ delete producer;
+
+ return 0;
+}