From b485aab7e71c1625cfc27e0f92c9509f42378458 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 5 May 2024 13:19:16 +0200 Subject: Adding upstream version 1.45.3+dfsg. Signed-off-by: Daniel Baumann --- .../lib/librdkafka-2.1.0/.clang-format-cpp | 103 + src/fluent-bit/lib/librdkafka-2.1.0/.dir-locals.el | 10 + src/fluent-bit/lib/librdkafka-2.1.0/.formatignore | 18 + src/fluent-bit/lib/librdkafka-2.1.0/.gdbmacros | 19 + .../lib/librdkafka-2.1.0/.github/ISSUE_TEMPLATE | 34 + src/fluent-bit/lib/librdkafka-2.1.0/.gitignore | 33 + .../lib/librdkafka-2.1.0/.semaphore/project.yml | 43 + .../lib/librdkafka-2.1.0/.semaphore/semaphore.yml | 364 + src/fluent-bit/lib/librdkafka-2.1.0/CHANGELOG.md | 1218 +++ src/fluent-bit/lib/librdkafka-2.1.0/CMakeLists.txt | 291 + .../lib/librdkafka-2.1.0/CODE_OF_CONDUCT.md | 46 + .../lib/librdkafka-2.1.0/CONFIGURATION.md | 183 + .../lib/librdkafka-2.1.0/CONTRIBUTING.md | 425 + src/fluent-bit/lib/librdkafka-2.1.0/Doxyfile | 2375 +++++ .../lib/librdkafka-2.1.0/INTRODUCTION.md | 2069 +++++ src/fluent-bit/lib/librdkafka-2.1.0/LICENSE | 25 + src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.cjson | 22 + src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.crc32c | 28 + src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.fnv1a | 18 + .../lib/librdkafka-2.1.0/LICENSE.hdrhistogram | 27 + src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.lz4 | 26 + .../lib/librdkafka-2.1.0/LICENSE.murmur2 | 25 + src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.pycrc | 23 + src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.queue | 31 + src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.regexp | 5 + src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.snappy | 36 + .../lib/librdkafka-2.1.0/LICENSE.tinycthread | 26 + .../lib/librdkafka-2.1.0/LICENSE.wingetopt | 49 + src/fluent-bit/lib/librdkafka-2.1.0/LICENSES.txt | 392 + src/fluent-bit/lib/librdkafka-2.1.0/Makefile | 124 + src/fluent-bit/lib/librdkafka-2.1.0/README.md | 198 + src/fluent-bit/lib/librdkafka-2.1.0/README.win32 | 26 + 
src/fluent-bit/lib/librdkafka-2.1.0/STATISTICS.md | 624 ++ src/fluent-bit/lib/librdkafka-2.1.0/configure | 214 + src/fluent-bit/lib/librdkafka-2.1.0/configure.self | 331 + .../lib/librdkafka-2.1.0/debian/.gitignore | 6 + .../lib/librdkafka-2.1.0/debian/changelog | 111 + src/fluent-bit/lib/librdkafka-2.1.0/debian/compat | 1 + src/fluent-bit/lib/librdkafka-2.1.0/debian/control | 71 + .../lib/librdkafka-2.1.0/debian/copyright | 99 + .../lib/librdkafka-2.1.0/debian/gbp.conf | 9 + .../librdkafka-2.1.0/debian/librdkafka++1.install | 1 + .../debian/librdkafka-dev.examples | 2 + .../librdkafka-2.1.0/debian/librdkafka-dev.install | 9 + .../lib/librdkafka-2.1.0/debian/librdkafka1.docs | 5 + .../librdkafka-2.1.0/debian/librdkafka1.install | 1 + .../librdkafka-2.1.0/debian/librdkafka1.symbols | 135 + src/fluent-bit/lib/librdkafka-2.1.0/debian/rules | 19 + .../lib/librdkafka-2.1.0/debian/source/format | 1 + src/fluent-bit/lib/librdkafka-2.1.0/debian/watch | 2 + src/fluent-bit/lib/librdkafka-2.1.0/dev-conf.sh | 123 + .../lib/librdkafka-2.1.0/examples/.gitignore | 19 + .../lib/librdkafka-2.1.0/examples/CMakeLists.txt | 40 + .../lib/librdkafka-2.1.0/examples/Makefile | 137 + .../lib/librdkafka-2.1.0/examples/README.md | 38 + .../examples/alter_consumer_group_offsets.c | 338 + .../lib/librdkafka-2.1.0/examples/consumer.c | 260 + .../lib/librdkafka-2.1.0/examples/delete_records.c | 233 + .../examples/describe_consumer_groups.c | 373 + .../lib/librdkafka-2.1.0/examples/globals.json | 11 + .../examples/idempotent_producer.c | 344 + .../examples/kafkatest_verifiable_client.cpp | 961 ++ .../examples/list_consumer_group_offsets.c | 359 + .../examples/list_consumer_groups.c | 330 + .../lib/librdkafka-2.1.0/examples/misc.c | 287 + .../examples/openssl_engine_example.cpp | 249 + .../lib/librdkafka-2.1.0/examples/producer.c | 251 + .../lib/librdkafka-2.1.0/examples/producer.cpp | 228 + .../examples/rdkafka_complex_consumer_example.c | 617 ++ .../examples/rdkafka_complex_consumer_example.cpp 
| 467 + .../examples/rdkafka_consume_batch.cpp | 264 + .../librdkafka-2.1.0/examples/rdkafka_example.c | 853 ++ .../librdkafka-2.1.0/examples/rdkafka_example.cpp | 679 ++ .../examples/rdkafka_performance.c | 1780 ++++ .../examples/transactions-older-broker.c | 668 ++ .../lib/librdkafka-2.1.0/examples/transactions.c | 665 ++ .../examples/win_ssl_cert_store.cpp | 395 + src/fluent-bit/lib/librdkafka-2.1.0/lds-gen.py | 73 + src/fluent-bit/lib/librdkafka-2.1.0/mainpage.doxy | 40 + .../lib/librdkafka-2.1.0/mklove/.gitignore | 1 + .../lib/librdkafka-2.1.0/mklove/Makefile.base | 329 + .../mklove/modules/configure.atomics | 144 + .../librdkafka-2.1.0/mklove/modules/configure.base | 2484 ++++++ .../mklove/modules/configure.builtin | 70 + .../librdkafka-2.1.0/mklove/modules/configure.cc | 186 + .../librdkafka-2.1.0/mklove/modules/configure.cxx | 8 + .../mklove/modules/configure.fileversion | 65 + .../mklove/modules/configure.gitversion | 29 + .../mklove/modules/configure.good_cflags | 18 + .../librdkafka-2.1.0/mklove/modules/configure.host | 132 + .../librdkafka-2.1.0/mklove/modules/configure.lib | 49 + .../mklove/modules/configure.libcurl | 99 + .../mklove/modules/configure.libsasl2 | 36 + .../mklove/modules/configure.libssl | 147 + .../mklove/modules/configure.libzstd | 58 + .../mklove/modules/configure.parseversion | 95 + .../librdkafka-2.1.0/mklove/modules/configure.pic | 16 + .../mklove/modules/configure.socket | 20 + .../librdkafka-2.1.0/mklove/modules/configure.zlib | 61 + .../mklove/modules/patches/README.md | 8 + .../libcurl.0000-no-runtime-linking-check.patch | 11 + ...0000-osx-rand-include-fix-OpenSSL-PR16409.patch | 56 + .../lib/librdkafka-2.1.0/packaging/RELEASE.md | 311 + .../packaging/alpine/build-alpine.sh | 38 + .../librdkafka-2.1.0/packaging/archlinux/PKGBUILD | 30 + .../packaging/cmake/Config.cmake.in | 37 + .../packaging/cmake/Modules/FindLZ4.cmake | 38 + .../packaging/cmake/Modules/FindZSTD.cmake | 27 + .../packaging/cmake/Modules/LICENSE.FindZstd | 178 + 
.../lib/librdkafka-2.1.0/packaging/cmake/README.md | 38 + .../librdkafka-2.1.0/packaging/cmake/config.h.in | 52 + .../packaging/cmake/parseversion.cmake | 60 + .../librdkafka-2.1.0/packaging/cmake/rdkafka.pc.in | 12 + .../packaging/cmake/try_compile/atomic_32_test.c | 8 + .../packaging/cmake/try_compile/atomic_64_test.c | 8 + .../packaging/cmake/try_compile/c11threads_test.c | 14 + .../packaging/cmake/try_compile/crc32c_hw_test.c | 27 + .../packaging/cmake/try_compile/dlopen_test.c | 11 + .../packaging/cmake/try_compile/libsasl2_test.c | 7 + .../try_compile/pthread_setname_darwin_test.c | 6 + .../try_compile/pthread_setname_freebsd_test.c | 7 + .../cmake/try_compile/pthread_setname_gnu_test.c | 5 + .../packaging/cmake/try_compile/rand_r_test.c | 7 + .../cmake/try_compile/rdkafka_setup.cmake | 122 + .../packaging/cmake/try_compile/regex_test.c | 10 + .../packaging/cmake/try_compile/strndup_test.c | 5 + .../packaging/cmake/try_compile/sync_32_test.c | 8 + .../packaging/cmake/try_compile/sync_64_test.c | 8 + .../lib/librdkafka-2.1.0/packaging/cp/README.md | 14 + .../librdkafka-2.1.0/packaging/cp/check_features.c | 64 + .../librdkafka-2.1.0/packaging/cp/verify-deb.sh | 34 + .../packaging/cp/verify-packages.sh | 43 + .../librdkafka-2.1.0/packaging/cp/verify-rpm.sh | 38 + .../librdkafka-2.1.0/packaging/debian/.gitignore | 6 + .../librdkafka-2.1.0/packaging/debian/changelog | 66 + .../lib/librdkafka-2.1.0/packaging/debian/compat | 1 + .../lib/librdkafka-2.1.0/packaging/debian/control | 49 + .../librdkafka-2.1.0/packaging/debian/copyright | 84 + .../lib/librdkafka-2.1.0/packaging/debian/docs | 5 + .../lib/librdkafka-2.1.0/packaging/debian/gbp.conf | 9 + .../packaging/debian/librdkafka-dev.dirs | 2 + .../packaging/debian/librdkafka-dev.examples | 2 + .../packaging/debian/librdkafka-dev.install | 6 + .../packaging/debian/librdkafka-dev.substvars | 1 + .../packaging/debian/librdkafka.dsc | 16 + .../packaging/debian/librdkafka1-dbg.substvars | 1 + 
.../packaging/debian/librdkafka1.dirs | 1 + .../packaging/debian/librdkafka1.install | 2 + .../debian/librdkafka1.postinst.debhelper | 5 + .../packaging/debian/librdkafka1.postrm.debhelper | 5 + .../packaging/debian/librdkafka1.symbols | 64 + .../lib/librdkafka-2.1.0/packaging/debian/rules | 19 + .../packaging/debian/source/format | 1 + .../lib/librdkafka-2.1.0/packaging/debian/watch | 2 + .../lib/librdkafka-2.1.0/packaging/get_version.py | 21 + .../librdkafka-2.1.0/packaging/homebrew/README.md | 15 + .../packaging/homebrew/brew-update-pr.sh | 31 + .../configure-build-msys2-mingw-static.sh | 52 + .../mingw-w64/configure-build-msys2-mingw.sh | 21 + .../packaging/mingw-w64/run-tests.sh | 6 + .../packaging/mingw-w64/semaphoreci-build.sh | 38 + .../packaging/mingw-w64/travis-before-install.sh | 20 + .../librdkafka-2.1.0/packaging/nuget/.gitignore | 7 + .../lib/librdkafka-2.1.0/packaging/nuget/README.md | 78 + .../librdkafka-2.1.0/packaging/nuget/artifact.py | 177 + .../librdkafka-2.1.0/packaging/nuget/cleanup-s3.py | 143 + .../msvcr120.zip | Bin 0 -> 679055 bytes .../msvcr140.zip | Bin 0 -> 516022 bytes .../msvcr120.zip | Bin 0 -> 662837 bytes .../msvcr140.zip | Bin 0 -> 621912 bytes .../lib/librdkafka-2.1.0/packaging/nuget/nuget.sh | 21 + .../packaging/nuget/nugetpackage.py | 286 + .../librdkafka-2.1.0/packaging/nuget/packaging.py | 448 + .../packaging/nuget/push-to-nuget.sh | 21 + .../librdkafka-2.1.0/packaging/nuget/release.py | 167 + .../packaging/nuget/requirements.txt | 3 + .../packaging/nuget/staticpackage.py | 178 + .../nuget/templates/librdkafka.redist.nuspec | 21 + .../nuget/templates/librdkafka.redist.props | 18 + .../nuget/templates/librdkafka.redist.targets | 19 + .../packaging/nuget/zfile/__init__.py | 0 .../packaging/nuget/zfile/zfile.py | 98 + .../lib/librdkafka-2.1.0/packaging/rpm/.gitignore | 7 + .../lib/librdkafka-2.1.0/packaging/rpm/Makefile | 92 + .../lib/librdkafka-2.1.0/packaging/rpm/README.md | 23 + 
.../librdkafka-2.1.0/packaging/rpm/el7-x86_64.cfg | 40 + .../librdkafka-2.1.0/packaging/rpm/librdkafka.spec | 118 + .../packaging/rpm/mock-on-docker.sh | 97 + .../packaging/rpm/tests/.gitignore | 2 + .../librdkafka-2.1.0/packaging/rpm/tests/Makefile | 25 + .../librdkafka-2.1.0/packaging/rpm/tests/README.md | 8 + .../packaging/rpm/tests/run-test.sh | 49 + .../packaging/rpm/tests/test-on-docker.sh | 56 + .../librdkafka-2.1.0/packaging/rpm/tests/test.c | 77 + .../librdkafka-2.1.0/packaging/rpm/tests/test.cpp | 34 + .../packaging/tools/build-deb-package.sh | 64 + .../packaging/tools/build-debian.sh | 65 + .../packaging/tools/build-manylinux.sh | 68 + .../packaging/tools/build-release-artifacts.sh | 138 + .../packaging/tools/distro-build.sh | 38 + .../packaging/tools/gh-release-checksums.py | 39 + .../packaging/tools/rdutcoverage.sh | 25 + .../packaging/tools/requirements.txt | 2 + .../packaging/tools/style-format.sh | 148 + src/fluent-bit/lib/librdkafka-2.1.0/service.yml | 18 + .../lib/librdkafka-2.1.0/src-cpp/CMakeLists.txt | 90 + .../lib/librdkafka-2.1.0/src-cpp/ConfImpl.cpp | 84 + .../lib/librdkafka-2.1.0/src-cpp/ConsumerImpl.cpp | 244 + .../lib/librdkafka-2.1.0/src-cpp/HandleImpl.cpp | 425 + .../lib/librdkafka-2.1.0/src-cpp/HeadersImpl.cpp | 48 + .../librdkafka-2.1.0/src-cpp/KafkaConsumerImpl.cpp | 296 + .../lib/librdkafka-2.1.0/src-cpp/Makefile | 55 + .../lib/librdkafka-2.1.0/src-cpp/MessageImpl.cpp | 38 + .../lib/librdkafka-2.1.0/src-cpp/MetadataImpl.cpp | 170 + .../lib/librdkafka-2.1.0/src-cpp/ProducerImpl.cpp | 197 + .../lib/librdkafka-2.1.0/src-cpp/QueueImpl.cpp | 70 + .../lib/librdkafka-2.1.0/src-cpp/README.md | 16 + .../lib/librdkafka-2.1.0/src-cpp/RdKafka.cpp | 59 + .../lib/librdkafka-2.1.0/src-cpp/TopicImpl.cpp | 124 + .../src-cpp/TopicPartitionImpl.cpp | 57 + .../lib/librdkafka-2.1.0/src-cpp/rdkafkacpp.h | 3764 ++++++++ .../lib/librdkafka-2.1.0/src-cpp/rdkafkacpp_int.h | 1628 ++++ .../lib/librdkafka-2.1.0/src/CMakeLists.txt | 364 + 
src/fluent-bit/lib/librdkafka-2.1.0/src/Makefile | 97 + src/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.c | 2834 ++++++ src/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.h | 398 + src/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.c | 430 + src/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.h | 38 + .../lib/librdkafka-2.1.0/src/generate_proto.sh | 66 + .../librdkafka-2.1.0/src/librdkafka_cgrp_synch.png | Bin 0 -> 93796 bytes src/fluent-bit/lib/librdkafka-2.1.0/src/lz4.c | 2498 ++++++ src/fluent-bit/lib/librdkafka-2.1.0/src/lz4.h | 774 ++ src/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.c | 1899 ++++ src/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.h | 623 ++ .../lib/librdkafka-2.1.0/src/lz4frame_static.h | 47 + src/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.c | 1615 ++++ src/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.h | 413 + src/fluent-bit/lib/librdkafka-2.1.0/src/queue.h | 850 ++ src/fluent-bit/lib/librdkafka-2.1.0/src/rd.h | 436 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.c | 255 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.h | 203 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdatomic.h | 226 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdavg.h | 259 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.c | 210 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.h | 250 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.c | 1880 ++++ src/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.h | 373 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.c | 114 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.h | 170 + src/fluent-bit/lib/librdkafka-2.1.0/src/rddl.c | 179 + src/fluent-bit/lib/librdkafka-2.1.0/src/rddl.h | 43 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdendian.h | 174 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdfloat.h | 67 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.c | 113 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.h | 35 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.c | 120 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.h | 46 + 
.../lib/librdkafka-2.1.0/src/rdhdrhistogram.c | 721 ++ .../lib/librdkafka-2.1.0/src/rdhdrhistogram.h | 87 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.c | 511 ++ src/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.h | 83 + .../lib/librdkafka-2.1.0/src/rdinterval.h | 159 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.c | 5026 +++++++++++ src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.h | 9340 ++++++++++++++++++++ .../lib/librdkafka-2.1.0/src/rdkafka_admin.c | 6668 ++++++++++++++ .../lib/librdkafka-2.1.0/src/rdkafka_admin.h | 482 + .../lib/librdkafka-2.1.0/src/rdkafka_assignment.c | 968 ++ .../lib/librdkafka-2.1.0/src/rdkafka_assignment.h | 73 + .../lib/librdkafka-2.1.0/src/rdkafka_assignor.c | 1065 +++ .../lib/librdkafka-2.1.0/src/rdkafka_assignor.h | 212 + .../lib/librdkafka-2.1.0/src/rdkafka_aux.c | 278 + .../lib/librdkafka-2.1.0/src/rdkafka_aux.h | 120 + .../lib/librdkafka-2.1.0/src/rdkafka_background.c | 221 + .../lib/librdkafka-2.1.0/src/rdkafka_broker.c | 5867 ++++++++++++ .../lib/librdkafka-2.1.0/src/rdkafka_broker.h | 607 ++ .../lib/librdkafka-2.1.0/src/rdkafka_buf.c | 530 ++ .../lib/librdkafka-2.1.0/src/rdkafka_buf.h | 1407 +++ .../lib/librdkafka-2.1.0/src/rdkafka_cert.c | 552 ++ .../lib/librdkafka-2.1.0/src/rdkafka_cert.h | 61 + .../lib/librdkafka-2.1.0/src/rdkafka_cgrp.c | 5969 +++++++++++++ .../lib/librdkafka-2.1.0/src/rdkafka_cgrp.h | 383 + .../lib/librdkafka-2.1.0/src/rdkafka_conf.c | 4362 +++++++++ .../lib/librdkafka-2.1.0/src/rdkafka_conf.h | 650 ++ .../lib/librdkafka-2.1.0/src/rdkafka_confval.h | 97 + .../lib/librdkafka-2.1.0/src/rdkafka_coord.c | 623 ++ .../lib/librdkafka-2.1.0/src/rdkafka_coord.h | 132 + .../lib/librdkafka-2.1.0/src/rdkafka_error.c | 228 + .../lib/librdkafka-2.1.0/src/rdkafka_error.h | 80 + .../lib/librdkafka-2.1.0/src/rdkafka_event.c | 426 + .../lib/librdkafka-2.1.0/src/rdkafka_event.h | 118 + .../lib/librdkafka-2.1.0/src/rdkafka_feature.c | 460 + .../lib/librdkafka-2.1.0/src/rdkafka_feature.h | 102 + 
.../lib/librdkafka-2.1.0/src/rdkafka_fetcher.c | 1145 +++ .../lib/librdkafka-2.1.0/src/rdkafka_fetcher.h | 41 + .../lib/librdkafka-2.1.0/src/rdkafka_header.c | 220 + .../lib/librdkafka-2.1.0/src/rdkafka_header.h | 76 + .../lib/librdkafka-2.1.0/src/rdkafka_idempotence.c | 807 ++ .../lib/librdkafka-2.1.0/src/rdkafka_idempotence.h | 144 + .../lib/librdkafka-2.1.0/src/rdkafka_int.h | 1054 +++ .../lib/librdkafka-2.1.0/src/rdkafka_interceptor.c | 819 ++ .../lib/librdkafka-2.1.0/src/rdkafka_interceptor.h | 104 + .../lib/librdkafka-2.1.0/src/rdkafka_lz4.c | 450 + .../lib/librdkafka-2.1.0/src/rdkafka_lz4.h | 49 + .../lib/librdkafka-2.1.0/src/rdkafka_metadata.c | 1468 +++ .../lib/librdkafka-2.1.0/src/rdkafka_metadata.h | 212 + .../librdkafka-2.1.0/src/rdkafka_metadata_cache.c | 836 ++ .../lib/librdkafka-2.1.0/src/rdkafka_mock.c | 2585 ++++++ .../lib/librdkafka-2.1.0/src/rdkafka_mock.h | 373 + .../lib/librdkafka-2.1.0/src/rdkafka_mock_cgrp.c | 687 ++ .../librdkafka-2.1.0/src/rdkafka_mock_handlers.c | 2218 +++++ .../lib/librdkafka-2.1.0/src/rdkafka_mock_int.h | 538 ++ .../lib/librdkafka-2.1.0/src/rdkafka_msg.c | 2517 ++++++ .../lib/librdkafka-2.1.0/src/rdkafka_msg.h | 583 ++ .../lib/librdkafka-2.1.0/src/rdkafka_msgbatch.h | 62 + .../lib/librdkafka-2.1.0/src/rdkafka_msgset.h | 82 + .../librdkafka-2.1.0/src/rdkafka_msgset_reader.c | 1794 ++++ .../librdkafka-2.1.0/src/rdkafka_msgset_writer.c | 1445 +++ .../lib/librdkafka-2.1.0/src/rdkafka_offset.c | 1548 ++++ .../lib/librdkafka-2.1.0/src/rdkafka_offset.h | 135 + .../lib/librdkafka-2.1.0/src/rdkafka_op.c | 928 ++ .../lib/librdkafka-2.1.0/src/rdkafka_op.h | 778 ++ .../lib/librdkafka-2.1.0/src/rdkafka_partition.c | 4301 +++++++++ .../lib/librdkafka-2.1.0/src/rdkafka_partition.h | 1058 +++ .../lib/librdkafka-2.1.0/src/rdkafka_pattern.c | 228 + .../lib/librdkafka-2.1.0/src/rdkafka_pattern.h | 70 + .../lib/librdkafka-2.1.0/src/rdkafka_plugin.c | 213 + .../lib/librdkafka-2.1.0/src/rdkafka_plugin.h | 41 + 
.../lib/librdkafka-2.1.0/src/rdkafka_proto.h | 655 ++ .../lib/librdkafka-2.1.0/src/rdkafka_protocol.h | 120 + .../lib/librdkafka-2.1.0/src/rdkafka_queue.c | 1085 +++ .../lib/librdkafka-2.1.0/src/rdkafka_queue.h | 1171 +++ .../librdkafka-2.1.0/src/rdkafka_range_assignor.c | 138 + .../lib/librdkafka-2.1.0/src/rdkafka_request.c | 5378 +++++++++++ .../lib/librdkafka-2.1.0/src/rdkafka_request.h | 463 + .../src/rdkafka_roundrobin_assignor.c | 123 + .../lib/librdkafka-2.1.0/src/rdkafka_sasl.c | 522 ++ .../lib/librdkafka-2.1.0/src/rdkafka_sasl.h | 63 + .../lib/librdkafka-2.1.0/src/rdkafka_sasl_cyrus.c | 720 ++ .../lib/librdkafka-2.1.0/src/rdkafka_sasl_int.h | 89 + .../src/rdkafka_sasl_oauthbearer.c | 1825 ++++ .../src/rdkafka_sasl_oauthbearer.h | 52 + .../src/rdkafka_sasl_oauthbearer_oidc.c | 604 ++ .../src/rdkafka_sasl_oauthbearer_oidc.h | 37 + .../lib/librdkafka-2.1.0/src/rdkafka_sasl_plain.c | 142 + .../lib/librdkafka-2.1.0/src/rdkafka_sasl_scram.c | 973 ++ .../lib/librdkafka-2.1.0/src/rdkafka_sasl_win32.c | 548 ++ .../lib/librdkafka-2.1.0/src/rdkafka_ssl.c | 1841 ++++ .../lib/librdkafka-2.1.0/src/rdkafka_ssl.h | 57 + .../librdkafka-2.1.0/src/rdkafka_sticky_assignor.c | 3428 +++++++ .../librdkafka-2.1.0/src/rdkafka_subscription.c | 278 + .../lib/librdkafka-2.1.0/src/rdkafka_timer.c | 384 + .../lib/librdkafka-2.1.0/src/rdkafka_timer.h | 114 + .../lib/librdkafka-2.1.0/src/rdkafka_topic.c | 1900 ++++ .../lib/librdkafka-2.1.0/src/rdkafka_topic.h | 311 + .../lib/librdkafka-2.1.0/src/rdkafka_transport.c | 1295 +++ .../lib/librdkafka-2.1.0/src/rdkafka_transport.h | 94 + .../librdkafka-2.1.0/src/rdkafka_transport_int.h | 100 + .../lib/librdkafka-2.1.0/src/rdkafka_txnmgr.c | 3249 +++++++ .../lib/librdkafka-2.1.0/src/rdkafka_txnmgr.h | 171 + .../lib/librdkafka-2.1.0/src/rdkafka_zstd.c | 226 + .../lib/librdkafka-2.1.0/src/rdkafka_zstd.h | 57 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.c | 546 ++ src/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.h | 421 + 
src/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.c | 89 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.h | 41 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.c | 487 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.h | 487 + .../lib/librdkafka-2.1.0/src/rdmurmur2.c | 167 + .../lib/librdkafka-2.1.0/src/rdmurmur2.h | 35 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdports.c | 61 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdports.h | 38 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdposix.h | 250 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.c | 70 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.h | 43 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.c | 156 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.h | 43 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdsignal.h | 57 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.c | 629 ++ src/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.h | 93 + .../lib/librdkafka-2.1.0/src/rdsysqueue.h | 404 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdtime.h | 309 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdtypes.h | 86 + .../lib/librdkafka-2.1.0/src/rdunittest.c | 529 ++ .../lib/librdkafka-2.1.0/src/rdunittest.h | 230 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.c | 134 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.h | 165 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdwin32.h | 382 + src/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.c | 1187 +++ src/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.h | 372 + src/fluent-bit/lib/librdkafka-2.1.0/src/regexp.c | 1347 +++ src/fluent-bit/lib/librdkafka-2.1.0/src/regexp.h | 41 + src/fluent-bit/lib/librdkafka-2.1.0/src/snappy.c | 1866 ++++ src/fluent-bit/lib/librdkafka-2.1.0/src/snappy.h | 62 + .../lib/librdkafka-2.1.0/src/snappy_compat.h | 138 + .../librdkafka-2.1.0/src/statistics_schema.json | 444 + .../lib/librdkafka-2.1.0/src/tinycthread.c | 932 ++ .../lib/librdkafka-2.1.0/src/tinycthread.h | 503 ++ .../lib/librdkafka-2.1.0/src/tinycthread_extra.c | 175 + 
.../lib/librdkafka-2.1.0/src/tinycthread_extra.h | 208 + .../lib/librdkafka-2.1.0/src/win32_config.h | 58 + .../lib/librdkafka-2.1.0/tests/.gitignore | 15 + .../lib/librdkafka-2.1.0/tests/0000-unittests.c | 72 + .../lib/librdkafka-2.1.0/tests/0001-multiobj.c | 98 + .../lib/librdkafka-2.1.0/tests/0002-unkpart.c | 244 + .../lib/librdkafka-2.1.0/tests/0003-msgmaxsize.c | 173 + .../lib/librdkafka-2.1.0/tests/0004-conf.c | 865 ++ .../lib/librdkafka-2.1.0/tests/0005-order.c | 133 + .../lib/librdkafka-2.1.0/tests/0006-symbols.c | 163 + .../lib/librdkafka-2.1.0/tests/0007-autotopic.c | 136 + .../lib/librdkafka-2.1.0/tests/0008-reqacks.c | 179 + .../lib/librdkafka-2.1.0/tests/0009-mock_cluster.c | 99 + .../librdkafka-2.1.0/tests/0011-produce_batch.c | 576 ++ .../librdkafka-2.1.0/tests/0012-produce_consume.c | 537 ++ .../lib/librdkafka-2.1.0/tests/0013-null-msgs.c | 473 + .../librdkafka-2.1.0/tests/0014-reconsume-191.c | 512 ++ .../lib/librdkafka-2.1.0/tests/0015-offset_seeks.c | 172 + .../librdkafka-2.1.0/tests/0016-client_swname.c | 166 + .../lib/librdkafka-2.1.0/tests/0017-compression.c | 142 + .../lib/librdkafka-2.1.0/tests/0018-cgrp_term.c | 332 + .../lib/librdkafka-2.1.0/tests/0019-list_groups.c | 289 + .../lib/librdkafka-2.1.0/tests/0020-destroy_hang.c | 162 + .../lib/librdkafka-2.1.0/tests/0021-rkt_destroy.c | 71 + .../librdkafka-2.1.0/tests/0022-consume_batch.c | 212 + .../lib/librdkafka-2.1.0/tests/0025-timers.c | 147 + .../librdkafka-2.1.0/tests/0026-consume_pause.c | 541 ++ .../librdkafka-2.1.0/tests/0028-long_topicnames.c | 79 + .../librdkafka-2.1.0/tests/0029-assign_offset.c | 198 + .../librdkafka-2.1.0/tests/0030-offset_commit.c | 589 ++ .../lib/librdkafka-2.1.0/tests/0031-get_offsets.c | 119 + .../librdkafka-2.1.0/tests/0033-regex_subscribe.c | 509 ++ .../lib/librdkafka-2.1.0/tests/0034-offset_reset.c | 377 + .../lib/librdkafka-2.1.0/tests/0035-api_version.c | 73 + .../librdkafka-2.1.0/tests/0036-partial_fetch.c | 86 + .../tests/0037-destroy_hang_local.c | 85 
+ .../lib/librdkafka-2.1.0/tests/0038-performance.c | 120 + .../lib/librdkafka-2.1.0/tests/0039-event.c | 284 + .../lib/librdkafka-2.1.0/tests/0040-io_event.c | 251 + .../librdkafka-2.1.0/tests/0041-fetch_max_bytes.c | 96 + .../lib/librdkafka-2.1.0/tests/0042-many_topics.c | 252 + .../librdkafka-2.1.0/tests/0043-no_connection.c | 77 + .../librdkafka-2.1.0/tests/0044-partition_cnt.c | 93 + .../librdkafka-2.1.0/tests/0045-subscribe_update.c | 459 + .../lib/librdkafka-2.1.0/tests/0046-rkt_cache.c | 65 + .../tests/0047-partial_buf_tmout.c | 97 + .../lib/librdkafka-2.1.0/tests/0048-partitioner.c | 283 + .../tests/0049-consume_conn_close.c | 162 + .../librdkafka-2.1.0/tests/0050-subscribe_adds.c | 124 + .../lib/librdkafka-2.1.0/tests/0051-assign_adds.c | 125 + .../librdkafka-2.1.0/tests/0052-msg_timestamps.c | 220 + .../lib/librdkafka-2.1.0/tests/0053-stats_cb.cpp | 535 ++ .../librdkafka-2.1.0/tests/0054-offset_time.cpp | 236 + .../librdkafka-2.1.0/tests/0055-producer_latency.c | 366 + .../tests/0056-balanced_group_mt.c | 311 + .../librdkafka-2.1.0/tests/0057-invalid_topic.cpp | 112 + .../lib/librdkafka-2.1.0/tests/0058-log.cpp | 123 + .../lib/librdkafka-2.1.0/tests/0059-bsearch.cpp | 237 + .../lib/librdkafka-2.1.0/tests/0060-op_prio.cpp | 163 + .../librdkafka-2.1.0/tests/0061-consumer_lag.cpp | 275 + .../lib/librdkafka-2.1.0/tests/0062-stats_event.c | 126 + .../lib/librdkafka-2.1.0/tests/0063-clusterid.cpp | 180 + .../lib/librdkafka-2.1.0/tests/0064-interceptors.c | 481 + .../lib/librdkafka-2.1.0/tests/0065-yield.cpp | 140 + .../lib/librdkafka-2.1.0/tests/0066-plugins.cpp | 129 + .../librdkafka-2.1.0/tests/0067-empty_topic.cpp | 148 + .../librdkafka-2.1.0/tests/0068-produce_timeout.c | 138 + .../tests/0069-consumer_add_parts.c | 123 + .../lib/librdkafka-2.1.0/tests/0070-null_empty.cpp | 197 + .../lib/librdkafka-2.1.0/tests/0072-headers_ut.c | 448 + .../lib/librdkafka-2.1.0/tests/0073-headers.c | 381 + .../lib/librdkafka-2.1.0/tests/0074-producev.c | 87 + 
.../lib/librdkafka-2.1.0/tests/0075-retry.c | 252 + .../librdkafka-2.1.0/tests/0076-produce_retry.c | 350 + .../lib/librdkafka-2.1.0/tests/0077-compaction.c | 357 + .../lib/librdkafka-2.1.0/tests/0078-c_from_cpp.cpp | 96 + .../lib/librdkafka-2.1.0/tests/0079-fork.c | 93 + .../lib/librdkafka-2.1.0/tests/0080-admin_ut.c | 2535 ++++++ .../lib/librdkafka-2.1.0/tests/0081-admin.c | 3797 ++++++++ .../tests/0082-fetch_max_bytes.cpp | 133 + .../lib/librdkafka-2.1.0/tests/0083-cb_event.c | 228 + .../librdkafka-2.1.0/tests/0084-destroy_flags.c | 211 + .../lib/librdkafka-2.1.0/tests/0085-headers.cpp | 388 + .../lib/librdkafka-2.1.0/tests/0086-purge.c | 334 + .../tests/0088-produce_metadata_timeout.c | 162 + .../tests/0089-max_poll_interval.c | 358 + .../lib/librdkafka-2.1.0/tests/0090-idempotence.c | 172 + .../tests/0091-max_poll_interval_timeout.c | 297 + .../lib/librdkafka-2.1.0/tests/0092-mixed_msgver.c | 97 + .../lib/librdkafka-2.1.0/tests/0093-holb.c | 197 + .../tests/0094-idempotence_msg_timeout.c | 230 + .../tests/0095-all_brokers_down.cpp | 122 + .../lib/librdkafka-2.1.0/tests/0097-ssl_verify.cpp | 466 + .../librdkafka-2.1.0/tests/0098-consumer-txn.cpp | 1218 +++ .../librdkafka-2.1.0/tests/0099-commit_metadata.c | 189 + .../tests/0100-thread_interceptors.cpp | 195 + .../tests/0101-fetch-from-follower.cpp | 446 + .../tests/0102-static_group_rebalance.c | 535 ++ .../lib/librdkafka-2.1.0/tests/0103-transactions.c | 1297 +++ .../tests/0104-fetch_from_follower_mock.c | 617 ++ .../tests/0105-transactions_mock.c | 3926 ++++++++ .../tests/0106-cgrp_sess_timeout.c | 300 + .../librdkafka-2.1.0/tests/0107-topic_recreate.c | 259 + .../tests/0109-auto_create_topics.cpp | 218 + .../lib/librdkafka-2.1.0/tests/0110-batch_size.cpp | 183 + .../tests/0111-delay_create_topics.cpp | 127 + .../tests/0112-assign_unknown_part.c | 98 + .../tests/0113-cooperative_rebalance.cpp | 3170 +++++++ .../tests/0114-sticky_partitioning.cpp | 176 + .../librdkafka-2.1.0/tests/0115-producer_auth.cpp | 179 
+ .../tests/0116-kafkaconsumer_close.cpp | 214 + .../lib/librdkafka-2.1.0/tests/0117-mock_errors.c | 324 + .../librdkafka-2.1.0/tests/0118-commit_rebalance.c | 121 + .../librdkafka-2.1.0/tests/0119-consumer_auth.cpp | 148 + .../tests/0120-asymmetric_subscription.c | 183 + .../lib/librdkafka-2.1.0/tests/0121-clusterid.c | 118 + .../tests/0122-buffer_cleaning_after_rebalance.c | 226 + .../tests/0123-connections_max_idle.c | 98 + .../tests/0124-openssl_invalid_engine.c | 69 + .../librdkafka-2.1.0/tests/0125-immediate_flush.c | 78 + .../librdkafka-2.1.0/tests/0126-oauthbearer_oidc.c | 213 + .../tests/0128-sasl_callback_queue.cpp | 125 + .../tests/0129-fetch_aborted_msgs.c | 78 + .../librdkafka-2.1.0/tests/0130-store_offsets.c | 127 + .../librdkafka-2.1.0/tests/0131-connect_timeout.c | 81 + .../tests/0132-strategy_ordering.c | 171 + .../lib/librdkafka-2.1.0/tests/0133-ssl_keys.c | 113 + .../lib/librdkafka-2.1.0/tests/0134-ssl_provider.c | 92 + .../tests/0135-sasl_credentials.cpp | 143 + .../lib/librdkafka-2.1.0/tests/0136-resolve_cb.c | 181 + .../tests/0137-barrier_batch_consume.c | 608 ++ .../lib/librdkafka-2.1.0/tests/0138-admin_mock.c | 189 + .../lib/librdkafka-2.1.0/tests/1000-unktopic.c | 164 + .../lib/librdkafka-2.1.0/tests/8000-idle.cpp | 60 + .../lib/librdkafka-2.1.0/tests/CMakeLists.txt | 154 + .../librdkafka-2.1.0/tests/LibrdkafkaTestApp.py | 256 + src/fluent-bit/lib/librdkafka-2.1.0/tests/Makefile | 182 + .../lib/librdkafka-2.1.0/tests/README.md | 505 ++ .../lib/librdkafka-2.1.0/tests/autotest.sh | 33 + .../lib/librdkafka-2.1.0/tests/backtrace.gdb | 30 + .../librdkafka-2.1.0/tests/broker_version_tests.py | 297 + .../lib/librdkafka-2.1.0/tests/buildbox.sh | 17 + .../tests/cleanup-checker-tests.sh | 20 + .../lib/librdkafka-2.1.0/tests/cluster_testing.py | 183 + .../librdkafka-2.1.0/tests/delete-test-topics.sh | 56 + .../librdkafka-2.1.0/tests/fixtures/ssl/.gitignore | 11 + .../librdkafka-2.1.0/tests/fixtures/ssl/Makefile | 8 + 
.../librdkafka-2.1.0/tests/fixtures/ssl/README.md | 13 + .../tests/fixtures/ssl/client.keystore.p12 | Bin 0 -> 4345 bytes .../tests/fixtures/ssl/client2.certificate.pem | 109 + .../tests/fixtures/ssl/client2.key | 34 + .../tests/fixtures/ssl/create_keys.sh | 93 + .../lib/librdkafka-2.1.0/tests/fuzzers/.gitignore | 1 + .../lib/librdkafka-2.1.0/tests/fuzzers/Makefile | 12 + .../lib/librdkafka-2.1.0/tests/fuzzers/README.md | 31 + .../librdkafka-2.1.0/tests/fuzzers/fuzz_regex.c | 74 + .../lib/librdkafka-2.1.0/tests/fuzzers/helpers.h | 90 + .../lib/librdkafka-2.1.0/tests/gen-ssl-certs.sh | 165 + .../tests/interactive_broker_version.py | 363 + .../tests/interceptor_test/.gitignore | 1 + .../tests/interceptor_test/CMakeLists.txt | 16 + .../tests/interceptor_test/Makefile | 22 + .../tests/interceptor_test/interceptor_test.c | 314 + .../tests/interceptor_test/interceptor_test.h | 54 + .../lib/librdkafka-2.1.0/tests/java/.gitignore | 1 + .../tests/java/IncrementalRebalanceCli.java | 97 + .../lib/librdkafka-2.1.0/tests/java/Makefile | 12 + .../librdkafka-2.1.0/tests/java/Murmur2Cli.java | 46 + .../lib/librdkafka-2.1.0/tests/java/README.md | 14 + .../tests/java/TransactionProducerCli.java | 162 + .../lib/librdkafka-2.1.0/tests/java/run-class.sh | 11 + .../librdkafka-2.1.0/tests/librdkafka.suppressions | 483 + .../lib/librdkafka-2.1.0/tests/lz4_manual_test.sh | 59 + .../tests/multi-broker-version-test.sh | 50 + .../lib/librdkafka-2.1.0/tests/parse-refcnt.sh | 43 + .../lib/librdkafka-2.1.0/tests/performance_plot.py | 115 + .../librdkafka-2.1.0/tests/plugin_test/Makefile | 19 + .../tests/plugin_test/plugin_test.c | 58 + .../lib/librdkafka-2.1.0/tests/requirements.txt | 2 + .../librdkafka-2.1.0/tests/run-consumer-tests.sh | 16 + .../librdkafka-2.1.0/tests/run-producer-tests.sh | 16 + .../lib/librdkafka-2.1.0/tests/run-test.sh | 140 + src/fluent-bit/lib/librdkafka-2.1.0/tests/rusage.c | 249 + .../lib/librdkafka-2.1.0/tests/sasl_test.py | 328 + 
.../lib/librdkafka-2.1.0/tests/scenarios/README.md | 6 + .../lib/librdkafka-2.1.0/tests/scenarios/ak23.json | 6 + .../librdkafka-2.1.0/tests/scenarios/default.json | 5 + .../tests/scenarios/noautocreate.json | 5 + src/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.c | 801 ++ src/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.h | 85 + .../lib/librdkafka-2.1.0/tests/sockem_ctrl.c | 145 + .../lib/librdkafka-2.1.0/tests/sockem_ctrl.h | 61 + src/fluent-bit/lib/librdkafka-2.1.0/tests/test.c | 6960 +++++++++++++++ .../lib/librdkafka-2.1.0/tests/test.conf.example | 27 + src/fluent-bit/lib/librdkafka-2.1.0/tests/test.h | 936 ++ .../lib/librdkafka-2.1.0/tests/testcpp.cpp | 126 + .../lib/librdkafka-2.1.0/tests/testcpp.h | 360 + .../lib/librdkafka-2.1.0/tests/testshared.h | 402 + .../lib/librdkafka-2.1.0/tests/tools/README.md | 4 + .../librdkafka-2.1.0/tests/tools/stats/README.md | 21 + .../librdkafka-2.1.0/tests/tools/stats/filter.jq | 42 + .../librdkafka-2.1.0/tests/tools/stats/graph.py | 150 + .../tests/tools/stats/requirements.txt | 3 + .../librdkafka-2.1.0/tests/tools/stats/to_csv.py | 124 + .../lib/librdkafka-2.1.0/tests/until-fail.sh | 87 + .../librdkafka-2.1.0/tests/xxxx-assign_partition.c | 122 + .../lib/librdkafka-2.1.0/tests/xxxx-metadata.cpp | 159 + src/fluent-bit/lib/librdkafka-2.1.0/vcpkg.json | 23 + .../lib/librdkafka-2.1.0/win32/.gitignore | 109 + .../lib/librdkafka-2.1.0/win32/README.md | 5 + .../lib/librdkafka-2.1.0/win32/build-package.bat | 3 + .../lib/librdkafka-2.1.0/win32/build.bat | 19 + .../lib/librdkafka-2.1.0/win32/common.vcxproj | 84 + .../lib/librdkafka-2.1.0/win32/install-openssl.ps1 | 33 + .../interceptor_test/interceptor_test.vcxproj | 87 + .../win32/librdkafka.autopkg.template | 55 + .../win32/librdkafka.master.testing.targets | 13 + .../lib/librdkafka-2.1.0/win32/librdkafka.sln | 226 + .../lib/librdkafka-2.1.0/win32/librdkafka.vcxproj | 258 + .../win32/librdkafkacpp/librdkafkacpp.vcxproj | 104 + .../lib/librdkafka-2.1.0/win32/msbuild.ps1 | 15 + 
.../openssl_engine_example.vcxproj | 132 + .../lib/librdkafka-2.1.0/win32/package-zip.ps1 | 46 + .../win32/packages/repositories.config | 4 + .../lib/librdkafka-2.1.0/win32/push-package.bat | 4 + .../rdkafka_complex_consumer_example_cpp.vcxproj | 67 + .../win32/rdkafka_example/rdkafka_example.vcxproj | 97 + .../rdkafka_performance.vcxproj | 97 + .../lib/librdkafka-2.1.0/win32/setup-msys2.ps1 | 31 + .../lib/librdkafka-2.1.0/win32/setup-vcpkg.ps1 | 13 + .../lib/librdkafka-2.1.0/win32/tests/.gitignore | 3 + .../librdkafka-2.1.0/win32/tests/test.conf.example | 25 + .../lib/librdkafka-2.1.0/win32/tests/tests.vcxproj | 237 + .../win_ssl_cert_store/win_ssl_cert_store.vcxproj | 132 + .../lib/librdkafka-2.1.0/win32/wingetopt.c | 564 ++ .../lib/librdkafka-2.1.0/win32/wingetopt.h | 100 + .../lib/librdkafka-2.1.0/win32/wintime.h | 33 + 635 files changed, 237653 insertions(+) create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/.clang-format-cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/.dir-locals.el create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/.formatignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/.gdbmacros create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/.github/ISSUE_TEMPLATE create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/.gitignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/.semaphore/project.yml create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/.semaphore/semaphore.yml create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/CHANGELOG.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/CMakeLists.txt create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/CODE_OF_CONDUCT.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/CONFIGURATION.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/CONTRIBUTING.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/Doxyfile create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/INTRODUCTION.md create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/LICENSE create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.cjson create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.crc32c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.fnv1a create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.hdrhistogram create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.lz4 create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.murmur2 create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.pycrc create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.queue create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.regexp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.snappy create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.tinycthread create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/LICENSE.wingetopt create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/LICENSES.txt create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/Makefile create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/README.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/README.win32 create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/STATISTICS.md create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/configure create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/configure.self create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/debian/.gitignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/debian/changelog create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/debian/compat create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/debian/control create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/debian/copyright create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/debian/gbp.conf create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka++1.install create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka-dev.examples create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka-dev.install create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.docs create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.install create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.symbols create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/debian/rules create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/debian/source/format create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/debian/watch create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/dev-conf.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/.gitignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/CMakeLists.txt create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/Makefile create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/README.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/alter_consumer_group_offsets.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/consumer.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/delete_records.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/describe_consumer_groups.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/globals.json create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/idempotent_producer.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/kafkatest_verifiable_client.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_group_offsets.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_groups.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/misc.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/openssl_engine_example.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/producer.c create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/examples/producer.cpp create mode 
100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_consume_batch.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_performance.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/transactions-older-broker.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/transactions.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/examples/win_ssl_cert_store.cpp create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/lds-gen.py create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mainpage.doxy create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/.gitignore create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/mklove/Makefile.base create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.atomics create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.base create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.builtin create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.cc create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.cxx create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.fileversion create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.gitversion create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.good_cflags create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.host create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.lib create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libcurl create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libsasl2 create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libssl create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libzstd create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.parseversion create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.pic create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.socket create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.zlib create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/README.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/RELEASE.md create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/alpine/build-alpine.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/archlinux/PKGBUILD create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Config.cmake.in create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/FindLZ4.cmake create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/FindZSTD.cmake create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/LICENSE.FindZstd create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/README.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/config.h.in create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/parseversion.cmake create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/rdkafka.pc.in create mode 
100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/atomic_32_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/atomic_64_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/c11threads_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/crc32c_hw_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/dlopen_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/libsasl2_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_darwin_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_freebsd_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_gnu_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/rand_r_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/rdkafka_setup.cmake create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/regex_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/strndup_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/sync_32_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/sync_64_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/README.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/check_features.c create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-deb.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-packages.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-rpm.sh create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/.gitignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/changelog create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/compat create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/control create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/copyright create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/docs create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/gbp.conf create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.dirs create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.examples create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.install create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.substvars create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka.dsc create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1-dbg.substvars create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.dirs create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.install create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.postinst.debhelper create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.postrm.debhelper create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.symbols create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/rules create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/source/format create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/watch create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/get_version.py create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/homebrew/README.md create 
mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/homebrew/brew-update-pr.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/configure-build-msys2-mingw-static.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/configure-build-msys2-mingw.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/run-tests.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/semaphoreci-build.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/travis-before-install.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/.gitignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/README.md create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/artifact.py create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/cleanup-s3.py create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/nuget.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/nugetpackage.py create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/packaging.py create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/push-to-nuget.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/release.py create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/requirements.txt 
create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/staticpackage.py create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.nuspec create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.props create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.targets create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/zfile/__init__.py create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/zfile/zfile.py create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/.gitignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/Makefile create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/README.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/el7-x86_64.cfg create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/librdkafka.spec create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/mock-on-docker.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/.gitignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/Makefile create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/README.md create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/run-test.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test-on-docker.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test.cpp create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-deb-package.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-debian.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-manylinux.sh create mode 100755 
src/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-release-artifacts.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/distro-build.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/gh-release-checksums.py create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/rdutcoverage.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/requirements.txt create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/style-format.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/service.yml create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/CMakeLists.txt create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/ConfImpl.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/ConsumerImpl.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/HandleImpl.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/HeadersImpl.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/KafkaConsumerImpl.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/Makefile create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/MessageImpl.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/MetadataImpl.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/ProducerImpl.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/QueueImpl.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/README.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/RdKafka.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/TopicImpl.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/TopicPartitionImpl.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/rdkafkacpp.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src-cpp/rdkafkacpp_int.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/CMakeLists.txt 
create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/Makefile create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.h create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/src/generate_proto.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/librdkafka_cgrp_synch.png create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/lz4.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/lz4.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame_static.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/queue.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rd.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdatomic.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdavg.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rddl.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rddl.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdendian.h create 
mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdfloat.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdinterval.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_background.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.h create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_confval.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_int.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.h create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata_cache.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_cgrp.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_handlers.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_int.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgbatch.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_reader.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_writer.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_proto.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_protocol.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.h create mode 
100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_range_assignor.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_roundrobin_assignor.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_cyrus.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_int.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_plain.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_scram.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_win32.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sticky_assignor.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_subscription.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.h create mode 
100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport_int.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdports.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdports.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdposix.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdsignal.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdsysqueue.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdtime.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdtypes.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.h create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdwin32.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/regexp.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/regexp.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/snappy.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/snappy.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/snappy_compat.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/statistics_schema.json create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/src/win32_config.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/.gitignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0000-unittests.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0001-multiobj.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0002-unkpart.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0003-msgmaxsize.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0004-conf.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0005-order.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0006-symbols.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0007-autotopic.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0008-reqacks.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0009-mock_cluster.c create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/tests/0011-produce_batch.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0012-produce_consume.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0013-null-msgs.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0014-reconsume-191.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0015-offset_seeks.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0016-client_swname.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0017-compression.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0018-cgrp_term.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0019-list_groups.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0020-destroy_hang.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0021-rkt_destroy.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0022-consume_batch.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0025-timers.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0026-consume_pause.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0028-long_topicnames.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0029-assign_offset.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0030-offset_commit.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0031-get_offsets.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0033-regex_subscribe.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0034-offset_reset.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0035-api_version.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0036-partial_fetch.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0037-destroy_hang_local.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0038-performance.c create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/tests/0039-event.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0040-io_event.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0041-fetch_max_bytes.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0042-many_topics.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0043-no_connection.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0044-partition_cnt.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0045-subscribe_update.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0046-rkt_cache.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0047-partial_buf_tmout.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0048-partitioner.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0049-consume_conn_close.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0050-subscribe_adds.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0051-assign_adds.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0052-msg_timestamps.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0053-stats_cb.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0054-offset_time.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0055-producer_latency.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0056-balanced_group_mt.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0057-invalid_topic.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0058-log.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0059-bsearch.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0060-op_prio.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0061-consumer_lag.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0062-stats_event.c create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/tests/0063-clusterid.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0064-interceptors.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0065-yield.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0066-plugins.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0067-empty_topic.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0068-produce_timeout.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0069-consumer_add_parts.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0070-null_empty.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0072-headers_ut.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0073-headers.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0074-producev.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0075-retry.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0076-produce_retry.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0077-compaction.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0078-c_from_cpp.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0079-fork.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0080-admin_ut.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0081-admin.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0082-fetch_max_bytes.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0083-cb_event.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0084-destroy_flags.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0085-headers.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0086-purge.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0088-produce_metadata_timeout.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0089-max_poll_interval.c create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/tests/0090-idempotence.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0091-max_poll_interval_timeout.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0092-mixed_msgver.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0093-holb.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0094-idempotence_msg_timeout.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0095-all_brokers_down.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0097-ssl_verify.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0098-consumer-txn.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0099-commit_metadata.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0100-thread_interceptors.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0101-fetch-from-follower.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0102-static_group_rebalance.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0103-transactions.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0104-fetch_from_follower_mock.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0105-transactions_mock.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0106-cgrp_sess_timeout.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0107-topic_recreate.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0109-auto_create_topics.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0110-batch_size.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0111-delay_create_topics.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0112-assign_unknown_part.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0113-cooperative_rebalance.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0114-sticky_partitioning.cpp create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/tests/0115-producer_auth.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0116-kafkaconsumer_close.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0117-mock_errors.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0118-commit_rebalance.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0119-consumer_auth.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0120-asymmetric_subscription.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0121-clusterid.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0122-buffer_cleaning_after_rebalance.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0123-connections_max_idle.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0124-openssl_invalid_engine.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0125-immediate_flush.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0126-oauthbearer_oidc.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0128-sasl_callback_queue.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0129-fetch_aborted_msgs.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0130-store_offsets.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0131-connect_timeout.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0132-strategy_ordering.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0133-ssl_keys.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0134-ssl_provider.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0135-sasl_credentials.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0136-resolve_cb.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0137-barrier_batch_consume.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/0138-admin_mock.c create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/tests/1000-unktopic.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/8000-idle.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/CMakeLists.txt create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/LibrdkafkaTestApp.py create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/Makefile create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/README.md create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/autotest.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/backtrace.gdb create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/broker_version_tests.py create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/buildbox.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/cleanup-checker-tests.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/cluster_testing.py create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/delete-test-topics.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/.gitignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/Makefile create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/README.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client.keystore.p12 create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.certificate.pem create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.key create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/create_keys.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/.gitignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/Makefile create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/README.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/fuzz_regex.c create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/helpers.h create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/gen-ssl-certs.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/interactive_broker_version.py create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/.gitignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/CMakeLists.txt create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/Makefile create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/java/.gitignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/java/IncrementalRebalanceCli.java create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/java/Makefile create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/java/Murmur2Cli.java create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/java/README.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/java/TransactionProducerCli.java create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/java/run-class.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/librdkafka.suppressions create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/lz4_manual_test.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/multi-broker-version-test.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/parse-refcnt.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/performance_plot.py create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/Makefile create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/plugin_test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/requirements.txt create mode 100755 
src/fluent-bit/lib/librdkafka-2.1.0/tests/run-consumer-tests.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/run-producer-tests.sh create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/run-test.sh create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/rusage.c create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/sasl_test.py create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/README.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/ak23.json create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/default.json create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/noautocreate.json create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/test.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/test.conf.example create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/test.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/testshared.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/tools/README.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/README.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/filter.jq create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/graph.py create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/requirements.txt create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/to_csv.py create mode 100755 src/fluent-bit/lib/librdkafka-2.1.0/tests/until-fail.sh 
create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-assign_partition.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-metadata.cpp create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/vcpkg.json create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/.gitignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/README.md create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/build-package.bat create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/build.bat create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/common.vcxproj create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/install-openssl.ps1 create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/interceptor_test/interceptor_test.vcxproj create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.autopkg.template create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.master.testing.targets create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.sln create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.vcxproj create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafkacpp/librdkafkacpp.vcxproj create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/msbuild.ps1 create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/openssl_engine_example/openssl_engine_example.vcxproj create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/package-zip.ps1 create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/packages/repositories.config create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/push-package.bat create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_example/rdkafka_example.vcxproj create mode 100644 
src/fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_performance/rdkafka_performance.vcxproj create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/setup-msys2.ps1 create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/setup-vcpkg.ps1 create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/tests/.gitignore create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/tests/test.conf.example create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/tests/tests.vcxproj create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/wingetopt.c create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/wingetopt.h create mode 100644 src/fluent-bit/lib/librdkafka-2.1.0/win32/wintime.h (limited to 'src/fluent-bit/lib/librdkafka-2.1.0') diff --git a/src/fluent-bit/lib/librdkafka-2.1.0/.clang-format-cpp b/src/fluent-bit/lib/librdkafka-2.1.0/.clang-format-cpp new file mode 100644 index 000000000..1e102adfe --- /dev/null +++ b/src/fluent-bit/lib/librdkafka-2.1.0/.clang-format-cpp @@ -0,0 +1,103 @@ +--- +BasedOnStyle: Google +Language: Cpp +AccessModifierOffset: -1 +AlignAfterOpenBracket: Align +AlignConsecutiveMacros: true +AlignConsecutiveAssignments: true +AlignConsecutiveDeclarations: false +AlignEscapedNewlines: Right +AlignOperands: true +AlignTrailingComments: true +AllowAllArgumentsOnNextLine: true +AllowAllConstructorInitializersOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortBlocksOnASingleLine: Never +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: None +AllowShortLambdasOnASingleLine: All +AllowShortIfStatementsOnASingleLine: Never +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: Yes +BinPackArguments: true +BinPackParameters: false 
+BreakBeforeBinaryOperators: None +BreakBeforeBraces: Custom +BreakBeforeInheritanceComma: false +BreakInheritanceList: BeforeColon +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BreakConstructorInitializers: AfterColon +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: true +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DeriveLineEnding: true +DerivePointerAlignment: false +DisableFormat: false +ExperimentalAutoDetectBinPacking: false +FixNamespaceComments: true +IncludeBlocks: Preserve +IncludeIsMainRegex: '([-_](test|unittest))?$' +IncludeIsMainSourceRegex: '' +IndentCaseLabels: false +IndentGotoLabels: true +IndentPPDirectives: None +IndentWidth: 2 +IndentWrappedFunctionNames: false +JavaScriptQuotes: Leave +JavaScriptWrapImports: true +KeepEmptyLinesAtTheStartOfBlocks: false +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 3 +NamespaceIndentation: None +ObjCBinPackProtocolList: Never +ObjCBlockIndentWidth: 2 +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PenaltyBreakAssignment: 2 +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyBreakTemplateDeclaration: 10 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 200 +PointerAlignment: Right +ReflowComments: true +SortIncludes: false +SortUsingDeclarations: true +SpaceAfterCStyleCast: false +SpaceAfterLogicalNot: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeCpp11BracedList: true +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true +SpaceBeforeParens: ControlStatements +SpaceBeforeRangeBasedForLoopColon: true +SpaceInEmptyBlock: false +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 
+SpacesInAngles: false +SpacesInConditionalStatement: false +SpacesInContainerLiterals: false +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +SpaceBeforeSquareBrackets: false +Standard: Auto +TabWidth: 8 +UseCRLF: false +UseTab: Never +... + diff --git a/src/fluent-bit/lib/librdkafka-2.1.0/.dir-locals.el b/src/fluent-bit/lib/librdkafka-2.1.0/.dir-locals.el new file mode 100644 index 000000000..b8c8f1e74 --- /dev/null +++ b/src/fluent-bit/lib/librdkafka-2.1.0/.dir-locals.el @@ -0,0 +1,10 @@ +((nil + (compile-command . "LC_ALL=C make -C $(git rev-parse --show-toplevel) -kw -j")) + (c-mode + (c-file-style . "linux") + (tab-width . 8) + (indent-tabs-mode . nil)) +) + +(if (file-exists-p (concat (dir-locals-find-file "./") "TAGS")) + (visit-tags-table (concat (dir-locals-find-file "./") "TAGS"))) diff --git a/src/fluent-bit/lib/librdkafka-2.1.0/.formatignore b/src/fluent-bit/lib/librdkafka-2.1.0/.formatignore new file mode 100644 index 000000000..7d4a45c7b --- /dev/null +++ b/src/fluent-bit/lib/librdkafka-2.1.0/.formatignore @@ -0,0 +1,18 @@ +# Files to not check/fix coding style for. +# These files are imported from other sources and we want to maintain +# them in the original form to make future updates easier. 
+src/lz4.c +src/lz4.h +src/lz4frame.c +src/lz4frame.h +src/lz4hc.c +src/lz4hc.h +src/queue.h +src/crc32c.c +src/crc32c.h +src/snappy.c +src/snappy.h +src/snappy_compat.h +src/tinycthread.c +src/tinycthread.h +src/regexp.h diff --git a/src/fluent-bit/lib/librdkafka-2.1.0/.gdbmacros b/src/fluent-bit/lib/librdkafka-2.1.0/.gdbmacros new file mode 100644 index 000000000..a04366fd1 --- /dev/null +++ b/src/fluent-bit/lib/librdkafka-2.1.0/.gdbmacros @@ -0,0 +1,19 @@ + +# Print rd_kafka_msgq_t +define dump_msgq + set $rkmq = $arg0 + set $rkm = $rkmq.rkmq_msgs.tqh_first + set $exp_msgid = 0 + set $cnt = 0 + while $rkm != 0 + set $msgid = $rkm.rkm_u.producer.msgid + printf "#%d ((rd_kafka_msgq_t *)%p) msgid %llu\n", $cnt, $rkm, $msgid + if $exp_msgid != 0 && $exp_msgid != $msgid + printf " ^ WARNING: expected msgid %llu, not %llu\n", $exp_msgid, $msgid + end + set $exp_msgid = $msgid + 1 + set $rkm = $rkm.rkm_link.tqe_next + set $cnt++ + end +end + diff --git a/src/fluent-bit/lib/librdkafka-2.1.0/.github/ISSUE_TEMPLATE b/src/fluent-bit/lib/librdkafka-2.1.0/.github/ISSUE_TEMPLATE new file mode 100644 index 000000000..ed7b6165f --- /dev/null +++ b/src/fluent-bit/lib/librdkafka-2.1.0/.github/ISSUE_TEMPLATE @@ -0,0 +1,34 @@ +Read the FAQ first: https://github.com/edenhill/librdkafka/wiki/FAQ + +Do NOT create issues for questions, use the discussion forum: https://github.com/edenhill/librdkafka/discussions + + + +Description +=========== + + + +How to reproduce +================ + + + +**IMPORTANT**: Always try to reproduce the issue on the latest released version (see https://github.com/edenhill/librdkafka/releases), if it can't be reproduced on the latest version the issue has been fixed. + + +Checklist +========= + +**IMPORTANT**: We will close issues where the checklist has not been completed. 
+ +Please provide the following information: + + - [x] librdkafka version (release number or git tag): `` + - [ ] Apache Kafka version: `` + - [ ] librdkafka client configuration: `` + - [ ] Operating system: `` + - [ ] Provide logs (with `debug=..` as necessary) from librdkafka + - [ ] Provide broker log excerpts + - [ ] Critical issue + diff --git a/src/fluent-bit/lib/librdkafka-2.1.0/.gitignore b/src/fluent-bit/lib/librdkafka-2.1.0/.gitignore new file mode 100644 index 000000000..31c5061e3 --- /dev/null +++ b/src/fluent-bit/lib/librdkafka-2.1.0/.gitignore @@ -0,0 +1,33 @@ +config.h +config.log* +config.cache +Makefile.config +rdkafka*.pc +*~ +\#* +*.o +*.so +*.so.? +*.dylib +*.a +*.d +librdkafka*.lds +core +vgcore.* +*dSYM/ +*.offset +SOURCES +gmon.out +*.gz +*.tgz +*.bz2 +*.deb +*.rpm +staging-docs +tmp +stats*.json +test_report*.json +cov-int +gdbrun*.gdb +TAGS +vcpkg_installed diff --git a/src/fluent-bit/lib/librdkafka-2.1.0/.semaphore/project.yml b/src/fluent-bit/lib/librdkafka-2.1.0/.semaphore/project.yml new file mode 100644 index 000000000..4ba05ab89 --- /dev/null +++ b/src/fluent-bit/lib/librdkafka-2.1.0/.semaphore/project.yml @@ -0,0 +1,43 @@ +# This file is managed by ServiceBot plugin - Semaphore. The content in this file is created using a common +# template and configurations in service.yml. +# Modifications in this file will be overwritten by generated content in the nightly run. 
+# For more information, please refer to the page: +# https://confluentinc.atlassian.net/wiki/spaces/Foundations/pages/2871296194/Add+SemaphoreCI +apiVersion: v1alpha +kind: Project +metadata: + name: librdkafka + description: "" +spec: + visibility: private + repository: + url: git@github.com:confluentinc/librdkafka.git + run_on: + - tags + - branches + pipeline_file: .semaphore/semaphore.yml + integration_type: github_app + status: + pipeline_files: + - path: .semaphore/semaphore.yml + level: pipeline + whitelist: + branches: + - master + - /semaphore.*/ + - /dev_.*/ + - /feature\/.*/ + custom_permissions: true + debug_permissions: + - empty + - default_branch + - non_default_branch + - pull_request + - forked_pull_request + - tag + attach_permissions: + - default_branch + - non_default_branch + - pull_request + - forked_pull_request + - tag diff --git a/src/fluent-bit/lib/librdkafka-2.1.0/.semaphore/semaphore.yml b/src/fluent-bit/lib/librdkafka-2.1.0/.semaphore/semaphore.yml new file mode 100644 index 000000000..275bb76aa --- /dev/null +++ b/src/fluent-bit/lib/librdkafka-2.1.0/.semaphore/semaphore.yml @@ -0,0 +1,364 @@ +version: v1.0 +name: 'librdkafka build and release artifact pipeline' +agent: + machine: + type: s1-prod-macos-arm64 +global_job_config: + prologue: + commands: + - checkout + - mkdir artifacts + - mkdir dest +blocks: + - name: 'OSX arm64/m1' + dependencies: [] + task: + agent: + machine: + type: s1-prod-macos-arm64 + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-osx__arch-arm64__lnk-all + epilogue: + commands: + - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/' + jobs: + - name: 'Build' + commands: + - ./configure --install-deps --source-deps-only --enable-static --disable-lz4-ext --enable-strip + - make -j all examples check + - examples/rdkafka_example -X builtin.features + - otool -L src/librdkafka.dylib + - otool -L src-cpp/librdkafka++.dylib + - make -j -C tests 
build + - make -C tests run_local_quick + - DESTDIR="$PWD/dest" make install + - (cd dest && tar cvzf ../artifacts/librdkafka.tgz .) + + + - name: 'OSX x64' + dependencies: [] + task: + agent: + machine: + type: s1-prod-macos + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-osx__arch-x64__lnk-all + epilogue: + commands: + - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/' + jobs: + - name: 'Build' + commands: + - ./configure --install-deps --source-deps-only --enable-static --disable-lz4-ext --enable-strip + - make -j all examples check + - examples/rdkafka_example -X builtin.features + - otool -L src/librdkafka.dylib + - otool -L src-cpp/librdkafka++.dylib + - make -j -C tests build + - make -C tests run_local_quick + - DESTDIR="$PWD/dest" make install + - (cd dest && tar cvzf ../artifacts/librdkafka.tgz .) + + + - name: 'Style check' + dependencies: [] + skip: + # Skip for release tags, we don't want style checks + # to fail the release build. + when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-2 + jobs: + - name: 'Style check' + commands: + - sudo apt install -y clang-format-10 python3 python3-pip python3-setuptools + - python3 -m pip install -r packaging/tools/requirements.txt + - CLANG_FORMAT=clang-format-10 make style-check + + + - name: 'Build documentation' + dependencies: [] + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-2 + jobs: + - name: 'Generate documentation' + commands: + - sudo apt install -y doxygen graphviz + - make docs + - (cd staging-docs && tar cvzf ../artifacts/librdkafka-docs.tgz .) + - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/librdkafka-docs.tgz --destination artifacts/librdkafka-docs.tgz' + + + - name: 'Linux Ubuntu x64: source build' + dependencies: [] + skip: + # Skip for release tags, we don't want flaky CI tests + # to fail the release build. 
+ when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-2 + jobs: + - name: 'Build and integration tests' + commands: + - wget -O rapidjson-dev.deb https://launchpad.net/ubuntu/+archive/primary/+files/rapidjson-dev_1.1.0+dfsg2-3_all.deb + - sudo dpkg -i rapidjson-dev.deb + - python3 -m pip install -U pip + - python3 -m pip -V + - python3 -m pip install -r tests/requirements.txt + - ./configure --install-deps + # split these up + - ./packaging/tools/rdutcoverage.sh + - make copyright-check + - make -j all examples check + - echo "Verifying that CONFIGURATION.md does not have manual changes" + - git diff --exit-code CONFIGURATION.md + - examples/rdkafka_example -X builtin.features + - ldd src/librdkafka.so.1 + - ldd src-cpp/librdkafka++.so.1 + - make -j -C tests build + - make -C tests run_local_quick + - DESTDIR="$PWD/dest" make install + - (cd tests && python3 -m trivup.clusters.KafkaCluster --version 3.1.0 --cmd 'make quick') + + + - name: 'Linux x64: release artifact docker builds' + dependencies: [] + run: + when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-2 + epilogue: + commands: + - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/' + jobs: + - name: 'Build: centos6 glibc +gssapi' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-centos6__arch-x64__lnk-std__extra-gssapi + commands: + - packaging/tools/build-release-artifacts.sh quay.io/pypa/manylinux2010_x86_64 artifacts/librdkafka.tgz + + - name: 'Build: centos6 glibc' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-centos6__arch-x64__lnk-all + commands: + - packaging/tools/build-release-artifacts.sh --disable-gssapi quay.io/pypa/manylinux2010_x86_64 artifacts/librdkafka.tgz + + - name: 'Build: centos7 glibc +gssapi' + env_vars: + - name: ARTIFACT_KEY + value: 
p-librdkafka__plat-linux__dist-centos7__arch-x64__lnk-std__extra-gssapi + commands: + - packaging/tools/build-release-artifacts.sh quay.io/pypa/manylinux2014_x86_64 artifacts/librdkafka.tgz + + - name: 'Build: centos7 glibc' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-centos7__arch-x64__lnk-all + commands: + - packaging/tools/build-release-artifacts.sh --disable-gssapi quay.io/pypa/manylinux2014_x86_64 artifacts/librdkafka.tgz + + - name: 'Build: alpine musl +gssapi' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-alpine__arch-x64__lnk-std__extra-gssapi + commands: + - packaging/tools/build-release-artifacts.sh alpine:3.16 artifacts/librdkafka.tgz + + - name: 'Build: alpine musl' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-alpine__arch-x64__lnk-all + commands: + - packaging/tools/build-release-artifacts.sh --disable-gssapi alpine:3.16 artifacts/librdkafka.tgz + + + - name: 'Linux arm64: release artifact docker builds' + dependencies: [] + task: + agent: + machine: + type: s1-prod-ubuntu20-04-arm64-1 + epilogue: + commands: + - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/' + jobs: + - name: 'Build: centos7 glibc +gssapi' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-centos7__arch-arm64__lnk-std__extra-gssapi + commands: + - packaging/tools/build-release-artifacts.sh quay.io/pypa/manylinux2014_aarch64 artifacts/librdkafka.tgz + + - name: 'Build: centos7 glibc' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-centos7__arch-arm64__lnk-all + commands: + - packaging/tools/build-release-artifacts.sh --disable-gssapi quay.io/pypa/manylinux2014_aarch64 artifacts/librdkafka.tgz + + - name: 'Build: alpine musl +gssapi' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-alpine__arch-arm64__lnk-all__extra-gssapi + commands: + - 
packaging/tools/build-release-artifacts.sh alpine:3.16 artifacts/librdkafka.tgz + + - name: 'Build: alpine musl' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-linux__dist-alpine__arch-arm64__lnk-all + commands: + - packaging/tools/build-release-artifacts.sh --disable-gssapi alpine:3.16 artifacts/librdkafka.tgz + + + - name: 'Windows x64: MinGW-w64' + dependencies: [] + task: + agent: + machine: + type: s1-prod-windows + env_vars: + - name: CHERE_INVOKING + value: 'yes' + - name: MSYSTEM + value: UCRT64 + prologue: + commands: + - cache restore msys2-x64-${Env:ARTIFACT_KEY} + # Set up msys2 + - "& .\\win32\\setup-msys2.ps1" + - cache delete msys2-x64-${Env:ARTIFACT_KEY} + - cache store msys2-x64-${Env:ARTIFACT_KEY} c:/msys64 + epilogue: + commands: + - if ($env:SEMAPHORE_GIT_TAG_NAME -ne "") { artifact push workflow artifacts/ --destination artifacts/$Env:ARTIFACT_KEY/ } + jobs: + - name: 'Build: MinGW-w64 Dynamic' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-windows__dist-mingw__arch-x64__lnk-std + commands: + - C:\msys64\usr\bin\bash -lc './packaging/mingw-w64/semaphoreci-build.sh ./artifacts/librdkafka.tgz' + + - name: 'Build: MinGW-w64 Static' + env_vars: + - name: ARTIFACT_KEY + value: p-librdkafka__plat-windows__dist-mingw__arch-x64__lnk-static + commands: + - C:\msys64\usr\bin\bash -lc './packaging/mingw-w64/semaphoreci-build.sh --static ./artifacts/librdkafka.tgz' + + - name: 'Windows x64: Windows SDK 10.0 / MSVC v142 / VS 2019' + dependencies: [] + task: + agent: + machine: + type: s1-prod-windows + env_vars: + # Disable vcpkg telemetry + - name: VCPKG_DISABLE_METRICS + value: 'yes' + prologue: + commands: + # install vcpkg in the parent directory. + - pwd + - cd .. + # Restore vcpkg caches, if any. + - cache restore vcpkg-archives-$Env:ARTIFACT_KEY + # Setup vcpkg + - "& .\\librdkafka\\win32\\setup-vcpkg.ps1" + - cd librdkafka + - ..\vcpkg\vcpkg integrate install + # Install required packages. 
+ - ..\vcpkg\vcpkg --feature-flags=versions install --triplet $Env:triplet + - cd .. + - pwd + # Store vcpkg caches + - ls vcpkg/ + - echo $Env:VCPKG_ROOT + - cache delete vcpkg-archives-$Env:ARTIFACT_KEY + - cache store vcpkg-archives-$Env:ARTIFACT_KEY C:/Users/semaphore/AppData/Local/vcpkg/archives + - pwd + - cd librdkafka + epilogue: + commands: + - Get-ChildItem . -include *.dll -recurse + - Get-ChildItem . -include *.lib -recurse + - if ($env:SEMAPHORE_GIT_TAG_NAME -ne "") { artifact push workflow artifacts/ --destination artifacts/$Env:ARTIFACT_KEY/ } + jobs: + - name: 'Build: MSVC x64' + env_vars: + - name: triplet + value: x64-windows + - name: ARTIFACT_KEY + value: p-librdkafka__plat-windows__dist-msvc__arch-x64__lnk-std + commands: + - "& .\\win32\\msbuild.ps1 -platform x64" + - "& .\\win32\\package-zip.ps1 -platform x64" + - name: 'Build: MSVC x86' + env_vars: + - name: triplet + value: x86-windows + - name: ARTIFACT_KEY + value: p-librdkafka__plat-windows__dist-msvc__arch-x86__lnk-std + commands: + - "& .\\win32\\msbuild.ps1 -platform Win32" + - "& .\\win32\\package-zip.ps1 -platform Win32" + + - name: 'Packaging' + dependencies: + - 'Build documentation' + - 'OSX arm64/m1' + - 'OSX x64' + - 'Linux x64: release artifact docker builds' + - 'Linux arm64: release artifact docker builds' + - 'Windows x64: MinGW-w64' + - 'Windows x64: Windows SDK 10.0 / MSVC v142 / VS 2019' + run: + when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-2 + jobs: + - name: 'Build NuGet and static packages' + commands: + # Get all artifacts from previous jobs in this workflow/pipeline. + - artifact pull workflow artifacts + - mkdir -p packages + # Prepare packaging tools + - cd packaging/nuget + - python3 -m pip install -U -r requirements.txt + # Create NuGet package + # We need --ignore-tag since the jobs don't add the tag to + # the artifact path, and they don't need to since these artifacts + # are part of the same workflow. 
+ - ./release.py --directory ../../artifacts --ignore-tag --class NugetPackage ${SEMAPHORE_GIT_TAG_NAME} + - cp -v librdkafka.redist.*.nupkg ../../packages + # Create static package + - ./release.py --directory ../../artifacts --ignore-tag --class StaticPackage ${SEMAPHORE_GIT_TAG_NAME} + - cp -v librdkafka-static-bundle*.tgz ../../packages + - cd ../../ + # Copy generated docs to packages for inclusion in the tar ball + - cp -v artifacts/librdkafka-docs.tgz packages/ + # Make super tar ball of all packages + - cd packages + - tar cvf librdkafka-packages-${SEMAPHORE_GIT_TAG_NAME}-${SEMAPHORE_WORKFLOW_ID}.tar . + # Provide some extra details + - ls -la + - sha256sum * + - cd .. + # Upload all packages to project artifact store + - artifact push project packages --destination librdkafka-packages-${SEMAPHORE_GIT_TAG_NAME}-${SEMAPHORE_WORKFLOW_ID} + - echo Thank you diff --git a/src/fluent-bit/lib/librdkafka-2.1.0/CHANGELOG.md b/src/fluent-bit/lib/librdkafka-2.1.0/CHANGELOG.md new file mode 100644 index 000000000..857526c6e --- /dev/null +++ b/src/fluent-bit/lib/librdkafka-2.1.0/CHANGELOG.md @@ -0,0 +1,1218 @@ +# librdkafka v2.1.0 + +librdkafka v2.1.0 is a feature release: + +* [KIP-320](https://cwiki.apache.org/confluence/display/KAFKA/KIP-320%3A+Allow+fetchers+to+detect+and+handle+log+truncation) + Allow fetchers to detect and handle log truncation (#4122). +* Fix a reference count issue blocking the consumer from closing (#4187). +* Fix a protocol issue with ListGroups API, where an extra + field was appended for API Versions greater than or equal to 3 (#4207). +* Fix an issue with `max.poll.interval.ms`, where polling any queue would cause + the timeout to be reset (#4176). +* Fix seek partition timeout, was one thousand times lower than the passed + value (#4230). +* Fix multiple inconsistent behaviour in batch APIs during **pause** or **resume** operations (#4208). + See **Consumer fixes** section below for more information. +* Update lz4.c from upstream.
Fixes [CVE-2021-3520](https://github.com/advisories/GHSA-gmc7-pqv9-966m) + (by @filimonov, #4232). +* Upgrade OpenSSL to v3.0.8 with various security fixes, + check the [release notes](https://www.openssl.org/news/cl30.txt) (#4215). + +## Enhancements + + * Added `rd_kafka_topic_partition_get_leader_epoch()` (and `set..()`). + * Added partition leader epoch APIs: + - `rd_kafka_topic_partition_get_leader_epoch()` (and `set..()`) + - `rd_kafka_message_leader_epoch()` + - `rd_kafka_*assign()` and `rd_kafka_seek_partitions()` now supports + partitions with a leader epoch set. + - `rd_kafka_offsets_for_times()` will return per-partition leader-epochs. + - `leader_epoch`, `stored_leader_epoch`, and `committed_leader_epoch` + added to per-partition statistics. + + +## Fixes + +### OpenSSL fixes + + * Fixed OpenSSL static build not able to use external modules like FIPS + provider module. + +### Consumer fixes + + * A reference count issue was blocking the consumer from closing. + The problem would happen when a partition is lost, because forcibly + unassigned from the consumer or if the corresponding topic is deleted. + * When using `rd_kafka_seek_partitions`, the remaining timeout was + converted from microseconds to milliseconds but the expected unit + for that parameter is microseconds. + * Fixed known issues related to Batch Consume APIs mentioned in v2.0.0 + release notes. + * Fixed `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` + intermittently updating `app_offset` and `store_offset` incorrectly when + **pause** or **resume** was being used for a partition. + * Fixed `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` + intermittently skipping offsets when **pause** or **resume** was being + used for a partition. 
+ + +## Known Issues + +### Consume Batch API + + * When `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` APIs are used with + any of the **seek**, **pause**, **resume** or **rebalancing** operation, `on_consume` + interceptors might be called incorrectly (maybe multiple times) for not consumed messages. + + + +# librdkafka v2.0.2 + +librdkafka v2.0.2 is a bugfix release: + +* Fix OpenSSL version in Win32 nuget package (#4152). + + + +# librdkafka v2.0.1 + +librdkafka v2.0.1 is a bugfix release: + +* Fixed nuget package for Linux ARM64 release (#4150). + + + +# librdkafka v2.0.0 + +librdkafka v2.0.0 is a feature release: + + * [KIP-88](https://cwiki.apache.org/confluence/display/KAFKA/KIP-88%3A+OffsetFetch+Protocol+Update) + OffsetFetch Protocol Update (#3995). + * [KIP-222](https://cwiki.apache.org/confluence/display/KAFKA/KIP-222+-+Add+Consumer+Group+operations+to+Admin+API) + Add Consumer Group operations to Admin API (started by @lesterfan, #3995). + * [KIP-518](https://cwiki.apache.org/confluence/display/KAFKA/KIP-518%3A+Allow+listing+consumer+groups+per+state) + Allow listing consumer groups per state (#3995). + * [KIP-396](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=97551484) + Partially implemented: support for AlterConsumerGroupOffsets + (started by @lesterfan, #3995). + * OpenSSL 3.0.x support - the maximum bundled OpenSSL version is now 3.0.7 (previously 1.1.1q). + * Fixes to the transactional and idempotent producer. + + +## Upgrade considerations + +### OpenSSL 3.0.x + +#### OpenSSL default ciphers + +The introduction of OpenSSL 3.0.x in the self-contained librdkafka bundles +changes the default set of available ciphers, in particular all obsolete +or insecure ciphers and algorithms as listed in the +OpenSSL [legacy](https://www.openssl.org/docs/man3.0/man7/OSSL_PROVIDER-legacy.html) +manual page are now disabled by default. 
+ +**WARNING**: These ciphers are disabled for security reasons and it is +highly recommended NOT to use them. + +Should you need to use any of these old ciphers you'll need to explicitly +enable the `legacy` provider by configuring `ssl.providers=default,legacy` +on the librdkafka client. + +#### OpenSSL engines and providers + +OpenSSL 3.0.x deprecates the use of engines, which is being replaced by +providers. As such librdkafka will emit a deprecation warning if +`ssl.engine.location` is configured. + +OpenSSL providers may be configured with the new `ssl.providers` +configuration property. + +### Broker TLS certificate hostname verification + +The default value for `ssl.endpoint.identification.algorithm` has been +changed from `none` (no hostname verification) to `https`, which enables +broker hostname verification (to counter man-in-the-middle +impersonation attacks) by default. + +To restore the previous behaviour, set `ssl.endpoint.identification.algorithm` to `none`. + +## Known Issues + +### Poor Consumer batch API messaging guarantees + +The Consumer Batch APIs `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` +are not thread safe if `rkmessages_size` is greater than 1 and any of the **seek**, +**pause**, **resume** or **rebalancing** operation is performed in parallel with any of +the above APIs. Some of the messages might be lost, or erroneously returned to the +application, in the above scenario. + +It is strongly recommended to use the Consumer Batch APIs and the mentioned +operations in sequential order in order to get consistent result. + +For **rebalancing** operation to work in sequential manner, please set `rebalance_cb` +configuration property (refer [examples/rdkafka_complex_consumer_example.c] +(examples/rdkafka_complex_consumer_example.c) for the help with the usage) for the consumer. + +## Enhancements + + * Self-contained static libraries can now be built on Linux arm64 (#4005).
+ * Updated to zlib 1.2.13, zstd 1.5.2, and curl 7.86.0 in self-contained + librdkafka bundles. + * Added `on_broker_state_change()` interceptor + * The C++ API no longer returns strings by const value, which enables better move optimization in callers. + * Added `rd_kafka_sasl_set_credentials()` API to update SASL credentials. + * Setting `allow.auto.create.topics` will no longer give a warning if used by a producer, since that is an expected use case. + Improvement in documentation for this property. + * Added a `resolve_cb` configuration setting that permits using custom DNS resolution logic. + * Added `rd_kafka_mock_broker_error_stack_cnt()`. + * The librdkafka.redist NuGet package has been updated to have fewer external + dependencies for its bundled librdkafka builds, as everything but cyrus-sasl + is now built-in. There are bundled builds with and without linking to + cyrus-sasl for maximum compatibility. + * Admin API DescribeGroups() now provides the group instance id + for static members [KIP-345](https://cwiki.apache.org/confluence/display/KAFKA/KIP-345%3A+Introduce+static+membership+protocol+to+reduce+consumer+rebalances) (#3995). + + +## Fixes + +### General fixes + + * Windows: couldn't read a PKCS#12 keystore correctly because binary mode + wasn't explicitly set and Windows defaults to text mode. + * Fixed memory leak when loading SSL certificates (@Mekk, #3930) + * Load all CA certificates from `ssl.ca.pem`, not just the first one. + * Each HTTP request made when using OAUTHBEARER OIDC would leak a small + amount of memory. + +### Transactional producer fixes + + * When a PID epoch bump is requested and the producer is waiting + to reconnect to the transaction coordinator, a failure in a find coordinator + request could cause an assert to fail. This is fixed by retrying when the + coordinator is known (#4020). 
+ * Transactional APIs (except `send_offsets_for_transaction()`) that + timeout due to low timeout_ms may now be resumed by calling the same API + again, as the operation continues in the background. + * For fatal idempotent producer errors that may be recovered by bumping the + epoch the current transaction must first be aborted prior to the epoch bump. + This is now handled correctly, which fixes issues seen with fenced + transactional producers on fatal idempotency errors. + * Timeouts for EndTxn requests (transaction commits and aborts) are now + automatically retried and the error raised to the application is also + a retriable error. + * TxnOffsetCommitRequests were retried immediately upon temporary errors in + `send_offsets_to_transactions()`, causing excessive network requests. + These retries are now delayed 500ms. + * If `init_transactions()` is called with an infinite timeout (-1), + the timeout will be limited to 2 * `transaction.timeout.ms`. + The application may retry and resume the call if a retriable error is + returned. + + +### Consumer fixes + + * Back-off and retry JoinGroup request if coordinator load is in progress. + * Fix `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` skipping + other partitions' offsets intermittently when **seek**, **pause**, **resume** + or **rebalancing** is used for a partition. + * Fix `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` + intermittently returning incorrect partitions' messages if **rebalancing** + happens during these operations. + +# librdkafka v1.9.2 + +librdkafka v1.9.2 is a maintenance release: + + * The SASL OAUTHBEARER OIDC POST field was sometimes truncated by one byte (#3192). + * The bundled version of OpenSSL has been upgraded to version 1.1.1q for non-Windows builds. Windows builds remain on OpenSSL 1.1.1n for the time being. + * The bundled version of Curl has been upgraded to version 7.84.0.
+ + + +# librdkafka v1.9.1 + +librdkafka v1.9.1 is a maintenance release: + + * The librdkafka.redist NuGet package now contains OSX M1/arm64 builds. + * Self-contained static libraries can now be built on OSX M1 too, thanks to + disabling curl's configure runtime check. + + + +# librdkafka v1.9.0 + +librdkafka v1.9.0 is a feature release: + + * Added KIP-768 OAUTHBEARER OIDC support (by @jliunyu, #3560) + * Added KIP-140 Admin API ACL support (by @emasab, #2676) + + +## Upgrade considerations + + * Consumer: + `rd_kafka_offsets_store()` (et.al) will now return an error for any + partition that is not currently assigned (through `rd_kafka_*assign()`). + This prevents a race condition where an application would store offsets + after the assigned partitions had been revoked (which resets the stored + offset), that could cause these old stored offsets to be committed later + when the same partitions were assigned to this consumer again - effectively + overwriting any committed offsets by any consumers that were assigned the + same partitions previously. This would typically result in the offsets + rewinding and messages to be reprocessed. + As an extra effort to avoid this situation the stored offset is now + also reset when partitions are assigned (through `rd_kafka_*assign()`). + Applications that explicitly call `..offset*_store()` will now need + to handle the case where `RD_KAFKA_RESP_ERR__STATE` is returned + in the per-partition `.err` field - meaning the partition is no longer + assigned to this consumer and the offset could not be stored for commit. + + +## Enhancements + + * Improved producer queue scheduling. Fixes the performance regression + introduced in v1.7.0 for some produce patterns. (#3538, #2912) + * Windows: Added native Win32 IO/Queue scheduling. This removes the + internal TCP loopback connections that were previously used for timely + queue wakeups. + * Added `socket.connection.setup.timeout.ms` (default 30s).
+ The maximum time allowed for broker connection setups (TCP connection as + well as SSL and SASL handshakes) is now limited to this value. + This fixes the issue with stalled broker connections in the case of network + or load balancer problems. + The Java clients has an exponential backoff to this timeout which is + limited by `socket.connection.setup.timeout.max.ms` - this was not + implemented in librdkafka due to differences in connection handling and + `ERR__ALL_BROKERS_DOWN` error reporting. Having a lower initial connection + setup timeout and then increase the timeout for the next attempt would + yield possibly false-positive `ERR__ALL_BROKERS_DOWN` too early. + * SASL OAUTHBEARER refresh callbacks can now be scheduled for execution + on librdkafka's background thread. This solves the problem where an + application has a custom SASL OAUTHBEARER refresh callback and thus needs to + call `rd_kafka_poll()` (et.al.) at least once to trigger the + refresh callback before being able to connect to brokers. + With the new `rd_kafka_conf_enable_sasl_queue()` configuration API and + `rd_kafka_sasl_background_callbacks_enable()` the refresh callbacks + can now be triggered automatically on the librdkafka background thread. + * `rd_kafka_queue_get_background()` now creates the background thread + if not already created. + * Added `rd_kafka_consumer_close_queue()` and `rd_kafka_consumer_closed()`. + This allow applications and language bindings to implement asynchronous + consumer close. + * Bundled zlib upgraded to version 1.2.12. + * Bundled OpenSSL upgraded to 1.1.1n. + * Added `test.mock.broker.rtt` to simulate RTT/latency for mock brokers. + + +## Fixes + +### General fixes + + * Fix various 1 second delays due to internal broker threads blocking on IO + even though there are events to handle. + These delays could be seen randomly in any of the non produce/consume + request APIs, such as `commit_transaction()`, `list_groups()`, etc. 
+ * Windows: some applications would crash with an error message like + `no OPENSSL_Applink()` written to the console if `ssl.keystore.location` + was configured. + This regression was introduced in v1.8.0 due to use of vcpkgs and how + keystore file was read. #3554. + * Windows 32-bit only: 64-bit atomic reads were in fact not atomic and could + in rare circumstances yield incorrect values. + One manifestation of this issue was the `max.poll.interval.ms` consumer + timer expiring even though the application was polling according to profile. + Fixed by @WhiteWind (#3815). + * `rd_kafka_clusterid()` would previously fail with timeout if + called on cluster with no visible topics (#3620). + The clusterid is now returned as soon as metadata has been retrieved. + * Fix hang in `rd_kafka_list_groups()` if there are no available brokers + to connect to (#3705). + * Millisecond timeouts (`timeout_ms`) in various APIs, such as `rd_kafka_poll()`, + was limited to roughly 36 hours before wrapping. (#3034) + * If a metadata request triggered by `rd_kafka_metadata()` or consumer group rebalancing + encountered a non-retriable error it would not be propagated to the caller and thus + cause a stall or timeout, this has now been fixed. (@aiquestion, #3625) + * AdminAPI `DeleteGroups()` and `DeleteConsumerGroupOffsets()`: + if the given coordinator connection was not up by the time these calls were + initiated and the first connection attempt failed then no further connection + attempts were performed, ultimately leading to the calls timing out. + This is now fixed by keep retrying to connect to the group coordinator + until the connection is successful or the call times out. + Additionally, the coordinator will be now re-queried once per second until + the coordinator comes up or the call times out, to detect change in + coordinators. + * Mock cluster `rd_kafka_mock_broker_set_down()` would previously + accept and then disconnect new connections, it now refuses new connections.
+ + +### Consumer fixes + + * `rd_kafka_offsets_store()` (et.al) will now return an error for any + partition that is not currently assigned (through `rd_kafka_*assign()`). + See **Upgrade considerations** above for more information. + * `rd_kafka_*assign()` will now reset/clear the stored offset. + See **Upgrade considerations** above for more information. + * `seek()` followed by `pause()` would overwrite the seeked offset when + later calling `resume()`. This is now fixed. (#3471). + **Note**: Avoid storing offsets (`offsets_store()`) after calling + `seek()` as this may later interfere with resuming a paused partition, + instead store offsets prior to calling seek. + * A `ERR_MSG_SIZE_TOO_LARGE` consumer error would previously be raised + if the consumer received a maximum sized FetchResponse only containing + (transaction) aborted messages with no control messages. The fetching did + not stop, but some applications would terminate upon receiving this error. + No error is now raised in this case. (#2993) + Thanks to @jacobmikesell for providing an application to reproduce the + issue. + * The consumer no longer backs off the next fetch request (default 500ms) when + the parsed fetch response is truncated (which is a valid case). + This should speed up the message fetch rate in case of maximum sized + fetch responses. + * Fix consumer crash (`assert: rkbuf->rkbuf_rkb`) when parsing + malformed JoinGroupResponse consumer group metadata state. + * Fix crash (`cant handle op type`) when using `consume_batch_queue()` (et.al) + and an OAUTHBEARER refresh callback was set. + The callback is now triggered by the consume call. (#3263) + * Fix `partition.assignment.strategy` ordering when multiple strategies are configured. + If there is more than one eligible strategy, preference is determined by the + configured order of strategies. The partitions are assigned to group members according + to the strategy order preference now. 
(#3818) + * Any form of unassign*() (absolute or incremental) is now allowed during + consumer close rebalancing and they're all treated as absolute unassigns. + (@kevinconaway) + + +### Transactional producer fixes + + * Fix message loss in idempotent/transactional producer. + A corner case has been identified that may cause idempotent/transactional + messages to be lost despite being reported as successfully delivered: + During cluster instability a restarting broker may report existing topics + as non-existent for some time before it is able to acquire up to date + cluster and topic metadata. + If an idempotent/transactional producer updates its topic metadata cache + from such a broker the producer will consider the topic to be removed from + the cluster and thus remove its local partition objects for the given topic. + This also removes the internal message sequence number counter for the given + partitions. + If the producer later receives proper topic metadata for the cluster the + previously "removed" topics will be rediscovered and new partition objects + will be created in the producer. These new partition objects, with no + knowledge of previous incarnations, would start counting partition messages + at zero again. + If new messages were produced for these partitions by the same producer + instance, the same message sequence numbers would be sent to the broker. + If the broker still maintains state for the producer's PID and Epoch it could + deem that these messages with reused sequence numbers had already been + written to the log and treat them as legit duplicates. + This would seem to the producer that these new messages were successfully + written to the partition log by the broker when they were in fact discarded + as duplicates, leading to silent message loss. 
+ The fix included in this release is to save the per-partition idempotency + state when a partition is removed, and then recover and use that saved + state if the partition comes back at a later time. + * The transactional producer would retry (re)initializing its PID if a + `PRODUCER_FENCED` error was returned from the + broker (added in Apache Kafka 2.8), which could cause the producer to + seemingly hang. + This error code is now correctly handled by raising a fatal error. + * If the given group coordinator connection was not up by the time + `send_offsets_to_transactions()` was called, and the first connection + attempt failed then no further connection attempts were performed, ultimately + leading to `send_offsets_to_transactions()` timing out, and possibly + also the transaction timing out on the transaction coordinator. + This is now fixed by keep retrying to connect to the group coordinator + until the connection is successful or the call times out. + Additionally, the coordinator will be now re-queried once per second until + the coordinator comes up or the call times out, to detect change in + coordinators. + + +### Producer fixes + + * Improved producer queue wakeup scheduling. This should significantly + decrease the number of wakeups and thus syscalls for high message rate + producers. (#3538, #2912) + * The logic for enforcing that `message.timeout.ms` is greater than + an explicitly configured `linger.ms` was incorrect and instead of + erroring out early the lingering time was automatically adjusted to the + message timeout, ignoring the configured `linger.ms`. + This has now been fixed so that an error is returned when instantiating the + producer. Thanks to @larry-cdn77 for analysis and test-cases. (#3709) + + +# librdkafka v1.8.2 + +librdkafka v1.8.2 is a maintenance release. + +## Enhancements + + * Added `ssl.ca.pem` to add CA certificate by PEM string. (#2380) + * Prebuilt binaries for Mac OSX now contain statically linked OpenSSL v1.1.1l.
+ Previously the OpenSSL version was either v1.1.1 or v1.0.2 depending on + build type. + +## Fixes + + * The `librdkafka.redist` 1.8.0 package had two flaws: + - the linux-arm64 .so build was a linux-x64 build. + - the included Windows MSVC 140 runtimes for x64 were in fact x86. + The release script has been updated to verify the architectures of + provided artifacts to avoid this happening in the future. + * Prebuilt binaries for Mac OSX Sierra (10.12) and older are no longer provided. + This affects [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go). + * Some of the prebuilt binaries for Linux were built on Ubuntu 14.04, + these builds are now performed on Ubuntu 16.04 instead. + This may affect users on ancient Linux distributions. + * It was not possible to configure `ssl.ca.location` on OSX, the property + would automatically revert back to `probe` (default value). + This regression was introduced in v1.8.0. (#3566) + * librdkafka's internal timers would not start if the timeout was set to 0, + which would result in some timeout operations not being enforced correctly, + e.g., the transactional producer API timeouts. + These timers are now started with a timeout of 1 microsecond. + +### Transactional producer fixes + + * Upon quick repeated leader changes the transactional producer could receive + an `OUT_OF_ORDER_SEQUENCE` error from the broker, which triggered an + Epoch bump on the producer resulting in an InitProducerIdRequest being sent + to the transaction coordinator in the middle of a transaction. + This request would start a new transaction on the coordinator, but the + producer would still think (erroneously) it was in current transaction. + Any messages produced in the current transaction prior to this event would + be silently lost when the application committed the transaction, leading + to message loss. + This has been fixed by setting the Abortable transaction error state + in the producer. #3575.
+ * The transactional producer could stall during a transaction if the transaction + coordinator changed while adding offsets to the transaction (send_offsets_to_transaction()). + This stall lasted until the coordinator connection went down, the + transaction timed out, transaction was aborted, or messages were produced + to a new partition, whichever came first. #3571. + + + +*Note: there was no v1.8.1 librdkafka release* + + +# librdkafka v1.8.0 + +librdkafka v1.8.0 is a security release: + + * Upgrade bundled zlib version from 1.2.8 to 1.2.11 in the `librdkafka.redist` + NuGet package. The updated zlib version fixes CVEs: + CVE-2016-9840, CVE-2016-9841, CVE-2016-9842, CVE-2016-9843 + See https://github.com/edenhill/librdkafka/issues/2934 for more information. + * librdkafka now uses [vcpkg](https://vcpkg.io/) for up-to-date Windows + dependencies in the `librdkafka.redist` NuGet package: + OpenSSL 1.1.1l, zlib 1.2.11, zstd 1.5.0. + * The upstream dependency (OpenSSL, zstd, zlib) source archive checksums are + now verified when building with `./configure --install-deps`. + These builds are used by the librdkafka builds bundled with + confluent-kafka-go, confluent-kafka-python and confluent-kafka-dotnet. + + +## Enhancements + + * Producer `flush()` now overrides the `linger.ms` setting for the duration + of the `flush()` call, effectively triggering immediate transmission of + queued messages. (#3489) + +## Fixes + +### General fixes + + * Correctly detect presence of zlib via compilation check. (Chris Novakovic) + * `ERR__ALL_BROKERS_DOWN` is no longer emitted when the coordinator + connection goes down, only when all standard named brokers have been tried. + This fixes the issue with `ERR__ALL_BROKERS_DOWN` being triggered on + `consumer_close()`. It is also now only emitted if the connection was fully + up (past handshake), and not just connected. 
+ * `rd_kafka_query_watermark_offsets()`, `rd_kafka_offsets_for_times()`, + `consumer_lag` metric, and `auto.offset.reset` now honour + `isolation.level` and will return the Last Stable Offset (LSO) + when `isolation.level` is set to `read_committed` (default), rather than + the uncommitted high-watermark when it is set to `read_uncommitted`. (#3423) + * SASL GSSAPI is now usable when `sasl.kerberos.min.time.before.relogin` + is set to 0 - which disables ticket refreshes (by @mpekalski, #3431). + * Rename internal crc32c() symbol to rd_crc32c() to avoid conflict with + other static libraries (#3421). + * `txidle` and `rxidle` in the statistics object were emitted as 18446744073709551615 when no idle was known. -1 is now emitted instead. (#3519) + + +### Consumer fixes + + * Automatically retry offset commits on `ERR_REQUEST_TIMED_OUT`, + `ERR_COORDINATOR_NOT_AVAILABLE`, and `ERR_NOT_COORDINATOR` (#3398). + Offset commits will be retried twice. + * Timed auto commits did not work when only using assign() and not subscribe(). + This regression was introduced in v1.7.0. + * If the topics matching the current subscription changed (or the application + updated the subscription) while there was an outstanding JoinGroup or + SyncGroup request, an additional request would sometimes be sent before + handling the response of the first. This in turn led to internal state + issues that could cause a crash or malbehaviour. + The consumer will now wait for any outstanding JoinGroup or SyncGroup + responses before re-joining the group. + * `auto.offset.reset` could previously be triggered by temporary errors, + such as disconnects and timeouts (after the two retries are exhausted). + This is now fixed so that the auto offset reset policy is only triggered + for permanent errors. + * The error that triggers `auto.offset.reset` is now logged to help the + application owner identify the reason of the reset. 
+ * If a rebalance takes longer than a consumer's `session.timeout.ms`, the + consumer will remain in the group as long as it receives heartbeat responses + from the broker. + + +### Admin fixes + + * `DeleteRecords()` could crash if one of the underlying requests + (for a given partition leader) failed at the transport level (e.g., timeout). + (#3476). + + + +# librdkafka v1.7.0 + +librdkafka v1.7.0 is a feature release: + + * [KIP-360](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=89068820) - Improve reliability of transactional producer. + Requires Apache Kafka 2.5 or later. + * OpenSSL Engine support (`ssl.engine.location`) by @adinigam and @ajbarb. + + +## Enhancements + + * Added `connections.max.idle.ms` to automatically close idle broker + connections. + This feature is disabled by default unless `bootstrap.servers` contains + the string `azure` in which case the default is set to <4 minutes to improve + connection reliability and circumvent limitations with the Azure load + balancers (see #3109 for more information). + * Bumped to OpenSSL 1.1.1k in binary librdkafka artifacts. + * The binary librdkafka artifacts for Alpine are now using Alpine 3.12. + OpenSSL 1.1.1k. + * Improved static librdkafka Windows builds using MinGW (@neptoess, #3130). + * The `librdkafka.redist` NuGet package now has updated zlib, zstd and + OpenSSL versions (from vcpkg). + + +## Security considerations + + * The zlib version bundled with the `librdkafka.redist` NuGet package has now been upgraded + from zlib 1.2.8 to 1.2.11, fixing the following CVEs: + * CVE-2016-9840: undefined behaviour (compiler dependent) in inflate (decompression) code: this is used by the librdkafka consumer. Risk of successful exploitation through consumed messages is estimated very low. + * CVE-2016-9841: undefined behaviour (compiler dependent) in inflate code: this is used by the librdkafka consumer. Risk of successful exploitation through consumed messages is estimated very low. 
+ * CVE-2016-9842: undefined behaviour in inflateMark(): this API is not used by librdkafka. + * CVE-2016-9843: issue in crc32_big() which is called from crc32_z(): this API is not used by librdkafka. + +## Upgrade considerations + + * The C++ `oauthbearer_token_refresh_cb()` was missing a `Handle *` + argument that has now been added. This is a breaking change but the original + function signature is considered a bug. + This change only affects C++ OAuth developers. + * [KIP-735](https://cwiki.apache.org/confluence/display/KAFKA/KIP-735%3A+Increase+default+consumer+session+timeout) The consumer `session.timeout.ms` + default was changed from 10 to 45 seconds to make consumer groups more + robust and less sensitive to temporary network and cluster issues. + * Statistics: `consumer_lag` is now using the `committed_offset`, + while the new `consumer_lag_stored` is using `stored_offset` + (offset to be committed). + This is more correct than the previous `consumer_lag` which was using + either `committed_offset` or `app_offset` (last message passed + to application). + * The `librdkafka.redist` NuGet package is now built with MSVC runtime v140 + (VS 2015). Previous versions were built with MSVC runtime v120 (VS 2013). + + +## Fixes + +### General fixes + + * Fix accesses to freed metadata cache mutexes on client termination (#3279) + * There was a race condition on receiving updated metadata where a broker id + update (such as bootstrap to proper broker transformation) could finish after + the topic metadata cache was updated, leading to existing brokers seemingly + being not available. + One occurrence of this issue was query_watermark_offsets() that could return + `ERR__UNKNOWN_PARTITION` for existing partitions shortly after the + client instance was created. + * The OpenSSL context is now initialized with `TLS_client_method()` + (on OpenSSL >= 1.1.0) instead of the deprecated and outdated + `SSLv23_client_method()`. 
+ * The initial cluster connection on client instance creation could sometimes + be delayed up to 1 second if a `group.id` or `transactional.id` + was configured (#3305). + * Speed up triggering of new broker connections in certain cases by exiting + the broker thread io/op poll loop when a wakeup op is received. + * SASL GSSAPI: The Kerberos kinit refresh command was triggered from + `rd_kafka_new()` which made this call blocking if the refresh command + was taking long. The refresh is now performed by the background rdkafka + main thread. + * Fix busy-loop (100% CPU on the broker threads) during the handshake phase + of an SSL connection. + * Disconnects during SSL handshake are now propagated as transport errors + rather than SSL errors, since these disconnects are at the transport level + (e.g., incorrect listener, flaky load balancer, etc) and not due to SSL + issues. + * Increment metadata fast refresh interval backoff exponentially (@ajbarb, #3237). + * Unthrottled requests are no longer counted in the `brokers[].throttle` + statistics object. + * Log CONFWARN warning when global topic configuration properties + are overwritten by explicitly setting a `default_topic_conf`. + +### Consumer fixes + + * If a rebalance happened during a `consume_batch..()` call the already + accumulated messages for revoked partitions were not purged, which would + pass messages to the application for partitions that were no longer owned + by the consumer. Fixed by @jliunyu. #3340. + * Fix balancing and reassignment issues with the cooperative-sticky assignor. + #3306. + * Fix incorrect detection of first rebalance in sticky assignor (@hallfox). + * Aborted transactions with no messages produced to a partition could + cause further successfully committed messages in the same Fetch response to + be ignored, resulting in consumer-side message loss. 
+ A log message along the lines `Abort txn ctrl msg bad order at offset + 7501: expected before or at 7702: messages in aborted transactions may be delivered to the application` + would be seen. + This is a rare occurrence where a transactional producer would register with + the partition but not produce any messages before aborting the transaction. + * The consumer group deemed cached metadata up to date by checking + `topic.metadata.refresh.interval.ms`: if this property was set too low + it would cause cached metadata to be unusable and new metadata to be fetched, + which could delay the time it took for a rebalance to settle. + It now correctly uses `metadata.max.age.ms` instead. + * The consumer group timed auto commit would attempt commits during rebalances, + which could result in "Illegal generation" errors. This is now fixed, the + timed auto committer is only employed in the steady state when no rebalances + are taking place. Offsets are still auto committed when partitions are + revoked. + * Retriable FindCoordinatorRequest errors are no longer propagated to + the application as they are retried automatically. + * Fix rare crash (assert `rktp_started`) on consumer termination + (introduced in v1.6.0). + * Fix unaligned access and possibly corrupted snappy decompression when + building with MSVC (@azat) + * A consumer configured with the `cooperative-sticky` assignor did + not actively Leave the group on unsubscribe(). This delayed the + rebalance for the remaining group members by up to `session.timeout.ms`. + * The current subscription list was sometimes leaked when unsubscribing. + +### Producer fixes + + * The timeout value of `flush()` was not respected when delivery reports + were scheduled as events (such as for confluent-kafka-go) rather than + callbacks. + * There was a race condition in `purge()` which could cause newly + created partition objects, or partitions that were changing leaders, to + not have their message queues purged. 
This could cause + `abort_transaction()` to time out. This issue is now fixed. + * In certain high-throughput produce rate patterns producing could stall for + 1 second, regardless of `linger.ms`, due to rate-limiting of internal + queue wakeups. This is now fixed by not rate-limiting queue wakeups but + instead limiting them to one wakeup per queue reader poll. #2912. + +### Transactional Producer fixes + + * KIP-360: Fatal Idempotent producer errors are now recoverable by the + transactional producer and will raise a `txn_requires_abort()` error. + * If the cluster went down between `produce()` and `commit_transaction()` + and before any partitions had been registered with the coordinator, the + messages would time out but the commit would succeed because nothing + had been sent to the coordinator. This is now fixed. + * If the current transaction failed while `commit_transaction()` was + checking the current transaction state an invalid state transaction could + occur which in turn would trigger an assertion crash. + This issue showed up as "Invalid txn state transition: .." crashes, and is + now fixed by properly synchronizing both checking and transition of state. + + + +# librdkafka v1.6.1 + +librdkafka v1.6.1 is a maintenance release. + +## Upgrade considerations + + * Fatal idempotent producer errors are now also fatal to the transactional + producer. This is a necessary step to maintain data integrity prior to + librdkafka supporting KIP-360. Applications should check any transactional + API errors for the is_fatal flag and decommission the transactional producer + if the flag is set. + * The consumer error raised by `auto.offset.reset=error` now has error-code + set to `ERR__AUTO_OFFSET_RESET` to allow an application to differentiate + between auto offset resets and other consumer errors. 
+ + +## Fixes + +### General fixes + + * Admin API and transactional `send_offsets_to_transaction()` coordinator + requests, such as TxnOffsetCommitRequest, could in rare cases be sent + multiple times which could cause a crash. + * `ssl.ca.location=probe` is now enabled by default on Mac OSX since the + librdkafka-bundled OpenSSL might not have the same default CA search paths + as the system or brew installed OpenSSL. Probing scans all known locations. + +### Transactional Producer fixes + + * Fatal idempotent producer errors are now also fatal to the transactional + producer. + * The transactional producer could crash if the transaction failed while + `send_offsets_to_transaction()` was called. + * Group coordinator requests for transactional + `send_offsets_to_transaction()` calls would leak memory if the + underlying request was attempted to be sent after the transaction had + failed. + * When gradually producing to multiple partitions (resulting in multiple + underlying AddPartitionsToTxnRequests) subsequent partitions could get + stuck in pending state under certain conditions. These pending partitions + would not send queued messages to the broker and eventually trigger + message timeouts, failing the current transaction. This is now fixed. + * Committing an empty transaction (no messages were produced and no + offsets were sent) would previously raise a fatal error due to invalid state + on the transaction coordinator. We now allow empty/no-op transactions to + be committed. + +### Consumer fixes + + * The consumer will now retry indefinitely (or until the assignment is changed) + to retrieve committed offsets. This fixes the issue where only two retries + were attempted when outstanding transactions were blocking OffsetFetch + requests with `ERR_UNSTABLE_OFFSET_COMMIT`. 
#3265 + + + + + +# librdkafka v1.6.0 + +librdkafka v1.6.0 is a feature release: + + * [KIP-429 Incremental rebalancing](https://cwiki.apache.org/confluence/display/KAFKA/KIP-429%3A+Kafka+Consumer+Incremental+Rebalance+Protocol) with sticky + consumer group partition assignor (KIP-54) (by @mhowlett). + * [KIP-480 Sticky producer partitioning](https://cwiki.apache.org/confluence/display/KAFKA/KIP-480%3A+Sticky+Partitioner) (`sticky.partitioning.linger.ms`) - + achieves higher throughput and lower latency through sticky selection + of random partition (by @abbycriswell). + * AdminAPI: Add support for `DeleteRecords()`, `DeleteGroups()` and + `DeleteConsumerGroupOffsets()` (by @gridaphobe) + * [KIP-447 Producer scalability for exactly once semantics](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics) - + allows a single transactional producer to be used for multiple input + partitions. Requires Apache Kafka 2.5 or later. + * Transactional producer fixes and improvements, see **Transactional Producer fixes** below. + * The [librdkafka.redist](https://www.nuget.org/packages/librdkafka.redist/) + NuGet package now supports Linux ARM64/Aarch64. + + +## Upgrade considerations + + * Sticky producer partitioning (`sticky.partitioning.linger.ms`) is + enabled by default (10 milliseconds) which affects the distribution of + randomly partitioned messages, where previously these messages would be + evenly distributed over the available partitions they are now partitioned + to a single partition for the duration of the sticky time + (10 milliseconds by default) before a new random sticky partition + is selected. + * The new KIP-447 transactional producer scalability guarantees are only + supported on Apache Kafka 2.5 or later, on earlier releases you will + need to use one producer per input partition for EOS. This limitation + is not enforced by the producer or broker. 
+ * Error handling for the transactional producer has been improved, see + the **Transactional Producer fixes** below for more information. + + +## Known issues + + * The Transactional Producer's API timeout handling is inconsistent with the + underlying protocol requests, it is therefore strongly recommended that + applications call `rd_kafka_commit_transaction()` and + `rd_kafka_abort_transaction()` with the `timeout_ms` parameter + set to `-1`, which will use the remaining transaction timeout. + + +## Enhancements + + * KIP-107, KIP-204: AdminAPI: Added `DeleteRecords()` (by @gridaphobe). + * KIP-229: AdminAPI: Added `DeleteGroups()` (by @gridaphobe). + * KIP-496: AdminAPI: Added `DeleteConsumerGroupOffsets()`. + * KIP-464: AdminAPI: Added support for broker-side default partition count + and replication factor for `CreateTopics()`. + * Windows: Added `ssl.ca.certificate.stores` to specify a list of + Windows Certificate Stores to read CA certificates from, e.g., + `CA,Root`. `Root` remains the default store. + * Use reentrant `rand_r()` on supporting platforms which decreases lock + contention (@azat). + * Added `assignor` debug context for troubleshooting consumer partition + assignments. + * Updated to OpenSSL v1.1.1i when building dependencies. + * Update bundled lz4 (used when `./configure --disable-lz4-ext`) to v1.9.3 + which has vast performance improvements. + * Added `rd_kafka_conf_get_default_topic_conf()` to retrieve the + default topic configuration object from a global configuration object. + * Added `conf` debugging context to `debug` - shows set configuration + properties on client and topic instantiation. Sensitive properties + are redacted. + * Added `rd_kafka_queue_yield()` to cancel a blocking queue call. + * Will now log a warning when multiple ClusterIds are seen, which is an + indication that the client might be erroneously configured to connect to + multiple clusters which is not supported. 
+ * Added `rd_kafka_seek_partitions()` to seek multiple partitions to + per-partition specific offsets. + + +## Fixes + +### General fixes + + * Fix a use-after-free crash when certain coordinator requests were retried. + * The C++ `oauthbearer_set_token()` function would call `free()` on + a `new`-created pointer, possibly leading to crashes or heap corruption (#3194) + +### Consumer fixes + + * The consumer assignment and consumer group implementations have been + decoupled, simplified and made more strict and robust. This will sort out + a number of edge cases for the consumer where the behaviour was previously + undefined. + * Partition fetch state was not set to STOPPED if OffsetCommit failed. + * The session timeout is now enforced locally also when the coordinator + connection is down, which was not previously the case. + + +### Transactional Producer fixes + + * Transaction commit or abort failures on the broker, such as when the + producer was fenced by a newer instance, were not propagated to the + application resulting in failed commits seeming successful. + This was a critical race condition for applications that had a delay after + producing messages (or sending offsets) before committing or + aborting the transaction. This issue has now been fixed and test coverage + improved. + * The transactional producer API would return `RD_KAFKA_RESP_ERR__STATE` + when API calls were attempted after the transaction had failed, we now + try to return the error that caused the transaction to fail in the first + place, such as `RD_KAFKA_RESP_ERR__FENCED` when the producer has + been fenced, or `RD_KAFKA_RESP_ERR__TIMED_OUT` when the transaction + has timed out. + * Transactional producer retry count for transactional control protocol + requests has been increased from 3 to infinite, retriable errors + are now automatically retried by the producer until success or the + transaction timeout is exceeded. 
This fixes the case where + `rd_kafka_send_offsets_to_transaction()` would fail the current + transaction into an abortable state when `CONCURRENT_TRANSACTIONS` was + returned by the broker (which is a transient error) and the 3 retries + were exhausted. + + +### Producer fixes + + * Calling `rd_kafka_topic_new()` with a topic config object with + `message.timeout.ms` set could sometimes adjust the global `linger.ms` + property (if not explicitly configured) which was not desired, this is now + fixed and the auto adjustment is only done based on the + `default_topic_conf` at producer creation. + * `rd_kafka_flush()` could previously return `RD_KAFKA_RESP_ERR__TIMED_OUT` + just as the timeout was reached if the messages had been flushed but + there were now no more messages. This has been fixed. + + + + +# librdkafka v1.5.3 + +librdkafka v1.5.3 is a maintenance release. + +## Upgrade considerations + + * CentOS 6 is now EOL and is no longer included in binary librdkafka packages, + such as NuGet. + +## Fixes + +### General fixes + + * Fix a use-after-free crash when certain coordinator requests were retried. + * Coordinator requests could be left uncollected on instance destroy which + could lead to hang. + * Fix rare 1 second stalls by forcing rdkafka main thread wakeup when a new + next-timer-to-be-fired is scheduled. + * Fix additional cases where broker-side automatic topic creation might be + triggered unexpectedly. + * AdminAPI: The operation_timeout (on-broker timeout) previously defaulted to 0, + but now defaults to `socket.timeout.ms` (60s). + * Fix possible crash for Admin API protocol requests that fail at the + transport layer or prior to sending. + + +### Consumer fixes + + * Consumer would not filter out messages for aborted transactions + if the messages were compressed (#3020). + * Consumer destroy without prior `close()` could hang in certain + cgrp states (@gridaphobe, #3127). + * Fix possible null dereference in `Message::errstr()` (#3140). 
+ * The `roundrobin` partition assignment strategy could get stuck in an + endless loop or generate uneven assignments in case the group members + had asymmetric subscriptions (e.g., c1 subscribes to t1,t2 while c2 + subscribes to t2,t3). (#3159) + * Mixing committed and logical or absolute offsets in the partitions + passed to `rd_kafka_assign()` would in previous releases ignore the + logical or absolute offsets and use the committed offsets for all partitions. + This is now fixed. (#2938) + + + + +# librdkafka v1.5.2 + +librdkafka v1.5.2 is a maintenance release. + + +## Upgrade considerations + + * The default value for the producer configuration property `retries` has + been increased from 2 to infinity, effectively limiting Produce retries to + only `message.timeout.ms`. + As the reasons for the automatic internal retries vary (various broker error + codes as well as transport layer issues), it doesn't make much sense to limit + the number of retries for retriable errors, but instead only limit the + retries based on the allowed time to produce a message. + * The default value for the producer configuration property + `request.timeout.ms` has been increased from 5 to 30 seconds to match + the Apache Kafka Java producer default. + This change yields increased robustness for broker-side congestion. + + +## Enhancements + + * The generated `CONFIGURATION.md` (through `rd_kafka_conf_properties_show())`) + now includes all properties and values, regardless if they were included in + the build, and setting a disabled property or value through + `rd_kafka_conf_set()` now returns `RD_KAFKA_CONF_INVALID` and provides + a more useful error string saying why the property can't be set. + * Consumer configs on producers and vice versa will now be logged with + warning messages on client instantiation. 
+ +## Fixes + +### Security fixes + + * There was an incorrect call to zlib's `inflateGetHeader()` with + uninitialized memory pointers that could lead to the GZIP header of a fetched + message batch being copied to arbitrary memory. + This function call has now been completely removed since the result was + not used. + Reported by Ilja van Sprundel. + + +### General fixes + + * `rd_kafka_topic_opaque()` (used by the C++ API) would cause object + refcounting issues when used on light-weight (error-only) topic objects + such as consumer errors (#2693). + * Handle name resolution failures when formatting IP addresses in error logs, + and increase printed hostname limit to ~256 bytes (was ~60). + * Broker sockets would be closed twice (thus leading to potential race + condition with fd-reuse in other threads) if a custom `socket_cb` would + return error. + +### Consumer fixes + + * The `roundrobin` `partition.assignment.strategy` could crash (assert) + for certain combinations of members and partitions. + This is a regression in v1.5.0. (#3024) + * The C++ `KafkaConsumer` destructor did not destroy the underlying + C `rd_kafka_t` instance, causing a leak if `close()` was not used. + * Expose rich error strings for C++ Consumer `Message->errstr()`. + * The consumer could get stuck if an outstanding commit failed during + rebalancing (#2933). + * Topic authorization errors during fetching are now reported only once (#3072). + +### Producer fixes + + * Topic authorization errors are now properly propagated for produced messages, + both through delivery reports and as `ERR_TOPIC_AUTHORIZATION_FAILED` + return value from `produce*()` (#2215) + * Treat cluster authentication failures as fatal in the transactional + producer (#2994). + * The transactional producer code did not properly reference-count partition + objects which could in very rare circumstances lead to a use-after-free bug + if a topic was deleted from the cluster when a transaction was using it. 
+ * `ERR_KAFKA_STORAGE_ERROR` is now correctly treated as a retriable + produce error (#3026). + * Messages that timed out locally would not fail the ongoing transaction. + If the application did not take action on failed messages in its delivery + report callback and went on to commit the transaction, the transaction would + be successfully committed, simply omitting the failed messages. + * EndTxnRequests (sent on commit/abort) are only retried in allowed + states (#3041). + Previously the transaction could hang on commit_transaction() if an abortable + error was hit and the EndTxnRequest was to be retried. + + +*Note: there was no v1.5.1 librdkafka release* + + + + +# librdkafka v1.5.0 + +The v1.5.0 release brings usability improvements, enhancements and fixes to +librdkafka. + +## Enhancements + + * Improved broker connection error reporting with more useful information and + hints on the cause of the problem. + * Consumer: Propagate errors when subscribing to unavailable topics (#1540) + * Producer: Add `batch.size` producer configuration property (#638) + * Add `topic.metadata.propagation.max.ms` to allow newly manually created + topics to be propagated throughout the cluster before reporting them + as non-existent. This fixes race issues where CreateTopics() is + quickly followed by produce(). + * Prefer least idle connection for periodic metadata refreshes, et.al., + to allow truly idle connections to time out and to avoid load-balancer-killed + idle connection errors (#2845) + * Added `rd_kafka_event_debug_contexts()` to get the debug contexts for + a debug log line (by @wolfchimneyrock). + * Added Test scenarios which define the cluster configuration. 
+ * Added MinGW-w64 builds (@ed-alertedh, #2553) + * `./configure --enable-XYZ` now requires the XYZ check to pass, + and `--disable-XYZ` disables the feature altogether (@benesch) + * Added `rd_kafka_produceva()` which takes an array of produce arguments + for situations where the existing `rd_kafka_producev()` va-arg approach + can't be used. + * Added `rd_kafka_message_broker_id()` to see the broker that a message + was produced or fetched from, or an error was associated with. + * Added RTT/delay simulation to mock brokers. + + +## Upgrade considerations + + * Subscribing to non-existent and unauthorized topics will now propagate + errors `RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART` and + `RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED` to the application through + the standard consumer error (the err field in the message object). + * Consumer will no longer trigger auto creation of topics, + `allow.auto.create.topics=true` may be used to re-enable the old deprecated + functionality. + * The default consumer pre-fetch queue threshold `queued.max.messages.kbytes` + has been decreased from 1GB to 64MB to avoid excessive network usage for low + and medium throughput consumer applications. High throughput consumer + applications may need to manually set this property to a higher value. + * The default consumer Fetch wait time has been increased from 100ms to 500ms + to avoid excessive network usage for low throughput topics. + * If OpenSSL is linked statically, or `ssl.ca.location=probe` is configured, + librdkafka will probe known CA certificate paths and automatically use the + first one found. This should alleviate the need to configure + `ssl.ca.location` when the statically linked OpenSSL's OPENSSLDIR differs + from the system's CA certificate path. + * The heuristics for handling Apache Kafka < 0.10 brokers has been removed to + improve connection error handling for modern Kafka versions. 
+ Users on Brokers 0.9.x or older should already be configuring + `api.version.request=false` and `broker.version.fallback=...` so there + should be no functional change. + * The default producer batch accumulation time, `linger.ms`, has been changed + from 0.5ms to 5ms to improve batch sizes and throughput while reducing + the per-message protocol overhead. + Applications that require lower produce latency than 5ms will need to + manually set `linger.ms` to a lower value. + * librdkafka's build tooling now requires Python 3.x (python3 interpreter). + + +## Fixes + +### General fixes + + * The client could crash in rare circumstances on ApiVersion or + SaslHandshake request timeouts (#2326) + * `./configure --LDFLAGS='a=b, c=d'` with arguments containing = are now + supported (by @sky92zwq). + * `./configure` arguments now take precedence over cached `configure` variables + from previous invocation. + * Fix theoretical crash on coord request failure. + * Unknown partition error could be triggered for existing partitions when + additional partitions were added to a topic (@benesch, #2915) + * Quickly refresh topic metadata for desired but non-existent partitions. + This will speed up the initial discovery delay when new partitions are added + to an existing topic (#2917). + + +### Consumer fixes + + * The roundrobin partition assignor could crash if subscriptions + were asymmetrical (different sets from different members of the group). + Thanks to @ankon and @wilmai for identifying the root cause (#2121). + * The consumer assignors could ignore some topics if there were more subscribed + topics than consumers taking part in the assignment. + * The consumer would connect to all partition leaders of a topic even + for partitions that were not being consumed (#2826). + * Initial consumer group joins should now be a couple of seconds quicker + thanks to expedited query intervals (@benesch). 
+ * Fix crash and/or inconsistent subscriptions when using multiple consumers + (in the same process) with wildcard topics on Windows. + * Don't propagate temporary offset lookup errors to application. + * Immediately refresh topic metadata when partitions are reassigned to other + brokers, avoiding a fetch stall of up to `topic.metadata.refresh.interval.ms`. (#2955) + * Memory for batches containing control messages would not be freed when + using the batch consume APIs (@pf-qiu, #2990). + + +### Producer fixes + + * Proper locking for transaction state in EndTxn handler. + + + +# librdkafka v1.4.4 + +v1.4.4 is a maintenance release with the following fixes and enhancements: + + * Transactional producer could crash on request timeout due to dereferencing + NULL pointer of non-existent response object. + * Mark `rd_kafka_send_offsets_to_transaction()` CONCURRENT_TRANSACTION (et.al) + errors as retriable. + * Fix crash on transactional coordinator FindCoordinator request failure. + * Minimize broker re-connect delay when broker's connection is needed to + send requests. + * Proper locking for transaction state in EndTxn handler. + * `socket.timeout.ms` was ignored when `transactional.id` was set. + * Added RTT/delay simulation to mock brokers. + +*Note: there was no v1.4.3 librdkafka release* + + + +# librdkafka v1.4.2 + +v1.4.2 is a maintenance release with the following fixes and enhancements: + + * Fix produce/consume hang after partition goes away and comes back, + such as when a topic is deleted and re-created. + * Consumer: Reset the stored offset when partitions are un-assign()ed (fixes #2782). + This fixes the case where a manual offset-less commit() or the auto-committer + would commit a stored offset from a previous assignment before + a new message was consumed by the application. + * Probe known CA cert paths and set default `ssl.ca.location` accordingly + if OpenSSL is statically linked or `ssl.ca.location` is set to `probe`. 
+ * Per-partition OffsetCommit errors were unhandled (fixes #2791) + * Seed the PRNG (random number generator) by default, allow application to + override with `enable.random.seed=false` (#2795) + * Fix stack overwrite (of 1 byte) when SaslHandshake MechCnt is zero + * Align bundled c11 threads (tinycthreads) constants to glibc and musl (#2681) + * Fix return value of rd_kafka_test_fatal_error() (by @ckb42) + * Ensure CMake sets disabled defines to zero on Windows (@benesch) + + +*Note: there was no v1.4.1 librdkafka release* + + + + + +# Older releases + +See https://github.com/edenhill/librdkafka/releases diff --git a/src/fluent-bit/lib/librdkafka-2.1.0/CMakeLists.txt b/src/fluent-bit/lib/librdkafka-2.1.0/CMakeLists.txt new file mode 100644 index 000000000..7f3dd0fc6 --- /dev/null +++ b/src/fluent-bit/lib/librdkafka-2.1.0/CMakeLists.txt @@ -0,0 +1,291 @@ +cmake_minimum_required(VERSION 3.2) + +include("packaging/cmake/parseversion.cmake") +parseversion("src/rdkafka.h") + +project(RdKafka VERSION ${RDKAFKA_VERSION}) + +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/packaging/cmake/Modules/") + +# Options. No 'RDKAFKA_' prefix to match old C++ code. { + +# This option doesn't affect build in fact, only C code +# (see 'rd_kafka_version_str'). In CMake the build type feature usually used +# (like Debug, Release, etc.). 
+option(WITHOUT_OPTIMIZATION "Disable optimization" OFF) + +option(ENABLE_DEVEL "Enable development asserts, checks, etc" OFF) +option(ENABLE_REFCNT_DEBUG "Enable refcnt debugging" OFF) +set(TRYCOMPILE_SRC_DIR "${CMAKE_CURRENT_LIST_DIR}/packaging/cmake/try_compile") +set(BUILT_WITH "CMAKE") + +# Toolchain { +list(APPEND BUILT_WITH "${CMAKE_C_COMPILER_ID}") +list(APPEND BUILT_WITH "${CMAKE_CXX_COMPILER_ID}") +# } + +# PkgConfig { +find_package(PkgConfig QUIET) +if(PkgConfig_FOUND) + set(WITH_PKGCONFIG ON) + list(APPEND BUILT_WITH "PKGCONFIG") +endif() +# } + +# LIBM { +include(CheckLibraryExists) +check_library_exists(m pow "" WITH_HDRHISTOGRAM) +if(WITH_HDRHISTOGRAM) + list(APPEND BUILT_WITH "HDRHISTOGRAM") +endif() +# } + +# ZLIB { +find_package(ZLIB QUIET) +if(ZLIB_FOUND) + set(with_zlib_default ON) +else() + set(with_zlib_default OFF) +endif() +option(WITH_ZLIB "With ZLIB" ${with_zlib_default}) +if(WITH_ZLIB) + list(APPEND BUILT_WITH "ZLIB") +endif() +# } + +# CURL { +find_package(CURL QUIET) +if(CURL_FOUND) + set(with_curl_default ON) +else() + set(with_curl_default OFF) +endif() +option(WITH_CURL "With CURL" ${with_curl_default}) +if(WITH_CURL) + list(APPEND BUILT_WITH "CURL") +endif() +# } + +# ZSTD { +find_package(ZSTD QUIET) +if(ZSTD_FOUND) + set(with_zstd_default ON) +else() + set(with_zstd_default OFF) +endif() +option(WITH_ZSTD "With ZSTD" ${with_zstd_default}) +if(WITH_ZSTD) + list(APPEND BUILT_WITH "ZSTD") +endif() +# } + +# LibDL { +try_compile( + WITH_LIBDL + "${CMAKE_CURRENT_BINARY_DIR}/try_compile" + "${TRYCOMPILE_SRC_DIR}/dlopen_test.c" + LINK_LIBRARIES "${CMAKE_DL_LIBS}" +) +if(WITH_LIBDL) + list(APPEND BUILT_WITH "LIBDL") +endif() +# } + +# WITH_PLUGINS { +if(WITH_LIBDL OR WIN32) + set(with_plugins_default ON) +else() + set(with_plugins_default OFF) +endif() +option(WITH_PLUGINS "With plugin support" ${with_plugins_default}) +if(WITH_PLUGINS) + list(APPEND BUILT_WITH "PLUGINS") +endif() +# } + +# OpenSSL { +if(WITH_BUNDLED_SSL) # option from 
'h2o' parent project + set(with_ssl_default ON) +else() + find_package(OpenSSL QUIET) + if(OpenSSL_FOUND) + set(with_ssl_default ON) + else() + set(with_ssl_default OFF) + endif() +endif() +option(WITH_SSL "With SSL" ${with_ssl_default}) +if(WITH_SSL) + list(APPEND BUILT_WITH "SSL") +endif() +# } + +# SASL { +if(WIN32) + set(with_sasl_default ON) +else() + if(PkgConfig_FOUND) + pkg_check_modules(SASL libsasl2) + if(SASL_FOUND) + set(with_sasl_default ON) + else() + try_compile( + WITH_SASL_CYRUS_BOOL + "${CMAKE_CURRENT_BINARY_DIR}/try_compile" + "${TRYCOMPILE_SRC_DIR}/libsasl2_test.c" + LINK_LIBRARIES "-lsasl2" + ) + if(WITH_SASL_CYRUS_BOOL) + set(with_sasl_default ON) + set(SASL_LIBRARIES "-lsasl2") + else() + set(with_sasl_default OFF) + endif() + endif() + endif() +endif() +option(WITH_SASL "With SASL" ${with_sasl_default}) +if(WITH_SASL) + if(SASL_FOUND) + link_directories(${SASL_LIBRARY_DIRS}) + endif() + if(WITH_SSL) + set(WITH_SASL_SCRAM ON) + set(WITH_SASL_OAUTHBEARER ON) + list(APPEND BUILT_WITH "SASL_SCRAM SASL_OAUTHBEARER") + endif() + if(NOT WIN32) + set(WITH_SASL_CYRUS ON) + list(APPEND BUILT_WITH "SASL_CYRUS") + endif() +endif() +# } + +if(WITH_SSL AND WITH_CURL) + set(WITH_OAUTHBEARER_OIDC ON) +endif() + +# LZ4 { +option(ENABLE_LZ4_EXT "Enable external LZ4 library support" ON) +set(WITH_LZ4_EXT OFF) +if(ENABLE_LZ4_EXT) + find_package(LZ4) + if(LZ4_FOUND) + set(WITH_LZ4_EXT ON) + list(APPEND BUILT_WITH "LZ4_EXT") + else() + message(STATUS "Using bundled LZ4 implementation.") + endif() +endif() +# } + +option(RDKAFKA_BUILD_STATIC "Build static rdkafka library" OFF) +option(RDKAFKA_BUILD_EXAMPLES "Build examples" ON) +option(RDKAFKA_BUILD_TESTS "Build tests" ON) +if(WIN32) + option(WITHOUT_WIN32_CONFIG "Avoid including win32_config.h on cmake builds" ON) +endif(WIN32) + +# In: +# * TRYCOMPILE_SRC_DIR +# Out: +# * HAVE_ATOMICS_32 +# * HAVE_ATOMICS_32_SYNC +# * HAVE_ATOMICS_64 +# * HAVE_ATOMICS_64_SYNC +# * HAVE_REGEX +# * HAVE_STRNDUP +# * 
HAVE_PTHREAD_SETNAME_GNU +# * HAVE_PTHREAD_SETNAME_DARWIN +# * HAVE_PTHREAD_SETNAME_FREEBSD +# * WITH_C11THREADS +# * WITH_CRC32C_HW +# * LINK_ATOMIC +include("packaging/cmake/try_compile/rdkafka_setup.cmake") +if(WITH_C11THREADS) + list(APPEND BUILT_WITH "C11THREADS") +endif() +if(WITH_CRC32C_HW) + list(APPEND BUILT_WITH "CRC32C_HW") +endif() + +set(GENERATED_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated") + +# In: +# * WITHOUT_OPTIMIZATION +# * ENABLE_DEVEL +# * ENABLE_REFCNT_DEBUG +# * HAVE_ATOMICS_32 +# * HAVE_ATOMICS_32_SYNC +# * HAVE_ATOMICS_64 +# * HAVE_ATOMICS_64_SYNC +# * WITH_ZLIB +# * WITH_SSL +# * WITH_SASL +# * HAVE_REGEX +# * HAVE_STRNDUP +# * HAVE_PTHREAD_SETNAME_GNU +# * HAVE_PTHREAD_SETNAME_DARWIN +# * HAVE_PTHREAD_SETNAME_FREEBSD +list(APPEND BUILT_WITH "SNAPPY") +list(APPEND BUILT_WITH "SOCKEM") +string(REPLACE ";" " " BUILT_WITH "${BUILT_WITH}") +configure_file("packaging/cmake/config.h.in" "${GENERATED_DIR}/config.h") + +# Installation (https://github.com/forexample/package-example) { + +include(GNUInstallDirs) + +set(config_install_dir "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}") + +set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated") + +set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake") +set(project_version "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake") +set(targets_export_name "${PROJECT_NAME}Targets") +set(namespace "${PROJECT_NAME}::") + +include(CMakePackageConfigHelpers) + +# In: +# * targets_export_name +# * PROJECT_NAME +configure_package_config_file( + "packaging/cmake/Config.cmake.in" + "${project_config}" + INSTALL_DESTINATION "${config_install_dir}" +) + +write_basic_package_version_file( + "${project_version}" + VERSION ${PROJECT_VERSION} + COMPATIBILITY AnyNewerVersion +) + +install( + FILES "${project_config}" "${project_version}" "packaging/cmake/Modules/FindLZ4.cmake" + DESTINATION "${config_install_dir}" +) + +install( + EXPORT "${targets_export_name}" + NAMESPACE "${namespace}" + DESTINATION 
"${config_install_dir}" +) + +install( + FILES LICENSES.txt + DESTINATION "share/licenses/librdkafka" +) + +add_subdirectory(src) +add_subdirectory(src-cpp) + +if(RDKAFKA_BUILD_EXAMPLES) + add_subdirectory(examples) +endif() + +if(RDKAFKA_BUILD_TESTS) + enable_testing() + add_subdirectory(tests) +endif() diff --git a/src/fluent-bit/lib/librdkafka-2.1.0/CODE_OF_CONDUCT.md b/src/fluent-bit/lib/librdkafka-2.1.0/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..dbbde19c9 --- /dev/null +++ b/src/fluent-bit/lib/librdkafka-2.1.0/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at rdkafka@edenhill.se. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/src/fluent-bit/lib/librdkafka-2.1.0/CONFIGURATION.md b/src/fluent-bit/lib/librdkafka-2.1.0/CONFIGURATION.md new file mode 100644 index 000000000..0ebec417c --- /dev/null +++ b/src/fluent-bit/lib/librdkafka-2.1.0/CONFIGURATION.md @@ -0,0 +1,183 @@ +# Configuration properties +## Global configuration properties + +Property | C/P | Range | Default | Importance | Description +-----------------------------------------|-----|-----------------|--------------:|------------| -------------------------- +builtin.features | * | | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins, zstd, sasl_oauthbearer, http, oidc | low | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support.
*Type: CSV flags* +client.id | * | | rdkafka | low | Client identifier.
*Type: string* +metadata.broker.list | * | | | high | Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.
*Type: string* +bootstrap.servers | * | | | high | Alias for `metadata.broker.list`: Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.
*Type: string*
+message.max.bytes | * | 1000 .. 1000000000 | 1000000 | medium | Maximum Kafka protocol request message size. Due to differing framing overhead between protocol versions the producer is unable to reliably enforce a strict max message limit at produce time and may exceed the maximum size by one message in protocol ProduceRequests, the broker will enforce the topic's `max.message.bytes` limit (see Apache Kafka documentation).<br>
*Type: integer* +message.copy.max.bytes | * | 0 .. 1000000000 | 65535 | low | Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs.
*Type: integer*
+receive.message.max.bytes | * | 1000 .. 2147483647 | 100000000 | medium | Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hiccups. This value must be at least `fetch.max.bytes` + 512 to allow for protocol overhead; the value is adjusted automatically unless the configuration property is explicitly set.<br>
*Type: integer* +max.in.flight.requests.per.connection | * | 1 .. 1000000 | 1000000 | low | Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one.
*Type: integer* +max.in.flight | * | 1 .. 1000000 | 1000000 | low | Alias for `max.in.flight.requests.per.connection`: Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one.
*Type: integer* +topic.metadata.refresh.interval.ms | * | -1 .. 3600000 | 300000 | low | Period of time in milliseconds at which topic and broker metadata is refreshed in order to proactively discover any new brokers, topics, partitions or partition leader changes. Use -1 to disable the intervalled refresh (not recommended). If there are no locally referenced topics (no topic objects created, no messages produced, no subscription or no assignment) then only the broker list will be refreshed every interval but no more often than every 10s.
*Type: integer* +metadata.max.age.ms | * | 1 .. 86400000 | 900000 | low | Metadata cache max age. Defaults to topic.metadata.refresh.interval.ms * 3
*Type: integer* +topic.metadata.refresh.fast.interval.ms | * | 1 .. 60000 | 250 | low | When a topic loses its leader a new metadata request will be enqueued with this initial interval, exponentially increasing until the topic metadata has been refreshed. This is used to recover quickly from transitioning leader brokers.
*Type: integer* +topic.metadata.refresh.fast.cnt | * | 0 .. 1000 | 10 | low | **DEPRECATED** No longer used.
*Type: integer* +topic.metadata.refresh.sparse | * | true, false | true | low | Sparse metadata requests (consumes less network bandwidth)
*Type: boolean* +topic.metadata.propagation.max.ms | * | 0 .. 3600000 | 30000 | low | Apache Kafka topic creation is asynchronous and it takes some time for a new topic to propagate throughout the cluster to all brokers. If a client requests topic metadata after manual topic creation but before the topic has been fully propagated to the broker the client is requesting metadata from, the topic will seem to be non-existent and the client will mark the topic as such, failing queued produced messages with `ERR__UNKNOWN_TOPIC`. This setting delays marking a topic as non-existent until the configured propagation max time has passed. The maximum propagation time is calculated from the time the topic is first referenced in the client, e.g., on produce().
*Type: integer* +topic.blacklist | * | | | low | Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist.
*Type: pattern list* +debug | * | generic, broker, topic, metadata, feature, queue, msg, protocol, cgrp, security, fetch, interceptor, plugin, consumer, admin, eos, mock, assignor, conf, all | | medium | A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch
*Type: CSV flags* +socket.timeout.ms | * | 10 .. 300000 | 60000 | low | Default timeout for network requests. Producer: ProduceRequests will use the lesser value of `socket.timeout.ms` and remaining `message.timeout.ms` for the first message in the batch. Consumer: FetchRequests will use `fetch.wait.max.ms` + `socket.timeout.ms`. Admin: Admin requests will use `socket.timeout.ms` or explicitly set `rd_kafka_AdminOptions_set_operation_timeout()` value.
*Type: integer* +socket.blocking.max.ms | * | 1 .. 60000 | 1000 | low | **DEPRECATED** No longer used.
*Type: integer* +socket.send.buffer.bytes | * | 0 .. 100000000 | 0 | low | Broker socket send buffer size. System default is used if 0.
*Type: integer* +socket.receive.buffer.bytes | * | 0 .. 100000000 | 0 | low | Broker socket receive buffer size. System default is used if 0.
*Type: integer* +socket.keepalive.enable | * | true, false | false | low | Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets
*Type: boolean* +socket.nagle.disable | * | true, false | false | low | Disable the Nagle algorithm (TCP_NODELAY) on broker sockets.
*Type: boolean* +socket.max.fails | * | 0 .. 1000000 | 1 | low | Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. WARNING: It is highly recommended to leave this setting at its default value of 1 to avoid the client and broker to become desynchronized in case of request timeouts. NOTE: The connection is automatically re-established.
*Type: integer* +broker.address.ttl | * | 0 .. 86400000 | 1000 | low | How long to cache the broker address resolving results (milliseconds).
*Type: integer* +broker.address.family | * | any, v4, v6 | any | low | Allowed broker IP address families: any, v4, v6
*Type: enum value* +socket.connection.setup.timeout.ms | * | 1000 .. 2147483647 | 30000 | medium | Maximum time allowed for broker connection setup (TCP connection setup as well SSL and SASL handshake). If the connection to the broker is not fully functional after this the connection will be closed and retried.
*Type: integer* +connections.max.idle.ms | * | 0 .. 2147483647 | 0 | medium | Close broker connections after the specified time of inactivity. Disable with 0. If this property is left at its default value some heuristics are performed to determine a suitable default value, this is currently limited to identifying brokers on Azure (see librdkafka issue #3109 for more info).
*Type: integer* +reconnect.backoff.jitter.ms | * | 0 .. 3600000 | 0 | low | **DEPRECATED** No longer used. See `reconnect.backoff.ms` and `reconnect.backoff.max.ms`.
*Type: integer* +reconnect.backoff.ms | * | 0 .. 3600000 | 100 | medium | The initial time to wait before reconnecting to a broker after the connection has been closed. The time is increased exponentially until `reconnect.backoff.max.ms` is reached. -25% to +50% jitter is applied to each reconnect backoff. A value of 0 disables the backoff and reconnects immediately.
*Type: integer* +reconnect.backoff.max.ms | * | 0 .. 3600000 | 10000 | medium | The maximum time to wait before reconnecting to a broker after the connection has been closed.
*Type: integer* +statistics.interval.ms | * | 0 .. 86400000 | 0 | high | librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics.
*Type: integer* +enabled_events | * | 0 .. 2147483647 | 0 | low | See `rd_kafka_conf_set_events()`
*Type: integer* +error_cb | * | | | low | Error callback (set with rd_kafka_conf_set_error_cb())
*Type: see dedicated API* +throttle_cb | * | | | low | Throttle callback (set with rd_kafka_conf_set_throttle_cb())
*Type: see dedicated API* +stats_cb | * | | | low | Statistics callback (set with rd_kafka_conf_set_stats_cb())
*Type: see dedicated API* +log_cb | * | | | low | Log callback (set with rd_kafka_conf_set_log_cb())
*Type: see dedicated API* +log_level | * | 0 .. 7 | 6 | low | Logging level (syslog(3) levels)
*Type: integer* +log.queue | * | true, false | false | low | Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set.
*Type: boolean* +log.thread.name | * | true, false | true | low | Print internal thread name in log messages (useful for debugging librdkafka internals)
*Type: boolean* +enable.random.seed | * | true, false | true | low | If enabled librdkafka will initialize the PRNG with srand(current_time.milliseconds) on the first invocation of rd_kafka_new() (required only if rand_r() is not available on your platform). If disabled the application must call srand() prior to calling rd_kafka_new().
*Type: boolean* +log.connection.close | * | true, false | true | low | Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connections.max.idle.ms` value.
*Type: boolean* +background_event_cb | * | | | low | Background queue event callback (set with rd_kafka_conf_set_background_event_cb())
*Type: see dedicated API* +socket_cb | * | | | low | Socket creation callback to provide race-free CLOEXEC
*Type: see dedicated API* +connect_cb | * | | | low | Socket connect callback
*Type: see dedicated API* +closesocket_cb | * | | | low | Socket close callback
*Type: see dedicated API* +open_cb | * | | | low | File open callback to provide race-free CLOEXEC
*Type: see dedicated API* +resolve_cb | * | | | low | Address resolution callback (set with rd_kafka_conf_set_resolve_cb()).
*Type: see dedicated API* +opaque | * | | | low | Application opaque (set with rd_kafka_conf_set_opaque())
*Type: see dedicated API* +default_topic_conf | * | | | low | Default topic configuration for automatically subscribed topics
*Type: see dedicated API* +internal.termination.signal | * | 0 .. 128 | 0 | low | Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed.
*Type: integer* +api.version.request | * | true, false | true | high | Request broker's supported API versions to adjust functionality to available protocol features. If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used.
*Type: boolean* +api.version.request.timeout.ms | * | 1 .. 300000 | 10000 | low | Timeout for broker API version requests.
*Type: integer* +api.version.fallback.ms | * | 0 .. 604800000 | 0 | medium | Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade).
*Type: integer* +broker.version.fallback | * | | 0.10.0 | medium | Older broker versions (before 0.10.0) provide no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value >= 0.10, such as 0.10.2.1, enables ApiVersionRequests.
*Type: string* +allow.auto.create.topics | * | true, false | false | low | Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. The broker must also be configured with `auto.create.topics.enable=true` for this configuration to take effect. Note: the default value (true) for the producer is different from the default value (false) for the consumer. Further, the consumer default value is different from the Java consumer (true), and this property is not supported by the Java producer. Requires broker version >= 0.11.0.0, for older broker versions only the broker configuration applies.
*Type: boolean* +security.protocol | * | plaintext, ssl, sasl_plaintext, sasl_ssl | plaintext | high | Protocol used to communicate with brokers.
*Type: enum value*
+ssl.cipher.suites | * | | | low | A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. See manual page for `ciphers(1)` and `SSL_CTX_set_cipher_list(3)`.<br>
*Type: string* +ssl.curves.list | * | | | low | The supported-curves extension in the TLS ClientHello message specifies the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client is willing to have the server use. See manual page for `SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required.
*Type: string* +ssl.sigalgs.list | * | | | low | The client uses the TLS ClientHello signature_algorithms extension to indicate to the server which signature/hash algorithm pairs may be used in digital signatures. See manual page for `SSL_CTX_set1_sigalgs_list(3)`. OpenSSL >= 1.0.2 required.
*Type: string* +ssl.key.location | * | | | low | Path to client's private key (PEM) used for authentication.
*Type: string* +ssl.key.password | * | | | low | Private key passphrase (for use with `ssl.key.location` and `set_ssl_cert()`)
*Type: string* +ssl.key.pem | * | | | low | Client's private key string (PEM format) used for authentication.
*Type: string* +ssl_key | * | | | low | Client's private key as set by rd_kafka_conf_set_ssl_cert()
*Type: see dedicated API* +ssl.certificate.location | * | | | low | Path to client's public key (PEM) used for authentication.
*Type: string* +ssl.certificate.pem | * | | | low | Client's public key string (PEM format) used for authentication.
*Type: string* +ssl_certificate | * | | | low | Client's public key as set by rd_kafka_conf_set_ssl_cert()
*Type: see dedicated API* +ssl.ca.location | * | | | low | File or directory path to CA certificate(s) for verifying the broker's key. Defaults: On Windows the system's CA certificates are automatically looked up in the Windows Root certificate store. On Mac OSX this configuration defaults to `probe`. It is recommended to install openssl using Homebrew, to provide CA certificates. On Linux install the distribution's ca-certificates package. If OpenSSL is statically linked or `ssl.ca.location` is set to `probe` a list of standard paths will be probed and the first one found will be used as the default CA certificate location path. If OpenSSL is dynamically linked the OpenSSL library's default path will be used (see `OPENSSLDIR` in `openssl version -a`).
*Type: string* +ssl.ca.pem | * | | | low | CA certificate string (PEM format) for verifying the broker's key.
*Type: string* +ssl_ca | * | | | low | CA certificate as set by rd_kafka_conf_set_ssl_cert()
*Type: see dedicated API* +ssl.ca.certificate.stores | * | | Root | low | Comma-separated list of Windows Certificate stores to load CA certificates from. Certificates will be loaded in the same order as stores are specified. If no certificates can be loaded from any of the specified stores an error is logged and the OpenSSL library's default CA location is used instead. Store names are typically one or more of: MY, Root, Trust, CA.
*Type: string* +ssl.crl.location | * | | | low | Path to CRL for verifying broker's certificate validity.
*Type: string* +ssl.keystore.location | * | | | low | Path to client's keystore (PKCS#12) used for authentication.
*Type: string* +ssl.keystore.password | * | | | low | Client's keystore (PKCS#12) password.
*Type: string* +ssl.providers | * | | | low | Comma-separated list of OpenSSL 3.0.x implementation providers. E.g., "default,legacy".
*Type: string* +ssl.engine.location | * | | | low | **DEPRECATED** Path to OpenSSL engine library. OpenSSL >= 1.1.x required. DEPRECATED: OpenSSL engine support is deprecated and should be replaced by OpenSSL 3 providers.
*Type: string* +ssl.engine.id | * | | dynamic | low | OpenSSL engine id is the name used for loading engine.
*Type: string* +ssl_engine_callback_data | * | | | low | OpenSSL engine callback data (set with rd_kafka_conf_set_engine_callback_data()).
*Type: see dedicated API* +enable.ssl.certificate.verification | * | true, false | true | low | Enable OpenSSL's builtin broker (server) certificate verification. This verification can be extended by the application by implementing a certificate_verify_cb.
*Type: boolean* +ssl.endpoint.identification.algorithm | * | none, https | https | low | Endpoint identification algorithm to validate broker hostname using broker certificate. https - Server (broker) hostname verification as specified in RFC2818. none - No endpoint verification. OpenSSL >= 1.0.2 required.
*Type: enum value* +ssl.certificate.verify_cb | * | | | low | Callback to verify the broker certificate chain.
*Type: see dedicated API* +sasl.mechanisms | * | | GSSAPI | high | SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. **NOTE**: Despite the name only one mechanism must be configured.
*Type: string* +sasl.mechanism | * | | GSSAPI | high | Alias for `sasl.mechanisms`: SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. **NOTE**: Despite the name only one mechanism must be configured.
*Type: string* +sasl.kerberos.service.name | * | | kafka | low | Kerberos principal name that Kafka runs as, not including /hostname@REALM
*Type: string* +sasl.kerberos.principal | * | | kafkaclient | low | This client's Kerberos principal name. (Not supported on Windows, will use the logon user's principal).
*Type: string* +sasl.kerberos.kinit.cmd | * | | kinit -R -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal} \|\| kinit -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal} | low | Shell command to refresh or acquire the client's Kerberos ticket. This command is executed on client creation and every sasl.kerberos.min.time.before.relogin (0=disable). %{config.prop.name} is replaced by corresponding config object value.
*Type: string* +sasl.kerberos.keytab | * | | | low | Path to Kerberos keytab file. This configuration property is only used as a variable in `sasl.kerberos.kinit.cmd` as ` ... -t "%{sasl.kerberos.keytab}"`.
*Type: string* +sasl.kerberos.min.time.before.relogin | * | 0 .. 86400000 | 60000 | low | Minimum time in milliseconds between key refresh attempts. Disable automatic key refresh by setting this property to 0.
*Type: integer* +sasl.username | * | | | high | SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms
*Type: string* +sasl.password | * | | | high | SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism
*Type: string* +sasl.oauthbearer.config | * | | | low | SASL/OAUTHBEARER configuration. The format is implementation-dependent and must be parsed accordingly. The default unsecured token implementation (see https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes space-separated name=value pairs with valid names including principalClaimName, principal, scopeClaimName, scope, and lifeSeconds. The default value for principalClaimName is "sub", the default value for scopeClaimName is "scope", and the default value for lifeSeconds is 3600. The scope value is CSV format with the default value being no/empty scope. For example: `principalClaimName=azp principal=admin scopeClaimName=roles scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions can be communicated to the broker via `extension_NAME=value`. For example: `principal=admin extension_traceId=123`
*Type: string* +enable.sasl.oauthbearer.unsecure.jwt | * | true, false | false | low | Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. This builtin handler should only be used for development or testing, and not in production.
*Type: boolean* +oauthbearer_token_refresh_cb | * | | | low | SASL/OAUTHBEARER token refresh callback (set with rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by rd_kafka_poll(), et.al.). This callback will be triggered when it is time to refresh the client's OAUTHBEARER token. Also see `rd_kafka_conf_enable_sasl_queue()`.
*Type: see dedicated API* +sasl.oauthbearer.method | * | default, oidc | default | low | Set to "default" or "oidc" to control which login method to be used. If set to "oidc", the following properties must also be specified: `sasl.oauthbearer.client.id`, `sasl.oauthbearer.client.secret`, and `sasl.oauthbearer.token.endpoint.url`.
*Type: enum value* +sasl.oauthbearer.client.id | * | | | low | Public identifier for the application. Must be unique across all clients that the authorization server handles. Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string* +sasl.oauthbearer.client.secret | * | | | low | Client secret only known to the application and the authorization server. This should be a sufficiently random string that is not guessable. Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string* +sasl.oauthbearer.scope | * | | | low | Client uses this to specify the scope of the access request to the broker. Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string* +sasl.oauthbearer.extensions | * | | | low | Allow additional information to be provided to the broker. Comma-separated list of key=value pairs. E.g., "supportFeatureX=true,organizationId=sales-emea". Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string* +sasl.oauthbearer.token.endpoint.url | * | | | low | OAuth/OIDC issuer token endpoint HTTP(S) URI used to retrieve token. Only used when `sasl.oauthbearer.method` is set to "oidc".
*Type: string* +plugin.library.paths | * | | | low | List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically.
*Type: string* +interceptors | * | | | low | Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors.
*Type: see dedicated API* +group.id | C | | | high | Client group id string. All clients sharing the same group.id belong to the same group.
*Type: string* +group.instance.id | C | | | medium | Enable static group membership. Static group members are able to leave and rejoin a group within the configured `session.timeout.ms` without prompting a group rebalance. This should be used in combination with a larger `session.timeout.ms` to avoid group rebalances caused by transient unavailability (e.g. process restarts). Requires broker version >= 2.3.0.
*Type: string* +partition.assignment.strategy | C | | range,roundrobin | medium | The name of one or more partition assignment strategies. The elected group leader will use a strategy supported by all members of the group to assign partitions to group members. If there is more than one eligible strategy, preference is determined by the order of this list (strategies earlier in the list have higher priority). Cooperative and non-cooperative (eager) strategies must not be mixed. Available strategies: range, roundrobin, cooperative-sticky.
*Type: string* +session.timeout.ms | C | 1 .. 3600000 | 45000 | high | Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance. The allowed range is configured with the **broker** configuration properties `group.min.session.timeout.ms` and `group.max.session.timeout.ms`. Also see `max.poll.interval.ms`.
*Type: integer* +heartbeat.interval.ms | C | 1 .. 3600000 | 3000 | low | Group session keepalive heartbeat interval.
*Type: integer* +group.protocol.type | C | | consumer | low | Group protocol type. NOTE: Currently, the only supported group protocol type is `consumer`.
*Type: string* +coordinator.query.interval.ms | C | 1 .. 3600000 | 600000 | low | How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment.
*Type: integer* +max.poll.interval.ms | C | 1 .. 86400000 | 300000 | high | Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member. Warning: Offset commits may be not possible at this point. Note: It is recommended to set `enable.auto.offset.store=false` for long-time processing applications and then explicitly store offsets (using offsets_store()) *after* message processing, to make sure offsets are not auto-committed prior to processing has finished. The interval is checked two times per second. See KIP-62 for more information.
*Type: integer* +enable.auto.commit | C | true, false | true | high | Automatically and periodically commit offsets in the background. Note: setting this to false does not prevent the consumer from fetching previously committed start offsets. To circumvent this behaviour set specific start offsets per partition in the call to assign().
*Type: boolean* +auto.commit.interval.ms | C | 0 .. 86400000 | 5000 | medium | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. (0 = disable). This setting is used by the high-level consumer.
*Type: integer* +enable.auto.offset.store | C | true, false | true | high | Automatically store offset of last message provided to application. The offset store is an in-memory store of the next offset to (auto-)commit for each partition.
*Type: boolean* +queued.min.messages | C | 1 .. 10000000 | 100000 | medium | Minimum number of messages per topic+partition librdkafka tries to maintain in the local consumer queue.
*Type: integer* +queued.max.messages.kbytes | C | 1 .. 2097151 | 65536 | medium | Maximum number of kilobytes of queued pre-fetched messages in the local consumer queue. If using the high-level consumer this setting applies to the single consumer queue, regardless of the number of partitions. When using the legacy simple consumer or when separate partition queues are used this setting applies per partition. This value may be overshot by fetch.message.max.bytes. This property has higher priority than queued.min.messages.
*Type: integer* +fetch.wait.max.ms | C | 0 .. 300000 | 500 | low | Maximum time the broker may wait to fill the Fetch response with fetch.min.bytes of messages.
*Type: integer* +fetch.message.max.bytes | C | 1 .. 1000000000 | 1048576 | medium | Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched.
*Type: integer* +max.partition.fetch.bytes | C | 1 .. 1000000000 | 1048576 | medium | Alias for `fetch.message.max.bytes`: Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched.
*Type: integer* +fetch.max.bytes | C | 0 .. 2147483135 | 52428800 | medium | Maximum amount of data the broker shall return for a Fetch request. Messages are fetched in batches by the consumer and if the first message batch in the first non-empty partition of the Fetch request is larger than this value, then the message batch will still be returned to ensure the consumer can make progress. The maximum message batch size accepted by the broker is defined via `message.max.bytes` (broker config) or `max.message.bytes` (broker topic config). `fetch.max.bytes` is automatically adjusted upwards to be at least `message.max.bytes` (consumer config).
*Type: integer* +fetch.min.bytes | C | 1 .. 100000000 | 1 | low | Minimum number of bytes the broker responds with. If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting.
*Type: integer* +fetch.error.backoff.ms | C | 0 .. 300000 | 500 | medium | How long to postpone the next fetch request for a topic+partition in case of a fetch error.
*Type: integer* +offset.store.method | C | none, file, broker | broker | low | **DEPRECATED** Offset commit store method: 'file' - DEPRECATED: local file store (offset.store.path, et.al), 'broker' - broker commit store (requires Apache Kafka 0.8.2 or later on the broker).
*Type: enum value* +isolation.level | C | read_uncommitted, read_committed | read_committed | high | Controls how to read messages written transactionally: `read_committed` - only return transactional messages which have been committed. `read_uncommitted` - return all messages, even transactional messages which have been aborted.
*Type: enum value* +consume_cb | C | | | low | Message consume callback (set with rd_kafka_conf_set_consume_cb())
*Type: see dedicated API* +rebalance_cb | C | | | low | Called after consumer group has been rebalanced (set with rd_kafka_conf_set_rebalance_cb())
*Type: see dedicated API* +offset_commit_cb | C | | | low | Offset commit result propagation callback. (set with rd_kafka_conf_set_offset_commit_cb())
*Type: see dedicated API* +enable.partition.eof | C | true, false | false | low | Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition.
*Type: boolean* +check.crcs | C | true, false | false | medium | Verify CRC32 of consumed messages, ensuring no on-the-wire or on-disk corruption to the messages occurred. This check comes at slightly increased CPU usage.
*Type: boolean* +client.rack | * | | | low | A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config `broker.rack`.
*Type: string* +transactional.id | P | | | high | Enables the transactional producer. The transactional.id is used to identify the same transactional producer instance across process restarts. It allows the producer to guarantee that transactions corresponding to earlier instances of the same producer have been finalized prior to starting any new transactions, and that any zombie instances are fenced off. If no transactional.id is provided, then the producer is limited to idempotent delivery (if enable.idempotence is set). Requires broker version >= 0.11.0.
*Type: string* +transaction.timeout.ms | P | 1000 .. 2147483647 | 60000 | medium | The maximum amount of time in milliseconds that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction. If this value is larger than the `transaction.max.timeout.ms` setting in the broker, the init_transactions() call will fail with ERR_INVALID_TRANSACTION_TIMEOUT. The transaction timeout automatically adjusts `message.timeout.ms` and `socket.timeout.ms`, unless explicitly configured in which case they must not exceed the transaction timeout (`socket.timeout.ms` must be at least 100ms lower than `transaction.timeout.ms`). This is also the default timeout value if no timeout (-1) is supplied to the transactional API methods.
*Type: integer* +enable.idempotence | P | true, false | false | high | When set to `true`, the producer will ensure that messages are successfully produced exactly once and in the original produce order. The following configuration properties are adjusted automatically (if not modified by the user) when idempotence is enabled: `max.in.flight.requests.per.connection=5` (must be less than or equal to 5), `retries=INT32_MAX` (must be greater than 0), `acks=all`, `queuing.strategy=fifo`. Producer instantiation will fail if user-supplied configuration is incompatible.
*Type: boolean* +enable.gapless.guarantee | P | true, false | false | low | **EXPERIMENTAL**: subject to change or removal. When set to `true`, any error that could result in a gap in the produced message series when a batch of messages fails, will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop the producer. Messages failing due to `message.timeout.ms` are not covered by this guarantee. Requires `enable.idempotence=true`.
*Type: boolean* +queue.buffering.max.messages | P | 0 .. 2147483647 | 100000 | high | Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions. A value of 0 disables this limit.
*Type: integer* +queue.buffering.max.kbytes | P | 1 .. 2147483647 | 1048576 | high | Maximum total message size sum allowed on the producer queue. This queue is shared by all topics and partitions. This property has higher priority than queue.buffering.max.messages.
*Type: integer* +queue.buffering.max.ms | P | 0 .. 900000 | 5 | high | Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
*Type: float* +linger.ms | P | 0 .. 900000 | 5 | high | Alias for `queue.buffering.max.ms`: Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
*Type: float* +message.send.max.retries | P | 0 .. 2147483647 | 2147483647 | high | How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true.
*Type: integer* +retries | P | 0 .. 2147483647 | 2147483647 | high | Alias for `message.send.max.retries`: How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true.
*Type: integer* +retry.backoff.ms | P | 1 .. 300000 | 100 | medium | The backoff time in milliseconds before retrying a protocol request.
*Type: integer* +queue.buffering.backpressure.threshold | P | 1 .. 1000000 | 1 | low | The threshold of outstanding not yet transmitted broker requests needed to backpressure the producer's message accumulator. If the number of not yet transmitted requests equals or exceeds this number, produce request creation that would have otherwise been triggered (for example, in accordance with linger.ms) will be delayed. A lower number yields larger and more effective batches. A higher value can improve latency when using compression on slow machines.
*Type: integer* +compression.codec | P | none, gzip, snappy, lz4, zstd | none | medium | compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`.
*Type: enum value* +compression.type | P | none, gzip, snappy, lz4, zstd | none | medium | Alias for `compression.codec`: compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`.
*Type: enum value* +batch.num.messages | P | 1 .. 1000000 | 10000 | medium | Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by batch.size and message.max.bytes.
*Type: integer* +batch.size | P | 1 .. 2147483647 | 1000000 | medium | Maximum size (in bytes) of all messages batched in one MessageSet, including protocol framing overhead. This limit is applied after the first message has been added to the batch, regardless of the first message's size, this is to ensure that messages that exceed batch.size are produced. The total MessageSet size is also limited by batch.num.messages and message.max.bytes.
*Type: integer* +delivery.report.only.error | P | true, false | false | low | Only provide delivery reports for failed messages.
*Type: boolean* +dr_cb | P | | | low | Delivery report callback (set with rd_kafka_conf_set_dr_cb())
*Type: see dedicated API* +dr_msg_cb | P | | | low | Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb())
*Type: see dedicated API* +sticky.partitioning.linger.ms | P | 0 .. 900000 | 10 | low | Delay in milliseconds to wait to assign new sticky partitions for each topic. By default, set to double the time of linger.ms. To disable sticky behavior, set to 0. This behavior affects messages with the key NULL in all cases, and messages with key lengths of zero when the consistent_random partitioner is in use. These messages would otherwise be assigned randomly. A higher value allows for more effective batching of these messages.
*Type: integer* + + +## Topic configuration properties + +Property | C/P | Range | Default | Importance | Description +-----------------------------------------|-----|-----------------|--------------:|------------| -------------------------- +request.required.acks | P | -1 .. 1000 | -1 | high | This field indicates the number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *-1* or *all*=Broker will block until message is committed by all in sync replicas (ISRs). If there are less than `min.insync.replicas` (broker configuration) in the ISR set the produce request will fail.
*Type: integer* +acks | P | -1 .. 1000 | -1 | high | Alias for `request.required.acks`: This field indicates the number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *-1* or *all*=Broker will block until message is committed by all in sync replicas (ISRs). If there are less than `min.insync.replicas` (broker configuration) in the ISR set the produce request will fail.
*Type: integer* +request.timeout.ms | P | 1 .. 900000 | 30000 | medium | The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0.
*Type: integer* +message.timeout.ms | P | 0 .. 2147483647 | 300000 | high | Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. The message timeout is automatically adjusted to `transaction.timeout.ms` if `transactional.id` is configured.
*Type: integer* +delivery.timeout.ms | P | 0 .. 2147483647 | 300000 | high | Alias for `message.timeout.ms`: Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. The message timeout is automatically adjusted to `transaction.timeout.ms` if `transactional.id` is configured.
*Type: integer* +queuing.strategy | P | fifo, lifo | fifo | low | **EXPERIMENTAL**: subject to change or removal. **DEPRECATED** Producer queuing strategy. FIFO preserves produce ordering, while LIFO prioritizes new messages.
*Type: enum value* +produce.offset.report | P | true, false | false | low | **DEPRECATED** No longer used.
*Type: boolean* +partitioner | P | | consistent_random | high | Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer.), `fnv1a` - FNV-1a hash of key (NULL keys are mapped to single partition), `fnv1a_random` - FNV-1a hash of key (NULL keys are randomly partitioned).
*Type: string* +partitioner_cb | P | | | low | Custom partitioner callback (set with rd_kafka_topic_conf_set_partitioner_cb())
*Type: see dedicated API* +msg_order_cmp | P | | | low | **EXPERIMENTAL**: subject to change or removal. **DEPRECATED** Message queue ordering comparator (set with rd_kafka_topic_conf_set_msg_order_cmp()). Also see `queuing.strategy`.
*Type: see dedicated API* +opaque | * | | | low | Application opaque (set with rd_kafka_topic_conf_set_opaque())
*Type: see dedicated API* +compression.codec | P | none, gzip, snappy, lz4, zstd, inherit | inherit | high | Compression codec to use for compressing message sets. inherit = inherit global compression.codec configuration.
*Type: enum value* +compression.type | P | none, gzip, snappy, lz4, zstd | none | medium | Alias for `compression.codec`: compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`.
*Type: enum value* +compression.level | P | -1 .. 12 | -1 | medium | Compression level parameter for algorithm selected by configuration property `compression.codec`. Higher values will result in better compression at the cost of more CPU usage. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level.
*Type: integer* +auto.commit.enable | C | true, false | true | low | **DEPRECATED** [**LEGACY PROPERTY:** This property is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `enable.auto.commit` property must be used instead]. If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). Offsets will be written to broker or local file according to offset.store.method.
*Type: boolean* +enable.auto.commit | C | true, false | true | low | **DEPRECATED** Alias for `auto.commit.enable`: [**LEGACY PROPERTY:** This property is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `enable.auto.commit` property must be used instead]. If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). Offsets will be written to broker or local file according to offset.store.method.
*Type: boolean* +auto.commit.interval.ms | C | 10 .. 86400000 | 60000 | high | [**LEGACY PROPERTY:** This setting is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `auto.commit.interval.ms` property must be used instead]. The frequency in milliseconds that the consumer offsets are committed (written) to offset storage.
*Type: integer* +auto.offset.reset | C | smallest, earliest, beginning, largest, latest, end, error | largest | high | Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
*Type: enum value* +offset.store.path | C | | . | low | **DEPRECATED** Path to local file for storing offsets. If the path is a directory a filename will be automatically generated in that directory based on the topic and partition. File-based offset storage will be removed in a future version.
*Type: string* +offset.store.sync.interval.ms | C | -1 .. 86400000 | -1 | low | **DEPRECATED** fsync() interval for the offset file, in milliseconds. Use -1 to disable syncing, and 0 for immediate sync after each write. File-based offset storage will be removed in a future version.
*Type: integer* +offset.store.method | C | file, broker | broker | low | **DEPRECATED** Offset commit store method: 'file' - DEPRECATED: local file store (offset.store.path, et.al), 'broker' - broker commit store (requires "group.id" to be configured and Apache Kafka 0.8.2 or later on the broker.).
*Type: enum value* +consume.callback.max.messages | C | 0 .. 1000000 | 0 | low | Maximum number of messages to dispatch in one `rd_kafka_consume_callback*()` call (0 = unlimited)
*Type: integer* + +### C/P legend: C = Consumer, P = Producer, * = both diff --git a/src/fluent-bit/lib/librdkafka-2.1.0/CONTRIBUTING.md b/src/fluent-bit/lib/librdkafka-2.1.0/CONTRIBUTING.md new file mode 100644 index 000000000..45ab45f9b --- /dev/null +++ b/src/fluent-bit/lib/librdkafka-2.1.0/CONTRIBUTING.md @@ -0,0 +1,425 @@ +# Contributing to librdkafka + +(This document is based on [curl's CONTRIBUTE.md](https://github.com/curl/curl/blob/master/docs/CONTRIBUTE.md) - thank you!) + +This document is intended to offer guidelines on how to best contribute to the +librdkafka project. This concerns new features as well as bug fixes and +general improvements. + +### License and copyright + +When contributing with code, you agree to put your changes and new code under +the same license librdkafka is already using unless stated and agreed +otherwise. + +When changing existing source code, you do not alter the copyright of the +original file(s). The copyright will still be owned by the original creator(s) +or those who have been assigned copyright by the original author(s). + +By submitting a patch to the librdkafka, you are assumed to have the right +to the code and to be allowed by your employer or whatever to hand over that +patch/code to us. We will credit you for your changes as far as possible, to +give credit but also to keep a trace back to who made what changes. Please +always provide us with your full real name when contributing! + +Official librdkafka project maintainer(s) assume ownership and copyright +ownership of all accepted submissions. + + +## Write a good patch + +### API and ABI compatibility guarantees + +librdkafka maintains a strict API and ABI compatibility guarantee, we guarantee +not to break existing applications and we honour the SONAME version. + +**Note:** ABI compatibility is guaranteed only for the C library, not C++. 
+ +**Note to librdkafka maintainers:** + +Don't think we can or should bump the SONAME version, it will break all +existing applications relying on librdkafka, and there's no change important +enough to warrant that. +Instead deprecate (but keep) old APIs and add new better APIs as required. +Deprecate APIs through documentation (`@deprecate ..`) rather than +compiler hints (`RD_DEPRECATED`) - since the latter will cause compilation +warnings/errors for users. + + +#### Changes to existing APIs + +Existing public APIs MUST NEVER be changed, as this would be a breaking API +and ABI change. This line must never be crossed. + +This means that no changes are allowed to: + * public function or method signatures - arguments, types, return values. + * public structs - existing fields may not be modified and new fields must + not be added. + + +As for semantic changes (i.e., a function changes its behaviour), these are +allowed under the following conditions: + + * the existing behaviour that is changed is not documented and not widely + relied upon. Typically this revolves around what error codes a function + returns. + * the existing behaviour is well known but is clearly wrong and consistently + trips people up. + +All such changes must be clearly stated in the "Upgrade considerations" section +of the release in CHANGELOG.md. + + +#### New public APIs + +Since changes to existing APIs are strictly limited to the above rules, it is +also clear that new APIs must be delicately designed to be complete and future +proof, since once they've been introduced they can never be changed. + + * Never add public structs - there are some public structs in librdkafka + and they were all mistakes, they've all been headaches. + Instead add private types and provide accessor methods to set/get values. + This allows future extension without breaking existing applications. 
+ * Avoid adding synchronous APIs, try to make them asynch by the use of + `rd_kafka_queue_t` result queues, if possible. + This may complicate the APIs a bit, but they're most of the time abstracted + in higher-level language clients and it allows both synchronous and + asynchronous usage. + + + +### Portability + +librdkafka is highly portable and needs to stay that way; this means we're +limited to almost-but-not-quite C99, and standard library (libc, et.al) +functions that are generally available across platforms. + +Also avoid adding new dependencies since dependency availability across +platforms and package managers are a common problem. + +If an external dependency is required, make sure that it is available as a +vcpkg, and also add it as a source build dependency to mklove +(see mklove/modules/configure.libcurl for an example) so that it can be built +and linked statically into librdkafka as part of the packaging process. + +Less is more. Don't try to be fancy, be boring. + + +### Follow code style + +When writing C code, follow the code style already established in +the project. Consistent style makes code easier to read and mistakes less +likely to happen. + +clang-format is used to check, and fix, the style for C/C++ files, +while flake8 and autopep8 is used for the Python scripts. + +You must check the style before committing by running `make style-check-changed` +from the top-level directory, and if any style errors are reported you can +automatically fix them using `make style-fix-changed` (or just run +that command directly). + +The Python code may need some manual fixing since autopep8 is unable to fix +all warnings reported by flake8, in particular it will not split long lines, +in which case a ` # noqa: E501` may be needed to turn off the warning. + +See the end of this document for the C style guide to use in librdkafka. 
+ + +### Write Separate Changes + +It is annoying when you get a huge patch from someone that is said to fix 511 +odd problems, but discussions and opinions don't agree with 510 of them - or +509 of them were already fixed in a different way. Then the person merging +this change needs to extract the single interesting patch from somewhere +within the huge pile of source, and that gives a lot of extra work. + +Preferably, each fix that corrects a problem should be in its own patch/commit +with its own description/commit message stating exactly what it corrects so +that all changes can be selectively applied by the maintainer or other +interested parties. + +Also, separate changes enable bisecting much better when we track problems +and regressions in the future. + +### Patch Against Recent Sources + +Please try to make your patches against the latest master branch. + +### Test Cases + +Bugfixes should also include a new test case in the regression test suite +that verifies the bug is fixed. +Create a new tests/00<testnumber>-<short_description>.c file and +try to reproduce the issue in its most simple form. +Verify that the test case fails for earlier versions and passes with your +bugfix in-place. + +New features and APIs should also result in an added test case. + +Submitted patches must pass all existing tests. +For more information on the test suite see [tests/README.md]. + + + +## How to get your changes into the main sources + +File a [pull request on github](https://github.com/edenhill/librdkafka/pulls) + +Your change will be reviewed and discussed there and you will be +expected to correct flaws pointed out and update accordingly, or the change +risks stalling and eventually just getting deleted without action. As a submitter +of a change, you are the owner of that change until it has been merged. + +Make sure to monitor your PR on github and answer questions and/or +fix nits/flaws. This is very important. 
We will take lack of replies as a +sign that you're not very anxious to get your patch accepted and we tend to +simply drop such changes. + +When you adjust your pull requests after review, please squash the +commits so that we can review the full updated version more easily +and keep history cleaner. + +For example: + + # Interactive rebase to let you squash/fixup commits + $ git rebase -i master + + # Mark fixes-on-fixes commits as 'fixup' (or just 'f') in the + # first column. These will be silently integrated into the + # previous commit, so make sure to move the fixup-commit to + # the line beneath the parent commit. + + # Since this probably rewrote the history of previously pushed + # commits you will need to make a force push, which is usually + # a bad idea but works well for pull requests. + $ git push --force origin your_feature_branch + + +### Write good commit messages + +A short guide to how to write good commit messages. + + ---- start ---- + [area]: [short line describing the main effect] [(#issuenumber)] + -- empty line -- + [full description, no wider than 72 columns, that describes as much as + possible as to why this change is made, and possibly what things + it fixes and everything else that is related] + ---- stop ---- + +Example: + + cgrp: Restart query timer on all heartbeat failures (#10023) + + If unhandled errors were received in HeartbeatResponse + the cgrp could get stuck in a state where it would not + refresh its coordinator. + + +**Important**: Rebase your PR branch on top of master (`git rebase -i master`) + and squash interim commits (to make a clean and readable git history) + before pushing. Use force push to keep your history clean even after + the initial PR push. + +**Note**: Good PRs with bad commit messages or messy commit history + such as "fixed review comment", will be squashed up + into a single commit with a proper commit message. 
+ + +### Add changelog + +If the changes in the PR affect the end user in any way, such as for a user +visible bug fix, new feature, API or doc change, etc, a release changelog item +needs to be added to [CHANGELOG.md](CHANGELOG.md) for the next release. + +Add a single line to the appropriate section (Enhancements, Fixes, ..) +outlining the change, an issue number (if any), and your name or GitHub +user id for attribution. + +E.g.: +``` +## Enhancements + * Improve commit() async parameter documentation (Paul Nit, #123) +``` + + + +# librdkafka C style and naming guide + +*Note: The code format style is enforced by our clang-format and pep8 rules, +so that is not covered here.* + +## C standard "C98" + +This is a mix of C89 and C99, to be compatible with old MSVC versions. + +Notably, it is C99 with the following limitations: + + * No variable declarations after statements. + * No in-line variable declarations. + + +## Function and globals naming + +Use self-explanatory hierarchical snake-case naming. +Pretty much all symbols should start with `rd_kafka_`, followed by +their subsystem (e.g., `cgrp`, `broker`, `buf`, etc..), followed by an +action (e.g., `find`, `get`, `clear`, ..). + +The exceptions are: + - Protocol requests and fields, use their Apache Kafka CamelCase names, e.g.: + `rd_kafka_ProduceRequest()` and `int16_t ErrorCode`. + - Public APIs that closely mimic the Apache Kafka Java counterpart, e.g., + the Admin API: `rd_kafka_DescribeConsumerGroups()`. + + +## Variable naming + +For existing types use the type prefix as variable name. +The type prefix is typically the first part of struct member fields. +Example: + + * `rd_kafka_broker_t` has field names starting with `rkb_..`, thus broker + variable names should be named `rkb` + +Be consistent with using the same variable name for the same type throughout +the code, it makes reading the code much easier as the type can be easily +inferred from the variable. 
+ +For other types use reasonably concise but descriptive names. +`i` and `j` are typical int iterators. + +## Variable declaration + +Variables must be declared at the head of a scope, no in-line variable +declarations after statements are allowed. + +## Function parameters/arguments + +For internal functions assume that all function parameters are properly +specified, there is no need to check arguments for non-NULL, etc. +Any misuse internally is a bug, and not something we need to preemptively +protect against - the test suites should cover most of the code anyway - so +put your efforts there instead. + +For arguments that may be NULL, i.e., optional arguments, we explicitly +document in the function docstring that the argument is optional (NULL), +but there is no need to do this for non-optional arguments. + +## Indenting + +Use 8 spaces indent, no tabs, same as the Linux kernel. +In emacs, use `c-set-style "linux"`. +For C++, use Google's C++ style. + +Fix formatting issues by running `make style-fix-changed` prior to committing. + + +## Comments + +Use `/* .. */` comments, not `// ..` + +For functions, use doxygen syntax, e.g.: + + /** + * @brief + * .. + * @returns + */ + + +Make sure to comment non-obvious code and situations where the full +context of an operation is not easily graspable. + +Also make sure to update existing comments when the code changes. + + +## Line length + +Try hard to keep line length below 80 characters, when this is not possible +exceed it with reason. + + +## Braces + +Braces go on the same line as their enveloping statement: + + int some_func (..) { + while (1) { + if (1) { + do something; + .. + } else { + do something else; + .. 
+ } + } + + /* Single line scopes should not have braces */ + if (1) + hi(); + else if (2) + /* Say hello */ + hello(); + else + bye(); + + +## Spaces + +All expression parentheses should be prefixed and suffixed with a single space: + + int some_func (int a) { + + if (1) + ....; + + for (i = 0 ; i < 19 ; i++) { + + + } + } + + +Use space around operators: + + int a = 2; + + if (b >= 3) + c += 2; + +Except for these: + + d++; + --e; + + +## New block on new line + +New blocks should be on a new line: + + if (1) + new(); + else + old(); + + +## Parentheses + +Don't assume the reader knows C operator precedence by heart for complex +statements, add parentheses to ease readability and make the intent clear. + + +## ifdef hell + +Avoid ifdef's as much as possible. +Platform support checking should be performed in configure.librdkafka. + + + + + +# librdkafka C++ style guide + +Follow [Google's C++ style guide](https://google.github.io/styleguide/cppguide.html) diff --git a/src/fluent-bit/lib/librdkafka-2.1.0/Doxyfile b/src/fluent-bit/lib/librdkafka-2.1.0/Doxyfile new file mode 100644 index 000000000..33fc31a4e --- /dev/null +++ b/src/fluent-bit/lib/librdkafka-2.1.0/Doxyfile @@ -0,0 +1,2375 @@ +# Doxyfile 1.8.9.1 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). 
+ +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "librdkafka" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = "The Apache Kafka C/C++ client library" + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +#PROJECT_LOGO = kafka_logo.png + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. 
If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = staging-docs + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. 
+ +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. 
If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. 
If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. 
+ +ALIASES = "locality=@par Thread restriction:" +ALIASES += "locks=@par Lock restriction:" +# Automatically escape @REALM in CONFIGURATION.md +ALIASES += "REALM=\@REALM" + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. 
The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: +# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: +# Fortran. In the later case the parser tries to guess whether the code is fixed +# or free formatted code, this is the default for Fortran type files), VHDL. For +# instance to make doxygen treat .inc files as Fortran files (default is PHP), +# and .f files as C (default is Fortran), use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. 
func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. 
+ +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = YES + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. 
The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. 
If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO, these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. 
If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES, upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. 
+ +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = NO + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. 
+ +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. 
The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = NO + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. 
You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = YES + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# Treat all warnings as errors. +WARN_AS_ERROR = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. 
+ +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. +# Note: If this tag is empty the current directory is searched. 
+ +INPUT = mainpage.doxy INTRODUCTION.md CONFIGURATION.md STATISTICS.md src/rdkafka.h src-cpp/rdkafkacpp.h + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank the +# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, +# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, +# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, +# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, +# *.qsf, *.as and *.js. + +FILE_PATTERNS = + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. 
+ +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = src + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. 
Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +#   <filter> <input-file> +# +# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). 
This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. 
+ +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the +# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the +# cost of reduced performance. 
This can be particularly helpful with template +# rich C++ code for which doxygen's built-in parser lacks the necessary type +# information. +# Note: The availability of this option depends on whether or not doxygen was +# compiled with the --with-libclang option. +# The default value is: NO. + +#CLANG_ASSISTED_PARSING = NO + +# If clang assisted parsing is enabled you can provide the compiler with command +# line options that you would normally use when invoking the compiler. Note that +# the include paths will already be set by doxygen for the files and directories +# specified with INPUT and INCLUDE_PATH. +# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. + +#CLANG_OPTIONS = + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. 
+ +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. 
Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "librdkafka documentation" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = se.edenhill.librdkafka + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = se.edenhill + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Magnus Edenhill + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. 
The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the primary .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
+ +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = se.edenhill.librdkafka + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. 
+# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = se.edenhill.librdkafka + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. 
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = YES
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 1
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output.
+# When enabled you may also need to install MathJax separately and configure the
+# path to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. 
For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/